xref: /linux/drivers/crypto/stm32/stm32-hash.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of STM32 Crypto driver for Linux.
4  *
5  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
7  */
8 
9 #include <crypto/engine.h>
10 #include <crypto/internal/hash.h>
11 #include <crypto/md5.h>
12 #include <crypto/scatterwalk.h>
13 #include <crypto/sha1.h>
14 #include <crypto/sha2.h>
15 #include <crypto/sha3.h>
16 #include <linux/clk.h>
17 #include <linux/delay.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/dmaengine.h>
20 #include <linux/interrupt.h>
21 #include <linux/iopoll.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/reset.h>
28 #include <linux/string.h>
29 
30 #define HASH_CR				0x00
31 #define HASH_DIN			0x04
32 #define HASH_STR			0x08
33 #define HASH_UX500_HREG(x)		(0x0c + ((x) * 0x04))
34 #define HASH_IMR			0x20
35 #define HASH_SR				0x24
36 #define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
37 #define HASH_HREG(x)			(0x310 + ((x) * 0x04))
38 #define HASH_HWCFGR			0x3F0
39 #define HASH_VER			0x3F4
40 #define HASH_ID				0x3F8
41 
42 /* Control Register */
43 #define HASH_CR_INIT			BIT(2)
44 #define HASH_CR_DMAE			BIT(3)
45 #define HASH_CR_DATATYPE_POS		4
46 #define HASH_CR_MODE			BIT(6)
47 #define HASH_CR_ALGO_POS		7
48 #define HASH_CR_MDMAT			BIT(13)
49 #define HASH_CR_DMAA			BIT(14)
50 #define HASH_CR_LKEY			BIT(16)
51 
52 /* Interrupt */
53 #define HASH_DINIE			BIT(0)
54 #define HASH_DCIE			BIT(1)
55 
56 /* Interrupt Mask */
57 #define HASH_MASK_CALC_COMPLETION	BIT(0)
58 #define HASH_MASK_DATA_INPUT		BIT(1)
59 
60 /* Status Flags */
61 #define HASH_SR_DATA_INPUT_READY	BIT(0)
62 #define HASH_SR_OUTPUT_READY		BIT(1)
63 #define HASH_SR_DMA_ACTIVE		BIT(2)
64 #define HASH_SR_BUSY			BIT(3)
65 
66 /* STR Register */
67 #define HASH_STR_NBLW_MASK		GENMASK(4, 0)
68 #define HASH_STR_DCAL			BIT(8)
69 
70 /* HWCFGR Register */
71 #define HASH_HWCFG_DMA_MASK		GENMASK(3, 0)
72 
73 /* Context swap register */
74 #define HASH_CSR_NB_SHA256_HMAC		54
75 #define HASH_CSR_NB_SHA256		38
76 #define HASH_CSR_NB_SHA512_HMAC		103
77 #define HASH_CSR_NB_SHA512		91
78 #define HASH_CSR_NB_SHA3_HMAC		88
79 #define HASH_CSR_NB_SHA3		72
80 #define HASH_CSR_NB_MAX			HASH_CSR_NB_SHA512_HMAC
81 
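/* Driver state flags, tracked in hdev->flags and mirrored in rctx->state.flags */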
82 #define HASH_FLAGS_INIT			BIT(0)
83 #define HASH_FLAGS_OUTPUT_READY		BIT(1)
84 #define HASH_FLAGS_CPU			BIT(2)
85 #define HASH_FLAGS_DMA_ACTIVE		BIT(3)
86 #define HASH_FLAGS_HMAC_INIT		BIT(4)
87 #define HASH_FLAGS_HMAC_FINAL		BIT(5)
88 #define HASH_FLAGS_HMAC_KEY		BIT(6)
89 #define HASH_FLAGS_SHA3_MODE		BIT(7)
90 #define HASH_FLAGS_FINAL		BIT(15)
91 #define HASH_FLAGS_FINUP		BIT(16)
92 #define HASH_FLAGS_ALGO_MASK		GENMASK(20, 17)
93 #define HASH_FLAGS_ALGO_SHIFT		17
94 #define HASH_FLAGS_ERRORS		BIT(21)
95 #define HASH_FLAGS_EMPTY		BIT(22)
96 #define HASH_FLAGS_HMAC			BIT(23)
97 #define HASH_FLAGS_SGS_COPIED		BIT(24)
98 
99 #define HASH_OP_UPDATE			1
100 #define HASH_OP_FINAL			2
101 
102 #define HASH_BURST_LEVEL		4
103 
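/*
 * Values for the DATATYPE field of HASH_CR, selecting how input words are
 * reordered before processing (no swap, half-word, byte or bit swapping).
 * This driver always programs HASH_DATA_8_BITS, see stm32_hash_init().
 */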
104 enum stm32_hash_data_format {
105 	HASH_DATA_32_BITS		= 0x0,
106 	HASH_DATA_16_BITS		= 0x1,
107 	HASH_DATA_8_BITS		= 0x2,
108 	HASH_DATA_1_BIT			= 0x3
109 };
110 
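/*
 * The driver buffers up to one block plus one 32-bit word (the block length
 * set in stm32_hash_init()); SHA3-224 has the largest block size (144 bytes).
 */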
111 #define HASH_BUFLEN			(SHA3_224_BLOCK_SIZE + 4)
112 #define HASH_MAX_KEY_SIZE		(SHA512_BLOCK_SIZE * 8)
113 
114 enum stm32_hash_algo {
115 	HASH_SHA1			= 0,
116 	HASH_MD5			= 1,
117 	HASH_SHA224			= 2,
118 	HASH_SHA256			= 3,
119 	HASH_SHA3_224			= 4,
120 	HASH_SHA3_256			= 5,
121 	HASH_SHA3_384			= 6,
122 	HASH_SHA3_512			= 7,
123 	HASH_SHA384			= 12,
124 	HASH_SHA512			= 15,
125 };
126 
127 enum ux500_hash_algo {
128 	HASH_SHA256_UX500		= 0,
129 	HASH_SHA1_UX500			= 1,
130 };
131 
132 #define HASH_AUTOSUSPEND_DELAY		50
133 
134 struct stm32_hash_ctx {
135 	struct stm32_hash_dev	*hdev;
136 	struct crypto_shash	*xtfm;
137 	unsigned long		flags;
138 
139 	u8			key[HASH_MAX_KEY_SIZE];
140 	int			keylen;
141 };
142 
143 struct stm32_hash_state {
144 	u32			flags;
145 
146 	u16			bufcnt;
147 	u16			blocklen;
148 
149 	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
150 
151 	/* hash state */
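	/* saved HASH_IMR, HASH_STR and HASH_CR, followed by the HASH_CSR bank */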
152 	u32			hw_context[3 + HASH_CSR_NB_MAX];
153 };
154 
155 struct stm32_hash_request_ctx {
156 	struct stm32_hash_dev	*hdev;
157 	unsigned long		op;
158 
159 	u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
160 	size_t			digcnt;
161 
162 	struct scatterlist	*sg;
163 	struct scatterlist	sgl[2]; /* scatterlist used to meet alignment requirements */
164 	unsigned int		offset;
165 	unsigned int		total;
166 	struct scatterlist	sg_key;
167 
168 	dma_addr_t		dma_addr;
169 	size_t			dma_ct;
170 	int			nents;
171 
172 	u8			data_type;
173 
174 	struct stm32_hash_state state;
175 };
176 
177 struct stm32_hash_algs_info {
178 	struct ahash_engine_alg	*algs_list;
179 	size_t			size;
180 };
181 
182 struct stm32_hash_pdata {
183 	const int				alg_shift;
184 	const struct stm32_hash_algs_info	*algs_info;
185 	size_t					algs_info_size;
186 	bool					has_sr;
187 	bool					has_mdmat;
188 	bool					context_secured;
189 	bool					broken_emptymsg;
190 	bool					ux500;
191 };
192 
193 struct stm32_hash_dev {
194 	struct list_head	list;
195 	struct device		*dev;
196 	struct clk		*clk;
197 	struct reset_control	*rst;
198 	void __iomem		*io_base;
199 	phys_addr_t		phys_base;
200 	u8			xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
201 	u32			dma_mode;
202 	bool			polled;
203 
204 	struct ahash_request	*req;
205 	struct crypto_engine	*engine;
206 
207 	unsigned long		flags;
208 
209 	struct dma_chan		*dma_lch;
210 	struct completion	dma_completion;
211 
212 	const struct stm32_hash_pdata	*pdata;
213 };
214 
215 struct stm32_hash_drv {
216 	struct list_head	dev_list;
217 	spinlock_t		lock; /* protects access to dev_list */
218 };
219 
220 static struct stm32_hash_drv stm32_hash = {
221 	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
222 	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
223 };
224 
225 static void stm32_hash_dma_callback(void *param);
226 static int stm32_hash_prepare_request(struct ahash_request *req);
227 static void stm32_hash_unprepare_request(struct ahash_request *req);
228 
229 static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
230 {
231 	return readl_relaxed(hdev->io_base + offset);
232 }
233 
234 static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
235 				    u32 offset, u32 value)
236 {
237 	writel_relaxed(value, hdev->io_base + offset);
238 }
239 
240 /**
241  * stm32_hash_wait_busy - wait until the hash processor is available. Returns an
242  * error if the hash core is still processing a block of data after 10 ms.
243  * @hdev: the stm32_hash_dev device.
244  */
245 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
246 {
247 	u32 status;
248 
249 	/* The Ux500 lacks the special status register; poll the DCAL bit instead */
250 	if (!hdev->pdata->has_sr)
251 		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
252 						  !(status & HASH_STR_DCAL), 10, 10000);
253 
254 	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
255 				   !(status & HASH_SR_BUSY), 10, 10000);
256 }
257 
258 /**
259  * stm32_hash_set_nblw - set the number of valid bits in the last word (NBLW).
260  * @hdev: the stm32_hash_dev device.
261  * @length: the data length in bytes; only length % 4 bytes of the last word are valid.
262  */
263 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
264 {
265 	u32 reg;
266 
267 	reg = stm32_hash_read(hdev, HASH_STR);
268 	reg &= ~(HASH_STR_NBLW_MASK);
269 	reg |= (8U * ((length) % 4U));
270 	stm32_hash_write(hdev, HASH_STR, reg);
271 }
272 
273 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
274 {
275 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
276 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
277 	u32 reg;
278 	int keylen = ctx->keylen;
279 	void *key = ctx->key;
280 
281 	if (keylen) {
282 		stm32_hash_set_nblw(hdev, keylen);
283 
284 		while (keylen > 0) {
285 			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
286 			keylen -= 4;
287 			key += 4;
288 		}
289 
290 		reg = stm32_hash_read(hdev, HASH_STR);
291 		reg |= HASH_STR_DCAL;
292 		stm32_hash_write(hdev, HASH_STR, reg);
293 
294 		return -EINPROGRESS;
295 	}
296 
297 	return 0;
298 }
299 
300 /**
301  * stm32_hash_write_ctrl - Initialize the hash processor, only if
302  * HASH_FLAGS_INIT is not yet set.
303  * @hdev: the stm32_hash_dev device
304  */
305 static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
306 {
307 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
308 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
309 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
310 	struct stm32_hash_state *state = &rctx->state;
311 	u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
312 
313 	u32 reg = HASH_CR_INIT;
314 
315 	if (!(hdev->flags & HASH_FLAGS_INIT)) {
316 		if (hdev->pdata->ux500) {
317 			reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
318 		} else {
319 			if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
320 				reg |= ((alg & BIT(1)) << 17) |
321 				       ((alg & BIT(0)) << HASH_CR_ALGO_POS);
322 			else
323 				reg |= alg << hdev->pdata->alg_shift;
324 		}
325 
326 		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
327 
328 		if (state->flags & HASH_FLAGS_HMAC) {
329 			hdev->flags |= HASH_FLAGS_HMAC;
330 			reg |= HASH_CR_MODE;
331 			if (ctx->keylen > crypto_ahash_blocksize(tfm))
332 				reg |= HASH_CR_LKEY;
333 		}
334 
335 		if (!hdev->polled)
336 			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
337 
338 		stm32_hash_write(hdev, HASH_CR, reg);
339 
340 		hdev->flags |= HASH_FLAGS_INIT;
341 
342 		/*
343 		 * Once the first block plus one extra word has been filled up,
344 		 * only one more block is needed to start a partial computation.
345 		 */
346 		rctx->state.blocklen -= sizeof(u32);
347 
348 		dev_dbg(hdev->dev, "Write Control %x\n", reg);
349 	}
350 }
351 
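/**
 * stm32_hash_append_sg - Copy data from the request scatterlist into the
 * state buffer, until a full block (state->blocklen) has been gathered or
 * the request runs out of data.
 * @rctx: the request context.
 */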
352 static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
353 {
354 	struct stm32_hash_state *state = &rctx->state;
355 	size_t count;
356 
357 	while ((state->bufcnt < state->blocklen) && rctx->total) {
358 		count = min(rctx->sg->length - rctx->offset, rctx->total);
359 		count = min_t(size_t, count, state->blocklen - state->bufcnt);
360 
361 		if (count <= 0) {
362 			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
363 				rctx->sg = sg_next(rctx->sg);
364 				continue;
365 			} else {
366 				break;
367 			}
368 		}
369 
370 		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
371 					 rctx->sg, rctx->offset, count, 0);
372 
373 		state->bufcnt += count;
374 		rctx->offset += count;
375 		rctx->total -= count;
376 
377 		if (rctx->offset == rctx->sg->length) {
378 			rctx->sg = sg_next(rctx->sg);
379 			if (rctx->sg)
380 				rctx->offset = 0;
381 			else
382 				rctx->total = 0;
383 		}
384 	}
385 }
386 
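/**
 * stm32_hash_xmit_cpu - Push data into HASH_DIN using CPU writes.
 * @hdev: the stm32_hash_dev device.
 * @buf: the data to write, one 32-bit word at a time.
 * @length: number of bytes to write.
 * @final: when set, program NBLW and set DCAL to start the digest
 * computation, reloading the HMAC key first if needed.
 */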
387 static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
388 			       const u8 *buf, size_t length, int final)
389 {
390 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
391 	struct stm32_hash_state *state = &rctx->state;
392 	unsigned int count, len32;
393 	const u32 *buffer = (const u32 *)buf;
394 	u32 reg;
395 
396 	if (final) {
397 		hdev->flags |= HASH_FLAGS_FINAL;
398 
399 		/* Do not process empty messages if hw is buggy. */
400 		if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
401 		    hdev->pdata->broken_emptymsg) {
402 			state->flags |= HASH_FLAGS_EMPTY;
403 			return 0;
404 		}
405 	}
406 
407 	len32 = DIV_ROUND_UP(length, sizeof(u32));
408 
409 	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
410 		__func__, length, final, len32);
411 
412 	hdev->flags |= HASH_FLAGS_CPU;
413 
414 	stm32_hash_write_ctrl(hdev);
415 
416 	if (stm32_hash_wait_busy(hdev))
417 		return -ETIMEDOUT;
418 
419 	if ((hdev->flags & HASH_FLAGS_HMAC) &&
420 	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
421 		hdev->flags |= HASH_FLAGS_HMAC_KEY;
422 		stm32_hash_write_key(hdev);
423 		if (stm32_hash_wait_busy(hdev))
424 			return -ETIMEDOUT;
425 	}
426 
427 	for (count = 0; count < len32; count++)
428 		stm32_hash_write(hdev, HASH_DIN, buffer[count]);
429 
430 	if (final) {
431 		if (stm32_hash_wait_busy(hdev))
432 			return -ETIMEDOUT;
433 
434 		stm32_hash_set_nblw(hdev, length);
435 		reg = stm32_hash_read(hdev, HASH_STR);
436 		reg |= HASH_STR_DCAL;
437 		stm32_hash_write(hdev, HASH_STR, reg);
438 		if (hdev->flags & HASH_FLAGS_HMAC) {
439 			if (stm32_hash_wait_busy(hdev))
440 				return -ETIMEDOUT;
441 			stm32_hash_write_key(hdev);
442 		}
443 		return -EINPROGRESS;
444 	}
445 
446 	return 0;
447 }
448 
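/**
 * hash_swap_reg - Return the number of HASH_CSR context-swap registers to
 * save and restore for the algorithm selected in the request state.
 * @rctx: the request context.
 */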
449 static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
450 {
451 	struct stm32_hash_state *state = &rctx->state;
452 
453 	switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
454 		HASH_FLAGS_ALGO_SHIFT) {
455 	case HASH_MD5:
456 	case HASH_SHA1:
457 	case HASH_SHA224:
458 	case HASH_SHA256:
459 		if (state->flags & HASH_FLAGS_HMAC)
460 			return HASH_CSR_NB_SHA256_HMAC;
461 		else
462 			return HASH_CSR_NB_SHA256;
463 		break;
464 
465 	case HASH_SHA384:
466 	case HASH_SHA512:
467 		if (state->flags & HASH_FLAGS_HMAC)
468 			return HASH_CSR_NB_SHA512_HMAC;
469 		else
470 			return HASH_CSR_NB_SHA512;
471 		break;
472 
473 	case HASH_SHA3_224:
474 	case HASH_SHA3_256:
475 	case HASH_SHA3_384:
476 	case HASH_SHA3_512:
477 		if (state->flags & HASH_FLAGS_HMAC)
478 			return HASH_CSR_NB_SHA3_HMAC;
479 		else
480 			return HASH_CSR_NB_SHA3;
481 		break;
482 
483 	default:
484 		return -EINVAL;
485 	}
486 }
487 
488 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
489 {
490 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
491 	struct stm32_hash_state *state = &rctx->state;
492 	int bufcnt, err = 0, final;
493 
494 	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
495 
496 	final = state->flags & HASH_FLAGS_FINAL;
497 
498 	while ((rctx->total >= state->blocklen) ||
499 	       (state->bufcnt + rctx->total >= state->blocklen)) {
500 		stm32_hash_append_sg(rctx);
501 		bufcnt = state->bufcnt;
502 		state->bufcnt = 0;
503 		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
504 		if (err)
505 			return err;
506 	}
507 
508 	stm32_hash_append_sg(rctx);
509 
510 	if (final) {
511 		bufcnt = state->bufcnt;
512 		state->bufcnt = 0;
513 		return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
514 	}
515 
516 	return err;
517 }
518 
519 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
520 			       struct scatterlist *sg, int length, int mdmat)
521 {
522 	struct dma_async_tx_descriptor *in_desc;
523 	dma_cookie_t cookie;
524 	u32 reg;
525 	int err;
526 
527 	dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);
528 
529 	/* do not use dma if there is no data to send */
530 	if (length <= 0)
531 		return 0;
532 
533 	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
534 					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
535 					  DMA_CTRL_ACK);
536 	if (!in_desc) {
537 		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
538 		return -ENOMEM;
539 	}
540 
541 	reinit_completion(&hdev->dma_completion);
542 	in_desc->callback = stm32_hash_dma_callback;
543 	in_desc->callback_param = hdev;
544 
545 	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
546 
547 	reg = stm32_hash_read(hdev, HASH_CR);
548 
549 	if (hdev->pdata->has_mdmat) {
550 		if (mdmat)
551 			reg |= HASH_CR_MDMAT;
552 		else
553 			reg &= ~HASH_CR_MDMAT;
554 	}
555 	reg |= HASH_CR_DMAE;
556 
557 	stm32_hash_write(hdev, HASH_CR, reg);
558 
559 
560 	cookie = dmaengine_submit(in_desc);
561 	err = dma_submit_error(cookie);
562 	if (err)
563 		return -ENOMEM;
564 
565 	dma_async_issue_pending(hdev->dma_lch);
566 
567 	if (!wait_for_completion_timeout(&hdev->dma_completion,
568 					 msecs_to_jiffies(100)))
569 		err = -ETIMEDOUT;
570 
571 	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
572 				     NULL, NULL) != DMA_COMPLETE)
573 		err = -ETIMEDOUT;
574 
575 	if (err) {
576 		dev_err(hdev->dev, "DMA Error %i\n", err);
577 		dmaengine_terminate_all(hdev->dma_lch);
578 		return err;
579 	}
580 
581 	return -EINPROGRESS;
582 }
583 
584 static void stm32_hash_dma_callback(void *param)
585 {
586 	struct stm32_hash_dev *hdev = param;
587 
588 	complete(&hdev->dma_completion);
589 }
590 
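/**
 * stm32_hash_hmac_dma_send - Load the HMAC key into the hash core, using
 * CPU writes for keys shorter than a block or in multiple-DMA mode, and a
 * DMA transfer of the key scatterlist otherwise.
 * @hdev: the stm32_hash_dev device.
 */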
591 static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
592 {
593 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
594 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
595 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
596 	int err;
597 
598 	if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
599 		err = stm32_hash_write_key(hdev);
600 		if (stm32_hash_wait_busy(hdev))
601 			return -ETIMEDOUT;
602 	} else {
603 		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
604 			sg_init_one(&rctx->sg_key, ctx->key,
605 				    ALIGN(ctx->keylen, sizeof(u32)));
606 
607 		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
608 					  DMA_TO_DEVICE);
609 		if (rctx->dma_ct == 0) {
610 			dev_err(hdev->dev, "dma_map_sg error\n");
611 			return -ENOMEM;
612 		}
613 
614 		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
615 
616 		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
617 	}
618 
619 	return err;
620 }
621 
622 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
623 {
624 	struct dma_slave_config dma_conf;
625 	struct dma_chan *chan;
626 	int err;
627 
628 	memset(&dma_conf, 0, sizeof(dma_conf));
629 
630 	dma_conf.direction = DMA_MEM_TO_DEV;
631 	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
632 	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
633 	dma_conf.src_maxburst = HASH_BURST_LEVEL;
634 	dma_conf.dst_maxburst = HASH_BURST_LEVEL;
635 	dma_conf.device_fc = false;
636 
637 	chan = dma_request_chan(hdev->dev, "in");
638 	if (IS_ERR(chan))
639 		return PTR_ERR(chan);
640 
641 	hdev->dma_lch = chan;
642 
643 	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
644 	if (err) {
645 		dma_release_channel(hdev->dma_lch);
646 		hdev->dma_lch = NULL;
647 		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
648 		return err;
649 	}
650 
651 	init_completion(&hdev->dma_completion);
652 
653 	return 0;
654 }
655 
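/**
 * stm32_hash_dma_send - Feed the request scatterlist to the hash core over
 * DMA. For a non-final transfer the last 32-bit word is held back and
 * written by the CPU; for a final transfer in multiple-DMA mode the
 * remaining bytes are pushed manually after setting the DMA Abort bit.
 * @hdev: the stm32_hash_dev device.
 */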
656 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
657 {
658 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
659 	u32 *buffer = (void *)rctx->state.buffer;
660 	struct scatterlist sg[1], *tsg;
661 	int err = 0, reg, ncp = 0;
662 	unsigned int i, len = 0, bufcnt = 0;
663 	bool final = hdev->flags & HASH_FLAGS_FINAL;
664 	bool is_last = false;
665 	u32 last_word;
666 
667 	dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
668 		__func__, rctx->total, rctx->state.bufcnt, final);
669 
670 	if (rctx->nents < 0)
671 		return -EINVAL;
672 
673 	stm32_hash_write_ctrl(hdev);
674 
675 	if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
676 		hdev->flags |= HASH_FLAGS_HMAC_KEY;
677 		err = stm32_hash_hmac_dma_send(hdev);
678 		if (err != -EINPROGRESS)
679 			return err;
680 	}
681 
682 	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
683 		sg[0] = *tsg;
684 		len = sg->length;
685 
686 		if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
687 			if (!final) {
688 				/* Always manually put the last word of a non-final transfer. */
689 				len -= sizeof(u32);
690 				sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
691 				sg->length -= sizeof(u32);
692 			} else {
693 				/*
694 				 * In Multiple DMA mode, DMA must be aborted before the final
695 				 * transfer.
696 				 */
697 				sg->length = rctx->total - bufcnt;
698 				if (hdev->dma_mode > 0) {
699 					len = (ALIGN(sg->length, 16) - 16);
700 
701 					ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
702 								 rctx->state.buffer,
703 								 sg->length - len,
704 								 rctx->total - sg->length + len);
705 
706 					if (!len)
707 						break;
708 
709 					sg->length = len;
710 				} else {
711 					is_last = true;
712 					if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
713 						len = sg->length;
714 						sg->length = ALIGN(sg->length,
715 								   sizeof(u32));
716 					}
717 				}
718 			}
719 		}
720 
721 		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
722 					  DMA_TO_DEVICE);
723 		if (rctx->dma_ct == 0) {
724 			dev_err(hdev->dev, "dma_map_sg error\n");
725 			return -ENOMEM;
726 		}
727 
728 		err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
729 
730 		/* The last word of a non-final transfer is sent manually. */
731 		if (!final) {
732 			stm32_hash_write(hdev, HASH_DIN, last_word);
733 			len += sizeof(u32);
734 		}
735 
736 		rctx->total -= len;
737 
738 		bufcnt += sg[0].length;
739 		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
740 
741 		if (err == -ENOMEM || err == -ETIMEDOUT)
742 			return err;
743 		if (is_last)
744 			break;
745 	}
746 
747 	/*
748 	 * When the second last block transfer of 4 words is performed by the DMA,
749 	 * the software must set the DMA Abort bit (DMAA) to 1 before completing the
750 	 * last transfer of 4 words or less.
751 	 */
752 	if (final) {
753 		if (hdev->dma_mode > 0) {
754 			if (stm32_hash_wait_busy(hdev))
755 				return -ETIMEDOUT;
756 			reg = stm32_hash_read(hdev, HASH_CR);
757 			reg &= ~HASH_CR_DMAE;
758 			reg |= HASH_CR_DMAA;
759 			stm32_hash_write(hdev, HASH_CR, reg);
760 
761 			if (ncp) {
762 				memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
763 				writesl(hdev->io_base + HASH_DIN, buffer,
764 					DIV_ROUND_UP(ncp, sizeof(u32)));
765 			}
766 
767 			stm32_hash_set_nblw(hdev, ncp);
768 			reg = stm32_hash_read(hdev, HASH_STR);
769 			reg |= HASH_STR_DCAL;
770 			stm32_hash_write(hdev, HASH_STR, reg);
771 			err = -EINPROGRESS;
772 		}
773 
774 		/*
775 		 * The hash processor needs the key to be loaded a second time in order
776 		 * to process the HMAC.
777 		 */
778 		if (hdev->flags & HASH_FLAGS_HMAC) {
779 			if (stm32_hash_wait_busy(hdev))
780 				return -ETIMEDOUT;
781 			err = stm32_hash_hmac_dma_send(hdev);
782 		}
783 
784 		return err;
785 	}
786 
787 	if (err != -EINPROGRESS)
788 		return err;
789 
790 	return 0;
791 }
792 
793 static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
794 {
795 	struct stm32_hash_dev *hdev = NULL, *tmp;
796 
797 	spin_lock_bh(&stm32_hash.lock);
798 	if (!ctx->hdev) {
799 		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
800 			hdev = tmp;
801 			break;
802 		}
803 		ctx->hdev = hdev;
804 	} else {
805 		hdev = ctx->hdev;
806 	}
807 
808 	spin_unlock_bh(&stm32_hash.lock);
809 
810 	return hdev;
811 }
812 
813 static int stm32_hash_init(struct ahash_request *req)
814 {
815 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
816 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
817 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
818 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
819 	struct stm32_hash_state *state = &rctx->state;
820 	bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
821 
822 	rctx->hdev = hdev;
823 	state->flags = 0;
824 
825 	if (!(hdev->dma_lch &&  hdev->pdata->has_mdmat))
826 		state->flags |= HASH_FLAGS_CPU;
827 
828 	if (sha3_mode)
829 		state->flags |= HASH_FLAGS_SHA3_MODE;
830 
831 	rctx->digcnt = crypto_ahash_digestsize(tfm);
832 	switch (rctx->digcnt) {
833 	case MD5_DIGEST_SIZE:
834 		state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
835 		break;
836 	case SHA1_DIGEST_SIZE:
837 		if (hdev->pdata->ux500)
838 			state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
839 		else
840 			state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
841 		break;
842 	case SHA224_DIGEST_SIZE:
843 		if (sha3_mode)
844 			state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
845 		else
846 			state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
847 		break;
848 	case SHA256_DIGEST_SIZE:
849 		if (sha3_mode) {
850 			state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
851 		} else {
852 			if (hdev->pdata->ux500)
853 				state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
854 			else
855 				state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
856 		}
857 		break;
858 	case SHA384_DIGEST_SIZE:
859 		if (sha3_mode)
860 			state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
861 		else
862 			state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
863 		break;
864 	case SHA512_DIGEST_SIZE:
865 		if (sha3_mode)
866 			state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
867 		else
868 			state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
869 		break;
870 	default:
871 		return -EINVAL;
872 	}
873 
874 	rctx->state.bufcnt = 0;
875 	rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
876 	if (rctx->state.blocklen > HASH_BUFLEN) {
877 		dev_err(hdev->dev, "Error, block too large");
878 		return -EINVAL;
879 	}
880 	rctx->nents = 0;
881 	rctx->total = 0;
882 	rctx->offset = 0;
883 	rctx->data_type = HASH_DATA_8_BITS;
884 
885 	if (ctx->flags & HASH_FLAGS_HMAC)
886 		state->flags |= HASH_FLAGS_HMAC;
887 
888 	dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
889 
890 	return 0;
891 }
892 
893 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
894 {
895 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
896 	struct stm32_hash_state *state = &rctx->state;
897 
898 	dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0",
899 		rctx->total, rctx->digcnt);
900 
901 	if (!(state->flags & HASH_FLAGS_CPU))
902 		return stm32_hash_dma_send(hdev);
903 
904 	return stm32_hash_update_cpu(hdev);
905 }
906 
907 static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
908 {
909 	struct ahash_request *req = hdev->req;
910 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
911 	struct stm32_hash_state *state = &rctx->state;
912 	int buflen = state->bufcnt;
913 
914 	if (!(state->flags & HASH_FLAGS_CPU)) {
915 		hdev->flags |= HASH_FLAGS_FINAL;
916 		return stm32_hash_dma_send(hdev);
917 	}
918 
919 	if (state->flags & HASH_FLAGS_FINUP)
920 		return stm32_hash_update_req(hdev);
921 
922 	state->bufcnt = 0;
923 
924 	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
925 }
926 
927 static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
928 {
929 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
930 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
931 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
932 	struct stm32_hash_dev *hdev = rctx->hdev;
933 	int ret;
934 
935 	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
936 		ctx->keylen);
937 
938 	if (!ctx->xtfm) {
939 		dev_err(hdev->dev, "no fallback engine\n");
940 		return;
941 	}
942 
943 	if (ctx->keylen) {
944 		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
945 		if (ret) {
946 			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
947 			return;
948 		}
949 	}
950 
951 	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
952 	if (ret)
953 		dev_err(hdev->dev, "shash digest error\n");
954 }
955 
956 static void stm32_hash_copy_hash(struct ahash_request *req)
957 {
958 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
959 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
960 	struct stm32_hash_state *state = &rctx->state;
961 	struct stm32_hash_dev *hdev = rctx->hdev;
962 	__be32 *hash = (void *)rctx->digest;
963 	unsigned int i, hashsize;
964 
965 	if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
966 		return stm32_hash_emptymsg_fallback(req);
967 
968 	hashsize = crypto_ahash_digestsize(tfm);
969 
970 	for (i = 0; i < hashsize / sizeof(u32); i++) {
971 		if (hdev->pdata->ux500)
972 			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
973 					      HASH_UX500_HREG(i)));
974 		else
975 			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
976 					      HASH_HREG(i)));
977 	}
978 }
979 
980 static int stm32_hash_finish(struct ahash_request *req)
981 {
982 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
983 	u32 reg;
984 
985 	reg = stm32_hash_read(rctx->hdev, HASH_SR);
986 	reg &= ~HASH_SR_OUTPUT_READY;
987 	stm32_hash_write(rctx->hdev, HASH_SR, reg);
988 
989 	if (!req->result)
990 		return -EINVAL;
991 
992 	memcpy(req->result, rctx->digest, rctx->digcnt);
993 
994 	return 0;
995 }
996 
997 static void stm32_hash_finish_req(struct ahash_request *req, int err)
998 {
999 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1000 	struct stm32_hash_state *state = &rctx->state;
1001 	struct stm32_hash_dev *hdev = rctx->hdev;
1002 
1003 	if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
1004 		state->flags |= HASH_FLAGS_DMA_ACTIVE;
1005 	else
1006 		state->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1007 
1008 	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
1009 		stm32_hash_copy_hash(req);
1010 		err = stm32_hash_finish(req);
1011 	}
1012 
1013 	/* Finalized requests must be unprepared here */
1014 	stm32_hash_unprepare_request(req);
1015 
1016 	crypto_finalize_hash_request(hdev->engine, req, err);
1017 }
1018 
1019 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
1020 				   struct ahash_request *req)
1021 {
1022 	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
1023 }
1024 
1025 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
1026 {
1027 	struct ahash_request *req = container_of(areq, struct ahash_request,
1028 						 base);
1029 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1030 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1031 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1032 	struct stm32_hash_state *state = &rctx->state;
1033 	int swap_reg;
1034 	int err = 0;
1035 
1036 	if (!hdev)
1037 		return -ENODEV;
1038 
1039 	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
1040 		rctx->op, req->nbytes);
1041 
1042 	pm_runtime_get_sync(hdev->dev);
1043 
1044 	err = stm32_hash_prepare_request(req);
1045 	if (err)
1046 		return err;
1047 
1048 	hdev->req = req;
1049 	hdev->flags = 0;
1050 	swap_reg = hash_swap_reg(rctx);
1051 
1052 	if (state->flags & HASH_FLAGS_INIT) {
1053 		u32 *preg = rctx->state.hw_context;
1054 		u32 reg;
1055 		int i;
1056 
1057 		if (!hdev->pdata->ux500)
1058 			stm32_hash_write(hdev, HASH_IMR, *preg++);
1059 		stm32_hash_write(hdev, HASH_STR, *preg++);
1060 		stm32_hash_write(hdev, HASH_CR, *preg);
1061 		reg = *preg++ | HASH_CR_INIT;
1062 		stm32_hash_write(hdev, HASH_CR, reg);
1063 
1064 		for (i = 0; i < swap_reg; i++)
1065 			stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1066 
1067 		hdev->flags |= HASH_FLAGS_INIT;
1068 
1069 		if (state->flags & HASH_FLAGS_HMAC)
1070 			hdev->flags |= HASH_FLAGS_HMAC |
1071 				       HASH_FLAGS_HMAC_KEY;
1072 
1073 		if (state->flags & HASH_FLAGS_CPU)
1074 			hdev->flags |= HASH_FLAGS_CPU;
1075 
1076 		if (state->flags & HASH_FLAGS_DMA_ACTIVE)
1077 			hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
1078 	}
1079 
1080 	if (rctx->op == HASH_OP_UPDATE)
1081 		err = stm32_hash_update_req(hdev);
1082 	else if (rctx->op == HASH_OP_FINAL)
1083 		err = stm32_hash_final_req(hdev);
1084 
1085 	/* If we have an IRQ, wait for that, else poll for completion */
1086 	if (err == -EINPROGRESS && hdev->polled) {
1087 		if (stm32_hash_wait_busy(hdev))
1088 			err = -ETIMEDOUT;
1089 		else {
1090 			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1091 			err = 0;
1092 		}
1093 	}
1094 
1095 	if (err != -EINPROGRESS)
1096 		/* the done task will not finish it, so do it here */
1097 		stm32_hash_finish_req(req, err);
1098 
1099 	return 0;
1100 }
1101 
1102 static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
1103 			       struct scatterlist *sg, int bs,
1104 			       unsigned int new_len)
1105 {
1106 	struct stm32_hash_state *state = &rctx->state;
1107 	int pages;
1108 	void *buf;
1109 
1110 	pages = get_order(new_len);
1111 
1112 	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
1113 	if (!buf) {
1114 		pr_err("Couldn't allocate pages for unaligned cases.\n");
1115 		return -ENOMEM;
1116 	}
1117 
1118 	if (state->bufcnt)
1119 		memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
1120 
1121 	scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
1122 				 min(new_len, rctx->total) - state->bufcnt, 0);
1123 	sg_init_table(rctx->sgl, 1);
1124 	sg_set_buf(rctx->sgl, buf, new_len);
1125 	rctx->sg = rctx->sgl;
1126 	state->flags |= HASH_FLAGS_SGS_COPIED;
1127 	rctx->nents = 1;
1128 	rctx->offset += new_len - state->bufcnt;
1129 	state->bufcnt = 0;
1130 	rctx->total = new_len;
1131 
1132 	return 0;
1133 }
1134 
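/*
 * Check that the scatterlist entries meet the DMA constraints (32-bit
 * aligned offsets, block-multiple lengths); fall back to
 * stm32_hash_copy_sgs() to bounce the data into a linear buffer otherwise.
 */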
1135 static int stm32_hash_align_sgs(struct scatterlist *sg,
1136 				int nbytes, int bs, bool init, bool final,
1137 				struct stm32_hash_request_ctx *rctx)
1138 {
1139 	struct stm32_hash_state *state = &rctx->state;
1140 	struct stm32_hash_dev *hdev = rctx->hdev;
1141 	struct scatterlist *sg_tmp = sg;
1142 	int offset = rctx->offset;
1143 	int new_len;
1144 	int n = 0;
1145 	int bufcnt = state->bufcnt;
1146 	bool secure_ctx = hdev->pdata->context_secured;
1147 	bool aligned = true;
1148 
1149 	if (!sg || !sg->length || !nbytes) {
1150 		if (bufcnt) {
1151 			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
1152 			sg_init_table(rctx->sgl, 1);
1153 			sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
1154 			rctx->sg = rctx->sgl;
1155 			rctx->nents = 1;
1156 		}
1157 
1158 		return 0;
1159 	}
1160 
1161 	new_len = nbytes;
1162 
1163 	if (offset)
1164 		aligned = false;
1165 
1166 	if (final) {
1167 		new_len = DIV_ROUND_UP(new_len, bs) * bs;
1168 	} else {
1169 		new_len = (new_len - 1) / bs * bs; // return n block - 1 block
1170 
1171 		/*
1172 		 * Context save in some version of HASH IP can only be done when the
1173 		 * FIFO is ready to get a new block. This implies to send n block plus a
1174 		 * 32 bit word in the first DMA send.
1175 		 */
1176 		if (init && secure_ctx) {
1177 			new_len += sizeof(u32);
1178 			if (unlikely(new_len > nbytes))
1179 				new_len -= bs;
1180 		}
1181 	}
1182 
1183 	if (!new_len)
1184 		return 0;
1185 
1186 	if (nbytes != new_len)
1187 		aligned = false;
1188 
1189 	while (nbytes > 0 && sg_tmp) {
1190 		n++;
1191 
1192 		if (bufcnt) {
1193 			if (!IS_ALIGNED(bufcnt, bs)) {
1194 				aligned = false;
1195 				break;
1196 			}
1197 			nbytes -= bufcnt;
1198 			bufcnt = 0;
1199 			if (!nbytes)
1200 				aligned = false;
1201 
1202 			continue;
1203 		}
1204 
1205 		if (offset < sg_tmp->length) {
1206 			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
1207 				aligned = false;
1208 				break;
1209 			}
1210 
1211 			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
1212 				aligned = false;
1213 				break;
1214 			}
1215 		}
1216 
1217 		if (offset) {
1218 			offset -= sg_tmp->length;
1219 			if (offset < 0) {
1220 				nbytes += offset;
1221 				offset = 0;
1222 			}
1223 		} else {
1224 			nbytes -= sg_tmp->length;
1225 		}
1226 
1227 		sg_tmp = sg_next(sg_tmp);
1228 
1229 		if (nbytes < 0) {
1230 			aligned = false;
1231 			break;
1232 		}
1233 	}
1234 
1235 	if (!aligned)
1236 		return stm32_hash_copy_sgs(rctx, sg, bs, new_len);
1237 
1238 	rctx->total = new_len;
1239 	rctx->offset += new_len;
1240 	rctx->nents = n;
1241 	if (state->bufcnt) {
1242 		sg_init_table(rctx->sgl, 2);
1243 		sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
1244 		sg_chain(rctx->sgl, 2, sg);
1245 		rctx->sg = rctx->sgl;
1246 	} else {
1247 		rctx->sg = sg;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
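/**
 * stm32_hash_prepare_request - Prepare a request for DMA processing: build
 * an aligned scatterlist (bouncing into a temporary buffer when needed) and
 * keep any residue smaller than a block in the state buffer for later.
 * Does nothing in CPU mode.
 * @req: the ahash request.
 */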
1253 static int stm32_hash_prepare_request(struct ahash_request *req)
1254 {
1255 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1256 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1257 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1258 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1259 	struct stm32_hash_state *state = &rctx->state;
1260 	unsigned int nbytes;
1261 	int ret, hash_later, bs;
1262 	bool update = rctx->op & HASH_OP_UPDATE;
1263 	bool init = !(state->flags & HASH_FLAGS_INIT);
1264 	bool finup = state->flags & HASH_FLAGS_FINUP;
1265 	bool final = state->flags & HASH_FLAGS_FINAL;
1266 
1267 	if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
1268 		return 0;
1269 
1270 	bs = crypto_ahash_blocksize(tfm);
1271 
1272 	nbytes = state->bufcnt;
1273 
1274 	/*
1275 	 * In case of update request nbytes must correspond to the content of the
1276 	 * buffer + the offset minus the content of the request already in the
1277 	 * buffer.
1278 	 */
1279 	if (update || finup)
1280 		nbytes += req->nbytes - rctx->offset;
1281 
1282 	dev_dbg(hdev->dev,
1283 		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
1284 		__func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);
1285 
1286 	if (!nbytes)
1287 		return 0;
1288 
1289 	rctx->total = nbytes;
1290 
1291 	if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
1292 		int len = bs - state->bufcnt % bs;
1293 
1294 		if (len > req->nbytes)
1295 			len = req->nbytes;
1296 		scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1297 					 0, len, 0);
1298 		state->bufcnt += len;
1299 		rctx->offset = len;
1300 	}
1301 
1302 	/* copy buffer in a temporary one that is used for sg alignment */
1303 	if (state->bufcnt)
1304 		memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
1305 
1306 	ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
1307 	if (ret)
1308 		return ret;
1309 
1310 	hash_later = nbytes - rctx->total;
1311 	if (hash_later < 0)
1312 		hash_later = 0;
1313 
1314 	if (hash_later && hash_later <= state->blocklen) {
1315 		scatterwalk_map_and_copy(state->buffer,
1316 					 req->src,
1317 					 req->nbytes - hash_later,
1318 					 hash_later, 0);
1319 
1320 		state->bufcnt = hash_later;
1321 	} else {
1322 		state->bufcnt = 0;
1323 	}
1324 
1325 	if (hash_later > state->blocklen) {
1326 		/* FIXME: add support of this case */
1327 		pr_err("Buffer contains more than one block.\n");
1328 		return -ENOMEM;
1329 	}
1330 
1331 	rctx->total = min(nbytes, rctx->total);
1332 
1333 	return 0;
1334 }
1335 
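/**
 * stm32_hash_unprepare_request - Terminate DMA, free any bounce pages and
 * save the hardware context (HASH_IMR, HASH_STR, HASH_CR and the HASH_CSR
 * bank) so the request can resume later, then drop the runtime PM reference.
 * @req: the ahash request.
 */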
1336 static void stm32_hash_unprepare_request(struct ahash_request *req)
1337 {
1338 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1339 	struct stm32_hash_state *state = &rctx->state;
1340 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1341 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1342 	u32 *preg = state->hw_context;
1343 	int swap_reg, i;
1344 
1345 	if (hdev->dma_lch)
1346 		dmaengine_terminate_sync(hdev->dma_lch);
1347 
1348 	if (state->flags & HASH_FLAGS_SGS_COPIED)
1349 		free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));
1350 
1351 	rctx->sg = NULL;
1352 	rctx->offset = 0;
1353 
1354 	state->flags &= ~(HASH_FLAGS_SGS_COPIED);
1355 
1356 	if (!(hdev->flags & HASH_FLAGS_INIT))
1357 		goto pm_runtime;
1358 
1359 	state->flags |= HASH_FLAGS_INIT;
1360 
1361 	if (stm32_hash_wait_busy(hdev)) {
1362 		dev_warn(hdev->dev, "Wait busy failed.");
1363 		return;
1364 	}
1365 
1366 	swap_reg = hash_swap_reg(rctx);
1367 
1368 	if (!hdev->pdata->ux500)
1369 		*preg++ = stm32_hash_read(hdev, HASH_IMR);
1370 	*preg++ = stm32_hash_read(hdev, HASH_STR);
1371 	*preg++ = stm32_hash_read(hdev, HASH_CR);
1372 	for (i = 0; i < swap_reg; i++)
1373 		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
1374 
1375 pm_runtime:
1376 	pm_runtime_mark_last_busy(hdev->dev);
1377 	pm_runtime_put_autosuspend(hdev->dev);
1378 }
1379 
1380 static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
1381 {
1382 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1383 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1384 	struct stm32_hash_dev *hdev = ctx->hdev;
1385 
1386 	rctx->op = op;
1387 
1388 	return stm32_hash_handle_queue(hdev, req);
1389 }
1390 
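/*
 * Small updates are only buffered: in CPU mode anything below one block is
 * appended to the state buffer, in DMA mode anything that still fits in the
 * buffer is copied there. Larger updates are queued to the crypto engine.
 */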
1391 static int stm32_hash_update(struct ahash_request *req)
1392 {
1393 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1394 	struct stm32_hash_state *state = &rctx->state;
1395 
1396 	if (!req->nbytes)
1397 		return 0;
1398 
1399 
1400 	if (state->flags & HASH_FLAGS_CPU) {
1401 		rctx->total = req->nbytes;
1402 		rctx->sg = req->src;
1403 		rctx->offset = 0;
1404 
1405 		if ((state->bufcnt + rctx->total < state->blocklen)) {
1406 			stm32_hash_append_sg(rctx);
1407 			return 0;
1408 		}
1409 	} else { /* DMA mode */
1410 		if (state->bufcnt + req->nbytes <= state->blocklen) {
1411 			scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1412 						 0, req->nbytes, 0);
1413 			state->bufcnt += req->nbytes;
1414 			return 0;
1415 		}
1416 	}
1417 
1418 	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
1419 }
1420 
1421 static int stm32_hash_final(struct ahash_request *req)
1422 {
1423 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1424 	struct stm32_hash_state *state = &rctx->state;
1425 
1426 	state->flags |= HASH_FLAGS_FINAL;
1427 
1428 	return stm32_hash_enqueue(req, HASH_OP_FINAL);
1429 }
1430 
1431 static int stm32_hash_finup(struct ahash_request *req)
1432 {
1433 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1434 	struct stm32_hash_state *state = &rctx->state;
1435 
1436 	if (!req->nbytes)
1437 		goto out;
1438 
1439 	state->flags |= HASH_FLAGS_FINUP;
1440 
1441 	if ((state->flags & HASH_FLAGS_CPU)) {
1442 		rctx->total = req->nbytes;
1443 		rctx->sg = req->src;
1444 		rctx->offset = 0;
1445 	}
1446 
1447 out:
1448 	return stm32_hash_final(req);
1449 }
1450 
1451 static int stm32_hash_digest(struct ahash_request *req)
1452 {
1453 	return stm32_hash_init(req) ?: stm32_hash_finup(req);
1454 }
1455 
1456 static int stm32_hash_export(struct ahash_request *req, void *out)
1457 {
1458 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1459 
1460 	memcpy(out, &rctx->state, sizeof(rctx->state));
1461 
1462 	return 0;
1463 }
1464 
1465 static int stm32_hash_import(struct ahash_request *req, const void *in)
1466 {
1467 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1468 
1469 	stm32_hash_init(req);
1470 	memcpy(&rctx->state, in, sizeof(rctx->state));
1471 
1472 	return 0;
1473 }
1474 
1475 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1476 			     const u8 *key, unsigned int keylen)
1477 {
1478 	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1479 
1480 	if (keylen <= HASH_MAX_KEY_SIZE) {
1481 		memcpy(ctx->key, key, keylen);
1482 		ctx->keylen = keylen;
1483 	} else {
1484 		return -ENOMEM;
1485 	}
1486 
1487 	return 0;
1488 }
1489 
1490 static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1491 {
1492 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1493 	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1494 	const char *name = crypto_tfm_alg_name(tfm);
1495 	struct crypto_shash *xtfm;
1496 
1497 	/* The fallback is only needed on Ux500 */
1498 	if (!hdev->pdata->ux500)
1499 		return 0;
1500 
1501 	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1502 	if (IS_ERR(xtfm)) {
1503 		dev_err(hdev->dev, "failed to allocate %s fallback\n",
1504 			name);
1505 		return PTR_ERR(xtfm);
1506 	}
1507 	dev_info(hdev->dev, "allocated %s fallback\n", name);
1508 	ctx->xtfm = xtfm;
1509 
1510 	return 0;
1511 }
1512 
1513 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
1514 {
1515 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1516 
1517 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1518 				 sizeof(struct stm32_hash_request_ctx));
1519 
1520 	ctx->keylen = 0;
1521 
1522 	if (algs_flags)
1523 		ctx->flags |= algs_flags;
1524 
1525 	return stm32_hash_init_fallback(tfm);
1526 }
1527 
1528 static int stm32_hash_cra_init(struct crypto_tfm *tfm)
1529 {
1530 	return stm32_hash_cra_init_algs(tfm, 0);
1531 }
1532 
1533 static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
1534 {
1535 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
1536 }
1537 
1538 static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
1539 {
1540 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
1541 }
1542 
1543 static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
1544 {
1545 	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
1546 					HASH_FLAGS_HMAC);
1547 }
1548 
1549 static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
1550 {
1551 	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1552 
1553 	if (ctx->xtfm)
1554 		crypto_free_shash(ctx->xtfm);
1555 }
1556 
1557 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1558 {
1559 	struct stm32_hash_dev *hdev = dev_id;
1560 
1561 	if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1562 		hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1563 		goto finish;
1564 	}
1565 
1566 	return IRQ_HANDLED;
1567 
1568 finish:
1569 	/* Finish current request */
1570 	stm32_hash_finish_req(hdev->req, 0);
1571 
1572 	return IRQ_HANDLED;
1573 }
1574 
1575 static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1576 {
1577 	struct stm32_hash_dev *hdev = dev_id;
1578 	u32 reg;
1579 
1580 	reg = stm32_hash_read(hdev, HASH_SR);
1581 	if (reg & HASH_SR_OUTPUT_READY) {
1582 		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1583 		/* Disable interrupts */
1584 		stm32_hash_write(hdev, HASH_IMR, 0);
1585 		return IRQ_WAKE_THREAD;
1586 	}
1587 
1588 	return IRQ_NONE;
1589 }
1590 
1591 static struct ahash_engine_alg algs_md5[] = {
1592 	{
1593 		.base.init = stm32_hash_init,
1594 		.base.update = stm32_hash_update,
1595 		.base.final = stm32_hash_final,
1596 		.base.finup = stm32_hash_finup,
1597 		.base.digest = stm32_hash_digest,
1598 		.base.export = stm32_hash_export,
1599 		.base.import = stm32_hash_import,
1600 		.base.halg = {
1601 			.digestsize = MD5_DIGEST_SIZE,
1602 			.statesize = sizeof(struct stm32_hash_state),
1603 			.base = {
1604 				.cra_name = "md5",
1605 				.cra_driver_name = "stm32-md5",
1606 				.cra_priority = 200,
1607 				.cra_flags = CRYPTO_ALG_ASYNC |
1608 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1609 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1610 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1611 				.cra_init = stm32_hash_cra_init,
1612 				.cra_exit = stm32_hash_cra_exit,
1613 				.cra_module = THIS_MODULE,
1614 			}
1615 		},
1616 		.op = {
1617 			.do_one_request = stm32_hash_one_request,
1618 		},
1619 	},
1620 	{
1621 		.base.init = stm32_hash_init,
1622 		.base.update = stm32_hash_update,
1623 		.base.final = stm32_hash_final,
1624 		.base.finup = stm32_hash_finup,
1625 		.base.digest = stm32_hash_digest,
1626 		.base.export = stm32_hash_export,
1627 		.base.import = stm32_hash_import,
1628 		.base.setkey = stm32_hash_setkey,
1629 		.base.halg = {
1630 			.digestsize = MD5_DIGEST_SIZE,
1631 			.statesize = sizeof(struct stm32_hash_state),
1632 			.base = {
1633 				.cra_name = "hmac(md5)",
1634 				.cra_driver_name = "stm32-hmac-md5",
1635 				.cra_priority = 200,
1636 				.cra_flags = CRYPTO_ALG_ASYNC |
1637 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1638 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1639 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1640 				.cra_init = stm32_hash_cra_hmac_init,
1641 				.cra_exit = stm32_hash_cra_exit,
1642 				.cra_module = THIS_MODULE,
1643 			}
1644 		},
1645 		.op = {
1646 			.do_one_request = stm32_hash_one_request,
1647 		},
1648 	}
1649 };
1650 
1651 static struct ahash_engine_alg algs_sha1[] = {
1652 	{
1653 		.base.init = stm32_hash_init,
1654 		.base.update = stm32_hash_update,
1655 		.base.final = stm32_hash_final,
1656 		.base.finup = stm32_hash_finup,
1657 		.base.digest = stm32_hash_digest,
1658 		.base.export = stm32_hash_export,
1659 		.base.import = stm32_hash_import,
1660 		.base.halg = {
1661 			.digestsize = SHA1_DIGEST_SIZE,
1662 			.statesize = sizeof(struct stm32_hash_state),
1663 			.base = {
1664 				.cra_name = "sha1",
1665 				.cra_driver_name = "stm32-sha1",
1666 				.cra_priority = 200,
1667 				.cra_flags = CRYPTO_ALG_ASYNC |
1668 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1669 				.cra_blocksize = SHA1_BLOCK_SIZE,
1670 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1671 				.cra_init = stm32_hash_cra_init,
1672 				.cra_exit = stm32_hash_cra_exit,
1673 				.cra_module = THIS_MODULE,
1674 			}
1675 		},
1676 		.op = {
1677 			.do_one_request = stm32_hash_one_request,
1678 		},
1679 	},
1680 	{
1681 		.base.init = stm32_hash_init,
1682 		.base.update = stm32_hash_update,
1683 		.base.final = stm32_hash_final,
1684 		.base.finup = stm32_hash_finup,
1685 		.base.digest = stm32_hash_digest,
1686 		.base.export = stm32_hash_export,
1687 		.base.import = stm32_hash_import,
1688 		.base.setkey = stm32_hash_setkey,
1689 		.base.halg = {
1690 			.digestsize = SHA1_DIGEST_SIZE,
1691 			.statesize = sizeof(struct stm32_hash_state),
1692 			.base = {
1693 				.cra_name = "hmac(sha1)",
1694 				.cra_driver_name = "stm32-hmac-sha1",
1695 				.cra_priority = 200,
1696 				.cra_flags = CRYPTO_ALG_ASYNC |
1697 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1698 				.cra_blocksize = SHA1_BLOCK_SIZE,
1699 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1700 				.cra_init = stm32_hash_cra_hmac_init,
1701 				.cra_exit = stm32_hash_cra_exit,
1702 				.cra_module = THIS_MODULE,
1703 			}
1704 		},
1705 		.op = {
1706 			.do_one_request = stm32_hash_one_request,
1707 		},
1708 	},
1709 };
1710 
1711 static struct ahash_engine_alg algs_sha224[] = {
1712 	{
1713 		.base.init = stm32_hash_init,
1714 		.base.update = stm32_hash_update,
1715 		.base.final = stm32_hash_final,
1716 		.base.finup = stm32_hash_finup,
1717 		.base.digest = stm32_hash_digest,
1718 		.base.export = stm32_hash_export,
1719 		.base.import = stm32_hash_import,
1720 		.base.halg = {
1721 			.digestsize = SHA224_DIGEST_SIZE,
1722 			.statesize = sizeof(struct stm32_hash_state),
1723 			.base = {
1724 				.cra_name = "sha224",
1725 				.cra_driver_name = "stm32-sha224",
1726 				.cra_priority = 200,
1727 				.cra_flags = CRYPTO_ALG_ASYNC |
1728 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1729 				.cra_blocksize = SHA224_BLOCK_SIZE,
1730 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1731 				.cra_init = stm32_hash_cra_init,
1732 				.cra_exit = stm32_hash_cra_exit,
1733 				.cra_module = THIS_MODULE,
1734 			}
1735 		},
1736 		.op = {
1737 			.do_one_request = stm32_hash_one_request,
1738 		},
1739 	},
1740 	{
1741 		.base.init = stm32_hash_init,
1742 		.base.update = stm32_hash_update,
1743 		.base.final = stm32_hash_final,
1744 		.base.finup = stm32_hash_finup,
1745 		.base.digest = stm32_hash_digest,
1746 		.base.setkey = stm32_hash_setkey,
1747 		.base.export = stm32_hash_export,
1748 		.base.import = stm32_hash_import,
1749 		.base.halg = {
1750 			.digestsize = SHA224_DIGEST_SIZE,
1751 			.statesize = sizeof(struct stm32_hash_state),
1752 			.base = {
1753 				.cra_name = "hmac(sha224)",
1754 				.cra_driver_name = "stm32-hmac-sha224",
1755 				.cra_priority = 200,
1756 				.cra_flags = CRYPTO_ALG_ASYNC |
1757 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1758 				.cra_blocksize = SHA224_BLOCK_SIZE,
1759 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1760 				.cra_init = stm32_hash_cra_hmac_init,
1761 				.cra_exit = stm32_hash_cra_exit,
1762 				.cra_module = THIS_MODULE,
1763 			}
1764 		},
1765 		.op = {
1766 			.do_one_request = stm32_hash_one_request,
1767 		},
1768 	},
1769 };
1770 
1771 static struct ahash_engine_alg algs_sha256[] = {
1772 	{
1773 		.base.init = stm32_hash_init,
1774 		.base.update = stm32_hash_update,
1775 		.base.final = stm32_hash_final,
1776 		.base.finup = stm32_hash_finup,
1777 		.base.digest = stm32_hash_digest,
1778 		.base.export = stm32_hash_export,
1779 		.base.import = stm32_hash_import,
1780 		.base.halg = {
1781 			.digestsize = SHA256_DIGEST_SIZE,
1782 			.statesize = sizeof(struct stm32_hash_state),
1783 			.base = {
1784 				.cra_name = "sha256",
1785 				.cra_driver_name = "stm32-sha256",
1786 				.cra_priority = 200,
1787 				.cra_flags = CRYPTO_ALG_ASYNC |
1788 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1789 				.cra_blocksize = SHA256_BLOCK_SIZE,
1790 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1791 				.cra_init = stm32_hash_cra_init,
1792 				.cra_exit = stm32_hash_cra_exit,
1793 				.cra_module = THIS_MODULE,
1794 			}
1795 		},
1796 		.op = {
1797 			.do_one_request = stm32_hash_one_request,
1798 		},
1799 	},
1800 	{
1801 		.base.init = stm32_hash_init,
1802 		.base.update = stm32_hash_update,
1803 		.base.final = stm32_hash_final,
1804 		.base.finup = stm32_hash_finup,
1805 		.base.digest = stm32_hash_digest,
1806 		.base.export = stm32_hash_export,
1807 		.base.import = stm32_hash_import,
1808 		.base.setkey = stm32_hash_setkey,
1809 		.base.halg = {
1810 			.digestsize = SHA256_DIGEST_SIZE,
1811 			.statesize = sizeof(struct stm32_hash_state),
1812 			.base = {
1813 				.cra_name = "hmac(sha256)",
1814 				.cra_driver_name = "stm32-hmac-sha256",
1815 				.cra_priority = 200,
1816 				.cra_flags = CRYPTO_ALG_ASYNC |
1817 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1818 				.cra_blocksize = SHA256_BLOCK_SIZE,
1819 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1820 				.cra_init = stm32_hash_cra_hmac_init,
1821 				.cra_exit = stm32_hash_cra_exit,
1822 				.cra_module = THIS_MODULE,
1823 			}
1824 		},
1825 		.op = {
1826 			.do_one_request = stm32_hash_one_request,
1827 		},
1828 	},
1829 };
1830 
1831 static struct ahash_engine_alg algs_sha384_sha512[] = {
1832 	{
1833 		.base.init = stm32_hash_init,
1834 		.base.update = stm32_hash_update,
1835 		.base.final = stm32_hash_final,
1836 		.base.finup = stm32_hash_finup,
1837 		.base.digest = stm32_hash_digest,
1838 		.base.export = stm32_hash_export,
1839 		.base.import = stm32_hash_import,
1840 		.base.halg = {
1841 			.digestsize = SHA384_DIGEST_SIZE,
1842 			.statesize = sizeof(struct stm32_hash_state),
1843 			.base = {
1844 				.cra_name = "sha384",
1845 				.cra_driver_name = "stm32-sha384",
1846 				.cra_priority = 200,
1847 				.cra_flags = CRYPTO_ALG_ASYNC |
1848 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1849 				.cra_blocksize = SHA384_BLOCK_SIZE,
1850 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1851 				.cra_init = stm32_hash_cra_init,
1852 				.cra_exit = stm32_hash_cra_exit,
1853 				.cra_module = THIS_MODULE,
1854 			}
1855 		},
1856 		.op = {
1857 			.do_one_request = stm32_hash_one_request,
1858 		},
1859 	},
1860 	{
1861 		.base.init = stm32_hash_init,
1862 		.base.update = stm32_hash_update,
1863 		.base.final = stm32_hash_final,
1864 		.base.finup = stm32_hash_finup,
1865 		.base.digest = stm32_hash_digest,
1866 		.base.setkey = stm32_hash_setkey,
1867 		.base.export = stm32_hash_export,
1868 		.base.import = stm32_hash_import,
1869 		.base.halg = {
1870 			.digestsize = SHA384_DIGEST_SIZE,
1871 			.statesize = sizeof(struct stm32_hash_state),
1872 			.base = {
1873 				.cra_name = "hmac(sha384)",
1874 				.cra_driver_name = "stm32-hmac-sha384",
1875 				.cra_priority = 200,
1876 				.cra_flags = CRYPTO_ALG_ASYNC |
1877 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1878 				.cra_blocksize = SHA384_BLOCK_SIZE,
1879 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1880 				.cra_init = stm32_hash_cra_hmac_init,
1881 				.cra_exit = stm32_hash_cra_exit,
1882 				.cra_module = THIS_MODULE,
1883 			}
1884 		},
1885 		.op = {
1886 			.do_one_request = stm32_hash_one_request,
1887 		},
1888 	},
1889 	{
1890 		.base.init = stm32_hash_init,
1891 		.base.update = stm32_hash_update,
1892 		.base.final = stm32_hash_final,
1893 		.base.finup = stm32_hash_finup,
1894 		.base.digest = stm32_hash_digest,
1895 		.base.export = stm32_hash_export,
1896 		.base.import = stm32_hash_import,
1897 		.base.halg = {
1898 			.digestsize = SHA512_DIGEST_SIZE,
1899 			.statesize = sizeof(struct stm32_hash_state),
1900 			.base = {
1901 				.cra_name = "sha512",
1902 				.cra_driver_name = "stm32-sha512",
1903 				.cra_priority = 200,
1904 				.cra_flags = CRYPTO_ALG_ASYNC |
1905 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1906 				.cra_blocksize = SHA512_BLOCK_SIZE,
1907 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1908 				.cra_init = stm32_hash_cra_init,
1909 				.cra_exit = stm32_hash_cra_exit,
1910 				.cra_module = THIS_MODULE,
1911 			}
1912 		},
1913 		.op = {
1914 			.do_one_request = stm32_hash_one_request,
1915 		},
1916 	},
1917 	{
1918 		.base.init = stm32_hash_init,
1919 		.base.update = stm32_hash_update,
1920 		.base.final = stm32_hash_final,
1921 		.base.finup = stm32_hash_finup,
1922 		.base.digest = stm32_hash_digest,
1923 		.base.export = stm32_hash_export,
1924 		.base.import = stm32_hash_import,
1925 		.base.setkey = stm32_hash_setkey,
1926 		.base.halg = {
1927 			.digestsize = SHA512_DIGEST_SIZE,
1928 			.statesize = sizeof(struct stm32_hash_state),
1929 			.base = {
1930 				.cra_name = "hmac(sha512)",
1931 				.cra_driver_name = "stm32-hmac-sha512",
1932 				.cra_priority = 200,
1933 				.cra_flags = CRYPTO_ALG_ASYNC |
1934 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1935 				.cra_blocksize = SHA512_BLOCK_SIZE,
1936 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1937 				.cra_init = stm32_hash_cra_hmac_init,
1938 				.cra_exit = stm32_hash_cra_exit,
1939 				.cra_module = THIS_MODULE,
1940 			}
1941 		},
1942 		.op = {
1943 			.do_one_request = stm32_hash_one_request,
1944 		},
1945 	},
1946 };
1947 
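/*
 * The SHA-3 entries differ from the SHA-2 ones only in their cra_init
 * helpers, which are expected to additionally mark the transform context
 * as running in SHA-3 mode.
 */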
1948 static struct ahash_engine_alg algs_sha3[] = {
1949 	{
1950 		.base.init = stm32_hash_init,
1951 		.base.update = stm32_hash_update,
1952 		.base.final = stm32_hash_final,
1953 		.base.finup = stm32_hash_finup,
1954 		.base.digest = stm32_hash_digest,
1955 		.base.export = stm32_hash_export,
1956 		.base.import = stm32_hash_import,
1957 		.base.halg = {
1958 			.digestsize = SHA3_224_DIGEST_SIZE,
1959 			.statesize = sizeof(struct stm32_hash_state),
1960 			.base = {
1961 				.cra_name = "sha3-224",
1962 				.cra_driver_name = "stm32-sha3-224",
1963 				.cra_priority = 200,
1964 				.cra_flags = CRYPTO_ALG_ASYNC |
1965 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1966 				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1967 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1968 				.cra_init = stm32_hash_cra_sha3_init,
1969 				.cra_exit = stm32_hash_cra_exit,
1970 				.cra_module = THIS_MODULE,
1971 			}
1972 		},
1973 		.op = {
1974 			.do_one_request = stm32_hash_one_request,
1975 		},
1976 	},
1977 	{
1978 		.base.init = stm32_hash_init,
1979 		.base.update = stm32_hash_update,
1980 		.base.final = stm32_hash_final,
1981 		.base.finup = stm32_hash_finup,
1982 		.base.digest = stm32_hash_digest,
1983 		.base.export = stm32_hash_export,
1984 		.base.import = stm32_hash_import,
1985 		.base.setkey = stm32_hash_setkey,
1986 		.base.halg = {
1987 			.digestsize = SHA3_224_DIGEST_SIZE,
1988 			.statesize = sizeof(struct stm32_hash_state),
1989 			.base = {
1990 				.cra_name = "hmac(sha3-224)",
1991 				.cra_driver_name = "stm32-hmac-sha3-224",
1992 				.cra_priority = 200,
1993 				.cra_flags = CRYPTO_ALG_ASYNC |
1994 					CRYPTO_ALG_KERN_DRIVER_ONLY,
1995 				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1996 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1997 				.cra_init = stm32_hash_cra_sha3_hmac_init,
1998 				.cra_exit = stm32_hash_cra_exit,
1999 				.cra_module = THIS_MODULE,
2000 			}
2001 		},
2002 		.op = {
2003 			.do_one_request = stm32_hash_one_request,
2004 		},
2005 	},
2006 	{
2007 		.base.init = stm32_hash_init,
2008 		.base.update = stm32_hash_update,
2009 		.base.final = stm32_hash_final,
2010 		.base.finup = stm32_hash_finup,
2011 		.base.digest = stm32_hash_digest,
2012 		.base.export = stm32_hash_export,
2013 		.base.import = stm32_hash_import,
2014 		.base.halg = {
2015 			.digestsize = SHA3_256_DIGEST_SIZE,
2016 			.statesize = sizeof(struct stm32_hash_state),
2017 			.base = {
2018 				.cra_name = "sha3-256",
2019 				.cra_driver_name = "stm32-sha3-256",
2020 				.cra_priority = 200,
2021 				.cra_flags = CRYPTO_ALG_ASYNC |
2022 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2023 				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2024 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2025 				.cra_init = stm32_hash_cra_sha3_init,
2026 				.cra_exit = stm32_hash_cra_exit,
2027 				.cra_module = THIS_MODULE,
2028 			}
2029 		},
2030 		.op = {
2031 			.do_one_request = stm32_hash_one_request,
2032 		},
2033 	},
2034 	{
2035 		.base.init = stm32_hash_init,
2036 		.base.update = stm32_hash_update,
2037 		.base.final = stm32_hash_final,
2038 		.base.finup = stm32_hash_finup,
2039 		.base.digest = stm32_hash_digest,
2040 		.base.export = stm32_hash_export,
2041 		.base.import = stm32_hash_import,
2042 		.base.setkey = stm32_hash_setkey,
2043 		.base.halg = {
2044 			.digestsize = SHA3_256_DIGEST_SIZE,
2045 			.statesize = sizeof(struct stm32_hash_state),
2046 			.base = {
2047 				.cra_name = "hmac(sha3-256)",
2048 				.cra_driver_name = "stm32-hmac-sha3-256",
2049 				.cra_priority = 200,
2050 				.cra_flags = CRYPTO_ALG_ASYNC |
2051 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2052 				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2053 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2054 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2055 				.cra_exit = stm32_hash_cra_exit,
2056 				.cra_module = THIS_MODULE,
2057 			}
2058 		},
2059 		.op = {
2060 			.do_one_request = stm32_hash_one_request,
2061 		},
2062 	},
2063 	{
2064 		.base.init = stm32_hash_init,
2065 		.base.update = stm32_hash_update,
2066 		.base.final = stm32_hash_final,
2067 		.base.finup = stm32_hash_finup,
2068 		.base.digest = stm32_hash_digest,
2069 		.base.export = stm32_hash_export,
2070 		.base.import = stm32_hash_import,
2071 		.base.halg = {
2072 			.digestsize = SHA3_384_DIGEST_SIZE,
2073 			.statesize = sizeof(struct stm32_hash_state),
2074 			.base = {
2075 				.cra_name = "sha3-384",
2076 				.cra_driver_name = "stm32-sha3-384",
2077 				.cra_priority = 200,
2078 				.cra_flags = CRYPTO_ALG_ASYNC |
2079 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2080 				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2081 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2082 				.cra_init = stm32_hash_cra_sha3_init,
2083 				.cra_exit = stm32_hash_cra_exit,
2084 				.cra_module = THIS_MODULE,
2085 			}
2086 		},
2087 		.op = {
2088 			.do_one_request = stm32_hash_one_request,
2089 		},
2090 	},
2091 	{
2092 		.base.init = stm32_hash_init,
2093 		.base.update = stm32_hash_update,
2094 		.base.final = stm32_hash_final,
2095 		.base.finup = stm32_hash_finup,
2096 		.base.digest = stm32_hash_digest,
2097 		.base.export = stm32_hash_export,
2098 		.base.import = stm32_hash_import,
2099 		.base.setkey = stm32_hash_setkey,
2100 		.base.halg = {
2101 			.digestsize = SHA3_384_DIGEST_SIZE,
2102 			.statesize = sizeof(struct stm32_hash_state),
2103 			.base = {
2104 				.cra_name = "hmac(sha3-384)",
2105 				.cra_driver_name = "stm32-hmac-sha3-384",
2106 				.cra_priority = 200,
2107 				.cra_flags = CRYPTO_ALG_ASYNC |
2108 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2109 				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2110 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2111 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2112 				.cra_exit = stm32_hash_cra_exit,
2113 				.cra_module = THIS_MODULE,
2114 			}
2115 		},
2116 		.op = {
2117 			.do_one_request = stm32_hash_one_request,
2118 		},
2119 	},
2120 	{
2121 		.base.init = stm32_hash_init,
2122 		.base.update = stm32_hash_update,
2123 		.base.final = stm32_hash_final,
2124 		.base.finup = stm32_hash_finup,
2125 		.base.digest = stm32_hash_digest,
2126 		.base.export = stm32_hash_export,
2127 		.base.import = stm32_hash_import,
2128 		.base.halg = {
2129 			.digestsize = SHA3_512_DIGEST_SIZE,
2130 			.statesize = sizeof(struct stm32_hash_state),
2131 			.base = {
2132 				.cra_name = "sha3-512",
2133 				.cra_driver_name = "stm32-sha3-512",
2134 				.cra_priority = 200,
2135 				.cra_flags = CRYPTO_ALG_ASYNC |
2136 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2137 				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2138 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2139 				.cra_init = stm32_hash_cra_sha3_init,
2140 				.cra_exit = stm32_hash_cra_exit,
2141 				.cra_module = THIS_MODULE,
2142 			}
2143 		},
2144 		.op = {
2145 			.do_one_request = stm32_hash_one_request,
2146 		},
2147 	},
2148 	{
2149 		.base.init = stm32_hash_init,
2150 		.base.update = stm32_hash_update,
2151 		.base.final = stm32_hash_final,
2152 		.base.finup = stm32_hash_finup,
2153 		.base.digest = stm32_hash_digest,
2154 		.base.export = stm32_hash_export,
2155 		.base.import = stm32_hash_import,
2156 		.base.setkey = stm32_hash_setkey,
2157 		.base.halg = {
2158 			.digestsize = SHA3_512_DIGEST_SIZE,
2159 			.statesize = sizeof(struct stm32_hash_state),
2160 			.base = {
2161 				.cra_name = "hmac(sha3-512)",
2162 				.cra_driver_name = "stm32-hmac-sha3-512",
2163 				.cra_priority = 200,
2164 				.cra_flags = CRYPTO_ALG_ASYNC |
2165 					CRYPTO_ALG_KERN_DRIVER_ONLY,
2166 				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2167 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2168 				.cra_init = stm32_hash_cra_sha3_hmac_init,
2169 				.cra_exit = stm32_hash_cra_exit,
2170 				.cra_module = THIS_MODULE,
2171 			}
2172 		},
2173 		.op = {
2174 			.do_one_request = stm32_hash_one_request,
2175 		},
2176 	}
2177 };
2178 
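/*
 * Register every algorithm listed in the per-SoC algs_info table with the
 * crypto engine; on failure, unwind all registrations done so far.
 */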
2179 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
2180 {
2181 	unsigned int i, j;
2182 	int err;
2183 
2184 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2185 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
2186 			err = crypto_engine_register_ahash(
2187 				&hdev->pdata->algs_info[i].algs_list[j]);
2188 			if (err)
2189 				goto err_algs;
2190 		}
2191 	}
2192 
2193 	return 0;
2194 err_algs:
2195 	dev_err(hdev->dev, "Algo %u : %u failed\n", i, j);
2196 	/* Unwind entries 0..j-1 of the failing row i, then all earlier rows. */
2197 	for (i++; i--; j = i ? hdev->pdata->algs_info[i - 1].size : 0)
2198 		while (j--)
2199 			crypto_engine_unregister_ahash(
2200 				&hdev->pdata->algs_info[i].algs_list[j]);
2201 
2202 	return err;
2203 }
2204 
2205 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
2206 {
2207 	unsigned int i, j;
2208 
2209 	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2210 		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
2211 			crypto_engine_unregister_ahash(
2212 				&hdev->pdata->algs_info[i].algs_list[j]);
2213 	}
2214 
2215 	return 0;
2216 }
2217 
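/*
 * Per-SoC capability tables: each compatible below lists only the
 * algorithm groups its HASH instance implements, and the matching pdata
 * records the quirks (alg_shift, broken_emptymsg, ...) the common code
 * has to honour.
 */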
2218 static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
2219 	{
2220 		.algs_list	= algs_sha1,
2221 		.size		= ARRAY_SIZE(algs_sha1),
2222 	},
2223 	{
2224 		.algs_list	= algs_sha256,
2225 		.size		= ARRAY_SIZE(algs_sha256),
2226 	},
2227 };
2228 
2229 static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
2230 	.alg_shift	= 7,
2231 	.algs_info	= stm32_hash_algs_info_ux500,
2232 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
2233 	.broken_emptymsg = true,
2234 	.ux500		= true,
2235 };
2236 
2237 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
2238 	{
2239 		.algs_list	= algs_md5,
2240 		.size		= ARRAY_SIZE(algs_md5),
2241 	},
2242 	{
2243 		.algs_list	= algs_sha1,
2244 		.size		= ARRAY_SIZE(algs_sha1),
2245 	},
2246 };
2247 
2248 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
2249 	.alg_shift	= 7,
2250 	.algs_info	= stm32_hash_algs_info_stm32f4,
2251 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
2252 	.has_sr		= true,
2253 	.has_mdmat	= true,
2254 };
2255 
2256 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
2257 	{
2258 		.algs_list	= algs_md5,
2259 		.size		= ARRAY_SIZE(algs_md5),
2260 	},
2261 	{
2262 		.algs_list	= algs_sha1,
2263 		.size		= ARRAY_SIZE(algs_sha1),
2264 	},
2265 	{
2266 		.algs_list	= algs_sha224,
2267 		.size		= ARRAY_SIZE(algs_sha224),
2268 	},
2269 	{
2270 		.algs_list	= algs_sha256,
2271 		.size		= ARRAY_SIZE(algs_sha256),
2272 	},
2273 };
2274 
2275 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
2276 	.alg_shift	= 7,
2277 	.algs_info	= stm32_hash_algs_info_stm32f7,
2278 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
2279 	.has_sr		= true,
2280 	.has_mdmat	= true,
2281 };
2282 
2283 static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
2284 	{
2285 		.algs_list	= algs_sha1,
2286 		.size		= ARRAY_SIZE(algs_sha1),
2287 	},
2288 	{
2289 		.algs_list	= algs_sha224,
2290 		.size		= ARRAY_SIZE(algs_sha224),
2291 	},
2292 	{
2293 		.algs_list	= algs_sha256,
2294 		.size		= ARRAY_SIZE(algs_sha256),
2295 	},
2296 	{
2297 		.algs_list	= algs_sha384_sha512,
2298 		.size		= ARRAY_SIZE(algs_sha384_sha512),
2299 	},
2300 	{
2301 		.algs_list	= algs_sha3,
2302 		.size		= ARRAY_SIZE(algs_sha3),
2303 	},
2304 };
2305 
2306 static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
2307 	.alg_shift	= 17,
2308 	.algs_info	= stm32_hash_algs_info_stm32mp13,
2309 	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
2310 	.has_sr		= true,
2311 	.has_mdmat	= true,
2312 	.context_secured = true,
2313 };
2314 
2315 static const struct of_device_id stm32_hash_of_match[] = {
2316 	{ .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
2317 	{ .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
2318 	{ .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
2319 	{ .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
2320 	{},
2321 };
2322 
2323 MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
2324 
2325 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
2326 				   struct device *dev)
2327 {
2328 	hdev->pdata = of_device_get_match_data(dev);
2329 	if (!hdev->pdata) {
2330 		dev_err(dev, "no compatible OF match\n");
2331 		return -EINVAL;
2332 	}
2333 
2334 	return 0;
2335 }
2336 
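/*
 * Probe brings the device up in dependency order: ioremap, match data and
 * the optional IRQ (falling back to polling) first, then clock, runtime
 * PM, an optional reset pulse, DMA, the crypto engine, and only then the
 * algorithm registration that makes the hardware reachable from the
 * crypto API.
 */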
2337 static int stm32_hash_probe(struct platform_device *pdev)
2338 {
2339 	struct stm32_hash_dev *hdev;
2340 	struct device *dev = &pdev->dev;
2341 	struct resource *res;
2342 	int ret, irq;
2343 
2344 	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
2345 	if (!hdev)
2346 		return -ENOMEM;
2347 
2348 	hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2349 	if (IS_ERR(hdev->io_base))
2350 		return PTR_ERR(hdev->io_base);
2351 
2352 	hdev->phys_base = res->start;
2353 
2354 	ret = stm32_hash_get_of_match(hdev, dev);
2355 	if (ret)
2356 		return ret;
2357 
2358 	irq = platform_get_irq_optional(pdev, 0);
2359 	if (irq < 0 && irq != -ENXIO)
2360 		return irq;
2361 
2362 	if (irq > 0) {
2363 		ret = devm_request_threaded_irq(dev, irq,
2364 						stm32_hash_irq_handler,
2365 						stm32_hash_irq_thread,
2366 						IRQF_ONESHOT,
2367 						dev_name(dev), hdev);
2368 		if (ret) {
2369 			dev_err(dev, "Cannot grab IRQ\n");
2370 			return ret;
2371 		}
2372 	} else {
2373 		dev_info(dev, "No IRQ, use polling mode\n");
2374 		hdev->polled = true;
2375 	}
2376 
2377 	hdev->clk = devm_clk_get(&pdev->dev, NULL);
2378 	if (IS_ERR(hdev->clk))
2379 		return dev_err_probe(dev, PTR_ERR(hdev->clk),
2380 				     "failed to get clock for hash\n");
2381 
2382 	ret = clk_prepare_enable(hdev->clk);
2383 	if (ret) {
2384 		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
2385 		return ret;
2386 	}
2387 
2388 	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
2389 	pm_runtime_use_autosuspend(dev);
2390 
2391 	pm_runtime_get_noresume(dev);
2392 	pm_runtime_set_active(dev);
2393 	pm_runtime_enable(dev);
2394 
2395 	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
2396 	if (IS_ERR(hdev->rst)) {
2397 		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
2398 			ret = -EPROBE_DEFER;
2399 			goto err_reset;
2400 		}
2401 	} else {
2402 		reset_control_assert(hdev->rst);
2403 		udelay(2);
2404 		reset_control_deassert(hdev->rst);
2405 	}
2406 
2407 	hdev->dev = dev;
2408 
2409 	platform_set_drvdata(pdev, hdev);
2410 
2411 	ret = stm32_hash_dma_init(hdev);
2412 	switch (ret) {
2413 	case 0:
2414 		break;
2415 	case -ENOENT:
2416 	case -ENODEV:
2417 		dev_info(dev, "DMA mode not available\n");
2418 		break;
2419 	default:
2420 		dev_err(dev, "DMA init error %d\n", ret);
2421 		goto err_dma;
2422 	}
2423 
2424 	spin_lock(&stm32_hash.lock);
2425 	list_add_tail(&hdev->list, &stm32_hash.dev_list);
2426 	spin_unlock(&stm32_hash.lock);
2427 
2428 	/* Initialize crypto engine */
2429 	hdev->engine = crypto_engine_alloc_init(dev, 1);
2430 	if (!hdev->engine) {
2431 		ret = -ENOMEM;
2432 		goto err_engine;
2433 	}
2434 
2435 	ret = crypto_engine_start(hdev->engine);
2436 	if (ret)
2437 		goto err_engine_start;
2438 
2439 	if (hdev->pdata->ux500)
2440 		/* FIXME: implement DMA mode for Ux500 */
2441 		hdev->dma_mode = 0;
2442 	else
2443 		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
2444 
2445 	/* Register algos */
2446 	ret = stm32_hash_register_algs(hdev);
2447 	if (ret)
2448 		goto err_algs;
2449 
2450 	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
2451 		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
2452 
2453 	pm_runtime_put_sync(dev);
2454 
2455 	return 0;
2456 
2457 err_algs:
2458 err_engine_start:
2459 	crypto_engine_exit(hdev->engine);
2460 err_engine:
2461 	spin_lock(&stm32_hash.lock);
2462 	list_del(&hdev->list);
2463 	spin_unlock(&stm32_hash.lock);
2464 err_dma:
2465 	if (hdev->dma_lch)
2466 		dma_release_channel(hdev->dma_lch);
2467 err_reset:
2468 	pm_runtime_disable(dev);
2469 	pm_runtime_put_noidle(dev);
2470 
2471 	clk_disable_unprepare(hdev->clk);
2472 
2473 	return ret;
2474 }
2475 
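/*
 * Note the asymmetry at the end of remove: the clock is only disabled
 * when pm_runtime_get_sync() succeeded, so the prepare/enable count stays
 * balanced even if the device failed to resume.
 */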
2476 static void stm32_hash_remove(struct platform_device *pdev)
2477 {
2478 	struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
2479 	int ret;
2480 
2481 	ret = pm_runtime_get_sync(hdev->dev);
2482 
2483 	stm32_hash_unregister_algs(hdev);
2484 
2485 	crypto_engine_exit(hdev->engine);
2486 
2487 	spin_lock(&stm32_hash.lock);
2488 	list_del(&hdev->list);
2489 	spin_unlock(&stm32_hash.lock);
2490 
2491 	if (hdev->dma_lch)
2492 		dma_release_channel(hdev->dma_lch);
2493 
2494 	pm_runtime_disable(hdev->dev);
2495 	pm_runtime_put_noidle(hdev->dev);
2496 
2497 	if (ret >= 0)
2498 		clk_disable_unprepare(hdev->clk);
2499 }
2500 
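/*
 * Runtime PM only gates the HASH clock; system sleep reuses the same
 * paths through pm_runtime_force_{suspend,resume} in the dev_pm_ops
 * below.
 */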
2501 #ifdef CONFIG_PM
2502 static int stm32_hash_runtime_suspend(struct device *dev)
2503 {
2504 	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2505 
2506 	clk_disable_unprepare(hdev->clk);
2507 
2508 	return 0;
2509 }
2510 
2511 static int stm32_hash_runtime_resume(struct device *dev)
2512 {
2513 	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
2514 	int ret;
2515 
2516 	ret = clk_prepare_enable(hdev->clk);
2517 	if (ret) {
2518 		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
2519 		return ret;
2520 	}
2521 
2522 	return 0;
2523 }
2524 #endif
2525 
2526 static const struct dev_pm_ops stm32_hash_pm_ops = {
2527 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2528 				pm_runtime_force_resume)
2529 	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
2530 			   stm32_hash_runtime_resume, NULL)
2531 };
2532 
2533 static struct platform_driver stm32_hash_driver = {
2534 	.probe		= stm32_hash_probe,
2535 	.remove_new	= stm32_hash_remove,
2536 	.driver		= {
2537 		.name	= "stm32-hash",
2538 		.pm = &stm32_hash_pm_ops,
2539 		.of_match_table	= stm32_hash_of_match,
2540 	}
2541 };
2542 
2543 module_platform_driver(stm32_hash_driver);
2544 
2545 MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
2546 MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
2547 MODULE_LICENSE("GPL v2");
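/*
 * Usage sketch (illustrative, not part of this file): consumers reach
 * this hardware through the generic ahash API, and an entry such as
 * "stm32-sha256" is selected whenever its cra_priority (200) beats the
 * other registered "sha256" implementations. A minimal one-shot digest,
 * with error handling elided, could look like:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */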
2548