xref: /linux/drivers/crypto/omap-sham.c (revision 4dd4d5e486ebdeb48dbc558237d4ba8aab8917d5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for OMAP SHA1/MD5 HW acceleration.
6  *
7  * Copyright (c) 2010 Nokia Corporation
8  * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
9  * Copyright (c) 2011 Texas Instruments Incorporated
10  *
11  * Some ideas are from old omap-sha1-md5.c driver.
12  */
13 
14 #define pr_fmt(fmt) "%s: " fmt, __func__
15 
16 #include <linux/err.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/interrupt.h>
22 #include <linux/kernel.h>
23 #include <linux/irq.h>
24 #include <linux/io.h>
25 #include <linux/platform_device.h>
26 #include <linux/scatterlist.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/dmaengine.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_address.h>
33 #include <linux/of_irq.h>
34 #include <linux/delay.h>
35 #include <linux/crypto.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/algapi.h>
38 #include <crypto/sha1.h>
39 #include <crypto/sha2.h>
40 #include <crypto/hash.h>
41 #include <crypto/hmac.h>
42 #include <crypto/internal/hash.h>
43 #include <crypto/engine.h>
44 
45 #define MD5_DIGEST_SIZE			16
46 
47 #define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
48 #define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
49 #define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)
50 
51 #define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))
52 
53 #define SHA_REG_CTRL			0x18
54 #define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
55 #define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
56 #define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
57 #define SHA_REG_CTRL_ALGO		(1 << 2)
58 #define SHA_REG_CTRL_INPUT_READY	(1 << 1)
59 #define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
60 
61 #define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)
62 
63 #define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
64 #define SHA_REG_MASK_DMA_EN		(1 << 3)
65 #define SHA_REG_MASK_IT_EN		(1 << 2)
66 #define SHA_REG_MASK_SOFTRESET		(1 << 1)
67 #define SHA_REG_AUTOIDLE		(1 << 0)
68 
69 #define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
70 #define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
71 
72 #define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
73 #define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
74 #define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
75 #define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
76 #define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
77 
78 #define SHA_REG_MODE_ALGO_MASK		(7 << 0)
79 #define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
80 #define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
81 #define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
82 #define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
83 #define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
84 #define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)
85 
86 #define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)
87 
88 #define SHA_REG_IRQSTATUS		0x118
89 #define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
90 #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
91 #define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
92 #define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)
93 
94 #define SHA_REG_IRQENA			0x11C
95 #define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
96 #define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
97 #define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
98 #define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)
99 
100 #define DEFAULT_TIMEOUT_INTERVAL	HZ
101 
102 #define DEFAULT_AUTOSUSPEND_DELAY	1000
103 
104 /* mostly device flags */
105 #define FLAGS_FINAL		1
106 #define FLAGS_DMA_ACTIVE	2
107 #define FLAGS_OUTPUT_READY	3
108 #define FLAGS_CPU		5
109 #define FLAGS_DMA_READY		6
110 #define FLAGS_AUTO_XOR		7
111 #define FLAGS_BE32_SHA1		8
112 #define FLAGS_SGS_COPIED	9
113 #define FLAGS_SGS_ALLOCED	10
114 #define FLAGS_HUGE		11
115 
116 /* context flags */
117 #define FLAGS_FINUP		16
118 
119 #define FLAGS_MODE_SHIFT	18
120 #define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
121 #define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
122 #define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
123 #define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
124 #define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
125 #define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
126 #define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
127 
128 #define FLAGS_HMAC		21
129 #define FLAGS_ERROR		22
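/*
 * The FLAGS_MODE field (bits 18-20) stores the algorithm encoding from
 * the hardware MODE register, shifted up by FLAGS_MODE_SHIFT.  For
 * example:
 *
 *   FLAGS_MODE_SHA256 = SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT
 *                     = (3 << 1) << 18 = 0x180000
 *
 * which lets omap_sham_write_ctrl_omap4() recover the register value
 * with (ctx->flags & FLAGS_MODE_MASK) >> FLAGS_MODE_SHIFT.
 */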
130 
131 #define OP_UPDATE		1
132 #define OP_FINAL		2
133 
134 #define OMAP_ALIGN_MASK		(sizeof(u32)-1)
135 #define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
136 
137 #define BUFLEN			SHA512_BLOCK_SIZE
138 #define OMAP_SHA_DMA_THRESHOLD	256
139 
140 #define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)
141 
142 struct omap_sham_dev;
143 
144 struct omap_sham_reqctx {
145 	struct omap_sham_dev	*dd;
146 	unsigned long		flags;
147 	u8			op;
148 
149 	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
150 	size_t			digcnt;
151 	size_t			bufcnt;
152 	size_t			buflen;
153 
154 	/* walk state */
155 	struct scatterlist	*sg;
156 	struct scatterlist	sgl[2];
157 	int			offset;	/* offset in current sg */
158 	int			sg_len;
159 	unsigned int		total;	/* total request */
160 
161 	u8			buffer[] OMAP_ALIGNED;
162 };
163 
164 struct omap_sham_hmac_ctx {
165 	struct crypto_shash	*shash;
166 	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
167 	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
168 };
169 
170 struct omap_sham_ctx {
171 	struct crypto_engine_ctx	enginectx;
172 	unsigned long		flags;
173 
174 	/* fallback stuff */
175 	struct crypto_shash	*fallback;
176 
177 	struct omap_sham_hmac_ctx base[];
178 };
179 
180 #define OMAP_SHAM_QUEUE_LENGTH	10
181 
182 struct omap_sham_algs_info {
183 	struct ahash_alg	*algs_list;
184 	unsigned int		size;
185 	unsigned int		registered;
186 };
187 
188 struct omap_sham_pdata {
189 	struct omap_sham_algs_info	*algs_info;
190 	unsigned int	algs_info_size;
191 	unsigned long	flags;
192 	int		digest_size;
193 
194 	void		(*copy_hash)(struct ahash_request *req, int out);
195 	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
196 				      int final, int dma);
197 	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
198 	int		(*poll_irq)(struct omap_sham_dev *dd);
199 	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);
200 
201 	u32		odigest_ofs;
202 	u32		idigest_ofs;
203 	u32		din_ofs;
204 	u32		digcnt_ofs;
205 	u32		rev_ofs;
206 	u32		mask_ofs;
207 	u32		sysstatus_ofs;
208 	u32		mode_ofs;
209 	u32		length_ofs;
210 
211 	u32		major_mask;
212 	u32		major_shift;
213 	u32		minor_mask;
214 	u32		minor_shift;
215 };
216 
217 struct omap_sham_dev {
218 	struct list_head	list;
219 	unsigned long		phys_base;
220 	struct device		*dev;
221 	void __iomem		*io_base;
222 	int			irq;
223 	int			err;
224 	struct dma_chan		*dma_lch;
225 	struct tasklet_struct	done_task;
226 	u8			polling_mode;
227 	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;
228 
229 	unsigned long		flags;
230 	int			fallback_sz;
231 	struct crypto_queue	queue;
232 	struct ahash_request	*req;
233 	struct crypto_engine	*engine;
234 
235 	const struct omap_sham_pdata	*pdata;
236 };
237 
238 struct omap_sham_drv {
239 	struct list_head	dev_list;
240 	spinlock_t		lock;
241 	unsigned long		flags;
242 };
243 
244 static struct omap_sham_drv sham = {
245 	.dev_list = LIST_HEAD_INIT(sham.dev_list),
246 	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
247 };
248 
249 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
250 static void omap_sham_finish_req(struct ahash_request *req, int err);
251 
252 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
253 {
254 	return __raw_readl(dd->io_base + offset);
255 }
256 
257 static inline void omap_sham_write(struct omap_sham_dev *dd,
258 					u32 offset, u32 value)
259 {
260 	__raw_writel(value, dd->io_base + offset);
261 }
262 
263 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
264 					u32 value, u32 mask)
265 {
266 	u32 val;
267 
268 	val = omap_sham_read(dd, address);
269 	val &= ~mask;
270 	val |= value;
271 	omap_sham_write(dd, address, val);
272 }
273 
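/*
 * Busy-wait until @bit is set in register @offset, giving up after
 * DEFAULT_TIMEOUT_INTERVAL.  Used by the poll_irq handlers to wait for
 * the input FIFO to become ready when feeding data in PIO mode.
 */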
274 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
275 {
276 	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
277 
278 	while (!(omap_sham_read(dd, offset) & bit)) {
279 		if (time_is_before_jiffies(timeout))
280 			return -ETIMEDOUT;
281 	}
282 
283 	return 0;
284 }
285 
286 static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
287 {
288 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
289 	struct omap_sham_dev *dd = ctx->dd;
290 	u32 *hash = (u32 *)ctx->digest;
291 	int i;
292 
293 	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
294 		if (out)
295 			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
296 		else
297 			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
298 	}
299 }
300 
301 static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
302 {
303 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
304 	struct omap_sham_dev *dd = ctx->dd;
305 	int i;
306 
307 	if (ctx->flags & BIT(FLAGS_HMAC)) {
308 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
309 		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
310 		struct omap_sham_hmac_ctx *bctx = tctx->base;
311 		u32 *opad = (u32 *)bctx->opad;
312 
313 		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
314 			if (out)
315 				opad[i] = omap_sham_read(dd,
316 						SHA_REG_ODIGEST(dd, i));
317 			else
318 				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
319 						opad[i]);
320 		}
321 	}
322 
323 	omap_sham_copy_hash_omap2(req, out);
324 }
325 
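/*
 * Copy the internal digest to req->result in the byte order the crypto
 * API expects.  The digest registers are read as 32-bit words; OMAP2
 * SHA1 stores them big-endian (FLAGS_BE32_SHA1), all other cases are
 * little-endian.
 */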
326 static void omap_sham_copy_ready_hash(struct ahash_request *req)
327 {
328 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
329 	u32 *in = (u32 *)ctx->digest;
330 	u32 *hash = (u32 *)req->result;
331 	int i, d, big_endian = 0;
332 
333 	if (!hash)
334 		return;
335 
336 	switch (ctx->flags & FLAGS_MODE_MASK) {
337 	case FLAGS_MODE_MD5:
338 		d = MD5_DIGEST_SIZE / sizeof(u32);
339 		break;
340 	case FLAGS_MODE_SHA1:
341 		/* OMAP2 SHA1 is big endian */
342 		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
343 			big_endian = 1;
344 		d = SHA1_DIGEST_SIZE / sizeof(u32);
345 		break;
346 	case FLAGS_MODE_SHA224:
347 		d = SHA224_DIGEST_SIZE / sizeof(u32);
348 		break;
349 	case FLAGS_MODE_SHA256:
350 		d = SHA256_DIGEST_SIZE / sizeof(u32);
351 		break;
352 	case FLAGS_MODE_SHA384:
353 		d = SHA384_DIGEST_SIZE / sizeof(u32);
354 		break;
355 	case FLAGS_MODE_SHA512:
356 		d = SHA512_DIGEST_SIZE / sizeof(u32);
357 		break;
358 	default:
359 		d = 0;
360 	}
361 
362 	if (big_endian)
363 		for (i = 0; i < d; i++)
364 			hash[i] = be32_to_cpup((__be32 *)in + i);
365 	else
366 		for (i = 0; i < d; i++)
367 			hash[i] = le32_to_cpup((__le32 *)in + i);
368 }
369 
370 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
371 				 int final, int dma)
372 {
373 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
374 	u32 val = length << 5, mask;
375 
376 	if (likely(ctx->digcnt))
377 		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
378 
379 	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
380 		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
381 		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
382 	/*
383 	 * Setting ALGO_CONST only for the first iteration
384 	 * and CLOSE_HASH only for the last one.
385 	 */
386 	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
387 		val |= SHA_REG_CTRL_ALGO;
388 	if (!ctx->digcnt)
389 		val |= SHA_REG_CTRL_ALGO_CONST;
390 	if (final)
391 		val |= SHA_REG_CTRL_CLOSE_HASH;
392 
393 	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
394 			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
395 
396 	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
397 }
398 
399 static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
400 {
401 }
402 
403 static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
404 {
405 	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
406 }
407 
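/*
 * Hash block size in bytes for the current mode.  MD5 shares SHA1's
 * 64-byte block size.
 */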
408 static int get_block_size(struct omap_sham_reqctx *ctx)
409 {
410 	int d;
411 
412 	switch (ctx->flags & FLAGS_MODE_MASK) {
413 	case FLAGS_MODE_MD5:
414 	case FLAGS_MODE_SHA1:
415 		d = SHA1_BLOCK_SIZE;
416 		break;
417 	case FLAGS_MODE_SHA224:
418 	case FLAGS_MODE_SHA256:
419 		d = SHA256_BLOCK_SIZE;
420 		break;
421 	case FLAGS_MODE_SHA384:
422 	case FLAGS_MODE_SHA512:
423 		d = SHA512_BLOCK_SIZE;
424 		break;
425 	default:
426 		d = 0;
427 	}
428 
429 	return d;
430 }
431 
432 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
433 				    u32 *value, int count)
434 {
435 	for (; count--; value++, offset += 4)
436 		omap_sham_write(dd, offset, *value);
437 }
438 
439 static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
440 				 int final, int dma)
441 {
442 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
443 	u32 val, mask;
444 
445 	if (likely(ctx->digcnt))
446 		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
447 
448 	/*
449 	 * Setting ALGO_CONST only for the first iteration and
450 	 * CLOSE_HASH only for the last one. Note that flags mode bits
451 	 * correspond to algorithm encoding in mode register.
452 	 */
453 	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
454 	if (!ctx->digcnt) {
455 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
456 		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
457 		struct omap_sham_hmac_ctx *bctx = tctx->base;
458 		int bs, nr_dr;
459 
460 		val |= SHA_REG_MODE_ALGO_CONSTANT;
461 
462 		if (ctx->flags & BIT(FLAGS_HMAC)) {
463 			bs = get_block_size(ctx);
464 			nr_dr = bs / (2 * sizeof(u32));
465 			val |= SHA_REG_MODE_HMAC_KEY_PROC;
466 			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
467 					  (u32 *)bctx->ipad, nr_dr);
468 			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
469 					  (u32 *)bctx->ipad + nr_dr, nr_dr);
470 			ctx->digcnt += bs;
471 		}
472 	}
473 
474 	if (final) {
475 		val |= SHA_REG_MODE_CLOSE_HASH;
476 
477 		if (ctx->flags & BIT(FLAGS_HMAC))
478 			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
479 	}
480 
481 	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
482 	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
483 	       SHA_REG_MODE_HMAC_KEY_PROC;
484 
485 	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
486 	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
487 	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
488 	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
489 			     SHA_REG_MASK_IT_EN |
490 				     (dma ? SHA_REG_MASK_DMA_EN : 0),
491 			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
492 }
493 
494 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
495 {
496 	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
497 }
498 
499 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
500 {
501 	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
502 			      SHA_REG_IRQSTATUS_INPUT_RDY);
503 }
504 
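/*
 * PIO transmit path: program the control/mode registers, then feed the
 * data one block at a time through the DIN registers as 32-bit words,
 * polling for INPUT_READY between blocks.  Returns -EINPROGRESS since
 * completion is signalled by the OUTPUT_READY interrupt.
 */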
505 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
506 			      int final)
507 {
508 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
509 	int count, len32, bs32, offset = 0;
510 	const u32 *buffer;
511 	int mlen;
512 	struct sg_mapping_iter mi;
513 
514 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
515 						ctx->digcnt, length, final);
516 
517 	dd->pdata->write_ctrl(dd, length, final, 0);
518 	dd->pdata->trigger(dd, length);
519 
520 	/* must be non-zero before the next lines so clocks can be disabled later */
521 	ctx->digcnt += length;
522 	ctx->total -= length;
523 
524 	if (final)
525 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
526 
527 	set_bit(FLAGS_CPU, &dd->flags);
528 
529 	len32 = DIV_ROUND_UP(length, sizeof(u32));
530 	bs32 = get_block_size(ctx) / sizeof(u32);
531 
532 	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
533 		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
534 
535 	mlen = 0;
536 
537 	while (len32) {
538 		if (dd->pdata->poll_irq(dd))
539 			return -ETIMEDOUT;
540 
541 		for (count = 0; count < min(len32, bs32); count++, offset++) {
542 			if (!mlen) {
543 				sg_miter_next(&mi);
544 				mlen = mi.length;
545 				if (!mlen) {
546 					pr_err("sg miter failure.\n");
547 					return -EINVAL;
548 				}
549 				offset = 0;
550 				buffer = mi.addr;
551 			}
552 			omap_sham_write(dd, SHA_REG_DIN(dd, count),
553 					buffer[offset]);
554 			mlen -= 4;
555 		}
556 		len32 -= min(len32, bs32);
557 	}
558 
559 	sg_miter_stop(&mi);
560 
561 	return -EINPROGRESS;
562 }
563 
564 static void omap_sham_dma_callback(void *param)
565 {
566 	struct omap_sham_dev *dd = param;
567 
568 	set_bit(FLAGS_DMA_READY, &dd->flags);
569 	tasklet_schedule(&dd->done_task);
570 }
571 
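/*
 * DMA transmit path: the slave channel writes to the DIN FIFO at
 * phys_base + SHA_REG_DIN(dd, 0), 4 bytes wide, with a burst covering
 * one hash block.  The scatterlist must already be block-aligned, see
 * omap_sham_align_sgs() below.
 */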
572 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
573 			      int final)
574 {
575 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
576 	struct dma_async_tx_descriptor *tx;
577 	struct dma_slave_config cfg;
578 	int ret;
579 
580 	dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
581 						ctx->digcnt, length, final);
582 
583 	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
584 		dev_err(dd->dev, "dma_map_sg error\n");
585 		return -EINVAL;
586 	}
587 
588 	memset(&cfg, 0, sizeof(cfg));
589 
590 	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
591 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
592 	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
593 
594 	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
595 	if (ret) {
596 		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
597 		return ret;
598 	}
599 
600 	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
601 				     DMA_MEM_TO_DEV,
602 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
603 
604 	if (!tx) {
605 		dev_err(dd->dev, "prep_slave_sg failed\n");
606 		return -EINVAL;
607 	}
608 
609 	tx->callback = omap_sham_dma_callback;
610 	tx->callback_param = dd;
611 
612 	dd->pdata->write_ctrl(dd, length, final, 1);
613 
614 	ctx->digcnt += length;
615 	ctx->total -= length;
616 
617 	if (final)
618 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
619 
620 	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
621 
622 	dmaengine_submit(tx);
623 	dma_async_issue_pending(dd->dma_lch);
624 
625 	dd->pdata->trigger(dd, length);
626 
627 	return -EINPROGRESS;
628 }
629 
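/*
 * Build a private scatterlist that prepends the bytes buffered in
 * xmit_buf and clips the source list to exactly new_len bytes, so the
 * hardware sees a whole number of blocks.
 */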
630 static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
631 				   struct scatterlist *sg, int bs, int new_len)
632 {
633 	int n = sg_nents(sg);
634 	struct scatterlist *tmp;
635 	int offset = ctx->offset;
636 
637 	ctx->total = new_len;
638 
639 	if (ctx->bufcnt)
640 		n++;
641 
642 	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
643 	if (!ctx->sg)
644 		return -ENOMEM;
645 
646 	sg_init_table(ctx->sg, n);
647 
648 	tmp = ctx->sg;
649 
650 	ctx->sg_len = 0;
651 
652 	if (ctx->bufcnt) {
653 		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
654 		tmp = sg_next(tmp);
655 		ctx->sg_len++;
656 		new_len -= ctx->bufcnt;
657 	}
658 
659 	while (sg && new_len) {
660 		int len = sg->length - offset;
661 
662 		if (len <= 0) {
663 			offset -= sg->length;
664 			sg = sg_next(sg);
665 			continue;
666 		}
667 
668 		if (new_len < len)
669 			len = new_len;
670 
671 		if (len > 0) {
672 			new_len -= len;
673 			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
674 			offset = 0;
675 			ctx->offset = 0;
676 			ctx->sg_len++;
677 			if (new_len <= 0)
678 				break;
679 			tmp = sg_next(tmp);
680 		}
681 
682 		sg = sg_next(sg);
683 	}
684 
685 	if (tmp)
686 		sg_mark_end(tmp);
687 
688 	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
689 
690 	ctx->offset += new_len - ctx->bufcnt;
691 	ctx->bufcnt = 0;
692 
693 	return 0;
694 }
695 
696 static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
697 			      struct scatterlist *sg, int bs,
698 			      unsigned int new_len)
699 {
700 	int pages;
701 	void *buf;
702 
703 	pages = get_order(new_len);
704 
705 	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
706 	if (!buf) {
707 		pr_err("Couldn't allocate pages for unaligned cases.\n");
708 		return -ENOMEM;
709 	}
710 
711 	if (ctx->bufcnt)
712 		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
713 
714 	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
715 				 min(new_len, ctx->total) - ctx->bufcnt, 0);
716 	sg_init_table(ctx->sgl, 1);
717 	sg_set_buf(ctx->sgl, buf, new_len);
718 	ctx->sg = ctx->sgl;
719 	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
720 	ctx->sg_len = 1;
721 	ctx->offset += new_len - ctx->bufcnt;
722 	ctx->bufcnt = 0;
723 	ctx->total = new_len;
724 
725 	return 0;
726 }
727 
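/*
 * Check whether the request scatterlist can be fed to the hardware
 * as-is.  Data must start 4-byte aligned and each entry must cover a
 * whole number of blocks; otherwise fall back to either
 * omap_sham_copy_sg_lists() (data is fine, only the list geometry is
 * wrong) or omap_sham_copy_sgs() (flat copy into freshly allocated
 * pages).  Transfers are also capped at OMAP_SHA_MAX_DMA_LEN per pass.
 */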
728 static int omap_sham_align_sgs(struct scatterlist *sg,
729 			       int nbytes, int bs, bool final,
730 			       struct omap_sham_reqctx *rctx)
731 {
732 	int n = 0;
733 	bool aligned = true;
734 	bool list_ok = true;
735 	struct scatterlist *sg_tmp = sg;
736 	int new_len;
737 	int offset = rctx->offset;
738 	int bufcnt = rctx->bufcnt;
739 
740 	if (!sg || !sg->length || !nbytes) {
741 		if (bufcnt) {
742 			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
743 			sg_init_table(rctx->sgl, 1);
744 			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
745 			rctx->sg = rctx->sgl;
746 			rctx->sg_len = 1;
747 		}
748 
749 		return 0;
750 	}
751 
752 	new_len = nbytes;
753 
754 	if (offset)
755 		list_ok = false;
756 
757 	if (final)
758 		new_len = DIV_ROUND_UP(new_len, bs) * bs;
759 	else
760 		new_len = (new_len - 1) / bs * bs;
761 
762 	if (!new_len)
763 		return 0;
764 
765 	if (nbytes != new_len)
766 		list_ok = false;
767 
768 	while (nbytes > 0 && sg_tmp) {
769 		n++;
770 
771 		if (bufcnt) {
772 			if (!IS_ALIGNED(bufcnt, bs)) {
773 				aligned = false;
774 				break;
775 			}
776 			nbytes -= bufcnt;
777 			bufcnt = 0;
778 			if (!nbytes)
779 				list_ok = false;
780 
781 			continue;
782 		}
783 
784 #ifdef CONFIG_ZONE_DMA
785 		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
786 			aligned = false;
787 			break;
788 		}
789 #endif
790 
791 		if (offset < sg_tmp->length) {
792 			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
793 				aligned = false;
794 				break;
795 			}
796 
797 			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
798 				aligned = false;
799 				break;
800 			}
801 		}
802 
803 		if (offset) {
804 			offset -= sg_tmp->length;
805 			if (offset < 0) {
806 				nbytes += offset;
807 				offset = 0;
808 			}
809 		} else {
810 			nbytes -= sg_tmp->length;
811 		}
812 
813 		sg_tmp = sg_next(sg_tmp);
814 
815 		if (nbytes < 0) {
816 			list_ok = false;
817 			break;
818 		}
819 	}
820 
821 	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
822 		new_len = OMAP_SHA_MAX_DMA_LEN;
823 		aligned = false;
824 	}
825 
826 	if (!aligned)
827 		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
828 	else if (!list_ok)
829 		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
830 
831 	rctx->total = new_len;
832 	rctx->offset += new_len;
833 	rctx->sg_len = n;
834 	if (rctx->bufcnt) {
835 		sg_init_table(rctx->sgl, 2);
836 		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
837 		sg_chain(rctx->sgl, 2, sg);
838 		rctx->sg = rctx->sgl;
839 	} else {
840 		rctx->sg = sg;
841 	}
842 
843 	return 0;
844 }
845 
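/*
 * Prepare the request for the engine: everything that does not fit a
 * whole block is kept back in rctx->buffer ("hash_later") and only a
 * block multiple is submitted, unless this is the final update.  For
 * example, with bs = 64, bufcnt = 10 and req->nbytes = 100, nbytes is
 * 110, the non-final total is rounded down to 64, and the remaining 46
 * bytes are copied back into the buffer for the next update.
 */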
846 static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
847 {
848 	struct ahash_request *req = container_of(areq, struct ahash_request,
849 						 base);
850 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
851 	int bs;
852 	int ret;
853 	unsigned int nbytes;
854 	bool final = rctx->flags & BIT(FLAGS_FINUP);
855 	bool update = rctx->op == OP_UPDATE;
856 	int hash_later;
857 
858 	bs = get_block_size(rctx);
859 
860 	nbytes = rctx->bufcnt;
861 
862 	if (update)
863 		nbytes += req->nbytes - rctx->offset;
864 
865 	dev_dbg(rctx->dd->dev,
866 		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
867 		__func__, nbytes, bs, rctx->total, rctx->offset,
868 		rctx->bufcnt);
869 
870 	if (!nbytes)
871 		return 0;
872 
873 	rctx->total = nbytes;
874 
875 	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
876 		int len = bs - rctx->bufcnt % bs;
877 
878 		if (len > req->nbytes)
879 			len = req->nbytes;
880 		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
881 					 0, len, 0);
882 		rctx->bufcnt += len;
883 		rctx->offset = len;
884 	}
885 
886 	if (rctx->bufcnt)
887 		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
888 
889 	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
890 	if (ret)
891 		return ret;
892 
893 	hash_later = nbytes - rctx->total;
894 	if (hash_later < 0)
895 		hash_later = 0;
896 
897 	if (hash_later && hash_later <= rctx->buflen) {
898 		scatterwalk_map_and_copy(rctx->buffer,
899 					 req->src,
900 					 req->nbytes - hash_later,
901 					 hash_later, 0);
902 
903 		rctx->bufcnt = hash_later;
904 	} else {
905 		rctx->bufcnt = 0;
906 	}
907 
908 	if (hash_later > rctx->buflen)
909 		set_bit(FLAGS_HUGE, &rctx->dd->flags);
910 
911 	rctx->total = min(nbytes, rctx->total);
912 
913 	return 0;
914 }
915 
916 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
917 {
918 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
919 
920 	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
921 
922 	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
923 
924 	return 0;
925 }
926 
927 static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
928 {
929 	struct omap_sham_dev *dd;
930 
931 	if (ctx->dd)
932 		return ctx->dd;
933 
934 	spin_lock_bh(&sham.lock);
935 	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
936 	list_move_tail(&dd->list, &sham.dev_list);
937 	ctx->dd = dd;
938 	spin_unlock_bh(&sham.lock);
939 
940 	return dd;
941 }
942 
943 static int omap_sham_init(struct ahash_request *req)
944 {
945 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
946 	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
947 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
948 	struct omap_sham_dev *dd;
949 	int bs = 0;
950 
951 	ctx->dd = NULL;
952 
953 	dd = omap_sham_find_dev(ctx);
954 	if (!dd)
955 		return -ENODEV;
956 
957 	ctx->flags = 0;
958 
959 	dev_dbg(dd->dev, "init: digest size: %d\n",
960 		crypto_ahash_digestsize(tfm));
961 
962 	switch (crypto_ahash_digestsize(tfm)) {
963 	case MD5_DIGEST_SIZE:
964 		ctx->flags |= FLAGS_MODE_MD5;
965 		bs = SHA1_BLOCK_SIZE;
966 		break;
967 	case SHA1_DIGEST_SIZE:
968 		ctx->flags |= FLAGS_MODE_SHA1;
969 		bs = SHA1_BLOCK_SIZE;
970 		break;
971 	case SHA224_DIGEST_SIZE:
972 		ctx->flags |= FLAGS_MODE_SHA224;
973 		bs = SHA224_BLOCK_SIZE;
974 		break;
975 	case SHA256_DIGEST_SIZE:
976 		ctx->flags |= FLAGS_MODE_SHA256;
977 		bs = SHA256_BLOCK_SIZE;
978 		break;
979 	case SHA384_DIGEST_SIZE:
980 		ctx->flags |= FLAGS_MODE_SHA384;
981 		bs = SHA384_BLOCK_SIZE;
982 		break;
983 	case SHA512_DIGEST_SIZE:
984 		ctx->flags |= FLAGS_MODE_SHA512;
985 		bs = SHA512_BLOCK_SIZE;
986 		break;
987 	}
988 
989 	ctx->bufcnt = 0;
990 	ctx->digcnt = 0;
991 	ctx->total = 0;
992 	ctx->offset = 0;
993 	ctx->buflen = BUFLEN;
994 
995 	if (tctx->flags & BIT(FLAGS_HMAC)) {
996 		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
997 			struct omap_sham_hmac_ctx *bctx = tctx->base;
998 
999 			memcpy(ctx->buffer, bctx->ipad, bs);
1000 			ctx->bufcnt = bs;
1001 		}
1002 
1003 		ctx->flags |= BIT(FLAGS_HMAC);
1004 	}
1005 
1006 	return 0;
1007 
1008 }
1009 
1010 static int omap_sham_update_req(struct omap_sham_dev *dd)
1011 {
1012 	struct ahash_request *req = dd->req;
1013 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1014 	int err;
1015 	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1016 		!(dd->flags & BIT(FLAGS_HUGE));
1017 
1018 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
1019 		ctx->total, ctx->digcnt, final);
1020 
1021 	if (ctx->total < get_block_size(ctx) ||
1022 	    ctx->total < dd->fallback_sz)
1023 		ctx->flags |= BIT(FLAGS_CPU);
1024 
1025 	if (ctx->flags & BIT(FLAGS_CPU))
1026 		err = omap_sham_xmit_cpu(dd, ctx->total, final);
1027 	else
1028 		err = omap_sham_xmit_dma(dd, ctx->total, final);
1029 
1030 	/* wait for DMA completion before we can take more data */
1031 	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
1032 
1033 	return err;
1034 }
1035 
1036 static int omap_sham_final_req(struct omap_sham_dev *dd)
1037 {
1038 	struct ahash_request *req = dd->req;
1039 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1040 	int err = 0, use_dma = 1;
1041 
1042 	if (dd->flags & BIT(FLAGS_HUGE))
1043 		return 0;
1044 
1045 	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1046 		/*
1047 		 * It is faster to handle the last block with the CPU, and
1048 		 * the CPU must be used when DMA is not present.
1049 		 */
1050 		use_dma = 0;
1051 
1052 	if (use_dma)
1053 		err = omap_sham_xmit_dma(dd, ctx->total, 1);
1054 	else
1055 		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1056 
1057 	ctx->bufcnt = 0;
1058 
1059 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
1060 
1061 	return err;
1062 }
1063 
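/*
 * crypto_engine do_one_request callback: prepares the scatterlists,
 * resumes the device, restores an intermediate digest if one exists,
 * and kicks off either an update or a final transfer.  Requests that
 * complete synchronously are finalized here as well.
 */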
1064 static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
1065 {
1066 	struct ahash_request *req = container_of(areq, struct ahash_request,
1067 						 base);
1068 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1069 	struct omap_sham_dev *dd = ctx->dd;
1070 	int err;
1071 	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1072 			!(dd->flags & BIT(FLAGS_HUGE));
1073 
1074 	dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
1075 		ctx->op, ctx->total, ctx->digcnt, final);
1076 
1077 	err = omap_sham_prepare_request(engine, areq);
1078 	if (err)
1079 		return err;
1080 
1081 	err = pm_runtime_resume_and_get(dd->dev);
1082 	if (err < 0) {
1083 		dev_err(dd->dev, "failed to get sync: %d\n", err);
1084 		return err;
1085 	}
1086 
1087 	dd->err = 0;
1088 	dd->req = req;
1089 
1090 	if (ctx->digcnt)
1091 		dd->pdata->copy_hash(req, 0);
1092 
1093 	if (ctx->op == OP_UPDATE)
1094 		err = omap_sham_update_req(dd);
1095 	else if (ctx->op == OP_FINAL)
1096 		err = omap_sham_final_req(dd);
1097 
1098 	if (err != -EINPROGRESS)
1099 		omap_sham_finish_req(req, err);
1100 
1101 	return 0;
1102 }
1103 
1104 static int omap_sham_finish_hmac(struct ahash_request *req)
1105 {
1106 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1107 	struct omap_sham_hmac_ctx *bctx = tctx->base;
1108 	int bs = crypto_shash_blocksize(bctx->shash);
1109 	int ds = crypto_shash_digestsize(bctx->shash);
1110 	SHASH_DESC_ON_STACK(shash, bctx->shash);
1111 
1112 	shash->tfm = bctx->shash;
1113 
1114 	return crypto_shash_init(shash) ?:
1115 	       crypto_shash_update(shash, bctx->opad, bs) ?:
1116 	       crypto_shash_finup(shash, req->result, ds, req->result);
1117 }
1118 
1119 static int omap_sham_finish(struct ahash_request *req)
1120 {
1121 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1122 	struct omap_sham_dev *dd = ctx->dd;
1123 	int err = 0;
1124 
1125 	if (ctx->digcnt) {
1126 		omap_sham_copy_ready_hash(req);
1127 		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1128 				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
1129 			err = omap_sham_finish_hmac(req);
1130 	}
1131 
1132 	dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);
1133 
1134 	return err;
1135 }
1136 
1137 static void omap_sham_finish_req(struct ahash_request *req, int err)
1138 {
1139 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1140 	struct omap_sham_dev *dd = ctx->dd;
1141 
1142 	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1143 		free_pages((unsigned long)sg_virt(ctx->sg),
1144 			   get_order(ctx->sg->length));
1145 
1146 	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1147 		kfree(ctx->sg);
1148 
1149 	ctx->sg = NULL;
1150 
1151 	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
1152 		       BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
1153 		       BIT(FLAGS_OUTPUT_READY));
1154 
1155 	if (!err)
1156 		dd->pdata->copy_hash(req, 1);
1157 
1158 	if (dd->flags & BIT(FLAGS_HUGE)) {
1159 		/* Re-enqueue the request */
1160 		omap_sham_enqueue(req, ctx->op);
1161 		return;
1162 	}
1163 
1164 	if (!err) {
1165 		if (test_bit(FLAGS_FINAL, &dd->flags))
1166 			err = omap_sham_finish(req);
1167 	} else {
1168 		ctx->flags |= BIT(FLAGS_ERROR);
1169 	}
1170 
1171 	/* atomic operation is not needed here */
1172 	dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1173 			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1174 
1175 	pm_runtime_mark_last_busy(dd->dev);
1176 	pm_runtime_put_autosuspend(dd->dev);
1177 
1178 	ctx->offset = 0;
1179 
1180 	crypto_finalize_hash_request(dd->engine, req, err);
1181 }
1182 
1183 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1184 				  struct ahash_request *req)
1185 {
1186 	return crypto_transfer_hash_request_to_engine(dd->engine, req);
1187 }
1188 
1189 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1190 {
1191 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1192 	struct omap_sham_dev *dd = ctx->dd;
1193 
1194 	ctx->op = op;
1195 
1196 	return omap_sham_handle_queue(dd, req);
1197 }
1198 
1199 static int omap_sham_update(struct ahash_request *req)
1200 {
1201 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1202 	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
1203 
1204 	if (!req->nbytes)
1205 		return 0;
1206 
1207 	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
1208 		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1209 					 0, req->nbytes, 0);
1210 		ctx->bufcnt += req->nbytes;
1211 		return 0;
1212 	}
1213 
1214 	if (dd->polling_mode)
1215 		ctx->flags |= BIT(FLAGS_CPU);
1216 
1217 	return omap_sham_enqueue(req, OP_UPDATE);
1218 }
1219 
1220 static int omap_sham_final_shash(struct ahash_request *req)
1221 {
1222 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1223 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1224 	int offset = 0;
1225 
1226 	/*
1227 	 * If we are running HMAC on hardware with limited support
1228 	 * (no AUTO_XOR), skip the ipad at the beginning of the buffer
1229 	 * when going for the software fallback algorithm.
1230 	 */
1231 	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1232 	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1233 		offset = get_block_size(ctx);
1234 
1235 	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
1236 				       ctx->bufcnt - offset, req->result);
1237 }
1238 
1239 static int omap_sham_final(struct ahash_request *req)
1240 {
1241 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1242 
1243 	ctx->flags |= BIT(FLAGS_FINUP);
1244 
1245 	if (ctx->flags & BIT(FLAGS_ERROR))
1246 		return 0; /* uncompleted hash is not needed */
1247 
1248 	/*
1249 	 * OMAP HW accel works only with buffers >= 9 bytes.
1250 	 * HMAC is always >= 9 because ipad == block size.
1251 	 * If the buffer size is less than fallback_sz, we use fallback
1252 	 * SW encoding, as using DMA + HW in this case doesn't provide
1253 	 * any benefit.
1254 	 */
1255 	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1256 		return omap_sham_final_shash(req);
1257 	else if (ctx->bufcnt)
1258 		return omap_sham_enqueue(req, OP_FINAL);
1259 
1260 	/* copy ready hash (+ finalize hmac) */
1261 	return omap_sham_finish(req);
1262 }
1263 
1264 static int omap_sham_finup(struct ahash_request *req)
1265 {
1266 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1267 	int err1, err2;
1268 
1269 	ctx->flags |= BIT(FLAGS_FINUP);
1270 
1271 	err1 = omap_sham_update(req);
1272 	if (err1 == -EINPROGRESS || err1 == -EBUSY)
1273 		return err1;
1274 	/*
1275 	 * final() always has to be called to clean up resources,
1276 	 * even if update() failed, except for -EINPROGRESS
1277 	 */
1278 	err2 = omap_sham_final(req);
1279 
1280 	return err1 ?: err2;
1281 }
1282 
1283 static int omap_sham_digest(struct ahash_request *req)
1284 {
1285 	return omap_sham_init(req) ?: omap_sham_finup(req);
1286 }
1287 
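/*
 * HMAC key processing per RFC 2104: keys longer than the block size
 * are first digested down to ds bytes, then zero-padded to a full
 * block in ipad.  On hardware without AUTO_XOR the 0x36/0x5c pad XOR
 * (HMAC_IPAD_VALUE/HMAC_OPAD_VALUE) is precomputed here in software;
 * AUTO_XOR-capable hardware derives both pads from the raw key itself.
 */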
1288 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1289 		      unsigned int keylen)
1290 {
1291 	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1292 	struct omap_sham_hmac_ctx *bctx = tctx->base;
1293 	int bs = crypto_shash_blocksize(bctx->shash);
1294 	int ds = crypto_shash_digestsize(bctx->shash);
1295 	int err, i;
1296 
1297 	err = crypto_shash_setkey(tctx->fallback, key, keylen);
1298 	if (err)
1299 		return err;
1300 
1301 	if (keylen > bs) {
1302 		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
1303 					      bctx->ipad);
1304 		if (err)
1305 			return err;
1306 		keylen = ds;
1307 	} else {
1308 		memcpy(bctx->ipad, key, keylen);
1309 	}
1310 
1311 	memset(bctx->ipad + keylen, 0, bs - keylen);
1312 
1313 	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
1314 		memcpy(bctx->opad, bctx->ipad, bs);
1315 
1316 		for (i = 0; i < bs; i++) {
1317 			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
1318 			bctx->opad[i] ^= HMAC_OPAD_VALUE;
1319 		}
1320 	}
1321 
1322 	return err;
1323 }
1324 
1325 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1326 {
1327 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1328 	const char *alg_name = crypto_tfm_alg_name(tfm);
1329 
1330 	/* Allocate a fallback and abort if it failed. */
1331 	tctx->fallback = crypto_alloc_shash(alg_name, 0,
1332 					    CRYPTO_ALG_NEED_FALLBACK);
1333 	if (IS_ERR(tctx->fallback)) {
1334 		pr_err("omap-sham: fallback driver '%s' "
1335 				"could not be loaded.\n", alg_name);
1336 		return PTR_ERR(tctx->fallback);
1337 	}
1338 
1339 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1340 				 sizeof(struct omap_sham_reqctx) + BUFLEN);
1341 
1342 	if (alg_base) {
1343 		struct omap_sham_hmac_ctx *bctx = tctx->base;
1344 		tctx->flags |= BIT(FLAGS_HMAC);
1345 		bctx->shash = crypto_alloc_shash(alg_base, 0,
1346 						CRYPTO_ALG_NEED_FALLBACK);
1347 		if (IS_ERR(bctx->shash)) {
1348 			pr_err("omap-sham: base driver '%s' "
1349 					"could not be loaded.\n", alg_base);
1350 			crypto_free_shash(tctx->fallback);
1351 			return PTR_ERR(bctx->shash);
1352 		}
1353 
1354 	}
1355 
1356 	tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
1357 
1358 	return 0;
1359 }
1360 
1361 static int omap_sham_cra_init(struct crypto_tfm *tfm)
1362 {
1363 	return omap_sham_cra_init_alg(tfm, NULL);
1364 }
1365 
1366 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1367 {
1368 	return omap_sham_cra_init_alg(tfm, "sha1");
1369 }
1370 
1371 static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1372 {
1373 	return omap_sham_cra_init_alg(tfm, "sha224");
1374 }
1375 
1376 static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1377 {
1378 	return omap_sham_cra_init_alg(tfm, "sha256");
1379 }
1380 
1381 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1382 {
1383 	return omap_sham_cra_init_alg(tfm, "md5");
1384 }
1385 
1386 static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1387 {
1388 	return omap_sham_cra_init_alg(tfm, "sha384");
1389 }
1390 
1391 static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1392 {
1393 	return omap_sham_cra_init_alg(tfm, "sha512");
1394 }
1395 
1396 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1397 {
1398 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1399 
1400 	crypto_free_shash(tctx->fallback);
1401 	tctx->fallback = NULL;
1402 
1403 	if (tctx->flags & BIT(FLAGS_HMAC)) {
1404 		struct omap_sham_hmac_ctx *bctx = tctx->base;
1405 		crypto_free_shash(bctx->shash);
1406 	}
1407 }
1408 
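/*
 * Partial-state export/import: the request context (including any
 * buffered partial block) is copied verbatim, which is enough to save
 * and restore an in-progress hash.
 */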
1409 static int omap_sham_export(struct ahash_request *req, void *out)
1410 {
1411 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1412 
1413 	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1414 
1415 	return 0;
1416 }
1417 
1418 static int omap_sham_import(struct ahash_request *req, const void *in)
1419 {
1420 	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1421 	const struct omap_sham_reqctx *ctx_in = in;
1422 
1423 	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1424 
1425 	return 0;
1426 }
1427 
1428 static struct ahash_alg algs_sha1_md5[] = {
1429 {
1430 	.init		= omap_sham_init,
1431 	.update		= omap_sham_update,
1432 	.final		= omap_sham_final,
1433 	.finup		= omap_sham_finup,
1434 	.digest		= omap_sham_digest,
1435 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1436 	.halg.base	= {
1437 		.cra_name		= "sha1",
1438 		.cra_driver_name	= "omap-sha1",
1439 		.cra_priority		= 400,
1440 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1441 						CRYPTO_ALG_ASYNC |
1442 						CRYPTO_ALG_NEED_FALLBACK,
1443 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1444 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1445 		.cra_alignmask		= OMAP_ALIGN_MASK,
1446 		.cra_module		= THIS_MODULE,
1447 		.cra_init		= omap_sham_cra_init,
1448 		.cra_exit		= omap_sham_cra_exit,
1449 	}
1450 },
1451 {
1452 	.init		= omap_sham_init,
1453 	.update		= omap_sham_update,
1454 	.final		= omap_sham_final,
1455 	.finup		= omap_sham_finup,
1456 	.digest		= omap_sham_digest,
1457 	.halg.digestsize	= MD5_DIGEST_SIZE,
1458 	.halg.base	= {
1459 		.cra_name		= "md5",
1460 		.cra_driver_name	= "omap-md5",
1461 		.cra_priority		= 400,
1462 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1463 						CRYPTO_ALG_ASYNC |
1464 						CRYPTO_ALG_NEED_FALLBACK,
1465 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1466 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1467 		.cra_alignmask		= OMAP_ALIGN_MASK,
1468 		.cra_module		= THIS_MODULE,
1469 		.cra_init		= omap_sham_cra_init,
1470 		.cra_exit		= omap_sham_cra_exit,
1471 	}
1472 },
1473 {
1474 	.init		= omap_sham_init,
1475 	.update		= omap_sham_update,
1476 	.final		= omap_sham_final,
1477 	.finup		= omap_sham_finup,
1478 	.digest		= omap_sham_digest,
1479 	.setkey		= omap_sham_setkey,
1480 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1481 	.halg.base	= {
1482 		.cra_name		= "hmac(sha1)",
1483 		.cra_driver_name	= "omap-hmac-sha1",
1484 		.cra_priority		= 400,
1485 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1486 						CRYPTO_ALG_ASYNC |
1487 						CRYPTO_ALG_NEED_FALLBACK,
1488 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1489 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1490 					sizeof(struct omap_sham_hmac_ctx),
1491 		.cra_alignmask		= OMAP_ALIGN_MASK,
1492 		.cra_module		= THIS_MODULE,
1493 		.cra_init		= omap_sham_cra_sha1_init,
1494 		.cra_exit		= omap_sham_cra_exit,
1495 	}
1496 },
1497 {
1498 	.init		= omap_sham_init,
1499 	.update		= omap_sham_update,
1500 	.final		= omap_sham_final,
1501 	.finup		= omap_sham_finup,
1502 	.digest		= omap_sham_digest,
1503 	.setkey		= omap_sham_setkey,
1504 	.halg.digestsize	= MD5_DIGEST_SIZE,
1505 	.halg.base	= {
1506 		.cra_name		= "hmac(md5)",
1507 		.cra_driver_name	= "omap-hmac-md5",
1508 		.cra_priority		= 400,
1509 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1510 						CRYPTO_ALG_ASYNC |
1511 						CRYPTO_ALG_NEED_FALLBACK,
1512 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1513 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1514 					sizeof(struct omap_sham_hmac_ctx),
1515 		.cra_alignmask		= OMAP_ALIGN_MASK,
1516 		.cra_module		= THIS_MODULE,
1517 		.cra_init		= omap_sham_cra_md5_init,
1518 		.cra_exit		= omap_sham_cra_exit,
1519 	}
1520 }
1521 };
1522 
1523 /* OMAP4 has some algs in addition to what OMAP2 has */
1524 static struct ahash_alg algs_sha224_sha256[] = {
1525 {
1526 	.init		= omap_sham_init,
1527 	.update		= omap_sham_update,
1528 	.final		= omap_sham_final,
1529 	.finup		= omap_sham_finup,
1530 	.digest		= omap_sham_digest,
1531 	.halg.digestsize	= SHA224_DIGEST_SIZE,
1532 	.halg.base	= {
1533 		.cra_name		= "sha224",
1534 		.cra_driver_name	= "omap-sha224",
1535 		.cra_priority		= 400,
1536 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1537 						CRYPTO_ALG_ASYNC |
1538 						CRYPTO_ALG_NEED_FALLBACK,
1539 		.cra_blocksize		= SHA224_BLOCK_SIZE,
1540 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1541 		.cra_alignmask		= OMAP_ALIGN_MASK,
1542 		.cra_module		= THIS_MODULE,
1543 		.cra_init		= omap_sham_cra_init,
1544 		.cra_exit		= omap_sham_cra_exit,
1545 	}
1546 },
1547 {
1548 	.init		= omap_sham_init,
1549 	.update		= omap_sham_update,
1550 	.final		= omap_sham_final,
1551 	.finup		= omap_sham_finup,
1552 	.digest		= omap_sham_digest,
1553 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1554 	.halg.base	= {
1555 		.cra_name		= "sha256",
1556 		.cra_driver_name	= "omap-sha256",
1557 		.cra_priority		= 400,
1558 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1559 						CRYPTO_ALG_ASYNC |
1560 						CRYPTO_ALG_NEED_FALLBACK,
1561 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1562 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1563 		.cra_alignmask		= OMAP_ALIGN_MASK,
1564 		.cra_module		= THIS_MODULE,
1565 		.cra_init		= omap_sham_cra_init,
1566 		.cra_exit		= omap_sham_cra_exit,
1567 	}
1568 },
1569 {
1570 	.init		= omap_sham_init,
1571 	.update		= omap_sham_update,
1572 	.final		= omap_sham_final,
1573 	.finup		= omap_sham_finup,
1574 	.digest		= omap_sham_digest,
1575 	.setkey		= omap_sham_setkey,
1576 	.halg.digestsize	= SHA224_DIGEST_SIZE,
1577 	.halg.base	= {
1578 		.cra_name		= "hmac(sha224)",
1579 		.cra_driver_name	= "omap-hmac-sha224",
1580 		.cra_priority		= 400,
1581 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1582 						CRYPTO_ALG_ASYNC |
1583 						CRYPTO_ALG_NEED_FALLBACK,
1584 		.cra_blocksize		= SHA224_BLOCK_SIZE,
1585 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1586 					sizeof(struct omap_sham_hmac_ctx),
1587 		.cra_alignmask		= OMAP_ALIGN_MASK,
1588 		.cra_module		= THIS_MODULE,
1589 		.cra_init		= omap_sham_cra_sha224_init,
1590 		.cra_exit		= omap_sham_cra_exit,
1591 	}
1592 },
1593 {
1594 	.init		= omap_sham_init,
1595 	.update		= omap_sham_update,
1596 	.final		= omap_sham_final,
1597 	.finup		= omap_sham_finup,
1598 	.digest		= omap_sham_digest,
1599 	.setkey		= omap_sham_setkey,
1600 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1601 	.halg.base	= {
1602 		.cra_name		= "hmac(sha256)",
1603 		.cra_driver_name	= "omap-hmac-sha256",
1604 		.cra_priority		= 400,
1605 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1606 						CRYPTO_ALG_ASYNC |
1607 						CRYPTO_ALG_NEED_FALLBACK,
1608 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1609 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1610 					sizeof(struct omap_sham_hmac_ctx),
1611 		.cra_alignmask		= OMAP_ALIGN_MASK,
1612 		.cra_module		= THIS_MODULE,
1613 		.cra_init		= omap_sham_cra_sha256_init,
1614 		.cra_exit		= omap_sham_cra_exit,
1615 	}
1616 },
1617 };
1618 
1619 static struct ahash_alg algs_sha384_sha512[] = {
1620 {
1621 	.init		= omap_sham_init,
1622 	.update		= omap_sham_update,
1623 	.final		= omap_sham_final,
1624 	.finup		= omap_sham_finup,
1625 	.digest		= omap_sham_digest,
1626 	.halg.digestsize	= SHA384_DIGEST_SIZE,
1627 	.halg.base	= {
1628 		.cra_name		= "sha384",
1629 		.cra_driver_name	= "omap-sha384",
1630 		.cra_priority		= 400,
1631 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1632 						CRYPTO_ALG_ASYNC |
1633 						CRYPTO_ALG_NEED_FALLBACK,
1634 		.cra_blocksize		= SHA384_BLOCK_SIZE,
1635 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1636 		.cra_alignmask		= OMAP_ALIGN_MASK,
1637 		.cra_module		= THIS_MODULE,
1638 		.cra_init		= omap_sham_cra_init,
1639 		.cra_exit		= omap_sham_cra_exit,
1640 	}
1641 },
1642 {
1643 	.init		= omap_sham_init,
1644 	.update		= omap_sham_update,
1645 	.final		= omap_sham_final,
1646 	.finup		= omap_sham_finup,
1647 	.digest		= omap_sham_digest,
1648 	.halg.digestsize	= SHA512_DIGEST_SIZE,
1649 	.halg.base	= {
1650 		.cra_name		= "sha512",
1651 		.cra_driver_name	= "omap-sha512",
1652 		.cra_priority		= 400,
1653 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1654 						CRYPTO_ALG_ASYNC |
1655 						CRYPTO_ALG_NEED_FALLBACK,
1656 		.cra_blocksize		= SHA512_BLOCK_SIZE,
1657 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1658 		.cra_alignmask		= OMAP_ALIGN_MASK,
1659 		.cra_module		= THIS_MODULE,
1660 		.cra_init		= omap_sham_cra_init,
1661 		.cra_exit		= omap_sham_cra_exit,
1662 	}
1663 },
1664 {
1665 	.init		= omap_sham_init,
1666 	.update		= omap_sham_update,
1667 	.final		= omap_sham_final,
1668 	.finup		= omap_sham_finup,
1669 	.digest		= omap_sham_digest,
1670 	.setkey		= omap_sham_setkey,
1671 	.halg.digestsize	= SHA384_DIGEST_SIZE,
1672 	.halg.base	= {
1673 		.cra_name		= "hmac(sha384)",
1674 		.cra_driver_name	= "omap-hmac-sha384",
1675 		.cra_priority		= 400,
1676 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1677 						CRYPTO_ALG_ASYNC |
1678 						CRYPTO_ALG_NEED_FALLBACK,
1679 		.cra_blocksize		= SHA384_BLOCK_SIZE,
1680 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1681 					sizeof(struct omap_sham_hmac_ctx),
1682 		.cra_alignmask		= OMAP_ALIGN_MASK,
1683 		.cra_module		= THIS_MODULE,
1684 		.cra_init		= omap_sham_cra_sha384_init,
1685 		.cra_exit		= omap_sham_cra_exit,
1686 	}
1687 },
1688 {
1689 	.init		= omap_sham_init,
1690 	.update		= omap_sham_update,
1691 	.final		= omap_sham_final,
1692 	.finup		= omap_sham_finup,
1693 	.digest		= omap_sham_digest,
1694 	.setkey		= omap_sham_setkey,
1695 	.halg.digestsize	= SHA512_DIGEST_SIZE,
1696 	.halg.base	= {
1697 		.cra_name		= "hmac(sha512)",
1698 		.cra_driver_name	= "omap-hmac-sha512",
1699 		.cra_priority		= 400,
1700 		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1701 						CRYPTO_ALG_ASYNC |
1702 						CRYPTO_ALG_NEED_FALLBACK,
1703 		.cra_blocksize		= SHA512_BLOCK_SIZE,
1704 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1705 					sizeof(struct omap_sham_hmac_ctx),
1706 		.cra_alignmask		= OMAP_ALIGN_MASK,
1707 		.cra_module		= THIS_MODULE,
1708 		.cra_init		= omap_sham_cra_sha512_init,
1709 		.cra_exit		= omap_sham_cra_exit,
1710 	}
1711 },
1712 };
1713 
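/*
 * Bottom-half tasklet: runs after the OUTPUT_READY interrupt or the
 * DMA completion callback, unmaps the DMA scatterlist if a transfer
 * was active, and finishes the current request once both the transfer
 * and the hash are done.
 */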
1714 static void omap_sham_done_task(unsigned long data)
1715 {
1716 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1717 	int err = 0;
1718 
1719 	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
1720 
1721 	if (test_bit(FLAGS_CPU, &dd->flags)) {
1722 		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1723 			goto finish;
1724 	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1725 		if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1726 			omap_sham_update_dma_stop(dd);
1727 			if (dd->err) {
1728 				err = dd->err;
1729 				goto finish;
1730 			}
1731 		}
1732 		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1733 			/* hash or semi-hash ready */
1734 			clear_bit(FLAGS_DMA_READY, &dd->flags);
1735 			goto finish;
1736 		}
1737 	}
1738 
1739 	return;
1740 
1741 finish:
1742 	dev_dbg(dd->dev, "update done: err: %d\n", err);
1743 	/* finish current request */
1744 	omap_sham_finish_req(dd->req, err);
1745 }
1746 
1747 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1748 {
1749 	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1750 	tasklet_schedule(&dd->done_task);
1751 
1752 	return IRQ_HANDLED;
1753 }
1754 
1755 static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1756 {
1757 	struct omap_sham_dev *dd = dev_id;
1758 
1759 	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1760 		/* final -> allow device to go to power-saving mode */
1761 		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1762 
1763 	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1764 				 SHA_REG_CTRL_OUTPUT_READY);
1765 	omap_sham_read(dd, SHA_REG_CTRL);
1766 
1767 	return omap_sham_irq_common(dd);
1768 }
1769 
1770 static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1771 {
1772 	struct omap_sham_dev *dd = dev_id;
1773 
1774 	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1775 
1776 	return omap_sham_irq_common(dd);
1777 }
1778 
1779 static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1780 	{
1781 		.algs_list	= algs_sha1_md5,
1782 		.size		= ARRAY_SIZE(algs_sha1_md5),
1783 	},
1784 };
1785 
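/*
 * Per-SoC data: OMAP2/3 (this table) only accelerates MD5/SHA1 and
 * stores SHA1 digest words big-endian; the OMAP4/5 variants below add
 * SHA2, hardware HMAC pad handling (AUTO_XOR) and a different register
 * layout.
 */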
1786 static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1787 	.algs_info	= omap_sham_algs_info_omap2,
1788 	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
1789 	.flags		= BIT(FLAGS_BE32_SHA1),
1790 	.digest_size	= SHA1_DIGEST_SIZE,
1791 	.copy_hash	= omap_sham_copy_hash_omap2,
1792 	.write_ctrl	= omap_sham_write_ctrl_omap2,
1793 	.trigger	= omap_sham_trigger_omap2,
1794 	.poll_irq	= omap_sham_poll_irq_omap2,
1795 	.intr_hdlr	= omap_sham_irq_omap2,
1796 	.idigest_ofs	= 0x00,
1797 	.din_ofs	= 0x1c,
1798 	.digcnt_ofs	= 0x14,
1799 	.rev_ofs	= 0x5c,
1800 	.mask_ofs	= 0x60,
1801 	.sysstatus_ofs	= 0x64,
1802 	.major_mask	= 0xf0,
1803 	.major_shift	= 4,
1804 	.minor_mask	= 0x0f,
1805 	.minor_shift	= 0,
1806 };
1807 
1808 #ifdef CONFIG_OF
1809 static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1810 	{
1811 		.algs_list	= algs_sha1_md5,
1812 		.size		= ARRAY_SIZE(algs_sha1_md5),
1813 	},
1814 	{
1815 		.algs_list	= algs_sha224_sha256,
1816 		.size		= ARRAY_SIZE(algs_sha224_sha256),
1817 	},
1818 };
1819 
1820 static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1821 	.algs_info	= omap_sham_algs_info_omap4,
1822 	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
1823 	.flags		= BIT(FLAGS_AUTO_XOR),
1824 	.digest_size	= SHA256_DIGEST_SIZE,
1825 	.copy_hash	= omap_sham_copy_hash_omap4,
1826 	.write_ctrl	= omap_sham_write_ctrl_omap4,
1827 	.trigger	= omap_sham_trigger_omap4,
1828 	.poll_irq	= omap_sham_poll_irq_omap4,
1829 	.intr_hdlr	= omap_sham_irq_omap4,
1830 	.idigest_ofs	= 0x020,
1831 	.odigest_ofs	= 0x0,
1832 	.din_ofs	= 0x080,
1833 	.digcnt_ofs	= 0x040,
1834 	.rev_ofs	= 0x100,
1835 	.mask_ofs	= 0x110,
1836 	.sysstatus_ofs	= 0x114,
1837 	.mode_ofs	= 0x44,
1838 	.length_ofs	= 0x48,
1839 	.major_mask	= 0x0700,
1840 	.major_shift	= 8,
1841 	.minor_mask	= 0x003f,
1842 	.minor_shift	= 0,
1843 };
1844 
1845 static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1846 	{
1847 		.algs_list	= algs_sha1_md5,
1848 		.size		= ARRAY_SIZE(algs_sha1_md5),
1849 	},
1850 	{
1851 		.algs_list	= algs_sha224_sha256,
1852 		.size		= ARRAY_SIZE(algs_sha224_sha256),
1853 	},
1854 	{
1855 		.algs_list	= algs_sha384_sha512,
1856 		.size		= ARRAY_SIZE(algs_sha384_sha512),
1857 	},
1858 };
1859 
1860 static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1861 	.algs_info	= omap_sham_algs_info_omap5,
1862 	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
1863 	.flags		= BIT(FLAGS_AUTO_XOR),
1864 	.digest_size	= SHA512_DIGEST_SIZE,
1865 	.copy_hash	= omap_sham_copy_hash_omap4,
1866 	.write_ctrl	= omap_sham_write_ctrl_omap4,
1867 	.trigger	= omap_sham_trigger_omap4,
1868 	.poll_irq	= omap_sham_poll_irq_omap4,
1869 	.intr_hdlr	= omap_sham_irq_omap4,
1870 	.idigest_ofs	= 0x240,
1871 	.odigest_ofs	= 0x200,
1872 	.din_ofs	= 0x080,
1873 	.digcnt_ofs	= 0x280,
1874 	.rev_ofs	= 0x100,
1875 	.mask_ofs	= 0x110,
1876 	.sysstatus_ofs	= 0x114,
1877 	.mode_ofs	= 0x284,
1878 	.length_ofs	= 0x288,
1879 	.major_mask	= 0x0700,
1880 	.major_shift	= 8,
1881 	.minor_mask	= 0x003f,
1882 	.minor_shift	= 0,
1883 };
1884 
1885 static const struct of_device_id omap_sham_of_match[] = {
1886 	{
1887 		.compatible	= "ti,omap2-sham",
1888 		.data		= &omap_sham_pdata_omap2,
1889 	},
1890 	{
1891 		.compatible	= "ti,omap3-sham",
1892 		.data		= &omap_sham_pdata_omap2,
1893 	},
1894 	{
1895 		.compatible	= "ti,omap4-sham",
1896 		.data		= &omap_sham_pdata_omap4,
1897 	},
1898 	{
1899 		.compatible	= "ti,omap5-sham",
1900 		.data		= &omap_sham_pdata_omap5,
1901 	},
1902 	{},
1903 };
1904 MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1905 
1906 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1907 		struct device *dev, struct resource *res)
1908 {
1909 	struct device_node *node = dev->of_node;
1910 	int err = 0;
1911 
1912 	dd->pdata = of_device_get_match_data(dev);
1913 	if (!dd->pdata) {
1914 		dev_err(dev, "no compatible OF match\n");
1915 		err = -EINVAL;
1916 		goto err;
1917 	}
1918 
1919 	err = of_address_to_resource(node, 0, res);
1920 	if (err < 0) {
1921 		dev_err(dev, "can't translate OF node address\n");
1922 		err = -EINVAL;
1923 		goto err;
1924 	}
1925 
1926 	dd->irq = irq_of_parse_and_map(node, 0);
1927 	if (!dd->irq) {
1928 		dev_err(dev, "can't translate OF irq value\n");
1929 		err = -EINVAL;
1930 		goto err;
1931 	}
1932 
1933 err:
1934 	return err;
1935 }
1936 #else
1937 static const struct of_device_id omap_sham_of_match[] = {
1938 	{},
1939 };
1940 
1941 static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1942 		struct device *dev, struct resource *res)
1943 {
1944 	return -EINVAL;
1945 }
1946 #endif
1947 
1948 static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
1949 		struct platform_device *pdev, struct resource *res)
1950 {
1951 	struct device *dev = &pdev->dev;
1952 	struct resource *r;
1953 	int err = 0;
1954 
1955 	/* Get the base address */
1956 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1957 	if (!r) {
1958 		dev_err(dev, "no MEM resource info\n");
1959 		err = -ENODEV;
1960 		goto err;
1961 	}
1962 	memcpy(res, r, sizeof(*res));
1963 
1964 	/* Get the IRQ */
1965 	dd->irq = platform_get_irq(pdev, 0);
1966 	if (dd->irq < 0) {
1967 		err = dd->irq;
1968 		goto err;
1969 	}
1970 
1971 	/* Only OMAP2/3 can be non-DT */
1972 	dd->pdata = &omap_sham_pdata_omap2;
1973 
1974 err:
1975 	return err;
1976 }
1977 
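/*
 * "fallback" sysfs attribute: requests smaller than this many bytes are
 * handed to the software fallback implementation, where the DMA and
 * interrupt overhead of the accelerator would outweigh its benefit.
 */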
1978 static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
1979 			     char *buf)
1980 {
1981 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
1982 
	return sysfs_emit(buf, "%d\n", dd->fallback_sz);
1984 }
1985 
1986 static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
1987 			      const char *buf, size_t size)
1988 {
1989 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
1990 	ssize_t status;
1991 	long value;
1992 
1993 	status = kstrtol(buf, 0, &value);
1994 	if (status)
1995 		return status;
1996 
	/* the HW accelerator only works with buffers of at least 9 bytes */
	if (value < 9) {
		dev_err(dev, "minimum fallback size is 9\n");
		return -EINVAL;
	}
2002 
2003 	dd->fallback_sz = value;
2004 
2005 	return size;
2006 }
2007 
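/*
 * "queue_len" sysfs attribute: the maximum depth of the internal request
 * queue; once it is full, new requests are rejected (or backlogged) until
 * earlier ones complete.
 */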
2008 static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
2009 			      char *buf)
2010 {
2011 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2012 
	return sysfs_emit(buf, "%d\n", dd->queue.max_qlen);
2014 }
2015 
2016 static ssize_t queue_len_store(struct device *dev,
2017 			       struct device_attribute *attr, const char *buf,
2018 			       size_t size)
2019 {
2020 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2021 	ssize_t status;
2022 	long value;
2023 
2024 	status = kstrtol(buf, 0, &value);
2025 	if (status)
2026 		return status;
2027 
2028 	if (value < 1)
2029 		return -EINVAL;
2030 
2031 	/*
	 * Changing the queue size on the fly is safe. If the new size is
	 * smaller than the current size, the queue simply stops accepting
	 * new entries until it has shrunk enough.
2035 	 */
2036 	dd->queue.max_qlen = value;
2037 
2038 	return size;
2039 }
2040 
2041 static DEVICE_ATTR_RW(queue_len);
2042 static DEVICE_ATTR_RW(fallback);
2043 
2044 static struct attribute *omap_sham_attrs[] = {
2045 	&dev_attr_queue_len.attr,
2046 	&dev_attr_fallback.attr,
2047 	NULL,
2048 };
2049 
2050 static const struct attribute_group omap_sham_attr_group = {
2051 	.attrs = omap_sham_attrs,
2052 };
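
/*
 * Both knobs appear in the device's sysfs directory. An illustrative
 * shell session (the exact path is platform dependent):
 *
 *   # use the software fallback for anything smaller than 512 bytes
 *   echo 512 > /sys/bus/platform/devices/53100000.sham/fallback
 *   # allow up to 16 requests to sit in the internal queue
 *   echo 16 > /sys/bus/platform/devices/53100000.sham/queue_len
 */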
2053 
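/*
 * Probe: map the register window, install the interrupt handler, grab a
 * DMA channel (or fall back to polling), enable runtime PM, read the IP
 * revision, start a crypto engine queue, register every hash algorithm
 * this IP revision implements, and expose the sysfs tuning knobs.
 */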
2054 static int omap_sham_probe(struct platform_device *pdev)
2055 {
2056 	struct omap_sham_dev *dd;
2057 	struct device *dev = &pdev->dev;
2058 	struct resource res;
2060 	int err, i, j;
2061 	u32 rev;
2062 
	dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		err = -ENOMEM;
		goto data_err;
	}
2069 	dd->dev = dev;
2070 	platform_set_drvdata(pdev, dd);
2071 
2072 	INIT_LIST_HEAD(&dd->list);
2073 	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
2074 	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2075 
2076 	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2077 			       omap_sham_get_res_pdev(dd, pdev, &res);
2078 	if (err)
2079 		goto data_err;
2080 
2081 	dd->io_base = devm_ioremap_resource(dev, &res);
2082 	if (IS_ERR(dd->io_base)) {
2083 		err = PTR_ERR(dd->io_base);
2084 		goto data_err;
2085 	}
2086 	dd->phys_base = res.start;
2087 
2088 	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2089 			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
2090 	if (err) {
2091 		dev_err(dev, "unable to request irq %d, err = %d\n",
2092 			dd->irq, err);
2093 		goto data_err;
2094 	}
2095 
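	/*
	 * A DMA channel is optional: without one the driver falls back to
	 * PIO polling mode, but a probe deferral from the DMA provider must
	 * still be propagated so the channel can be retried later.
	 */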
2099 	dd->dma_lch = dma_request_chan(dev, "rx");
2100 	if (IS_ERR(dd->dma_lch)) {
2101 		err = PTR_ERR(dd->dma_lch);
2102 		if (err == -EPROBE_DEFER)
2103 			goto data_err;
2104 
2105 		dd->polling_mode = 1;
2106 		dev_dbg(dev, "using polling mode instead of dma\n");
2107 	}
2108 
2109 	dd->flags |= dd->pdata->flags;
2110 	sham.flags |= dd->pdata->flags;
2111 
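	/*
	 * Autosuspend keeps the module powered for a short grace period
	 * after each request, so back-to-back operations don't pay the full
	 * runtime resume/suspend cost every time.
	 */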
2112 	pm_runtime_use_autosuspend(dev);
2113 	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2114 
2115 	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
2116 
2117 	pm_runtime_enable(dev);
2118 
2119 	err = pm_runtime_resume_and_get(dev);
2120 	if (err < 0) {
2121 		dev_err(dev, "failed to get sync: %d\n", err);
2122 		goto err_pm;
2123 	}
2124 
2125 	rev = omap_sham_read(dd, SHA_REG_REV(dd));
2126 	pm_runtime_put_sync(&pdev->dev);
2127 
2128 	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
2129 		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2130 		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
2131 
2132 	spin_lock_bh(&sham.lock);
2133 	list_add_tail(&dd->list, &sham.dev_list);
2134 	spin_unlock_bh(&sham.lock);
2135 
2136 	dd->engine = crypto_engine_alloc_init(dev, 1);
2137 	if (!dd->engine) {
2138 		err = -ENOMEM;
2139 		goto err_engine;
2140 	}
2141 
2142 	err = crypto_engine_start(dd->engine);
2143 	if (err)
2144 		goto err_engine_start;
2145 
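	/*
	 * The algorithm lists are shared across device instances; if an
	 * earlier probe already registered them, don't register twice.
	 * export/import are wired up here so partial hash state can be
	 * saved and restored through the generic ahash interface.
	 */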
2146 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
2147 		if (dd->pdata->algs_info[i].registered)
2148 			break;
2149 
2150 		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
2151 			struct ahash_alg *alg;
2152 
2153 			alg = &dd->pdata->algs_info[i].algs_list[j];
2154 			alg->export = omap_sham_export;
2155 			alg->import = omap_sham_import;
2156 			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
2157 					      BUFLEN;
2158 			err = crypto_register_ahash(alg);
2159 			if (err)
2160 				goto err_algs;
2161 
2162 			dd->pdata->algs_info[i].registered++;
2163 		}
2164 	}
2165 
2166 	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
2167 	if (err) {
2168 		dev_err(dev, "could not create sysfs device attrs\n");
2169 		goto err_algs;
2170 	}
2171 
2172 	return 0;
2173 
2174 err_algs:
2175 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2176 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2177 			crypto_unregister_ahash(
2178 					&dd->pdata->algs_info[i].algs_list[j]);
2179 err_engine_start:
2180 	crypto_engine_exit(dd->engine);
2181 err_engine:
2182 	spin_lock_bh(&sham.lock);
2183 	list_del(&dd->list);
2184 	spin_unlock_bh(&sham.lock);
2185 err_pm:
2186 	pm_runtime_dont_use_autosuspend(dev);
2187 	pm_runtime_disable(dev);
2188 	if (!dd->polling_mode)
2189 		dma_release_channel(dd->dma_lch);
2190 data_err:
2191 	dev_err(dev, "initialization failed.\n");
2192 
2193 	return err;
2194 }
2195 
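/*
 * Tear down in roughly reverse probe order: unlink the device, drop the
 * registered hash algorithms, kill the completion tasklet, disable
 * runtime PM, release the DMA channel, and remove the sysfs knobs.
 */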
2196 static int omap_sham_remove(struct platform_device *pdev)
2197 {
2198 	struct omap_sham_dev *dd;
2199 	int i, j;
2200 
2201 	dd = platform_get_drvdata(pdev);
2202 
2203 	spin_lock_bh(&sham.lock);
2204 	list_del(&dd->list);
2205 	spin_unlock_bh(&sham.lock);
2206 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2207 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
2208 			crypto_unregister_ahash(
2209 					&dd->pdata->algs_info[i].algs_list[j]);
2210 			dd->pdata->algs_info[i].registered--;
2211 		}
2212 	tasklet_kill(&dd->done_task);
2213 	pm_runtime_dont_use_autosuspend(&pdev->dev);
2214 	pm_runtime_disable(&pdev->dev);
2215 
2216 	if (!dd->polling_mode)
2217 		dma_release_channel(dd->dma_lch);
2218 
2219 	sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
2220 
2221 	return 0;
2222 }
2223 
2224 static struct platform_driver omap_sham_driver = {
2225 	.probe	= omap_sham_probe,
2226 	.remove	= omap_sham_remove,
2227 	.driver	= {
2228 		.name	= "omap-sham",
2229 		.of_match_table	= omap_sham_of_match,
2230 	},
2231 };
2232 
2233 module_platform_driver(omap_sham_driver);
2234 
MODULE_DESCRIPTION("OMAP SHA1/MD5/SHA2 hw acceleration support.");
2236 MODULE_LICENSE("GPL v2");
2237 MODULE_AUTHOR("Dmitry Kasatkin");
2238 MODULE_ALIAS("platform:omap-sham");
2239