xref: /linux/drivers/crypto/omap-aes.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

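/*
 * Bytes walked so far within the current scatterlist entry during PIO:
 * the scatter_walk offset starts at the entry's own offset, so the
 * difference is the number of bytes already consumed.
 */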
#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)

/*
 * OMAP TRM gives bitfields as start:end, where start is the higher bit
 * number. For example 7:0
 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
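/*
 * For example, FLD_MASK(7, 0) is 0xff, and FLD_VAL(0x3, 4, 3) places
 * the value 3 into bits 4:3, yielding 0x18.
 */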

#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						(((x) ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
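/*
 * Note: key registers grow downward from key_ofs; the "(x) ^ 0x01"
 * above swaps each even/odd pair of word indices, so consecutive key
 * words are written pair-swapped into the register file.
 */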

#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
#define AES_REG_CTRL_CTR_WIDTH_32	0
#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
#define AES_REG_CTRL_CTR		BIT(6)
#define AES_REG_CTRL_CBC		BIT(5)
#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
#define AES_REG_CTRL_DIRECTION		BIT(2)
#define AES_REG_CTRL_INPUT_READY	BIT(1)
#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
#define AES_REG_CTRL_MASK		GENMASK(24, 2)

#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		BIT(6)
#define AES_REG_MASK_START		BIT(5)
#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
#define AES_REG_MASK_DMA_IN_EN		BIT(2)
#define AES_REG_MASK_SOFTRESET		BIT(1)
#define AES_REG_AUTOIDLE		BIT(0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define AES_REG_IRQ_STATUS(dd)		((dd)->pdata->irq_status_ofs)
#define AES_REG_IRQ_ENABLE(dd)		((dd)->pdata->irq_enable_ofs)
#define AES_REG_IRQ_DATA_IN		BIT(1)
#define AES_REG_IRQ_DATA_OUT		BIT(2)
#define DEFAULT_TIMEOUT			(5 * HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_CTR		BIT(3)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

#define AES_BLOCK_WORDS		(AES_BLOCK_SIZE >> 2)

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int	algs_info_size;

	void		(*trigger)(struct omap_aes_dev *dd, int length);

	u32		key_ofs;
	u32		iv_ofs;
	u32		ctrl_ofs;
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		irq_enable_ofs;
	u32		irq_status_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;
	u32		dma_start;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;

	/*
	 * total is used by PIO mode for bookkeeping, so keep a separate
	 * total_save, which is needed to calculate page_order
	 */
	size_t				total;
	size_t				total_save;

	struct scatterlist		*in_sg;
	struct scatterlist		*out_sg;

	/* Buffers for copying for unaligned cases */
	struct scatterlist		in_sgl;
	struct scatterlist		out_sgl;
	struct scatterlist		*orig_out;
	int				sgs_copied;

	struct scatter_walk		in_walk;
	struct scatter_walk		out_walk;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	int			in_sg_len;
	int			out_sg_len;
	int			pio_only;
	const struct omap_aes_pdata	*pdata;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif

#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			__le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

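	/*
	 * CTRL bits 4:3 encode the key size: (keylen >> 3) - 1 yields
	 * 1, 2 or 3 for 128-, 192- and 256-bit keys respectively.
	 */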
	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR)
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

	return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

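/*
 * OMAP4-class IP additionally wants the byte count in the LENGTH
 * registers: a 64-bit value split across two 32-bit registers, with
 * the upper half zeroed here before reusing the OMAP2 trigger.
 */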
static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
}

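/*
 * Copy between a scatterlist and a linear buffer; following the
 * scatterwalk_copychunks() convention, out == 0 copies sg data into
 * buf and out == 1 copies buf back out into the scatterlist.
 */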
static void sg_copy_buf(void *buf, struct scatterlist *sg,
			      unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

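/*
 * Both DMA channels target the same DATA register window: the IN
 * channel streams source data into the engine while the OUT channel
 * drains results.  Only the OUT channel gets a completion callback,
 * since the output side finishing implies the input side is done.
 */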
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg,
		int in_sg_len, int out_sg_len)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	int ret;

	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		scatterwalk_start(&dd->out_walk, dd->out_sg);

		/* Enable DATAIN interrupt and let it take care of the rest */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd),
			       AES_REG_IRQ_DATA_IN);
		return 0;
	}

	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, dd->total);

	return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err;

	pr_debug("total: %zu\n", dd->total);

	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}
	}

	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;

	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	return err;
}

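/*
 * DMA needs 32-bit aligned scatterlist offsets and block-multiple
 * entry lengths; requests that do not qualify are bounced through the
 * contiguous buffers set up in omap_aes_copy_sgs() below.
 */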
static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
	int len = 0;

	if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
		return -EINVAL;

	while (sg) {
		if (!IS_ALIGNED(sg->offset, 4))
			return -1;
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return -1;

		len += sg->length;
		sg = sg_next(sg);
	}

	if (len != total)
		return -1;

	return 0;
}

static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
	void *buf_in, *buf_out;
	int pages, total;

	total = ALIGN(dd->total, AES_BLOCK_SIZE);
	pages = get_order(total);

	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);

	if (!buf_in || !buf_out) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		if (buf_in)
			free_pages((unsigned long)buf_in, pages);
		if (buf_out)
			free_pages((unsigned long)buf_out, pages);
		return -1;
	}

	dd->orig_out = dd->out_sg;

	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);

	sg_init_table(&dd->in_sgl, 1);
	sg_set_buf(&dd->in_sgl, buf_in, total);
	dd->in_sg = &dd->in_sgl;

	sg_init_table(&dd->out_sgl, 1);
	sg_set_buf(&dd->out_sgl, buf_out, total);
	dd->out_sg = &dd->out_sgl;

	return 0;
}

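/*
 * Enqueue the new request (if any) under the lock, bail out if the
 * engine is already busy, otherwise dequeue the next request, program
 * the hardware and kick off the transfer.  The return value is the
 * enqueue status (-EINPROGRESS or -EBUSY), not the crypto result,
 * which is delivered through req->base.complete().
 */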
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0, len;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->total_save = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
	    omap_aes_check_aligned(dd->out_sg, dd->total)) {
		if (omap_aes_copy_sgs(dd))
			pr_err("Failed to copy SGs for unaligned cases\n");
		dd->sgs_copied = 1;
	} else {
		dd->sgs_copied = 0;
	}

	len = ALIGN(dd->total, AES_BLOCK_SIZE);
	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* the done tasklet will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	void *buf_in, *buf_out;
	int pages, len;

	pr_debug("enter done_task\n");

	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	if (dd->sgs_copied) {
		buf_in = sg_virt(&dd->in_sgl);
		buf_out = sg_virt(&dd->out_sgl);

		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

		len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
		pages = get_order(len);
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
	}

	omap_aes_finish_req(dd, 0);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		  !!(mode & FLAGS_ENCRYPT),
		  !!(mode & FLAGS_CBC));

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;
	int err;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		return err;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	pm_runtime_put_sync(dd->dev);
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.geniv		= "eseqiv",
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};
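
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * user reaches these algorithms through the generic ablkcipher API,
 * for example:
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * With cra_priority 300, these implementations are preferred over the
 * generic software AES (typically priority 100) when both are present.
 */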

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.irq_status_ofs = 0x8c,
	.irq_enable_ofs = 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

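/*
 * PIO mode ping-pongs between the two interrupts: DATA_IN feeds one
 * 16-byte block (four 32-bit writes) into the engine and then arms
 * DATA_OUT; DATA_OUT drains the result block, decrements the running
 * total and either re-arms DATA_IN for the next block or schedules
 * the done tasklet.
 */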
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd),
			       AES_REG_IRQ_DATA_OUT);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd),
				       AES_REG_IRQ_DATA_IN);
	}

	return IRQ_HANDLED;
}

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1; /* Dummy value that's unused */

	dd->pdata = match->data;

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (!dd) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		goto err_res;
	}

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

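	/*
	 * Prefer DMA; if no channels are available but this IP revision
	 * has IRQ status/enable registers (OMAP4-class layout), fall
	 * back to interrupt-driven PIO instead of failing the probe.
	 */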
	err = omap_aes_dma_init(dd);
	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(dev, "can't get IRQ resource\n");
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			algp = &dd->pdata->algs_info[i].algs_list[j];

			pr_debug("reg alg: %s\n", algp->cra_name);
			INIT_LIST_HEAD(&algp->cra_list);

			err = crypto_register_alg(algp);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	if (!dd->pio_only)
		omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
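/*
 * System sleep simply drops the runtime PM reference so the module can
 * idle, and takes it back on resume; the hardware context is
 * reprogrammed per request anyway.
 */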
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");