xref: /linux/drivers/crypto/atmel-aes.c (revision a8fe58cec351c25e09c393bf46117c0c47b5a17c)
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define ATMEL_AES_PRIORITY	300

#define ATMEL_AES_BUFFER_ORDER	2
#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

#define SIZE_IN_WORDS(x)	((x) >> 2)

/* AES flags */
/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM

#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
				 AES_FLAGS_ENCRYPT |		\
				 AES_FLAGS_GTAGEN)

#define AES_FLAGS_INIT		BIT(2)
#define AES_FLAGS_BUSY		BIT(3)
#define AES_FLAGS_DUMP_REG	BIT(4)

#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD		256


struct atmel_aes_caps {
	bool			has_dualbuff;
	bool			has_cfb64;
	bool			has_ctr32;
	bool			has_gcm;
	u32			max_burst_size;
};

struct atmel_aes_dev;


typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);


struct atmel_aes_base_ctx {
	struct atmel_aes_dev	*dd;
	atmel_aes_fn_t		start;
	int			keylen;
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
	u16			block_size;
};

struct atmel_aes_ctx {
	struct atmel_aes_base_ctx	base;
};

struct atmel_aes_ctr_ctx {
	struct atmel_aes_base_ctx	base;

	u32			iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t			offset;
	struct scatterlist	src[2];
	struct scatterlist	dst[2];
};

struct atmel_aes_gcm_ctx {
	struct atmel_aes_base_ctx	base;

	struct scatterlist	src[2];
	struct scatterlist	dst[2];

	u32			j0[AES_BLOCK_SIZE / sizeof(u32)];
	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
	u32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
	size_t			textlen;

	const u32		*ghash_in;
	u32			*ghash_out;
	atmel_aes_fn_t		ghash_resume;
};

struct atmel_aes_reqctx {
	unsigned long		mode;
};

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct scatterlist	*sg;
	int			nents;
	unsigned int		remainder;
	unsigned int		sg_len;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct crypto_async_request	*areq;
	struct atmel_aes_base_ctx	*ctx;

	bool			is_async;
	atmel_aes_fn_t		resume;
	atmel_aes_fn_t		cpu_transfer_complete;

	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	size_t			total;
	size_t			datalen;
	u32			*data;

	struct atmel_aes_dma	src;
	struct atmel_aes_dma	dst;

	size_t			buflen;
	void			*buf;
	struct scatterlist	aligned_sg;
	struct scatterlist	*real_dst;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

#ifdef VERBOSE_DEBUG
static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
{
	switch (offset) {
	case AES_CR:
		return "CR";

	case AES_MR:
		return "MR";

	case AES_ISR:
		return "ISR";

	case AES_IMR:
		return "IMR";

	case AES_IER:
		return "IER";

	case AES_IDR:
		return "IDR";

	case AES_KEYWR(0):
	case AES_KEYWR(1):
	case AES_KEYWR(2):
	case AES_KEYWR(3):
	case AES_KEYWR(4):
	case AES_KEYWR(5):
	case AES_KEYWR(6):
	case AES_KEYWR(7):
		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
		break;

	case AES_IDATAR(0):
	case AES_IDATAR(1):
	case AES_IDATAR(2):
	case AES_IDATAR(3):
		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
		break;

	case AES_ODATAR(0):
	case AES_ODATAR(1):
	case AES_ODATAR(2):
	case AES_ODATAR(3):
		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
		break;

	case AES_IVR(0):
	case AES_IVR(1):
	case AES_IVR(2):
	case AES_IVR(3):
		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
		break;

	case AES_AADLENR:
		return "AADLENR";

	case AES_CLENR:
		return "CLENR";

	case AES_GHASHR(0):
	case AES_GHASHR(1):
	case AES_GHASHR(2):
	case AES_GHASHR(3):
		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
		break;

	case AES_TAGR(0):
	case AES_TAGR(1):
	case AES_TAGR(2):
	case AES_TAGR(3):
		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
		break;

	case AES_CTRR:
		return "CTRR";

	case AES_GCMHR(0):
	case AES_GCMHR(1):
	case AES_GCMHR(2):
	case AES_GCMHR(3):
		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
		break;

	default:
		snprintf(tmp, sz, "0x%02x", offset);
		break;
	}

	return tmp;
}
#endif /* VERBOSE_DEBUG */

/* Shared functions */

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	return value;
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
			      const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
					u32 *value)
{
	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
					 const u32 *value)
{
	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
						atmel_aes_fn_t resume)
{
	u32 isr = atmel_aes_read(dd, AES_ISR);

	if (unlikely(isr & AES_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	return -EINPROGRESS;
}

static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
{
	len &= block_size - 1;
	return len ? block_size - len : 0;
}
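
/*
 * Worked example (illustration only, assuming block_size is a power of
 * two as it is for all modes above): for len = 20 and block_size = 16,
 * 20 & 15 = 4, so atmel_aes_padlen() returns 16 - 4 = 12 and
 * 20 + 12 = 32 is block aligned.  An already aligned length returns 0.
 */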

static inline struct aead_request *
aead_request_cast(struct crypto_async_request *req)
{
	return container_of(req, struct aead_request, base);
}

static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}

static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
	}

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	int err;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
	return 0;
}

static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
				      const struct atmel_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}
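
/*
 * Example of the flag update above (illustration only): for a CBC encrypt
 * request, rctx->mode is AES_FLAGS_CBC | AES_FLAGS_ENCRYPT, so a busy,
 * initialized device ends up with dd->flags ==
 * AES_FLAGS_INIT | AES_FLAGS_BUSY | AES_FLAGS_CBC | AES_FLAGS_ENCRYPT.
 * Only the two persistent bits survive from one request to the next; all
 * mode bits are replaced by the per-request value.
 */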

static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
{
	return (dd->flags & AES_FLAGS_ENCRYPT);
}

static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
	clk_disable(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	if (dd->is_async)
		dd->areq->complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}

static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
				 const u32 *iv)
{
	u32 valmr = 0;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	valmr |= dd->flags & AES_FLAGS_MODE_MASK;

	if (use_dma) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
			  SIZE_IN_WORDS(dd->ctx->keylen));

	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
		atmel_aes_write_block(dd, AES_IVR(0), iv);
}


/* CPU transfer */

static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
{
	int err = 0;
	u32 isr;

	for (;;) {
		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		if (dd->datalen < AES_BLOCK_SIZE)
			break;

		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_cpu_transfer;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				 dd->buf, dd->total))
		err = -EINVAL;

	if (err)
		return atmel_aes_complete(dd, err);

	return dd->cpu_transfer_complete(dd);
}

static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);

	if (unlikely(len == 0))
		return -EINVAL;

	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);

	dd->total = len;
	dd->real_dst = dst;
	dd->cpu_transfer_complete = resume;
	dd->datalen = len + padlen;
	dd->data = (u32 *)dd->buf;
	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
}
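
/*
 * Note on the PIO flow above (explanatory comment): atmel_aes_cpu_start()
 * primes the pipeline by writing the first input block, then
 * atmel_aes_cpu_transfer() alternates "read one output block / write the
 * next input block", parking on the DATARDY interrupt whenever the
 * hardware is not ready yet.  dd->datalen is rounded up by padlen, so a
 * partial trailing block is processed from the bounce buffer, but only
 * dd->total bytes are copied back, so the pad bytes never reach the
 * caller's destination scatterlist.
 */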


/* DMA transfer */

static void atmel_aes_dma_callback(void *data);

static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
				    struct scatterlist *sg,
				    size_t len,
				    struct atmel_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, dd->ctx->block_size))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, dd->ctx->block_size))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
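
/*
 * Example of the scatterlist trimming above (illustration only): for a
 * request of len = 32 over a two-entry list of 16 + 48 bytes,
 * atmel_aes_check_aligned() stops in the second entry with nents = 2,
 * records remainder = 48 - 16 = 32 and shrinks that entry's length to 16,
 * so the DMA mapping covers exactly len bytes.  atmel_aes_restore_sg()
 * later walks to the same entry and adds the remainder back, restoring
 * the caller's scatterlist to its original shape.
 */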

static int atmel_aes_map(struct atmel_aes_dev *dd,
			 struct scatterlist *src,
			 struct scatterlist *dst,
			 size_t len)
{
	bool src_aligned, dst_aligned;
	size_t padlen;

	dd->total = len;
	dd->src.sg = src;
	dd->dst.sg = dst;
	dd->real_dst = dst;

	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
	if (!src_aligned || !dst_aligned) {
		padlen = atmel_aes_padlen(len, dd->ctx->block_size);

		if (dd->buflen < len + padlen)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
			dd->src.sg = &dd->aligned_sg;
			dd->src.nents = 1;
			dd->src.remainder = 0;
		}

		if (!dst_aligned) {
			dd->dst.sg = &dd->aligned_sg;
			dd->dst.nents = 1;
			dd->dst.remainder = 0;
		}

		sg_init_table(&dd->aligned_sg, 1);
		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
	}

	if (dd->src.sg == dd->dst.sg) {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_BIDIRECTIONAL);
		dd->dst.sg_len = dd->src.sg_len;
		if (!dd->src.sg_len)
			return -EFAULT;
	} else {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_TO_DEVICE);
		if (!dd->src.sg_len)
			return -EFAULT;

		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
					    DMA_FROM_DEVICE);
		if (!dd->dst.sg_len) {
			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
				     DMA_TO_DEVICE);
			return -EFAULT;
		}
	}

	return 0;
}

static void atmel_aes_unmap(struct atmel_aes_dev *dd)
{
	if (dd->src.sg == dd->dst.sg) {
		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_BIDIRECTIONAL);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	} else {
		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
			     DMA_FROM_DEVICE);

		if (dd->dst.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->dst);

		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_TO_DEVICE);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	}

	if (dd->dst.sg == &dd->aligned_sg)
		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				    dd->buf, dd->total);
}

static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
					enum dma_slave_buswidth addr_width,
					enum dma_transfer_direction dir,
					u32 maxburst)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_async_tx_callback callback;
	struct atmel_aes_dma *dma;
	int err;

	memset(&config, 0, sizeof(config));
	config.direction = dir;
	config.src_addr_width = addr_width;
	config.dst_addr_width = addr_width;
	config.src_maxburst = maxburst;
	config.dst_maxburst = maxburst;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		callback = NULL;
		config.dst_addr = dd->phys_base + AES_IDATAR(0);
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		callback = atmel_aes_dma_callback;
		config.src_addr = dd->phys_base + AES_ODATAR(0);
		break;

	default:
		return -EINVAL;
	}

	err = dmaengine_slave_config(dma->chan, &config);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = callback;
	desc->callback_param = dd;
	dmaengine_submit(desc);
	dma_async_issue_pending(dma->chan);

	return 0;
}

static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
					enum dma_transfer_direction dir)
{
	struct atmel_aes_dma *dma;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		break;

	default:
		return;
	}

	dmaengine_terminate_all(dma->chan);
}

static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	int err;

	switch (dd->ctx->block_size) {
	case CFB8_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		maxburst = 1;
		break;

	case CFB16_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		maxburst = 1;
		break;

	case CFB32_BLOCK_SIZE:
	case CFB64_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = 1;
		break;

	case AES_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = dd->caps.max_burst_size;
		break;

	default:
		err = -EINVAL;
		goto exit;
	}

	err = atmel_aes_map(dd, src, dst, len);
	if (err)
		goto exit;

	dd->resume = resume;

	/* Set output DMA transfer first */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
					   maxburst);
	if (err)
		goto unmap;

	/* Then set input DMA transfer */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
					   maxburst);
	if (err)
		goto output_transfer_stop;

	return -EINPROGRESS;

output_transfer_stop:
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
unmap:
	atmel_aes_unmap(dd);
exit:
	return atmel_aes_complete(dd, err);
}
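
/*
 * Design note (added commentary): the DEV_TO_MEM descriptor is submitted
 * before the MEM_TO_DEV one, presumably so the output channel is already
 * draining ODATAR when the input channel starts feeding IDATAR.  The
 * completion callback is attached to the output side only, since the
 * transfer is finished once the last output block has been read out.
 */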

static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
{
	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
	atmel_aes_unmap(dd);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	atmel_aes_dma_stop(dd);
	dd->is_async = true;
	(void)dd->resume(dd);
}

static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
				  struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct atmel_aes_base_ctx *ctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	dd->is_async = (areq != new_areq);

	err = ctx->start(dd);
	return (dd->is_async) ? ret : err;
}
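
/*
 * Note on dd->is_async (added commentary): when the request being started
 * is the one that was just enqueued (areq == new_areq), the caller still
 * owns it, so ctx->start()'s return code (typically -EINPROGRESS) is
 * handed back synchronously and atmel_aes_complete() skips the
 * areq->complete() callback.  Any request dequeued on a later pass is
 * completed asynchronously through that callback instead.
 */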


/* AES async block ciphers */

static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
{
	return atmel_aes_complete(dd, 0);
}

static int atmel_aes_start(struct atmel_aes_dev *dd)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
			dd->ctx->block_size != AES_BLOCK_SIZE);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	atmel_aes_write_ctrl(dd, use_dma, req->info);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
				   atmel_aes_transfer_complete);
}

static inline struct atmel_aes_ctr_ctx *
atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
}

static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct scatterlist *src, *dst;
	u32 ctr, blocks;
	size_t datalen;
	bool use_dma, fragmented = false;

	/* Check for transfer completion. */
	ctx->offset += dd->total;
	if (ctx->offset >= req->nbytes)
		return atmel_aes_transfer_complete(dd);

	/* Compute data length. */
	datalen = req->nbytes - ctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(ctx->iv[3]);
	if (dd->caps.has_ctr32) {
		/* Check 32bit counter overflow. */
		u32 start = ctr;
		u32 end = start + blocks - 1;

		if (end < start) {
			ctr |= 0xffffffff;
			datalen = AES_BLOCK_SIZE * -start;
			fragmented = true;
		}
	} else {
		/* Check 16bit counter overflow. */
		u16 start = ctr & 0xffff;
		u16 end = start + (u16)blocks - 1;

		if (blocks >> 16 || end < start) {
			ctr |= 0xffff;
			datalen = AES_BLOCK_SIZE * (0x10000 - start);
			fragmented = true;
		}
	}
	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

	/* Jump to offset. */
	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));

	/* Configure hardware. */
	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		ctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
	}

	if (use_dma)
		return atmel_aes_dma_start(dd, src, dst, datalen,
					   atmel_aes_ctr_transfer);

	return atmel_aes_cpu_start(dd, src, dst, datalen,
				   atmel_aes_ctr_transfer);
}
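
/*
 * Worked example of the 16-bit counter fragmentation above (illustration
 * only): with a counter word of 0x0000fffe and a 64-block request,
 * start = 0xfffe and end = (u16)(0xfffe + 63) = 0x003d wraps below start,
 * so datalen is clamped to (0x10000 - 0xfffe) = 2 blocks.  The hardware
 * processes those 2 blocks up to the wrap point, the driver reloads the
 * IV with the manually incremented 128-bit counter, and the transfer
 * resumes with the remaining 62 blocks.
 */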

static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
	ctx->offset = 0;
	dd->total = 0;
	return atmel_aes_ctr_transfer(dd);
}

static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	struct atmel_aes_dev *dd;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	switch (mode & AES_FLAGS_OPMODE_MASK) {
	case AES_FLAGS_CFB8:
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB16:
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB32:
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB64:
		ctx->block_size = CFB64_BLOCK_SIZE;
		break;

	default:
		ctx->block_size = AES_BLOCK_SIZE;
		break;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, &req->base);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_start;

	return 0;
}

static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_ctr_start;

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_ctr_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};

static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};


/* gcm aead functions */

static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const u32 *ghash_in, u32 *ghash_out,
			       atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);

static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);

static inline struct atmel_aes_gcm_ctx *
atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
}

static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const u32 *ghash_in, u32 *ghash_out,
			       atmel_aes_fn_t resume)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	dd->data = (u32 *)data;
	dd->datalen = datalen;
	ctx->ghash_in = ghash_in;
	ctx->ghash_out = ghash_out;
	ctx->ghash_resume = resume;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
}

static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	/* Set the data length. */
	atmel_aes_write(dd, AES_AADLENR, dd->total);
	atmel_aes_write(dd, AES_CLENR, 0);

	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
	if (ctx->ghash_in)
		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);

	return atmel_aes_gcm_ghash_finalize(dd);
}

static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	u32 isr;

	/* Write data into the Input Data Registers. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_ghash_finalize;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* Read the computed hash from GHASHRx. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);

	return ctx->ghash_resume(dd);
}
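
/*
 * Added commentary on the GHASH helpers above: the hardware computes
 * GHASH(H, data) over whole 16-byte blocks fed through IDATAR, with CLENR
 * programmed to 0 so no encryption takes place.  An optional ghash_in
 * value seeds the intermediate hash registers, which lets the driver
 * chain two GHASH passes (e.g. over the padded IV, then over the length
 * block) without doing any GF(2^128) multiplication in software.
 */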


static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
	size_t ivsize = crypto_aead_ivsize(tfm);
	size_t datalen, padlen;
	const void *iv = req->iv;
	u8 *data = dd->buf;
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	if (likely(ivsize == 12)) {
		memcpy(ctx->j0, iv, ivsize);
		ctx->j0[3] = cpu_to_be32(1);
		return atmel_aes_gcm_process(dd);
	}

	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
	datalen = ivsize + padlen + AES_BLOCK_SIZE;
	if (datalen > dd->buflen)
		return atmel_aes_complete(dd, -EINVAL);

	memcpy(data, iv, ivsize);
	memset(data + ivsize, 0, padlen + sizeof(u64));
	((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
				   NULL, ctx->j0, atmel_aes_gcm_process);
}
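
/*
 * The J0 construction above follows the GCM specification: for the usual
 * 96-bit IV, J0 = IV || 0^31 || 1, written directly into ctx->j0.  For
 * any other IV length, J0 = GHASH(H, IV padded with zeros to a block
 * boundary, followed by a block holding 0^64 || [len(IV) in bits]_64),
 * which is exactly the buffer laid out in dd->buf before calling
 * atmel_aes_gcm_ghash().
 */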

static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 authsize;

	/* Compute text length. */
	authsize = crypto_aead_authsize(tfm);
	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);

	/*
	 * According to tcrypt test suite, the GCM Automatic Tag Generation
	 * fails when both the message and its associated data are empty.
	 */
	if (likely(req->assoclen != 0 || ctx->textlen != 0))
		dd->flags |= AES_FLAGS_GTAGEN;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
}

static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	u32 j0_lsw, *j0 = ctx->j0;
	size_t padlen;

	/* Write incr32(J0) into IV. */
	j0_lsw = j0[3];
	j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
	atmel_aes_write_block(dd, AES_IVR(0), j0);
	j0[3] = j0_lsw;

	/* Set aad and text lengths. */
	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
	atmel_aes_write(dd, AES_CLENR, ctx->textlen);

	/* Check whether AAD are present. */
	if (unlikely(req->assoclen == 0)) {
		dd->datalen = 0;
		return atmel_aes_gcm_data(dd);
	}

	/* Copy assoc data and add padding. */
	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
	if (unlikely(req->assoclen + padlen > dd->buflen))
		return atmel_aes_complete(dd, -EINVAL);
	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);

	/* Write assoc data into the Input Data register. */
	dd->data = (u32 *)dd->buf;
	dd->datalen = req->assoclen + padlen;
	return atmel_aes_gcm_data(dd);
}

static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
	struct scatterlist *src, *dst;
	u32 isr, mr;

	/* Write AAD first. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_data;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* GMAC only. */
	if (unlikely(ctx->textlen == 0))
		return atmel_aes_gcm_tag_init(dd);

	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));

	if (use_dma) {
		/* Update the Mode Register for DMA transfers. */
		mr = atmel_aes_read(dd, AES_MR);
		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
		mr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			mr |= AES_MR_DUALBUFF;
		atmel_aes_write(dd, AES_MR, mr);

		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
					   atmel_aes_gcm_tag_init);
	}

	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
				   atmel_aes_gcm_tag_init);
}

static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	u64 *data = dd->buf;

	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
			dd->resume = atmel_aes_gcm_tag_init;
			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
			return -EINPROGRESS;
		}

		return atmel_aes_gcm_finalize(dd);
	}

	/* Read the GCM Intermediate Hash Word Registers. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);

	data[0] = cpu_to_be64(req->assoclen * 8);
	data[1] = cpu_to_be64(ctx->textlen * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
}

static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	unsigned long flags;

	/*
	 * Change mode to CTR to complete the tag generation.
	 * Use J0 as Initialization Vector.
	 */
	flags = dd->flags;
	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
	dd->flags |= AES_FLAGS_CTR;
	atmel_aes_write_ctrl(dd, false, ctx->j0);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
}
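
/*
 * Added note: the fallback above computes the tag by hand for requests
 * where both the message and the AAD are empty, since Automatic Tag
 * Generation cannot be used there.  The mode is temporarily switched to
 * CTR with J0 as the counter block and the final GHASH value is encrypted
 * once, which matches the GCM tag definition T = GCTR_K(J0, S).
 */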

static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 offset, authsize, itag[4], *otag = ctx->tag;
	int err;

	/* Read the computed tag. */
	if (likely(dd->flags & AES_FLAGS_GTAGEN))
		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
	else
		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);

	offset = req->assoclen + ctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
		err = 0;
	} else {
		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
	}

	return atmel_aes_complete(dd, err);
}

static int atmel_aes_gcm_crypt(struct aead_request *req,
			       unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	struct atmel_aes_dev *dd;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	ctx->block_size = AES_BLOCK_SIZE;

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx = aead_request_ctx(req);
	rctx->mode = AES_FLAGS_GCM | mode;

	return atmel_aes_handle_queue(dd, &req->base);
}

static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_256 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_128) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int atmel_aes_gcm_encrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_gcm_decrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, 0);
}

static int atmel_aes_gcm_init(struct crypto_aead *tfm)
{
	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.start = atmel_aes_gcm_start;

	return 0;
}

static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
{
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= atmel_aes_gcm_setkey,
	.setauthsize	= atmel_aes_gcm_setauthsize,
	.encrypt	= atmel_aes_gcm_encrypt,
	.decrypt	= atmel_aes_gcm_decrypt,
	.init		= atmel_aes_gcm_init,
	.exit		= atmel_aes_gcm_exit,
	.ivsize		= 12,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "atmel-gcm-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};


/* Probe functions */

static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
	dd->buflen = ATMEL_AES_BUFFER_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			      struct crypto_platform_data *pdata)
{
	struct at_dma_slave *slave;
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	slave = &pdata->dma_slave->rxdata;
	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "tx");
	if (!dd->src.chan)
		goto err_dma_in;

	slave = &pdata->dma_slave->txdata;
	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "rx");
	if (!dd->dst.chan)
		goto err_dma_out;

	return 0;

err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dst.chan);
	dma_release_channel(dd->src.chan);
}

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}

static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	if (dd->caps.has_gcm)
		crypto_unregister_aead(&aes_gcm_alg);

	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	if (dd->caps.has_gcm) {
		err = crypto_register_aead(&aes_gcm_alg);
		if (err)
			goto err_aes_gcm_alg;
	}

	return 0;

err_aes_gcm_alg:
	crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.has_ctr32 = 0;
	dd->caps.has_gcm = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_ctr32 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_ctr32 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto res_err;
	}

	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(aes_dd->io_base);
		goto res_err;
	}

	err = clk_prepare(aes_dd->iclk);
	if (err)
		goto res_err;

	err = atmel_aes_hw_version_init(aes_dd);
	if (err)
		goto iclk_unprepare;

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->src.chan),
			dma_chan_name(aes_dd->dst.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
iclk_unprepare:
	clk_unprepare(aes_dd->iclk);
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);
	atmel_aes_buff_cleanup(aes_dd);

	clk_unprepare(aes_dd->iclk);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");