xref: /linux/drivers/crypto/atmel-aes.c (revision f3956ebb3bf06ab2266ad5ee2214aed46405810c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cryptographic API.
4  *
5  * Support for ATMEL AES HW acceleration.
6  *
7  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
8  * Author: Nicolas Royer <nicolas@eukrea.com>
9  *
10  * Some ideas are from omap-aes.c driver.
11  */
12 
13 
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/err.h>
18 #include <linux/clk.h>
19 #include <linux/io.h>
20 #include <linux/hw_random.h>
21 #include <linux/platform_device.h>
22 
23 #include <linux/device.h>
24 #include <linux/dmaengine.h>
25 #include <linux/init.h>
26 #include <linux/errno.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/scatterlist.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/of_device.h>
32 #include <linux/delay.h>
33 #include <linux/crypto.h>
34 #include <crypto/scatterwalk.h>
35 #include <crypto/algapi.h>
36 #include <crypto/aes.h>
37 #include <crypto/gcm.h>
38 #include <crypto/xts.h>
39 #include <crypto/internal/aead.h>
40 #include <crypto/internal/skcipher.h>
41 #include "atmel-aes-regs.h"
42 #include "atmel-authenc.h"
43 
44 #define ATMEL_AES_PRIORITY	300
45 
46 #define ATMEL_AES_BUFFER_ORDER	2
47 #define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
48 
49 #define CFB8_BLOCK_SIZE		1
50 #define CFB16_BLOCK_SIZE	2
51 #define CFB32_BLOCK_SIZE	4
52 #define CFB64_BLOCK_SIZE	8
53 
54 #define SIZE_IN_WORDS(x)	((x) >> 2)
55 
56 /* AES flags */
57 /* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
58 #define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
59 #define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
60 #define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
61 #define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
62 #define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
63 #define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
64 #define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
65 #define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
66 #define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
67 #define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
68 #define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
69 #define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
70 #define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
71 #define AES_FLAGS_XTS		AES_MR_OPMOD_XTS
72 
73 #define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
74 				 AES_FLAGS_ENCRYPT |		\
75 				 AES_FLAGS_GTAGEN)
76 
77 #define AES_FLAGS_BUSY		BIT(3)
78 #define AES_FLAGS_DUMP_REG	BIT(4)
79 #define AES_FLAGS_OWN_SHA	BIT(5)
80 
81 #define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY
82 
83 #define ATMEL_AES_QUEUE_LENGTH	50
84 
85 #define ATMEL_AES_DMA_THRESHOLD		256
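/*
 * The DMA threshold is a software heuristic, not a hardware limit:
 * below it, the setup cost of the DMA engine tends to outweigh plain
 * programmed I/O through IDATAR/ODATAR. atmel_aes_start() applies it
 * as:
 *
 *	use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
 *		   dd->ctx->block_size != AES_BLOCK_SIZE);
 *
 * so, for instance, a 64-byte CBC request is processed by the CPU
 * while a 4 KiB one goes through DMA; the narrow CFBx block sizes
 * always take the DMA path.
 */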
86 
87 
88 struct atmel_aes_caps {
89 	bool			has_dualbuff;
90 	bool			has_cfb64;
91 	bool			has_gcm;
92 	bool			has_xts;
93 	bool			has_authenc;
94 	u32			max_burst_size;
95 };
96 
97 struct atmel_aes_dev;
98 
99 
100 typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
101 
102 
103 struct atmel_aes_base_ctx {
104 	struct atmel_aes_dev	*dd;
105 	atmel_aes_fn_t		start;
106 	int			keylen;
107 	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
108 	u16			block_size;
109 	bool			is_aead;
110 };
111 
112 struct atmel_aes_ctx {
113 	struct atmel_aes_base_ctx	base;
114 };
115 
116 struct atmel_aes_ctr_ctx {
117 	struct atmel_aes_base_ctx	base;
118 
119 	__be32			iv[AES_BLOCK_SIZE / sizeof(u32)];
120 	size_t			offset;
121 	struct scatterlist	src[2];
122 	struct scatterlist	dst[2];
123 	u32			blocks;
124 };
125 
126 struct atmel_aes_gcm_ctx {
127 	struct atmel_aes_base_ctx	base;
128 
129 	struct scatterlist	src[2];
130 	struct scatterlist	dst[2];
131 
132 	__be32			j0[AES_BLOCK_SIZE / sizeof(u32)];
133 	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
134 	__be32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
135 	size_t			textlen;
136 
137 	const __be32		*ghash_in;
138 	__be32			*ghash_out;
139 	atmel_aes_fn_t		ghash_resume;
140 };
141 
142 struct atmel_aes_xts_ctx {
143 	struct atmel_aes_base_ctx	base;
144 
145 	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
146 	struct crypto_skcipher *fallback_tfm;
147 };
148 
149 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
150 struct atmel_aes_authenc_ctx {
151 	struct atmel_aes_base_ctx	base;
152 	struct atmel_sha_authenc_ctx	*auth;
153 };
154 #endif
155 
156 struct atmel_aes_reqctx {
157 	unsigned long		mode;
158 	u8			lastc[AES_BLOCK_SIZE];
159 	struct skcipher_request fallback_req;
160 };
161 
162 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
163 struct atmel_aes_authenc_reqctx {
164 	struct atmel_aes_reqctx	base;
165 
166 	struct scatterlist	src[2];
167 	struct scatterlist	dst[2];
168 	size_t			textlen;
169 	u32			digest[SHA512_DIGEST_SIZE / sizeof(u32)];
170 
171 	/* auth_req MUST be placed last. */
172 	struct ahash_request	auth_req;
173 };
174 #endif
175 
176 struct atmel_aes_dma {
177 	struct dma_chan		*chan;
178 	struct scatterlist	*sg;
179 	int			nents;
180 	unsigned int		remainder;
181 	unsigned int		sg_len;
182 };
183 
184 struct atmel_aes_dev {
185 	struct list_head	list;
186 	unsigned long		phys_base;
187 	void __iomem		*io_base;
188 
189 	struct crypto_async_request	*areq;
190 	struct atmel_aes_base_ctx	*ctx;
191 
192 	bool			is_async;
193 	atmel_aes_fn_t		resume;
194 	atmel_aes_fn_t		cpu_transfer_complete;
195 
196 	struct device		*dev;
197 	struct clk		*iclk;
198 	int			irq;
199 
200 	unsigned long		flags;
201 
202 	spinlock_t		lock;
203 	struct crypto_queue	queue;
204 
205 	struct tasklet_struct	done_task;
206 	struct tasklet_struct	queue_task;
207 
208 	size_t			total;
209 	size_t			datalen;
210 	u32			*data;
211 
212 	struct atmel_aes_dma	src;
213 	struct atmel_aes_dma	dst;
214 
215 	size_t			buflen;
216 	void			*buf;
217 	struct scatterlist	aligned_sg;
218 	struct scatterlist	*real_dst;
219 
220 	struct atmel_aes_caps	caps;
221 
222 	u32			hw_version;
223 };
224 
225 struct atmel_aes_drv {
226 	struct list_head	dev_list;
227 	spinlock_t		lock;
228 };
229 
230 static struct atmel_aes_drv atmel_aes = {
231 	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
232 	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
233 };
234 
235 #ifdef VERBOSE_DEBUG
236 static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
237 {
238 	switch (offset) {
239 	case AES_CR:
240 		return "CR";
241 
242 	case AES_MR:
243 		return "MR";
244 
245 	case AES_ISR:
246 		return "ISR";
247 
248 	case AES_IMR:
249 		return "IMR";
250 
251 	case AES_IER:
252 		return "IER";
253 
254 	case AES_IDR:
255 		return "IDR";
256 
257 	case AES_KEYWR(0):
258 	case AES_KEYWR(1):
259 	case AES_KEYWR(2):
260 	case AES_KEYWR(3):
261 	case AES_KEYWR(4):
262 	case AES_KEYWR(5):
263 	case AES_KEYWR(6):
264 	case AES_KEYWR(7):
265 		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
266 		break;
267 
268 	case AES_IDATAR(0):
269 	case AES_IDATAR(1):
270 	case AES_IDATAR(2):
271 	case AES_IDATAR(3):
272 		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
273 		break;
274 
275 	case AES_ODATAR(0):
276 	case AES_ODATAR(1):
277 	case AES_ODATAR(2):
278 	case AES_ODATAR(3):
279 		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
280 		break;
281 
282 	case AES_IVR(0):
283 	case AES_IVR(1):
284 	case AES_IVR(2):
285 	case AES_IVR(3):
286 		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
287 		break;
288 
289 	case AES_AADLENR:
290 		return "AADLENR";
291 
292 	case AES_CLENR:
293 		return "CLENR";
294 
295 	case AES_GHASHR(0):
296 	case AES_GHASHR(1):
297 	case AES_GHASHR(2):
298 	case AES_GHASHR(3):
299 		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
300 		break;
301 
302 	case AES_TAGR(0):
303 	case AES_TAGR(1):
304 	case AES_TAGR(2):
305 	case AES_TAGR(3):
306 		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
307 		break;
308 
309 	case AES_CTRR:
310 		return "CTRR";
311 
312 	case AES_GCMHR(0):
313 	case AES_GCMHR(1):
314 	case AES_GCMHR(2):
315 	case AES_GCMHR(3):
316 		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
317 		break;
318 
319 	case AES_EMR:
320 		return "EMR";
321 
322 	case AES_TWR(0):
323 	case AES_TWR(1):
324 	case AES_TWR(2):
325 	case AES_TWR(3):
326 		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
327 		break;
328 
329 	case AES_ALPHAR(0):
330 	case AES_ALPHAR(1):
331 	case AES_ALPHAR(2):
332 	case AES_ALPHAR(3):
333 		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
334 		break;
335 
336 	default:
337 		snprintf(tmp, sz, "0x%02x", offset);
338 		break;
339 	}
340 
341 	return tmp;
342 }
343 #endif /* VERBOSE_DEBUG */
344 
345 /* Shared functions */
346 
347 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
348 {
349 	u32 value = readl_relaxed(dd->io_base + offset);
350 
351 #ifdef VERBOSE_DEBUG
352 	if (dd->flags & AES_FLAGS_DUMP_REG) {
353 		char tmp[16];
354 
355 		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
356 			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
357 	}
358 #endif /* VERBOSE_DEBUG */
359 
360 	return value;
361 }
362 
363 static inline void atmel_aes_write(struct atmel_aes_dev *dd,
364 					u32 offset, u32 value)
365 {
366 #ifdef VERBOSE_DEBUG
367 	if (dd->flags & AES_FLAGS_DUMP_REG) {
368 		char tmp[16];
369 
370 		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
371 			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
372 	}
373 #endif /* VERBOSE_DEBUG */
374 
375 	writel_relaxed(value, dd->io_base + offset);
376 }
377 
378 static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
379 					u32 *value, int count)
380 {
381 	for (; count--; value++, offset += 4)
382 		*value = atmel_aes_read(dd, offset);
383 }
384 
385 static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
386 			      const u32 *value, int count)
387 {
388 	for (; count--; value++, offset += 4)
389 		atmel_aes_write(dd, offset, *value);
390 }
391 
392 static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
393 					void *value)
394 {
395 	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
396 }
397 
398 static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
399 					 const void *value)
400 {
401 	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
402 }
403 
404 static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
405 						atmel_aes_fn_t resume)
406 {
407 	u32 isr = atmel_aes_read(dd, AES_ISR);
408 
409 	if (unlikely(isr & AES_INT_DATARDY))
410 		return resume(dd);
411 
412 	dd->resume = resume;
413 	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
414 	return -EINPROGRESS;
415 }
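/*
 * Continuation pattern used throughout this driver: when DATRDY is
 * already set the next step runs synchronously, otherwise it is
 * stashed in dd->resume, the interrupt is unmasked and the call chain
 * unwinds with -EINPROGRESS; the IRQ handler and done_task later
 * re-enter through dd->resume(dd). A typical caller looks like this
 * sketch (my_next_step is a hypothetical continuation):
 *
 *	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 *	return atmel_aes_wait_for_data_ready(dd, my_next_step);
 */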
416 
417 static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
418 {
419 	len &= block_size - 1;
420 	return len ? block_size - len : 0;
421 }
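/*
 * atmel_aes_padlen() assumes block_size is a power of two. Worked
 * example: len = 20, block_size = 16 gives len & 15 = 4, hence 12
 * padding bytes; len = 32 gives len & 15 = 0, hence no padding.
 */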
422 
423 static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
424 {
425 	struct atmel_aes_dev *aes_dd;
426 
427 	spin_lock_bh(&atmel_aes.lock);
428 	/* One AES IP per SoC. */
429 	aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
430 					  struct atmel_aes_dev, list);
431 	spin_unlock_bh(&atmel_aes.lock);
432 	return aes_dd;
433 }
434 
435 static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
436 {
437 	int err;
438 
439 	err = clk_enable(dd->iclk);
440 	if (err)
441 		return err;
442 
443 	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
444 	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
445 
446 	return 0;
447 }
448 
449 static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
450 {
451 	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
452 }
453 
454 static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
455 {
456 	int err;
457 
458 	err = atmel_aes_hw_init(dd);
459 	if (err)
460 		return err;
461 
462 	dd->hw_version = atmel_aes_get_version(dd);
463 
464 	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
465 
466 	clk_disable(dd->iclk);
467 	return 0;
468 }
469 
470 static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
471 				      const struct atmel_aes_reqctx *rctx)
472 {
473 	/* Clear all but persistent flags and set request flags. */
474 	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
475 }
476 
477 static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
478 {
479 	return (dd->flags & AES_FLAGS_ENCRYPT);
480 }
481 
482 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
483 static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
484 #endif
485 
486 static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
487 {
488 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
489 	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
490 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
491 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
492 
493 	if (req->cryptlen < ivsize)
494 		return;
495 
496 	if (rctx->mode & AES_FLAGS_ENCRYPT) {
497 		scatterwalk_map_and_copy(req->iv, req->dst,
498 					 req->cryptlen - ivsize, ivsize, 0);
499 	} else {
500 		if (req->src == req->dst)
501 			memcpy(req->iv, rctx->lastc, ivsize);
502 		else
503 			scatterwalk_map_and_copy(req->iv, req->src,
504 						 req->cryptlen - ivsize,
505 						 ivsize, 0);
506 	}
507 }
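/*
 * The crypto API expects req->iv to hold the last ciphertext block on
 * return for chaining modes. On encryption it can simply be read back
 * from req->dst; on in-place decryption the ciphertext has already
 * been overwritten, which is why atmel_aes_crypt() stashes it in
 * rctx->lastc up front:
 *
 *	scatterwalk_map_and_copy(rctx->lastc, req->src,
 *				 req->cryptlen - ivsize, ivsize, 0);
 */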
508 
509 static inline struct atmel_aes_ctr_ctx *
510 atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
511 {
512 	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
513 }
514 
515 static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
516 {
517 	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
518 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
519 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
520 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
521 	int i;
522 
523 	/*
524 	 * The CTR transfer works in fragments of at most 1 MByte because
525 	 * of the 16-bit CTR counter embedded in the IP. By the time we get
526 	 * here, ctx->blocks holds the number of blocks of the last fragment
527 	 * processed, so there is no need to explicitly cast it to u16.
528 	 */
529 	for (i = 0; i < ctx->blocks; i++)
530 		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
531 
532 	memcpy(req->iv, ctx->iv, ivsize);
533 }
534 
535 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
536 {
537 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
538 	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
539 
540 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
541 	if (dd->ctx->is_aead)
542 		atmel_aes_authenc_complete(dd, err);
543 #endif
544 
545 	clk_disable(dd->iclk);
546 	dd->flags &= ~AES_FLAGS_BUSY;
547 
548 	if (!err && !dd->ctx->is_aead &&
549 	    (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
550 		if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
551 			atmel_aes_set_iv_as_last_ciphertext_block(dd);
552 		else
553 			atmel_aes_ctr_update_req_iv(dd);
554 	}
555 
556 	if (dd->is_async)
557 		dd->areq->complete(dd->areq, err);
558 
559 	tasklet_schedule(&dd->queue_task);
560 
561 	return err;
562 }
563 
564 static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
565 				     const __be32 *iv, const u32 *key, int keylen)
566 {
567 	u32 valmr = 0;
568 
569 	/* MR register must be set before IV registers */
570 	if (keylen == AES_KEYSIZE_128)
571 		valmr |= AES_MR_KEYSIZE_128;
572 	else if (keylen == AES_KEYSIZE_192)
573 		valmr |= AES_MR_KEYSIZE_192;
574 	else
575 		valmr |= AES_MR_KEYSIZE_256;
576 
577 	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
578 
579 	if (use_dma) {
580 		valmr |= AES_MR_SMOD_IDATAR0;
581 		if (dd->caps.has_dualbuff)
582 			valmr |= AES_MR_DUALBUFF;
583 	} else {
584 		valmr |= AES_MR_SMOD_AUTO;
585 	}
586 
587 	atmel_aes_write(dd, AES_MR, valmr);
588 
589 	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
590 
591 	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
592 		atmel_aes_write_block(dd, AES_IVR(0), iv);
593 }
594 
595 static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
596 					const __be32 *iv)
597 
598 {
599 	atmel_aes_write_ctrl_key(dd, use_dma, iv,
600 				 dd->ctx->key, dd->ctx->keylen);
601 }
602 
603 /* CPU transfer */
604 
605 static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
606 {
607 	int err = 0;
608 	u32 isr;
609 
610 	for (;;) {
611 		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
612 		dd->data += 4;
613 		dd->datalen -= AES_BLOCK_SIZE;
614 
615 		if (dd->datalen < AES_BLOCK_SIZE)
616 			break;
617 
618 		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
619 
620 		isr = atmel_aes_read(dd, AES_ISR);
621 		if (!(isr & AES_INT_DATARDY)) {
622 			dd->resume = atmel_aes_cpu_transfer;
623 			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
624 			return -EINPROGRESS;
625 		}
626 	}
627 
628 	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
629 				 dd->buf, dd->total))
630 		err = -EINVAL;
631 
632 	if (err)
633 		return atmel_aes_complete(dd, err);
634 
635 	return dd->cpu_transfer_complete(dd);
636 }
637 
638 static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
639 			       struct scatterlist *src,
640 			       struct scatterlist *dst,
641 			       size_t len,
642 			       atmel_aes_fn_t resume)
643 {
644 	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
645 
646 	if (unlikely(len == 0))
647 		return -EINVAL;
648 
649 	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
650 
651 	dd->total = len;
652 	dd->real_dst = dst;
653 	dd->cpu_transfer_complete = resume;
654 	dd->datalen = len + padlen;
655 	dd->data = (u32 *)dd->buf;
656 	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
657 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
658 }
659 
660 
661 /* DMA transfer */
662 
663 static void atmel_aes_dma_callback(void *data);
664 
665 static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
666 				    struct scatterlist *sg,
667 				    size_t len,
668 				    struct atmel_aes_dma *dma)
669 {
670 	int nents;
671 
672 	if (!IS_ALIGNED(len, dd->ctx->block_size))
673 		return false;
674 
675 	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
676 		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
677 			return false;
678 
679 		if (len <= sg->length) {
680 			if (!IS_ALIGNED(len, dd->ctx->block_size))
681 				return false;
682 
683 			dma->nents = nents + 1;
684 			dma->remainder = sg->length - len;
685 			sg->length = len;
686 			return true;
687 		}
688 
689 		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
690 			return false;
691 
692 		len -= sg->length;
693 	}
694 
695 	return false;
696 }
697 
698 static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
699 {
700 	struct scatterlist *sg = dma->sg;
701 	int nents = dma->nents;
702 
703 	if (!dma->remainder)
704 		return;
705 
706 	while (--nents > 0 && sg)
707 		sg = sg_next(sg);
708 
709 	if (!sg)
710 		return;
711 
712 	sg->length += dma->remainder;
713 }
714 
715 static int atmel_aes_map(struct atmel_aes_dev *dd,
716 			 struct scatterlist *src,
717 			 struct scatterlist *dst,
718 			 size_t len)
719 {
720 	bool src_aligned, dst_aligned;
721 	size_t padlen;
722 
723 	dd->total = len;
724 	dd->src.sg = src;
725 	dd->dst.sg = dst;
726 	dd->real_dst = dst;
727 
728 	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
729 	if (src == dst)
730 		dst_aligned = src_aligned;
731 	else
732 		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
733 	if (!src_aligned || !dst_aligned) {
734 		padlen = atmel_aes_padlen(len, dd->ctx->block_size);
735 
736 		if (dd->buflen < len + padlen)
737 			return -ENOMEM;
738 
739 		if (!src_aligned) {
740 			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
741 			dd->src.sg = &dd->aligned_sg;
742 			dd->src.nents = 1;
743 			dd->src.remainder = 0;
744 		}
745 
746 		if (!dst_aligned) {
747 			dd->dst.sg = &dd->aligned_sg;
748 			dd->dst.nents = 1;
749 			dd->dst.remainder = 0;
750 		}
751 
752 		sg_init_table(&dd->aligned_sg, 1);
753 		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
754 	}
755 
756 	if (dd->src.sg == dd->dst.sg) {
757 		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
758 					    DMA_BIDIRECTIONAL);
759 		dd->dst.sg_len = dd->src.sg_len;
760 		if (!dd->src.sg_len)
761 			return -EFAULT;
762 	} else {
763 		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
764 					    DMA_TO_DEVICE);
765 		if (!dd->src.sg_len)
766 			return -EFAULT;
767 
768 		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
769 					    DMA_FROM_DEVICE);
770 		if (!dd->dst.sg_len) {
771 			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
772 				     DMA_TO_DEVICE);
773 			return -EFAULT;
774 		}
775 	}
776 
777 	return 0;
778 }
779 
780 static void atmel_aes_unmap(struct atmel_aes_dev *dd)
781 {
782 	if (dd->src.sg == dd->dst.sg) {
783 		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
784 			     DMA_BIDIRECTIONAL);
785 
786 		if (dd->src.sg != &dd->aligned_sg)
787 			atmel_aes_restore_sg(&dd->src);
788 	} else {
789 		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
790 			     DMA_FROM_DEVICE);
791 
792 		if (dd->dst.sg != &dd->aligned_sg)
793 			atmel_aes_restore_sg(&dd->dst);
794 
795 		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
796 			     DMA_TO_DEVICE);
797 
798 		if (dd->src.sg != &dd->aligned_sg)
799 			atmel_aes_restore_sg(&dd->src);
800 	}
801 
802 	if (dd->dst.sg == &dd->aligned_sg)
803 		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
804 				    dd->buf, dd->total);
805 }
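/*
 * Bounce-buffer summary for the map/unmap pair above: scatterlists
 * whose segments are not 32-bit aligned or not block-size multiples
 * are collapsed into the pre-allocated dd->buf, described by the
 * single-entry dd->aligned_sg. On unmap, the data is copied back to
 * the caller's real destination and any sg entry that was shortened
 * by atmel_aes_check_aligned() gets its original length restored via
 * atmel_aes_restore_sg().
 */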
806 
807 static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
808 					enum dma_slave_buswidth addr_width,
809 					enum dma_transfer_direction dir,
810 					u32 maxburst)
811 {
812 	struct dma_async_tx_descriptor *desc;
813 	struct dma_slave_config config;
814 	dma_async_tx_callback callback;
815 	struct atmel_aes_dma *dma;
816 	int err;
817 
818 	memset(&config, 0, sizeof(config));
819 	config.src_addr_width = addr_width;
820 	config.dst_addr_width = addr_width;
821 	config.src_maxburst = maxburst;
822 	config.dst_maxburst = maxburst;
823 
824 	switch (dir) {
825 	case DMA_MEM_TO_DEV:
826 		dma = &dd->src;
827 		callback = NULL;
828 		config.dst_addr = dd->phys_base + AES_IDATAR(0);
829 		break;
830 
831 	case DMA_DEV_TO_MEM:
832 		dma = &dd->dst;
833 		callback = atmel_aes_dma_callback;
834 		config.src_addr = dd->phys_base + AES_ODATAR(0);
835 		break;
836 
837 	default:
838 		return -EINVAL;
839 	}
840 
841 	err = dmaengine_slave_config(dma->chan, &config);
842 	if (err)
843 		return err;
844 
845 	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
846 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
847 	if (!desc)
848 		return -ENOMEM;
849 
850 	desc->callback = callback;
851 	desc->callback_param = dd;
852 	dmaengine_submit(desc);
853 	dma_async_issue_pending(dma->chan);
854 
855 	return 0;
856 }
857 
858 static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
859 			       struct scatterlist *src,
860 			       struct scatterlist *dst,
861 			       size_t len,
862 			       atmel_aes_fn_t resume)
863 {
864 	enum dma_slave_buswidth addr_width;
865 	u32 maxburst;
866 	int err;
867 
868 	switch (dd->ctx->block_size) {
869 	case CFB8_BLOCK_SIZE:
870 		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
871 		maxburst = 1;
872 		break;
873 
874 	case CFB16_BLOCK_SIZE:
875 		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
876 		maxburst = 1;
877 		break;
878 
879 	case CFB32_BLOCK_SIZE:
880 	case CFB64_BLOCK_SIZE:
881 		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
882 		maxburst = 1;
883 		break;
884 
885 	case AES_BLOCK_SIZE:
886 		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
887 		maxburst = dd->caps.max_burst_size;
888 		break;
889 
890 	default:
891 		err = -EINVAL;
892 		goto exit;
893 	}
894 
895 	err = atmel_aes_map(dd, src, dst, len);
896 	if (err)
897 		goto exit;
898 
899 	dd->resume = resume;
900 
901 	/* Set output DMA transfer first */
902 	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
903 					   maxburst);
904 	if (err)
905 		goto unmap;
906 
907 	/* Then set input DMA transfer */
908 	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
909 					   maxburst);
910 	if (err)
911 		goto output_transfer_stop;
912 
913 	return -EINPROGRESS;
914 
915 output_transfer_stop:
916 	dmaengine_terminate_sync(dd->dst.chan);
917 unmap:
918 	atmel_aes_unmap(dd);
919 exit:
920 	return atmel_aes_complete(dd, err);
921 }
922 
923 static void atmel_aes_dma_callback(void *data)
924 {
925 	struct atmel_aes_dev *dd = data;
926 
927 	atmel_aes_unmap(dd);
928 	dd->is_async = true;
929 	(void)dd->resume(dd);
930 }
931 
932 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
933 				  struct crypto_async_request *new_areq)
934 {
935 	struct crypto_async_request *areq, *backlog;
936 	struct atmel_aes_base_ctx *ctx;
937 	unsigned long flags;
938 	bool start_async;
939 	int err, ret = 0;
940 
941 	spin_lock_irqsave(&dd->lock, flags);
942 	if (new_areq)
943 		ret = crypto_enqueue_request(&dd->queue, new_areq);
944 	if (dd->flags & AES_FLAGS_BUSY) {
945 		spin_unlock_irqrestore(&dd->lock, flags);
946 		return ret;
947 	}
948 	backlog = crypto_get_backlog(&dd->queue);
949 	areq = crypto_dequeue_request(&dd->queue);
950 	if (areq)
951 		dd->flags |= AES_FLAGS_BUSY;
952 	spin_unlock_irqrestore(&dd->lock, flags);
953 
954 	if (!areq)
955 		return ret;
956 
957 	if (backlog)
958 		backlog->complete(backlog, -EINPROGRESS);
959 
960 	ctx = crypto_tfm_ctx(areq->tfm);
961 
962 	dd->areq = areq;
963 	start_async = (areq != new_areq);
964 	dd->is_async = start_async;
965 
966 	/* WARNING: ctx->start() MAY change dd->is_async. */
967 	err = ctx->start(dd);
968 	return (start_async) ? ret : err;
969 }
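/*
 * Return-value semantics of atmel_aes_handle_queue(), from a caller's
 * point of view: if new_areq is dequeued and started right away, the
 * caller sees ctx->start()'s own result (usually -EINPROGRESS); if
 * new_areq was only queued while another request runs, the caller
 * sees crypto_enqueue_request()'s -EINPROGRESS/-EBUSY and completion
 * is reported asynchronously through areq->complete().
 */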
970 
971 
972 /* AES async block ciphers */
973 
974 static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
975 {
976 	return atmel_aes_complete(dd, 0);
977 }
978 
979 static int atmel_aes_start(struct atmel_aes_dev *dd)
980 {
981 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
982 	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
983 	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
984 			dd->ctx->block_size != AES_BLOCK_SIZE);
985 	int err;
986 
987 	atmel_aes_set_mode(dd, rctx);
988 
989 	err = atmel_aes_hw_init(dd);
990 	if (err)
991 		return atmel_aes_complete(dd, err);
992 
993 	atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
994 	if (use_dma)
995 		return atmel_aes_dma_start(dd, req->src, req->dst,
996 					   req->cryptlen,
997 					   atmel_aes_transfer_complete);
998 
999 	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
1000 				   atmel_aes_transfer_complete);
1001 }
1002 
1003 static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
1004 {
1005 	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
1006 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
1007 	struct scatterlist *src, *dst;
1008 	size_t datalen;
1009 	u32 ctr;
1010 	u16 start, end;
1011 	bool use_dma, fragmented = false;
1012 
1013 	/* Check for transfer completion. */
1014 	ctx->offset += dd->total;
1015 	if (ctx->offset >= req->cryptlen)
1016 		return atmel_aes_transfer_complete(dd);
1017 
1018 	/* Compute data length. */
1019 	datalen = req->cryptlen - ctx->offset;
1020 	ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
1021 	ctr = be32_to_cpu(ctx->iv[3]);
1022 
1023 	/* Check for 16-bit counter overflow. */
1024 	start = ctr & 0xffff;
1025 	end = start + ctx->blocks - 1;
1026 
1027 	if (ctx->blocks >> 16 || end < start) {
1028 		ctr |= 0xffff;
1029 		datalen = AES_BLOCK_SIZE * (0x10000 - start);
1030 		fragmented = true;
1031 	}
1032 
1033 	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
1034 
1035 	/* Jump to offset. */
1036 	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
1037 	dst = ((req->src == req->dst) ? src :
1038 	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
1039 
1040 	/* Configure hardware. */
1041 	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
1042 	if (unlikely(fragmented)) {
1043 		/*
1044 		 * Increment the counter manually to cope with the hardware
1045 		 * counter overflow.
1046 		 */
1047 		ctx->iv[3] = cpu_to_be32(ctr);
1048 		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
1049 	}
1050 
1051 	if (use_dma)
1052 		return atmel_aes_dma_start(dd, src, dst, datalen,
1053 					   atmel_aes_ctr_transfer);
1054 
1055 	return atmel_aes_cpu_start(dd, src, dst, datalen,
1056 				   atmel_aes_ctr_transfer);
1057 }
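/*
 * Worked example of the 16-bit counter clamp above (illustrative
 * numbers): with start = 0xfffe and 5 blocks left, end wraps below
 * start, so the fragment is clamped to 0x10000 - 0xfffe = 2 blocks
 * (32 bytes). The IV is then incremented past the wrap in software
 * and atmel_aes_ctr_transfer() re-enters for the remaining 3 blocks.
 */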
1058 
1059 static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
1060 {
1061 	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
1062 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
1063 	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1064 	int err;
1065 
1066 	atmel_aes_set_mode(dd, rctx);
1067 
1068 	err = atmel_aes_hw_init(dd);
1069 	if (err)
1070 		return atmel_aes_complete(dd, err);
1071 
1072 	memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
1073 	ctx->offset = 0;
1074 	dd->total = 0;
1075 	return atmel_aes_ctr_transfer(dd);
1076 }
1077 
1078 static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
1079 {
1080 	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1081 	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
1082 			crypto_skcipher_reqtfm(req));
1083 
1084 	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
1085 	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
1086 				      req->base.complete, req->base.data);
1087 	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
1088 				   req->cryptlen, req->iv);
1089 
1090 	return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1091 		     crypto_skcipher_decrypt(&rctx->fallback_req);
1092 }
1093 
1094 static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
1095 {
1096 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1097 	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
1098 	struct atmel_aes_reqctx *rctx;
1099 	u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
1100 
1101 	if (opmode == AES_FLAGS_XTS) {
1102 		if (req->cryptlen < XTS_BLOCK_SIZE)
1103 			return -EINVAL;
1104 
1105 		if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
1106 			return atmel_aes_xts_fallback(req,
1107 						      mode & AES_FLAGS_ENCRYPT);
1108 	}
1109 
1110 	/*
1111 	 * ECB, CBC, CFB, OFB and CTR modes require the plaintext and
1112 	 * ciphertext to have a positive integer length.
1113 	 */
1114 	if (!req->cryptlen && opmode != AES_FLAGS_XTS)
1115 		return 0;
1116 
1117 	if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
1118 	    !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
1119 		return -EINVAL;
1120 
1121 	switch (mode & AES_FLAGS_OPMODE_MASK) {
1122 	case AES_FLAGS_CFB8:
1123 		ctx->block_size = CFB8_BLOCK_SIZE;
1124 		break;
1125 
1126 	case AES_FLAGS_CFB16:
1127 		ctx->block_size = CFB16_BLOCK_SIZE;
1128 		break;
1129 
1130 	case AES_FLAGS_CFB32:
1131 		ctx->block_size = CFB32_BLOCK_SIZE;
1132 		break;
1133 
1134 	case AES_FLAGS_CFB64:
1135 		ctx->block_size = CFB64_BLOCK_SIZE;
1136 		break;
1137 
1138 	default:
1139 		ctx->block_size = AES_BLOCK_SIZE;
1140 		break;
1141 	}
1142 	ctx->is_aead = false;
1143 
1144 	rctx = skcipher_request_ctx(req);
1145 	rctx->mode = mode;
1146 
1147 	if (opmode != AES_FLAGS_ECB &&
1148 	    !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
1149 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1150 
1151 		if (req->cryptlen >= ivsize)
1152 			scatterwalk_map_and_copy(rctx->lastc, req->src,
1153 						 req->cryptlen - ivsize,
1154 						 ivsize, 0);
1155 	}
1156 
1157 	return atmel_aes_handle_queue(ctx->dd, &req->base);
1158 }
1159 
1160 static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
1161 			   unsigned int keylen)
1162 {
1163 	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
1164 
1165 	if (keylen != AES_KEYSIZE_128 &&
1166 	    keylen != AES_KEYSIZE_192 &&
1167 	    keylen != AES_KEYSIZE_256)
1168 		return -EINVAL;
1169 
1170 	memcpy(ctx->key, key, keylen);
1171 	ctx->keylen = keylen;
1172 
1173 	return 0;
1174 }
1175 
1176 static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
1177 {
1178 	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1179 }
1180 
1181 static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
1182 {
1183 	return atmel_aes_crypt(req, AES_FLAGS_ECB);
1184 }
1185 
1186 static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
1187 {
1188 	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
1189 }
1190 
1191 static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
1192 {
1193 	return atmel_aes_crypt(req, AES_FLAGS_CBC);
1194 }
1195 
1196 static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
1197 {
1198 	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
1199 }
1200 
1201 static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
1202 {
1203 	return atmel_aes_crypt(req, AES_FLAGS_OFB);
1204 }
1205 
1206 static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
1207 {
1208 	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
1209 }
1210 
1211 static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
1212 {
1213 	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
1214 }
1215 
1216 static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
1217 {
1218 	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
1219 }
1220 
1221 static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
1222 {
1223 	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
1224 }
1225 
1226 static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
1227 {
1228 	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
1229 }
1230 
1231 static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
1232 {
1233 	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
1234 }
1235 
1236 static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
1237 {
1238 	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
1239 }
1240 
1241 static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
1242 {
1243 	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
1244 }
1245 
1246 static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
1247 {
1248 	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
1249 }
1250 
1251 static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
1252 {
1253 	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
1254 }
1255 
1256 static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
1257 {
1258 	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
1259 }
1260 
1261 static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
1262 {
1263 	return atmel_aes_crypt(req, AES_FLAGS_CTR);
1264 }
1265 
1266 static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
1267 {
1268 	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
1269 	struct atmel_aes_dev *dd;
1270 
1271 	dd = atmel_aes_dev_alloc(&ctx->base);
1272 	if (!dd)
1273 		return -ENODEV;
1274 
1275 	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1276 	ctx->base.dd = dd;
1277 	ctx->base.dd->ctx = &ctx->base;
1278 	ctx->base.start = atmel_aes_start;
1279 
1280 	return 0;
1281 }
1282 
1283 static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
1284 {
1285 	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
1286 	struct atmel_aes_dev *dd;
1287 
1288 	dd = atmel_aes_dev_alloc(&ctx->base);
1289 	if (!dd)
1290 		return -ENODEV;
1291 
1292 	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1293 	ctx->base.dd = dd;
1294 	ctx->base.dd->ctx = &ctx->base;
1295 	ctx->base.start = atmel_aes_ctr_start;
1296 
1297 	return 0;
1298 }
1299 
1300 static struct skcipher_alg aes_algs[] = {
1301 {
1302 	.base.cra_name		= "ecb(aes)",
1303 	.base.cra_driver_name	= "atmel-ecb-aes",
1304 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1305 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1306 
1307 	.init			= atmel_aes_init_tfm,
1308 	.min_keysize		= AES_MIN_KEY_SIZE,
1309 	.max_keysize		= AES_MAX_KEY_SIZE,
1310 	.setkey			= atmel_aes_setkey,
1311 	.encrypt		= atmel_aes_ecb_encrypt,
1312 	.decrypt		= atmel_aes_ecb_decrypt,
1313 },
1314 {
1315 	.base.cra_name		= "cbc(aes)",
1316 	.base.cra_driver_name	= "atmel-cbc-aes",
1317 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1318 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1319 
1320 	.init			= atmel_aes_init_tfm,
1321 	.min_keysize		= AES_MIN_KEY_SIZE,
1322 	.max_keysize		= AES_MAX_KEY_SIZE,
1323 	.setkey			= atmel_aes_setkey,
1324 	.encrypt		= atmel_aes_cbc_encrypt,
1325 	.decrypt		= atmel_aes_cbc_decrypt,
1326 	.ivsize			= AES_BLOCK_SIZE,
1327 },
1328 {
1329 	.base.cra_name		= "ofb(aes)",
1330 	.base.cra_driver_name	= "atmel-ofb-aes",
1331 	.base.cra_blocksize	= 1,
1332 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1333 
1334 	.init			= atmel_aes_init_tfm,
1335 	.min_keysize		= AES_MIN_KEY_SIZE,
1336 	.max_keysize		= AES_MAX_KEY_SIZE,
1337 	.setkey			= atmel_aes_setkey,
1338 	.encrypt		= atmel_aes_ofb_encrypt,
1339 	.decrypt		= atmel_aes_ofb_decrypt,
1340 	.ivsize			= AES_BLOCK_SIZE,
1341 },
1342 {
1343 	.base.cra_name		= "cfb(aes)",
1344 	.base.cra_driver_name	= "atmel-cfb-aes",
1345 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1346 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1347 
1348 	.init			= atmel_aes_init_tfm,
1349 	.min_keysize		= AES_MIN_KEY_SIZE,
1350 	.max_keysize		= AES_MAX_KEY_SIZE,
1351 	.setkey			= atmel_aes_setkey,
1352 	.encrypt		= atmel_aes_cfb_encrypt,
1353 	.decrypt		= atmel_aes_cfb_decrypt,
1354 	.ivsize			= AES_BLOCK_SIZE,
1355 },
1356 {
1357 	.base.cra_name		= "cfb32(aes)",
1358 	.base.cra_driver_name	= "atmel-cfb32-aes",
1359 	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
1360 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1361 
1362 	.init			= atmel_aes_init_tfm,
1363 	.min_keysize		= AES_MIN_KEY_SIZE,
1364 	.max_keysize		= AES_MAX_KEY_SIZE,
1365 	.setkey			= atmel_aes_setkey,
1366 	.encrypt		= atmel_aes_cfb32_encrypt,
1367 	.decrypt		= atmel_aes_cfb32_decrypt,
1368 	.ivsize			= AES_BLOCK_SIZE,
1369 },
1370 {
1371 	.base.cra_name		= "cfb16(aes)",
1372 	.base.cra_driver_name	= "atmel-cfb16-aes",
1373 	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
1374 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1375 
1376 	.init			= atmel_aes_init_tfm,
1377 	.min_keysize		= AES_MIN_KEY_SIZE,
1378 	.max_keysize		= AES_MAX_KEY_SIZE,
1379 	.setkey			= atmel_aes_setkey,
1380 	.encrypt		= atmel_aes_cfb16_encrypt,
1381 	.decrypt		= atmel_aes_cfb16_decrypt,
1382 	.ivsize			= AES_BLOCK_SIZE,
1383 },
1384 {
1385 	.base.cra_name		= "cfb8(aes)",
1386 	.base.cra_driver_name	= "atmel-cfb8-aes",
1387 	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
1388 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1389 
1390 	.init			= atmel_aes_init_tfm,
1391 	.min_keysize		= AES_MIN_KEY_SIZE,
1392 	.max_keysize		= AES_MAX_KEY_SIZE,
1393 	.setkey			= atmel_aes_setkey,
1394 	.encrypt		= atmel_aes_cfb8_encrypt,
1395 	.decrypt		= atmel_aes_cfb8_decrypt,
1396 	.ivsize			= AES_BLOCK_SIZE,
1397 },
1398 {
1399 	.base.cra_name		= "ctr(aes)",
1400 	.base.cra_driver_name	= "atmel-ctr-aes",
1401 	.base.cra_blocksize	= 1,
1402 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctr_ctx),
1403 
1404 	.init			= atmel_aes_ctr_init_tfm,
1405 	.min_keysize		= AES_MIN_KEY_SIZE,
1406 	.max_keysize		= AES_MAX_KEY_SIZE,
1407 	.setkey			= atmel_aes_setkey,
1408 	.encrypt		= atmel_aes_ctr_encrypt,
1409 	.decrypt		= atmel_aes_ctr_decrypt,
1410 	.ivsize			= AES_BLOCK_SIZE,
1411 },
1412 };
1413 
1414 static struct skcipher_alg aes_cfb64_alg = {
1415 	.base.cra_name		= "cfb64(aes)",
1416 	.base.cra_driver_name	= "atmel-cfb64-aes",
1417 	.base.cra_blocksize	= CFB64_BLOCK_SIZE,
1418 	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
1419 
1420 	.init			= atmel_aes_init_tfm,
1421 	.min_keysize		= AES_MIN_KEY_SIZE,
1422 	.max_keysize		= AES_MAX_KEY_SIZE,
1423 	.setkey			= atmel_aes_setkey,
1424 	.encrypt		= atmel_aes_cfb64_encrypt,
1425 	.decrypt		= atmel_aes_cfb64_decrypt,
1426 	.ivsize			= AES_BLOCK_SIZE,
1427 };
1428 
1429 
1430 /* gcm aead functions */
1431 
1432 static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1433 			       const u32 *data, size_t datalen,
1434 			       const __be32 *ghash_in, __be32 *ghash_out,
1435 			       atmel_aes_fn_t resume);
1436 static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
1437 static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
1438 
1439 static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
1440 static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
1441 static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
1442 static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
1443 static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
1444 static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
1445 static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
1446 
1447 static inline struct atmel_aes_gcm_ctx *
1448 atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
1449 {
1450 	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
1451 }
1452 
1453 static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1454 			       const u32 *data, size_t datalen,
1455 			       const __be32 *ghash_in, __be32 *ghash_out,
1456 			       atmel_aes_fn_t resume)
1457 {
1458 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1459 
1460 	dd->data = (u32 *)data;
1461 	dd->datalen = datalen;
1462 	ctx->ghash_in = ghash_in;
1463 	ctx->ghash_out = ghash_out;
1464 	ctx->ghash_resume = resume;
1465 
1466 	atmel_aes_write_ctrl(dd, false, NULL);
1467 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
1468 }
1469 
1470 static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
1471 {
1472 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1473 
1474 	/* Set the data length. */
1475 	atmel_aes_write(dd, AES_AADLENR, dd->total);
1476 	atmel_aes_write(dd, AES_CLENR, 0);
1477 
1478 	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
1479 	if (ctx->ghash_in)
1480 		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
1481 
1482 	return atmel_aes_gcm_ghash_finalize(dd);
1483 }
1484 
1485 static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
1486 {
1487 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1488 	u32 isr;
1489 
1490 	/* Write data into the Input Data Registers. */
1491 	while (dd->datalen > 0) {
1492 		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1493 		dd->data += 4;
1494 		dd->datalen -= AES_BLOCK_SIZE;
1495 
1496 		isr = atmel_aes_read(dd, AES_ISR);
1497 		if (!(isr & AES_INT_DATARDY)) {
1498 			dd->resume = atmel_aes_gcm_ghash_finalize;
1499 			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1500 			return -EINPROGRESS;
1501 		}
1502 	}
1503 
1504 	/* Read the computed hash from GHASHRx. */
1505 	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
1506 
1507 	return ctx->ghash_resume(dd);
1508 }
1509 
1510 
1511 static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1512 {
1513 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1514 	struct aead_request *req = aead_request_cast(dd->areq);
1515 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1516 	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1517 	size_t ivsize = crypto_aead_ivsize(tfm);
1518 	size_t datalen, padlen;
1519 	const void *iv = req->iv;
1520 	u8 *data = dd->buf;
1521 	int err;
1522 
1523 	atmel_aes_set_mode(dd, rctx);
1524 
1525 	err = atmel_aes_hw_init(dd);
1526 	if (err)
1527 		return atmel_aes_complete(dd, err);
1528 
1529 	if (likely(ivsize == GCM_AES_IV_SIZE)) {
1530 		memcpy(ctx->j0, iv, ivsize);
1531 		ctx->j0[3] = cpu_to_be32(1);
1532 		return atmel_aes_gcm_process(dd);
1533 	}
1534 
1535 	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1536 	datalen = ivsize + padlen + AES_BLOCK_SIZE;
1537 	if (datalen > dd->buflen)
1538 		return atmel_aes_complete(dd, -EINVAL);
1539 
1540 	memcpy(data, iv, ivsize);
1541 	memset(data + ivsize, 0, padlen + sizeof(u64));
1542 	((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1543 
1544 	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1545 				   NULL, ctx->j0, atmel_aes_gcm_process);
1546 }
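/*
 * The non-96-bit-IV path above implements the GCM definition
 * J0 = GHASH(IV || 0^s || [0]64 || [len(IV)]64). For a 16-byte IV,
 * for instance, dd->buf is laid out as:
 *
 *	| IV (16 bytes) | zero pad (8) | be64 bit length = 128 (8) |
 *
 * i.e. datalen = 32, and the GHASH of those two blocks becomes J0.
 * The common 12-byte IV short cut is simply J0 = IV || 0x00000001.
 */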
1547 
1548 static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1549 {
1550 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1551 	struct aead_request *req = aead_request_cast(dd->areq);
1552 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1553 	bool enc = atmel_aes_is_encrypt(dd);
1554 	u32 authsize;
1555 
1556 	/* Compute text length. */
1557 	authsize = crypto_aead_authsize(tfm);
1558 	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1559 
1560 	/*
1561 	 * According to the tcrypt test suite, the GCM Automatic Tag Generation
1562 	 * fails when both the message and its associated data are empty.
1563 	 */
1564 	if (likely(req->assoclen != 0 || ctx->textlen != 0))
1565 		dd->flags |= AES_FLAGS_GTAGEN;
1566 
1567 	atmel_aes_write_ctrl(dd, false, NULL);
1568 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1569 }
1570 
1571 static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
1572 {
1573 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1574 	struct aead_request *req = aead_request_cast(dd->areq);
1575 	__be32 j0_lsw, *j0 = ctx->j0;
1576 	size_t padlen;
1577 
1578 	/* Write incr32(J0) into IV. */
1579 	j0_lsw = j0[3];
1580 	be32_add_cpu(&j0[3], 1);
1581 	atmel_aes_write_block(dd, AES_IVR(0), j0);
1582 	j0[3] = j0_lsw;
1583 
1584 	/* Set aad and text lengths. */
1585 	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
1586 	atmel_aes_write(dd, AES_CLENR, ctx->textlen);
1587 
1588 	/* Check whether AAD is present. */
1589 	if (unlikely(req->assoclen == 0)) {
1590 		dd->datalen = 0;
1591 		return atmel_aes_gcm_data(dd);
1592 	}
1593 
1594 	/* Copy assoc data and add padding. */
1595 	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
1596 	if (unlikely(req->assoclen + padlen > dd->buflen))
1597 		return atmel_aes_complete(dd, -EINVAL);
1598 	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
1599 
1600 	/* Write assoc data into the Input Data register. */
1601 	dd->data = (u32 *)dd->buf;
1602 	dd->datalen = req->assoclen + padlen;
1603 	return atmel_aes_gcm_data(dd);
1604 }
1605 
1606 static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
1607 {
1608 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1609 	struct aead_request *req = aead_request_cast(dd->areq);
1610 	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
1611 	struct scatterlist *src, *dst;
1612 	u32 isr, mr;
1613 
1614 	/* Write AAD first. */
1615 	while (dd->datalen > 0) {
1616 		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1617 		dd->data += 4;
1618 		dd->datalen -= AES_BLOCK_SIZE;
1619 
1620 		isr = atmel_aes_read(dd, AES_ISR);
1621 		if (!(isr & AES_INT_DATARDY)) {
1622 			dd->resume = atmel_aes_gcm_data;
1623 			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1624 			return -EINPROGRESS;
1625 		}
1626 	}
1627 
1628 	/* GMAC only. */
1629 	if (unlikely(ctx->textlen == 0))
1630 		return atmel_aes_gcm_tag_init(dd);
1631 
1632 	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
1633 	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
1634 	dst = ((req->src == req->dst) ? src :
1635 	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
1636 
1637 	if (use_dma) {
1638 		/* Update the Mode Register for DMA transfers. */
1639 		mr = atmel_aes_read(dd, AES_MR);
1640 		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
1641 		mr |= AES_MR_SMOD_IDATAR0;
1642 		if (dd->caps.has_dualbuff)
1643 			mr |= AES_MR_DUALBUFF;
1644 		atmel_aes_write(dd, AES_MR, mr);
1645 
1646 		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
1647 					   atmel_aes_gcm_tag_init);
1648 	}
1649 
1650 	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
1651 				   atmel_aes_gcm_tag_init);
1652 }
1653 
1654 static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
1655 {
1656 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1657 	struct aead_request *req = aead_request_cast(dd->areq);
1658 	__be64 *data = dd->buf;
1659 
1660 	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
1661 		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
1662 			dd->resume = atmel_aes_gcm_tag_init;
1663 			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
1664 			return -EINPROGRESS;
1665 		}
1666 
1667 		return atmel_aes_gcm_finalize(dd);
1668 	}
1669 
1670 	/* Read the GCM Intermediate Hash Word Registers. */
1671 	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
1672 
1673 	data[0] = cpu_to_be64(req->assoclen * 8);
1674 	data[1] = cpu_to_be64(ctx->textlen * 8);
1675 
1676 	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
1677 				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
1678 }
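/*
 * Manual tag path (taken when AES_FLAGS_GTAGEN is off, i.e. both the
 * message and the AAD are empty): the running GHASH is read back, the
 * lengths block [len(A)]64 || [len(C)]64 built above is hashed into
 * it, and atmel_aes_gcm_tag() then computes T = GCTR(J0, GHASH) by
 * running a single CTR block with J0 as the counter value, matching
 * the GCM specification.
 */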
1679 
1680 static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
1681 {
1682 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1683 	unsigned long flags;
1684 
1685 	/*
1686 	 * Change mode to CTR to complete the tag generation.
1687 	 * Use J0 as Initialization Vector.
1688 	 */
1689 	flags = dd->flags;
1690 	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
1691 	dd->flags |= AES_FLAGS_CTR;
1692 	atmel_aes_write_ctrl(dd, false, ctx->j0);
1693 	dd->flags = flags;
1694 
1695 	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
1696 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
1697 }
1698 
1699 static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
1700 {
1701 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1702 	struct aead_request *req = aead_request_cast(dd->areq);
1703 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1704 	bool enc = atmel_aes_is_encrypt(dd);
1705 	u32 offset, authsize, itag[4], *otag = ctx->tag;
1706 	int err;
1707 
1708 	/* Read the computed tag. */
1709 	if (likely(dd->flags & AES_FLAGS_GTAGEN))
1710 		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
1711 	else
1712 		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
1713 
1714 	offset = req->assoclen + ctx->textlen;
1715 	authsize = crypto_aead_authsize(tfm);
1716 	if (enc) {
1717 		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
1718 		err = 0;
1719 	} else {
1720 		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
1721 		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
1722 	}
1723 
1724 	return atmel_aes_complete(dd, err);
1725 }
1726 
1727 static int atmel_aes_gcm_crypt(struct aead_request *req,
1728 			       unsigned long mode)
1729 {
1730 	struct atmel_aes_base_ctx *ctx;
1731 	struct atmel_aes_reqctx *rctx;
1732 
1733 	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1734 	ctx->block_size = AES_BLOCK_SIZE;
1735 	ctx->is_aead = true;
1736 
1737 	rctx = aead_request_ctx(req);
1738 	rctx->mode = AES_FLAGS_GCM | mode;
1739 
1740 	return atmel_aes_handle_queue(ctx->dd, &req->base);
1741 }
1742 
1743 static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
1744 				unsigned int keylen)
1745 {
1746 	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
1747 
1748 	if (keylen != AES_KEYSIZE_256 &&
1749 	    keylen != AES_KEYSIZE_192 &&
1750 	    keylen != AES_KEYSIZE_128)
1751 		return -EINVAL;
1752 
1753 	memcpy(ctx->key, key, keylen);
1754 	ctx->keylen = keylen;
1755 
1756 	return 0;
1757 }
1758 
1759 static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
1760 				     unsigned int authsize)
1761 {
1762 	return crypto_gcm_check_authsize(authsize);
1763 }
1764 
1765 static int atmel_aes_gcm_encrypt(struct aead_request *req)
1766 {
1767 	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
1768 }
1769 
1770 static int atmel_aes_gcm_decrypt(struct aead_request *req)
1771 {
1772 	return atmel_aes_gcm_crypt(req, 0);
1773 }
1774 
1775 static int atmel_aes_gcm_init(struct crypto_aead *tfm)
1776 {
1777 	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
1778 	struct atmel_aes_dev *dd;
1779 
1780 	dd = atmel_aes_dev_alloc(&ctx->base);
1781 	if (!dd)
1782 		return -ENODEV;
1783 
1784 	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1785 	ctx->base.dd = dd;
1786 	ctx->base.dd->ctx = &ctx->base;
1787 	ctx->base.start = atmel_aes_gcm_start;
1788 
1789 	return 0;
1790 }
1791 
1792 static struct aead_alg aes_gcm_alg = {
1793 	.setkey		= atmel_aes_gcm_setkey,
1794 	.setauthsize	= atmel_aes_gcm_setauthsize,
1795 	.encrypt	= atmel_aes_gcm_encrypt,
1796 	.decrypt	= atmel_aes_gcm_decrypt,
1797 	.init		= atmel_aes_gcm_init,
1798 	.ivsize		= GCM_AES_IV_SIZE,
1799 	.maxauthsize	= AES_BLOCK_SIZE,
1800 
1801 	.base = {
1802 		.cra_name		= "gcm(aes)",
1803 		.cra_driver_name	= "atmel-gcm-aes",
1804 		.cra_blocksize		= 1,
1805 		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
1806 	},
1807 };
1808 
1809 
1810 /* xts functions */
1811 
1812 static inline struct atmel_aes_xts_ctx *
1813 atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
1814 {
1815 	return container_of(ctx, struct atmel_aes_xts_ctx, base);
1816 }
1817 
1818 static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
1819 
1820 static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
1821 {
1822 	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
1823 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
1824 	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1825 	unsigned long flags;
1826 	int err;
1827 
1828 	atmel_aes_set_mode(dd, rctx);
1829 
1830 	err = atmel_aes_hw_init(dd);
1831 	if (err)
1832 		return atmel_aes_complete(dd, err);
1833 
1834 	/* Compute the tweak value from req->iv with ecb(aes). */
1835 	flags = dd->flags;
1836 	dd->flags &= ~AES_FLAGS_MODE_MASK;
1837 	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1838 	atmel_aes_write_ctrl_key(dd, false, NULL,
1839 				 ctx->key2, ctx->base.keylen);
1840 	dd->flags = flags;
1841 
1842 	atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
1843 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
1844 }
1845 
1846 static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
1847 {
1848 	struct skcipher_request *req = skcipher_request_cast(dd->areq);
1849 	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
1850 	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
1851 	static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
1852 	u8 *tweak_bytes = (u8 *)tweak;
1853 	int i;
1854 
1855 	/* Read the computed ciphered tweak value. */
1856 	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
1857 	/*
1858 	 * Hardware quirk:
1859 	 * the order of the ciphered tweak bytes needs to be reversed before
1860 	 * writing them into the TWRx registers.
1861 	 */
1862 	for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
1863 		swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
1864 
1865 	/* Process the data. */
1866 	atmel_aes_write_ctrl(dd, use_dma, NULL);
1867 	atmel_aes_write_block(dd, AES_TWR(0), tweak);
1868 	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
1869 	if (use_dma)
1870 		return atmel_aes_dma_start(dd, req->src, req->dst,
1871 					   req->cryptlen,
1872 					   atmel_aes_transfer_complete);
1873 
1874 	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
1875 				   atmel_aes_transfer_complete);
1876 }
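/*
 * Illustration of the tweak byte swap above (hypothetical values): if
 * encrypting req->iv with ecb(aes) left the bytes t[0]..t[15] in
 * ODATARx, the loop writes t[15]..t[0] into TWRx. ALPHAR is seeded
 * with the constant 1, which appears to let the IP derive the
 * successive per-block tweak multiplications itself.
 */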
1877 
1878 static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
1879 				unsigned int keylen)
1880 {
1881 	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1882 	int err;
1883 
1884 	err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
1885 	if (err)
1886 		return err;
1887 
1888 	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
1889 	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
1890 				  CRYPTO_TFM_REQ_MASK);
1891 	err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
1892 	if (err)
1893 		return err;
1894 
1895 	memcpy(ctx->base.key, key, keylen/2);
1896 	memcpy(ctx->key2, key + keylen/2, keylen/2);
1897 	ctx->base.keylen = keylen/2;
1898 
1899 	return 0;
1900 }
1901 
1902 static int atmel_aes_xts_encrypt(struct skcipher_request *req)
1903 {
1904 	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
1905 }
1906 
1907 static int atmel_aes_xts_decrypt(struct skcipher_request *req)
1908 {
1909 	return atmel_aes_crypt(req, AES_FLAGS_XTS);
1910 }
1911 
1912 static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
1913 {
1914 	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1915 	struct atmel_aes_dev *dd;
1916 	const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1917 
1918 	dd = atmel_aes_dev_alloc(&ctx->base);
1919 	if (!dd)
1920 		return -ENODEV;
1921 
1922 	ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
1923 						  CRYPTO_ALG_NEED_FALLBACK);
1924 	if (IS_ERR(ctx->fallback_tfm))
1925 		return PTR_ERR(ctx->fallback_tfm);
1926 
1927 	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
1928 				    crypto_skcipher_reqsize(ctx->fallback_tfm));
1929 	ctx->base.dd = dd;
1930 	ctx->base.dd->ctx = &ctx->base;
1931 	ctx->base.start = atmel_aes_xts_start;
1932 
1933 	return 0;
1934 }
1935 
1936 static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
1937 {
1938 	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1939 
1940 	crypto_free_skcipher(ctx->fallback_tfm);
1941 }
1942 
1943 static struct skcipher_alg aes_xts_alg = {
1944 	.base.cra_name		= "xts(aes)",
1945 	.base.cra_driver_name	= "atmel-xts-aes",
1946 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1947 	.base.cra_ctxsize	= sizeof(struct atmel_aes_xts_ctx),
1948 	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
1949 
1950 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
1951 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
1952 	.ivsize			= AES_BLOCK_SIZE,
1953 	.setkey			= atmel_aes_xts_setkey,
1954 	.encrypt		= atmel_aes_xts_encrypt,
1955 	.decrypt		= atmel_aes_xts_decrypt,
1956 	.init			= atmel_aes_xts_init_tfm,
1957 	.exit			= atmel_aes_xts_exit_tfm,
1958 };
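
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * client reaches the algorithm above through the generic skcipher API.
 * src_sg, dst_sg, len and done_cb below are placeholders.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	u8 key[32], iv[AES_BLOCK_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	skcipher_request_set_callback(req, 0, done_cb, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *
 * crypto_skcipher_encrypt() may return -EINPROGRESS; completion is
 * then reported through done_cb.  The core selects "atmel-xts-aes"
 * whenever its priority beats the other registered xts(aes)
 * implementations.
 */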
1959 
1960 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
1961 /* authenc aead functions */
1962 
1963 static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
1964 static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
1965 				  bool is_async);
1966 static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
1967 				      bool is_async);
1968 static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
1969 static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
1970 				   bool is_async);
1971 
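/*
 * An authenc(hmac(shaX),cbc(aes)) request walks the following chain of
 * resume callbacks, bouncing between the AES and SHA drivers:
 *
 *   atmel_aes_authenc_start():    init the AES hw, schedule the SHA hw
 *   atmel_aes_authenc_init():     own the SHA hw, feed it the assoc data
 *   atmel_aes_authenc_transfer(): run cbc(aes) over the payload (PLIP)
 *   atmel_aes_authenc_digest():   hand the SHA hw back, get the digest
 *   atmel_aes_authenc_final():    store (enc) or verify (dec) the tag
 */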
1972 static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
1973 {
1974 	struct aead_request *req = aead_request_cast(dd->areq);
1975 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1976 
1977 	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
1978 		atmel_sha_authenc_abort(&rctx->auth_req);
1979 	dd->flags &= ~AES_FLAGS_OWN_SHA;
1980 }
1981 
1982 static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
1983 {
1984 	struct aead_request *req = aead_request_cast(dd->areq);
1985 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1986 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1987 	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
1988 	int err;
1989 
1990 	atmel_aes_set_mode(dd, &rctx->base);
1991 
1992 	err = atmel_aes_hw_init(dd);
1993 	if (err)
1994 		return atmel_aes_complete(dd, err);
1995 
1996 	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
1997 					  atmel_aes_authenc_init, dd);
1998 }
1999 
2000 static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
2001 				  bool is_async)
2002 {
2003 	struct aead_request *req = aead_request_cast(dd->areq);
2004 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
2005 
2006 	if (is_async)
2007 		dd->is_async = true;
2008 	if (err)
2009 		return atmel_aes_complete(dd, err);
2010 
	/* If we reach this point, we own the SHA device. */
2012 	dd->flags |= AES_FLAGS_OWN_SHA;
2013 
2014 	/* Configure the SHA device. */
2015 	return atmel_sha_authenc_init(&rctx->auth_req,
2016 				      req->src, req->assoclen,
2017 				      rctx->textlen,
2018 				      atmel_aes_authenc_transfer, dd);
2019 }
2020 
2021 static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
2022 				      bool is_async)
2023 {
2024 	struct aead_request *req = aead_request_cast(dd->areq);
2025 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
2026 	bool enc = atmel_aes_is_encrypt(dd);
2027 	struct scatterlist *src, *dst;
2028 	__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
2029 	u32 emr;
2030 
2031 	if (is_async)
2032 		dd->is_async = true;
2033 	if (err)
2034 		return atmel_aes_complete(dd, err);
2035 
2036 	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
2037 	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
2038 	dst = src;
2039 
2040 	if (req->src != req->dst)
2041 		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
2042 
2043 	/* Configure the AES device. */
2044 	memcpy(iv, req->iv, sizeof(iv));
2045 
2046 	/*
2047 	 * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
2048 	 * 'true' even if the data transfer is actually performed by the CPU (so
2049 	 * not by the DMA) because we must force the AES_MR_SMOD bitfield to the
2050 	 * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD
2051 	 * must be set to *_MR_SMOD_IDATAR0.
2052 	 */
2053 	atmel_aes_write_ctrl(dd, true, iv);
2054 	emr = AES_EMR_PLIPEN;
2055 	if (!enc)
2056 		emr |= AES_EMR_PLIPD;
2057 	atmel_aes_write(dd, AES_EMR, emr);
2058 
2059 	/* Transfer data. */
2060 	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
2061 				   atmel_aes_authenc_digest);
2062 }
2063 
2064 static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
2065 {
2066 	struct aead_request *req = aead_request_cast(dd->areq);
2067 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
2068 
2069 	/* atmel_sha_authenc_final() releases the SHA device. */
2070 	dd->flags &= ~AES_FLAGS_OWN_SHA;
2071 	return atmel_sha_authenc_final(&rctx->auth_req,
2072 				       rctx->digest, sizeof(rctx->digest),
2073 				       atmel_aes_authenc_final, dd);
2074 }
2075 
2076 static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
2077 				   bool is_async)
2078 {
2079 	struct aead_request *req = aead_request_cast(dd->areq);
2080 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
2081 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2082 	bool enc = atmel_aes_is_encrypt(dd);
2083 	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
2084 	u32 offs, authsize;
2085 
2086 	if (is_async)
2087 		dd->is_async = true;
2088 	if (err)
2089 		goto complete;
2090 
2091 	offs = req->assoclen + rctx->textlen;
2092 	authsize = crypto_aead_authsize(tfm);
2093 	if (enc) {
2094 		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
2095 	} else {
2096 		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
2097 		if (crypto_memneq(idigest, odigest, authsize))
2098 			err = -EBADMSG;
2099 	}
2100 
2101 complete:
2102 	return atmel_aes_complete(dd, err);
2103 }
2104 
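/*
 * The key passed to .setkey is the generic authenc blob from
 * <crypto/authenc.h>: an rtattr-framed crypto_authenc_key_param
 * carrying the big-endian encryption key length, followed by the
 * authentication key and then the encryption key.
 * crypto_authenc_extractkeys() performs the parsing and splitting.
 */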
2105 static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
2106 				    unsigned int keylen)
2107 {
2108 	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
2109 	struct crypto_authenc_keys keys;
2110 	int err;
2111 
2112 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
2113 		goto badkey;
2114 
2115 	if (keys.enckeylen > sizeof(ctx->base.key))
2116 		goto badkey;
2117 
2118 	/* Save auth key. */
2119 	err = atmel_sha_authenc_setkey(ctx->auth,
2120 				       keys.authkey, keys.authkeylen,
2121 				       crypto_aead_get_flags(tfm));
2122 	if (err) {
2123 		memzero_explicit(&keys, sizeof(keys));
2124 		return err;
2125 	}
2126 
2127 	/* Save enc key. */
2128 	ctx->base.keylen = keys.enckeylen;
2129 	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);
2130 
2131 	memzero_explicit(&keys, sizeof(keys));
2132 	return 0;
2133 
2134 badkey:
2135 	memzero_explicit(&keys, sizeof(keys));
2136 	return -EINVAL;
2137 }
2138 
2139 static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
2140 				      unsigned long auth_mode)
2141 {
2142 	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
2143 	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
2144 	struct atmel_aes_dev *dd;
2145 
2146 	dd = atmel_aes_dev_alloc(&ctx->base);
2147 	if (!dd)
2148 		return -ENODEV;
2149 
2150 	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
2151 	if (IS_ERR(ctx->auth))
2152 		return PTR_ERR(ctx->auth);
2153 
2154 	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
2155 				      auth_reqsize));
2156 	ctx->base.dd = dd;
2157 	ctx->base.dd->ctx = &ctx->base;
2158 	ctx->base.start = atmel_aes_authenc_start;
2159 
2160 	return 0;
2161 }
2162 
2163 static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
2164 {
2165 	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
2166 }
2167 
2168 static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
2169 {
2170 	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
2171 }
2172 
2173 static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
2174 {
2175 	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
2176 }
2177 
2178 static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
2179 {
2180 	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
2181 }
2182 
2183 static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
2184 {
2185 	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
2186 }
2187 
2188 static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
2189 {
2190 	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
2191 
2192 	atmel_sha_authenc_free(ctx->auth);
2193 }
2194 
2195 static int atmel_aes_authenc_crypt(struct aead_request *req,
2196 				   unsigned long mode)
2197 {
2198 	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
2199 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2200 	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
2201 	u32 authsize = crypto_aead_authsize(tfm);
2202 	bool enc = (mode & AES_FLAGS_ENCRYPT);
2203 
2204 	/* Compute text length. */
2205 	if (!enc && req->cryptlen < authsize)
2206 		return -EINVAL;
2207 	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
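	/*
	 * Illustrative example: when decrypting 48 bytes of input with
	 * hmac(sha256) and the default 32-byte tag, only textlen = 16
	 * bytes are actual ciphertext; the trailing tag is verified in
	 * atmel_aes_authenc_final().
	 */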
2208 
	/*
	 * Empty messages are not supported yet: the SHA auto-padding can
	 * be used only on non-empty messages, so a dedicated special case
	 * would be needed to handle them.
	 */
2214 	if (!rctx->textlen && !req->assoclen)
2215 		return -EINVAL;
2216 
2217 	rctx->base.mode = mode;
2218 	ctx->block_size = AES_BLOCK_SIZE;
2219 	ctx->is_aead = true;
2220 
2221 	return atmel_aes_handle_queue(ctx->dd, &req->base);
2222 }
2223 
2224 static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
2225 {
2226 	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
2227 }
2228 
2229 static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
2230 {
2231 	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
2232 }
2233 
2234 static struct aead_alg aes_authenc_algs[] = {
2235 {
2236 	.setkey		= atmel_aes_authenc_setkey,
2237 	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2238 	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2239 	.init		= atmel_aes_authenc_hmac_sha1_init_tfm,
2240 	.exit		= atmel_aes_authenc_exit_tfm,
2241 	.ivsize		= AES_BLOCK_SIZE,
2242 	.maxauthsize	= SHA1_DIGEST_SIZE,
2243 
2244 	.base = {
2245 		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
2246 		.cra_driver_name	= "atmel-authenc-hmac-sha1-cbc-aes",
2247 		.cra_blocksize		= AES_BLOCK_SIZE,
2248 		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2249 	},
2250 },
2251 {
2252 	.setkey		= atmel_aes_authenc_setkey,
2253 	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2254 	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2255 	.init		= atmel_aes_authenc_hmac_sha224_init_tfm,
2256 	.exit		= atmel_aes_authenc_exit_tfm,
2257 	.ivsize		= AES_BLOCK_SIZE,
2258 	.maxauthsize	= SHA224_DIGEST_SIZE,
2259 
2260 	.base = {
2261 		.cra_name		= "authenc(hmac(sha224),cbc(aes))",
2262 		.cra_driver_name	= "atmel-authenc-hmac-sha224-cbc-aes",
2263 		.cra_blocksize		= AES_BLOCK_SIZE,
2264 		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2265 	},
2266 },
2267 {
2268 	.setkey		= atmel_aes_authenc_setkey,
2269 	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2270 	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2271 	.init		= atmel_aes_authenc_hmac_sha256_init_tfm,
2272 	.exit		= atmel_aes_authenc_exit_tfm,
2273 	.ivsize		= AES_BLOCK_SIZE,
2274 	.maxauthsize	= SHA256_DIGEST_SIZE,
2275 
2276 	.base = {
2277 		.cra_name		= "authenc(hmac(sha256),cbc(aes))",
2278 		.cra_driver_name	= "atmel-authenc-hmac-sha256-cbc-aes",
2279 		.cra_blocksize		= AES_BLOCK_SIZE,
2280 		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2281 	},
2282 },
2283 {
2284 	.setkey		= atmel_aes_authenc_setkey,
2285 	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2286 	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2287 	.init		= atmel_aes_authenc_hmac_sha384_init_tfm,
2288 	.exit		= atmel_aes_authenc_exit_tfm,
2289 	.ivsize		= AES_BLOCK_SIZE,
2290 	.maxauthsize	= SHA384_DIGEST_SIZE,
2291 
2292 	.base = {
2293 		.cra_name		= "authenc(hmac(sha384),cbc(aes))",
2294 		.cra_driver_name	= "atmel-authenc-hmac-sha384-cbc-aes",
2295 		.cra_blocksize		= AES_BLOCK_SIZE,
2296 		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2297 	},
2298 },
2299 {
2300 	.setkey		= atmel_aes_authenc_setkey,
2301 	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
2302 	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
2303 	.init		= atmel_aes_authenc_hmac_sha512_init_tfm,
2304 	.exit		= atmel_aes_authenc_exit_tfm,
2305 	.ivsize		= AES_BLOCK_SIZE,
2306 	.maxauthsize	= SHA512_DIGEST_SIZE,
2307 
2308 	.base = {
2309 		.cra_name		= "authenc(hmac(sha512),cbc(aes))",
2310 		.cra_driver_name	= "atmel-authenc-hmac-sha512-cbc-aes",
2311 		.cra_blocksize		= AES_BLOCK_SIZE,
2312 		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
2313 	},
2314 },
2315 };
2316 #endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
2317 
2318 /* Probe functions */
2319 
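/*
 * Allocate the bounce buffer used when request buffers cannot be fed
 * to the engine directly: four contiguous pages (order 2) whose usable
 * length is rounded down to a multiple of the AES block size (a no-op
 * here, since PAGE_SIZE is already block-aligned, but kept as a guard).
 */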
2320 static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
2321 {
2322 	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
2323 	dd->buflen = ATMEL_AES_BUFFER_SIZE;
2324 	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
2325 
2326 	if (!dd->buf) {
2327 		dev_err(dd->dev, "unable to alloc pages.\n");
2328 		return -ENOMEM;
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
2335 {
	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
2337 }
2338 
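/*
 * The "tx" and "rx" lookups below must match the dma-names property of
 * the device-tree node: "tx" moves data toward the engine, "rx" pulls
 * the results back out.
 */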
2339 static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
2340 {
2341 	int ret;
2342 
2343 	/* Try to grab 2 DMA channels */
2344 	dd->src.chan = dma_request_chan(dd->dev, "tx");
2345 	if (IS_ERR(dd->src.chan)) {
2346 		ret = PTR_ERR(dd->src.chan);
2347 		goto err_dma_in;
2348 	}
2349 
2350 	dd->dst.chan = dma_request_chan(dd->dev, "rx");
2351 	if (IS_ERR(dd->dst.chan)) {
2352 		ret = PTR_ERR(dd->dst.chan);
2353 		goto err_dma_out;
2354 	}
2355 
2356 	return 0;
2357 
2358 err_dma_out:
2359 	dma_release_channel(dd->src.chan);
2360 err_dma_in:
2361 	dev_err(dd->dev, "no DMA channel available\n");
2362 	return ret;
2363 }
2364 
2365 static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
2366 {
2367 	dma_release_channel(dd->dst.chan);
2368 	dma_release_channel(dd->src.chan);
2369 }
2370 
2371 static void atmel_aes_queue_task(unsigned long data)
2372 {
2373 	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
2374 
2375 	atmel_aes_handle_queue(dd, NULL);
2376 }
2377 
2378 static void atmel_aes_done_task(unsigned long data)
2379 {
2380 	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
2381 
2382 	dd->is_async = true;
2383 	(void)dd->resume(dd);
2384 }
2385 
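/*
 * Interrupt handler: only status bits that are both pending (ISR) and
 * enabled (IMR) are treated as ours, since the line may be shared.
 * Matching sources are masked through IDR and the actual work is
 * deferred to the done tasklet.
 */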
2386 static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
2387 {
2388 	struct atmel_aes_dev *aes_dd = dev_id;
2389 	u32 reg;
2390 
2391 	reg = atmel_aes_read(aes_dd, AES_ISR);
2392 	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
2393 		atmel_aes_write(aes_dd, AES_IDR, reg);
2394 		if (AES_FLAGS_BUSY & aes_dd->flags)
2395 			tasklet_schedule(&aes_dd->done_task);
2396 		else
			dev_warn(aes_dd->dev, "AES interrupt with no active request.\n");
2398 		return IRQ_HANDLED;
2399 	}
2400 
2401 	return IRQ_NONE;
2402 }
2403 
2404 static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
2405 {
2406 	int i;
2407 
2408 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2409 	if (dd->caps.has_authenc)
2410 		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
2411 			crypto_unregister_aead(&aes_authenc_algs[i]);
2412 #endif
2413 
2414 	if (dd->caps.has_xts)
2415 		crypto_unregister_skcipher(&aes_xts_alg);
2416 
2417 	if (dd->caps.has_gcm)
2418 		crypto_unregister_aead(&aes_gcm_alg);
2419 
2420 	if (dd->caps.has_cfb64)
2421 		crypto_unregister_skcipher(&aes_cfb64_alg);
2422 
2423 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
2424 		crypto_unregister_skcipher(&aes_algs[i]);
2425 }
2426 
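/*
 * Common settings for every algorithm registered by this driver: all
 * are advertised as asynchronous, and cra_alignmask = 0xf makes the
 * crypto API hand over 16-byte aligned buffers (presumably to suit the
 * engine's block-sized register accesses).
 */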
2427 static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
2428 {
2429 	alg->cra_flags |= CRYPTO_ALG_ASYNC;
2430 	alg->cra_alignmask = 0xf;
2431 	alg->cra_priority = ATMEL_AES_PRIORITY;
2432 	alg->cra_module = THIS_MODULE;
2433 }
2434 
2435 static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
2436 {
2437 	int err, i, j;
2438 
2439 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
2440 		atmel_aes_crypto_alg_init(&aes_algs[i].base);
2441 
2442 		err = crypto_register_skcipher(&aes_algs[i]);
2443 		if (err)
2444 			goto err_aes_algs;
2445 	}
2446 
2447 	if (dd->caps.has_cfb64) {
2448 		atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
2449 
2450 		err = crypto_register_skcipher(&aes_cfb64_alg);
2451 		if (err)
2452 			goto err_aes_cfb64_alg;
2453 	}
2454 
2455 	if (dd->caps.has_gcm) {
2456 		atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
2457 
2458 		err = crypto_register_aead(&aes_gcm_alg);
2459 		if (err)
2460 			goto err_aes_gcm_alg;
2461 	}
2462 
2463 	if (dd->caps.has_xts) {
2464 		atmel_aes_crypto_alg_init(&aes_xts_alg.base);
2465 
2466 		err = crypto_register_skcipher(&aes_xts_alg);
2467 		if (err)
2468 			goto err_aes_xts_alg;
2469 	}
2470 
2471 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2472 	if (dd->caps.has_authenc) {
2473 		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
2474 			atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
2475 
2476 			err = crypto_register_aead(&aes_authenc_algs[i]);
2477 			if (err)
2478 				goto err_aes_authenc_alg;
2479 		}
2480 	}
2481 #endif
2482 
2483 	return 0;
2484 
2485 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2486 	/* i = ARRAY_SIZE(aes_authenc_algs); */
2487 err_aes_authenc_alg:
2488 	for (j = 0; j < i; j++)
2489 		crypto_unregister_aead(&aes_authenc_algs[j]);
2490 	crypto_unregister_skcipher(&aes_xts_alg);
2491 #endif
2492 err_aes_xts_alg:
2493 	crypto_unregister_aead(&aes_gcm_alg);
2494 err_aes_gcm_alg:
2495 	crypto_unregister_skcipher(&aes_cfb64_alg);
2496 err_aes_cfb64_alg:
2497 	i = ARRAY_SIZE(aes_algs);
2498 err_aes_algs:
2499 	for (j = 0; j < i; j++)
2500 		crypto_unregister_skcipher(&aes_algs[j]);
2501 
2502 	return err;
2503 }
2504 
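/*
 * Capability matrix by IP major revision, as encoded in the switch
 * below (unknown revisions fall back to the 0x120 feature set):
 *
 *   rev    dualbuff  cfb64  gcm  xts  authenc  max_burst_size
 *   0x500    yes      yes   yes  yes    yes          4
 *   0x200    yes      yes   yes   no     no          4
 *   0x130    yes      yes    no   no     no          4
 *   0x120     no       no    no   no     no          1
 */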
2505 static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
2506 {
2507 	dd->caps.has_dualbuff = 0;
2508 	dd->caps.has_cfb64 = 0;
2509 	dd->caps.has_gcm = 0;
2510 	dd->caps.has_xts = 0;
2511 	dd->caps.has_authenc = 0;
2512 	dd->caps.max_burst_size = 1;
2513 
2514 	/* keep only major version number */
2515 	switch (dd->hw_version & 0xff0) {
2516 	case 0x500:
2517 		dd->caps.has_dualbuff = 1;
2518 		dd->caps.has_cfb64 = 1;
2519 		dd->caps.has_gcm = 1;
2520 		dd->caps.has_xts = 1;
2521 		dd->caps.has_authenc = 1;
2522 		dd->caps.max_burst_size = 4;
2523 		break;
2524 	case 0x200:
2525 		dd->caps.has_dualbuff = 1;
2526 		dd->caps.has_cfb64 = 1;
2527 		dd->caps.has_gcm = 1;
2528 		dd->caps.max_burst_size = 4;
2529 		break;
2530 	case 0x130:
2531 		dd->caps.has_dualbuff = 1;
2532 		dd->caps.has_cfb64 = 1;
2533 		dd->caps.max_burst_size = 4;
2534 		break;
2535 	case 0x120:
2536 		break;
2537 	default:
		dev_warn(dd->dev,
			 "Unknown AES hardware version, using minimum capabilities\n");
2540 		break;
2541 	}
2542 }
2543 
2544 #if defined(CONFIG_OF)
2545 static const struct of_device_id atmel_aes_dt_ids[] = {
2546 	{ .compatible = "atmel,at91sam9g46-aes" },
2547 	{ /* sentinel */ }
2548 };
2549 MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
2550 #endif
2551 
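/*
 * Probe sequence: software state first (tasklets, request queue), then
 * MMIO, IRQ and clock resources, hardware version and capability
 * detection, the bounce buffer, the DMA channels, and finally the
 * algorithm registration.  The error labels unwind in exact reverse
 * order.
 */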
2552 static int atmel_aes_probe(struct platform_device *pdev)
2553 {
2554 	struct atmel_aes_dev *aes_dd;
2555 	struct device *dev = &pdev->dev;
2556 	struct resource *aes_res;
2557 	int err;
2558 
2559 	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
2560 	if (!aes_dd)
2561 		return -ENOMEM;
2562 
2563 	aes_dd->dev = dev;
2564 
2565 	platform_set_drvdata(pdev, aes_dd);
2566 
2567 	INIT_LIST_HEAD(&aes_dd->list);
2568 	spin_lock_init(&aes_dd->lock);
2569 
2570 	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
2571 					(unsigned long)aes_dd);
2572 	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
2573 					(unsigned long)aes_dd);
2574 
2575 	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
2576 
2577 	/* Get the base address */
2578 	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2579 	if (!aes_res) {
2580 		dev_err(dev, "no MEM resource info\n");
2581 		err = -ENODEV;
2582 		goto err_tasklet_kill;
2583 	}
2584 	aes_dd->phys_base = aes_res->start;
2585 
2586 	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
2588 	if (aes_dd->irq < 0) {
2589 		err = aes_dd->irq;
2590 		goto err_tasklet_kill;
2591 	}
2592 
2593 	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
2594 			       IRQF_SHARED, "atmel-aes", aes_dd);
2595 	if (err) {
2596 		dev_err(dev, "unable to request aes irq.\n");
2597 		goto err_tasklet_kill;
2598 	}
2599 
	/* Initialize the clock */
2601 	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
2602 	if (IS_ERR(aes_dd->iclk)) {
2603 		dev_err(dev, "clock initialization failed.\n");
2604 		err = PTR_ERR(aes_dd->iclk);
2605 		goto err_tasklet_kill;
2606 	}
2607 
2608 	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
2609 	if (IS_ERR(aes_dd->io_base)) {
2610 		dev_err(dev, "can't ioremap\n");
2611 		err = PTR_ERR(aes_dd->io_base);
2612 		goto err_tasklet_kill;
2613 	}
2614 
2615 	err = clk_prepare(aes_dd->iclk);
2616 	if (err)
2617 		goto err_tasklet_kill;
2618 
2619 	err = atmel_aes_hw_version_init(aes_dd);
2620 	if (err)
2621 		goto err_iclk_unprepare;
2622 
2623 	atmel_aes_get_cap(aes_dd);
2624 
2625 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2626 	if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
2627 		err = -EPROBE_DEFER;
2628 		goto err_iclk_unprepare;
2629 	}
2630 #endif
2631 
2632 	err = atmel_aes_buff_init(aes_dd);
2633 	if (err)
2634 		goto err_iclk_unprepare;
2635 
2636 	err = atmel_aes_dma_init(aes_dd);
2637 	if (err)
2638 		goto err_buff_cleanup;
2639 
2640 	spin_lock(&atmel_aes.lock);
2641 	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
2642 	spin_unlock(&atmel_aes.lock);
2643 
2644 	err = atmel_aes_register_algs(aes_dd);
2645 	if (err)
2646 		goto err_algs;
2647 
2648 	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
2649 			dma_chan_name(aes_dd->src.chan),
2650 			dma_chan_name(aes_dd->dst.chan));
2651 
2652 	return 0;
2653 
2654 err_algs:
2655 	spin_lock(&atmel_aes.lock);
2656 	list_del(&aes_dd->list);
2657 	spin_unlock(&atmel_aes.lock);
2658 	atmel_aes_dma_cleanup(aes_dd);
2659 err_buff_cleanup:
2660 	atmel_aes_buff_cleanup(aes_dd);
2661 err_iclk_unprepare:
2662 	clk_unprepare(aes_dd->iclk);
2663 err_tasklet_kill:
2664 	tasklet_kill(&aes_dd->done_task);
2665 	tasklet_kill(&aes_dd->queue_task);
2666 
2667 	return err;
2668 }
2669 
2670 static int atmel_aes_remove(struct platform_device *pdev)
2671 {
2672 	struct atmel_aes_dev *aes_dd;
2673 
2674 	aes_dd = platform_get_drvdata(pdev);
2675 	if (!aes_dd)
2676 		return -ENODEV;
2677 	spin_lock(&atmel_aes.lock);
2678 	list_del(&aes_dd->list);
2679 	spin_unlock(&atmel_aes.lock);
2680 
2681 	atmel_aes_unregister_algs(aes_dd);
2682 
2683 	tasklet_kill(&aes_dd->done_task);
2684 	tasklet_kill(&aes_dd->queue_task);
2685 
2686 	atmel_aes_dma_cleanup(aes_dd);
2687 	atmel_aes_buff_cleanup(aes_dd);
2688 
2689 	clk_unprepare(aes_dd->iclk);
2690 
2691 	return 0;
2692 }
2693 
2694 static struct platform_driver atmel_aes_driver = {
2695 	.probe		= atmel_aes_probe,
2696 	.remove		= atmel_aes_remove,
2697 	.driver		= {
2698 		.name	= "atmel_aes",
2699 		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
2700 	},
2701 };
2702 
2703 module_platform_driver(atmel_aes_driver);
2704 
2705 MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
2706 MODULE_LICENSE("GPL v2");
2707 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
2708