xref: /linux/drivers/crypto/talitos.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * talitos - Freescale Integrated Security Engine (SEC) device driver
4  *
5  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6  *
7  * Scatterlist Crypto API glue code copied from files with the following:
8  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9  *
10  * Crypto algorithm registration code copied from hifn driver:
11  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12  * All rights reserved.
13  */
14 
15 #include <linux/workqueue.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/mod_devicetable.h>
19 #include <linux/device.h>
20 #include <linux/interrupt.h>
21 #include <linux/crypto.h>
22 #include <linux/hw_random.h>
23 #include <linux/of.h>
24 #include <linux/of_irq.h>
25 #include <linux/platform_device.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/io.h>
28 #include <linux/spinlock.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/slab.h>
31 
32 #include <crypto/algapi.h>
33 #include <crypto/aes.h>
34 #include <crypto/internal/des.h>
35 #include <crypto/sha1.h>
36 #include <crypto/sha2.h>
37 #include <crypto/md5.h>
38 #include <crypto/internal/aead.h>
39 #include <crypto/authenc.h>
40 #include <crypto/internal/skcipher.h>
41 #include <crypto/hash.h>
42 #include <crypto/internal/hash.h>
43 #include <crypto/scatterwalk.h>
44 
45 #include "talitos.h"
46 
47 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
48 			   unsigned int len, bool is_sec1)
49 {
50 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
51 	if (is_sec1) {
52 		ptr->len1 = cpu_to_be16(len);
53 	} else {
54 		ptr->len = cpu_to_be16(len);
55 		ptr->eptr = upper_32_bits(dma_addr);
56 	}
57 }
58 
59 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
60 			     struct talitos_ptr *src_ptr, bool is_sec1)
61 {
62 	dst_ptr->ptr = src_ptr->ptr;
63 	if (is_sec1) {
64 		dst_ptr->len1 = src_ptr->len1;
65 	} else {
66 		dst_ptr->len = src_ptr->len;
67 		dst_ptr->eptr = src_ptr->eptr;
68 	}
69 }
70 
71 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
72 					   bool is_sec1)
73 {
74 	if (is_sec1)
75 		return be16_to_cpu(ptr->len1);
76 	else
77 		return be16_to_cpu(ptr->len);
78 }
79 
80 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
81 				   bool is_sec1)
82 {
83 	if (!is_sec1)
84 		ptr->j_extent = val;
85 }
86 
87 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
88 {
89 	if (!is_sec1)
90 		ptr->j_extent |= val;
91 }
92 
93 /*
94  * map virtual single (contiguous) pointer to h/w descriptor pointer
95  */
96 static void __map_single_talitos_ptr(struct device *dev,
97 				     struct talitos_ptr *ptr,
98 				     unsigned int len, void *data,
99 				     enum dma_data_direction dir,
100 				     unsigned long attrs)
101 {
102 	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
103 	struct talitos_private *priv = dev_get_drvdata(dev);
104 	bool is_sec1 = has_ftr_sec1(priv);
105 
106 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
107 }
108 
/* map with default DMA attributes (CPU caches synced by the DMA API) */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}
116 
/*
 * map while skipping the CPU cache sync — for buffers the caller
 * synchronizes (or fills) separately
 */
static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}
125 
126 /*
127  * unmap bus single (contiguous) h/w descriptor pointer
128  */
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * only the low 32 address bits (ptr->ptr) are used for the unmap;
	 * NOTE(review): presumably eptr is never non-zero for mappings
	 * unmapped here — confirm against 36-bit DMA configurations
	 */
	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
139 
/*
 * Reset one channel and re-arm its control bits.
 * Returns 0 on success, -EIO if the reset bit never self-clears.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	/* the reset bit lives in different registers on SEC1 vs SEC2+ */
	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		/* busy-wait for the h/w to clear the self-resetting bit */
		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
		          TALITOS_CCCR_LO_IWSE);

	return 0;
}
182 
/*
 * Issue a master software reset and wait for it to complete.
 * Returns 0 on success, -EIO if the SWR bit never self-clears.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	/* SWR self-clears once the reset has taken effect */
	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	/* with a second IRQ line present, route channels across both */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
208 
209 /*
210  * Reset and initialize the device
211  */
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		/*
		 * NOTE(review): SEC1 uses clrbits32 where SEC2 uses
		 * setbits32, i.e. SEC1 IMR polarity appears inverted —
		 * consistent with DEF_TALITOS1_DONE below
		 */
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
		          TALITOS_MDEUICR_LO_ICE);

	return 0;
}
257 
258 /**
259  * talitos_submit - submits a descriptor to the device for processing
260  * @dev:	the SEC device to be used
261  * @ch:		the SEC device channel to be used
262  * @desc:	the descriptor to be processed by the device
263  * @callback:	whom to call when processing is complete
264  * @context:	a handle for use by caller (optional)
265  *
266  * desc must contain valid dma-mapped (bus physical) address pointers.
267  * callback must check err and feedback in descriptor header
268  * for device processing status.
269  */
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as the free-slot gate: 0 means fifo full */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/* SEC1 h/w consumes the descriptor starting at hdr1 */
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/*
	 * publish the entry only after its fields are visible;
	 * flush_channel() treats non-NULL ->desc as "slot in use"
	 */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
324 
325 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
326 {
327 	struct talitos_edesc *edesc;
328 
329 	if (!is_sec1)
330 		return request->desc->hdr;
331 
332 	if (!request->desc->next_desc)
333 		return request->desc->hdr1;
334 
335 	edesc = container_of(request->desc, struct talitos_edesc, desc);
336 
337 	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
338 }
339 
340 /*
341  * process what was done, notify callback of error if not
342  */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	/* walk the fifo from tail while slots are occupied (->desc != NULL) */
	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		/* drop the lock across the callback; reacquired below */
		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		/* re-read tail: it may have moved while the lock was dropped */
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
402 
403 /*
404  * process completed requests for channels that have done status
405  */
/*
 * SEC1 channel-done tasklet body.  The per-channel done bits tested below
 * (0x10000000, 0x40000000, 0x00010000, 0x00040000) are the SEC1 ISR bit
 * positions for channels 0-3.  Note the inverted IMR handling relative to
 * DEF_TALITOS2_DONE: on SEC1, clrbits32 is what re-enables (unmasks) the
 * done interrupts at exit.
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

/* one tasklet for all four channels, one for single-channel parts */
DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
432 
/*
 * SEC2+ channel-done tasklet body.  Each channel owns two ISR bits;
 * bit (2*ch) is the done bit tested below.  Done interrupts were masked
 * by the interrupt handler; setbits32 at exit unmasks them again.
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

/* variants for single-IRQ and dual-IRQ (channel pair) configurations */
DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
461 
462 /*
463  * locate current (offending) descriptor
464  */
/*
 * locate current (offending) descriptor
 */
static __be32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	/* CDPR/CDPR_LO hold the bus address of the descriptor in progress */
	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	/*
	 * scan the s/w fifo for a request whose mapped descriptor — or whose
	 * chained next_desc (next_desc is a 32-bit field, so cur_desc is
	 * truncated for that comparison) — matches the h/w pointer
	 */
	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			/* wrapped all the way around without a match */
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	/* chained case: the offending header is in the secondary descriptor */
	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
502 
503 /*
504  * user diagnostics; report root cause of error based on execution unit status
505  */
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	/* no header located? fall back to the channel's descriptor buffer */
	if (!desc_hdr)
		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));

	/* primary execution unit select field */
	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/*
		 * NOTE(review): KEU case reads the PKEU register block —
		 * matches the existing register mapping in this driver;
		 * confirm against the SEC reference manual
		 */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	/* secondary execution unit select field (only MDEU/CRCU reported) */
	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump the full 8-entry descriptor buffer for post-mortem analysis */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
577 
578 /*
579  * recover from error interrupts
580  */
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			/* SEC2+: error bit is the odd bit of each pair */
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		/* decode channel pointer status; some errors force a reset */
		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		/* fail outstanding requests and either reset or continue */
		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2 continuation: restart past the bad descriptor */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	/* global errors (or failed channel restart) require a full reinit */
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
676 
/*
 * SEC1 hard-IRQ handler body: ack the interrupt, dispatch errors to
 * talitos_error(), and on done-only interrupts mask further done IRQs
 * (SEC1: setbits32 masks) before scheduling the done tasklet.
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
711 
/*
 * SEC2+ hard-IRQ handler body: same structure as the SEC1 variant, but
 * with the opposite IMR polarity (clrbits32 masks done interrupts here).
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

/* single-IRQ handler plus channel-pair handlers for dual-IRQ parts */
DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
750 
751 /*
752  * hwrng
753  */
754 static int talitos_rng_data_present(struct hwrng *rng, int wait)
755 {
756 	struct device *dev = (struct device *)rng->priv;
757 	struct talitos_private *priv = dev_get_drvdata(dev);
758 	u32 ofl;
759 	int i;
760 
761 	for (i = 0; i < 20; i++) {
762 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
763 		      TALITOS_RNGUSR_LO_OFL;
764 		if (ofl || !wait)
765 			break;
766 		udelay(10);
767 	}
768 
769 	return !!ofl;
770 }
771 
/*
 * hwrng data_read callback.
 * The rng fifo requires 64-bit accesses, so both register halves are
 * read; the second read (the low word) overwrites *data, discarding the
 * high half of each 64-bit sample.
 */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
783 
/*
 * hwrng init callback: software-reset the RNGU and kick off generation.
 * Returns 0 on success, -ENODEV if the reset-done bit never appears.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	/* request software reset, then poll for the reset-done status bit */
	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
805 
806 static int talitos_register_rng(struct device *dev)
807 {
808 	struct talitos_private *priv = dev_get_drvdata(dev);
809 	int err;
810 
811 	priv->rng.name		= dev_driver_string(dev);
812 	priv->rng.init		= talitos_rng_init;
813 	priv->rng.data_present	= talitos_rng_data_present;
814 	priv->rng.data_read	= talitos_rng_data_read;
815 	priv->rng.priv		= (unsigned long)dev;
816 
817 	err = hwrng_register(&priv->rng);
818 	if (!err)
819 		priv->rng_registered = true;
820 
821 	return err;
822 }
823 
824 static void talitos_unregister_rng(struct device *dev)
825 {
826 	struct talitos_private *priv = dev_get_drvdata(dev);
827 
828 	if (!priv->rng_registered)
829 		return;
830 
831 	hwrng_unregister(&priv->rng);
832 	priv->rng_registered = false;
833 }
834 
835 /*
836  * crypto alg
837  */
838 #define TALITOS_CRA_PRIORITY		3000
839 /*
840  * Defines a priority for doing AEAD with descriptors type
841  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
842  */
843 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
844 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
845 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
846 #else
847 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
848 #endif
849 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
850 
/* per-transform (tfm) context */
struct talitos_ctx {
	struct device *dev;		/* SEC device servicing this tfm */
	int ch;				/* channel used for this tfm's requests */
	__be32 desc_hdr_template;	/* preformatted descriptor header */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* for AEAD: auth key then enc key
					 * (see aead_setkey) */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;		/* DMA mapping of key[], keylen bytes */
	unsigned int keylen;		/* total bytes mapped at dma_key */
	unsigned int enckeylen;		/* cipher-key portion of keylen */
	unsigned int authkeylen;	/* auth-key portion of keylen */
};
862 
863 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
864 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
865 
/* per-request ahash context */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];	/* MDEU digest state */
	unsigned int hw_context_size;	/* valid bytes in hw_context */
	u8 buf[2][HASH_MAX_BLOCK_SIZE];	/* double-buffered partial-block data */
	int buf_idx;			/* which half of buf[] is current */
	unsigned int swinit;		/* NOTE(review): presumably "context
					 * initialised by s/w" flag — confirm */
	unsigned int first_desc;
	unsigned int last_desc;
	unsigned int last_request;
	unsigned int to_hash_later;	/* bytes deferred to the next update */
	unsigned int nbuf;		/* bytes currently buffered in buf[] */
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;	/* source s/g list for this pass */

	/* SEC1 deferred-processing state (sec1_ahash_process_remaining) */
	struct scatterlist request_bufsl[2];
	struct ahash_request *areq;
	struct scatterlist *request_sl;
	unsigned int remaining_ahash_request_bytes;
	unsigned int current_ahash_request_bytes;
	struct work_struct sec1_ahash_process_remaining;
};
887 
/* serialized ahash state for the export/import crypto API */
struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];	/* MDEU digest state */
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* buffered partial-block data */
	unsigned int swinit;
	unsigned int first_desc;
	unsigned int last_desc;
	unsigned int to_hash_later;
	unsigned int nbuf;		/* valid bytes in buf[] */
};
897 
898 static int aead_setkey(struct crypto_aead *authenc,
899 		       const u8 *key, unsigned int keylen)
900 {
901 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
902 	struct device *dev = ctx->dev;
903 	struct crypto_authenc_keys keys;
904 
905 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
906 		goto badkey;
907 
908 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
909 		goto badkey;
910 
911 	if (ctx->keylen)
912 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
913 
914 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
915 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
916 
917 	ctx->keylen = keys.authkeylen + keys.enckeylen;
918 	ctx->enckeylen = keys.enckeylen;
919 	ctx->authkeylen = keys.authkeylen;
920 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
921 				      DMA_TO_DEVICE);
922 
923 	memzero_explicit(&keys, sizeof(keys));
924 	return 0;
925 
926 badkey:
927 	memzero_explicit(&keys, sizeof(keys));
928 	return -EINVAL;
929 }
930 
/*
 * AEAD setkey for 3DES suites: same as aead_setkey plus a weak-key /
 * length check on the DES3 portion via verify_aead_des3_key().
 */
static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto out;

	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
	if (err)
		goto out;

	/* drop the previous DMA mapping before rewriting the key material */
	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	/* scrub the on-stack key copies regardless of outcome */
	memzero_explicit(&keys, sizeof(keys));
	return err;
}
967 
/*
 * Undo the s/g mappings made for a request.  On SEC1, multi-entry
 * destination lists were serviced through the edesc bounce buffer, so
 * the result is copied back to the caller's s/g list here.
 */
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	/* SEC1 bounce-buffer copy-back for multi-entry destinations */
	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	/*
	 * on SEC1, multi-entry lists were not dma_map_sg'ed (the bounce
	 * buffer was used instead), hence the nents == 1 conditions
	 */
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}
995 
/*
 * Release all DMA resources held by an AEAD (ipsec_esp-style) descriptor
 * after completion or submit failure.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	/* on decrypt, req->cryptlen includes the trailing ICV */
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	/* cipher IV sits in ptr[2] for ipsec_esp descriptors, ptr[3] otherwise */
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	/* ptr[6] carries the IV-out mapping only in the ipsec_esp layout */
	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		/* non-ipsec mode: next IV is the last ivsize bytes of dst */
		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}
1027 
1028 /*
1029  * ipsec_esp descriptor callbacks
1030  */
1031 static void ipsec_esp_encrypt_done(struct device *dev,
1032 				   struct talitos_desc *desc, void *context,
1033 				   int err)
1034 {
1035 	struct aead_request *areq = context;
1036 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1037 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1038 	struct talitos_edesc *edesc;
1039 
1040 	edesc = container_of(desc, struct talitos_edesc, desc);
1041 
1042 	ipsec_esp_unmap(dev, edesc, areq, true);
1043 
1044 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1045 
1046 	kfree(edesc);
1047 
1048 	aead_request_complete(areq, err);
1049 }
1050 
/*
 * Completion callback for AEAD decryption when the ICV must be verified
 * in software: compare the hardware-generated ICV against the one stashed
 * from the input at submit time.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/*
		 * auth check: hw-generated ICV lives right after the link
		 * tables in edesc->buf; the stashed input ICV immediately
		 * precedes it.
		 */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		/* constant-time compare to avoid a timing side channel */
		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
1077 
1078 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1079 					  struct talitos_desc *desc,
1080 					  void *context, int err)
1081 {
1082 	struct aead_request *req = context;
1083 	struct talitos_edesc *edesc;
1084 
1085 	edesc = container_of(desc, struct talitos_edesc, desc);
1086 
1087 	ipsec_esp_unmap(dev, edesc, req, false);
1088 
1089 	/* check ICV auth status */
1090 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1091 		     DESC_HDR_LO_ICCR1_PASS))
1092 		err = -EBADMSG;
1093 
1094 	kfree(edesc);
1095 
1096 	aead_request_complete(req, err);
1097 }
1098 
1099 /*
1100  * convert scatterlist to SEC h/w link table format
1101  * stop at cryptlen bytes
1102  */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr, int align)
{
	/* allow one extra entry when an extent (elen) follows the data */
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	int cryptlen = datalen + elen;
	/* padding to round the total up to the requested alignment */
	int padding = ALIGN(cryptlen, align) - cryptlen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		/* skip whole entries until the starting offset is consumed */
		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		/*
		 * Split an entry that spans the data/extent boundary so
		 * the data part and the extent part get separate pointers.
		 */
		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		/* fold the alignment padding into the very last entry */
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}
1152 
/*
 * Point a descriptor entry at @src, either directly (single segment),
 * via the SEC1 bounce buffer, or through a h/w link table built at
 * @tbl_off.  @elen is extra extent bytes appended to the data, @force
 * forces link-table form, @align rounds the length up.  Returns the
 * number of table entries used (1 when pointed directly).
 */
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen,
			      bool force, int align)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int aligned_len = ALIGN(len, align);

	/* no source: emit a null pointer entry */
	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	/* single contiguous segment: point at it directly */
	if (sg_count == 1 && !force) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
		return sg_count;
	}
	/* SEC1 has no link tables: data was staged in the bounce buffer */
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
					 &edesc->link_tbl[tbl_off], align);
	if (sg_count == 1 && !force) {
		/* Only one segment now, so no link tbl needed*/
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	/* multiple segments: point the descriptor at the link table */
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}
1189 
/* common case wrapper: no extent, no forced link table, no alignment */
static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0, false, 1);
}
1198 
1199 /*
1200  * fill in and submit ipsec_esp descriptor
1201  */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	/* on decrypt, req->cryptlen includes the trailing ICV */
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	/* IV/key slots are swapped between the two descriptor layouts */
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
	/* generated ICV is stored at the very end of the link table area */
	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	/* SEC1 multi-entry src goes through the bounce buffer, not dma_map_sg */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen,
				 false, 1);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	/* outbound ipsec_esp appends the generated ICV to the ciphertext */
	if (is_ipsec_esp && encrypt)
		elen = authsize;
	else
		elen = 0;
	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
				 sg_count, areq->assoclen, tbl_off, elen,
				 is_ipsec_esp && !encrypt, 1);
	tbl_off += ret;

	if (!encrypt && is_ipsec_esp) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		/* Add an entry to the link table for ICV data */
		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
		sync_needed = true;
	} else if (!encrypt) {
		/* non-ipsec decrypt: hw writes its ICV to the scratch area */
		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
		sync_needed = true;
	} else if (!is_ipsec_esp) {
		/* non-ipsec encrypt: ICV goes straight after the ciphertext */
		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
			       sg_count, areq->assoclen + cryptlen, tbl_off);
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	/* flush the link table to the device before kicking the channel */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* submit failed: tear everything down ourselves */
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}
1324 
1325 /*
1326  * allocate and map the extended descriptor
1327  */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	/* sleepable allocation only when the caller permits it */
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Count scatterlist entries actually needed; nents of 0 encodes
	 * the single-entry (no link table) case throughout this driver.
	 */
	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src*/
		/* the ICV is consumed from src on decrypt, produced to dst on encrypt */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents || !encrypt) {
		if (is_sec1)
			/* SEC1 uses a flat bounce buffer instead of link tables */
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0) + authsize;
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
	}
	alloc_len += icv_stashing ? authsize : 0;

	/* if its a ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	/* the IV is copied to the tail of the allocation and mapped from there */
	alloc_len += ivsize;

	edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
1425 
1426 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1427 					      int icv_stashing, bool encrypt)
1428 {
1429 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1430 	unsigned int authsize = crypto_aead_authsize(authenc);
1431 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1432 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1433 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1434 
1435 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1436 				   iv, areq->assoclen, cryptlen,
1437 				   authsize, ivsize, icv_stashing,
1438 				   areq->base.flags, encrypt);
1439 }
1440 
1441 static int aead_encrypt(struct aead_request *req)
1442 {
1443 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1444 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1445 	struct talitos_edesc *edesc;
1446 
1447 	/* allocate extended descriptor */
1448 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1449 	if (IS_ERR(edesc))
1450 		return PTR_ERR(edesc);
1451 
1452 	/* set encrypt */
1453 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1454 
1455 	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1456 }
1457 
/*
 * AEAD .decrypt entry point: use the hardware ICV check when the device
 * and descriptor layout support it, otherwise verify the ICV in software
 * after completion.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor (with room to stash the input ICV) */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	icvdata = edesc->buf + edesc->dma_len;

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
1499 
/*
 * Install an skcipher key: unmap any previous key's DMA mapping, copy
 * the new key into the context, and map it for the device.
 */
static int skcipher_setkey(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct device *dev = ctx->dev;

	/* drop the mapping of any previously installed key first */
	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}
1516 
1517 static int skcipher_des_setkey(struct crypto_skcipher *cipher,
1518 				 const u8 *key, unsigned int keylen)
1519 {
1520 	return verify_skcipher_des_key(cipher, key) ?:
1521 	       skcipher_setkey(cipher, key, keylen);
1522 }
1523 
1524 static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
1525 				  const u8 *key, unsigned int keylen)
1526 {
1527 	return verify_skcipher_des3_key(cipher, key) ?:
1528 	       skcipher_setkey(cipher, key, keylen);
1529 }
1530 
1531 static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
1532 				  const u8 *key, unsigned int keylen)
1533 {
1534 	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1535 	    keylen == AES_KEYSIZE_256)
1536 		return skcipher_setkey(cipher, key, keylen);
1537 
1538 	return -EINVAL;
1539 }
1540 
/* release all DMA resources held by an skcipher descriptor */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct skcipher_request *areq)
{
	/* ptr[5] holds the IV-out mapping, ptr[1] the IV-in mapping */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1554 
/* completion callback for skcipher descriptors */
static void skcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct skcipher_request *areq = context;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	/* hand the output IV (written by the hw into ctx->iv) back to the caller */
	memcpy(areq->iv, ctx->iv, ivsize);

	kfree(edesc);

	skcipher_request_complete(areq, err);
}
1574 
/*
 * Fill in and submit an skcipher descriptor.  Descriptor slot layout:
 * [0] empty, [1] IV in, [2] key, [3] data in, [4] data out, [5] IV out,
 * [6] empty.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct skcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	/* AES-CTR needs 16-byte aligned lengths in the link table */
	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	/* SEC1 multi-entry src goes through the bounce buffer, not dma_map_sg */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
				      sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	/* dst link table starts after the src entries */
	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	/* flush link tables to the device before kicking the channel */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* submit failed: tear down ourselves */
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1647 
1648 static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
1649 						    areq, bool encrypt)
1650 {
1651 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1652 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1653 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1654 
1655 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1656 				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
1657 				   areq->base.flags, encrypt);
1658 }
1659 
1660 static int skcipher_encrypt(struct skcipher_request *areq)
1661 {
1662 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1663 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1664 	struct talitos_edesc *edesc;
1665 	unsigned int blocksize =
1666 			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1667 
1668 	if (!areq->cryptlen)
1669 		return 0;
1670 
1671 	if (areq->cryptlen % blocksize)
1672 		return -EINVAL;
1673 
1674 	/* allocate extended descriptor */
1675 	edesc = skcipher_edesc_alloc(areq, true);
1676 	if (IS_ERR(edesc))
1677 		return PTR_ERR(edesc);
1678 
1679 	/* set encrypt */
1680 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1681 
1682 	return common_nonsnoop(edesc, areq, skcipher_done);
1683 }
1684 
1685 static int skcipher_decrypt(struct skcipher_request *areq)
1686 {
1687 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1688 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1689 	struct talitos_edesc *edesc;
1690 	unsigned int blocksize =
1691 			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1692 
1693 	if (!areq->cryptlen)
1694 		return 0;
1695 
1696 	if (areq->cryptlen % blocksize)
1697 		return -EINVAL;
1698 
1699 	/* allocate extended descriptor */
1700 	edesc = skcipher_edesc_alloc(areq, false);
1701 	if (IS_ERR(edesc))
1702 		return PTR_ERR(edesc);
1703 
1704 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1705 
1706 	return common_nonsnoop(edesc, areq, skcipher_done);
1707 }
1708 
/*
 * Release all DMA resources held by an ahash descriptor, including the
 * optional chained second descriptor (desc2) used on SEC1 when both a
 * buffered partial block and new scatterlist data were hashed.
 */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	/* the second descriptor, if any, lives right after the link tables */
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	unmap_single_talitos_ptr(dev, &desc->ptr[5], DMA_FROM_DEVICE);
	/* desc2 may share ptr[5] with desc; only unmap it when distinct */
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
	/* on the final descriptor, hand the digest back to the caller */
	if (req_ctx->last_desc)
		memcpy(areq->result, req_ctx->hw_context,
		       crypto_ahash_digestsize(tfm));

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&desc->ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &desc->ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	/* SEC1 buffered partial block was mapped into ptr[3] */
	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (desc->next_desc)
		dma_unmap_single(dev, be32_to_cpu(desc->next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
1752 
/*
 * Completion callback for ahash descriptors.  Either finishes the
 * request or reschedules the remaining bytes via the workqueue.
 */
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last_desc && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	if (err) {
		ahash_request_complete(areq, err);
		return;
	}

	/* account for the chunk this descriptor just processed */
	req_ctx->remaining_ahash_request_bytes -=
		req_ctx->current_ahash_request_bytes;

	if (!req_ctx->remaining_ahash_request_bytes) {
		ahash_request_complete(areq, 0);
		return;
	}

	/* more request bytes pending: continue processing from a workqueue */
	schedule_work(&req_ctx->sec1_ahash_process_remaining);
}
1786 
1787 /*
1788  * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1789  * ourself and submit a padded block
1790  */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	/*
	 * A pre-padded empty message block (0x80 then zeros); not const
	 * because it is handed to the DMA API below.
	 */
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	/* we supply the padding, so tell the MDEU not to pad again */
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
1807 
/*
 * Fill in and submit an ahash descriptor.  Slot layout: [0] empty,
 * [1] hash context in, [2] HMAC key, [3] data in, [4] empty,
 * [5] digest / context out, [6] empty.  On SEC1, when both a buffered
 * partial block and fresh scatterlist data must be hashed, a second
 * chained descriptor (desc2) is built after the link tables.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first_desc || req_ctx->swinit) {
		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first_desc = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	/* SEC1: the buffered partial block is fed separately via ptr[3] */
	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last_desc)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       req_ctx->hw_context, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_FROM_DEVICE);

	/* last DWORD empty */

	/* SEC1 cannot hash an empty message; substitute a pre-padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		/* build the chained second descriptor after the link tables */
		struct talitos_desc *desc2 = (struct talitos_desc *)
					     (edesc->buf + edesc->dma_len);
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		/* first descriptor: no padding, continue into desc2, no IRQ */
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		/* desc2 hashes the scatterlist data (desc handled the buffer) */
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last_desc)
			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	/* flush link tables to the device before kicking the channel */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* submit failed: tear down ourselves */
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1933 
1934 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1935 					       unsigned int nbytes)
1936 {
1937 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1938 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1939 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1940 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1941 	bool is_sec1 = has_ftr_sec1(priv);
1942 
1943 	if (is_sec1)
1944 		nbytes -= req_ctx->nbuf;
1945 
1946 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1947 				   nbytes, 0, 0, 0, areq->base.flags, false);
1948 }
1949 
/*
 * Process one descriptor's worth of hash data for @areq.
 *
 * Short inputs (no more than one block including previously buffered data,
 * and not the final descriptor) are only copied into the context buffer and
 * 0 is returned.  Otherwise the full blocks are submitted to the engine and
 * any trailing partial block is saved for the next call; the return value
 * is then that of common_nonsnoop_hash() (-EINPROGRESS on successful
 * submission, or a negative error).
 *
 * SEC1 and later SECs differ in how buffered data is chained in: SEC2+ can
 * prepend it via a chained scatterlist, while SEC1 pads the context buffer
 * up to a block boundary by copying from the request scatterlist.
 */
static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	/* active half of the double buffer holding carried-over bytes */
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last_desc && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
		if (nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(req_ctx->request_sl, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	/* blocksize is a power of two, so this is nbytes_to_hash % blocksize */
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last_desc)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		/* SEC2+: prepend the buffered bytes via a chained sg list */
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, req_ctx->request_sl);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		int offset;

		/*
		 * SEC1: top the context buffer up to one whole block (or to
		 * the total length if everything fits), then advance the
		 * request scatterlist past the bytes just consumed.
		 */
		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(req_ctx->request_sl, offset);
		if (nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(req_ctx->request_sl, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, req_ctx->request_sl,
						 offset);
	} else
		req_ctx->psrc = req_ctx->request_sl;

	if (to_hash_later) {
		/* Stash the trailing partial block in the *other* buffer half;
		 * buf_idx is toggled when the descriptor completes. */
		nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
		if (nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(req_ctx->request_sl, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(req_ctx->areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last_desc)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first_desc && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first_desc || req_ctx->last_desc))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done);
}
2061 
2062 static void sec1_ahash_process_remaining(struct work_struct *work)
2063 {
2064 	struct talitos_ahash_req_ctx *req_ctx =
2065 		container_of(work, struct talitos_ahash_req_ctx,
2066 			     sec1_ahash_process_remaining);
2067 	int err = 0;
2068 
2069 	req_ctx->request_sl = scatterwalk_ffwd(req_ctx->request_bufsl,
2070 					       req_ctx->request_sl, TALITOS1_MAX_DATA_LEN);
2071 
2072 	if (req_ctx->remaining_ahash_request_bytes > TALITOS1_MAX_DATA_LEN)
2073 		req_ctx->current_ahash_request_bytes = TALITOS1_MAX_DATA_LEN;
2074 	else {
2075 		req_ctx->current_ahash_request_bytes =
2076 			req_ctx->remaining_ahash_request_bytes;
2077 
2078 		if (req_ctx->last_request)
2079 			req_ctx->last_desc = 1;
2080 	}
2081 
2082 	err = ahash_process_req_one(req_ctx->areq,
2083 				    req_ctx->current_ahash_request_bytes);
2084 
2085 	if (err != -EINPROGRESS)
2086 		ahash_request_complete(req_ctx->areq, err);
2087 }
2088 
2089 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2090 {
2091 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2092 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2093 	struct device *dev = ctx->dev;
2094 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2095 	struct talitos_private *priv = dev_get_drvdata(dev);
2096 	bool is_sec1 = has_ftr_sec1(priv);
2097 
2098 	req_ctx->areq = areq;
2099 	req_ctx->request_sl = areq->src;
2100 	req_ctx->remaining_ahash_request_bytes = nbytes;
2101 
2102 	if (is_sec1) {
2103 		if (nbytes > TALITOS1_MAX_DATA_LEN)
2104 			nbytes = TALITOS1_MAX_DATA_LEN;
2105 		else if (req_ctx->last_request)
2106 			req_ctx->last_desc = 1;
2107 	}
2108 
2109 	req_ctx->current_ahash_request_bytes = nbytes;
2110 
2111 	return ahash_process_req_one(req_ctx->areq,
2112 				     req_ctx->current_ahash_request_bytes);
2113 }
2114 
2115 static int ahash_init(struct ahash_request *areq)
2116 {
2117 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2118 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2119 	struct device *dev = ctx->dev;
2120 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2121 	unsigned int size;
2122 	dma_addr_t dma;
2123 
2124 	/* Initialize the context */
2125 	req_ctx->buf_idx = 0;
2126 	req_ctx->nbuf = 0;
2127 	req_ctx->first_desc = 1; /* first_desc indicates h/w must init its context */
2128 	req_ctx->swinit = 0; /* assume h/w init of context */
2129 	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2130 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2131 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2132 	req_ctx->hw_context_size = size;
2133 	req_ctx->last_request = 0;
2134 	req_ctx->last_desc = 0;
2135 	INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining);
2136 
2137 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2138 			     DMA_TO_DEVICE);
2139 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2140 
2141 	return 0;
2142 }
2143 
2144 /*
2145  * on h/w without explicit sha224 support, we initialize h/w context
2146  * manually with sha224 constants, and tell it to run sha256.
2147  */
2148 static int ahash_init_sha224_swinit(struct ahash_request *areq)
2149 {
2150 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2151 
2152 	req_ctx->hw_context[0] = SHA224_H0;
2153 	req_ctx->hw_context[1] = SHA224_H1;
2154 	req_ctx->hw_context[2] = SHA224_H2;
2155 	req_ctx->hw_context[3] = SHA224_H3;
2156 	req_ctx->hw_context[4] = SHA224_H4;
2157 	req_ctx->hw_context[5] = SHA224_H5;
2158 	req_ctx->hw_context[6] = SHA224_H6;
2159 	req_ctx->hw_context[7] = SHA224_H7;
2160 
2161 	/* init 64-bit count */
2162 	req_ctx->hw_context[8] = 0;
2163 	req_ctx->hw_context[9] = 0;
2164 
2165 	ahash_init(areq);
2166 	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
2167 
2168 	return 0;
2169 }
2170 
2171 static int ahash_update(struct ahash_request *areq)
2172 {
2173 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2174 
2175 	req_ctx->last_request = 0;
2176 
2177 	return ahash_process_req(areq, areq->nbytes);
2178 }
2179 
2180 static int ahash_final(struct ahash_request *areq)
2181 {
2182 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2183 
2184 	req_ctx->last_request = 1;
2185 
2186 	return ahash_process_req(areq, 0);
2187 }
2188 
2189 static int ahash_finup(struct ahash_request *areq)
2190 {
2191 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2192 
2193 	req_ctx->last_request = 1;
2194 
2195 	return ahash_process_req(areq, areq->nbytes);
2196 }
2197 
/* crypto_ahash .digest: one-shot hash, implemented as init + finup. */
static int ahash_digest(struct ahash_request *areq)
{
	ahash_init(areq);
	return ahash_finup(areq);
}
2203 
/* One-shot sha224 digest for h/w lacking sha224: s/w-init then finup. */
static int ahash_digest_sha224_swinit(struct ahash_request *areq)
{
	ahash_init_sha224_swinit(areq);
	return ahash_finup(areq);
}
2209 
2210 static int ahash_export(struct ahash_request *areq, void *out)
2211 {
2212 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2213 	struct talitos_export_state *export = out;
2214 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2215 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2216 	struct device *dev = ctx->dev;
2217 	dma_addr_t dma;
2218 
2219 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2220 			     DMA_FROM_DEVICE);
2221 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2222 
2223 	memcpy(export->hw_context, req_ctx->hw_context,
2224 	       req_ctx->hw_context_size);
2225 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2226 	export->swinit = req_ctx->swinit;
2227 	export->first_desc = req_ctx->first_desc;
2228 	export->last_desc = req_ctx->last_desc;
2229 	export->to_hash_later = req_ctx->to_hash_later;
2230 	export->nbuf = req_ctx->nbuf;
2231 
2232 	return 0;
2233 }
2234 
2235 static int ahash_import(struct ahash_request *areq, const void *in)
2236 {
2237 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2238 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2239 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2240 	struct device *dev = ctx->dev;
2241 	const struct talitos_export_state *export = in;
2242 	unsigned int size;
2243 	dma_addr_t dma;
2244 
2245 	memset(req_ctx, 0, sizeof(*req_ctx));
2246 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2247 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2248 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2249 	req_ctx->hw_context_size = size;
2250 	memcpy(req_ctx->hw_context, export->hw_context, size);
2251 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2252 	req_ctx->swinit = export->swinit;
2253 	req_ctx->first_desc = export->first_desc;
2254 	req_ctx->last_desc = export->last_desc;
2255 	req_ctx->to_hash_later = export->to_hash_later;
2256 	req_ctx->nbuf = export->nbuf;
2257 
2258 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2259 			     DMA_TO_DEVICE);
2260 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2261 
2262 	return 0;
2263 }
2264 
2265 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2266 		   u8 *hash)
2267 {
2268 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2269 
2270 	struct scatterlist sg[1];
2271 	struct ahash_request *req;
2272 	struct crypto_wait wait;
2273 	int ret;
2274 
2275 	crypto_init_wait(&wait);
2276 
2277 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2278 	if (!req)
2279 		return -ENOMEM;
2280 
2281 	/* Keep tfm keylen == 0 during hash of the long key */
2282 	ctx->keylen = 0;
2283 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2284 				   crypto_req_done, &wait);
2285 
2286 	sg_init_one(&sg[0], key, keylen);
2287 
2288 	ahash_request_set_crypt(req, sg, hash, keylen);
2289 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2290 
2291 	ahash_request_free(req);
2292 
2293 	return ret;
2294 }
2295 
2296 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2297 			unsigned int keylen)
2298 {
2299 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2300 	struct device *dev = ctx->dev;
2301 	unsigned int blocksize =
2302 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2303 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2304 	unsigned int keysize = keylen;
2305 	u8 hash[SHA512_DIGEST_SIZE];
2306 	int ret;
2307 
2308 	if (keylen <= blocksize)
2309 		memcpy(ctx->key, key, keysize);
2310 	else {
2311 		/* Must get the hash of the long key */
2312 		ret = keyhash(tfm, key, keylen, hash);
2313 
2314 		if (ret)
2315 			return -EINVAL;
2316 
2317 		keysize = digestsize;
2318 		memcpy(ctx->key, hash, digestsize);
2319 	}
2320 
2321 	if (ctx->keylen)
2322 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2323 
2324 	ctx->keylen = keysize;
2325 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2326 
2327 	return 0;
2328 }
2329 
2330 
/* Template describing one algorithm instance registered with the crypto API */
struct talitos_alg_template {
	u32 type;	/* CRYPTO_ALG_TYPE_* — selects which union member applies */
	u32 priority;	/* registration priority; 0 means use the driver default */
	union {
		struct skcipher_alg skcipher;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;	/* SEC descriptor header bits for this alg */
};
2341 
2342 static struct talitos_alg_template driver_algs[] = {
2343 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2344 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2345 		.alg.aead = {
2346 			.base = {
2347 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2348 				.cra_driver_name = "authenc-hmac-sha1-"
2349 						   "cbc-aes-talitos",
2350 				.cra_blocksize = AES_BLOCK_SIZE,
2351 				.cra_flags = CRYPTO_ALG_ASYNC |
2352 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2353 			},
2354 			.ivsize = AES_BLOCK_SIZE,
2355 			.maxauthsize = SHA1_DIGEST_SIZE,
2356 		},
2357 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2358 			             DESC_HDR_SEL0_AESU |
2359 		                     DESC_HDR_MODE0_AESU_CBC |
2360 		                     DESC_HDR_SEL1_MDEUA |
2361 		                     DESC_HDR_MODE1_MDEU_INIT |
2362 		                     DESC_HDR_MODE1_MDEU_PAD |
2363 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2364 	},
2365 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2366 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2367 		.alg.aead = {
2368 			.base = {
2369 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2370 				.cra_driver_name = "authenc-hmac-sha1-"
2371 						   "cbc-aes-talitos-hsna",
2372 				.cra_blocksize = AES_BLOCK_SIZE,
2373 				.cra_flags = CRYPTO_ALG_ASYNC |
2374 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2375 			},
2376 			.ivsize = AES_BLOCK_SIZE,
2377 			.maxauthsize = SHA1_DIGEST_SIZE,
2378 		},
2379 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2380 				     DESC_HDR_SEL0_AESU |
2381 				     DESC_HDR_MODE0_AESU_CBC |
2382 				     DESC_HDR_SEL1_MDEUA |
2383 				     DESC_HDR_MODE1_MDEU_INIT |
2384 				     DESC_HDR_MODE1_MDEU_PAD |
2385 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2386 	},
2387 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2388 		.alg.aead = {
2389 			.base = {
2390 				.cra_name = "authenc(hmac(sha1),"
2391 					    "cbc(des3_ede))",
2392 				.cra_driver_name = "authenc-hmac-sha1-"
2393 						   "cbc-3des-talitos",
2394 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2395 				.cra_flags = CRYPTO_ALG_ASYNC |
2396 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2397 			},
2398 			.ivsize = DES3_EDE_BLOCK_SIZE,
2399 			.maxauthsize = SHA1_DIGEST_SIZE,
2400 			.setkey = aead_des3_setkey,
2401 		},
2402 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2403 			             DESC_HDR_SEL0_DEU |
2404 		                     DESC_HDR_MODE0_DEU_CBC |
2405 		                     DESC_HDR_MODE0_DEU_3DES |
2406 		                     DESC_HDR_SEL1_MDEUA |
2407 		                     DESC_HDR_MODE1_MDEU_INIT |
2408 		                     DESC_HDR_MODE1_MDEU_PAD |
2409 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2410 	},
2411 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2412 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2413 		.alg.aead = {
2414 			.base = {
2415 				.cra_name = "authenc(hmac(sha1),"
2416 					    "cbc(des3_ede))",
2417 				.cra_driver_name = "authenc-hmac-sha1-"
2418 						   "cbc-3des-talitos-hsna",
2419 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2420 				.cra_flags = CRYPTO_ALG_ASYNC |
2421 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2422 			},
2423 			.ivsize = DES3_EDE_BLOCK_SIZE,
2424 			.maxauthsize = SHA1_DIGEST_SIZE,
2425 			.setkey = aead_des3_setkey,
2426 		},
2427 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2428 				     DESC_HDR_SEL0_DEU |
2429 				     DESC_HDR_MODE0_DEU_CBC |
2430 				     DESC_HDR_MODE0_DEU_3DES |
2431 				     DESC_HDR_SEL1_MDEUA |
2432 				     DESC_HDR_MODE1_MDEU_INIT |
2433 				     DESC_HDR_MODE1_MDEU_PAD |
2434 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2435 	},
2436 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2437 		.alg.aead = {
2438 			.base = {
2439 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2440 				.cra_driver_name = "authenc-hmac-sha224-"
2441 						   "cbc-aes-talitos",
2442 				.cra_blocksize = AES_BLOCK_SIZE,
2443 				.cra_flags = CRYPTO_ALG_ASYNC |
2444 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2445 			},
2446 			.ivsize = AES_BLOCK_SIZE,
2447 			.maxauthsize = SHA224_DIGEST_SIZE,
2448 		},
2449 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2450 				     DESC_HDR_SEL0_AESU |
2451 				     DESC_HDR_MODE0_AESU_CBC |
2452 				     DESC_HDR_SEL1_MDEUA |
2453 				     DESC_HDR_MODE1_MDEU_INIT |
2454 				     DESC_HDR_MODE1_MDEU_PAD |
2455 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2456 	},
2457 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2458 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2459 		.alg.aead = {
2460 			.base = {
2461 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2462 				.cra_driver_name = "authenc-hmac-sha224-"
2463 						   "cbc-aes-talitos-hsna",
2464 				.cra_blocksize = AES_BLOCK_SIZE,
2465 				.cra_flags = CRYPTO_ALG_ASYNC |
2466 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2467 			},
2468 			.ivsize = AES_BLOCK_SIZE,
2469 			.maxauthsize = SHA224_DIGEST_SIZE,
2470 		},
2471 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2472 				     DESC_HDR_SEL0_AESU |
2473 				     DESC_HDR_MODE0_AESU_CBC |
2474 				     DESC_HDR_SEL1_MDEUA |
2475 				     DESC_HDR_MODE1_MDEU_INIT |
2476 				     DESC_HDR_MODE1_MDEU_PAD |
2477 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2478 	},
2479 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2480 		.alg.aead = {
2481 			.base = {
2482 				.cra_name = "authenc(hmac(sha224),"
2483 					    "cbc(des3_ede))",
2484 				.cra_driver_name = "authenc-hmac-sha224-"
2485 						   "cbc-3des-talitos",
2486 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2487 				.cra_flags = CRYPTO_ALG_ASYNC |
2488 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2489 			},
2490 			.ivsize = DES3_EDE_BLOCK_SIZE,
2491 			.maxauthsize = SHA224_DIGEST_SIZE,
2492 			.setkey = aead_des3_setkey,
2493 		},
2494 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2495 			             DESC_HDR_SEL0_DEU |
2496 		                     DESC_HDR_MODE0_DEU_CBC |
2497 		                     DESC_HDR_MODE0_DEU_3DES |
2498 		                     DESC_HDR_SEL1_MDEUA |
2499 		                     DESC_HDR_MODE1_MDEU_INIT |
2500 		                     DESC_HDR_MODE1_MDEU_PAD |
2501 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2502 	},
2503 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2504 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2505 		.alg.aead = {
2506 			.base = {
2507 				.cra_name = "authenc(hmac(sha224),"
2508 					    "cbc(des3_ede))",
2509 				.cra_driver_name = "authenc-hmac-sha224-"
2510 						   "cbc-3des-talitos-hsna",
2511 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2512 				.cra_flags = CRYPTO_ALG_ASYNC |
2513 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2514 			},
2515 			.ivsize = DES3_EDE_BLOCK_SIZE,
2516 			.maxauthsize = SHA224_DIGEST_SIZE,
2517 			.setkey = aead_des3_setkey,
2518 		},
2519 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2520 				     DESC_HDR_SEL0_DEU |
2521 				     DESC_HDR_MODE0_DEU_CBC |
2522 				     DESC_HDR_MODE0_DEU_3DES |
2523 				     DESC_HDR_SEL1_MDEUA |
2524 				     DESC_HDR_MODE1_MDEU_INIT |
2525 				     DESC_HDR_MODE1_MDEU_PAD |
2526 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2527 	},
2528 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2529 		.alg.aead = {
2530 			.base = {
2531 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2532 				.cra_driver_name = "authenc-hmac-sha256-"
2533 						   "cbc-aes-talitos",
2534 				.cra_blocksize = AES_BLOCK_SIZE,
2535 				.cra_flags = CRYPTO_ALG_ASYNC |
2536 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2537 			},
2538 			.ivsize = AES_BLOCK_SIZE,
2539 			.maxauthsize = SHA256_DIGEST_SIZE,
2540 		},
2541 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2542 			             DESC_HDR_SEL0_AESU |
2543 		                     DESC_HDR_MODE0_AESU_CBC |
2544 		                     DESC_HDR_SEL1_MDEUA |
2545 		                     DESC_HDR_MODE1_MDEU_INIT |
2546 		                     DESC_HDR_MODE1_MDEU_PAD |
2547 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2548 	},
2549 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2550 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2551 		.alg.aead = {
2552 			.base = {
2553 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2554 				.cra_driver_name = "authenc-hmac-sha256-"
2555 						   "cbc-aes-talitos-hsna",
2556 				.cra_blocksize = AES_BLOCK_SIZE,
2557 				.cra_flags = CRYPTO_ALG_ASYNC |
2558 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2559 			},
2560 			.ivsize = AES_BLOCK_SIZE,
2561 			.maxauthsize = SHA256_DIGEST_SIZE,
2562 		},
2563 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2564 				     DESC_HDR_SEL0_AESU |
2565 				     DESC_HDR_MODE0_AESU_CBC |
2566 				     DESC_HDR_SEL1_MDEUA |
2567 				     DESC_HDR_MODE1_MDEU_INIT |
2568 				     DESC_HDR_MODE1_MDEU_PAD |
2569 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2570 	},
2571 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2572 		.alg.aead = {
2573 			.base = {
2574 				.cra_name = "authenc(hmac(sha256),"
2575 					    "cbc(des3_ede))",
2576 				.cra_driver_name = "authenc-hmac-sha256-"
2577 						   "cbc-3des-talitos",
2578 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2579 				.cra_flags = CRYPTO_ALG_ASYNC |
2580 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2581 			},
2582 			.ivsize = DES3_EDE_BLOCK_SIZE,
2583 			.maxauthsize = SHA256_DIGEST_SIZE,
2584 			.setkey = aead_des3_setkey,
2585 		},
2586 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2587 			             DESC_HDR_SEL0_DEU |
2588 		                     DESC_HDR_MODE0_DEU_CBC |
2589 		                     DESC_HDR_MODE0_DEU_3DES |
2590 		                     DESC_HDR_SEL1_MDEUA |
2591 		                     DESC_HDR_MODE1_MDEU_INIT |
2592 		                     DESC_HDR_MODE1_MDEU_PAD |
2593 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2594 	},
2595 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2596 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2597 		.alg.aead = {
2598 			.base = {
2599 				.cra_name = "authenc(hmac(sha256),"
2600 					    "cbc(des3_ede))",
2601 				.cra_driver_name = "authenc-hmac-sha256-"
2602 						   "cbc-3des-talitos-hsna",
2603 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2604 				.cra_flags = CRYPTO_ALG_ASYNC |
2605 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2606 			},
2607 			.ivsize = DES3_EDE_BLOCK_SIZE,
2608 			.maxauthsize = SHA256_DIGEST_SIZE,
2609 			.setkey = aead_des3_setkey,
2610 		},
2611 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2612 				     DESC_HDR_SEL0_DEU |
2613 				     DESC_HDR_MODE0_DEU_CBC |
2614 				     DESC_HDR_MODE0_DEU_3DES |
2615 				     DESC_HDR_SEL1_MDEUA |
2616 				     DESC_HDR_MODE1_MDEU_INIT |
2617 				     DESC_HDR_MODE1_MDEU_PAD |
2618 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2619 	},
2620 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2621 		.alg.aead = {
2622 			.base = {
2623 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2624 				.cra_driver_name = "authenc-hmac-sha384-"
2625 						   "cbc-aes-talitos",
2626 				.cra_blocksize = AES_BLOCK_SIZE,
2627 				.cra_flags = CRYPTO_ALG_ASYNC |
2628 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2629 			},
2630 			.ivsize = AES_BLOCK_SIZE,
2631 			.maxauthsize = SHA384_DIGEST_SIZE,
2632 		},
2633 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2634 			             DESC_HDR_SEL0_AESU |
2635 		                     DESC_HDR_MODE0_AESU_CBC |
2636 		                     DESC_HDR_SEL1_MDEUB |
2637 		                     DESC_HDR_MODE1_MDEU_INIT |
2638 		                     DESC_HDR_MODE1_MDEU_PAD |
2639 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2640 	},
2641 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2642 		.alg.aead = {
2643 			.base = {
2644 				.cra_name = "authenc(hmac(sha384),"
2645 					    "cbc(des3_ede))",
2646 				.cra_driver_name = "authenc-hmac-sha384-"
2647 						   "cbc-3des-talitos",
2648 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2649 				.cra_flags = CRYPTO_ALG_ASYNC |
2650 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2651 			},
2652 			.ivsize = DES3_EDE_BLOCK_SIZE,
2653 			.maxauthsize = SHA384_DIGEST_SIZE,
2654 			.setkey = aead_des3_setkey,
2655 		},
2656 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2657 			             DESC_HDR_SEL0_DEU |
2658 		                     DESC_HDR_MODE0_DEU_CBC |
2659 		                     DESC_HDR_MODE0_DEU_3DES |
2660 		                     DESC_HDR_SEL1_MDEUB |
2661 		                     DESC_HDR_MODE1_MDEU_INIT |
2662 		                     DESC_HDR_MODE1_MDEU_PAD |
2663 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2664 	},
2665 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2666 		.alg.aead = {
2667 			.base = {
2668 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2669 				.cra_driver_name = "authenc-hmac-sha512-"
2670 						   "cbc-aes-talitos",
2671 				.cra_blocksize = AES_BLOCK_SIZE,
2672 				.cra_flags = CRYPTO_ALG_ASYNC |
2673 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2674 			},
2675 			.ivsize = AES_BLOCK_SIZE,
2676 			.maxauthsize = SHA512_DIGEST_SIZE,
2677 		},
2678 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2679 			             DESC_HDR_SEL0_AESU |
2680 		                     DESC_HDR_MODE0_AESU_CBC |
2681 		                     DESC_HDR_SEL1_MDEUB |
2682 		                     DESC_HDR_MODE1_MDEU_INIT |
2683 		                     DESC_HDR_MODE1_MDEU_PAD |
2684 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2685 	},
2686 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2687 		.alg.aead = {
2688 			.base = {
2689 				.cra_name = "authenc(hmac(sha512),"
2690 					    "cbc(des3_ede))",
2691 				.cra_driver_name = "authenc-hmac-sha512-"
2692 						   "cbc-3des-talitos",
2693 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2694 				.cra_flags = CRYPTO_ALG_ASYNC |
2695 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2696 			},
2697 			.ivsize = DES3_EDE_BLOCK_SIZE,
2698 			.maxauthsize = SHA512_DIGEST_SIZE,
2699 			.setkey = aead_des3_setkey,
2700 		},
2701 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2702 			             DESC_HDR_SEL0_DEU |
2703 		                     DESC_HDR_MODE0_DEU_CBC |
2704 		                     DESC_HDR_MODE0_DEU_3DES |
2705 		                     DESC_HDR_SEL1_MDEUB |
2706 		                     DESC_HDR_MODE1_MDEU_INIT |
2707 		                     DESC_HDR_MODE1_MDEU_PAD |
2708 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2709 	},
2710 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2711 		.alg.aead = {
2712 			.base = {
2713 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2714 				.cra_driver_name = "authenc-hmac-md5-"
2715 						   "cbc-aes-talitos",
2716 				.cra_blocksize = AES_BLOCK_SIZE,
2717 				.cra_flags = CRYPTO_ALG_ASYNC |
2718 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2719 			},
2720 			.ivsize = AES_BLOCK_SIZE,
2721 			.maxauthsize = MD5_DIGEST_SIZE,
2722 		},
2723 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2724 			             DESC_HDR_SEL0_AESU |
2725 		                     DESC_HDR_MODE0_AESU_CBC |
2726 		                     DESC_HDR_SEL1_MDEUA |
2727 		                     DESC_HDR_MODE1_MDEU_INIT |
2728 		                     DESC_HDR_MODE1_MDEU_PAD |
2729 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2730 	},
2731 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2732 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2733 		.alg.aead = {
2734 			.base = {
2735 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2736 				.cra_driver_name = "authenc-hmac-md5-"
2737 						   "cbc-aes-talitos-hsna",
2738 				.cra_blocksize = AES_BLOCK_SIZE,
2739 				.cra_flags = CRYPTO_ALG_ASYNC |
2740 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2741 			},
2742 			.ivsize = AES_BLOCK_SIZE,
2743 			.maxauthsize = MD5_DIGEST_SIZE,
2744 		},
2745 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2746 				     DESC_HDR_SEL0_AESU |
2747 				     DESC_HDR_MODE0_AESU_CBC |
2748 				     DESC_HDR_SEL1_MDEUA |
2749 				     DESC_HDR_MODE1_MDEU_INIT |
2750 				     DESC_HDR_MODE1_MDEU_PAD |
2751 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2752 	},
2753 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2754 		.alg.aead = {
2755 			.base = {
2756 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2757 				.cra_driver_name = "authenc-hmac-md5-"
2758 						   "cbc-3des-talitos",
2759 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2760 				.cra_flags = CRYPTO_ALG_ASYNC |
2761 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2762 			},
2763 			.ivsize = DES3_EDE_BLOCK_SIZE,
2764 			.maxauthsize = MD5_DIGEST_SIZE,
2765 			.setkey = aead_des3_setkey,
2766 		},
2767 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2768 			             DESC_HDR_SEL0_DEU |
2769 		                     DESC_HDR_MODE0_DEU_CBC |
2770 		                     DESC_HDR_MODE0_DEU_3DES |
2771 		                     DESC_HDR_SEL1_MDEUA |
2772 		                     DESC_HDR_MODE1_MDEU_INIT |
2773 		                     DESC_HDR_MODE1_MDEU_PAD |
2774 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2775 	},
2776 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2777 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2778 		.alg.aead = {
2779 			.base = {
2780 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2781 				.cra_driver_name = "authenc-hmac-md5-"
2782 						   "cbc-3des-talitos-hsna",
2783 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2784 				.cra_flags = CRYPTO_ALG_ASYNC |
2785 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2786 			},
2787 			.ivsize = DES3_EDE_BLOCK_SIZE,
2788 			.maxauthsize = MD5_DIGEST_SIZE,
2789 			.setkey = aead_des3_setkey,
2790 		},
2791 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2792 				     DESC_HDR_SEL0_DEU |
2793 				     DESC_HDR_MODE0_DEU_CBC |
2794 				     DESC_HDR_MODE0_DEU_3DES |
2795 				     DESC_HDR_SEL1_MDEUA |
2796 				     DESC_HDR_MODE1_MDEU_INIT |
2797 				     DESC_HDR_MODE1_MDEU_PAD |
2798 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2799 	},
2800 	/* SKCIPHER algorithms. */
2801 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2802 		.alg.skcipher = {
2803 			.base.cra_name = "ecb(aes)",
2804 			.base.cra_driver_name = "ecb-aes-talitos",
2805 			.base.cra_blocksize = AES_BLOCK_SIZE,
2806 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2807 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2808 			.min_keysize = AES_MIN_KEY_SIZE,
2809 			.max_keysize = AES_MAX_KEY_SIZE,
2810 			.setkey = skcipher_aes_setkey,
2811 		},
2812 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2813 				     DESC_HDR_SEL0_AESU,
2814 	},
2815 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2816 		.alg.skcipher = {
2817 			.base.cra_name = "cbc(aes)",
2818 			.base.cra_driver_name = "cbc-aes-talitos",
2819 			.base.cra_blocksize = AES_BLOCK_SIZE,
2820 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2821 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2822 			.min_keysize = AES_MIN_KEY_SIZE,
2823 			.max_keysize = AES_MAX_KEY_SIZE,
2824 			.ivsize = AES_BLOCK_SIZE,
2825 			.setkey = skcipher_aes_setkey,
2826 		},
2827 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2828 				     DESC_HDR_SEL0_AESU |
2829 				     DESC_HDR_MODE0_AESU_CBC,
2830 	},
2831 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2832 		.alg.skcipher = {
2833 			.base.cra_name = "ctr(aes)",
2834 			.base.cra_driver_name = "ctr-aes-talitos",
2835 			.base.cra_blocksize = 1,
2836 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2837 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2838 			.min_keysize = AES_MIN_KEY_SIZE,
2839 			.max_keysize = AES_MAX_KEY_SIZE,
2840 			.ivsize = AES_BLOCK_SIZE,
2841 			.setkey = skcipher_aes_setkey,
2842 		},
2843 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2844 				     DESC_HDR_SEL0_AESU |
2845 				     DESC_HDR_MODE0_AESU_CTR,
2846 	},
2847 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2848 		.alg.skcipher = {
2849 			.base.cra_name = "ctr(aes)",
2850 			.base.cra_driver_name = "ctr-aes-talitos",
2851 			.base.cra_blocksize = 1,
2852 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2853 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2854 			.min_keysize = AES_MIN_KEY_SIZE,
2855 			.max_keysize = AES_MAX_KEY_SIZE,
2856 			.ivsize = AES_BLOCK_SIZE,
2857 			.setkey = skcipher_aes_setkey,
2858 		},
2859 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2860 				     DESC_HDR_SEL0_AESU |
2861 				     DESC_HDR_MODE0_AESU_CTR,
2862 	},
2863 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2864 		.alg.skcipher = {
2865 			.base.cra_name = "ecb(des)",
2866 			.base.cra_driver_name = "ecb-des-talitos",
2867 			.base.cra_blocksize = DES_BLOCK_SIZE,
2868 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2869 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2870 			.min_keysize = DES_KEY_SIZE,
2871 			.max_keysize = DES_KEY_SIZE,
2872 			.setkey = skcipher_des_setkey,
2873 		},
2874 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2875 				     DESC_HDR_SEL0_DEU,
2876 	},
2877 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2878 		.alg.skcipher = {
2879 			.base.cra_name = "cbc(des)",
2880 			.base.cra_driver_name = "cbc-des-talitos",
2881 			.base.cra_blocksize = DES_BLOCK_SIZE,
2882 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2883 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2884 			.min_keysize = DES_KEY_SIZE,
2885 			.max_keysize = DES_KEY_SIZE,
2886 			.ivsize = DES_BLOCK_SIZE,
2887 			.setkey = skcipher_des_setkey,
2888 		},
2889 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2890 				     DESC_HDR_SEL0_DEU |
2891 				     DESC_HDR_MODE0_DEU_CBC,
2892 	},
2893 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2894 		.alg.skcipher = {
2895 			.base.cra_name = "ecb(des3_ede)",
2896 			.base.cra_driver_name = "ecb-3des-talitos",
2897 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2898 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2899 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2900 			.min_keysize = DES3_EDE_KEY_SIZE,
2901 			.max_keysize = DES3_EDE_KEY_SIZE,
2902 			.setkey = skcipher_des3_setkey,
2903 		},
2904 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2905 				     DESC_HDR_SEL0_DEU |
2906 				     DESC_HDR_MODE0_DEU_3DES,
2907 	},
2908 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2909 		.alg.skcipher = {
2910 			.base.cra_name = "cbc(des3_ede)",
2911 			.base.cra_driver_name = "cbc-3des-talitos",
2912 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2913 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2914 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2915 			.min_keysize = DES3_EDE_KEY_SIZE,
2916 			.max_keysize = DES3_EDE_KEY_SIZE,
2917 			.ivsize = DES3_EDE_BLOCK_SIZE,
2918 			.setkey = skcipher_des3_setkey,
2919 		},
2920 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2921 			             DESC_HDR_SEL0_DEU |
2922 		                     DESC_HDR_MODE0_DEU_CBC |
2923 		                     DESC_HDR_MODE0_DEU_3DES,
2924 	},
2925 	/* AHASH algorithms. */
2926 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2927 		.alg.hash = {
2928 			.halg.digestsize = MD5_DIGEST_SIZE,
2929 			.halg.statesize = sizeof(struct talitos_export_state),
2930 			.halg.base = {
2931 				.cra_name = "md5",
2932 				.cra_driver_name = "md5-talitos",
2933 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2934 				.cra_flags = CRYPTO_ALG_ASYNC |
2935 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2936 			}
2937 		},
2938 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2939 				     DESC_HDR_SEL0_MDEUA |
2940 				     DESC_HDR_MODE0_MDEU_MD5,
2941 	},
2942 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2943 		.alg.hash = {
2944 			.halg.digestsize = SHA1_DIGEST_SIZE,
2945 			.halg.statesize = sizeof(struct talitos_export_state),
2946 			.halg.base = {
2947 				.cra_name = "sha1",
2948 				.cra_driver_name = "sha1-talitos",
2949 				.cra_blocksize = SHA1_BLOCK_SIZE,
2950 				.cra_flags = CRYPTO_ALG_ASYNC |
2951 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2952 			}
2953 		},
2954 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2955 				     DESC_HDR_SEL0_MDEUA |
2956 				     DESC_HDR_MODE0_MDEU_SHA1,
2957 	},
2958 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2959 		.alg.hash = {
2960 			.halg.digestsize = SHA224_DIGEST_SIZE,
2961 			.halg.statesize = sizeof(struct talitos_export_state),
2962 			.halg.base = {
2963 				.cra_name = "sha224",
2964 				.cra_driver_name = "sha224-talitos",
2965 				.cra_blocksize = SHA224_BLOCK_SIZE,
2966 				.cra_flags = CRYPTO_ALG_ASYNC |
2967 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2968 			}
2969 		},
2970 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2971 				     DESC_HDR_SEL0_MDEUA |
2972 				     DESC_HDR_MODE0_MDEU_SHA224,
2973 	},
2974 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2975 		.alg.hash = {
2976 			.halg.digestsize = SHA256_DIGEST_SIZE,
2977 			.halg.statesize = sizeof(struct talitos_export_state),
2978 			.halg.base = {
2979 				.cra_name = "sha256",
2980 				.cra_driver_name = "sha256-talitos",
2981 				.cra_blocksize = SHA256_BLOCK_SIZE,
2982 				.cra_flags = CRYPTO_ALG_ASYNC |
2983 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2984 			}
2985 		},
2986 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2987 				     DESC_HDR_SEL0_MDEUA |
2988 				     DESC_HDR_MODE0_MDEU_SHA256,
2989 	},
2990 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2991 		.alg.hash = {
2992 			.halg.digestsize = SHA384_DIGEST_SIZE,
2993 			.halg.statesize = sizeof(struct talitos_export_state),
2994 			.halg.base = {
2995 				.cra_name = "sha384",
2996 				.cra_driver_name = "sha384-talitos",
2997 				.cra_blocksize = SHA384_BLOCK_SIZE,
2998 				.cra_flags = CRYPTO_ALG_ASYNC |
2999 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3000 			}
3001 		},
3002 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3003 				     DESC_HDR_SEL0_MDEUB |
3004 				     DESC_HDR_MODE0_MDEUB_SHA384,
3005 	},
3006 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3007 		.alg.hash = {
3008 			.halg.digestsize = SHA512_DIGEST_SIZE,
3009 			.halg.statesize = sizeof(struct talitos_export_state),
3010 			.halg.base = {
3011 				.cra_name = "sha512",
3012 				.cra_driver_name = "sha512-talitos",
3013 				.cra_blocksize = SHA512_BLOCK_SIZE,
3014 				.cra_flags = CRYPTO_ALG_ASYNC |
3015 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3016 			}
3017 		},
3018 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3019 				     DESC_HDR_SEL0_MDEUB |
3020 				     DESC_HDR_MODE0_MDEUB_SHA512,
3021 	},
3022 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3023 		.alg.hash = {
3024 			.halg.digestsize = MD5_DIGEST_SIZE,
3025 			.halg.statesize = sizeof(struct talitos_export_state),
3026 			.halg.base = {
3027 				.cra_name = "hmac(md5)",
3028 				.cra_driver_name = "hmac-md5-talitos",
3029 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
3030 				.cra_flags = CRYPTO_ALG_ASYNC |
3031 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3032 			}
3033 		},
3034 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3035 				     DESC_HDR_SEL0_MDEUA |
3036 				     DESC_HDR_MODE0_MDEU_MD5,
3037 	},
3038 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3039 		.alg.hash = {
3040 			.halg.digestsize = SHA1_DIGEST_SIZE,
3041 			.halg.statesize = sizeof(struct talitos_export_state),
3042 			.halg.base = {
3043 				.cra_name = "hmac(sha1)",
3044 				.cra_driver_name = "hmac-sha1-talitos",
3045 				.cra_blocksize = SHA1_BLOCK_SIZE,
3046 				.cra_flags = CRYPTO_ALG_ASYNC |
3047 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3048 			}
3049 		},
3050 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3051 				     DESC_HDR_SEL0_MDEUA |
3052 				     DESC_HDR_MODE0_MDEU_SHA1,
3053 	},
3054 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3055 		.alg.hash = {
3056 			.halg.digestsize = SHA224_DIGEST_SIZE,
3057 			.halg.statesize = sizeof(struct talitos_export_state),
3058 			.halg.base = {
3059 				.cra_name = "hmac(sha224)",
3060 				.cra_driver_name = "hmac-sha224-talitos",
3061 				.cra_blocksize = SHA224_BLOCK_SIZE,
3062 				.cra_flags = CRYPTO_ALG_ASYNC |
3063 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3064 			}
3065 		},
3066 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3067 				     DESC_HDR_SEL0_MDEUA |
3068 				     DESC_HDR_MODE0_MDEU_SHA224,
3069 	},
3070 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3071 		.alg.hash = {
3072 			.halg.digestsize = SHA256_DIGEST_SIZE,
3073 			.halg.statesize = sizeof(struct talitos_export_state),
3074 			.halg.base = {
3075 				.cra_name = "hmac(sha256)",
3076 				.cra_driver_name = "hmac-sha256-talitos",
3077 				.cra_blocksize = SHA256_BLOCK_SIZE,
3078 				.cra_flags = CRYPTO_ALG_ASYNC |
3079 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3080 			}
3081 		},
3082 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3083 				     DESC_HDR_SEL0_MDEUA |
3084 				     DESC_HDR_MODE0_MDEU_SHA256,
3085 	},
3086 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3087 		.alg.hash = {
3088 			.halg.digestsize = SHA384_DIGEST_SIZE,
3089 			.halg.statesize = sizeof(struct talitos_export_state),
3090 			.halg.base = {
3091 				.cra_name = "hmac(sha384)",
3092 				.cra_driver_name = "hmac-sha384-talitos",
3093 				.cra_blocksize = SHA384_BLOCK_SIZE,
3094 				.cra_flags = CRYPTO_ALG_ASYNC |
3095 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3096 			}
3097 		},
3098 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3099 				     DESC_HDR_SEL0_MDEUB |
3100 				     DESC_HDR_MODE0_MDEUB_SHA384,
3101 	},
3102 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3103 		.alg.hash = {
3104 			.halg.digestsize = SHA512_DIGEST_SIZE,
3105 			.halg.statesize = sizeof(struct talitos_export_state),
3106 			.halg.base = {
3107 				.cra_name = "hmac(sha512)",
3108 				.cra_driver_name = "hmac-sha512-talitos",
3109 				.cra_blocksize = SHA512_BLOCK_SIZE,
3110 				.cra_flags = CRYPTO_ALG_ASYNC |
3111 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3112 			}
3113 		},
3114 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3115 				     DESC_HDR_SEL0_MDEUB |
3116 				     DESC_HDR_MODE0_MDEUB_SHA512,
3117 	}
3118 };
3119 
/*
 * Per-algorithm registration record: a writable copy of one driver_algs[]
 * template, patched at alloc time with callbacks, bound to the SEC device
 * that registered it and linked on that device's alg_list.
 */
struct talitos_crypto_alg {
	struct list_head entry;		/* node on talitos_private alg_list */
	struct device *dev;		/* SEC device servicing this algorithm */
	struct talitos_alg_template algt;	/* patched copy of the template */
};
3125 
3126 static int talitos_init_common(struct talitos_ctx *ctx,
3127 			       struct talitos_crypto_alg *talitos_alg)
3128 {
3129 	struct talitos_private *priv;
3130 
3131 	/* update context with ptr to dev */
3132 	ctx->dev = talitos_alg->dev;
3133 
3134 	/* assign SEC channel to tfm in round-robin fashion */
3135 	priv = dev_get_drvdata(ctx->dev);
3136 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3137 		  (priv->num_channels - 1);
3138 
3139 	/* copy descriptor header template value */
3140 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3141 
3142 	/* select done notification */
3143 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3144 
3145 	return 0;
3146 }
3147 
3148 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3149 {
3150 	struct aead_alg *alg = crypto_aead_alg(tfm);
3151 	struct talitos_crypto_alg *talitos_alg;
3152 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3153 
3154 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3155 				   algt.alg.aead);
3156 
3157 	return talitos_init_common(ctx, talitos_alg);
3158 }
3159 
3160 static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3161 {
3162 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3163 	struct talitos_crypto_alg *talitos_alg;
3164 	struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3165 
3166 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3167 				   algt.alg.skcipher);
3168 
3169 	return talitos_init_common(ctx, talitos_alg);
3170 }
3171 
3172 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3173 {
3174 	struct crypto_alg *alg = tfm->__crt_alg;
3175 	struct talitos_crypto_alg *talitos_alg;
3176 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3177 
3178 	talitos_alg = container_of(__crypto_ahash_alg(alg),
3179 				   struct talitos_crypto_alg,
3180 				   algt.alg.hash);
3181 
3182 	ctx->keylen = 0;
3183 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3184 				 sizeof(struct talitos_ahash_req_ctx));
3185 
3186 	return talitos_init_common(ctx, talitos_alg);
3187 }
3188 
3189 static void talitos_cra_exit(struct crypto_tfm *tfm)
3190 {
3191 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3192 	struct device *dev = ctx->dev;
3193 
3194 	if (ctx->keylen)
3195 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3196 }
3197 
3198 /*
3199  * given the alg's descriptor header template, determine whether descriptor
3200  * type and primary/secondary execution units required match the hw
3201  * capabilities description provided in the device tree node.
3202  */
3203 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3204 {
3205 	struct talitos_private *priv = dev_get_drvdata(dev);
3206 	int ret;
3207 
3208 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3209 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3210 
3211 	if (SECONDARY_EU(desc_hdr_template))
3212 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3213 		              & priv->exec_units);
3214 
3215 	return ret;
3216 }
3217 
3218 static void talitos_remove(struct platform_device *ofdev)
3219 {
3220 	struct device *dev = &ofdev->dev;
3221 	struct talitos_private *priv = dev_get_drvdata(dev);
3222 	struct talitos_crypto_alg *t_alg, *n;
3223 	int i;
3224 
3225 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3226 		switch (t_alg->algt.type) {
3227 		case CRYPTO_ALG_TYPE_SKCIPHER:
3228 			crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3229 			break;
3230 		case CRYPTO_ALG_TYPE_AEAD:
3231 			crypto_unregister_aead(&t_alg->algt.alg.aead);
3232 			break;
3233 		case CRYPTO_ALG_TYPE_AHASH:
3234 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3235 			break;
3236 		}
3237 		list_del(&t_alg->entry);
3238 	}
3239 
3240 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3241 		talitos_unregister_rng(dev);
3242 
3243 	for (i = 0; i < 2; i++)
3244 		if (priv->irq[i]) {
3245 			free_irq(priv->irq[i], dev);
3246 			irq_dispose_mapping(priv->irq[i]);
3247 		}
3248 
3249 	tasklet_kill(&priv->done_task[0]);
3250 	if (priv->irq[1])
3251 		tasklet_kill(&priv->done_task[1]);
3252 }
3253 
/*
 * Instantiate one driver_algs[] template for this device: copy the
 * template, patch in the runtime callbacks for its type, reject
 * combinations the hardware cannot run (-ENOTSUPP makes the caller skip
 * the entry silently), and fill in the generic crypto_alg fields.
 *
 * Returns the new devm-allocated wrapper, or an ERR_PTR on failure.
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
			     GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	/* work on a private copy so the shared template stays pristine */
	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		alg = &t_alg->algt.alg.skcipher.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
		/* keep a template-provided setkey (e.g. DES weak-key checks) */
		t_alg->algt.alg.skcipher.setkey =
			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
		/*
		 * driver_algs[] carries two ctr(aes) entries with different
		 * descriptor types; on non-SEC1 hardware only the
		 * AESU_CTR_NONSNOOP variant is usable, so drop the other.
		 */
		if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
		    DESC_TYPE(t_alg->algt.desc_hdr_template) !=
		    DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
					      aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		/* sha224-based AEADs need the SHA224 h/w init feature */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		/* only keyed (hmac) hashes get a setkey callback */
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/*
		 * Without SHA224 h/w init support, run sha224 as sha256
		 * with a software-seeded initial state and truncation.
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.alg.hash.digest =
				ahash_digest_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	/* SEC1 DMA (except for hashing) requires 4-byte aligned buffers */
	if (has_ftr_sec1(priv) && t_alg->algt.type != CRYPTO_ALG_TYPE_AHASH)
		alg->cra_alignmask = 3;
	else
		alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
3352 
/*
 * Map and request the device's interrupt line(s) from the device tree.
 *
 * SEC1 uses a single IRQ for all four channels.  SEC2+ may provide two
 * lines (channels 0/2 on the first, 1/3 on the second); when only one is
 * present, a combined 4-channel handler is installed instead.  On failure
 * the corresponding priv->irq[] slot is unmapped and zeroed so that
 * talitos_remove() can unwind safely.  Returns 0 or a negative errno.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		/* SEC1: one IRQ serving all channels */
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		/* single-IRQ SEC2: combined handler for all four channels */
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		/* primary stays requested; talitos_remove() will free it */
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
3406 
/*
 * Probe one SEC instance: map registers, read capability properties from
 * the device tree, select the SEC1 vs SEC2 register layout, set up IRQs
 * and done-tasklets, allocate the per-channel request fifos, reset the
 * hardware, and register the hwrng plus every driver_algs[] entry the
 * hardware supports.  Any failure unwinds through talitos_remove(),
 * which tolerates partially initialized state.
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	/*
	 * num_channels must be a power of two: talitos_init_common()
	 * uses (num_channels - 1) as a round-robin mask.
	 */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	/* per-variant execution-unit register offsets and channel stride */
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	/* done-tasklet choice mirrors the IRQ topology set up above */
	if (has_ftr_sec1(priv)) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						priv->fifo_len,
						sizeof(struct talitos_request),
						GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		/* submit_count reaches 0 when the channel fifo is full */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	/* NOTE(review): dma_set_mask() return value is ignored here */
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP: not an error, just skip it */
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_SKCIPHER:
				err = crypto_register_skcipher(
						&t_alg->algt.alg.skcipher);
				alg = &t_alg->algt.alg.skcipher.base;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				/* one registration failure is non-fatal */
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
3619 
/*
 * Device tree match table; each SEC generation is gated on its own
 * Kconfig option so unused variant support can be compiled out.
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
3634 
/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3649