xref: /linux/drivers/crypto/talitos.c (revision f7ead7b47a758bbee6fdc66f95f27fdb866e5e9d)
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}
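
/*
 * Illustration of the helpers above (not driver code; the address is a
 * made-up 36-bit example).  SEC2+ parts split a bus address across eptr
 * (bits 35:32) and ptr (bits 31:0); SEC1 keeps only the low 32 bits and
 * carries the length in len1:
 *
 *	struct talitos_ptr p;
 *	dma_addr_t addr = 0xf12345678ULL;
 *
 *	to_talitos_ptr(&p, addr, false);
 *	// SEC2+: p.eptr == 0xf, p.ptr == cpu_to_be32(0x12345678)
 *	to_talitos_ptr_len(&p, 64, false);
 *	// SEC2+: p.len == cpu_to_be16(64); SEC1 would set p.len1 instead
 */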

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
		          TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
		          TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
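
/*
 * Usage sketch, for illustration only (my_done, my_priv and handle_busy
 * are hypothetical caller-side names, not part of this driver): a client
 * builds a descriptor with dma-mapped pointers, queues it on a channel,
 * and checks status in its callback.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_priv *p = context;
 *
 *		// inspect error and the DESC_HDR_DONE bit in desc->hdr
 *		complete(&p->completion);
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, p);
 *	if (err != -EINPROGRESS)
 *		handle_busy();	// -EAGAIN: channel fifo full, retry later
 */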

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v, v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
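
/*
 * Resulting key layout, sketched with example sizes (e.g. a 20-byte
 * HMAC-SHA1 key and a 16-byte AES key for authenc(hmac(sha1),cbc(aes));
 * the numbers are illustrative, not requirements):
 *
 *	ctx->key:        [ authkey (20) | enckey (16) ]
 *	ctx->authkeylen: 20
 *	ctx->enckeylen:  16
 *	ctx->keylen:     36
 *
 * ipsec_esp() below points the descriptor's hmac-key pointer at
 * ctx->key and the cipher-key pointer at ctx->key + authkeylen.
 */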

/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
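
/*
 * Rough SEC2+ layout of the variable tail, matching the dma_len
 * computation in talitos_edesc_alloc() below (illustrative only):
 *
 *	link_tbl[0 .. src_nents]		input link table
 *	link_tbl[src_nents+1 .. src+dst+1]	output link table
 *	further entries				assoc (+ IV), if any
 *	final authsize bytes			stashed/generated ICV
 *
 * On SEC1 the same union member is used as a flat bounce buffer (buf)
 * of up to cryptlen bytes each for input and output instead.
 */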

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents also counts the IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
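
/*
 * Worked example with made-up numbers: three dma segments of 64 bytes
 * each and cryptlen == 150.  After the copy loop cryptlen is -42, the
 * trimming loop doesn't trigger (64 > 42), and be16_add_cpu() shrinks
 * the last entry from 64 to 22, so the table describes exactly 150
 * bytes.  With cryptlen == 120 instead, the last entry would be
 * emptied and the middle one trimmed from 64 to 56.
 */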

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc), 0);
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src), 0);
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}
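
/*
 * Example (illustrative): an 8 KiB request spread over entries of
 * 4096, 2048 and 2048 bytes makes sg_count() return 3 with *chained
 * left false.  A zero-length slot in the middle of the array (the old
 * scatterlist chaining convention this driver checks for) would flip
 * *chained to true, making the mapping helpers above fall back to
 * mapping one entry at a time.
 */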

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src */
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
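
/*
 * IV derivation above, illustrated with made-up values: if the first
 * quadword of ctx->iv is 0x0011223344556677, sequence numbers 1 and 2
 * yield generated IVs beginning 0x0011223344556676 and
 * 0x0011223344556675, so back-to-back packets never share an IV.
 */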

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
				 unsigned int len, struct talitos_edesc *edesc,
				 enum dma_data_direction dir,
				 struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
					  edesc->src_chained);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed */
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}

static void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
				   unsigned int len, struct talitos_edesc *edesc,
				   enum dma_data_direction dir,
				   struct talitos_ptr *ptr, int sg_count)
1607 {
1608 	struct talitos_private *priv = dev_get_drvdata(dev);
1609 	bool is_sec1 = has_ftr_sec1(priv);
1610 
1611 	if (dir != DMA_NONE)
1612 		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1613 					  dir, edesc->dst_chained);
1614 
1615 	to_talitos_ptr_len(ptr, len, is_sec1);
1616 
1617 	if (is_sec1) {
1618 		if (sg_count == 1) {
1619 			if (dir != DMA_NONE)
1620 				dma_map_sg(dev, dst, 1, dir);
1621 			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1622 		} else {
1623 			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1624 			dma_sync_single_for_device(dev,
1625 						   edesc->dma_link_tbl + len,
1626 						   len, DMA_FROM_DEVICE);
1627 		}
1628 	} else {
1629 		to_talitos_ptr_extent_clear(ptr, is_sec1);
1630 
1631 		if (sg_count == 1) {
1632 			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1633 		} else {
1634 			struct talitos_ptr *link_tbl_ptr =
1635 				&edesc->link_tbl[edesc->src_nents + 1];
1636 
1637 			to_talitos_ptr(ptr, edesc->dma_link_tbl +
1638 					    (edesc->src_nents + 1) *
1639 					     sizeof(struct talitos_ptr), 0);
1640 			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1641 			sg_count = sg_to_link_tbl(dst, sg_count, len,
1642 						  link_tbl_ptr);
1643 			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1644 						   edesc->dma_len,
1645 						   DMA_BIDIRECTIONAL);
1646 		}
1647 	}
1648 }
1649 
1650 static int common_nonsnoop(struct talitos_edesc *edesc,
1651 			   struct ablkcipher_request *areq,
1652 			   void (*callback) (struct device *dev,
1653 					     struct talitos_desc *desc,
1654 					     void *context, int error))
1655 {
1656 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1657 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1658 	struct device *dev = ctx->dev;
1659 	struct talitos_desc *desc = &edesc->desc;
1660 	unsigned int cryptlen = areq->nbytes;
1661 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1662 	int sg_count, ret;
1663 	struct talitos_private *priv = dev_get_drvdata(dev);
1664 	bool is_sec1 = has_ftr_sec1(priv);
1665 
1666 	/* first DWORD empty */
1667 	desc->ptr[0] = zero_entry;
1668 
1669 	/* cipher iv */
1670 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1671 	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1672 	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
1673 
1674 	/* cipher key */
1675 	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1676 			       (char *)&ctx->key, DMA_TO_DEVICE);
1677 
1678 	/*
1679 	 * cipher in
1680 	 */
1681 	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1682 					 (areq->src == areq->dst) ?
1683 					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1684 					  &desc->ptr[3]);
1685 
1686 	/* cipher out */
1687 	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1688 			       (areq->src == areq->dst) ? DMA_NONE
1689 							: DMA_FROM_DEVICE,
1690 			       &desc->ptr[4], sg_count);
1691 
1692 	/* iv out */
1693 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1694 			       DMA_FROM_DEVICE);
1695 
1696 	/* last DWORD empty */
1697 	desc->ptr[6] = zero_entry;
1698 
1699 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1700 	if (ret != -EINPROGRESS) {
1701 		common_nonsnoop_unmap(dev, edesc, areq);
1702 		kfree(edesc);
1703 	}
1704 	return ret;
1705 }
1706 
1707 static struct talitos_edesc *
1708 ablkcipher_edesc_alloc(struct ablkcipher_request *areq, bool encrypt)
1709 {
1710 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1711 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1712 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1713 
1714 	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1715 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1716 				   areq->base.flags, encrypt);
1717 }
1718 
1719 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1720 {
1721 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1722 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1723 	struct talitos_edesc *edesc;
1724 
1725 	/* allocate extended descriptor */
1726 	edesc = ablkcipher_edesc_alloc(areq, true);
1727 	if (IS_ERR(edesc))
1728 		return PTR_ERR(edesc);
1729 
1730 	/* set encrypt */
1731 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1732 
1733 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1734 }
1735 
1736 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1737 {
1738 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1739 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1740 	struct talitos_edesc *edesc;
1741 
1742 	/* allocate extended descriptor */
1743 	edesc = ablkcipher_edesc_alloc(areq, false);
1744 	if (IS_ERR(edesc))
1745 		return PTR_ERR(edesc);
1746 
1747 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1748 
1749 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1750 }
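
/*
 * Example (illustrative only): how a caller reaches the two handlers
 * above through the (pre-skcipher) ablkcipher API.  "key", "buf", "len"
 * and "err" are hypothetical, and error handling is omitted.
 */
#if 0
	struct crypto_ablkcipher *tfm =
		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 iv[AES_BLOCK_SIZE];

	get_random_bytes(iv, sizeof(iv));
	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_ablkcipher_encrypt(req);	/* -EINPROGRESS when async */
#endif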
1751 
1752 static void common_nonsnoop_hash_unmap(struct device *dev,
1753 				       struct talitos_edesc *edesc,
1754 				       struct ahash_request *areq)
1755 {
1756 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1757 	struct talitos_private *priv = dev_get_drvdata(dev);
1758 	bool is_sec1 = has_ftr_sec1(priv);
1759 
1760 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1761 
1762 	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1763 
1764 	/* When using hashctx-in, must unmap it. */
1765 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1766 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1767 					 DMA_TO_DEVICE);
1768 
1769 	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
1770 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1771 					 DMA_TO_DEVICE);
1772 
1773 	if (edesc->dma_len)
1774 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1775 				 DMA_BIDIRECTIONAL);
1777 }
1778 
1779 static void ahash_done(struct device *dev,
1780 		       struct talitos_desc *desc, void *context,
1781 		       int err)
1782 {
1783 	struct ahash_request *areq = context;
1784 	struct talitos_edesc *edesc =
1785 		 container_of(desc, struct talitos_edesc, desc);
1786 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1787 
1788 	if (!req_ctx->last && req_ctx->to_hash_later) {
1789 		/* Position any partial block for next update/final/finup */
1790 		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1791 		req_ctx->nbuf = req_ctx->to_hash_later;
1792 	}
1793 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1794 
1795 	kfree(edesc);
1796 
1797 	areq->base.complete(&areq->base, err);
1798 }
1799 
1800 /*
1801  * SEC1 cannot hash a zero-length message, so we do the padding
1802  * ourselves and submit an already-padded block
1803  */
1804 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1805 				      struct talitos_edesc *edesc,
1806 				      struct talitos_ptr *ptr)
1807 {
1808 	static u8 padded_hash[64] = {
1809 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1810 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1811 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1812 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1813 	};
1814 
1815 	pr_err_once("Bug in SEC1, padding ourselves\n");
1816 	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1817 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1818 			       (char *)padded_hash, DMA_TO_DEVICE);
1819 }
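
/*
 * Why the block above works: for any 64-byte-block MDEU algorithm, the
 * padded form of the empty message is exactly one block -- the 0x80
 * terminator, zero fill, and an all-zero length field, i.e. the
 * padded_hash[] contents.  With DESC_HDR_MODE0_MDEU_PAD cleared, the
 * engine hashes that block as-is and thus emits the well-known
 * empty-message digest, e.g. md5("") = d41d8cd98f00b204e9800998ecf8427e.
 */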
1820 
1821 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1822 				struct ahash_request *areq, unsigned int length,
1823 				void (*callback) (struct device *dev,
1824 						  struct talitos_desc *desc,
1825 						  void *context, int error))
1826 {
1827 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1828 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1829 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1830 	struct device *dev = ctx->dev;
1831 	struct talitos_desc *desc = &edesc->desc;
1832 	int ret;
1833 	struct talitos_private *priv = dev_get_drvdata(dev);
1834 	bool is_sec1 = has_ftr_sec1(priv);
1835 
1836 	/* first DWORD empty */
1837 	desc->ptr[0] = zero_entry;
1838 
1839 	/* hash context in */
1840 	if (!req_ctx->first || req_ctx->swinit) {
1841 		map_single_talitos_ptr(dev, &desc->ptr[1],
1842 				       req_ctx->hw_context_size,
1843 				       (char *)req_ctx->hw_context,
1844 				       DMA_TO_DEVICE);
1845 		req_ctx->swinit = 0;
1846 	} else {
1847 		desc->ptr[1] = zero_entry;
1848 		/* Indicate next op is not the first. */
1849 		req_ctx->first = 0;
1850 	}
1851 
1852 	/* HMAC key */
1853 	if (ctx->keylen)
1854 		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1855 				       (char *)&ctx->key, DMA_TO_DEVICE);
1856 	else
1857 		desc->ptr[2] = zero_entry;
1858 
1859 	/*
1860 	 * data in
1861 	 */
1862 	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1863 			      DMA_TO_DEVICE, &desc->ptr[3]);
1864 
1865 	/* fifth DWORD empty */
1866 	desc->ptr[4] = zero_entry;
1867 
1868 	/* hash/HMAC out -or- hash context out */
1869 	if (req_ctx->last)
1870 		map_single_talitos_ptr(dev, &desc->ptr[5],
1871 				       crypto_ahash_digestsize(tfm),
1872 				       areq->result, DMA_FROM_DEVICE);
1873 	else
1874 		map_single_talitos_ptr(dev, &desc->ptr[5],
1875 				       req_ctx->hw_context_size,
1876 				       req_ctx->hw_context, DMA_FROM_DEVICE);
1877 
1878 	/* last DWORD empty */
1879 	desc->ptr[6] = zero_entry;
1880 
1881 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1882 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1883 
1884 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1885 	if (ret != -EINPROGRESS) {
1886 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1887 		kfree(edesc);
1888 	}
1889 	return ret;
1890 }
1891 
1892 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1893 					       unsigned int nbytes)
1894 {
1895 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1896 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1897 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1898 
1899 	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1900 				   nbytes, 0, 0, 0, areq->base.flags, false);
1901 }
1902 
1903 static int ahash_init(struct ahash_request *areq)
1904 {
1905 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1906 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1907 
1908 	/* Initialize the context */
1909 	req_ctx->nbuf = 0;
1910 	req_ctx->first = 1; /* first indicates h/w must init its context */
1911 	req_ctx->swinit = 0; /* assume h/w init of context */
1912 	req_ctx->hw_context_size =
1913 		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1914 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1915 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1916 
1917 	return 0;
1918 }
1919 
1920 /*
1921  * on h/w without explicit sha224 support, we initialize h/w context
1922  * manually with sha224 constants, and tell it to run sha256.
1923  */
1924 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1925 {
1926 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1927 
1928 	ahash_init(areq);
1929 	req_ctx->swinit = 1; /* prevent h/w init with sha256 values */
1930 
1931 	req_ctx->hw_context[0] = SHA224_H0;
1932 	req_ctx->hw_context[1] = SHA224_H1;
1933 	req_ctx->hw_context[2] = SHA224_H2;
1934 	req_ctx->hw_context[3] = SHA224_H3;
1935 	req_ctx->hw_context[4] = SHA224_H4;
1936 	req_ctx->hw_context[5] = SHA224_H5;
1937 	req_ctx->hw_context[6] = SHA224_H6;
1938 	req_ctx->hw_context[7] = SHA224_H7;
1939 
1940 	/* init 64-bit count */
1941 	req_ctx->hw_context[8] = 0;
1942 	req_ctx->hw_context[9] = 0;
1943 
1944 	return 0;
1945 }
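
/*
 * Reference (FIPS 180-4, section 5.3.2): the SHA-224 initial hash values
 * loaded above.  The compression function is then plain SHA-256; the
 * 28-byte result comes from halg.digestsize, not from anything here.
 */
#if 0
	static const u32 sha224_init_state[8] = {
		0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
		0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
	};
#endif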
1946 
1947 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1948 {
1949 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1950 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1951 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1952 	struct talitos_edesc *edesc;
1953 	unsigned int blocksize =
1954 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1955 	unsigned int nbytes_to_hash;
1956 	unsigned int to_hash_later;
1957 	unsigned int nsg;
1958 	bool chained;
1959 
1960 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1961 		/* Buffer up to one whole block */
1962 		sg_copy_to_buffer(areq->src,
1963 				  sg_count(areq->src, nbytes, &chained),
1964 				  req_ctx->buf + req_ctx->nbuf, nbytes);
1965 		req_ctx->nbuf += nbytes;
1966 		return 0;
1967 	}
1968 
1969 	/* At least (blocksize + 1) bytes are available to hash */
1970 	nbytes_to_hash = nbytes + req_ctx->nbuf;
1971 	to_hash_later = nbytes_to_hash & (blocksize - 1);
1972 
1973 	if (req_ctx->last)
1974 		to_hash_later = 0;
1975 	else if (to_hash_later)
1976 		/* There is a partial block. Hash the full block(s) now */
1977 		nbytes_to_hash -= to_hash_later;
1978 	else {
1979 		/* Keep one block buffered */
1980 		nbytes_to_hash -= blocksize;
1981 		to_hash_later = blocksize;
1982 	}
1983 
1984 	/* Chain in any previously buffered data */
1985 	if (req_ctx->nbuf) {
1986 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1987 		sg_init_table(req_ctx->bufsl, nsg);
1988 		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1989 		if (nsg > 1)
1990 			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1991 		req_ctx->psrc = req_ctx->bufsl;
1992 	} else
1993 		req_ctx->psrc = areq->src;
1994 
1995 	if (to_hash_later) {
1996 		int nents = sg_count(areq->src, nbytes, &chained);
1997 
1998 		sg_pcopy_to_buffer(areq->src, nents, req_ctx->bufnext,
1999 				   to_hash_later, nbytes - to_hash_later);
2001 	}
2002 	req_ctx->to_hash_later = to_hash_later;
2003 
2004 	/* Allocate extended descriptor */
2005 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2006 	if (IS_ERR(edesc))
2007 		return PTR_ERR(edesc);
2008 
2009 	edesc->desc.hdr = ctx->desc_hdr_template;
2010 
2011 	/* On last one, request SEC to pad; otherwise continue */
2012 	if (req_ctx->last)
2013 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2014 	else
2015 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2016 
2017 	/* request SEC to INIT hash. */
2018 	if (req_ctx->first && !req_ctx->swinit)
2019 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2020 
2021 	/* When the tfm context has a keylen, it's an HMAC.
2022 	 * A first or last (i.e. not middle) descriptor must request HMAC.
2023 	 */
2024 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2025 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2026 
2027 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
2028 				    ahash_done);
2029 }
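
/*
 * Example (illustrative only): the buffering arithmetic above, worked
 * for a hypothetical sha256 update of 100 bytes with 10 bytes already
 * buffered (blocksize = 64, not the final request).
 */
#if 0
	unsigned int nbuf = 10, nbytes = 100, blocksize = 64;
	unsigned int nbytes_to_hash = nbytes + nbuf;			/* 110 */
	unsigned int to_hash_later = nbytes_to_hash & (blocksize - 1);	/* 46 */

	nbytes_to_hash -= to_hash_later;	/* hash 64 now, carry 46 */
#endif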
2030 
2031 static int ahash_update(struct ahash_request *areq)
2032 {
2033 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2034 
2035 	req_ctx->last = 0;
2036 
2037 	return ahash_process_req(areq, areq->nbytes);
2038 }
2039 
2040 static int ahash_final(struct ahash_request *areq)
2041 {
2042 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2043 
2044 	req_ctx->last = 1;
2045 
2046 	return ahash_process_req(areq, 0);
2047 }
2048 
2049 static int ahash_finup(struct ahash_request *areq)
2050 {
2051 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2052 
2053 	req_ctx->last = 1;
2054 
2055 	return ahash_process_req(areq, areq->nbytes);
2056 }
2057 
2058 static int ahash_digest(struct ahash_request *areq)
2059 {
2060 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2061 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2062 
2063 	ahash->init(areq);
2064 	req_ctx->last = 1;
2065 
2066 	return ahash_process_req(areq, areq->nbytes);
2067 }
2068 
2069 struct keyhash_result {
2070 	struct completion completion;
2071 	int err;
2072 };
2073 
2074 static void keyhash_complete(struct crypto_async_request *req, int err)
2075 {
2076 	struct keyhash_result *res = req->data;
2077 
2078 	if (err == -EINPROGRESS)
2079 		return;
2080 
2081 	res->err = err;
2082 	complete(&res->completion);
2083 }
2084 
2085 static int keyhash(struct crypto_ahash *tfm, const u8 *key,
2086 		   unsigned int keylen, u8 *hash)
2087 {
2088 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2090 	struct scatterlist sg[1];
2091 	struct ahash_request *req;
2092 	struct keyhash_result hresult;
2093 	int ret;
2094 
2095 	init_completion(&hresult.completion);
2096 
2097 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2098 	if (!req)
2099 		return -ENOMEM;
2100 
2101 	/* Keep tfm keylen == 0 during hash of the long key */
2102 	ctx->keylen = 0;
2103 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2104 				   keyhash_complete, &hresult);
2105 
2106 	sg_init_one(&sg[0], key, keylen);
2107 
2108 	ahash_request_set_crypt(req, sg, hash, keylen);
2109 	ret = crypto_ahash_digest(req);
2110 	switch (ret) {
2111 	case 0:
2112 		break;
2113 	case -EINPROGRESS:
2114 	case -EBUSY:
2115 		/* must wait uninterruptibly: freeing the request while the
2116 		 * engine still owns it would be a use-after-free */
2117 		wait_for_completion(&hresult.completion);
2118 		ret = hresult.err;
2119 		break;
2120 	default:
2121 		break;
2122 	}
2123 	ahash_request_free(req);
2124 
2125 	return ret;
2126 }
2127 
2128 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2129 			unsigned int keylen)
2130 {
2131 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2132 	unsigned int blocksize =
2133 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2134 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2135 	unsigned int keysize = keylen;
2136 	u8 hash[SHA512_DIGEST_SIZE];
2137 	int ret;
2138 
2139 	if (keylen <= blocksize) {
		/* ctx->key is only TALITOS_MAX_KEY_SIZE bytes, which can be
		 * smaller than the sha384/sha512 block size, so bound the
		 * copy explicitly */
		if (keysize > sizeof(ctx->key)) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}
2140 		memcpy(ctx->key, key, keysize);
2141 	} else {
2142 		/* Must get the hash of the long key */
2143 		ret = keyhash(tfm, key, keylen, hash);
2144 
2145 		if (ret) {
2146 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2147 			return -EINVAL;
2148 		}
2149 
2150 		keysize = digestsize;
2151 		memcpy(ctx->key, hash, digestsize);
2152 	}
2153 
2154 	ctx->keylen = keysize;
2155 
2156 	return 0;
2157 }
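
/*
 * Example (illustrative only): the RFC 2104 key rule implemented above,
 * instantiated for hmac(sha256) (blocksize 64, digestsize 32).
 */
#if 0
	if (keylen <= 64)
		memcpy(ctx->key, key, keylen);		/* key used as-is */
	else
		keyhash(tfm, key, keylen, ctx->key);	/* 32-byte digest */
#endif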
2158 
2160 struct talitos_alg_template {
2161 	u32 type;
2162 	union {
2163 		struct crypto_alg crypto;
2164 		struct ahash_alg hash;
2165 	} alg;
2166 	__be32 desc_hdr_template;
2167 };
2168 
2169 static struct talitos_alg_template driver_algs[] = {
2170 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2171 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2172 		.alg.crypto = {
2173 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
2174 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
2175 			.cra_blocksize = AES_BLOCK_SIZE,
2176 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2177 			.cra_aead = {
2178 				.ivsize = AES_BLOCK_SIZE,
2179 				.maxauthsize = SHA1_DIGEST_SIZE,
2180 			}
2181 		},
2182 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2183 			             DESC_HDR_SEL0_AESU |
2184 		                     DESC_HDR_MODE0_AESU_CBC |
2185 		                     DESC_HDR_SEL1_MDEUA |
2186 		                     DESC_HDR_MODE1_MDEU_INIT |
2187 		                     DESC_HDR_MODE1_MDEU_PAD |
2188 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2189 	},
2190 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2191 		.alg.crypto = {
2192 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
2193 			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
2194 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2195 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2196 			.cra_aead = {
2197 				.ivsize = DES3_EDE_BLOCK_SIZE,
2198 				.maxauthsize = SHA1_DIGEST_SIZE,
2199 			}
2200 		},
2201 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2202 			             DESC_HDR_SEL0_DEU |
2203 		                     DESC_HDR_MODE0_DEU_CBC |
2204 		                     DESC_HDR_MODE0_DEU_3DES |
2205 		                     DESC_HDR_SEL1_MDEUA |
2206 		                     DESC_HDR_MODE1_MDEU_INIT |
2207 		                     DESC_HDR_MODE1_MDEU_PAD |
2208 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2209 	},
2210 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2211 		.alg.crypto = {
2212 			.cra_name = "authenc(hmac(sha224),cbc(aes))",
2213 			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
2214 			.cra_blocksize = AES_BLOCK_SIZE,
2215 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2216 			.cra_aead = {
2217 				.ivsize = AES_BLOCK_SIZE,
2218 				.maxauthsize = SHA224_DIGEST_SIZE,
2219 			}
2220 		},
2221 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2222 				     DESC_HDR_SEL0_AESU |
2223 				     DESC_HDR_MODE0_AESU_CBC |
2224 				     DESC_HDR_SEL1_MDEUA |
2225 				     DESC_HDR_MODE1_MDEU_INIT |
2226 				     DESC_HDR_MODE1_MDEU_PAD |
2227 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2228 	},
2229 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2230 		.alg.crypto = {
2231 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
2232 			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
2233 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2234 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2235 			.cra_aead = {
2236 				.ivsize = DES3_EDE_BLOCK_SIZE,
2237 				.maxauthsize = SHA224_DIGEST_SIZE,
2238 			}
2239 		},
2240 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2241 			             DESC_HDR_SEL0_DEU |
2242 		                     DESC_HDR_MODE0_DEU_CBC |
2243 		                     DESC_HDR_MODE0_DEU_3DES |
2244 		                     DESC_HDR_SEL1_MDEUA |
2245 		                     DESC_HDR_MODE1_MDEU_INIT |
2246 		                     DESC_HDR_MODE1_MDEU_PAD |
2247 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2248 	},
2249 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2250 		.alg.crypto = {
2251 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
2252 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2253 			.cra_blocksize = AES_BLOCK_SIZE,
2254 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2255 			.cra_aead = {
2256 				.ivsize = AES_BLOCK_SIZE,
2257 				.maxauthsize = SHA256_DIGEST_SIZE,
2258 			}
2259 		},
2260 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2261 			             DESC_HDR_SEL0_AESU |
2262 		                     DESC_HDR_MODE0_AESU_CBC |
2263 		                     DESC_HDR_SEL1_MDEUA |
2264 		                     DESC_HDR_MODE1_MDEU_INIT |
2265 		                     DESC_HDR_MODE1_MDEU_PAD |
2266 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2267 	},
2268 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2269 		.alg.crypto = {
2270 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2271 			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2272 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2273 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2274 			.cra_aead = {
2275 				.ivsize = DES3_EDE_BLOCK_SIZE,
2276 				.maxauthsize = SHA256_DIGEST_SIZE,
2277 			}
2278 		},
2279 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2280 			             DESC_HDR_SEL0_DEU |
2281 		                     DESC_HDR_MODE0_DEU_CBC |
2282 		                     DESC_HDR_MODE0_DEU_3DES |
2283 		                     DESC_HDR_SEL1_MDEUA |
2284 		                     DESC_HDR_MODE1_MDEU_INIT |
2285 		                     DESC_HDR_MODE1_MDEU_PAD |
2286 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2287 	},
2288 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2289 		.alg.crypto = {
2290 			.cra_name = "authenc(hmac(sha384),cbc(aes))",
2291 			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2292 			.cra_blocksize = AES_BLOCK_SIZE,
2293 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2294 			.cra_aead = {
2295 				.ivsize = AES_BLOCK_SIZE,
2296 				.maxauthsize = SHA384_DIGEST_SIZE,
2297 			}
2298 		},
2299 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2300 			             DESC_HDR_SEL0_AESU |
2301 		                     DESC_HDR_MODE0_AESU_CBC |
2302 		                     DESC_HDR_SEL1_MDEUB |
2303 		                     DESC_HDR_MODE1_MDEU_INIT |
2304 		                     DESC_HDR_MODE1_MDEU_PAD |
2305 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2306 	},
2307 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2308 		.alg.crypto = {
2309 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2310 			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2311 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2312 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2313 			.cra_aead = {
2314 				.ivsize = DES3_EDE_BLOCK_SIZE,
2315 				.maxauthsize = SHA384_DIGEST_SIZE,
2316 			}
2317 		},
2318 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2319 			             DESC_HDR_SEL0_DEU |
2320 		                     DESC_HDR_MODE0_DEU_CBC |
2321 		                     DESC_HDR_MODE0_DEU_3DES |
2322 		                     DESC_HDR_SEL1_MDEUB |
2323 		                     DESC_HDR_MODE1_MDEU_INIT |
2324 		                     DESC_HDR_MODE1_MDEU_PAD |
2325 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2326 	},
2327 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2328 		.alg.crypto = {
2329 			.cra_name = "authenc(hmac(sha512),cbc(aes))",
2330 			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2331 			.cra_blocksize = AES_BLOCK_SIZE,
2332 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2333 			.cra_aead = {
2334 				.ivsize = AES_BLOCK_SIZE,
2335 				.maxauthsize = SHA512_DIGEST_SIZE,
2336 			}
2337 		},
2338 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2339 			             DESC_HDR_SEL0_AESU |
2340 		                     DESC_HDR_MODE0_AESU_CBC |
2341 		                     DESC_HDR_SEL1_MDEUB |
2342 		                     DESC_HDR_MODE1_MDEU_INIT |
2343 		                     DESC_HDR_MODE1_MDEU_PAD |
2344 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2345 	},
2346 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2347 		.alg.crypto = {
2348 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2349 			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2350 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2351 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2352 			.cra_aead = {
2353 				.ivsize = DES3_EDE_BLOCK_SIZE,
2354 				.maxauthsize = SHA512_DIGEST_SIZE,
2355 			}
2356 		},
2357 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2358 			             DESC_HDR_SEL0_DEU |
2359 		                     DESC_HDR_MODE0_DEU_CBC |
2360 		                     DESC_HDR_MODE0_DEU_3DES |
2361 		                     DESC_HDR_SEL1_MDEUB |
2362 		                     DESC_HDR_MODE1_MDEU_INIT |
2363 		                     DESC_HDR_MODE1_MDEU_PAD |
2364 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2365 	},
2366 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2367 		.alg.crypto = {
2368 			.cra_name = "authenc(hmac(md5),cbc(aes))",
2369 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2370 			.cra_blocksize = AES_BLOCK_SIZE,
2371 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2372 			.cra_aead = {
2373 				.ivsize = AES_BLOCK_SIZE,
2374 				.maxauthsize = MD5_DIGEST_SIZE,
2375 			}
2376 		},
2377 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2378 			             DESC_HDR_SEL0_AESU |
2379 		                     DESC_HDR_MODE0_AESU_CBC |
2380 		                     DESC_HDR_SEL1_MDEUA |
2381 		                     DESC_HDR_MODE1_MDEU_INIT |
2382 		                     DESC_HDR_MODE1_MDEU_PAD |
2383 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2384 	},
2385 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2386 		.alg.crypto = {
2387 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2388 			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2389 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2390 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2391 			.cra_aead = {
2392 				.ivsize = DES3_EDE_BLOCK_SIZE,
2393 				.maxauthsize = MD5_DIGEST_SIZE,
2394 			}
2395 		},
2396 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2397 			             DESC_HDR_SEL0_DEU |
2398 		                     DESC_HDR_MODE0_DEU_CBC |
2399 		                     DESC_HDR_MODE0_DEU_3DES |
2400 		                     DESC_HDR_SEL1_MDEUA |
2401 		                     DESC_HDR_MODE1_MDEU_INIT |
2402 		                     DESC_HDR_MODE1_MDEU_PAD |
2403 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2404 	},
2405 	/* ABLKCIPHER algorithms. */
2406 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2407 		.alg.crypto = {
2408 			.cra_name = "cbc(aes)",
2409 			.cra_driver_name = "cbc-aes-talitos",
2410 			.cra_blocksize = AES_BLOCK_SIZE,
2411 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2412 				     CRYPTO_ALG_ASYNC,
2413 			.cra_ablkcipher = {
2414 				.min_keysize = AES_MIN_KEY_SIZE,
2415 				.max_keysize = AES_MAX_KEY_SIZE,
2416 				.ivsize = AES_BLOCK_SIZE,
2417 			}
2418 		},
2419 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2420 				     DESC_HDR_SEL0_AESU |
2421 				     DESC_HDR_MODE0_AESU_CBC,
2422 	},
2423 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2424 		.alg.crypto = {
2425 			.cra_name = "cbc(des3_ede)",
2426 			.cra_driver_name = "cbc-3des-talitos",
2427 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2428 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2429 				     CRYPTO_ALG_ASYNC,
2430 			.cra_ablkcipher = {
2431 				.min_keysize = DES3_EDE_KEY_SIZE,
2432 				.max_keysize = DES3_EDE_KEY_SIZE,
2433 				.ivsize = DES3_EDE_BLOCK_SIZE,
2434 			}
2435 		},
2436 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2437 			             DESC_HDR_SEL0_DEU |
2438 		                     DESC_HDR_MODE0_DEU_CBC |
2439 		                     DESC_HDR_MODE0_DEU_3DES,
2440 	},
2441 	/* AHASH algorithms. */
2442 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2443 		.alg.hash = {
2444 			.halg.digestsize = MD5_DIGEST_SIZE,
2445 			.halg.base = {
2446 				.cra_name = "md5",
2447 				.cra_driver_name = "md5-talitos",
2448 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2449 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2450 					     CRYPTO_ALG_ASYNC,
2451 			}
2452 		},
2453 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2454 				     DESC_HDR_SEL0_MDEUA |
2455 				     DESC_HDR_MODE0_MDEU_MD5,
2456 	},
2457 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2458 		.alg.hash = {
2459 			.halg.digestsize = SHA1_DIGEST_SIZE,
2460 			.halg.base = {
2461 				.cra_name = "sha1",
2462 				.cra_driver_name = "sha1-talitos",
2463 				.cra_blocksize = SHA1_BLOCK_SIZE,
2464 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2465 					     CRYPTO_ALG_ASYNC,
2466 			}
2467 		},
2468 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2469 				     DESC_HDR_SEL0_MDEUA |
2470 				     DESC_HDR_MODE0_MDEU_SHA1,
2471 	},
2472 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2473 		.alg.hash = {
2474 			.halg.digestsize = SHA224_DIGEST_SIZE,
2475 			.halg.base = {
2476 				.cra_name = "sha224",
2477 				.cra_driver_name = "sha224-talitos",
2478 				.cra_blocksize = SHA224_BLOCK_SIZE,
2479 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2480 					     CRYPTO_ALG_ASYNC,
2481 			}
2482 		},
2483 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2484 				     DESC_HDR_SEL0_MDEUA |
2485 				     DESC_HDR_MODE0_MDEU_SHA224,
2486 	},
2487 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2488 		.alg.hash = {
2489 			.halg.digestsize = SHA256_DIGEST_SIZE,
2490 			.halg.base = {
2491 				.cra_name = "sha256",
2492 				.cra_driver_name = "sha256-talitos",
2493 				.cra_blocksize = SHA256_BLOCK_SIZE,
2494 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2495 					     CRYPTO_ALG_ASYNC,
2496 			}
2497 		},
2498 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2499 				     DESC_HDR_SEL0_MDEUA |
2500 				     DESC_HDR_MODE0_MDEU_SHA256,
2501 	},
2502 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2503 		.alg.hash = {
2504 			.halg.digestsize = SHA384_DIGEST_SIZE,
2505 			.halg.base = {
2506 				.cra_name = "sha384",
2507 				.cra_driver_name = "sha384-talitos",
2508 				.cra_blocksize = SHA384_BLOCK_SIZE,
2509 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2510 					     CRYPTO_ALG_ASYNC,
2511 			}
2512 		},
2513 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2514 				     DESC_HDR_SEL0_MDEUB |
2515 				     DESC_HDR_MODE0_MDEUB_SHA384,
2516 	},
2517 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2518 		.alg.hash = {
2519 			.halg.digestsize = SHA512_DIGEST_SIZE,
2520 			.halg.base = {
2521 				.cra_name = "sha512",
2522 				.cra_driver_name = "sha512-talitos",
2523 				.cra_blocksize = SHA512_BLOCK_SIZE,
2524 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2525 					     CRYPTO_ALG_ASYNC,
2526 			}
2527 		},
2528 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2529 				     DESC_HDR_SEL0_MDEUB |
2530 				     DESC_HDR_MODE0_MDEUB_SHA512,
2531 	},
2532 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2533 		.alg.hash = {
2534 			.halg.digestsize = MD5_DIGEST_SIZE,
2535 			.halg.base = {
2536 				.cra_name = "hmac(md5)",
2537 				.cra_driver_name = "hmac-md5-talitos",
2538 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2539 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2540 					     CRYPTO_ALG_ASYNC,
2541 			}
2542 		},
2543 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2544 				     DESC_HDR_SEL0_MDEUA |
2545 				     DESC_HDR_MODE0_MDEU_MD5,
2546 	},
2547 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2548 		.alg.hash = {
2549 			.halg.digestsize = SHA1_DIGEST_SIZE,
2550 			.halg.base = {
2551 				.cra_name = "hmac(sha1)",
2552 				.cra_driver_name = "hmac-sha1-talitos",
2553 				.cra_blocksize = SHA1_BLOCK_SIZE,
2554 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2555 					     CRYPTO_ALG_ASYNC,
2556 			}
2557 		},
2558 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2559 				     DESC_HDR_SEL0_MDEUA |
2560 				     DESC_HDR_MODE0_MDEU_SHA1,
2561 	},
2562 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2563 		.alg.hash = {
2564 			.halg.digestsize = SHA224_DIGEST_SIZE,
2565 			.halg.base = {
2566 				.cra_name = "hmac(sha224)",
2567 				.cra_driver_name = "hmac-sha224-talitos",
2568 				.cra_blocksize = SHA224_BLOCK_SIZE,
2569 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2570 					     CRYPTO_ALG_ASYNC,
2571 			}
2572 		},
2573 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2574 				     DESC_HDR_SEL0_MDEUA |
2575 				     DESC_HDR_MODE0_MDEU_SHA224,
2576 	},
2577 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2578 		.alg.hash = {
2579 			.halg.digestsize = SHA256_DIGEST_SIZE,
2580 			.halg.base = {
2581 				.cra_name = "hmac(sha256)",
2582 				.cra_driver_name = "hmac-sha256-talitos",
2583 				.cra_blocksize = SHA256_BLOCK_SIZE,
2584 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2585 					     CRYPTO_ALG_ASYNC,
2586 			}
2587 		},
2588 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2589 				     DESC_HDR_SEL0_MDEUA |
2590 				     DESC_HDR_MODE0_MDEU_SHA256,
2591 	},
2592 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2593 		.alg.hash = {
2594 			.halg.digestsize = SHA384_DIGEST_SIZE,
2595 			.halg.base = {
2596 				.cra_name = "hmac(sha384)",
2597 				.cra_driver_name = "hmac-sha384-talitos",
2598 				.cra_blocksize = SHA384_BLOCK_SIZE,
2599 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2600 					     CRYPTO_ALG_ASYNC,
2601 			}
2602 		},
2603 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2604 				     DESC_HDR_SEL0_MDEUB |
2605 				     DESC_HDR_MODE0_MDEUB_SHA384,
2606 	},
2607 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2608 		.alg.hash = {
2609 			.halg.digestsize = SHA512_DIGEST_SIZE,
2610 			.halg.base = {
2611 				.cra_name = "hmac(sha512)",
2612 				.cra_driver_name = "hmac-sha512-talitos",
2613 				.cra_blocksize = SHA512_BLOCK_SIZE,
2614 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2615 					     CRYPTO_ALG_ASYNC,
2616 			}
2617 		},
2618 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2619 				     DESC_HDR_SEL0_MDEUB |
2620 				     DESC_HDR_MODE0_MDEUB_SHA512,
2621 	}
2622 };
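
/*
 * Example (illustrative only): the cra_name strings above are what users
 * select, e.g. from userspace through AF_ALG (sketch below; "key" is
 * hypothetical and error handling is omitted).  Whether this driver
 * actually services the request depends on it winning the cra_priority
 * comparison.
 */
#if 0
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",	/* cra_name above */
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
	int opfd = accept(tfmfd, NULL, 0);
	/* then sendmsg() with ALG_SET_OP/ALG_SET_IV cmsgs and read() */
#endif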
2623 
2624 struct talitos_crypto_alg {
2625 	struct list_head entry;
2626 	struct device *dev;
2627 	struct talitos_alg_template algt;
2628 };
2629 
2630 static int talitos_cra_init(struct crypto_tfm *tfm)
2631 {
2632 	struct crypto_alg *alg = tfm->__crt_alg;
2633 	struct talitos_crypto_alg *talitos_alg;
2634 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2635 	struct talitos_private *priv;
2636 
2637 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2638 		talitos_alg = container_of(__crypto_ahash_alg(alg),
2639 					   struct talitos_crypto_alg,
2640 					   algt.alg.hash);
2641 	else
2642 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2643 					   algt.alg.crypto);
2644 
2645 	/* update context with ptr to dev */
2646 	ctx->dev = talitos_alg->dev;
2647 
2648 	/* assign SEC channel to tfm in round-robin fashion */
2649 	priv = dev_get_drvdata(ctx->dev);
2650 	ctx->ch = atomic_inc_return(&priv->last_chan) &
2651 		  (priv->num_channels - 1);
2652 
2653 	/* copy descriptor header template value */
2654 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2655 
2656 	/* select done notification */
2657 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2658 
2659 	return 0;
2660 }
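
/*
 * Example (illustrative only): the round-robin mask above relies on
 * num_channels being a power of two, which talitos_probe() enforces.
 * With four channels, successive tfm inits see last_chan = 1, 2, 3, 4,
 * 5, ... and get ch = 1, 2, 3, 0, 1, ...
 */
#if 0
	int num_channels = 4;	/* hypothetical */
	int ch = atomic_inc_return(&last_chan) & (num_channels - 1);
#endif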
2661 
2662 static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2663 {
2664 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2665 
2666 	talitos_cra_init(tfm);
2667 
2668 	/* random first IV */
2669 	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2670 
2671 	return 0;
2672 }
2673 
2674 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2675 {
2676 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2677 
2678 	talitos_cra_init(tfm);
2679 
2680 	ctx->keylen = 0;
2681 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2682 				 sizeof(struct talitos_ahash_req_ctx));
2683 
2684 	return 0;
2685 }
2686 
2687 /*
2688  * given the alg's descriptor header template, determine whether descriptor
2689  * type and primary/secondary execution units required match the hw
2690  * capabilities description provided in the device tree node.
2691  */
2692 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2693 {
2694 	struct talitos_private *priv = dev_get_drvdata(dev);
2695 	int ret;
2696 
2697 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2698 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2699 
2700 	if (SECONDARY_EU(desc_hdr_template))
2701 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2702 		              & priv->exec_units);
2703 
2704 	return ret;
2705 }
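
/*
 * Example (illustrative only, hypothetical values): if a template
 * decodes to descriptor type 2 and primary EU 3, the alg is supported
 * iff bit 2 of fsl,descriptor-types-mask and bit 3 of
 * fsl,exec-units-mask are both set in the device tree node.
 */
#if 0
	u32 desc_types = 0x3ab0ebf;		/* hypothetical DT mask */
	u32 exec_units = 0xfe;			/* hypothetical DT mask */
	bool supported = (desc_types & (1 << 2)) && (exec_units & (1 << 3));
#endif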
2706 
2707 static int talitos_remove(struct platform_device *ofdev)
2708 {
2709 	struct device *dev = &ofdev->dev;
2710 	struct talitos_private *priv = dev_get_drvdata(dev);
2711 	struct talitos_crypto_alg *t_alg, *n;
2712 	int i;
2713 
2714 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2715 		switch (t_alg->algt.type) {
2716 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2717 		case CRYPTO_ALG_TYPE_AEAD:
2718 			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2719 			break;
2720 		case CRYPTO_ALG_TYPE_AHASH:
2721 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2722 			break;
2723 		}
2724 		list_del(&t_alg->entry);
2725 		kfree(t_alg);
2726 	}
2727 
2728 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2729 		talitos_unregister_rng(dev);
2730 
2731 	for (i = 0; i < priv->num_channels; i++)
2732 		kfree(priv->chan[i].fifo);
2733 
2734 	kfree(priv->chan);
2735 
2736 	for (i = 0; i < 2; i++)
2737 		if (priv->irq[i]) {
2738 			free_irq(priv->irq[i], dev);
2739 			irq_dispose_mapping(priv->irq[i]);
2740 		}
2741 
2742 	tasklet_kill(&priv->done_task[0]);
2743 	if (priv->irq[1])
2744 		tasklet_kill(&priv->done_task[1]);
2745 
2746 	iounmap(priv->reg);
2747 
2748 	kfree(priv);
2749 
2750 	return 0;
2751 }
2752 
2753 static struct talitos_crypto_alg *
2754 talitos_alg_alloc(struct device *dev, struct talitos_alg_template *template)
2756 {
2757 	struct talitos_private *priv = dev_get_drvdata(dev);
2758 	struct talitos_crypto_alg *t_alg;
2759 	struct crypto_alg *alg;
2760 
2761 	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2762 	if (!t_alg)
2763 		return ERR_PTR(-ENOMEM);
2764 
2765 	t_alg->algt = *template;
2766 
2767 	switch (t_alg->algt.type) {
2768 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2769 		alg = &t_alg->algt.alg.crypto;
2770 		alg->cra_init = talitos_cra_init;
2771 		alg->cra_type = &crypto_ablkcipher_type;
2772 		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2773 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2774 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2775 		alg->cra_ablkcipher.geniv = "eseqiv";
2776 		break;
2777 	case CRYPTO_ALG_TYPE_AEAD:
2778 		alg = &t_alg->algt.alg.crypto;
2779 		alg->cra_init = talitos_cra_init_aead;
2780 		alg->cra_type = &crypto_aead_type;
2781 		alg->cra_aead.setkey = aead_setkey;
2782 		alg->cra_aead.setauthsize = aead_setauthsize;
2783 		alg->cra_aead.encrypt = aead_encrypt;
2784 		alg->cra_aead.decrypt = aead_decrypt;
2785 		alg->cra_aead.givencrypt = aead_givencrypt;
2786 		alg->cra_aead.geniv = "<built-in>";
2787 		break;
2788 	case CRYPTO_ALG_TYPE_AHASH:
2789 		alg = &t_alg->algt.alg.hash.halg.base;
2790 		alg->cra_init = talitos_cra_init_ahash;
2791 		alg->cra_type = &crypto_ahash_type;
2792 		t_alg->algt.alg.hash.init = ahash_init;
2793 		t_alg->algt.alg.hash.update = ahash_update;
2794 		t_alg->algt.alg.hash.final = ahash_final;
2795 		t_alg->algt.alg.hash.finup = ahash_finup;
2796 		t_alg->algt.alg.hash.digest = ahash_digest;
2797 		t_alg->algt.alg.hash.setkey = ahash_setkey;
2798 
2799 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2800 		    !strncmp(alg->cra_name, "hmac", 4)) {
2801 			kfree(t_alg);
2802 			return ERR_PTR(-ENOTSUPP);
2803 		}
2804 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2805 		    (!strcmp(alg->cra_name, "sha224") ||
2806 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
2807 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2808 			t_alg->algt.desc_hdr_template =
2809 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2810 					DESC_HDR_SEL0_MDEUA |
2811 					DESC_HDR_MODE0_MDEU_SHA256;
2812 		}
2813 		break;
2814 	default:
2815 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
2816 		return ERR_PTR(-EINVAL);
2817 	}
2818 
2819 	alg->cra_module = THIS_MODULE;
2820 	alg->cra_priority = TALITOS_CRA_PRIORITY;
2821 	alg->cra_alignmask = 0;
2822 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2823 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2824 
2825 	t_alg->dev = dev;
2826 
2827 	return t_alg;
2828 }
2829 
2830 static int talitos_probe_irq(struct platform_device *ofdev)
2831 {
2832 	struct device *dev = &ofdev->dev;
2833 	struct device_node *np = ofdev->dev.of_node;
2834 	struct talitos_private *priv = dev_get_drvdata(dev);
2835 	int err;
2836 	bool is_sec1 = has_ftr_sec1(priv);
2837 
2838 	priv->irq[0] = irq_of_parse_and_map(np, 0);
2839 	if (!priv->irq[0]) {
2840 		dev_err(dev, "failed to map irq\n");
2841 		return -EINVAL;
2842 	}
2843 	if (is_sec1) {
2844 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2845 				  dev_driver_string(dev), dev);
2846 		goto primary_out;
2847 	}
2848 
2849 	priv->irq[1] = irq_of_parse_and_map(np, 1);
2850 
2851 	/* no secondary irq line: one handler covers all four channels */
2852 	if (!priv->irq[1]) {
2853 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
2854 				  dev_driver_string(dev), dev);
2855 		goto primary_out;
2856 	}
2857 
2858 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
2859 			  dev_driver_string(dev), dev);
2860 	if (err)
2861 		goto primary_out;
2862 
2863 	/* get the secondary irq line */
2864 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
2865 			  dev_driver_string(dev), dev);
2866 	if (err) {
2867 		dev_err(dev, "failed to request secondary irq\n");
2868 		irq_dispose_mapping(priv->irq[1]);
2869 		priv->irq[1] = 0;
2870 	}
2871 
2872 	return err;
2873 
2874 primary_out:
2875 	if (err) {
2876 		dev_err(dev, "failed to request primary irq\n");
2877 		irq_dispose_mapping(priv->irq[0]);
2878 		priv->irq[0] = 0;
2879 	}
2880 
2881 	return err;
2882 }
2883 
2884 static int talitos_probe(struct platform_device *ofdev)
2885 {
2886 	struct device *dev = &ofdev->dev;
2887 	struct device_node *np = ofdev->dev.of_node;
2888 	struct talitos_private *priv;
2889 	const unsigned int *prop;
2890 	int i, err;
2891 	int stride;
2892 
2893 	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2894 	if (!priv)
2895 		return -ENOMEM;
2896 
2897 	INIT_LIST_HEAD(&priv->alg_list);
2898 
2899 	dev_set_drvdata(dev, priv);
2900 
2901 	priv->ofdev = ofdev;
2902 
2903 	spin_lock_init(&priv->reg_lock);
2904 
2905 	priv->reg = of_iomap(np, 0);
2906 	if (!priv->reg) {
2907 		dev_err(dev, "failed to of_iomap\n");
2908 		err = -ENOMEM;
2909 		goto err_out;
2910 	}
2911 
2912 	/* get SEC version capabilities from device tree */
2913 	prop = of_get_property(np, "fsl,num-channels", NULL);
2914 	if (prop)
2915 		priv->num_channels = *prop;
2916 
2917 	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2918 	if (prop)
2919 		priv->chfifo_len = *prop;
2920 
2921 	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2922 	if (prop)
2923 		priv->exec_units = *prop;
2924 
2925 	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2926 	if (prop)
2927 		priv->desc_types = *prop;
2928 
2929 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2930 	    !priv->exec_units || !priv->desc_types) {
2931 		dev_err(dev, "invalid property data in device tree node\n");
2932 		err = -EINVAL;
2933 		goto err_out;
2934 	}
2935 
2936 	if (of_device_is_compatible(np, "fsl,sec3.0"))
2937 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2938 
2939 	if (of_device_is_compatible(np, "fsl,sec2.1"))
2940 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2941 				  TALITOS_FTR_SHA224_HWINIT |
2942 				  TALITOS_FTR_HMAC_OK;
2943 
2944 	if (of_device_is_compatible(np, "fsl,sec1.0"))
2945 		priv->features |= TALITOS_FTR_SEC1;
2946 
2947 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
2948 		priv->reg_deu = priv->reg + TALITOS12_DEU;
2949 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
2950 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2951 		stride = TALITOS1_CH_STRIDE;
2952 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2953 		priv->reg_deu = priv->reg + TALITOS10_DEU;
2954 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
2955 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2956 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2957 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2958 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2959 		stride = TALITOS1_CH_STRIDE;
2960 	} else {
2961 		priv->reg_deu = priv->reg + TALITOS2_DEU;
2962 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
2963 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2964 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2965 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2966 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2967 		priv->reg_keu = priv->reg + TALITOS2_KEU;
2968 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2969 		stride = TALITOS2_CH_STRIDE;
2970 	}
2971 
2972 	err = talitos_probe_irq(ofdev);
2973 	if (err)
2974 		goto err_out;
2975 
2976 	if (of_device_is_compatible(np, "fsl,sec1.0")) {
2977 		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2978 			     (unsigned long)dev);
2979 	} else {
2980 		if (!priv->irq[1]) {
2981 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2982 				     (unsigned long)dev);
2983 		} else {
2984 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2985 				     (unsigned long)dev);
2986 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
2987 				     (unsigned long)dev);
2988 		}
2989 	}
2990 
2991 	priv->chan = kcalloc(priv->num_channels,
2992 			     sizeof(struct talitos_channel), GFP_KERNEL);
2993 	if (!priv->chan) {
2994 		dev_err(dev, "failed to allocate channel management space\n");
2995 		err = -ENOMEM;
2996 		goto err_out;
2997 	}
2998 
2999 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3000 
3001 	for (i = 0; i < priv->num_channels; i++) {
3002 		priv->chan[i].reg = priv->reg + stride * (i + 1);
3003 		if (!priv->irq[1] || !(i & 1))
3004 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3005 
3006 		spin_lock_init(&priv->chan[i].head_lock);
3007 		spin_lock_init(&priv->chan[i].tail_lock);
3008 
3009 		priv->chan[i].fifo = kcalloc(priv->fifo_len,
3010 					     sizeof(struct talitos_request),
					     GFP_KERNEL);
3011 		if (!priv->chan[i].fifo) {
3012 			dev_err(dev, "failed to allocate request fifo %d\n", i);
3013 			err = -ENOMEM;
3014 			goto err_out;
3015 		}
3016 
3017 		atomic_set(&priv->chan[i].submit_count,
3018 			   -(priv->chfifo_len - 1));
3019 	}
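
	/*
	 * Note (illustrative): the negative starting count sizes each
	 * channel at chfifo_len - 1 in-flight requests: the submit path
	 * increments the counter and backs off once the result goes
	 * positive, e.g. with chfifo_len = 24 the counter runs from -23
	 * and the 24th concurrent request is refused.
	 */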
3020 
3021 	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err)
		goto err_out;
3022 
3023 	/* reset and initialize the h/w */
3024 	err = init_device(dev);
3025 	if (err) {
3026 		dev_err(dev, "failed to initialize device\n");
3027 		goto err_out;
3028 	}
3029 
3030 	/* register the RNG, if available */
3031 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3032 		err = talitos_register_rng(dev);
3033 		if (err) {
3034 			dev_err(dev, "failed to register hwrng: %d\n", err);
3035 			goto err_out;
3036 		}
3037 		dev_info(dev, "hwrng\n");
3038 	}
3039 
3040 	/* register crypto algorithms the device supports */
3041 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3042 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3043 			struct talitos_crypto_alg *t_alg;
3044 			char *name = NULL;
3045 
3046 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3047 			if (IS_ERR(t_alg)) {
3048 				err = PTR_ERR(t_alg);
3049 				if (err == -ENOTSUPP)
3050 					continue;
3051 				goto err_out;
3052 			}
3053 
3054 			switch (t_alg->algt.type) {
3055 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
3056 			case CRYPTO_ALG_TYPE_AEAD:
3057 				err = crypto_register_alg(
3058 						&t_alg->algt.alg.crypto);
3059 				name = t_alg->algt.alg.crypto.cra_driver_name;
3060 				break;
3061 			case CRYPTO_ALG_TYPE_AHASH:
3062 				err = crypto_register_ahash(
3063 						&t_alg->algt.alg.hash);
3064 				name =
3065 				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
3066 				break;
3067 			}
3068 			if (err) {
3069 				dev_err(dev, "%s alg registration failed\n",
3070 					name);
3071 				kfree(t_alg);
3072 			} else {
3073 				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
3074 		}
3075 	}
3076 	if (!list_empty(&priv->alg_list))
3077 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3078 			 (char *)of_get_property(np, "compatible", NULL));
3079 
3080 	return 0;
3081 
3082 err_out:
3083 	talitos_remove(ofdev);
3084 
3085 	return err;
3086 }
3087 
3088 static const struct of_device_id talitos_match[] = {
3089 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3090 	{
3091 		.compatible = "fsl,sec1.0",
3092 	},
3093 #endif
3094 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3095 	{
3096 		.compatible = "fsl,sec2.0",
3097 	},
3098 #endif
3099 	{},
3100 };
3101 MODULE_DEVICE_TABLE(of, talitos_match);
3102 
3103 static struct platform_driver talitos_driver = {
3104 	.driver = {
3105 		.name = "talitos",
3106 		.of_match_table = talitos_match,
3107 	},
3108 	.probe = talitos_probe,
3109 	.remove = talitos_remove,
3110 };
3111 
3112 module_platform_driver(talitos_driver);
3113 
3114 MODULE_LICENSE("GPL");
3115 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3116 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3117