xref: /linux/drivers/crypto/talitos.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * talitos - Freescale Integrated Security Engine (SEC) device driver
4  *
5  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6  *
7  * Scatterlist Crypto API glue code copied from files with the following:
8  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9  *
10  * Crypto algorithm registration code copied from hifn driver:
11  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12  * All rights reserved.
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <linux/platform_device.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/io.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
30 
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/internal/des.h>
34 #include <crypto/sha1.h>
35 #include <crypto/sha2.h>
36 #include <crypto/md5.h>
37 #include <crypto/internal/aead.h>
38 #include <crypto/authenc.h>
39 #include <crypto/internal/skcipher.h>
40 #include <crypto/hash.h>
41 #include <crypto/internal/hash.h>
42 #include <crypto/scatterwalk.h>
43 
44 #include "talitos.h"
45 
46 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
47 			   unsigned int len, bool is_sec1)
48 {
49 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
50 	if (is_sec1) {
51 		ptr->len1 = cpu_to_be16(len);
52 	} else {
53 		ptr->len = cpu_to_be16(len);
54 		ptr->eptr = upper_32_bits(dma_addr);
55 	}
56 }
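/*
 * Editorial sketch (not part of the driver): on SEC2+ a 36-bit bus
 * address is split across the 32-bit ptr word and the eptr extension
 * byte. A minimal, runnable host-side illustration of the same
 * lower_32_bits()/upper_32_bits() split, using a stand-in address:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x912345678ULL;		/* hypothetical 36-bit address */
	uint32_t ptr  = (uint32_t)dma_addr;		/* lower_32_bits(dma_addr) */
	uint32_t eptr = (uint32_t)(dma_addr >> 32);	/* upper_32_bits(dma_addr) */

	printf("ptr=0x%08x eptr=0x%x\n", ptr, eptr);	/* ptr=0x12345678 eptr=0x9 */
	return 0;
}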
57 
58 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
59 			     struct talitos_ptr *src_ptr, bool is_sec1)
60 {
61 	dst_ptr->ptr = src_ptr->ptr;
62 	if (is_sec1) {
63 		dst_ptr->len1 = src_ptr->len1;
64 	} else {
65 		dst_ptr->len = src_ptr->len;
66 		dst_ptr->eptr = src_ptr->eptr;
67 	}
68 }
69 
70 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
71 					   bool is_sec1)
72 {
73 	if (is_sec1)
74 		return be16_to_cpu(ptr->len1);
75 	else
76 		return be16_to_cpu(ptr->len);
77 }
78 
79 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
80 				   bool is_sec1)
81 {
82 	if (!is_sec1)
83 		ptr->j_extent = val;
84 }
85 
86 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
87 {
88 	if (!is_sec1)
89 		ptr->j_extent |= val;
90 }
91 
92 /*
93  * map virtual single (contiguous) pointer to h/w descriptor pointer
94  */
95 static void __map_single_talitos_ptr(struct device *dev,
96 				     struct talitos_ptr *ptr,
97 				     unsigned int len, void *data,
98 				     enum dma_data_direction dir,
99 				     unsigned long attrs)
100 {
101 	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
102 	struct talitos_private *priv = dev_get_drvdata(dev);
103 	bool is_sec1 = has_ftr_sec1(priv);
104 
105 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
106 }
107 
108 static void map_single_talitos_ptr(struct device *dev,
109 				   struct talitos_ptr *ptr,
110 				   unsigned int len, void *data,
111 				   enum dma_data_direction dir)
112 {
113 	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
114 }
115 
116 static void map_single_talitos_ptr_nosync(struct device *dev,
117 					  struct talitos_ptr *ptr,
118 					  unsigned int len, void *data,
119 					  enum dma_data_direction dir)
120 {
121 	__map_single_talitos_ptr(dev, ptr, len, data, dir,
122 				 DMA_ATTR_SKIP_CPU_SYNC);
123 }
124 
125 /*
126  * unmap bus single (contiguous) h/w descriptor pointer
127  */
128 static void unmap_single_talitos_ptr(struct device *dev,
129 				     struct talitos_ptr *ptr,
130 				     enum dma_data_direction dir)
131 {
132 	struct talitos_private *priv = dev_get_drvdata(dev);
133 	bool is_sec1 = has_ftr_sec1(priv);
134 
135 	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
136 			 from_talitos_ptr_len(ptr, is_sec1), dir);
137 }
138 
139 static int reset_channel(struct device *dev, int ch)
140 {
141 	struct talitos_private *priv = dev_get_drvdata(dev);
142 	unsigned int timeout = TALITOS_TIMEOUT;
143 	bool is_sec1 = has_ftr_sec1(priv);
144 
145 	if (is_sec1) {
146 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
147 			  TALITOS1_CCCR_LO_RESET);
148 
149 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
150 			TALITOS1_CCCR_LO_RESET) && --timeout)
151 			cpu_relax();
152 	} else {
153 		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
154 			  TALITOS2_CCCR_RESET);
155 
156 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
157 			TALITOS2_CCCR_RESET) && --timeout)
158 			cpu_relax();
159 	}
160 
161 	if (timeout == 0) {
162 		dev_err(dev, "failed to reset channel %d\n", ch);
163 		return -EIO;
164 	}
165 
166 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
167 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
168 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
169 	/* enable chaining descriptors */
170 	if (is_sec1)
171 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
172 			  TALITOS_CCCR_LO_NE);
173 
174 	/* and ICCR writeback, if available */
175 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
176 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
177 		          TALITOS_CCCR_LO_IWSE);
178 
179 	return 0;
180 }
181 
182 static int reset_device(struct device *dev)
183 {
184 	struct talitos_private *priv = dev_get_drvdata(dev);
185 	unsigned int timeout = TALITOS_TIMEOUT;
186 	bool is_sec1 = has_ftr_sec1(priv);
187 	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
188 
189 	setbits32(priv->reg + TALITOS_MCR, mcr);
190 
191 	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
192 	       && --timeout)
193 		cpu_relax();
194 
195 	if (priv->irq[1]) {
196 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
197 		setbits32(priv->reg + TALITOS_MCR, mcr);
198 	}
199 
200 	if (timeout == 0) {
201 		dev_err(dev, "failed to reset device\n");
202 		return -EIO;
203 	}
204 
205 	return 0;
206 }
207 
208 /*
209  * Reset and initialize the device
210  */
211 static int init_device(struct device *dev)
212 {
213 	struct talitos_private *priv = dev_get_drvdata(dev);
214 	int ch, err;
215 	bool is_sec1 = has_ftr_sec1(priv);
216 
217 	/*
218 	 * Master reset
219 	 * errata documentation: warning: certain SEC interrupts
220 	 * are not fully cleared by writing the MCR:SWR bit,
221 	 * set bit twice to completely reset
222 	 */
223 	err = reset_device(dev);
224 	if (err)
225 		return err;
226 
227 	err = reset_device(dev);
228 	if (err)
229 		return err;
230 
231 	/* reset channels */
232 	for (ch = 0; ch < priv->num_channels; ch++) {
233 		err = reset_channel(dev, ch);
234 		if (err)
235 			return err;
236 	}
237 
238 	/* enable channel done and error interrupts */
239 	if (is_sec1) {
240 		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
241 		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
242 		/* disable parity error check in DEU (erroneous? test vect.) */
243 		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
244 	} else {
245 		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
246 		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
247 	}
248 
249 	/* disable integrity check error interrupts (use writeback instead) */
250 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
251 		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
252 		          TALITOS_MDEUICR_LO_ICE);
253 
254 	return 0;
255 }
256 
257 /**
258  * talitos_submit - submits a descriptor to the device for processing
259  * @dev:	the SEC device to be used
260  * @ch:		the SEC device channel to be used
261  * @desc:	the descriptor to be processed by the device
262  * @callback:	whom to call when processing is complete
263  * @context:	a handle for use by caller (optional)
264  *
265  * desc must contain valid dma-mapped (bus physical) address pointers.
266  * callback must check err and feedback in descriptor header
267  * for device processing status.
268  */
269 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
270 			  void (*callback)(struct device *dev,
271 					   struct talitos_desc *desc,
272 					   void *context, int error),
273 			  void *context)
274 {
275 	struct talitos_private *priv = dev_get_drvdata(dev);
276 	struct talitos_request *request;
277 	unsigned long flags;
278 	int head;
279 	bool is_sec1 = has_ftr_sec1(priv);
280 
281 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
282 
283 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
284 		/* h/w fifo is full */
285 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
286 		return -EAGAIN;
287 	}
288 
289 	head = priv->chan[ch].head;
290 	request = &priv->chan[ch].fifo[head];
291 
292 	/* map descriptor and save caller data */
293 	if (is_sec1) {
294 		desc->hdr1 = desc->hdr;
295 		request->dma_desc = dma_map_single(dev, &desc->hdr1,
296 						   TALITOS_DESC_SIZE,
297 						   DMA_BIDIRECTIONAL);
298 	} else {
299 		request->dma_desc = dma_map_single(dev, desc,
300 						   TALITOS_DESC_SIZE,
301 						   DMA_BIDIRECTIONAL);
302 	}
303 	request->callback = callback;
304 	request->context = context;
305 
306 	/* increment fifo head */
307 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
308 
309 	smp_wmb();
310 	request->desc = desc;
311 
312 	/* GO! */
313 	wmb();
314 	out_be32(priv->chan[ch].reg + TALITOS_FF,
315 		 upper_32_bits(request->dma_desc));
316 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
317 		 lower_32_bits(request->dma_desc));
318 
319 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
320 
321 	return -EINPROGRESS;
322 }
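/*
 * Editorial sketch (not part of the driver): each channel FIFO is a
 * power-of-two ring. talitos_submit() advances head and flush_channel()
 * advances tail, both with index = (index + 1) & (fifo_len - 1), so the
 * wrap-around costs a single AND. Runnable with a stand-in fifo_len:
 */
#include <stdio.h>

int main(void)
{
	const unsigned int fifo_len = 8;	/* must be a power of two */
	unsigned int head = 6;
	int i;

	for (i = 0; i < 4; i++) {
		printf("head=%u\n", head);	/* prints 6 7 0 1 */
		head = (head + 1) & (fifo_len - 1);
	}
	return 0;
}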
323 
324 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
325 {
326 	struct talitos_edesc *edesc;
327 
328 	if (!is_sec1)
329 		return request->desc->hdr;
330 
331 	if (!request->desc->next_desc)
332 		return request->desc->hdr1;
333 
334 	edesc = container_of(request->desc, struct talitos_edesc, desc);
335 
336 	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
337 }
338 
339 /*
340  * process what was done, notify callback of error if not done
341  */
342 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
343 {
344 	struct talitos_private *priv = dev_get_drvdata(dev);
345 	struct talitos_request *request, saved_req;
346 	unsigned long flags;
347 	int tail, status;
348 	bool is_sec1 = has_ftr_sec1(priv);
349 
350 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
351 
352 	tail = priv->chan[ch].tail;
353 	while (priv->chan[ch].fifo[tail].desc) {
354 		__be32 hdr;
355 
356 		request = &priv->chan[ch].fifo[tail];
357 
358 		/* descriptors with their done bits set don't get the error */
359 		rmb();
360 		hdr = get_request_hdr(request, is_sec1);
361 
362 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
363 			status = 0;
364 		else
365 			if (!error)
366 				break;
367 			else
368 				status = error;
369 
370 		dma_unmap_single(dev, request->dma_desc,
371 				 TALITOS_DESC_SIZE,
372 				 DMA_BIDIRECTIONAL);
373 
374 		/* copy entries so we can call callback outside lock */
375 		saved_req.desc = request->desc;
376 		saved_req.callback = request->callback;
377 		saved_req.context = request->context;
378 
379 		/* release request entry in fifo */
380 		smp_wmb();
381 		request->desc = NULL;
382 
383 		/* increment fifo tail */
384 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
385 
386 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
387 
388 		atomic_dec(&priv->chan[ch].submit_count);
389 
390 		saved_req.callback(dev, saved_req.desc, saved_req.context,
391 				   status);
392 		/* channel may resume processing in single desc error case */
393 		if (error && !reset_ch && status == error)
394 			return;
395 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
396 		tail = priv->chan[ch].tail;
397 	}
398 
399 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
400 }
401 
402 /*
403  * process completed requests for channels that have done status
404  */
405 #define DEF_TALITOS1_DONE(name, ch_done_mask)				\
406 static void talitos1_done_##name(unsigned long data)			\
407 {									\
408 	struct device *dev = (struct device *)data;			\
409 	struct talitos_private *priv = dev_get_drvdata(dev);		\
410 	unsigned long flags;						\
411 									\
412 	if (ch_done_mask & 0x10000000)					\
413 		flush_channel(dev, 0, 0, 0);			\
414 	if (ch_done_mask & 0x40000000)					\
415 		flush_channel(dev, 1, 0, 0);			\
416 	if (ch_done_mask & 0x00010000)					\
417 		flush_channel(dev, 2, 0, 0);			\
418 	if (ch_done_mask & 0x00040000)					\
419 		flush_channel(dev, 3, 0, 0);			\
420 									\
421 	/* At this point, all completed channels have been processed */	\
422 	/* Unmask done interrupts for channels completed later on. */	\
423 	spin_lock_irqsave(&priv->reg_lock, flags);			\
424 	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
425 	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
426 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
427 }
428 
429 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
430 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
431 
432 #define DEF_TALITOS2_DONE(name, ch_done_mask)				\
433 static void talitos2_done_##name(unsigned long data)			\
434 {									\
435 	struct device *dev = (struct device *)data;			\
436 	struct talitos_private *priv = dev_get_drvdata(dev);		\
437 	unsigned long flags;						\
438 									\
439 	if (ch_done_mask & 1)						\
440 		flush_channel(dev, 0, 0, 0);				\
441 	if (ch_done_mask & (1 << 2))					\
442 		flush_channel(dev, 1, 0, 0);				\
443 	if (ch_done_mask & (1 << 4))					\
444 		flush_channel(dev, 2, 0, 0);				\
445 	if (ch_done_mask & (1 << 6))					\
446 		flush_channel(dev, 3, 0, 0);				\
447 									\
448 	/* At this point, all completed channels have been processed */	\
449 	/* Unmask done interrupts for channels completed later on. */	\
450 	spin_lock_irqsave(&priv->reg_lock, flags);			\
451 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
452 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
453 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
454 }
455 
456 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
457 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
458 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
459 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
460 
461 /*
462  * locate current (offending) descriptor
463  */
464 static __be32 current_desc_hdr(struct device *dev, int ch)
465 {
466 	struct talitos_private *priv = dev_get_drvdata(dev);
467 	int tail, iter;
468 	dma_addr_t cur_desc;
469 
470 	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
471 	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
472 
473 	if (!cur_desc) {
474 		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
475 		return 0;
476 	}
477 
478 	tail = priv->chan[ch].tail;
479 
480 	iter = tail;
481 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
482 	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
483 		iter = (iter + 1) & (priv->fifo_len - 1);
484 		if (iter == tail) {
485 			dev_err(dev, "couldn't locate current descriptor\n");
486 			return 0;
487 		}
488 	}
489 
490 	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
491 		struct talitos_edesc *edesc;
492 
493 		edesc = container_of(priv->chan[ch].fifo[iter].desc,
494 				     struct talitos_edesc, desc);
495 		return ((struct talitos_desc *)
496 			(edesc->buf + edesc->dma_len))->hdr;
497 	}
498 
499 	return priv->chan[ch].fifo[iter].desc->hdr;
500 }
501 
502 /*
503  * user diagnostics; report root cause of error based on execution unit status
504  */
505 static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
506 {
507 	struct talitos_private *priv = dev_get_drvdata(dev);
508 	int i;
509 
510 	if (!desc_hdr)
511 		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
512 
513 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
514 	case DESC_HDR_SEL0_AFEU:
515 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
516 			in_be32(priv->reg_afeu + TALITOS_EUISR),
517 			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
518 		break;
519 	case DESC_HDR_SEL0_DEU:
520 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
521 			in_be32(priv->reg_deu + TALITOS_EUISR),
522 			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
523 		break;
524 	case DESC_HDR_SEL0_MDEUA:
525 	case DESC_HDR_SEL0_MDEUB:
526 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
527 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
528 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
529 		break;
530 	case DESC_HDR_SEL0_RNG:
531 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
532 			in_be32(priv->reg_rngu + TALITOS_ISR),
533 			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
534 		break;
535 	case DESC_HDR_SEL0_PKEU:
536 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
537 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
538 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
539 		break;
540 	case DESC_HDR_SEL0_AESU:
541 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
542 			in_be32(priv->reg_aesu + TALITOS_EUISR),
543 			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
544 		break;
545 	case DESC_HDR_SEL0_CRCU:
546 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
547 			in_be32(priv->reg_crcu + TALITOS_EUISR),
548 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
549 		break;
550 	case DESC_HDR_SEL0_KEU:
551 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
552 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
553 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
554 		break;
555 	}
556 
557 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
558 	case DESC_HDR_SEL1_MDEUA:
559 	case DESC_HDR_SEL1_MDEUB:
560 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
561 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
562 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
563 		break;
564 	case DESC_HDR_SEL1_CRCU:
565 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
566 			in_be32(priv->reg_crcu + TALITOS_EUISR),
567 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
568 		break;
569 	}
570 
571 	for (i = 0; i < 8; i++)
572 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
573 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
574 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
575 }
576 
577 /*
578  * recover from error interrupts
579  */
580 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
581 {
582 	struct talitos_private *priv = dev_get_drvdata(dev);
583 	unsigned int timeout = TALITOS_TIMEOUT;
584 	int ch, error, reset_dev = 0;
585 	u32 v_lo;
586 	bool is_sec1 = has_ftr_sec1(priv);
587 	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
588 
589 	for (ch = 0; ch < priv->num_channels; ch++) {
590 		/* skip channels without errors */
591 		if (is_sec1) {
592 			/* bits 29, 31, 17, 19 */
593 			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
594 				continue;
595 		} else {
596 			if (!(isr & (1 << (ch * 2 + 1))))
597 				continue;
598 		}
599 
600 		error = -EINVAL;
601 
602 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
603 
604 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
605 			dev_err(dev, "double fetch fifo overflow error\n");
606 			error = -EAGAIN;
607 			reset_ch = 1;
608 		}
609 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
610 			/* h/w dropped descriptor */
611 			dev_err(dev, "single fetch fifo overflow error\n");
612 			error = -EAGAIN;
613 		}
614 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
615 			dev_err(dev, "master data transfer error\n");
616 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
617 			dev_err(dev, is_sec1 ? "pointer not complete error\n"
618 					     : "s/g data length zero error\n");
619 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
620 			dev_err(dev, is_sec1 ? "parity error\n"
621 					     : "fetch pointer zero error\n");
622 		if (v_lo & TALITOS_CCPSR_LO_IDH)
623 			dev_err(dev, "illegal descriptor header error\n");
624 		if (v_lo & TALITOS_CCPSR_LO_IEU)
625 			dev_err(dev, is_sec1 ? "static assignment error\n"
626 					     : "invalid exec unit error\n");
627 		if (v_lo & TALITOS_CCPSR_LO_EU)
628 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
629 		if (!is_sec1) {
630 			if (v_lo & TALITOS_CCPSR_LO_GB)
631 				dev_err(dev, "gather boundary error\n");
632 			if (v_lo & TALITOS_CCPSR_LO_GRL)
633 				dev_err(dev, "gather return/length error\n");
634 			if (v_lo & TALITOS_CCPSR_LO_SB)
635 				dev_err(dev, "scatter boundary error\n");
636 			if (v_lo & TALITOS_CCPSR_LO_SRL)
637 				dev_err(dev, "scatter return/length error\n");
638 		}
639 
640 		flush_channel(dev, ch, error, reset_ch);
641 
642 		if (reset_ch) {
643 			reset_channel(dev, ch);
644 		} else {
645 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
646 				  TALITOS2_CCCR_CONT);
647 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
648 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
649 			       TALITOS2_CCCR_CONT) && --timeout)
650 				cpu_relax();
651 			if (timeout == 0) {
652 				dev_err(dev, "failed to restart channel %d\n",
653 					ch);
654 				reset_dev = 1;
655 			}
656 		}
657 	}
658 	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
659 	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
660 		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
661 			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
662 				isr, isr_lo);
663 		else
664 			dev_err(dev, "done overflow, internal time out, or "
665 				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
666 
667 		/* purge request queues */
668 		for (ch = 0; ch < priv->num_channels; ch++)
669 			flush_channel(dev, ch, -EIO, 1);
670 
671 		/* reset and reinitialize the device */
672 		init_device(dev);
673 	}
674 }
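/*
 * Editorial sketch (not part of the driver): the SEC1 per-channel error
 * test above folds channels 0-3 onto ISR bits 29, 31, 17 and 19. A
 * runnable check of the shift expression:
 */
#include <stdio.h>

int main(void)
{
	int ch;

	for (ch = 0; ch < 4; ch++)
		printf("ch%d -> bit %d\n",
		       ch, 29 + (ch & 1) * 2 - (ch & 2) * 6);
	/* prints: ch0 -> 29, ch1 -> 31, ch2 -> 17, ch3 -> 19 */
	return 0;
}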
675 
676 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
677 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
678 {									       \
679 	struct device *dev = data;					       \
680 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
681 	u32 isr, isr_lo;						       \
682 	unsigned long flags;						       \
683 									       \
684 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
685 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
686 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
687 	/* Acknowledge interrupt */					       \
688 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
689 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
690 									       \
691 	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
692 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
693 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
694 	}								       \
695 	else {								       \
696 		if (likely(isr & ch_done_mask)) {			       \
697 			/* mask further done interrupts. */		       \
698 			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
699 			/* done_task will unmask done interrupts at exit */    \
700 			tasklet_schedule(&priv->done_task[tlet]);	       \
701 		}							       \
702 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
703 	}								       \
704 									       \
705 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
706 								IRQ_NONE;      \
707 }
708 
709 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
710 
711 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
712 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
713 {									       \
714 	struct device *dev = data;					       \
715 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
716 	u32 isr, isr_lo;						       \
717 	unsigned long flags;						       \
718 									       \
719 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
720 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
721 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
722 	/* Acknowledge interrupt */					       \
723 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
724 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
725 									       \
726 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
727 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
728 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
729 	}								       \
730 	else {								       \
731 		if (likely(isr & ch_done_mask)) {			       \
732 			/* mask further done interrupts. */		       \
733 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
734 			/* done_task will unmask done interrupts at exit */    \
735 			tasklet_schedule(&priv->done_task[tlet]);	       \
736 		}							       \
737 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
738 	}								       \
739 									       \
740 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
741 								IRQ_NONE;      \
742 }
743 
744 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
745 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
746 		       0)
747 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
748 		       1)
749 
750 /*
751  * hwrng
752  */
753 static int talitos_rng_data_present(struct hwrng *rng, int wait)
754 {
755 	struct device *dev = (struct device *)rng->priv;
756 	struct talitos_private *priv = dev_get_drvdata(dev);
757 	u32 ofl;
758 	int i;
759 
760 	for (i = 0; i < 20; i++) {
761 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
762 		      TALITOS_RNGUSR_LO_OFL;
763 		if (ofl || !wait)
764 			break;
765 		udelay(10);
766 	}
767 
768 	return !!ofl;
769 }
770 
771 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
772 {
773 	struct device *dev = (struct device *)rng->priv;
774 	struct talitos_private *priv = dev_get_drvdata(dev);
775 
776 	/* rng fifo requires 64-bit accesses */
777 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
778 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
779 
780 	return sizeof(u32);
781 }
782 
783 static int talitos_rng_init(struct hwrng *rng)
784 {
785 	struct device *dev = (struct device *)rng->priv;
786 	struct talitos_private *priv = dev_get_drvdata(dev);
787 	unsigned int timeout = TALITOS_TIMEOUT;
788 
789 	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
790 	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
791 		 & TALITOS_RNGUSR_LO_RD)
792 	       && --timeout)
793 		cpu_relax();
794 	if (timeout == 0) {
795 		dev_err(dev, "failed to reset rng hw\n");
796 		return -ENODEV;
797 	}
798 
799 	/* start generating */
800 	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
801 
802 	return 0;
803 }
804 
805 static int talitos_register_rng(struct device *dev)
806 {
807 	struct talitos_private *priv = dev_get_drvdata(dev);
808 	int err;
809 
810 	priv->rng.name		= dev_driver_string(dev);
811 	priv->rng.init		= talitos_rng_init;
812 	priv->rng.data_present	= talitos_rng_data_present;
813 	priv->rng.data_read	= talitos_rng_data_read;
814 	priv->rng.priv		= (unsigned long)dev;
815 
816 	err = hwrng_register(&priv->rng);
817 	if (!err)
818 		priv->rng_registered = true;
819 
820 	return err;
821 }
822 
823 static void talitos_unregister_rng(struct device *dev)
824 {
825 	struct talitos_private *priv = dev_get_drvdata(dev);
826 
827 	if (!priv->rng_registered)
828 		return;
829 
830 	hwrng_unregister(&priv->rng);
831 	priv->rng_registered = false;
832 }
833 
834 /*
835  * crypto alg
836  */
837 #define TALITOS_CRA_PRIORITY		3000
838 /*
839  * Defines a priority for doing AEAD with descriptors type
840  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
841  */
842 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
843 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
844 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
845 #else
846 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
847 #endif
848 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
849 
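/*
 * Editorial note: with CONFIG_CRYPTO_DEV_TALITOS2 the key buffer works
 * out to AES_MAX_KEY_SIZE (32) + SHA512_BLOCK_SIZE (128) = 160 bytes,
 * the largest authkey-plus-enckey blob aead_setkey() must hold; the
 * SEC1-only build caps it at 32 + SHA256_BLOCK_SIZE (64) = 96 bytes.
 */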
850 struct talitos_ctx {
851 	struct device *dev;
852 	int ch;
853 	__be32 desc_hdr_template;
854 	u8 key[TALITOS_MAX_KEY_SIZE];
855 	u8 iv[TALITOS_MAX_IV_LENGTH];
856 	dma_addr_t dma_key;
857 	unsigned int keylen;
858 	unsigned int enckeylen;
859 	unsigned int authkeylen;
860 };
861 
862 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
863 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
864 
865 struct talitos_ahash_req_ctx {
866 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
867 	unsigned int hw_context_size;
868 	u8 buf[2][HASH_MAX_BLOCK_SIZE];
869 	int buf_idx;
870 	unsigned int swinit;
871 	unsigned int first;
872 	unsigned int last;
873 	unsigned int to_hash_later;
874 	unsigned int nbuf;
875 	struct scatterlist bufsl[2];
876 	struct scatterlist *psrc;
877 };
878 
879 struct talitos_export_state {
880 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
881 	u8 buf[HASH_MAX_BLOCK_SIZE];
882 	unsigned int swinit;
883 	unsigned int first;
884 	unsigned int last;
885 	unsigned int to_hash_later;
886 	unsigned int nbuf;
887 };
888 
889 static int aead_setkey(struct crypto_aead *authenc,
890 		       const u8 *key, unsigned int keylen)
891 {
892 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
893 	struct device *dev = ctx->dev;
894 	struct crypto_authenc_keys keys;
895 
896 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
897 		goto badkey;
898 
899 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
900 		goto badkey;
901 
902 	if (ctx->keylen)
903 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
904 
905 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
906 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
907 
908 	ctx->keylen = keys.authkeylen + keys.enckeylen;
909 	ctx->enckeylen = keys.enckeylen;
910 	ctx->authkeylen = keys.authkeylen;
911 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
912 				      DMA_TO_DEVICE);
913 
914 	memzero_explicit(&keys, sizeof(keys));
915 	return 0;
916 
917 badkey:
918 	memzero_explicit(&keys, sizeof(keys));
919 	return -EINVAL;
920 }
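/*
 * Editorial sketch (not part of the driver): aead_setkey() stores the
 * two authenc keys back to back in ctx->key, so a single DMA mapping
 * serves both descriptor pointers (ctx->dma_key for the HMAC key and
 * ctx->dma_key + authkeylen for the cipher key, see ipsec_esp()).
 * Runnable illustration with stand-in key sizes:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char key[36];
	unsigned char authkey[20] = { 0xaa };	/* e.g. an HMAC-SHA1 key */
	unsigned char enckey[16]  = { 0xee };	/* e.g. an AES-128 key */

	memcpy(key, authkey, sizeof(authkey));
	memcpy(key + sizeof(authkey), enckey, sizeof(enckey));

	printf("keylen=%zu, enckey starts at offset %zu\n",
	       sizeof(key), sizeof(authkey));
	return 0;
}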
921 
922 static int aead_des3_setkey(struct crypto_aead *authenc,
923 			    const u8 *key, unsigned int keylen)
924 {
925 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
926 	struct device *dev = ctx->dev;
927 	struct crypto_authenc_keys keys;
928 	int err;
929 
930 	err = crypto_authenc_extractkeys(&keys, key, keylen);
931 	if (unlikely(err))
932 		goto out;
933 
934 	err = -EINVAL;
935 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
936 		goto out;
937 
938 	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
939 	if (err)
940 		goto out;
941 
942 	if (ctx->keylen)
943 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
944 
945 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
946 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
947 
948 	ctx->keylen = keys.authkeylen + keys.enckeylen;
949 	ctx->enckeylen = keys.enckeylen;
950 	ctx->authkeylen = keys.authkeylen;
951 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
952 				      DMA_TO_DEVICE);
953 
954 out:
955 	memzero_explicit(&keys, sizeof(keys));
956 	return err;
957 }
958 
959 static void talitos_sg_unmap(struct device *dev,
960 			     struct talitos_edesc *edesc,
961 			     struct scatterlist *src,
962 			     struct scatterlist *dst,
963 			     unsigned int len, unsigned int offset)
964 {
965 	struct talitos_private *priv = dev_get_drvdata(dev);
966 	bool is_sec1 = has_ftr_sec1(priv);
967 	unsigned int src_nents = edesc->src_nents ? : 1;
968 	unsigned int dst_nents = edesc->dst_nents ? : 1;
969 
970 	if (is_sec1 && dst && dst_nents > 1) {
971 		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
972 					   len, DMA_FROM_DEVICE);
973 		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
974 				     offset);
975 	}
976 	if (src != dst) {
977 		if (src_nents == 1 || !is_sec1)
978 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
979 
980 		if (dst && (dst_nents == 1 || !is_sec1))
981 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
982 	} else if (src_nents == 1 || !is_sec1) {
983 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
984 	}
985 }
986 
987 static void ipsec_esp_unmap(struct device *dev,
988 			    struct talitos_edesc *edesc,
989 			    struct aead_request *areq, bool encrypt)
990 {
991 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
992 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
993 	unsigned int ivsize = crypto_aead_ivsize(aead);
994 	unsigned int authsize = crypto_aead_authsize(aead);
995 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
996 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
997 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
998 
999 	if (is_ipsec_esp)
1000 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1001 					 DMA_FROM_DEVICE);
1002 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1003 
1004 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1005 			 cryptlen + authsize, areq->assoclen);
1006 
1007 	if (edesc->dma_len)
1008 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1009 				 DMA_BIDIRECTIONAL);
1010 
1011 	if (!is_ipsec_esp) {
1012 		unsigned int dst_nents = edesc->dst_nents ? : 1;
1013 
1014 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1015 				   areq->assoclen + cryptlen - ivsize);
1016 	}
1017 }
1018 
1019 /*
1020  * ipsec_esp descriptor callbacks
1021  */
1022 static void ipsec_esp_encrypt_done(struct device *dev,
1023 				   struct talitos_desc *desc, void *context,
1024 				   int err)
1025 {
1026 	struct aead_request *areq = context;
1027 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1028 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1029 	struct talitos_edesc *edesc;
1030 
1031 	edesc = container_of(desc, struct talitos_edesc, desc);
1032 
1033 	ipsec_esp_unmap(dev, edesc, areq, true);
1034 
1035 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1036 
1037 	kfree(edesc);
1038 
1039 	aead_request_complete(areq, err);
1040 }
1041 
1042 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1043 					  struct talitos_desc *desc,
1044 					  void *context, int err)
1045 {
1046 	struct aead_request *req = context;
1047 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1048 	unsigned int authsize = crypto_aead_authsize(authenc);
1049 	struct talitos_edesc *edesc;
1050 	char *oicv, *icv;
1051 
1052 	edesc = container_of(desc, struct talitos_edesc, desc);
1053 
1054 	ipsec_esp_unmap(dev, edesc, req, false);
1055 
1056 	if (!err) {
1057 		/* auth check */
1058 		oicv = edesc->buf + edesc->dma_len;
1059 		icv = oicv - authsize;
1060 
1061 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1062 	}
1063 
1064 	kfree(edesc);
1065 
1066 	aead_request_complete(req, err);
1067 }
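/*
 * Editorial sketch (not part of the driver): crypto_memneq() above is
 * a constant-time comparison, so the ICV check does not leak how many
 * leading bytes matched. A minimal host-side equivalent:
 */
#include <stddef.h>
#include <stdio.h>

static int memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;

	while (n--)
		diff |= *pa++ ^ *pb++;	/* no early exit on mismatch */
	return diff != 0;
}

int main(void)
{
	printf("%d %d\n", memneq("abcd", "abcd", 4), memneq("abcd", "abcx", 4));
	return 0;
}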
1068 
1069 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1070 					  struct talitos_desc *desc,
1071 					  void *context, int err)
1072 {
1073 	struct aead_request *req = context;
1074 	struct talitos_edesc *edesc;
1075 
1076 	edesc = container_of(desc, struct talitos_edesc, desc);
1077 
1078 	ipsec_esp_unmap(dev, edesc, req, false);
1079 
1080 	/* check ICV auth status */
1081 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1082 		     DESC_HDR_LO_ICCR1_PASS))
1083 		err = -EBADMSG;
1084 
1085 	kfree(edesc);
1086 
1087 	aead_request_complete(req, err);
1088 }
1089 
1090 /*
1091  * convert scatterlist to SEC h/w link table format
1092  * stop at cryptlen bytes
1093  */
1094 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1095 				 unsigned int offset, int datalen, int elen,
1096 				 struct talitos_ptr *link_tbl_ptr, int align)
1097 {
1098 	int n_sg = elen ? sg_count + 1 : sg_count;
1099 	int count = 0;
1100 	int cryptlen = datalen + elen;
1101 	int padding = ALIGN(cryptlen, align) - cryptlen;
1102 
1103 	while (cryptlen && sg && n_sg--) {
1104 		unsigned int len = sg_dma_len(sg);
1105 
1106 		if (offset >= len) {
1107 			offset -= len;
1108 			goto next;
1109 		}
1110 
1111 		len -= offset;
1112 
1113 		if (len > cryptlen)
1114 			len = cryptlen;
1115 
1116 		if (datalen > 0 && len > datalen) {
1117 			to_talitos_ptr(link_tbl_ptr + count,
1118 				       sg_dma_address(sg) + offset, datalen, 0);
1119 			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1120 			count++;
1121 			len -= datalen;
1122 			offset += datalen;
1123 		}
1124 		to_talitos_ptr(link_tbl_ptr + count,
1125 			       sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
1126 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1127 		count++;
1128 		cryptlen -= len;
1129 		datalen -= len;
1130 		offset = 0;
1131 
1132 next:
1133 		sg = sg_next(sg);
1134 	}
1135 
1136 	/* tag end of link table */
1137 	if (count > 0)
1138 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1139 				       DESC_PTR_LNKTBL_RET, 0);
1140 
1141 	return count;
1142 }
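/*
 * Editorial sketch (not part of the driver): the padding folded into
 * the last link-table entry is ALIGN(cryptlen, align) - cryptlen, i.e.
 * the bytes needed to round the total up to the execution unit's
 * alignment (16 for AES CTR, see common_nonsnoop()). Runnable
 * arithmetic with stand-in lengths:
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

int main(void)
{
	int cryptlen = 20, align = 16;

	printf("padding=%d\n", ALIGN(cryptlen, align) - cryptlen);	/* 12 */
	return 0;
}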
1143 
1144 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1145 			      unsigned int len, struct talitos_edesc *edesc,
1146 			      struct talitos_ptr *ptr, int sg_count,
1147 			      unsigned int offset, int tbl_off, int elen,
1148 			      bool force, int align)
1149 {
1150 	struct talitos_private *priv = dev_get_drvdata(dev);
1151 	bool is_sec1 = has_ftr_sec1(priv);
1152 	int aligned_len = ALIGN(len, align);
1153 
1154 	if (!src) {
1155 		to_talitos_ptr(ptr, 0, 0, is_sec1);
1156 		return 1;
1157 	}
1158 	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1159 	if (sg_count == 1 && !force) {
1160 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
1161 		return sg_count;
1162 	}
1163 	if (is_sec1) {
1164 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
1165 		return sg_count;
1166 	}
1167 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1168 					 &edesc->link_tbl[tbl_off], align);
1169 	if (sg_count == 1 && !force) {
1170 		/* Only one segment now, so no link tbl needed */
1171 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1172 		return sg_count;
1173 	}
1174 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1175 			    tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
1176 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1177 
1178 	return sg_count;
1179 }
1180 
1181 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1182 			  unsigned int len, struct talitos_edesc *edesc,
1183 			  struct talitos_ptr *ptr, int sg_count,
1184 			  unsigned int offset, int tbl_off)
1185 {
1186 	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1187 				  tbl_off, 0, false, 1);
1188 }
1189 
1190 /*
1191  * fill in and submit ipsec_esp descriptor
1192  */
1193 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1194 		     bool encrypt,
1195 		     void (*callback)(struct device *dev,
1196 				      struct talitos_desc *desc,
1197 				      void *context, int error))
1198 {
1199 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1200 	unsigned int authsize = crypto_aead_authsize(aead);
1201 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1202 	struct device *dev = ctx->dev;
1203 	struct talitos_desc *desc = &edesc->desc;
1204 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1205 	unsigned int ivsize = crypto_aead_ivsize(aead);
1206 	int tbl_off = 0;
1207 	int sg_count, ret;
1208 	int elen = 0;
1209 	bool sync_needed = false;
1210 	struct talitos_private *priv = dev_get_drvdata(dev);
1211 	bool is_sec1 = has_ftr_sec1(priv);
1212 	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1213 	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1214 	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1215 	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1216 
1217 	/* hmac key */
1218 	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1219 
1220 	sg_count = edesc->src_nents ?: 1;
1221 	if (is_sec1 && sg_count > 1)
1222 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1223 				  areq->assoclen + cryptlen);
1224 	else
1225 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1226 				      (areq->src == areq->dst) ?
1227 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1228 
1229 	/* hmac data */
1230 	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1231 			     &desc->ptr[1], sg_count, 0, tbl_off);
1232 
1233 	if (ret > 1) {
1234 		tbl_off += ret;
1235 		sync_needed = true;
1236 	}
1237 
1238 	/* cipher iv */
1239 	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1240 
1241 	/* cipher key */
1242 	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1243 		       ctx->enckeylen, is_sec1);
1244 
1245 	/*
1246 	 * cipher in
1247 	 * map and adjust cipher len to aead request cryptlen.
1248 	 * extent is bytes of HMAC postpended to ciphertext,
1249 	 * typically 12 for ipsec
1250 	 */
1251 	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1252 		elen = authsize;
1253 
1254 	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1255 				 sg_count, areq->assoclen, tbl_off, elen,
1256 				 false, 1);
1257 
1258 	if (ret > 1) {
1259 		tbl_off += ret;
1260 		sync_needed = true;
1261 	}
1262 
1263 	/* cipher out */
1264 	if (areq->src != areq->dst) {
1265 		sg_count = edesc->dst_nents ? : 1;
1266 		if (!is_sec1 || sg_count == 1)
1267 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1268 	}
1269 
1270 	if (is_ipsec_esp && encrypt)
1271 		elen = authsize;
1272 	else
1273 		elen = 0;
1274 	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1275 				 sg_count, areq->assoclen, tbl_off, elen,
1276 				 is_ipsec_esp && !encrypt, 1);
1277 	tbl_off += ret;
1278 
1279 	if (!encrypt && is_ipsec_esp) {
1280 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1281 
1282 		/* Add an entry to the link table for ICV data */
1283 		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1284 		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1285 
1286 		/* icv data follows link tables */
1287 		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1288 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1289 		sync_needed = true;
1290 	} else if (!encrypt) {
1291 		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1292 		sync_needed = true;
1293 	} else if (!is_ipsec_esp) {
1294 		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1295 			       sg_count, areq->assoclen + cryptlen, tbl_off);
1296 	}
1297 
1298 	/* iv out */
1299 	if (is_ipsec_esp)
1300 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1301 				       DMA_FROM_DEVICE);
1302 
1303 	if (sync_needed)
1304 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1305 					   edesc->dma_len,
1306 					   DMA_BIDIRECTIONAL);
1307 
1308 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1309 	if (ret != -EINPROGRESS) {
1310 		ipsec_esp_unmap(dev, edesc, areq, encrypt);
1311 		kfree(edesc);
1312 	}
1313 	return ret;
1314 }
1315 
1316 /*
1317  * allocate and map the extended descriptor
1318  */
1319 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1320 						 struct scatterlist *src,
1321 						 struct scatterlist *dst,
1322 						 u8 *iv,
1323 						 unsigned int assoclen,
1324 						 unsigned int cryptlen,
1325 						 unsigned int authsize,
1326 						 unsigned int ivsize,
1327 						 int icv_stashing,
1328 						 u32 cryptoflags,
1329 						 bool encrypt)
1330 {
1331 	struct talitos_edesc *edesc;
1332 	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1333 	dma_addr_t iv_dma = 0;
1334 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1335 		      GFP_ATOMIC;
1336 	struct talitos_private *priv = dev_get_drvdata(dev);
1337 	bool is_sec1 = has_ftr_sec1(priv);
1338 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1339 
1340 	if (cryptlen + authsize > max_len) {
1341 		dev_err(dev, "length exceeds h/w max limit\n");
1342 		return ERR_PTR(-EINVAL);
1343 	}
1344 
1345 	if (!dst || dst == src) {
1346 		src_len = assoclen + cryptlen + authsize;
1347 		src_nents = sg_nents_for_len(src, src_len);
1348 		if (src_nents < 0) {
1349 			dev_err(dev, "Invalid number of src SG.\n");
1350 			return ERR_PTR(-EINVAL);
1351 		}
1352 		src_nents = (src_nents == 1) ? 0 : src_nents;
1353 		dst_nents = dst ? src_nents : 0;
1354 		dst_len = 0;
1355 	} else { /* dst && dst != src */
1356 		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1357 		src_nents = sg_nents_for_len(src, src_len);
1358 		if (src_nents < 0) {
1359 			dev_err(dev, "Invalid number of src SG.\n");
1360 			return ERR_PTR(-EINVAL);
1361 		}
1362 		src_nents = (src_nents == 1) ? 0 : src_nents;
1363 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1364 		dst_nents = sg_nents_for_len(dst, dst_len);
1365 		if (dst_nents < 0) {
1366 			dev_err(dev, "Invalid number of dst SG.\n");
1367 			return ERR_PTR(-EINVAL);
1368 		}
1369 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1370 	}
1371 
1372 	/*
1373 	 * allocate space for base edesc plus the link tables,
1374 	 * allowing for two separate entries for AD and generated ICV (+ 2),
1375 	 * and space for two sets of ICVs (stashed and generated)
1376 	 */
1377 	alloc_len = sizeof(struct talitos_edesc);
1378 	if (src_nents || dst_nents || !encrypt) {
1379 		if (is_sec1)
1380 			dma_len = (src_nents ? src_len : 0) +
1381 				  (dst_nents ? dst_len : 0) + authsize;
1382 		else
1383 			dma_len = (src_nents + dst_nents + 2) *
1384 				  sizeof(struct talitos_ptr) + authsize;
1385 		alloc_len += dma_len;
1386 	} else {
1387 		dma_len = 0;
1388 	}
1389 	alloc_len += icv_stashing ? authsize : 0;
1390 
1391 	/* if it's an ahash, add space for a second desc next to the first one */
1392 	if (is_sec1 && !dst)
1393 		alloc_len += sizeof(struct talitos_desc);
1394 	alloc_len += ivsize;
1395 
1396 	edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags);
1397 	if (!edesc)
1398 		return ERR_PTR(-ENOMEM);
1399 	if (ivsize) {
1400 		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1401 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1402 	}
1403 	memset(&edesc->desc, 0, sizeof(edesc->desc));
1404 
1405 	edesc->src_nents = src_nents;
1406 	edesc->dst_nents = dst_nents;
1407 	edesc->iv_dma = iv_dma;
1408 	edesc->dma_len = dma_len;
1409 	if (dma_len)
1410 		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1411 						     edesc->dma_len,
1412 						     DMA_BIDIRECTIONAL);
1413 
1414 	return edesc;
1415 }
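/*
 * Editorial sketch (not part of the driver): for SEC2+ the link-table
 * area sized above is (src_nents + dst_nents + 2) talitos_ptr entries
 * plus room for the ICV. Runnable arithmetic, assuming the 8-byte
 * talitos_ptr of the SEC2 register layout:
 */
#include <stdio.h>

int main(void)
{
	int src_nents = 3, dst_nents = 2, authsize = 12;
	int ptr_size = 8;	/* stand-in for sizeof(struct talitos_ptr) */

	printf("dma_len=%d\n",
	       (src_nents + dst_nents + 2) * ptr_size + authsize);	/* 68 */
	return 0;
}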
1416 
1417 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1418 					      int icv_stashing, bool encrypt)
1419 {
1420 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1421 	unsigned int authsize = crypto_aead_authsize(authenc);
1422 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1423 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1424 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1425 
1426 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1427 				   iv, areq->assoclen, cryptlen,
1428 				   authsize, ivsize, icv_stashing,
1429 				   areq->base.flags, encrypt);
1430 }
1431 
1432 static int aead_encrypt(struct aead_request *req)
1433 {
1434 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1435 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1436 	struct talitos_edesc *edesc;
1437 
1438 	/* allocate extended descriptor */
1439 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1440 	if (IS_ERR(edesc))
1441 		return PTR_ERR(edesc);
1442 
1443 	/* set encrypt */
1444 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1445 
1446 	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1447 }
1448 
1449 static int aead_decrypt(struct aead_request *req)
1450 {
1451 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1452 	unsigned int authsize = crypto_aead_authsize(authenc);
1453 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1454 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1455 	struct talitos_edesc *edesc;
1456 	void *icvdata;
1457 
1458 	/* allocate extended descriptor */
1459 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1460 	if (IS_ERR(edesc))
1461 		return PTR_ERR(edesc);
1462 
1463 	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1464 	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1465 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1466 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1467 
1468 		/* decrypt and check the ICV */
1469 		edesc->desc.hdr = ctx->desc_hdr_template |
1470 				  DESC_HDR_DIR_INBOUND |
1471 				  DESC_HDR_MODE1_MDEU_CICV;
1472 
1473 		/* reset integrity check result bits */
1474 
1475 		return ipsec_esp(edesc, req, false,
1476 				 ipsec_esp_decrypt_hwauth_done);
1477 	}
1478 
1479 	/* Have to check the ICV with software */
1480 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1481 
1482 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1483 	icvdata = edesc->buf + edesc->dma_len;
1484 
1485 	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1486 			   req->assoclen + req->cryptlen - authsize);
1487 
1488 	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1489 }
1490 
1491 static int skcipher_setkey(struct crypto_skcipher *cipher,
1492 			     const u8 *key, unsigned int keylen)
1493 {
1494 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1495 	struct device *dev = ctx->dev;
1496 
1497 	if (ctx->keylen)
1498 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1499 
1500 	memcpy(&ctx->key, key, keylen);
1501 	ctx->keylen = keylen;
1502 
1503 	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1504 
1505 	return 0;
1506 }
1507 
1508 static int skcipher_des_setkey(struct crypto_skcipher *cipher,
1509 				 const u8 *key, unsigned int keylen)
1510 {
1511 	return verify_skcipher_des_key(cipher, key) ?:
1512 	       skcipher_setkey(cipher, key, keylen);
1513 }
1514 
1515 static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
1516 				  const u8 *key, unsigned int keylen)
1517 {
1518 	return verify_skcipher_des3_key(cipher, key) ?:
1519 	       skcipher_setkey(cipher, key, keylen);
1520 }
1521 
1522 static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
1523 				  const u8 *key, unsigned int keylen)
1524 {
1525 	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1526 	    keylen == AES_KEYSIZE_256)
1527 		return skcipher_setkey(cipher, key, keylen);
1528 
1529 	return -EINVAL;
1530 }
1531 
1532 static void common_nonsnoop_unmap(struct device *dev,
1533 				  struct talitos_edesc *edesc,
1534 				  struct skcipher_request *areq)
1535 {
1536 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1537 
1538 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
1539 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1540 
1541 	if (edesc->dma_len)
1542 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1543 				 DMA_BIDIRECTIONAL);
1544 }
1545 
1546 static void skcipher_done(struct device *dev,
1547 			    struct talitos_desc *desc, void *context,
1548 			    int err)
1549 {
1550 	struct skcipher_request *areq = context;
1551 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1552 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1553 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1554 	struct talitos_edesc *edesc;
1555 
1556 	edesc = container_of(desc, struct talitos_edesc, desc);
1557 
1558 	common_nonsnoop_unmap(dev, edesc, areq);
1559 	memcpy(areq->iv, ctx->iv, ivsize);
1560 
1561 	kfree(edesc);
1562 
1563 	skcipher_request_complete(areq, err);
1564 }
1565 
1566 static int common_nonsnoop(struct talitos_edesc *edesc,
1567 			   struct skcipher_request *areq,
1568 			   void (*callback) (struct device *dev,
1569 					     struct talitos_desc *desc,
1570 					     void *context, int error))
1571 {
1572 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1573 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1574 	struct device *dev = ctx->dev;
1575 	struct talitos_desc *desc = &edesc->desc;
1576 	unsigned int cryptlen = areq->cryptlen;
1577 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1578 	int sg_count, ret;
1579 	bool sync_needed = false;
1580 	struct talitos_private *priv = dev_get_drvdata(dev);
1581 	bool is_sec1 = has_ftr_sec1(priv);
1582 	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
1583 		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
1584 
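	/*
	 * Descriptor pointer layout for this request type:
	 * ptr[0] empty, ptr[1] IV in, ptr[2] key, ptr[3] cipher in,
	 * ptr[4] cipher out, ptr[5] IV out, ptr[6] empty.
	 */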
	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
				      sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *areq,
						  bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int skcipher_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));

	if (!areq->cryptlen)
		return 0;

	if (areq->cryptlen % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, skcipher_done);
}

static int skcipher_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));

	if (!areq->cryptlen)
		return 0;

	if (areq->cryptlen % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, skcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	unmap_single_talitos_ptr(dev, &desc->ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
	if (req_ctx->last)
		memcpy(areq->result, req_ctx->hw_context,
		       crypto_ahash_digestsize(tfm));

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&desc->ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &desc->ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (desc->next_desc)
		dma_unmap_single(dev, be32_to_cpu(desc->next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	ahash_request_complete(areq, err);
}

/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

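/*
 * Hash descriptor pointer layout: ptr[1] hash context in, ptr[2] HMAC
 * key, ptr[3] data in, ptr[5] digest or context out; DWORDs 0, 4 and 6
 * are unused.  On SEC1, when both buffered bytes and new scatterlist
 * data must be hashed, a second descriptor is chained in via
 * desc->next_desc (see the is_sec1 block below).
 */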
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       req_ctx->hw_context, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		struct talitos_desc *desc2 = (struct talitos_desc *)
					     (edesc->buf + edesc->dma_len);
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last)
			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1)
		nbytes -= req_ctx->nbuf;

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

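/*
 * Note on the map/unmap pair in ahash_init(): it performs no transfer
 * by itself; judging from the code it is there to flush any dirty
 * cachelines of hw_context to memory before later *_nosync mappings
 * hand the buffer to the hardware.
 */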
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	unsigned int size;
	dma_addr_t dma;

	/* Initialize the context */
	req_ctx->buf_idx = 0;
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */

	return 0;
}

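/*
 * Core update/final/finup worker.  Input is buffered until more than
 * one block is available; on non-final operations at least one full
 * block is always held back (tracked via to_hash_later) so that a
 * concluding final/finup never has to hash an empty message.
 */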
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last) {
		to_hash_later = 0;
	} else if (to_hash_later) {
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	} else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		int offset;

		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(areq->src, offset);
		if (nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
						 offset);
	} else {
		req_ctx->psrc = areq->src;
	}

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/*
	 * When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
}

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	ahash_init(areq);
	return ahash_finup(areq);
}

static int ahash_digest_sha224_swinit(struct ahash_request *areq)
{
	ahash_init_sha224_swinit(areq);
	return ahash_finup(areq);
}

static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}

static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	const struct talitos_export_state *export = in;
	unsigned int size;
	dma_addr_t dma;

	memset(req_ctx, 0, sizeof(*req_ctx));
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;
	memcpy(req_ctx->hw_context, export->hw_context, size);
	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}

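/*
 * Digest a key longer than the HMAC block size down to digestsize,
 * reusing the caller's tfm synchronously via crypto_wait_req().  The
 * tfm keylen is forced to 0 for the duration so the request runs as a
 * plain hash rather than an HMAC.
 */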
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);

	return ret;
}

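/*
 * HMAC setkey: as in RFC 2104, a key longer than the block size is
 * first hashed down to digestsize, while shorter keys are used as-is.
 * The resulting key material is cached in the ctx and DMA-mapped once.
 */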
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct device *dev = ctx->dev;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize) {
		memcpy(ctx->key, key, keysize);
	} else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret)
			return -EINVAL;

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	ctx->keylen = keysize;
	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);

	return 0;
}

struct talitos_alg_template {
	u32 type;
	u32 priority;
	union {
		struct skcipher_alg skcipher;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};

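/*
 * Each entry below pairs a crypto API algorithm definition with the
 * desc_hdr_template selecting the descriptor type and execution
 * unit(s).  A template is only registered when hw_supports() confirms
 * the device tree advertises the required descriptor type and EUs.
 */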
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* SKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(aes)",
			.base.cra_driver_name = "ecb-aes-talitos",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = skcipher_aes_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-talitos",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = skcipher_aes_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ctr(aes)",
			.base.cra_driver_name = "ctr-aes-talitos",
			.base.cra_blocksize = 1,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = skcipher_aes_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
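	/*
	 * Second ctr(aes) variant: same algorithm, but using the plain
	 * non-snooping descriptor type.  talitos_alg_alloc() keeps only
	 * the variant whose descriptor type the hardware implements.
	 */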
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ctr(aes)",
			.base.cra_driver_name = "ctr-aes-talitos",
			.base.cra_blocksize = 1,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = skcipher_aes_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des)",
			.base.cra_driver_name = "ecb-des-talitos",
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = skcipher_des_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des)",
			.base.cra_driver_name = "cbc-des-talitos",
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = skcipher_des_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des3_ede)",
			.base.cra_driver_name = "ecb-3des-talitos",
			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.setkey = skcipher_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des3_ede)",
			.base.cra_driver_name = "cbc-3des-talitos",
			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.base.cra_flags = CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = skcipher_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.skcipher);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_alg = container_of(__crypto_ahash_alg(alg),
				   struct talitos_crypto_alg,
				   algt.alg.hash);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return talitos_init_common(ctx, talitos_alg);
}

static void talitos_cra_exit(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}

3120 /*
3121  * given the alg's descriptor header template, determine whether descriptor
3122  * type and primary/secondary execution units required match the hw
3123  * capabilities description provided in the device tree node.
3124  */
3125 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3126 {
3127 	struct talitos_private *priv = dev_get_drvdata(dev);
3128 	int ret;
3129 
3130 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3131 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3132 
3133 	if (SECONDARY_EU(desc_hdr_template))
3134 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3135 		              & priv->exec_units);
3136 
3137 	return ret;
3138 }
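/*
 * A minimal sketch of the check above with illustrative mask values
 * (real ones come from "fsl,descriptor-types-mask" and
 * "fsl,exec-units-mask" in the device tree):
 *
 *	__be32 hdr = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
 *		     DESC_HDR_SEL0_MDEUA;
 *	ret = (1 << DESC_TYPE(hdr) & priv->desc_types) &&
 *	      (1 << PRIMARY_EU(hdr) & priv->exec_units);
 *
 * i.e. both the descriptor type bit and the primary EU bit must be set
 * in the firmware-provided masks; the secondary EU bit is consulted
 * only when the template actually selects a secondary unit.
 */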
3139 
3140 static void talitos_remove(struct platform_device *ofdev)
3141 {
3142 	struct device *dev = &ofdev->dev;
3143 	struct talitos_private *priv = dev_get_drvdata(dev);
3144 	struct talitos_crypto_alg *t_alg, *n;
3145 	int i;
3146 
3147 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3148 		switch (t_alg->algt.type) {
3149 		case CRYPTO_ALG_TYPE_SKCIPHER:
3150 			crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3151 			break;
3152 		case CRYPTO_ALG_TYPE_AEAD:
3153 			crypto_unregister_aead(&t_alg->algt.alg.aead);
3154 			break;
3155 		case CRYPTO_ALG_TYPE_AHASH:
3156 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3157 			break;
3158 		}
3159 		list_del(&t_alg->entry);
3160 	}
3161 
3162 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3163 		talitos_unregister_rng(dev);
3164 
3165 	for (i = 0; i < 2; i++)
3166 		if (priv->irq[i]) {
3167 			free_irq(priv->irq[i], dev);
3168 			irq_dispose_mapping(priv->irq[i]);
3169 		}
3170 
3171 	tasklet_kill(&priv->done_task[0]);
3172 	if (priv->irq[1])
3173 		tasklet_kill(&priv->done_task[1]);
3174 }
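/*
 * talitos_remove() doubles as the unwind path of talitos_probe() (see
 * the err_out label there), so each step above must tolerate a partially
 * initialised device: the alg list may be empty, the RNG may never have
 * been registered, and either IRQ mapping may still be zero.
 */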
3175 
3176 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3177 						    struct talitos_alg_template
3178 						           *template)
3179 {
3180 	struct talitos_private *priv = dev_get_drvdata(dev);
3181 	struct talitos_crypto_alg *t_alg;
3182 	struct crypto_alg *alg;
3183 
3184 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3185 			     GFP_KERNEL);
3186 	if (!t_alg)
3187 		return ERR_PTR(-ENOMEM);
3188 
3189 	t_alg->algt = *template;
3190 
3191 	switch (t_alg->algt.type) {
3192 	case CRYPTO_ALG_TYPE_SKCIPHER:
3193 		alg = &t_alg->algt.alg.skcipher.base;
3194 		alg->cra_exit = talitos_cra_exit;
3195 		t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3196 		t_alg->algt.alg.skcipher.setkey =
3197 			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3198 		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3199 		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3200 		if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
3201 		    DESC_TYPE(t_alg->algt.desc_hdr_template) !=
3202 		    DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
3203 			devm_kfree(dev, t_alg);
3204 			return ERR_PTR(-ENOTSUPP);
3205 		}
3206 		break;
3207 	case CRYPTO_ALG_TYPE_AEAD:
3208 		alg = &t_alg->algt.alg.aead.base;
3209 		alg->cra_exit = talitos_cra_exit;
3210 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3211 		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3212 					      aead_setkey;
3213 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3214 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3215 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3217 			devm_kfree(dev, t_alg);
3218 			return ERR_PTR(-ENOTSUPP);
3219 		}
3220 		break;
3221 	case CRYPTO_ALG_TYPE_AHASH:
3222 		alg = &t_alg->algt.alg.hash.halg.base;
3223 		alg->cra_init = talitos_cra_init_ahash;
3224 		alg->cra_exit = talitos_cra_exit;
3225 		t_alg->algt.alg.hash.init = ahash_init;
3226 		t_alg->algt.alg.hash.update = ahash_update;
3227 		t_alg->algt.alg.hash.final = ahash_final;
3228 		t_alg->algt.alg.hash.finup = ahash_finup;
3229 		t_alg->algt.alg.hash.digest = ahash_digest;
3230 		if (!strncmp(alg->cra_name, "hmac", 4))
3231 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3232 		t_alg->algt.alg.hash.import = ahash_import;
3233 		t_alg->algt.alg.hash.export = ahash_export;
3234 
3235 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3236 		    !strncmp(alg->cra_name, "hmac", 4)) {
3237 			devm_kfree(dev, t_alg);
3238 			return ERR_PTR(-ENOTSUPP);
3239 		}
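		/*
		 * Without hardware SHA-224 init support, fall back to a
		 * software-initialised context: SHA-224 is SHA-256 with a
		 * different IV and a truncated digest, so the descriptor
		 * header is rewritten to the MDEU SHA-256 mode and the
		 * *_sha224_swinit helpers load the SHA-224 IV themselves.
		 */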
3240 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3241 		    (!strcmp(alg->cra_name, "sha224") ||
3242 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3243 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3244 			t_alg->algt.alg.hash.digest =
3245 				ahash_digest_sha224_swinit;
3246 			t_alg->algt.desc_hdr_template =
3247 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3248 					DESC_HDR_SEL0_MDEUA |
3249 					DESC_HDR_MODE0_MDEU_SHA256;
3250 		}
3251 		break;
3252 	default:
3253 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3254 		devm_kfree(dev, t_alg);
3255 		return ERR_PTR(-EINVAL);
3256 	}
3257 
3258 	alg->cra_module = THIS_MODULE;
3259 	if (t_alg->algt.priority)
3260 		alg->cra_priority = t_alg->algt.priority;
3261 	else
3262 		alg->cra_priority = TALITOS_CRA_PRIORITY;
3263 	if (has_ftr_sec1(priv) && t_alg->algt.type != CRYPTO_ALG_TYPE_AHASH)
3264 		alg->cra_alignmask = 3;
3265 	else
3266 		alg->cra_alignmask = 0;
3267 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3268 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3269 
3270 	t_alg->dev = dev;
3271 
3272 	return t_alg;
3273 }
3274 
3275 static int talitos_probe_irq(struct platform_device *ofdev)
3276 {
3277 	struct device *dev = &ofdev->dev;
3278 	struct device_node *np = ofdev->dev.of_node;
3279 	struct talitos_private *priv = dev_get_drvdata(dev);
3280 	int err;
3281 	bool is_sec1 = has_ftr_sec1(priv);
3282 
3283 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3284 	if (!priv->irq[0]) {
3285 		dev_err(dev, "failed to map irq\n");
3286 		return -EINVAL;
3287 	}
3288 	if (is_sec1) {
3289 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3290 				  dev_driver_string(dev), dev);
3291 		goto primary_out;
3292 	}
3293 
3294 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3295 
3296 	/* get the primary irq line */
3297 	if (!priv->irq[1]) {
3298 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3299 				  dev_driver_string(dev), dev);
3300 		goto primary_out;
3301 	}
3302 
3303 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3304 			  dev_driver_string(dev), dev);
3305 	if (err)
3306 		goto primary_out;
3307 
3308 	/* get the secondary irq line */
3309 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3310 			  dev_driver_string(dev), dev);
3311 	if (err) {
3312 		dev_err(dev, "failed to request secondary irq\n");
3313 		irq_dispose_mapping(priv->irq[1]);
3314 		priv->irq[1] = 0;
3315 	}
3316 
3317 	return err;
3318 
3319 primary_out:
3320 	if (err) {
3321 		dev_err(dev, "failed to request primary irq\n");
3322 		irq_dispose_mapping(priv->irq[0]);
3323 		priv->irq[0] = 0;
3324 	}
3325 
3326 	return err;
3327 }
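/*
 * Resulting interrupt topology: SEC1 always uses a single IRQ for all
 * channels; SEC2+ services the whole device from one line when only one
 * mapping exists in the device tree, or splits channels 0/2 and 1/3
 * across two lines otherwise.  A non-zero priv->irq[1] therefore doubles
 * as the "dual IRQ" flag tested in probe and remove.
 */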
3328 
3329 static int talitos_probe(struct platform_device *ofdev)
3330 {
3331 	struct device *dev = &ofdev->dev;
3332 	struct device_node *np = ofdev->dev.of_node;
3333 	struct talitos_private *priv;
3334 	int i, err;
3335 	int stride;
3336 	struct resource *res;
3337 
3338 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3339 	if (!priv)
3340 		return -ENOMEM;
3341 
3342 	INIT_LIST_HEAD(&priv->alg_list);
3343 
3344 	dev_set_drvdata(dev, priv);
3345 
3346 	priv->ofdev = ofdev;
3347 
3348 	spin_lock_init(&priv->reg_lock);
3349 
3350 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3351 	if (!res)
3352 		return -ENXIO;
3353 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3354 	if (!priv->reg) {
3355 		dev_err(dev, "failed to ioremap registers\n");
3356 		err = -ENOMEM;
3357 		goto err_out;
3358 	}
3359 
3360 	/* get SEC version capabilities from device tree */
3361 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3362 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3363 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3364 	of_property_read_u32(np, "fsl,descriptor-types-mask",
3365 			     &priv->desc_types);
3366 
3367 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3368 	    !priv->exec_units || !priv->desc_types) {
3369 		dev_err(dev, "invalid property data in device tree node\n");
3370 		err = -EINVAL;
3371 		goto err_out;
3372 	}
3373 
3374 	if (of_device_is_compatible(np, "fsl,sec3.0"))
3375 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3376 
3377 	if (of_device_is_compatible(np, "fsl,sec2.1"))
3378 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3379 				  TALITOS_FTR_SHA224_HWINIT |
3380 				  TALITOS_FTR_HMAC_OK;
3381 
3382 	if (of_device_is_compatible(np, "fsl,sec1.0"))
3383 		priv->features |= TALITOS_FTR_SEC1;
3384 
3385 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3386 		priv->reg_deu = priv->reg + TALITOS12_DEU;
3387 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3388 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3389 		stride = TALITOS1_CH_STRIDE;
3390 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3391 		priv->reg_deu = priv->reg + TALITOS10_DEU;
3392 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3393 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3394 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3395 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3396 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3397 		stride = TALITOS1_CH_STRIDE;
3398 	} else {
3399 		priv->reg_deu = priv->reg + TALITOS2_DEU;
3400 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3401 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3402 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3403 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3404 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3405 		priv->reg_keu = priv->reg + TALITOS2_KEU;
3406 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3407 		stride = TALITOS2_CH_STRIDE;
3408 	}
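	/*
	 * The per-family maps above reflect shrinking feature sets: SEC2+
	 * exposes all execution units including KEU and CRCU, SEC1.0 drops
	 * those two, and SEC1.2 provides only DEU, AESU and MDEU.  The
	 * channel register stride also differs between the SEC1 and SEC2
	 * families.
	 */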
3409 
3410 	err = talitos_probe_irq(ofdev);
3411 	if (err)
3412 		goto err_out;
3413 
3414 	if (has_ftr_sec1(priv)) {
3415 		if (priv->num_channels == 1)
3416 			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3417 				     (unsigned long)dev);
3418 		else
3419 			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3420 				     (unsigned long)dev);
3421 	} else {
3422 		if (priv->irq[1]) {
3423 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3424 				     (unsigned long)dev);
3425 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3426 				     (unsigned long)dev);
3427 		} else if (priv->num_channels == 1) {
3428 			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3429 				     (unsigned long)dev);
3430 		} else {
3431 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3432 				     (unsigned long)dev);
3433 		}
3434 	}
3435 
3436 	priv->chan = devm_kcalloc(dev,
3437 				  priv->num_channels,
3438 				  sizeof(struct talitos_channel),
3439 				  GFP_KERNEL);
3440 	if (!priv->chan) {
3441 		dev_err(dev, "failed to allocate channel management space\n");
3442 		err = -ENOMEM;
3443 		goto err_out;
3444 	}
3445 
3446 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3447 
3448 	for (i = 0; i < priv->num_channels; i++) {
3449 		priv->chan[i].reg = priv->reg + stride * (i + 1);
3450 		if (!priv->irq[1] || !(i & 1))
3451 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3452 
3453 		spin_lock_init(&priv->chan[i].head_lock);
3454 		spin_lock_init(&priv->chan[i].tail_lock);
3455 
3456 		priv->chan[i].fifo = devm_kcalloc(dev,
3457 						priv->fifo_len,
3458 						sizeof(struct talitos_request),
3459 						GFP_KERNEL);
3460 		if (!priv->chan[i].fifo) {
3461 			dev_err(dev, "failed to allocate request fifo %d\n", i);
3462 			err = -ENOMEM;
3463 			goto err_out;
3464 		}
3465 
3466 		atomic_set(&priv->chan[i].submit_count,
3467 			   -(priv->chfifo_len - 1));
3468 	}
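	/*
	 * Biasing submit_count to -(chfifo_len - 1) keeps one fifo slot in
	 * reserve: atomic_inc_return() in the submit path goes positive
	 * exactly when the channel is full, and the request is then bounced
	 * with -EAGAIN instead of overrunning the ring.
	 */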
3469 
3470 	dma_set_mask(dev, DMA_BIT_MASK(36));
3471 
3472 	/* reset and initialize the h/w */
3473 	err = init_device(dev);
3474 	if (err) {
3475 		dev_err(dev, "failed to initialize device\n");
3476 		goto err_out;
3477 	}
3478 
3479 	/* register the RNG, if available */
3480 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3481 		err = talitos_register_rng(dev);
3482 		if (err) {
3483 			dev_err(dev, "failed to register hwrng: %d\n", err);
3484 			goto err_out;
3485 		} else
3486 			dev_info(dev, "hwrng\n");
3487 	}
3488 
3489 	/* register crypto algorithms the device supports */
3490 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3491 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3492 			struct talitos_crypto_alg *t_alg;
3493 			struct crypto_alg *alg = NULL;
3494 
3495 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3496 			if (IS_ERR(t_alg)) {
3497 				err = PTR_ERR(t_alg);
3498 				if (err == -ENOTSUPP)
3499 					continue;
3500 				goto err_out;
3501 			}
3502 
3503 			switch (t_alg->algt.type) {
3504 			case CRYPTO_ALG_TYPE_SKCIPHER:
3505 				err = crypto_register_skcipher(
3506 						&t_alg->algt.alg.skcipher);
3507 				alg = &t_alg->algt.alg.skcipher.base;
3508 				break;
3509 
3510 			case CRYPTO_ALG_TYPE_AEAD:
3511 				err = crypto_register_aead(
3512 					&t_alg->algt.alg.aead);
3513 				alg = &t_alg->algt.alg.aead.base;
3514 				break;
3515 
3516 			case CRYPTO_ALG_TYPE_AHASH:
3517 				err = crypto_register_ahash(
3518 						&t_alg->algt.alg.hash);
3519 				alg = &t_alg->algt.alg.hash.halg.base;
3520 				break;
3521 			}
3522 			if (err) {
3523 				dev_err(dev, "%s alg registration failed\n",
3524 					alg->cra_driver_name);
3525 				devm_kfree(dev, t_alg);
3526 			} else
3527 				list_add_tail(&t_alg->entry, &priv->alg_list);
3528 		}
3529 	}
3530 	if (!list_empty(&priv->alg_list))
3531 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3532 			 (char *)of_get_property(np, "compatible", NULL));
3533 
3534 	return 0;
3535 
3536 err_out:
3537 	talitos_remove(ofdev);
3538 
3539 	return err;
3540 }
3541 
3542 static const struct of_device_id talitos_match[] = {
3543 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3544 	{
3545 		.compatible = "fsl,sec1.0",
3546 	},
3547 #endif
3548 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3549 	{
3550 		.compatible = "fsl,sec2.0",
3551 	},
3552 #endif
3553 	{},
3554 };
3555 MODULE_DEVICE_TABLE(of, talitos_match);
3556 
3557 static struct platform_driver talitos_driver = {
3558 	.driver = {
3559 		.name = "talitos",
3560 		.of_match_table = talitos_match,
3561 	},
3562 	.probe = talitos_probe,
3563 	.remove_new = talitos_remove,
3564 };
3565 
3566 module_platform_driver(talitos_driver);
3567 
3568 MODULE_LICENSE("GPL");
3569 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3570 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3571