xref: /linux/drivers/crypto/bcm/cipher.c (revision 0866ba23b7efcc6837d6b4231bf91b79647b81ea)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2016 Broadcom
4  */
5 
6 #include <linux/err.h>
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/errno.h>
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/platform_device.h>
13 #include <linux/scatterlist.h>
14 #include <linux/crypto.h>
15 #include <linux/kthread.h>
16 #include <linux/rtnetlink.h>
17 #include <linux/sched.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/io.h>
21 #include <linux/bitops.h>
22 
23 #include <crypto/algapi.h>
24 #include <crypto/aead.h>
25 #include <crypto/internal/aead.h>
26 #include <crypto/aes.h>
27 #include <crypto/des.h>
28 #include <crypto/hmac.h>
29 #include <crypto/sha.h>
30 #include <crypto/md5.h>
31 #include <crypto/authenc.h>
32 #include <crypto/skcipher.h>
33 #include <crypto/hash.h>
34 #include <crypto/sha3.h>
35 
36 #include "util.h"
37 #include "cipher.h"
38 #include "spu.h"
39 #include "spum.h"
40 #include "spu2.h"
41 
42 /* ================= Device Structure ================== */
43 
44 struct device_private iproc_priv;
45 
46 /* ==================== Parameters ===================== */
47 
48 int flow_debug_logging;
49 module_param(flow_debug_logging, int, 0644);
50 MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
51 
52 int packet_debug_logging;
53 module_param(packet_debug_logging, int, 0644);
54 MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
55 
56 int debug_logging_sleep;
57 module_param(debug_logging_sleep, int, 0644);
58 MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
59 
60 /*
61  * The value of these module parameters is used to set the priority for each
62  * algo type when this driver registers algos with the kernel crypto API.
63  * To use a priority other than the default, set the priority in the insmod or
64  * modprobe. Changing the module priority after init time has no effect.
65  *
66  * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
67  * algos, but more preferred than generic software algos.
68  */
69 static int cipher_pri = 150;
70 module_param(cipher_pri, int, 0644);
71 MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
72 
73 static int hash_pri = 100;
74 module_param(hash_pri, int, 0644);
75 MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
76 
77 static int aead_pri = 150;
78 module_param(aead_pri, int, 0644);
79 MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
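
/*
 * For example (a sketch; assumes this driver is built as the bcm_crypto_spu
 * module), to prefer these algos over implementations registered at higher
 * priorities:
 *
 *	modprobe bcm_crypto_spu cipher_pri=400 hash_pri=400 aead_pri=400
 */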
80 
81 /* A type 3 BCM header, expected to precede the SPU header for SPU-M.
82  * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
83  * 0x60 - ring 0
84  * 0x68 - ring 1
85  * 0x70 - ring 2
86  * 0x78 - ring 3
87  */
88 static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
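/*
 * Illustrative only: with the ring number in bits 3..4 of byte 0, the
 * header byte for ring n (0..3) can be formed as
 *
 *	hdr[0] = 0x60 | (n << 3);
 *
 * which yields 0x60, 0x68, 0x70 and 0x78 for rings 0-3, as listed above.
 */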
89 /*
90  * Some SPU hw does not use a BCM header on SPU messages. So BCM_HDR_LEN
91  * is set dynamically after reading the SPU type from the device tree.
92  */
93 #define BCM_HDR_LEN  iproc_priv.bcm_hdr_len
94 
95 /* min and max time in usec to sleep before retrying when mbox queue is full */
96 #define MBOX_SLEEP_MIN  800
97 #define MBOX_SLEEP_MAX 1000
98 
99 /**
100  * select_channel() - Select a SPU channel to handle a crypto request. Selects
101  * channel in round robin order.
102  *
103  * Return:  channel index
104  */
105 static u8 select_channel(void)
106 {
107 	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
108 
109 	return chan_idx % iproc_priv.spu.num_chan;
110 }
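
/*
 * For example, with num_chan = 4 and next_chan assumed to start at 0,
 * successive calls return channels 1, 2, 3, 0, 1, ... (atomic_inc_return()
 * yields the incremented value, so the first request lands on channel 1).
 */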
111 
112 /**
113  * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
114  * receive a SPU response message for an ablkcipher request. Includes buffers to
115  * catch SPU message headers and the response data.
116  * @mssg:	mailbox message containing the receive sg
117  * @rctx:	crypto request context
118  * @rx_frag_num: number of scatterlist elements required to hold the
119  *		SPU response message
120  * @chunksize:	Number of bytes of response data expected
121  * @stat_pad_len: Number of bytes required to pad the STAT field to
122  *		a 4-byte boundary
123  *
124  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
125  * when the request completes, whether the request is handled successfully or
126  * there is an error.
127  *
128  * Return:
129  *   0 if successful
130  *   < 0 if an error
131  */
132 static int
133 spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
134 			    struct iproc_reqctx_s *rctx,
135 			    u8 rx_frag_num,
136 			    unsigned int chunksize, u32 stat_pad_len)
137 {
138 	struct spu_hw *spu = &iproc_priv.spu;
139 	struct scatterlist *sg;	/* used to build sgs in mbox message */
140 	struct iproc_ctx_s *ctx = rctx->ctx;
141 	u32 datalen;		/* Number of bytes of response data expected */
142 
143 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
144 				rctx->gfp);
145 	if (!mssg->spu.dst)
146 		return -ENOMEM;
147 
148 	sg = mssg->spu.dst;
149 	sg_init_table(sg, rx_frag_num);
150 	/* Space for SPU message header */
151 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
152 
153 	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
154 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
155 	    spu->spu_xts_tweak_in_payload())
156 		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
157 			   SPU_XTS_TWEAK_SIZE);
158 
159 	/* Copy in each dst sg entry from request, up to chunksize */
160 	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
161 				 rctx->dst_nents, chunksize);
162 	if (datalen < chunksize) {
163 		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
164 		       __func__, chunksize, datalen);
165 		return -EFAULT;
166 	}
167 
168 	if (ctx->cipher.alg == CIPHER_ALG_RC4)
169 		/* Add buffer to catch 260-byte SUPDT field for RC4 */
170 		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
171 
172 	if (stat_pad_len)
173 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
174 
175 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
176 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
177 
178 	return 0;
179 }
180 
181 /**
182  * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
183  * send a SPU request message for an ablkcipher request. Includes SPU message
184  * headers and the request data.
185  * @mssg:	mailbox message containing the transmit sg
186  * @rctx:	crypto request context
187  * @tx_frag_num: number of scatterlist elements required to construct the
188  *		SPU request message
189  * @chunksize:	Number of bytes of request data
190  * @pad_len:	Number of pad bytes
191  *
192  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
193  * when the request completes, whether the request is handled successfully or
194  * there is an error.
195  *
196  * Return:
197  *   0 if successful
198  *   < 0 if an error
199  */
200 static int
201 spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
202 			    struct iproc_reqctx_s *rctx,
203 			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
204 {
205 	struct spu_hw *spu = &iproc_priv.spu;
206 	struct scatterlist *sg;	/* used to build sgs in mbox message */
207 	struct iproc_ctx_s *ctx = rctx->ctx;
208 	u32 datalen;		/* Number of bytes of request data written */
209 	u32 stat_len;
210 
211 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
212 				rctx->gfp);
213 	if (unlikely(!mssg->spu.src))
214 		return -ENOMEM;
215 
216 	sg = mssg->spu.src;
217 	sg_init_table(sg, tx_frag_num);
218 
219 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
220 		   BCM_HDR_LEN + ctx->spu_req_hdr_len);
221 
222 	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
223 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
224 	    spu->spu_xts_tweak_in_payload())
225 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
226 
227 	/* Copy in each src sg entry from request, up to chunksize */
228 	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
229 				 rctx->src_nents, chunksize);
230 	if (unlikely(datalen < chunksize)) {
231 		pr_err("%s(): failed to copy src sg to mbox msg",
232 		       __func__);
233 		return -EFAULT;
234 	}
235 
236 	if (pad_len)
237 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
238 
239 	stat_len = spu->spu_tx_status_len();
240 	if (stat_len) {
241 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
242 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
243 	}
244 	return 0;
245 }
246 
247 static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
248 				u8 chan_idx)
249 {
250 	int err;
251 	int retry_cnt = 0;
252 	struct device *dev = &(iproc_priv.pdev->dev);
253 
254 	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
255 	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
256 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
257 			/*
258 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
259 			 * not in atomic context and we can wait and try again.
260 			 */
261 			retry_cnt++;
262 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
263 			err = mbox_send_message(iproc_priv.mbox[chan_idx],
264 						mssg);
265 			atomic_inc(&iproc_priv.mb_no_spc);
266 		}
267 	}
268 	if (err < 0) {
269 		atomic_inc(&iproc_priv.mb_send_fail);
270 		return err;
271 	}
272 
273 	/* Check error returned by mailbox controller */
274 	err = mssg->error;
275 	if (unlikely(err < 0)) {
276 		dev_err(dev, "message error %d", err);
278 	}
279 
280 	/* Signal txdone for mailbox channel */
281 	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
282 	return err;
283 }
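
/*
 * A minimal caller sketch (mirroring the handle_*_req() functions below):
 * submit on the channel chosen for this request and treat -EINPROGRESS as
 * the success path, since the response arrives later via spu_rx_callback().
 *
 *	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
 *	if (unlikely(err < 0))
 *		return err;
 *	return -EINPROGRESS;
 */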
284 
285 /**
286  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
287  * a single SPU request message, starting at the current position in the request
288  * data.
289  * @rctx:	Crypto request context
290  *
291  * This may be called on the crypto API thread, or, when a request is so large
292  * it must be broken into multiple SPU messages, on the thread used to invoke
293  * the response callback. When requests are broken into multiple SPU
294  * messages, we assume subsequent messages depend on previous results, and
295  * thus always wait for previous results before submitting the next message.
296  * Because requests are submitted in lock step like this, there is no need
297  * to synchronize access to request data structures.
298  *
299  * Return: -EINPROGRESS: request has been accepted and result will be returned
300  *			 asynchronously
301  *         Any other value indicates an error
302  */
303 static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
304 {
305 	struct spu_hw *spu = &iproc_priv.spu;
306 	struct crypto_async_request *areq = rctx->parent;
307 	struct ablkcipher_request *req =
308 	    container_of(areq, struct ablkcipher_request, base);
309 	struct iproc_ctx_s *ctx = rctx->ctx;
310 	struct spu_cipher_parms cipher_parms;
311 	int err = 0;
312 	unsigned int chunksize = 0;	/* Num bytes of request to submit */
313 	int remaining = 0;	/* Bytes of request still to process */
314 	int chunk_start;	/* Beginning of data for current SPU msg */
315 
316 	/* IV or ctr value to use in this SPU msg */
317 	u8 local_iv_ctr[MAX_IV_SIZE];
318 	u32 stat_pad_len;	/* num bytes to align status field */
319 	u32 pad_len;		/* total length of all padding */
320 	bool update_key = false;
321 	struct brcm_message *mssg;	/* mailbox message */
322 
323 	/* number of entries in src and dst sg in mailbox message. */
324 	u8 rx_frag_num = 2;	/* response header and STATUS */
325 	u8 tx_frag_num = 1;	/* request header */
326 
327 	flow_log("%s\n", __func__);
328 
329 	cipher_parms.alg = ctx->cipher.alg;
330 	cipher_parms.mode = ctx->cipher.mode;
331 	cipher_parms.type = ctx->cipher_type;
332 	cipher_parms.key_len = ctx->enckeylen;
333 	cipher_parms.key_buf = ctx->enckey;
334 	cipher_parms.iv_buf = local_iv_ctr;
335 	cipher_parms.iv_len = rctx->iv_ctr_len;
336 
337 	mssg = &rctx->mb_mssg;
338 	chunk_start = rctx->src_sent;
339 	remaining = rctx->total_todo - chunk_start;
340 
341 	/* determine the chunk we are breaking off and update the indexes */
342 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
343 	    (remaining > ctx->max_payload))
344 		chunksize = ctx->max_payload;
345 	else
346 		chunksize = remaining;
347 
348 	rctx->src_sent += chunksize;
349 	rctx->total_sent = rctx->src_sent;
350 
351 	/* Count number of sg entries to be included in this request */
352 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
353 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
354 
355 	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
356 	    rctx->is_encrypt && chunk_start)
357 		/*
358 		 * Encrypting a chunk other than the first. Copy the last
359 		 * block of the previous result to the IV for this chunk.
360 		 */
361 		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
362 				    rctx->iv_ctr_len,
363 				    chunk_start - rctx->iv_ctr_len);
364 
365 	if (rctx->iv_ctr_len) {
366 		/* get our local copy of the iv */
367 		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
368 				 rctx->iv_ctr_len);
369 
370 		/* generate the next IV if possible */
371 		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
372 		    !rctx->is_encrypt) {
373 			/*
374 			 * CBC Decrypt: next IV is the last ciphertext block in
375 			 * this chunk
376 			 */
377 			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
378 					    rctx->iv_ctr_len,
379 					    rctx->src_sent - rctx->iv_ctr_len);
380 		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
381 			/*
382 			 * The SPU hardware increments the counter once for
383 			 * each AES block of 16 bytes. So update the counter
384 			 * for the next chunk, if there is one. Note that for
385 			 * this chunk, the counter has already been copied to
386 			 * local_iv_ctr. We can assume a block size of 16,
387 			 * because we only support CTR mode for AES, not for
388 			 * any other cipher alg.
389 			 */
390 			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
391 		}
392 	}
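
	/*
	 * Example of the CTR update above (illustrative numbers): a
	 * 64-byte chunk covers four 16-byte AES blocks, so add_to_ctr()
	 * advances the counter by 64 >> 4 = 4 for the next chunk.
	 */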
393 
394 	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
395 		rx_frag_num++;
396 		if (chunk_start) {
397 			/*
398 			 * for non-first RC4 chunks, use SUPDT from previous
399 			 * response as key for this chunk.
400 			 */
401 			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
402 			update_key = true;
403 			cipher_parms.type = CIPHER_TYPE_UPDT;
404 		} else if (!rctx->is_encrypt) {
405 			/*
406 			 * First RC4 chunk. For decrypt, key in pre-built msg
407 			 * header may have been changed if encrypt required
408 			 * multiple chunks. So revert the key to the
409 			 * ctx->enckey value.
410 			 */
411 			update_key = true;
412 			cipher_parms.type = CIPHER_TYPE_INIT;
413 		}
414 	}
415 
416 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
417 		flow_log("max_payload infinite\n");
418 	else
419 		flow_log("max_payload %u\n", ctx->max_payload);
420 
421 	flow_log("sent:%u start:%u remains:%u size:%u\n",
422 		 rctx->src_sent, chunk_start, remaining, chunksize);
423 
424 	/* Copy SPU header template created at setkey time */
425 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
426 	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));
427 
428 	/*
429 	 * Pass SUPDT field as key. Key field in finish() call is only used
430 	 * when update_key has been set above for RC4. Will be ignored in
431 	 * all other cases.
432 	 */
433 	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
434 				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
435 				   &cipher_parms, update_key, chunksize);
436 
437 	atomic64_add(chunksize, &iproc_priv.bytes_out);
438 
439 	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
440 	if (stat_pad_len)
441 		rx_frag_num++;
442 	pad_len = stat_pad_len;
443 	if (pad_len) {
444 		tx_frag_num++;
445 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
446 				     0, ctx->auth.alg, ctx->auth.mode,
447 				     rctx->total_sent, stat_pad_len);
448 	}
449 
450 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
451 			      ctx->spu_req_hdr_len);
452 	packet_log("payload:\n");
453 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
454 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
455 
456 	/*
457 	 * Build mailbox message containing SPU request msg and rx buffers
458 	 * to catch response message
459 	 */
460 	memset(mssg, 0, sizeof(*mssg));
461 	mssg->type = BRCM_MESSAGE_SPU;
462 	mssg->ctx = rctx;	/* Will be returned in response */
463 
464 	/* Create rx scatterlist to catch result */
465 	rx_frag_num += rctx->dst_nents;
466 
467 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
468 	    spu->spu_xts_tweak_in_payload())
469 		rx_frag_num++;	/* extra sg to insert tweak */
470 
471 	err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
472 					  stat_pad_len);
473 	if (err)
474 		return err;
475 
476 	/* Create tx scatterlist containing SPU request message */
477 	tx_frag_num += rctx->src_nents;
478 	if (spu->spu_tx_status_len())
479 		tx_frag_num++;
480 
481 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
482 	    spu->spu_xts_tweak_in_payload())
483 		tx_frag_num++;	/* extra sg to insert tweak */
484 
485 	err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
486 					  pad_len);
487 	if (err)
488 		return err;
489 
490 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
491 	if (unlikely(err < 0))
492 		return err;
493 
494 	return -EINPROGRESS;
495 }
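
/*
 * Worked example of the chunking above (illustrative numbers): with
 * total_todo = 64 KiB and ctx->max_payload = 16 KiB, the request is split
 * into four 16 KiB chunks. Each call here returns -EINPROGRESS, and the
 * next chunk is only built after the previous response arrives, so the
 * chunks proceed in lock step.
 */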
496 
497 /**
498  * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
499  * total received count for the request and updates global stats.
500  * @rctx:	Crypto request context
501  */
502 static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
503 {
504 	struct spu_hw *spu = &iproc_priv.spu;
505 #ifdef DEBUG
506 	struct crypto_async_request *areq = rctx->parent;
507 	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
508 #endif
509 	struct iproc_ctx_s *ctx = rctx->ctx;
510 	u32 payload_len;
511 
512 	/* See how much data was returned */
513 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
514 
515 	/*
516 	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
517 	 * encrypted tweak ("i") value; we don't count those.
518 	 */
519 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
520 	    spu->spu_xts_tweak_in_payload() &&
521 	    (payload_len >= SPU_XTS_TWEAK_SIZE))
522 		payload_len -= SPU_XTS_TWEAK_SIZE;
523 
524 	atomic64_add(payload_len, &iproc_priv.bytes_in);
525 
526 	flow_log("%s() offset: %u, bd_len: %u BD:\n",
527 		 __func__, rctx->total_received, payload_len);
528 
529 	dump_sg(req->dst, rctx->total_received, payload_len);
530 	if (ctx->cipher.alg == CIPHER_ALG_RC4)
531 		packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
532 			    SPU_SUPDT_LEN);
533 
534 	rctx->total_received += payload_len;
535 	if (rctx->total_received == rctx->total_todo) {
536 		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
537 		atomic_inc(
538 		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
539 	}
540 }
541 
542 /**
543  * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
544  * receive a SPU response message for an ahash request.
545  * @mssg:	mailbox message containing the receive sg
546  * @rctx:	crypto request context
547  * @rx_frag_num: number of scatterlist elements required to hold the
548  *		SPU response message
549  * @digestsize: length of hash digest, in bytes
550  * @stat_pad_len: Number of bytes required to pad the STAT field to
551  *		a 4-byte boundary
552  *
553  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
554  * when the request completes, whether the request is handled successfully or
555  * there is an error.
556  *
557  * Return:
558  *   0 if successful
559  *   < 0 if an error
560  */
561 static int
562 spu_ahash_rx_sg_create(struct brcm_message *mssg,
563 		       struct iproc_reqctx_s *rctx,
564 		       u8 rx_frag_num, unsigned int digestsize,
565 		       u32 stat_pad_len)
566 {
567 	struct spu_hw *spu = &iproc_priv.spu;
568 	struct scatterlist *sg;	/* used to build sgs in mbox message */
569 	struct iproc_ctx_s *ctx = rctx->ctx;
570 
571 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
572 				rctx->gfp);
573 	if (!mssg->spu.dst)
574 		return -ENOMEM;
575 
576 	sg = mssg->spu.dst;
577 	sg_init_table(sg, rx_frag_num);
578 	/* Space for SPU message header */
579 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
580 
581 	/* Space for digest */
582 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
583 
584 	if (stat_pad_len)
585 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
586 
587 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
588 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
589 	return 0;
590 }
591 
592 /**
593  * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
594  * a SPU request message for an ahash request. Includes SPU message headers and
595  * the request data.
596  * @mssg:	mailbox message containing the transmit sg
597  * @rctx:	crypto request context
598  * @tx_frag_num: number of scatterlist elements required to construct the
599  *		SPU request message
600  * @spu_hdr_len: length in bytes of SPU message header
601  * @hash_carry_len: Number of bytes of data carried over from previous req
602  * @new_data_len: Number of bytes of new request data
603  * @pad_len:	Number of pad bytes
604  *
605  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
606  * when the request completes, whether the request is handled successfully or
607  * there is an error.
608  *
609  * Return:
610  *   0 if successful
611  *   < 0 if an error
612  */
613 static int
614 spu_ahash_tx_sg_create(struct brcm_message *mssg,
615 		       struct iproc_reqctx_s *rctx,
616 		       u8 tx_frag_num,
617 		       u32 spu_hdr_len,
618 		       unsigned int hash_carry_len,
619 		       unsigned int new_data_len, u32 pad_len)
620 {
621 	struct spu_hw *spu = &iproc_priv.spu;
622 	struct scatterlist *sg;	/* used to build sgs in mbox message */
623 	u32 datalen;		/* Number of bytes of request data written */
624 	u32 stat_len;
625 
626 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
627 				rctx->gfp);
628 	if (!mssg->spu.src)
629 		return -ENOMEM;
630 
631 	sg = mssg->spu.src;
632 	sg_init_table(sg, tx_frag_num);
633 
634 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
635 		   BCM_HDR_LEN + spu_hdr_len);
636 
637 	if (hash_carry_len)
638 		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
639 
640 	if (new_data_len) {
641 		/* Copy in each src sg entry from request, up to chunksize */
642 		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
643 					 rctx->src_nents, new_data_len);
644 		if (datalen < new_data_len) {
645 			pr_err("%s(): failed to copy src sg to mbox msg",
646 			       __func__);
647 			return -EFAULT;
648 		}
649 	}
650 
651 	if (pad_len)
652 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
653 
654 	stat_len = spu->spu_tx_status_len();
655 	if (stat_len) {
656 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
657 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
658 	}
659 
660 	return 0;
661 }
662 
663 /**
664  * handle_ahash_req() - Process an asynchronous hash request from the crypto
665  * API.
666  * @rctx:  Crypto request context
667  *
668  * Builds a SPU request message embedded in a mailbox message and submits the
669  * mailbox message on a selected mailbox channel. The SPU request message is
670  * constructed as a scatterlist, including entries from the crypto API's
671  * src scatterlist to avoid copying the data to be hashed. This function is
672  * called either on the thread from the crypto API, or, in the case that the
673  * crypto API request is too large to fit in a single SPU request message,
674  * on the thread that invokes the receive callback with a response message.
675  * Because some operations require the response from one chunk before the next
676  * chunk can be submitted, we always wait for the response for the previous
677  * chunk before submitting the next chunk. Because requests are submitted in
678  * lock step like this, there is no need to synchronize access to request data
679  * structures.
680  *
681  * Return:
682  *   -EINPROGRESS: request has been submitted to SPU and response will be
683  *		   returned asynchronously
684  *   -EAGAIN:      non-final request included a small amount of data, which for
685  *		   efficiency we did not submit to the SPU, but instead stored
686  *		   to be submitted to the SPU with the next part of the request
687  *   other:        an error code
688  */
689 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
690 {
691 	struct spu_hw *spu = &iproc_priv.spu;
692 	struct crypto_async_request *areq = rctx->parent;
693 	struct ahash_request *req = ahash_request_cast(areq);
694 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
695 	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
696 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
697 	struct iproc_ctx_s *ctx = rctx->ctx;
698 
699 	/* number of bytes still to be hashed in this req */
700 	unsigned int nbytes_to_hash = 0;
701 	int err = 0;
702 	unsigned int chunksize = 0;	/* length of hash carry + new data */
703 	/*
704 	 * length of new data, not from hash carry, to be submitted in
705 	 * this hw request
706 	 */
707 	unsigned int new_data_len;
708 
709 	unsigned int __maybe_unused chunk_start = 0;
710 	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
711 	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
712 	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
713 	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
714 	struct brcm_message *mssg;	/* mailbox message */
715 	struct spu_request_opts req_opts;
716 	struct spu_cipher_parms cipher_parms;
717 	struct spu_hash_parms hash_parms;
718 	struct spu_aead_parms aead_parms;
719 	unsigned int local_nbuf;
720 	u32 spu_hdr_len;
721 	unsigned int digestsize;
722 	u16 rem = 0;
723 
724 	/*
725 	 * number of entries in src and dst sg. Always includes SPU msg header.
726 	 * rx always includes a buffer to catch digest and STATUS.
727 	 */
728 	u8 rx_frag_num = 3;
729 	u8 tx_frag_num = 1;
730 
731 	flow_log("total_todo %u, total_sent %u\n",
732 		 rctx->total_todo, rctx->total_sent);
733 
734 	memset(&req_opts, 0, sizeof(req_opts));
735 	memset(&cipher_parms, 0, sizeof(cipher_parms));
736 	memset(&hash_parms, 0, sizeof(hash_parms));
737 	memset(&aead_parms, 0, sizeof(aead_parms));
738 
739 	req_opts.bd_suppress = true;
740 	hash_parms.alg = ctx->auth.alg;
741 	hash_parms.mode = ctx->auth.mode;
742 	hash_parms.type = HASH_TYPE_NONE;
743 	hash_parms.key_buf = (u8 *)ctx->authkey;
744 	hash_parms.key_len = ctx->authkeylen;
745 
746 	/*
747 	 * For hash algorithms, the assignment below looks a bit odd, but
748 	 * it's needed for the AES-XCBC and AES-CMAC hash algorithms to
749 	 * differentiate between 128-, 192-, and 256-bit key values.
750 	 * The hash algorithm is selected based on the key size. For
751 	 * example, a 128-bit key selects AES-128.
752 	 */
753 	cipher_parms.type = ctx->cipher_type;
754 
755 	mssg = &rctx->mb_mssg;
756 	chunk_start = rctx->src_sent;
757 
758 	/*
759 	 * Compute the amount remaining to hash. This may include data
760 	 * carried over from previous requests.
761 	 */
762 	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
763 	chunksize = nbytes_to_hash;
764 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
765 	    (chunksize > ctx->max_payload))
766 		chunksize = ctx->max_payload;
767 
768 	/*
769 	 * If this is not a final request and the request data is not a multiple
770 	 * of a full block, then simply park the extra data and prefix it to the
771 	 * data for the next request.
772 	 */
773 	if (!rctx->is_final) {
774 		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
775 		u16 new_len;  /* len of data to add to hash carry */
776 
777 		rem = chunksize % blocksize;   /* remainder */
778 		if (rem) {
779 			/* chunksize not a multiple of blocksize */
780 			chunksize -= rem;
781 			if (chunksize == 0) {
782 				/* Don't have a full block to submit to hw */
783 				new_len = rem - rctx->hash_carry_len;
784 				sg_copy_part_to_buf(req->src, dest, new_len,
785 						    rctx->src_sent);
786 				rctx->hash_carry_len = rem;
787 				flow_log("Exiting with hash carry len: %u\n",
788 					 rctx->hash_carry_len);
789 				packet_dump("  buf: ",
790 					    rctx->hash_carry,
791 					    rctx->hash_carry_len);
792 				return -EAGAIN;
793 			}
794 		}
795 	}
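
	/*
	 * Hash-carry example (illustrative numbers): for a non-final
	 * update of 100 bytes with a 64-byte block size, 64 bytes are
	 * submitted to the hw and the trailing 36 bytes are eventually
	 * parked in rctx->hash_carry (the path above returning -EAGAIN
	 * once no full block remains), to be prefixed to the data of the
	 * next request.
	 */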
796 
797 	/* if we have hash carry, then prefix it to the data in this request */
798 	local_nbuf = rctx->hash_carry_len;
799 	rctx->hash_carry_len = 0;
800 	if (local_nbuf)
801 		tx_frag_num++;
802 	new_data_len = chunksize - local_nbuf;
803 
804 	/* Count number of sg entries to be used in this request */
805 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
806 				       new_data_len);
807 
808 	/* AES hashing keeps key size in type field, so need to copy it here */
809 	if (hash_parms.alg == HASH_ALG_AES)
810 		hash_parms.type = (enum hash_type)cipher_parms.type;
811 	else
812 		hash_parms.type = spu->spu_hash_type(rctx->total_sent);
813 
814 	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
815 					  hash_parms.type);
816 	hash_parms.digestsize = digestsize;
817 
818 	/* update the indexes */
819 	rctx->total_sent += chunksize;
820 	/* data submitted from the hash carry buffer did not come from req->src */
821 	rctx->src_sent += new_data_len;
822 
823 	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
824 		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
825 							   hash_parms.mode,
826 							   chunksize,
827 							   blocksize);
828 
829 	/*
830 	 * If a non-first chunk, then include the digest returned from the
831 	 * previous chunk so that hw can add to it (except for AES types).
832 	 */
833 	if ((hash_parms.type == HASH_TYPE_UPDT) &&
834 	    (hash_parms.alg != HASH_ALG_AES)) {
835 		hash_parms.key_buf = rctx->incr_hash;
836 		hash_parms.key_len = digestsize;
837 	}
838 
839 	atomic64_add(chunksize, &iproc_priv.bytes_out);
840 
841 	flow_log("%s() final: %u nbuf: %u ",
842 		 __func__, rctx->is_final, local_nbuf);
843 
844 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
845 		flow_log("max_payload infinite\n");
846 	else
847 		flow_log("max_payload %u\n", ctx->max_payload);
848 
849 	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
850 
851 	/* Prepend SPU header with type 3 BCM header */
852 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
853 
854 	hash_parms.prebuf_len = local_nbuf;
855 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
856 					      BCM_HDR_LEN,
857 					      &req_opts, &cipher_parms,
858 					      &hash_parms, &aead_parms,
859 					      new_data_len);
860 
861 	if (spu_hdr_len == 0) {
862 		pr_err("Failed to create SPU request header\n");
863 		return -EFAULT;
864 	}
865 
866 	/*
867 	 * Determine total length of padding required. Put all padding in one
868 	 * buffer.
869 	 */
870 	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
871 	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
872 				   0, 0, hash_parms.pad_len);
873 	if (spu->spu_tx_status_len())
874 		stat_pad_len = spu->spu_wordalign_padlen(db_size);
875 	if (stat_pad_len)
876 		rx_frag_num++;
877 	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
878 	if (pad_len) {
879 		tx_frag_num++;
880 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
881 				     hash_parms.pad_len, ctx->auth.alg,
882 				     ctx->auth.mode, rctx->total_sent,
883 				     stat_pad_len);
884 	}
885 
886 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
887 			      spu_hdr_len);
888 	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
889 	flow_log("Data:\n");
890 	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
891 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
892 
893 	/*
894 	 * Build mailbox message containing SPU request msg and rx buffers
895 	 * to catch response message
896 	 */
897 	memset(mssg, 0, sizeof(*mssg));
898 	mssg->type = BRCM_MESSAGE_SPU;
899 	mssg->ctx = rctx;	/* Will be returned in response */
900 
901 	/* Create rx scatterlist to catch result */
902 	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
903 				     stat_pad_len);
904 	if (err)
905 		return err;
906 
907 	/* Create tx scatterlist containing SPU request message */
908 	tx_frag_num += rctx->src_nents;
909 	if (spu->spu_tx_status_len())
910 		tx_frag_num++;
911 	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
912 				     local_nbuf, new_data_len, pad_len);
913 	if (err)
914 		return err;
915 
916 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
917 	if (unlikely(err < 0))
918 		return err;
919 
920 	return -EINPROGRESS;
921 }
922 
923 /**
924  * spu_hmac_outer_hash() - Request synchronous software computation of the outer hash
925  * for an HMAC request.
926  * @req:  The HMAC request from the crypto API
927  * @ctx:  The session context
928  *
929  * Return: 0 if synchronous hash operation successful
930  *         -EINVAL if the hash algo is unrecognized
931  *         any other value indicates an error
932  */
933 static int spu_hmac_outer_hash(struct ahash_request *req,
934 			       struct iproc_ctx_s *ctx)
935 {
936 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
937 	unsigned int blocksize =
938 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
939 	int rc;
940 
941 	switch (ctx->auth.alg) {
942 	case HASH_ALG_MD5:
943 		rc = do_shash("md5", req->result, ctx->opad, blocksize,
944 			      req->result, ctx->digestsize, NULL, 0);
945 		break;
946 	case HASH_ALG_SHA1:
947 		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
948 			      req->result, ctx->digestsize, NULL, 0);
949 		break;
950 	case HASH_ALG_SHA224:
951 		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
952 			      req->result, ctx->digestsize, NULL, 0);
953 		break;
954 	case HASH_ALG_SHA256:
955 		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
956 			      req->result, ctx->digestsize, NULL, 0);
957 		break;
958 	case HASH_ALG_SHA384:
959 		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
960 			      req->result, ctx->digestsize, NULL, 0);
961 		break;
962 	case HASH_ALG_SHA512:
963 		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
964 			      req->result, ctx->digestsize, NULL, 0);
965 		break;
966 	default:
967 		pr_err("%s() Error : unknown hmac type\n", __func__);
968 		rc = -EINVAL;
969 	}
970 	return rc;
971 }
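
/*
 * For reference, this is the outer step of HMAC (RFC 2104):
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * The inner digest H((K ^ ipad) || m) is already in req->result when this
 * is called, and ctx->opad is assumed to hold the blocksize-length
 * K ^ opad value precomputed at setkey time.
 */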
972 
973 /**
974  * ahash_req_done() - Process a hash result from the SPU hardware.
975  * @rctx: Crypto request context
976  *
977  * Return: 0 if successful
978  *         < 0 if an error
979  */
980 static int ahash_req_done(struct iproc_reqctx_s *rctx)
981 {
982 	struct spu_hw *spu = &iproc_priv.spu;
983 	struct crypto_async_request *areq = rctx->parent;
984 	struct ahash_request *req = ahash_request_cast(areq);
985 	struct iproc_ctx_s *ctx = rctx->ctx;
986 	int err;
987 
988 	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
989 
990 	if (spu->spu_type == SPU_TYPE_SPUM) {
991 		/* byte swap the output from the UPDT function to network byte
992 		 * order
993 		 */
994 		if (ctx->auth.alg == HASH_ALG_MD5) {
995 			__swab32s((u32 *)req->result);
996 			__swab32s(((u32 *)req->result) + 1);
997 			__swab32s(((u32 *)req->result) + 2);
998 			__swab32s(((u32 *)req->result) + 3);
999 			__swab32s(((u32 *)req->result) + 4);
1000 		}
1001 	}
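
	/*
	 * For reference: __swab32s() byte-reverses a 32-bit word in
	 * place, e.g. 0x01234567 becomes 0x67452301.
	 */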
1002 
1003 	flow_dump("  digest ", req->result, ctx->digestsize);
1004 
1005 	/* if this an HMAC then do the outer hash */
1006 	if (rctx->is_sw_hmac) {
1007 		err = spu_hmac_outer_hash(req, ctx);
1008 		if (err < 0)
1009 			return err;
1010 		flow_dump("  hmac: ", req->result, ctx->digestsize);
1011 	}
1012 
1013 	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
1014 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
1015 		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
1016 	} else {
1017 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
1018 		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
1019 	}
1020 
1021 	return 0;
1022 }
1023 
1024 /**
1025  * handle_ahash_resp() - Process a SPU response message for a hash request.
1026  * Checks if the entire crypto API request has been processed, and if so,
1027  * invokes post processing on the result.
1028  * @rctx: Crypto request context
1029  */
1030 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
1031 {
1032 	struct iproc_ctx_s *ctx = rctx->ctx;
1033 #ifdef DEBUG
1034 	struct crypto_async_request *areq = rctx->parent;
1035 	struct ahash_request *req = ahash_request_cast(areq);
1036 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1037 	unsigned int blocksize =
1038 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1039 #endif
1040 	/*
1041 	 * Save hash to use as input to next op if incremental. Might be copying
1042 	 * too much, but that's easier than figuring out actual digest size here
1043 	 */
1044 	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1045 
1046 	flow_log("%s() blocksize:%u digestsize:%u\n",
1047 		 __func__, blocksize, ctx->digestsize);
1048 
1049 	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1050 
1051 	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1052 		ahash_req_done(rctx);
1053 }
1054 
1055 /**
1056  * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1057  * a SPU response message for an AEAD request. Includes buffers to catch SPU
1058  * message headers and the response data.
1059  * @mssg:	mailbox message containing the receive sg
1060  * @rctx:	crypto request context
1061  * @rx_frag_num: number of scatterlist elements required to hold the
1062  *		SPU response message
1063  * @assoc_len:	Length of associated data included in the crypto request
1064  * @ret_iv_len: Length of IV returned in response
1065  * @resp_len:	Number of bytes of response data expected to be written to
1066  *              dst buffer from crypto API
1067  * @digestsize: Length of hash digest, in bytes
1068  * @stat_pad_len: Number of bytes required to pad the STAT field to
1069  *		a 4-byte boundary
1070  *
1071  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1072  * when the request completes, whether the request is handled successfully or
1073  * there is an error.
1074  *
1075  * Return:
1076  *   0 if successful
1077  *   < 0 if an error
1078  */
1079 static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1080 				 struct aead_request *req,
1081 				 struct iproc_reqctx_s *rctx,
1082 				 u8 rx_frag_num,
1083 				 unsigned int assoc_len,
1084 				 u32 ret_iv_len, unsigned int resp_len,
1085 				 unsigned int digestsize, u32 stat_pad_len)
1086 {
1087 	struct spu_hw *spu = &iproc_priv.spu;
1088 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1089 	struct iproc_ctx_s *ctx = rctx->ctx;
1090 	u32 datalen;		/* Number of bytes of response data expected */
1091 	u32 assoc_buf_len;
1092 	u8 data_padlen = 0;
1093 
1094 	if (ctx->is_rfc4543) {
1095 		/* RFC4543: only pad after data, not after AAD */
1096 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1097 							  assoc_len + resp_len);
1098 		assoc_buf_len = assoc_len;
1099 	} else {
1100 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1101 							  resp_len);
1102 		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1103 						assoc_len, ret_iv_len,
1104 						rctx->is_encrypt);
1105 	}
1106 
1107 	if (ctx->cipher.mode == CIPHER_MODE_CCM)
1108 		/* ICV (after data) must be in the next 32-bit word for CCM */
1109 		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1110 							 resp_len +
1111 							 data_padlen);
1112 
1113 	if (data_padlen)
1114 		/* have to catch gcm pad in separate buffer */
1115 		rx_frag_num++;
1116 
1117 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1118 				rctx->gfp);
1119 	if (!mssg->spu.dst)
1120 		return -ENOMEM;
1121 
1122 	sg = mssg->spu.dst;
1123 	sg_init_table(sg, rx_frag_num);
1124 
1125 	/* Space for SPU message header */
1126 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1127 
1128 	if (assoc_buf_len) {
1129 		/*
1130 		 * Don't write directly to req->dst, because SPU may pad the
1131 		 * assoc data in the response
1132 		 */
1133 		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1134 		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1135 	}
1136 
1137 	if (resp_len) {
1138 		/*
1139 		 * Copy in each dst sg entry from request, up to chunksize.
1140 		 * dst sg catches just the data. digest caught in separate buf.
1141 		 */
1142 		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1143 					 rctx->dst_nents, resp_len);
1144 		if (datalen < resp_len) {
1145 			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1146 			       __func__, resp_len, datalen);
1147 			return -EFAULT;
1148 		}
1149 	}
1150 
1151 	/* If GCM/CCM data is padded, catch padding in separate buffer */
1152 	if (data_padlen) {
1153 		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1154 		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1155 	}
1156 
1157 	/* Always catch ICV in separate buffer */
1158 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1159 
1160 	flow_log("stat_pad_len %u\n", stat_pad_len);
1161 	if (stat_pad_len) {
1162 		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1163 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1164 	}
1165 
1166 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1167 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1168 
1169 	return 0;
1170 }
1171 
1172 /**
1173  * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1174  * SPU request message for an AEAD request. Includes SPU message headers and the
1175  * request data.
1176  * @mssg:	mailbox message containing the transmit sg
1177  * @rctx:	crypto request context
1178  * @tx_frag_num: number of scatterlist elements required to construct the
1179  *		SPU request message
1180  * @spu_hdr_len: length of SPU message header in bytes
1181  * @assoc:	crypto API associated data scatterlist
1182  * @assoc_len:	length of associated data
1183  * @assoc_nents: number of scatterlist entries containing assoc data
1184  * @aead_iv_len: length of AEAD IV, if included
1185  * @chunksize:	Number of bytes of request data
1186  * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1187  * @pad_len:	Number of pad bytes
1188  * @incl_icv:	If true, write separate ICV buffer after data and
1189  *              any padding
1190  *
1191  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1192  * when the request completes, whether the request is handled successfully or
1193  * there is an error.
1194  *
1195  * Return:
1196  *   0 if successful
1197  *   < 0 if an error
1198  */
1199 static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1200 				 struct iproc_reqctx_s *rctx,
1201 				 u8 tx_frag_num,
1202 				 u32 spu_hdr_len,
1203 				 struct scatterlist *assoc,
1204 				 unsigned int assoc_len,
1205 				 int assoc_nents,
1206 				 unsigned int aead_iv_len,
1207 				 unsigned int chunksize,
1208 				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1209 {
1210 	struct spu_hw *spu = &iproc_priv.spu;
1211 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1212 	struct scatterlist *assoc_sg = assoc;
1213 	struct iproc_ctx_s *ctx = rctx->ctx;
1214 	u32 datalen;		/* Number of bytes of data to write */
1215 	u32 written;		/* Number of bytes of data written */
1216 	u32 assoc_offset = 0;
1217 	u32 stat_len;
1218 
1219 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1220 				rctx->gfp);
1221 	if (!mssg->spu.src)
1222 		return -ENOMEM;
1223 
1224 	sg = mssg->spu.src;
1225 	sg_init_table(sg, tx_frag_num);
1226 
1227 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1228 		   BCM_HDR_LEN + spu_hdr_len);
1229 
1230 	if (assoc_len) {
1231 		/* Copy in each associated data sg entry from request */
1232 		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1233 					 assoc_nents, assoc_len);
1234 		if (written < assoc_len) {
1235 			pr_err("%s(): failed to copy assoc sg to mbox msg",
1236 			       __func__);
1237 			return -EFAULT;
1238 		}
1239 	}
1240 
1241 	if (aead_iv_len)
1242 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1243 
1244 	if (aad_pad_len) {
1245 		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1246 		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1247 	}
1248 
1249 	datalen = chunksize;
1250 	if ((chunksize > ctx->digestsize) && incl_icv)
1251 		datalen -= ctx->digestsize;
1252 	if (datalen) {
1253 		/* For aead, a single msg should consume the entire src sg */
1254 		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1255 					 rctx->src_nents, datalen);
1256 		if (written < datalen) {
1257 			pr_err("%s(): failed to copy src sg to mbox msg",
1258 			       __func__);
1259 			return -EFAULT;
1260 		}
1261 	}
1262 
1263 	if (pad_len) {
1264 		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1265 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1266 	}
1267 
1268 	if (incl_icv)
1269 		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1270 
1271 	stat_len = spu->spu_tx_status_len();
1272 	if (stat_len) {
1273 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
1274 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1275 	}
1276 	return 0;
1277 }
1278 
1279 /**
1280  * handle_aead_req() - Submit a SPU request message for the next chunk of the
1281  * current AEAD request.
1282  * @rctx:  Crypto request context
1283  *
1284  * Unlike other operation types, we assume the length of the request fits in
1285  * a single SPU request message. aead_enqueue() makes sure this is true.
1286  * The comments for other op types regarding threads apply here as well.
1287  *
1288  * Unlike incremental hash ops, where the spu returns the entire hash for
1289  * truncated algs like sha-224, the SPU returns just the truncated hash in
1290  * response to aead requests. So digestsize is always ctx->digestsize here.
1291  *
1292  * Return: -EINPROGRESS: crypto request has been accepted and result will be
1293  *			 returned asynchronously
1294  *         Any other value indicates an error
1295  */
1296 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1297 {
1298 	struct spu_hw *spu = &iproc_priv.spu;
1299 	struct crypto_async_request *areq = rctx->parent;
1300 	struct aead_request *req = container_of(areq,
1301 						struct aead_request, base);
1302 	struct iproc_ctx_s *ctx = rctx->ctx;
1303 	int err;
1304 	unsigned int chunksize;
1305 	unsigned int resp_len;
1306 	u32 spu_hdr_len;
1307 	u32 db_size;
1308 	u32 stat_pad_len;
1309 	u32 pad_len;
1310 	struct brcm_message *mssg;	/* mailbox message */
1311 	struct spu_request_opts req_opts;
1312 	struct spu_cipher_parms cipher_parms;
1313 	struct spu_hash_parms hash_parms;
1314 	struct spu_aead_parms aead_parms;
1315 	int assoc_nents = 0;
1316 	bool incl_icv = false;
1317 	unsigned int digestsize = ctx->digestsize;
1318 
1319 	/* number of entries in src and dst sg. Always includes SPU msg header.
1320 	 */
1321 	u8 rx_frag_num = 2;	/* SPU response header and STATUS */
1322 	u8 tx_frag_num = 1;
1323 
1324 	/* doing the whole thing at once */
1325 	chunksize = rctx->total_todo;
1326 
1327 	flow_log("%s: chunksize %u\n", __func__, chunksize);
1328 
1329 	memset(&req_opts, 0, sizeof(req_opts));
1330 	memset(&hash_parms, 0, sizeof(hash_parms));
1331 	memset(&aead_parms, 0, sizeof(aead_parms));
1332 
1333 	req_opts.is_inbound = !(rctx->is_encrypt);
1334 	req_opts.auth_first = ctx->auth_first;
1335 	req_opts.is_aead = true;
1336 	req_opts.is_esp = ctx->is_esp;
1337 
1338 	cipher_parms.alg = ctx->cipher.alg;
1339 	cipher_parms.mode = ctx->cipher.mode;
1340 	cipher_parms.type = ctx->cipher_type;
1341 	cipher_parms.key_buf = ctx->enckey;
1342 	cipher_parms.key_len = ctx->enckeylen;
1343 	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1344 	cipher_parms.iv_len = rctx->iv_ctr_len;
1345 
1346 	hash_parms.alg = ctx->auth.alg;
1347 	hash_parms.mode = ctx->auth.mode;
1348 	hash_parms.type = HASH_TYPE_NONE;
1349 	hash_parms.key_buf = (u8 *)ctx->authkey;
1350 	hash_parms.key_len = ctx->authkeylen;
1351 	hash_parms.digestsize = digestsize;
1352 
1353 	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1354 	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
1355 		hash_parms.key_len = SHA224_DIGEST_SIZE;
1356 
1357 	aead_parms.assoc_size = req->assoclen;
1358 	if (ctx->is_esp && !ctx->is_rfc4543) {
1359 		/*
1360 		 * The 8-byte IV is included in the assoc data in the
1361 		 * request, but SPU2 expects the AAD to include just the
1362 		 * SPI and seqno. So subtract off the IV len.
1363 		 */
1364 		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1365 
1366 		if (rctx->is_encrypt) {
1367 			aead_parms.return_iv = true;
1368 			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1369 			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1370 		}
1371 	} else {
1372 		aead_parms.ret_iv_len = 0;
1373 	}
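
	/*
	 * Illustrative rfc4106-style example: the crypto API passes
	 * assoclen = SPI (4) + seqno (4) + IV (8) = 16 bytes, so
	 * assoc_size becomes 8, leaving just the SPI and seqno as AAD
	 * for the SPU.
	 */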
1374 
1375 	/*
1376 	 * Count number of sg entries from the crypto API request that are to
1377 	 * be included in this mailbox message. For dst sg, don't count space
1378 	 * for digest. Digest gets caught in a separate buffer and copied back
1379 	 * to dst sg when processing response.
1380 	 */
1381 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1382 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1383 	if (aead_parms.assoc_size)
1384 		assoc_nents = spu_sg_count(rctx->assoc, 0,
1385 					   aead_parms.assoc_size);
1386 
1387 	mssg = &rctx->mb_mssg;
1388 
1389 	rctx->total_sent = chunksize;
1390 	rctx->src_sent = chunksize;
1391 	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1392 				    aead_parms.assoc_size,
1393 				    aead_parms.ret_iv_len,
1394 				    rctx->is_encrypt))
1395 		rx_frag_num++;
1396 
1397 	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1398 						rctx->iv_ctr_len);
1399 
1400 	if (ctx->auth.alg == HASH_ALG_AES)
1401 		hash_parms.type = (enum hash_type)ctx->cipher_type;
1402 
1403 	/* General case AAD padding (CCM and RFC4543 special cases below) */
1404 	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1405 						 aead_parms.assoc_size);
1406 
1407 	/* General case data padding (CCM decrypt special case below) */
1408 	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1409 							   chunksize);
1410 
1411 	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1412 		/*
1413 		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
1414 		 * 128-bit aligned
1415 		 */
1416 		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1417 					 ctx->cipher.mode,
1418 					 aead_parms.assoc_size + 2);
1419 
1420 		/*
1421 		 * And when decrypting CCM, need to pad without including
1422 		 * size of ICV which is tacked on to end of chunk
1423 		 */
1424 		if (!rctx->is_encrypt)
1425 			aead_parms.data_pad_len =
1426 				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1427 							chunksize - digestsize);
1428 
1429 		/* CCM also requires software to rewrite portions of IV: */
1430 		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1431 				       chunksize, rctx->is_encrypt,
1432 				       ctx->is_esp);
1433 	}
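
	/*
	 * Padding arithmetic sketch (assuming spu_gcm_ccm_pad_len() pads
	 * to the 16-byte multiple implied by the 128-bit alignment noted
	 * above): with assoc_size = 20, AAD len + 2 = 22 rounds up to 32,
	 * so aad_pad_len = 10.
	 */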
1434 
1435 	if (ctx->is_rfc4543) {
1436 		/*
1437 		 * RFC4543: data is included in AAD, so don't pad after AAD
1438 		 * and pad data based on both AAD + data size
1439 		 */
1440 		aead_parms.aad_pad_len = 0;
1441 		if (!rctx->is_encrypt)
1442 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1443 					ctx->cipher.mode,
1444 					aead_parms.assoc_size + chunksize -
1445 					digestsize);
1446 		else
1447 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1448 					ctx->cipher.mode,
1449 					aead_parms.assoc_size + chunksize);
1450 
1451 		req_opts.is_rfc4543 = true;
1452 	}
1453 
1454 	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1455 		incl_icv = true;
1456 		tx_frag_num++;
1457 		/* Copy ICV from end of src scatterlist to digest buf */
1458 		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1459 				    req->assoclen + rctx->total_sent -
1460 				    digestsize);
1461 	}
1462 
1463 	atomic64_add(chunksize, &iproc_priv.bytes_out);
1464 
1465 	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1466 
1467 	/* Prepend SPU header with type 3 BCM header */
1468 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1469 
1470 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1471 					      BCM_HDR_LEN, &req_opts,
1472 					      &cipher_parms, &hash_parms,
1473 					      &aead_parms, chunksize);
1474 
1475 	/* Determine total length of padding. Put all padding in one buffer. */
1476 	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1477 				   chunksize, aead_parms.aad_pad_len,
1478 				   aead_parms.data_pad_len, 0);
1479 
1480 	stat_pad_len = spu->spu_wordalign_padlen(db_size);
1481 
1482 	if (stat_pad_len)
1483 		rx_frag_num++;
1484 	pad_len = aead_parms.data_pad_len + stat_pad_len;
1485 	if (pad_len) {
1486 		tx_frag_num++;
1487 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1488 				     aead_parms.data_pad_len, 0,
1489 				     ctx->auth.alg, ctx->auth.mode,
1490 				     rctx->total_sent, stat_pad_len);
1491 	}
1492 
1493 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1494 			      spu_hdr_len);
1495 	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1496 	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1497 	packet_log("BD:\n");
1498 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1499 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1500 
1501 	/*
1502 	 * Build mailbox message containing SPU request msg and rx buffers
1503 	 * to catch response message
1504 	 */
1505 	memset(mssg, 0, sizeof(*mssg));
1506 	mssg->type = BRCM_MESSAGE_SPU;
1507 	mssg->ctx = rctx;	/* Will be returned in response */
1508 
1509 	/* Create rx scatterlist to catch result */
1510 	rx_frag_num += rctx->dst_nents;
1511 	resp_len = chunksize;
1512 
1513 	/*
1514 	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
1515 	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
1516 	 * sends entire digest back.
1517 	 */
1518 	rx_frag_num++;
1519 
1520 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1521 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1522 		/*
1523 		 * Input is ciphertext plus ICV, but the ICV is not
1524 		 * included in the output.
1525 		 */
1526 		resp_len -= ctx->digestsize;
1527 		if (resp_len == 0)
1528 			/* no rx frags to catch output data */
1529 			rx_frag_num -= rctx->dst_nents;
1530 	}
1531 
1532 	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1533 				    aead_parms.assoc_size,
1534 				    aead_parms.ret_iv_len, resp_len, digestsize,
1535 				    stat_pad_len);
1536 	if (err)
1537 		return err;
1538 
1539 	/* Create tx scatterlist containing SPU request message */
1540 	tx_frag_num += rctx->src_nents;
1541 	tx_frag_num += assoc_nents;
1542 	if (aead_parms.aad_pad_len)
1543 		tx_frag_num++;
1544 	if (aead_parms.iv_len)
1545 		tx_frag_num++;
1546 	if (spu->spu_tx_status_len())
1547 		tx_frag_num++;
1548 	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1549 				    rctx->assoc, aead_parms.assoc_size,
1550 				    assoc_nents, aead_parms.iv_len, chunksize,
1551 				    aead_parms.aad_pad_len, pad_len, incl_icv);
1552 	if (err)
1553 		return err;
1554 
1555 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1556 	if (unlikely(err < 0))
1557 		return err;
1558 
1559 	return -EINPROGRESS;
1560 }
1561 
1562 /**
1563  * handle_aead_resp() - Process a SPU response message for an AEAD request.
1564  * @rctx:  Crypto request context
1565  */
1566 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1567 {
1568 	struct spu_hw *spu = &iproc_priv.spu;
1569 	struct crypto_async_request *areq = rctx->parent;
1570 	struct aead_request *req = container_of(areq,
1571 						struct aead_request, base);
1572 	struct iproc_ctx_s *ctx = rctx->ctx;
1573 	u32 payload_len;
1574 	unsigned int icv_offset;
1575 	u32 result_len;
1576 
1577 	/* See how much data was returned */
1578 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1579 	flow_log("payload_len %u\n", payload_len);
1580 
1581 	/* only count payload */
1582 	atomic64_add(payload_len, &iproc_priv.bytes_in);
1583 
1584 	if (req->assoclen)
1585 		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
1586 			    req->assoclen);
1587 
1588 	/*
1589 	 * Copy the ICV back to the destination
1590 	 * buffer. In decrypt case, SPU gives us back the digest, but crypto
1591 	 * API doesn't expect ICV in dst buffer.
1592 	 */
1593 	result_len = req->cryptlen;
1594 	if (rctx->is_encrypt) {
1595 		icv_offset = req->assoclen + rctx->total_sent;
1596 		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1597 		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1598 		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1599 				      ctx->digestsize, icv_offset);
1600 		result_len += ctx->digestsize;
1601 	}
1602 
1603 	packet_log("response data:  ");
1604 	dump_sg(req->dst, req->assoclen, result_len);
1605 
1606 	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1607 	if (ctx->cipher.alg == CIPHER_ALG_AES) {
1608 		if (ctx->cipher.mode == CIPHER_MODE_CCM)
1609 			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1610 		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1611 			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1612 		else
1613 			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1614 	} else {
1615 		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1616 	}
1617 }
1618 
1619 /**
1620  * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
1621  * @rctx:  request context
1622  *
1623  * Mailbox scatterlists are allocated for each chunk. So free them after
1624  * processing each chunk.
1625  */
1626 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1627 {
1628 	/* mailbox message used to tx request */
1629 	struct brcm_message *mssg = &rctx->mb_mssg;
1630 
1631 	kfree(mssg->spu.src);
1632 	kfree(mssg->spu.dst);
1633 	memset(mssg, 0, sizeof(struct brcm_message));
1634 }
1635 
1636 /**
1637  * finish_req() - Used to invoke the complete callback from the requester when
1638  * a request has been handled asynchronously.
1639  * @rctx:  Request context
1640  * @err:   Indicates whether the request was successful or not
1641  *
1642  * Ensures that cleanup has been done for request
1643  */
1644 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1645 {
1646 	struct crypto_async_request *areq = rctx->parent;
1647 
1648 	flow_log("%s() err:%d\n\n", __func__, err);
1649 
1650 	/* No harm done if already called */
1651 	spu_chunk_cleanup(rctx);
1652 
1653 	if (areq)
1654 		areq->complete(areq, err);
1655 }
1656 
1657 /**
1658  * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1659  * @cl:		mailbox client structure for SPU driver
1660  * @msg:	mailbox message containing SPU response
1661  */
1662 static void spu_rx_callback(struct mbox_client *cl, void *msg)
1663 {
1664 	struct spu_hw *spu = &iproc_priv.spu;
1665 	struct brcm_message *mssg = msg;
1666 	struct iproc_reqctx_s *rctx;
1667 	int err = 0;
1668 
1669 	rctx = mssg->ctx;
1670 	if (unlikely(!rctx)) {
1671 		/* This is fatal */
1672 		pr_err("%s(): no request context\n", __func__);
1673 		err = -EFAULT;
1674 		goto cb_finish;
1675 	}
1676 
1677 	/* process the SPU status */
1678 	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1679 	if (err != 0) {
1680 		if (err == SPU_INVALID_ICV)
1681 			atomic_inc(&iproc_priv.bad_icv);
1682 		err = -EBADMSG;
1683 		goto cb_finish;
1684 	}
1685 
1686 	/* Process the SPU response message */
1687 	switch (rctx->ctx->alg->type) {
1688 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
1689 		handle_ablkcipher_resp(rctx);
1690 		break;
1691 	case CRYPTO_ALG_TYPE_AHASH:
1692 		handle_ahash_resp(rctx);
1693 		break;
1694 	case CRYPTO_ALG_TYPE_AEAD:
1695 		handle_aead_resp(rctx);
1696 		break;
1697 	default:
1698 		err = -EINVAL;
1699 		goto cb_finish;
1700 	}
1701 
1702 	/*
1703 	 * If this response does not complete the request, then send the next
1704 	 * request chunk.
1705 	 */
1706 	if (rctx->total_sent < rctx->total_todo) {
1707 		/* Deallocate anything specific to previous chunk */
1708 		spu_chunk_cleanup(rctx);
1709 
1710 		switch (rctx->ctx->alg->type) {
1711 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
1712 			err = handle_ablkcipher_req(rctx);
1713 			break;
1714 		case CRYPTO_ALG_TYPE_AHASH:
1715 			err = handle_ahash_req(rctx);
1716 			if (err == -EAGAIN)
1717 				/*
1718 				 * we saved data in hash carry, but tell crypto
1719 				 * API we successfully completed request.
1720 				 */
1721 				err = 0;
1722 			break;
1723 		case CRYPTO_ALG_TYPE_AEAD:
1724 			err = handle_aead_req(rctx);
1725 			break;
1726 		default:
1727 			err = -EINVAL;
1728 		}
1729 
1730 		if (err == -EINPROGRESS)
1731 			/* Successfully submitted request for next chunk */
1732 			return;
1733 	}
1734 
1735 cb_finish:
1736 	finish_req(rctx, err);
1737 }
1738 
1739 /* ==================== Kernel Cryptographic API ==================== */
1740 
1741 /**
1742  * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
1743  * @req:	Crypto API request
1744  * @encrypt:	true if encrypting; false if decrypting
1745  *
1746  * Return: -EINPROGRESS if request accepted and result will be returned
1747  *			asynchronously
1748  *	   < 0 if an error
1749  */
1750 static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
1751 {
1752 	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
1753 	struct iproc_ctx_s *ctx =
1754 	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1755 	int err;
1756 
1757 	flow_log("%s() enc:%u\n", __func__, encrypt);
1758 
1759 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1760 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1761 	rctx->parent = &req->base;
1762 	rctx->is_encrypt = encrypt;
1763 	rctx->bd_suppress = false;
1764 	rctx->total_todo = req->nbytes;
1765 	rctx->src_sent = 0;
1766 	rctx->total_sent = 0;
1767 	rctx->total_received = 0;
1768 	rctx->ctx = ctx;
1769 
1770 	/* Initialize current position in src and dst scatterlists */
1771 	rctx->src_sg = req->src;
1772 	rctx->src_nents = 0;
1773 	rctx->src_skip = 0;
1774 	rctx->dst_sg = req->dst;
1775 	rctx->dst_nents = 0;
1776 	rctx->dst_skip = 0;
1777 
1778 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1779 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
1780 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
1781 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
1782 	    ctx->cipher.mode == CIPHER_MODE_GCM ||
1783 	    ctx->cipher.mode == CIPHER_MODE_CCM) {
1784 		rctx->iv_ctr_len =
1785 		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
1786 		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
1787 	} else {
1788 		rctx->iv_ctr_len = 0;
1789 	}
1790 
1791 	/* Choose a SPU to process this request */
1792 	rctx->chan_idx = select_channel();
1793 	err = handle_ablkcipher_req(rctx);
1794 	if (err != -EINPROGRESS)
1795 		/* synchronous result */
1796 		spu_chunk_cleanup(rctx);
1797 
1798 	return err;
1799 }
1800 
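/**
 * des_setkey() - Check a DES key and cache the cipher type in the session
 * context. The key bytes themselves are copied by the caller,
 * ablkcipher_setkey().
 * @cipher: transform to which the key applies
 * @key:    DES key
 * @keylen: length of key, in bytes; must be DES_KEY_SIZE
 *
 * Return: 0 on success, -EINVAL for a bad length or a forbidden weak key
 */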
1801 static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1802 		      unsigned int keylen)
1803 {
1804 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1805 	u32 tmp[DES_EXPKEY_WORDS];
1806 
1807 	if (keylen == DES_KEY_SIZE) {
1808 		if (des_ekey(tmp, key) == 0) {
1809 			if (crypto_ablkcipher_get_flags(cipher) &
1810 			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
1811 				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1812 
1813 				crypto_ablkcipher_set_flags(cipher, flags);
1814 				return -EINVAL;
1815 			}
1816 		}
1817 
1818 		ctx->cipher_type = CIPHER_TYPE_DES;
1819 	} else {
1820 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1821 		return -EINVAL;
1822 	}
1823 	return 0;
1824 }
1825 
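/**
 * threedes_setkey() - Check a triple DES key and cache the cipher type in
 * the session context.
 * @cipher: transform to which the key applies
 * @key:    three concatenated DES keys
 * @keylen: length of key, in bytes; must be 3 * DES_KEY_SIZE
 *
 * Return: 0 on success, -EINVAL or the error from __des3_verify_key()
 *	   otherwise
 */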
1826 static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1827 			   unsigned int keylen)
1828 {
1829 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1830 
1831 	if (keylen == (DES_KEY_SIZE * 3)) {
1832 		u32 flags;
1833 		int ret;
1834 
1835 		flags = crypto_ablkcipher_get_flags(cipher);
1836 		ret = __des3_verify_key(&flags, key);
1837 		if (unlikely(ret)) {
1838 			crypto_ablkcipher_set_flags(cipher, flags);
1839 			return ret;
1840 		}
1841 
1842 		ctx->cipher_type = CIPHER_TYPE_3DES;
1843 	} else {
1844 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1845 		return -EINVAL;
1846 	}
1847 	return 0;
1848 }
1849 
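/**
 * aes_setkey() - Determine the AES cipher type from the key length and cache
 * it in the session context.
 * @cipher: transform to which the key applies
 * @key:    AES key
 * @keylen: length of key, in bytes; for XTS this is the combined length of
 *	    both half keys
 *
 * Return: 0 on success, -EINVAL for an unsupported key length
 */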
1850 static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1851 		      unsigned int keylen)
1852 {
1853 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1854 
1855 	if (ctx->cipher.mode == CIPHER_MODE_XTS)
1856 		/* XTS includes two keys of equal length */
1857 		keylen = keylen / 2;
1858 
1859 	switch (keylen) {
1860 	case AES_KEYSIZE_128:
1861 		ctx->cipher_type = CIPHER_TYPE_AES128;
1862 		break;
1863 	case AES_KEYSIZE_192:
1864 		ctx->cipher_type = CIPHER_TYPE_AES192;
1865 		break;
1866 	case AES_KEYSIZE_256:
1867 		ctx->cipher_type = CIPHER_TYPE_AES256;
1868 		break;
1869 	default:
1870 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1871 		return -EINVAL;
1872 	}
1873 	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1874 		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1875 	return 0;
1876 }
1877 
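/**
 * rc4_setkey() - Build the initial RC4 state the SPU expects: four bytes of
 * 0x00/i/0x00/j state followed by the key repeated to fill
 * ARC4_MAX_KEY_SIZE bytes.
 * @cipher: transform to which the key applies
 * @key:    RC4 key
 * @keylen: length of key, in bytes
 *
 * Return: 0 (always succeeds)
 */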
1878 static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1879 		      unsigned int keylen)
1880 {
1881 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1882 	int i;
1883 
1884 	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
1885 
1886 	ctx->enckey[0] = 0x00;	/* 0x00 */
1887 	ctx->enckey[1] = 0x00;	/* i    */
1888 	ctx->enckey[2] = 0x00;	/* 0x00 */
1889 	ctx->enckey[3] = 0x00;	/* j    */
1890 	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
1891 		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
1892 
1893 	ctx->cipher_type = CIPHER_TYPE_INIT;
1894 
1895 	return 0;
1896 }
1897 
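/**
 * ablkcipher_setkey() - Validate and store a cipher key, then precompute the
 * SPU request header used for every request on this tfm. For AES-XTS, the
 * two half keys are stored in the reverse of the order the crypto API
 * presents them, as the SPU requires.
 * @cipher: transform to which the key applies
 * @key:    key bytes from the crypto API
 * @keylen: length of key, in bytes
 *
 * Return: 0 on success, < 0 otherwise
 */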
1898 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1899 			     unsigned int keylen)
1900 {
1901 	struct spu_hw *spu = &iproc_priv.spu;
1902 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1903 	struct spu_cipher_parms cipher_parms;
1904 	u32 alloc_len = 0;
1905 	int err;
1906 
1907 	flow_log("ablkcipher_setkey() keylen: %u\n", keylen);
1908 	flow_dump("  key: ", key, keylen);
1909 
1910 	switch (ctx->cipher.alg) {
1911 	case CIPHER_ALG_DES:
1912 		err = des_setkey(cipher, key, keylen);
1913 		break;
1914 	case CIPHER_ALG_3DES:
1915 		err = threedes_setkey(cipher, key, keylen);
1916 		break;
1917 	case CIPHER_ALG_AES:
1918 		err = aes_setkey(cipher, key, keylen);
1919 		break;
1920 	case CIPHER_ALG_RC4:
1921 		err = rc4_setkey(cipher, key, keylen);
1922 		break;
1923 	default:
1924 		pr_err("%s() Error: unknown cipher alg\n", __func__);
1925 		err = -EINVAL;
1926 	}
1927 	if (err)
1928 		return err;
1929 
1930 	/* RC4 already populated ctx->enckey */
1931 	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
1932 		memcpy(ctx->enckey, key, keylen);
1933 		ctx->enckeylen = keylen;
1934 	}
1935 	/* SPU needs XTS keys in the reverse order the crypto API presents */
1936 	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1937 	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1938 		unsigned int xts_keylen = keylen / 2;
1939 
1940 		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1941 		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1942 	}
1943 
1944 	if (spu->spu_type == SPU_TYPE_SPUM)
1945 		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1946 	else if (spu->spu_type == SPU_TYPE_SPU2)
1947 		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1948 	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1949 	cipher_parms.iv_buf = NULL;
1950 	cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
1951 	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1952 
1953 	cipher_parms.alg = ctx->cipher.alg;
1954 	cipher_parms.mode = ctx->cipher.mode;
1955 	cipher_parms.type = ctx->cipher_type;
1956 	cipher_parms.key_buf = ctx->enckey;
1957 	cipher_parms.key_len = ctx->enckeylen;
1958 
1959 	/* Prepend SPU request message with BCM header */
1960 	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1961 	ctx->spu_req_hdr_len =
1962 	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1963 				     &cipher_parms);
1964 
1965 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1966 							  ctx->enckeylen,
1967 							  false);
1968 
1969 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1970 
1971 	return 0;
1972 }
1973 
1974 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1975 {
1976 	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
1977 
1978 	return ablkcipher_enqueue(req, true);
1979 }
1980 
1981 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1982 {
1983 	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
1984 	return ablkcipher_enqueue(req, false);
1985 }
1986 
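/**
 * ahash_enqueue() - Submit the next chunk of an ahash request to a SPU.
 * Zero-length final hashes, which SPU2 cannot compute, are done
 * synchronously in software instead.
 * @req: Crypto API hash request
 *
 * Return: -EINPROGRESS if submitted to hardware, 0 if handled synchronously
 *	   (including when input was only buffered in the hash carry),
 *	   < 0 if an error
 */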
1987 static int ahash_enqueue(struct ahash_request *req)
1988 {
1989 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1990 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1991 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1992 	int err = 0;
1993 	const char *alg_name;
1994 
1995 	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1996 
1997 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1998 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1999 	rctx->parent = &req->base;
2000 	rctx->ctx = ctx;
2001 	rctx->bd_suppress = true;
2002 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2003 
2004 	/* Initialize position in src scatterlist */
2005 	rctx->src_sg = req->src;
2006 	rctx->src_skip = 0;
2007 	rctx->src_nents = 0;
2008 	rctx->dst_sg = NULL;
2009 	rctx->dst_skip = 0;
2010 	rctx->dst_nents = 0;
2011 
2012 	/* SPU2 hardware does not compute hash of zero length data */
2013 	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
2014 	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
2015 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2016 		flow_log("Doing %sfinal %s zero-len hash request in software\n",
2017 			 rctx->is_final ? "" : "non-", alg_name);
2018 		err = do_shash((unsigned char *)alg_name, req->result,
2019 			       NULL, 0, NULL, 0, ctx->authkey,
2020 			       ctx->authkeylen);
2021 		if (err < 0)
2022 			flow_log("Hash request failed with error %d\n", err);
2023 		return err;
2024 	}
2025 	/* Choose a SPU to process this request */
2026 	rctx->chan_idx = select_channel();
2027 
2028 	err = handle_ahash_req(rctx);
2029 	if (err != -EINPROGRESS)
2030 		/* synchronous result */
2031 		spu_chunk_cleanup(rctx);
2032 
2033 	if (err == -EAGAIN)
2034 		/*
2035 		 * we saved data in hash carry, but tell crypto API
2036 		 * we successfully completed request.
2037 		 */
2038 		err = 0;
2039 
2040 	return err;
2041 }
2042 
2043 static int __ahash_init(struct ahash_request *req)
2044 {
2045 	struct spu_hw *spu = &iproc_priv.spu;
2046 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2047 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2048 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2049 
2050 	flow_log("%s()\n", __func__);
2051 
2052 	/* Initialize the context */
2053 	rctx->hash_carry_len = 0;
2054 	rctx->is_final = 0;
2055 
2056 	rctx->total_todo = 0;
2057 	rctx->src_sent = 0;
2058 	rctx->total_sent = 0;
2059 	rctx->total_received = 0;
2060 
2061 	ctx->digestsize = crypto_ahash_digestsize(tfm);
2062 	/* If we add a hash whose digest is larger, catch it here. */
2063 	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
2064 
2065 	rctx->is_sw_hmac = false;
2066 
2067 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
2068 							  true);
2069 
2070 	return 0;
2071 }
2072 
2073 /**
2074  * spu_no_incr_hash() - Determine whether incremental hashing is supported.
2075  * @ctx:  Crypto session context
2076  *
2077  * SPU-2 does not support incremental hashing (we'll have to revisit and
2078  * condition based on chip revision or device tree entry if future versions do
2079  * support incremental hash)
2080  *
2081  * SPU-M also doesn't support incremental hashing of AES-XCBC
2082  *
2083  * Return: true if incremental hashing is not supported
2084  *         false otherwise
2085  */
2086 static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2087 {
2088 	struct spu_hw *spu = &iproc_priv.spu;
2089 
2090 	if (spu->spu_type == SPU_TYPE_SPU2)
2091 		return true;
2092 
2093 	if ((ctx->auth.alg == HASH_ALG_AES) &&
2094 	    (ctx->auth.mode == HASH_MODE_XCBC))
2095 		return true;
2096 
2097 	/* Otherwise, incremental hashing is supported */
2098 	return false;
2099 }
2100 
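/**
 * ahash_init() - Initialize a hash request. If the hardware cannot do
 * incremental hashing for this session, allocate and key a synchronous
 * shash of the same algorithm so later updates can be computed in software.
 * @req: Crypto API hash request
 *
 * Return: 0 on success, < 0 otherwise
 */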
2101 static int ahash_init(struct ahash_request *req)
2102 {
2103 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2104 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2105 	const char *alg_name;
2106 	struct crypto_shash *hash;
2107 	int ret;
2108 	gfp_t gfp;
2109 
2110 	if (spu_no_incr_hash(ctx)) {
2111 		/*
2112 		 * If we get an incremental hashing request and it's not
2113 		 * supported by the hardware, we need to handle it in software
2114 		 * by calling synchronous hash functions.
2115 		 */
2116 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2117 		hash = crypto_alloc_shash(alg_name, 0, 0);
2118 		if (IS_ERR(hash)) {
2119 			ret = PTR_ERR(hash);
2120 			goto err;
2121 		}
2122 
2123 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2124 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2125 		ctx->shash = kmalloc(sizeof(*ctx->shash) +
2126 				     crypto_shash_descsize(hash), gfp);
2127 		if (!ctx->shash) {
2128 			ret = -ENOMEM;
2129 			goto err_hash;
2130 		}
2131 		ctx->shash->tfm = hash;
2132 
2133 		/* Set the key using data we already have from setkey */
2134 		if (ctx->authkeylen > 0) {
2135 			ret = crypto_shash_setkey(hash, ctx->authkey,
2136 						  ctx->authkeylen);
2137 			if (ret)
2138 				goto err_shash;
2139 		}
2140 
2141 		/* Initialize hash w/ this key and other params */
2142 		ret = crypto_shash_init(ctx->shash);
2143 		if (ret)
2144 			goto err_shash;
2145 	} else {
2146 		/* Otherwise call the internal function which uses SPU hw */
2147 		ret = __ahash_init(req);
2148 	}
2149 
2150 	return ret;
2151 
2152 err_shash:
2153 	kfree(ctx->shash);
2154 err_hash:
2155 	crypto_free_shash(hash);
2156 err:
2157 	return ret;
2158 }
2159 
2160 static int __ahash_update(struct ahash_request *req)
2161 {
2162 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2163 
2164 	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2165 
2166 	if (!req->nbytes)
2167 		return 0;
2168 	rctx->total_todo += req->nbytes;
2169 	rctx->src_sent = 0;
2170 
2171 	return ahash_enqueue(req);
2172 }
2173 
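/**
 * ahash_update() - Process more hash input. In the software fallback case,
 * the scatterlist data is linearized into a temporary buffer and fed to the
 * shash; otherwise the chunk is enqueued to the SPU.
 * @req: Crypto API hash request
 *
 * Return: 0 on success, < 0 otherwise
 */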
2174 static int ahash_update(struct ahash_request *req)
2175 {
2176 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2177 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2178 	u8 *tmpbuf;
2179 	int ret;
2180 	int nents;
2181 	gfp_t gfp;
2182 
2183 	if (spu_no_incr_hash(ctx)) {
2184 		/*
2185 		 * If we get an incremental hashing request and it's not
2186 		 * supported by the hardware, we need to handle it in software
2187 		 * by calling synchronous hash functions.
2188 		 */
2189 		if (req->src)
2190 			nents = sg_nents(req->src);
2191 		else
2192 			return -EINVAL;
2193 
2194 		/* Copy data from req scatterlist to tmp buffer */
2195 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2196 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2197 		tmpbuf = kmalloc(req->nbytes, gfp);
2198 		if (!tmpbuf)
2199 			return -ENOMEM;
2200 
2201 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2202 				req->nbytes) {
2203 			kfree(tmpbuf);
2204 			return -EINVAL;
2205 		}
2206 
2207 		/* Call synchronous update */
2208 		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2209 		kfree(tmpbuf);
2210 	} else {
2211 		/* Otherwise call the internal function which uses SPU hw */
2212 		ret = __ahash_update(req);
2213 	}
2214 
2215 	return ret;
2216 }
2217 
2218 static int __ahash_final(struct ahash_request *req)
2219 {
2220 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2221 
2222 	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2223 
2224 	rctx->is_final = 1;
2225 
2226 	return ahash_enqueue(req);
2227 }
2228 
2229 static int ahash_final(struct ahash_request *req)
2230 {
2231 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2232 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2233 	int ret;
2234 
2235 	if (spu_no_incr_hash(ctx)) {
2236 		/*
2237 		 * If we get an incremental hashing request and it's not
2238 		 * supported by the hardware, we need to handle it in software
2239 		 * by calling synchronous hash functions.
2240 		 */
2241 		ret = crypto_shash_final(ctx->shash, req->result);
2242 
2243 		/* Done with hash, can deallocate it now */
2244 		crypto_free_shash(ctx->shash->tfm);
2245 		kfree(ctx->shash);
2246 
2247 	} else {
2248 		/* Otherwise call the internal function which uses SPU hw */
2249 		ret = __ahash_final(req);
2250 	}
2251 
2252 	return ret;
2253 }
2254 
2255 static int __ahash_finup(struct ahash_request *req)
2256 {
2257 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2258 
2259 	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2260 
2261 	rctx->total_todo += req->nbytes;
2262 	rctx->src_sent = 0;
2263 	rctx->is_final = 1;
2264 
2265 	return ahash_enqueue(req);
2266 }
2267 
2268 static int ahash_finup(struct ahash_request *req)
2269 {
2270 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2271 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2272 	u8 *tmpbuf;
2273 	int ret;
2274 	int nents;
2275 	gfp_t gfp;
2276 
2277 	if (spu_no_incr_hash(ctx)) {
2278 		/*
2279 		 * If we get an incremental hashing request and it's not
2280 		 * supported by the hardware, we need to handle it in software
2281 		 * by calling synchronous hash functions.
2282 		 */
2283 		if (req->src) {
2284 			nents = sg_nents(req->src);
2285 		} else {
2286 			ret = -EINVAL;
2287 			goto ahash_finup_exit;
2288 		}
2289 
2290 		/* Copy data from req scatterlist to tmp buffer */
2291 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2292 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2293 		tmpbuf = kmalloc(req->nbytes, gfp);
2294 		if (!tmpbuf) {
2295 			ret = -ENOMEM;
2296 			goto ahash_finup_exit;
2297 		}
2298 
2299 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2300 				req->nbytes) {
2301 			ret = -EINVAL;
2302 			goto ahash_finup_free;
2303 		}
2304 
2305 		/* Call synchronous finup */
2306 		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2307 					 req->result);
2308 	} else {
2309 		/* Otherwise call the internal function which uses SPU hw */
2310 		return __ahash_finup(req);
2311 	}
2312 ahash_finup_free:
2313 	kfree(tmpbuf);
2314 
2315 ahash_finup_exit:
2316 	/* Done with hash, can deallocate it now */
2317 	crypto_free_shash(ctx->shash->tfm);
2318 	kfree(ctx->shash);
2319 	return ret;
2320 }
2321 
2322 static int ahash_digest(struct ahash_request *req)
2323 {
2324 	int err = 0;
2325 
2326 	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2327 
2328 	/* whole thing at once */
2329 	err = __ahash_init(req);
2330 	if (!err)
2331 		err = __ahash_finup(req);
2332 
2333 	return err;
2334 }
2335 
2336 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2337 			unsigned int keylen)
2338 {
2339 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2340 
2341 	flow_log("%s() ahash:%p key:%p keylen:%u\n",
2342 		 __func__, ahash, key, keylen);
2343 	flow_dump("  key: ", key, keylen);
2344 
2345 	if (ctx->auth.alg == HASH_ALG_AES) {
2346 		switch (keylen) {
2347 		case AES_KEYSIZE_128:
2348 			ctx->cipher_type = CIPHER_TYPE_AES128;
2349 			break;
2350 		case AES_KEYSIZE_192:
2351 			ctx->cipher_type = CIPHER_TYPE_AES192;
2352 			break;
2353 		case AES_KEYSIZE_256:
2354 			ctx->cipher_type = CIPHER_TYPE_AES256;
2355 			break;
2356 		default:
2357 			pr_err("%s() Error: Invalid key length\n", __func__);
2358 			return -EINVAL;
2359 		}
2360 	} else {
2361 		pr_err("%s() Error: unknown hash alg\n", __func__);
2362 		return -EINVAL;
2363 	}
2364 	memcpy(ctx->authkey, key, keylen);
2365 	ctx->authkeylen = keylen;
2366 
2367 	return 0;
2368 }
2369 
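/**
 * ahash_export() - Export the partial hash state (totals, hash carry, and
 * incremental digest) so the request can later be resumed via
 * ahash_import().
 * @req: Crypto API hash request
 * @out: buffer receiving a struct spu_hash_export_s
 *
 * Return: 0 (always succeeds)
 */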
2370 static int ahash_export(struct ahash_request *req, void *out)
2371 {
2372 	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2373 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2374 
2375 	spu_exp->total_todo = rctx->total_todo;
2376 	spu_exp->total_sent = rctx->total_sent;
2377 	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2378 	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2379 	spu_exp->hash_carry_len = rctx->hash_carry_len;
2380 	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2381 
2382 	return 0;
2383 }
2384 
2385 static int ahash_import(struct ahash_request *req, const void *in)
2386 {
2387 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2388 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2389 
2390 	rctx->total_todo = spu_exp->total_todo;
2391 	rctx->total_sent = spu_exp->total_sent;
2392 	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2393 	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2394 	rctx->hash_carry_len = spu_exp->hash_carry_len;
2395 	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2396 
2397 	return 0;
2398 }
2399 
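/**
 * ahash_hmac_setkey() - Program an HMAC key. A key longer than the hash
 * block size is first hashed down to the digest size, per the HMAC
 * definition. For SPU-M, also precompute the ipad and opad blocks (key XOR
 * HMAC_IPAD_VALUE / HMAC_OPAD_VALUE) used for the software-assisted inner
 * and outer hashes.
 * @ahash:  transform to which the key applies
 * @key:    HMAC key
 * @keylen: length of key, in bytes
 *
 * Return: 0 on success, < 0 otherwise
 */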
2400 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2401 			     unsigned int keylen)
2402 {
2403 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2404 	unsigned int blocksize =
2405 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2406 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
2407 	unsigned int index;
2408 	int rc;
2409 
2410 	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2411 		 __func__, ahash, key, keylen, blocksize, digestsize);
2412 	flow_dump("  key: ", key, keylen);
2413 
2414 	if (keylen > blocksize) {
2415 		switch (ctx->auth.alg) {
2416 		case HASH_ALG_MD5:
2417 			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2418 				      0, NULL, 0);
2419 			break;
2420 		case HASH_ALG_SHA1:
2421 			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2422 				      0, NULL, 0);
2423 			break;
2424 		case HASH_ALG_SHA224:
2425 			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2426 				      0, NULL, 0);
2427 			break;
2428 		case HASH_ALG_SHA256:
2429 			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2430 				      0, NULL, 0);
2431 			break;
2432 		case HASH_ALG_SHA384:
2433 			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2434 				      0, NULL, 0);
2435 			break;
2436 		case HASH_ALG_SHA512:
2437 			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2438 				      0, NULL, 0);
2439 			break;
2440 		case HASH_ALG_SHA3_224:
2441 			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2442 				      NULL, 0, NULL, 0);
2443 			break;
2444 		case HASH_ALG_SHA3_256:
2445 			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2446 				      NULL, 0, NULL, 0);
2447 			break;
2448 		case HASH_ALG_SHA3_384:
2449 			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2450 				      NULL, 0, NULL, 0);
2451 			break;
2452 		case HASH_ALG_SHA3_512:
2453 			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2454 				      NULL, 0, NULL, 0);
2455 			break;
2456 		default:
2457 			pr_err("%s() Error: unknown hash alg\n", __func__);
2458 			return -EINVAL;
2459 		}
2460 		if (rc < 0) {
2461 			pr_err("%s() Error %d computing shash for %s\n",
2462 			       __func__, rc, hash_alg_name[ctx->auth.alg]);
2463 			return rc;
2464 		}
2465 		ctx->authkeylen = digestsize;
2466 
2467 		flow_log("  keylen > digestsize... hashed\n");
2468 		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
2469 	} else {
2470 		memcpy(ctx->authkey, key, keylen);
2471 		ctx->authkeylen = keylen;
2472 	}
2473 
2474 	/*
2475 	 * The full HMAC operation in SPU-M is not verified,
2476 	 * so keep the generation of IPAD, OPAD, and the
2477 	 * outer hash in software.
2478 	 */
2479 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2480 		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2481 		memset(ctx->ipad + ctx->authkeylen, 0,
2482 		       blocksize - ctx->authkeylen);
2483 		ctx->authkeylen = 0;
2484 		memcpy(ctx->opad, ctx->ipad, blocksize);
2485 
2486 		for (index = 0; index < blocksize; index++) {
2487 			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2488 			ctx->opad[index] ^= HMAC_OPAD_VALUE;
2489 		}
2490 
2491 		flow_dump("  ipad: ", ctx->ipad, blocksize);
2492 		flow_dump("  opad: ", ctx->opad, blocksize);
2493 	}
2494 	ctx->digestsize = digestsize;
2495 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2496 
2497 	return 0;
2498 }
2499 
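/**
 * ahash_hmac_init() - Initialize an HMAC request. When the SPU can hash
 * incrementally (SPU-M), start the inner hash by prepending the precomputed
 * ipad block to the message stream.
 * @req: Crypto API hash request
 *
 * Return: 0 (always succeeds)
 */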
2500 static int ahash_hmac_init(struct ahash_request *req)
2501 {
2502 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2503 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2504 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2505 	unsigned int blocksize =
2506 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2507 
2508 	flow_log("ahash_hmac_init()\n");
2509 
2510 	/* init the context as a hash */
2511 	ahash_init(req);
2512 
2513 	if (!spu_no_incr_hash(ctx)) {
2514 		/* SPU-M can do incr hashing but needs sw for outer HMAC */
2515 		rctx->is_sw_hmac = true;
2516 		ctx->auth.mode = HASH_MODE_HASH;
2517 		/* start with a prepended ipad */
2518 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2519 		rctx->hash_carry_len = blocksize;
2520 		rctx->total_todo += blocksize;
2521 	}
2522 
2523 	return 0;
2524 }
2525 
2526 static int ahash_hmac_update(struct ahash_request *req)
2527 {
2528 	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2529 
2530 	if (!req->nbytes)
2531 		return 0;
2532 
2533 	return ahash_update(req);
2534 }
2535 
2536 static int ahash_hmac_final(struct ahash_request *req)
2537 {
2538 	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2539 
2540 	return ahash_final(req);
2541 }
2542 
2543 static int ahash_hmac_finup(struct ahash_request *req)
2544 {
2545 	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2546 
2547 	return ahash_finup(req);
2548 }
2549 
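/**
 * ahash_hmac_digest() - Compute an HMAC over the entire request in one pass.
 * SPU2 runs the full HMAC in hardware; on SPU-M the inner hash is computed
 * with a prepended ipad and the outer hash is finished in software.
 * @req: Crypto API hash request
 *
 * Return: -EINPROGRESS if submitted to hardware, 0 or < 0 otherwise
 */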
2550 static int ahash_hmac_digest(struct ahash_request *req)
2551 {
2552 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2553 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2554 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2555 	unsigned int blocksize =
2556 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2557 
2558 	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2559 
2560 	/* Perform initialization and then call finup */
2561 	__ahash_init(req);
2562 
2563 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2564 		/*
2565 		 * SPU2 supports a full HMAC implementation in
2566 		 * hardware, so there is no need to generate the
2567 		 * IPAD, OPAD, or outer hash in software.
2568 		 * Only when the key is longer than the hash block
2569 		 * size must it first be hashed down to the digest
2570 		 * size and fed to SPU2 as the hash key.
2571 		 */
2572 		rctx->is_sw_hmac = false;
2573 		ctx->auth.mode = HASH_MODE_HMAC;
2574 	} else {
2575 		rctx->is_sw_hmac = true;
2576 		ctx->auth.mode = HASH_MODE_HASH;
2577 		/* start with a prepended ipad */
2578 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2579 		rctx->hash_carry_len = blocksize;
2580 		rctx->total_todo += blocksize;
2581 	}
2582 
2583 	return __ahash_finup(req);
2584 }
2585 
2586 /* aead helpers */
2587 
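/**
 * aead_need_fallback() - Determine whether an AEAD request must be handled
 * in software because of SPU hardware limitations (zero-length GCM/CCM
 * input, unsupported CCM digest sizes, zero-length AAD for CCM on NSP,
 * RFC4106/RFC4543 AAD lengths, or payloads exceeding the SPU maximum).
 * @req: AEAD request
 *
 * Return: 1 if a fallback is required, 0 if the SPU can handle the request
 */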
2588 static int aead_need_fallback(struct aead_request *req)
2589 {
2590 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2591 	struct spu_hw *spu = &iproc_priv.spu;
2592 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2593 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2594 	u32 payload_len;
2595 
2596 	/*
2597 	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2598 	 * and AAD are both 0 bytes long. So use fallback in this case.
2599 	 */
2600 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2601 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2602 	    (req->assoclen == 0)) {
2603 		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2604 		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2605 			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2606 			return 1;
2607 		}
2608 	}
2609 
2610 	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2611 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2612 	    (spu->spu_type == SPU_TYPE_SPUM) &&
2613 	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2614 	    (ctx->digestsize != 16)) {
2615 		flow_log("%s() AES CCM needs fallback for digest size %d\n",
2616 			 __func__, ctx->digestsize);
2617 		return 1;
2618 	}
2619 
2620 	/*
2621 	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
2622 	 * when AAD size is 0
2623 	 */
2624 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2625 	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2626 	    (req->assoclen == 0)) {
2627 		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2628 			 __func__);
2629 		return 1;
2630 	}
2631 
2632 	/*
2633 	 * The SPU cannot handle RFC4106/RFC4543 requests where the AAD is
2634 	 * other than 16 or 20 bytes long. So use the fallback in this case.
2635 	 */
2636 	if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2637 	    ctx->cipher.alg == CIPHER_ALG_AES &&
2638 	    rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2639 	    req->assoclen != 16 && req->assoclen != 20) {
2640 		flow_log("RFC4106/RFC4543 needs fallback for assoclen other than 16 or 20 bytes\n");
2642 		return 1;
2643 	}
2644 
2645 	payload_len = req->cryptlen;
2646 	if (spu->spu_type == SPU_TYPE_SPUM)
2647 		payload_len += req->assoclen;
2648 
2649 	flow_log("%s() payload len: %u\n", __func__, payload_len);
2650 
2651 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2652 		return 0;
2653 	else
2654 		return payload_len > ctx->max_payload;
2655 }
2656 
2657 static void aead_complete(struct crypto_async_request *areq, int err)
2658 {
2659 	struct aead_request *req =
2660 	    container_of(areq, struct aead_request, base);
2661 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2662 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2663 
2664 	flow_log("%s() err:%d\n", __func__, err);
2665 
2666 	areq->tfm = crypto_aead_tfm(aead);
2667 
2668 	areq->complete = rctx->old_complete;
2669 	areq->data = rctx->old_data;
2670 
2671 	areq->complete(areq, err);
2672 }
2673 
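/**
 * aead_do_fallback() - Process an AEAD request using the software fallback
 * tfm. The original completion callback is saved and aead_complete()
 * chained in, so the request's tfm and callback can be restored when the
 * fallback finishes.
 * @req:        AEAD request
 * @is_encrypt: true if encrypting; false if decrypting
 *
 * Return: result of the fallback operation, or -EINVAL if no fallback tfm
 *	   was allocated
 */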
2674 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2675 {
2676 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2677 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2678 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2679 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2680 	int err;
2681 	u32 req_flags;
2682 
2683 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2684 
2685 	if (ctx->fallback_cipher) {
2686 		/* Store the cipher tfm and then use the fallback tfm */
2687 		rctx->old_tfm = tfm;
2688 		aead_request_set_tfm(req, ctx->fallback_cipher);
2689 		/*
2690 		 * Save the callback and chain ourselves in, so we can restore
2691 		 * the tfm
2692 		 */
2693 		rctx->old_complete = req->base.complete;
2694 		rctx->old_data = req->base.data;
2695 		req_flags = aead_request_flags(req);
2696 		aead_request_set_callback(req, req_flags, aead_complete, req);
2697 		err = is_encrypt ? crypto_aead_encrypt(req) :
2698 		    crypto_aead_decrypt(req);
2699 
2700 		if (err == 0) {
2701 			/*
2702 			 * fallback was synchronous (did not return
2703 			 * -EINPROGRESS). So restore request state here.
2704 			 */
2705 			aead_request_set_callback(req, req_flags,
2706 						  rctx->old_complete, req);
2707 			req->base.data = rctx->old_data;
2708 			aead_request_set_tfm(req, aead);
2709 			flow_log("%s() fallback completed successfully\n\n",
2710 				 __func__);
2711 		}
2712 	} else {
2713 		err = -EINVAL;
2714 	}
2715 
2716 	return err;
2717 }
2718 
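/**
 * aead_enqueue() - Handle an AEAD encrypt or decrypt request.
 * @req:        AEAD request
 * @is_encrypt: true if encrypting; false if decrypting
 *
 * Return: -EINPROGRESS if the request was accepted and the result will be
 *	   returned asynchronously; otherwise the synchronous or fallback
 *	   result, with < 0 indicating an error
 */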
2719 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2720 {
2721 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2722 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2723 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2724 	int err;
2725 
2726 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2727 
2728 	if (req->assoclen > MAX_ASSOC_SIZE) {
2729 		pr_err("%s() Error: associated data too long. (%u > %u bytes)\n",
2730 		       __func__, req->assoclen, MAX_ASSOC_SIZE);
2732 		return -EINVAL;
2733 	}
2734 
2735 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2736 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2737 	rctx->parent = &req->base;
2738 	rctx->is_encrypt = is_encrypt;
2739 	rctx->bd_suppress = false;
2740 	rctx->total_todo = req->cryptlen;
2741 	rctx->src_sent = 0;
2742 	rctx->total_sent = 0;
2743 	rctx->total_received = 0;
2744 	rctx->is_sw_hmac = false;
2745 	rctx->ctx = ctx;
2746 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2747 
2748 	/* assoc data is at start of src sg */
2749 	rctx->assoc = req->src;
2750 
2751 	/*
2752 	 * Init current position in src scatterlist to be after assoc data.
2753 	 * src_skip set to buffer offset where data begins. (Assoc data could
2754 	 * end in the middle of a buffer.)
2755 	 */
2756 	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2757 			     &rctx->src_skip) < 0) {
2758 		pr_err("%s() Error: Unable to find start of src data\n",
2759 		       __func__);
2760 		return -EINVAL;
2761 	}
2762 
2763 	rctx->src_nents = 0;
2764 	rctx->dst_nents = 0;
2765 	if (req->dst == req->src) {
2766 		rctx->dst_sg = rctx->src_sg;
2767 		rctx->dst_skip = rctx->src_skip;
2768 	} else {
2769 		/*
2770 		 * Expect req->dst to have room for assoc data followed by
2771 		 * output data and ICV, if encrypt. So initialize dst_sg
2772 		 * to point beyond assoc len offset.
2773 		 */
2774 		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2775 				     &rctx->dst_skip) < 0) {
2776 			pr_err("%s() Error: Unable to find start of dst data\n",
2777 			       __func__);
2778 			return -EINVAL;
2779 		}
2780 	}
2781 
2782 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2783 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
2784 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
2785 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
2786 	    ctx->cipher.mode == CIPHER_MODE_GCM) {
2787 		rctx->iv_ctr_len =
2788 			ctx->salt_len +
2789 			crypto_aead_ivsize(crypto_aead_reqtfm(req));
2790 	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2791 		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2792 	} else {
2793 		rctx->iv_ctr_len = 0;
2794 	}
2795 
2796 	rctx->hash_carry_len = 0;
2797 
2798 	flow_log("  src sg: %p\n", req->src);
2799 	flow_log("  rctx->src_sg: %p, src_skip %u\n",
2800 		 rctx->src_sg, rctx->src_skip);
2801 	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
2802 	flow_log("  dst sg: %p\n", req->dst);
2803 	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
2804 		 rctx->dst_sg, rctx->dst_skip);
2805 	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
2806 	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
2807 	flow_log("  authkeylen:%u\n", ctx->authkeylen);
2808 	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2809 
2810 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2811 		flow_log("  max_payload infinite");
2812 	else
2813 		flow_log("  max_payload: %u\n", ctx->max_payload);
2814 
2815 	if (unlikely(aead_need_fallback(req)))
2816 		return aead_do_fallback(req, is_encrypt);
2817 
2818 	/*
2819 	 * Do memory allocations for request after fallback check, because if we
2820 	 * do fallback, we won't call finish_req() to dealloc.
2821 	 */
2822 	if (rctx->iv_ctr_len) {
2823 		if (ctx->salt_len)
2824 			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2825 			       ctx->salt, ctx->salt_len);
2826 		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2827 		       req->iv,
2828 		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2829 	}
2830 
2831 	rctx->chan_idx = select_channel();
2832 	err = handle_aead_req(rctx);
2833 	if (err != -EINPROGRESS)
2834 		/* synchronous result */
2835 		spu_chunk_cleanup(rctx);
2836 
2837 	return err;
2838 }
2839 
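/**
 * aead_authenc_setkey() - setkey() operation for authenc transforms. Splits
 * the combined key into encryption and authentication keys and validates
 * the encryption key for the session's cipher algorithm.
 * @cipher: AEAD structure
 * @key:    combined key, parsed with crypto_authenc_extractkeys()
 * @keylen: length of combined key, in bytes
 *
 * Return: 0 on success, -EINVAL or an error from the fallback setkey
 *	   otherwise
 */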
2840 static int aead_authenc_setkey(struct crypto_aead *cipher,
2841 			       const u8 *key, unsigned int keylen)
2842 {
2843 	struct spu_hw *spu = &iproc_priv.spu;
2844 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2845 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2846 	struct crypto_authenc_keys keys;
2847 	int ret;
2848 
2849 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2850 		 keylen);
2851 	flow_dump("  key: ", key, keylen);
2852 
2853 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
2854 	if (ret)
2855 		goto badkey;
2856 
2857 	if (keys.enckeylen > MAX_KEY_SIZE ||
2858 	    keys.authkeylen > MAX_KEY_SIZE)
2859 		goto badkey;
2860 
2861 	ctx->enckeylen = keys.enckeylen;
2862 	ctx->authkeylen = keys.authkeylen;
2863 
2864 	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2865 	/* May end up padding auth key. So make sure it's zeroed. */
2866 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
2867 	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2868 
2869 	switch (ctx->alg->cipher_info.alg) {
2870 	case CIPHER_ALG_DES:
2871 		if (ctx->enckeylen == DES_KEY_SIZE) {
2872 			u32 tmp[DES_EXPKEY_WORDS];
2873 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2874 
2875 			if (des_ekey(tmp, keys.enckey) == 0) {
2876 				if (crypto_aead_get_flags(cipher) &
2877 				    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
2878 					crypto_aead_set_flags(cipher, flags);
2879 					return -EINVAL;
2880 				}
2881 			}
2882 
2883 			ctx->cipher_type = CIPHER_TYPE_DES;
2884 		} else {
2885 			goto badkey;
2886 		}
2887 		break;
2888 	case CIPHER_ALG_3DES:
2889 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2890 			u32 flags;
2891 
2892 			flags = crypto_aead_get_flags(cipher);
2893 			ret = __des3_verify_key(&flags, keys.enckey);
2894 			if (unlikely(ret)) {
2895 				crypto_aead_set_flags(cipher, flags);
2896 				return ret;
2897 			}
2898 
2899 			ctx->cipher_type = CIPHER_TYPE_3DES;
2900 		} else {
2901 			crypto_aead_set_flags(cipher,
2902 					      CRYPTO_TFM_RES_BAD_KEY_LEN);
2903 			return -EINVAL;
2904 		}
2905 		break;
2906 	case CIPHER_ALG_AES:
2907 		switch (ctx->enckeylen) {
2908 		case AES_KEYSIZE_128:
2909 			ctx->cipher_type = CIPHER_TYPE_AES128;
2910 			break;
2911 		case AES_KEYSIZE_192:
2912 			ctx->cipher_type = CIPHER_TYPE_AES192;
2913 			break;
2914 		case AES_KEYSIZE_256:
2915 			ctx->cipher_type = CIPHER_TYPE_AES256;
2916 			break;
2917 		default:
2918 			goto badkey;
2919 		}
2920 		break;
2921 	case CIPHER_ALG_RC4:
2922 		ctx->cipher_type = CIPHER_TYPE_INIT;
2923 		break;
2924 	default:
2925 		pr_err("%s() Error: Unknown cipher alg\n", __func__);
2926 		return -EINVAL;
2927 	}
2928 
2929 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2930 		 ctx->authkeylen);
2931 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2932 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2933 
2934 	/* setkey the fallback just in case we need to use it */
2935 	if (ctx->fallback_cipher) {
2936 		flow_log("  running fallback setkey()\n");
2937 
2938 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2939 		ctx->fallback_cipher->base.crt_flags |=
2940 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2941 		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2942 		if (ret) {
2943 			flow_log("  fallback setkey() returned:%d\n", ret);
2944 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2945 			tfm->crt_flags |=
2946 			    (ctx->fallback_cipher->base.crt_flags &
2947 			     CRYPTO_TFM_RES_MASK);
2948 		}
2949 	}
2950 
2951 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2952 							  ctx->enckeylen,
2953 							  false);
2954 
2955 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2956 
2957 	return ret;
2958 
2959 badkey:
2960 	ctx->enckeylen = 0;
2961 	ctx->authkeylen = 0;
2962 	ctx->digestsize = 0;
2963 
2964 	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2965 	return -EINVAL;
2966 }
2967 
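/**
 * aead_gcm_ccm_setkey() - setkey() operation shared by the GCM and CCM AES
 * transforms and their ESP variants.
 * @cipher: AEAD structure
 * @key:    AES key
 * @keylen: length of key, in bytes (salt already stripped for ESP variants)
 *
 * Return: 0 on success, -EINVAL for a bad key length
 */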
2968 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2969 			       const u8 *key, unsigned int keylen)
2970 {
2971 	struct spu_hw *spu = &iproc_priv.spu;
2972 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2973 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2974 
2975 	int ret = 0;
2976 
2977 	flow_log("%s() keylen:%u\n", __func__, keylen);
2978 	flow_dump("  key: ", key, keylen);
2979 
2980 	if (!ctx->is_esp)
2981 		ctx->digestsize = keylen;
2982 
2983 	ctx->enckeylen = keylen;
2984 	ctx->authkeylen = 0;
2985 	memcpy(ctx->enckey, key, ctx->enckeylen);
2986 
2987 	switch (ctx->enckeylen) {
2988 	case AES_KEYSIZE_128:
2989 		ctx->cipher_type = CIPHER_TYPE_AES128;
2990 		break;
2991 	case AES_KEYSIZE_192:
2992 		ctx->cipher_type = CIPHER_TYPE_AES192;
2993 		break;
2994 	case AES_KEYSIZE_256:
2995 		ctx->cipher_type = CIPHER_TYPE_AES256;
2996 		break;
2997 	default:
2998 		goto badkey;
2999 	}
3000 
3001 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3002 		 ctx->authkeylen);
3003 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
3004 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
3005 
3006 	/* setkey the fallback just in case we need to use it */
3007 	if (ctx->fallback_cipher) {
3008 		flow_log("  running fallback setkey()\n");
3009 
3010 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
3011 		ctx->fallback_cipher->base.crt_flags |=
3012 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
3013 		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
3014 					 keylen + ctx->salt_len);
3015 		if (ret) {
3016 			flow_log("  fallback setkey() returned:%d\n", ret);
3017 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
3018 			tfm->crt_flags |=
3019 			    (ctx->fallback_cipher->base.crt_flags &
3020 			     CRYPTO_TFM_RES_MASK);
3021 		}
3022 	}
3023 
3024 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
3025 							  ctx->enckeylen,
3026 							  false);
3027 
3028 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
3029 
3030 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3031 		 ctx->authkeylen);
3032 
3033 	return ret;
3034 
3035 badkey:
3036 	ctx->enckeylen = 0;
3037 	ctx->authkeylen = 0;
3038 	ctx->digestsize = 0;
3039 
3040 	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
3041 	return -EINVAL;
3042 }
3043 
3044 /**
3045  * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
3046  * @cipher: AEAD structure
3047  * @key:    Key followed by 4 bytes of salt
3048  * @keylen: Length of key plus salt, in bytes
3049  *
3050  * Extracts salt from key and stores it to be prepended to IV on each request.
3051  * Digest is always 16 bytes
3052  *
3053  * Return: Value from generic gcm setkey.
3054  */
3055 static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3056 			       const u8 *key, unsigned int keylen)
3057 {
3058 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3059 
3060 	flow_log("%s\n", __func__);
3061 	ctx->salt_len = GCM_ESP_SALT_SIZE;
3062 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3063 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3064 	keylen -= GCM_ESP_SALT_SIZE;
3065 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
3066 	ctx->is_esp = true;
3067 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3068 
3069 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3070 }
3071 
3072 /**
3073  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
3074  * @cipher: AEAD structure
3075  * @key:    Key followed by 4 bytes of salt
3076  * @keylen: Length of key plus salt, in bytes
3077  *
3078  * Extracts salt from key and stores it to be prepended to IV on each request.
3079  * Digest is always 16 bytes
3080  *
3081  * Return: Value from generic gcm setkey.
3082  */
3083 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3084 				  const u8 *key, unsigned int keylen)
3085 {
3086 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3087 
3088 	flow_log("%s\n", __func__);
3089 	ctx->salt_len = GCM_ESP_SALT_SIZE;
3090 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3091 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3092 	keylen -= GCM_ESP_SALT_SIZE;
3093 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
3094 	ctx->is_esp = true;
3095 	ctx->is_rfc4543 = true;
3096 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3097 
3098 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3099 }
3100 
3101 /**
3102  * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
3103  * @cipher: AEAD structure
3104  * @key:    Key followed by 4 bytes of salt
3105  * @keylen: Length of key plus salt, in bytes
3106  *
3107  * Extracts salt from key and stores it to be prepended to IV on each request.
3108  * Digest is always 16 bytes
3109  *
3110  * Return: Value from generic ccm setkey.
3111  */
3112 static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3113 			       const u8 *key, unsigned int keylen)
3114 {
3115 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3116 
3117 	flow_log("%s\n", __func__);
3118 	ctx->salt_len = CCM_ESP_SALT_SIZE;
3119 	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3120 	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3121 	keylen -= CCM_ESP_SALT_SIZE;
3122 	ctx->is_esp = true;
3123 	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3124 
3125 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3126 }
3127 
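/**
 * aead_setauthsize() - Record the digest (ICV) size for a session and mirror
 * it to the fallback tfm, if one exists.
 * @cipher:   AEAD structure
 * @authsize: digest size, in bytes
 *
 * Return: 0 on success, or the error from the fallback setauthsize
 */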
3128 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3129 {
3130 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3131 	int ret = 0;
3132 
3133 	flow_log("%s() authkeylen:%u authsize:%u\n",
3134 		 __func__, ctx->authkeylen, authsize);
3135 
3136 	ctx->digestsize = authsize;
3137 
3138 	/* set the fallback's authsize just in case we need to use it */
3139 	if (ctx->fallback_cipher) {
3140 		flow_log("  running fallback setauth()\n");
3141 
3142 		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3143 		if (ret)
3144 			flow_log("  fallback setauth() returned:%d\n", ret);
3145 	}
3146 
3147 	return ret;
3148 }
3149 
3150 static int aead_encrypt(struct aead_request *req)
3151 {
3152 	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3153 		 req->cryptlen);
3154 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3155 	flow_log("  assoc_len:%u\n", req->assoclen);
3156 
3157 	return aead_enqueue(req, true);
3158 }
3159 
3160 static int aead_decrypt(struct aead_request *req)
3161 {
3162 	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3163 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3164 	flow_log("  assoc_len:%u\n", req->assoclen);
3165 
3166 	return aead_enqueue(req, false);
3167 }
3168 
3169 /* ==================== Supported Cipher Algorithms ==================== */
3170 
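/*
 * Illustrative example (not part of the driver): a kernel client reaches
 * these implementations through the normal crypto API, e.g.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 * The "gcm-aes-iproc" implementation below is chosen for that name when its
 * registered priority (aead_pri) is the highest available.
 */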
3171 static struct iproc_alg_s driver_algs[] = {
3172 	{
3173 	 .type = CRYPTO_ALG_TYPE_AEAD,
3174 	 .alg.aead = {
3175 		 .base = {
3176 			.cra_name = "gcm(aes)",
3177 			.cra_driver_name = "gcm-aes-iproc",
3178 			.cra_blocksize = AES_BLOCK_SIZE,
3179 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3180 		 },
3181 		 .setkey = aead_gcm_ccm_setkey,
3182 		 .ivsize = GCM_AES_IV_SIZE,
3183 		 .maxauthsize = AES_BLOCK_SIZE,
3184 	 },
3185 	 .cipher_info = {
3186 			 .alg = CIPHER_ALG_AES,
3187 			 .mode = CIPHER_MODE_GCM,
3188 			 },
3189 	 .auth_info = {
3190 		       .alg = HASH_ALG_AES,
3191 		       .mode = HASH_MODE_GCM,
3192 		       },
3193 	 .auth_first = 0,
3194 	 },
3195 	{
3196 	 .type = CRYPTO_ALG_TYPE_AEAD,
3197 	 .alg.aead = {
3198 		 .base = {
3199 			.cra_name = "ccm(aes)",
3200 			.cra_driver_name = "ccm-aes-iproc",
3201 			.cra_blocksize = AES_BLOCK_SIZE,
3202 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3203 		 },
3204 		 .setkey = aead_gcm_ccm_setkey,
3205 		 .ivsize = CCM_AES_IV_SIZE,
3206 		 .maxauthsize = AES_BLOCK_SIZE,
3207 	 },
3208 	 .cipher_info = {
3209 			 .alg = CIPHER_ALG_AES,
3210 			 .mode = CIPHER_MODE_CCM,
3211 			 },
3212 	 .auth_info = {
3213 		       .alg = HASH_ALG_AES,
3214 		       .mode = HASH_MODE_CCM,
3215 		       },
3216 	 .auth_first = 0,
3217 	 },
3218 	{
3219 	 .type = CRYPTO_ALG_TYPE_AEAD,
3220 	 .alg.aead = {
3221 		 .base = {
3222 			.cra_name = "rfc4106(gcm(aes))",
3223 			.cra_driver_name = "gcm-aes-esp-iproc",
3224 			.cra_blocksize = AES_BLOCK_SIZE,
3225 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3226 		 },
3227 		 .setkey = aead_gcm_esp_setkey,
3228 		 .ivsize = GCM_RFC4106_IV_SIZE,
3229 		 .maxauthsize = AES_BLOCK_SIZE,
3230 	 },
3231 	 .cipher_info = {
3232 			 .alg = CIPHER_ALG_AES,
3233 			 .mode = CIPHER_MODE_GCM,
3234 			 },
3235 	 .auth_info = {
3236 		       .alg = HASH_ALG_AES,
3237 		       .mode = HASH_MODE_GCM,
3238 		       },
3239 	 .auth_first = 0,
3240 	 },
3241 	{
3242 	 .type = CRYPTO_ALG_TYPE_AEAD,
3243 	 .alg.aead = {
3244 		 .base = {
3245 			.cra_name = "rfc4309(ccm(aes))",
3246 			.cra_driver_name = "ccm-aes-esp-iproc",
3247 			.cra_blocksize = AES_BLOCK_SIZE,
3248 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3249 		 },
3250 		 .setkey = aead_ccm_esp_setkey,
3251 		 .ivsize = CCM_AES_IV_SIZE,
3252 		 .maxauthsize = AES_BLOCK_SIZE,
3253 	 },
3254 	 .cipher_info = {
3255 			 .alg = CIPHER_ALG_AES,
3256 			 .mode = CIPHER_MODE_CCM,
3257 			 },
3258 	 .auth_info = {
3259 		       .alg = HASH_ALG_AES,
3260 		       .mode = HASH_MODE_CCM,
3261 		       },
3262 	 .auth_first = 0,
3263 	 },
3264 	{
3265 	 .type = CRYPTO_ALG_TYPE_AEAD,
3266 	 .alg.aead = {
3267 		 .base = {
3268 			.cra_name = "rfc4543(gcm(aes))",
3269 			.cra_driver_name = "gmac-aes-esp-iproc",
3270 			.cra_blocksize = AES_BLOCK_SIZE,
3271 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3272 		 },
3273 		 .setkey = rfc4543_gcm_esp_setkey,
3274 		 .ivsize = GCM_RFC4106_IV_SIZE,
3275 		 .maxauthsize = AES_BLOCK_SIZE,
3276 	 },
3277 	 .cipher_info = {
3278 			 .alg = CIPHER_ALG_AES,
3279 			 .mode = CIPHER_MODE_GCM,
3280 			 },
3281 	 .auth_info = {
3282 		       .alg = HASH_ALG_AES,
3283 		       .mode = HASH_MODE_GCM,
3284 		       },
3285 	 .auth_first = 0,
3286 	 },
3287 	{
3288 	 .type = CRYPTO_ALG_TYPE_AEAD,
3289 	 .alg.aead = {
3290 		 .base = {
3291 			.cra_name = "authenc(hmac(md5),cbc(aes))",
3292 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3293 			.cra_blocksize = AES_BLOCK_SIZE,
3294 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3295 		 },
3296 		 .setkey = aead_authenc_setkey,
3297 		 .ivsize = AES_BLOCK_SIZE,
3298 		 .maxauthsize = MD5_DIGEST_SIZE,
3299 	 },
3300 	 .cipher_info = {
3301 			 .alg = CIPHER_ALG_AES,
3302 			 .mode = CIPHER_MODE_CBC,
3303 			 },
3304 	 .auth_info = {
3305 		       .alg = HASH_ALG_MD5,
3306 		       .mode = HASH_MODE_HMAC,
3307 		       },
3308 	 .auth_first = 0,
3309 	 },
3310 	{
3311 	 .type = CRYPTO_ALG_TYPE_AEAD,
3312 	 .alg.aead = {
3313 		 .base = {
3314 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
3315 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3316 			.cra_blocksize = AES_BLOCK_SIZE,
3317 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3318 		 },
3319 		 .setkey = aead_authenc_setkey,
3320 		 .ivsize = AES_BLOCK_SIZE,
3321 		 .maxauthsize = SHA1_DIGEST_SIZE,
3322 	 },
3323 	 .cipher_info = {
3324 			 .alg = CIPHER_ALG_AES,
3325 			 .mode = CIPHER_MODE_CBC,
3326 			 },
3327 	 .auth_info = {
3328 		       .alg = HASH_ALG_SHA1,
3329 		       .mode = HASH_MODE_HMAC,
3330 		       },
3331 	 .auth_first = 0,
3332 	 },
3333 	{
3334 	 .type = CRYPTO_ALG_TYPE_AEAD,
3335 	 .alg.aead = {
3336 		 .base = {
3337 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
3338 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3339 			.cra_blocksize = AES_BLOCK_SIZE,
3340 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3341 		 },
3342 		 .setkey = aead_authenc_setkey,
3343 		 .ivsize = AES_BLOCK_SIZE,
3344 		 .maxauthsize = SHA256_DIGEST_SIZE,
3345 	 },
3346 	 .cipher_info = {
3347 			 .alg = CIPHER_ALG_AES,
3348 			 .mode = CIPHER_MODE_CBC,
3349 			 },
3350 	 .auth_info = {
3351 		       .alg = HASH_ALG_SHA256,
3352 		       .mode = HASH_MODE_HMAC,
3353 		       },
3354 	 .auth_first = 0,
3355 	 },
3356 	{
3357 	 .type = CRYPTO_ALG_TYPE_AEAD,
3358 	 .alg.aead = {
3359 		 .base = {
3360 			.cra_name = "authenc(hmac(md5),cbc(des))",
3361 			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3362 			.cra_blocksize = DES_BLOCK_SIZE,
3363 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3364 		 },
3365 		 .setkey = aead_authenc_setkey,
3366 		 .ivsize = DES_BLOCK_SIZE,
3367 		 .maxauthsize = MD5_DIGEST_SIZE,
3368 	 },
3369 	 .cipher_info = {
3370 			 .alg = CIPHER_ALG_DES,
3371 			 .mode = CIPHER_MODE_CBC,
3372 			 },
3373 	 .auth_info = {
3374 		       .alg = HASH_ALG_MD5,
3375 		       .mode = HASH_MODE_HMAC,
3376 		       },
3377 	 .auth_first = 0,
3378 	 },
3379 	{
3380 	 .type = CRYPTO_ALG_TYPE_AEAD,
3381 	 .alg.aead = {
3382 		 .base = {
3383 			.cra_name = "authenc(hmac(sha1),cbc(des))",
3384 			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3385 			.cra_blocksize = DES_BLOCK_SIZE,
3386 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3387 		 },
3388 		 .setkey = aead_authenc_setkey,
3389 		 .ivsize = DES_BLOCK_SIZE,
3390 		 .maxauthsize = SHA1_DIGEST_SIZE,
3391 	 },
3392 	 .cipher_info = {
3393 			 .alg = CIPHER_ALG_DES,
3394 			 .mode = CIPHER_MODE_CBC,
3395 			 },
3396 	 .auth_info = {
3397 		       .alg = HASH_ALG_SHA1,
3398 		       .mode = HASH_MODE_HMAC,
3399 		       },
3400 	 .auth_first = 0,
3401 	 },
3402 	{
3403 	 .type = CRYPTO_ALG_TYPE_AEAD,
3404 	 .alg.aead = {
3405 		 .base = {
3406 			.cra_name = "authenc(hmac(sha224),cbc(des))",
3407 			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3408 			.cra_blocksize = DES_BLOCK_SIZE,
3409 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3410 		 },
3411 		 .setkey = aead_authenc_setkey,
3412 		 .ivsize = DES_BLOCK_SIZE,
3413 		 .maxauthsize = SHA224_DIGEST_SIZE,
3414 	 },
3415 	 .cipher_info = {
3416 			 .alg = CIPHER_ALG_DES,
3417 			 .mode = CIPHER_MODE_CBC,
3418 			 },
3419 	 .auth_info = {
3420 		       .alg = HASH_ALG_SHA224,
3421 		       .mode = HASH_MODE_HMAC,
3422 		       },
3423 	 .auth_first = 0,
3424 	 },
3425 	{
3426 	 .type = CRYPTO_ALG_TYPE_AEAD,
3427 	 .alg.aead = {
3428 		 .base = {
3429 			.cra_name = "authenc(hmac(sha256),cbc(des))",
3430 			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3431 			.cra_blocksize = DES_BLOCK_SIZE,
3432 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3433 		 },
3434 		 .setkey = aead_authenc_setkey,
3435 		 .ivsize = DES_BLOCK_SIZE,
3436 		 .maxauthsize = SHA256_DIGEST_SIZE,
3437 	 },
3438 	 .cipher_info = {
3439 			 .alg = CIPHER_ALG_DES,
3440 			 .mode = CIPHER_MODE_CBC,
3441 			 },
3442 	 .auth_info = {
3443 		       .alg = HASH_ALG_SHA256,
3444 		       .mode = HASH_MODE_HMAC,
3445 		       },
3446 	 .auth_first = 0,
3447 	 },
3448 	{
3449 	 .type = CRYPTO_ALG_TYPE_AEAD,
3450 	 .alg.aead = {
3451 		 .base = {
3452 			.cra_name = "authenc(hmac(sha384),cbc(des))",
3453 			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3454 			.cra_blocksize = DES_BLOCK_SIZE,
3455 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3456 		 },
3457 		 .setkey = aead_authenc_setkey,
3458 		 .ivsize = DES_BLOCK_SIZE,
3459 		 .maxauthsize = SHA384_DIGEST_SIZE,
3460 	 },
3461 	 .cipher_info = {
3462 			 .alg = CIPHER_ALG_DES,
3463 			 .mode = CIPHER_MODE_CBC,
3464 			 },
3465 	 .auth_info = {
3466 		       .alg = HASH_ALG_SHA384,
3467 		       .mode = HASH_MODE_HMAC,
3468 		       },
3469 	 .auth_first = 0,
3470 	 },
3471 	{
3472 	 .type = CRYPTO_ALG_TYPE_AEAD,
3473 	 .alg.aead = {
3474 		 .base = {
3475 			.cra_name = "authenc(hmac(sha512),cbc(des))",
3476 			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3477 			.cra_blocksize = DES_BLOCK_SIZE,
3478 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3479 		 },
3480 		 .setkey = aead_authenc_setkey,
3481 		 .ivsize = DES_BLOCK_SIZE,
3482 		 .maxauthsize = SHA512_DIGEST_SIZE,
3483 	 },
3484 	 .cipher_info = {
3485 			 .alg = CIPHER_ALG_DES,
3486 			 .mode = CIPHER_MODE_CBC,
3487 			 },
3488 	 .auth_info = {
3489 		       .alg = HASH_ALG_SHA512,
3490 		       .mode = HASH_MODE_HMAC,
3491 		       },
3492 	 .auth_first = 0,
3493 	 },
3494 	{
3495 	 .type = CRYPTO_ALG_TYPE_AEAD,
3496 	 .alg.aead = {
3497 		 .base = {
3498 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3499 			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3500 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3501 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3502 		 },
3503 		 .setkey = aead_authenc_setkey,
3504 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3505 		 .maxauthsize = MD5_DIGEST_SIZE,
3506 	 },
3507 	 .cipher_info = {
3508 			 .alg = CIPHER_ALG_3DES,
3509 			 .mode = CIPHER_MODE_CBC,
3510 			 },
3511 	 .auth_info = {
3512 		       .alg = HASH_ALG_MD5,
3513 		       .mode = HASH_MODE_HMAC,
3514 		       },
3515 	 .auth_first = 0,
3516 	 },
3517 	{
3518 	 .type = CRYPTO_ALG_TYPE_AEAD,
3519 	 .alg.aead = {
3520 		 .base = {
3521 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3522 			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3523 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3524 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3525 		 },
3526 		 .setkey = aead_authenc_setkey,
3527 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3528 		 .maxauthsize = SHA1_DIGEST_SIZE,
3529 	 },
3530 	 .cipher_info = {
3531 			 .alg = CIPHER_ALG_3DES,
3532 			 .mode = CIPHER_MODE_CBC,
3533 			 },
3534 	 .auth_info = {
3535 		       .alg = HASH_ALG_SHA1,
3536 		       .mode = HASH_MODE_HMAC,
3537 		       },
3538 	 .auth_first = 0,
3539 	 },
3540 	{
3541 	 .type = CRYPTO_ALG_TYPE_AEAD,
3542 	 .alg.aead = {
3543 		 .base = {
3544 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3545 			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3546 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3547 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3548 		 },
3549 		 .setkey = aead_authenc_setkey,
3550 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3551 		 .maxauthsize = SHA224_DIGEST_SIZE,
3552 	 },
3553 	 .cipher_info = {
3554 			 .alg = CIPHER_ALG_3DES,
3555 			 .mode = CIPHER_MODE_CBC,
3556 			 },
3557 	 .auth_info = {
3558 		       .alg = HASH_ALG_SHA224,
3559 		       .mode = HASH_MODE_HMAC,
3560 		       },
3561 	 .auth_first = 0,
3562 	 },
3563 	{
3564 	 .type = CRYPTO_ALG_TYPE_AEAD,
3565 	 .alg.aead = {
3566 		 .base = {
3567 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3568 			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3569 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3570 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3571 		 },
3572 		 .setkey = aead_authenc_setkey,
3573 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3574 		 .maxauthsize = SHA256_DIGEST_SIZE,
3575 	 },
3576 	 .cipher_info = {
3577 			 .alg = CIPHER_ALG_3DES,
3578 			 .mode = CIPHER_MODE_CBC,
3579 			 },
3580 	 .auth_info = {
3581 		       .alg = HASH_ALG_SHA256,
3582 		       .mode = HASH_MODE_HMAC,
3583 		       },
3584 	 .auth_first = 0,
3585 	 },
3586 	{
3587 	 .type = CRYPTO_ALG_TYPE_AEAD,
3588 	 .alg.aead = {
3589 		 .base = {
3590 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3591 			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3592 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3593 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3594 		 },
3595 		 .setkey = aead_authenc_setkey,
3596 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3597 		 .maxauthsize = SHA384_DIGEST_SIZE,
3598 	 },
3599 	 .cipher_info = {
3600 			 .alg = CIPHER_ALG_3DES,
3601 			 .mode = CIPHER_MODE_CBC,
3602 			 },
3603 	 .auth_info = {
3604 		       .alg = HASH_ALG_SHA384,
3605 		       .mode = HASH_MODE_HMAC,
3606 		       },
3607 	 .auth_first = 0,
3608 	 },
3609 	{
3610 	 .type = CRYPTO_ALG_TYPE_AEAD,
3611 	 .alg.aead = {
3612 		 .base = {
3613 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3614 			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3615 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3616 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3617 		 },
3618 		 .setkey = aead_authenc_setkey,
3619 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3620 		 .maxauthsize = SHA512_DIGEST_SIZE,
3621 	 },
3622 	 .cipher_info = {
3623 			 .alg = CIPHER_ALG_3DES,
3624 			 .mode = CIPHER_MODE_CBC,
3625 			 },
3626 	 .auth_info = {
3627 		       .alg = HASH_ALG_SHA512,
3628 		       .mode = HASH_MODE_HMAC,
3629 		       },
3630 	 .auth_first = 0,
3631 	 },
3632 
3633 /* ABLKCIPHER algorithms. */
3634 	{
3635 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3636 	 .alg.crypto = {
3637 			.cra_name = "ecb(arc4)",
3638 			.cra_driver_name = "ecb-arc4-iproc",
3639 			.cra_blocksize = ARC4_BLOCK_SIZE,
3640 			.cra_ablkcipher = {
3641 					   .min_keysize = ARC4_MIN_KEY_SIZE,
3642 					   .max_keysize = ARC4_MAX_KEY_SIZE,
3643 					   .ivsize = 0,
3644 					}
3645 			},
3646 	 .cipher_info = {
3647 			 .alg = CIPHER_ALG_RC4,
3648 			 .mode = CIPHER_MODE_NONE,
3649 			 },
3650 	 .auth_info = {
3651 		       .alg = HASH_ALG_NONE,
3652 		       .mode = HASH_MODE_NONE,
3653 		       },
3654 	 },
3655 	{
3656 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3657 	 .alg.crypto = {
3658 			.cra_name = "ofb(des)",
3659 			.cra_driver_name = "ofb-des-iproc",
3660 			.cra_blocksize = DES_BLOCK_SIZE,
3661 			.cra_ablkcipher = {
3662 					   .min_keysize = DES_KEY_SIZE,
3663 					   .max_keysize = DES_KEY_SIZE,
3664 					   .ivsize = DES_BLOCK_SIZE,
3665 					}
3666 			},
3667 	 .cipher_info = {
3668 			 .alg = CIPHER_ALG_DES,
3669 			 .mode = CIPHER_MODE_OFB,
3670 			 },
3671 	 .auth_info = {
3672 		       .alg = HASH_ALG_NONE,
3673 		       .mode = HASH_MODE_NONE,
3674 		       },
3675 	 },
3676 	{
3677 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3678 	 .alg.crypto = {
3679 			.cra_name = "cbc(des)",
3680 			.cra_driver_name = "cbc-des-iproc",
3681 			.cra_blocksize = DES_BLOCK_SIZE,
3682 			.cra_ablkcipher = {
3683 					   .min_keysize = DES_KEY_SIZE,
3684 					   .max_keysize = DES_KEY_SIZE,
3685 					   .ivsize = DES_BLOCK_SIZE,
3686 					}
3687 			},
3688 	 .cipher_info = {
3689 			 .alg = CIPHER_ALG_DES,
3690 			 .mode = CIPHER_MODE_CBC,
3691 			 },
3692 	 .auth_info = {
3693 		       .alg = HASH_ALG_NONE,
3694 		       .mode = HASH_MODE_NONE,
3695 		       },
3696 	 },
3697 	{
3698 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3699 	 .alg.crypto = {
3700 			.cra_name = "ecb(des)",
3701 			.cra_driver_name = "ecb-des-iproc",
3702 			.cra_blocksize = DES_BLOCK_SIZE,
3703 			.cra_ablkcipher = {
3704 					   .min_keysize = DES_KEY_SIZE,
3705 					   .max_keysize = DES_KEY_SIZE,
3706 					   .ivsize = 0,
3707 					}
3708 			},
3709 	 .cipher_info = {
3710 			 .alg = CIPHER_ALG_DES,
3711 			 .mode = CIPHER_MODE_ECB,
3712 			 },
3713 	 .auth_info = {
3714 		       .alg = HASH_ALG_NONE,
3715 		       .mode = HASH_MODE_NONE,
3716 		       },
3717 	 },
3718 	{
3719 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3720 	 .alg.crypto = {
3721 			.cra_name = "ofb(des3_ede)",
3722 			.cra_driver_name = "ofb-des3-iproc",
3723 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3724 			.cra_ablkcipher = {
3725 					   .min_keysize = DES3_EDE_KEY_SIZE,
3726 					   .max_keysize = DES3_EDE_KEY_SIZE,
3727 					   .ivsize = DES3_EDE_BLOCK_SIZE,
3728 					}
3729 			},
3730 	 .cipher_info = {
3731 			 .alg = CIPHER_ALG_3DES,
3732 			 .mode = CIPHER_MODE_OFB,
3733 			 },
3734 	 .auth_info = {
3735 		       .alg = HASH_ALG_NONE,
3736 		       .mode = HASH_MODE_NONE,
3737 		       },
3738 	 },
3739 	{
3740 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3741 	 .alg.crypto = {
3742 			.cra_name = "cbc(des3_ede)",
3743 			.cra_driver_name = "cbc-des3-iproc",
3744 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3745 			.cra_ablkcipher = {
3746 					   .min_keysize = DES3_EDE_KEY_SIZE,
3747 					   .max_keysize = DES3_EDE_KEY_SIZE,
3748 					   .ivsize = DES3_EDE_BLOCK_SIZE,
3749 					}
3750 			},
3751 	 .cipher_info = {
3752 			 .alg = CIPHER_ALG_3DES,
3753 			 .mode = CIPHER_MODE_CBC,
3754 			 },
3755 	 .auth_info = {
3756 		       .alg = HASH_ALG_NONE,
3757 		       .mode = HASH_MODE_NONE,
3758 		       },
3759 	 },
3760 	{
3761 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3762 	 .alg.crypto = {
3763 			.cra_name = "ecb(des3_ede)",
3764 			.cra_driver_name = "ecb-des3-iproc",
3765 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3766 			.cra_ablkcipher = {
3767 					   .min_keysize = DES3_EDE_KEY_SIZE,
3768 					   .max_keysize = DES3_EDE_KEY_SIZE,
3769 					   .ivsize = 0,
3770 					}
3771 			},
3772 	 .cipher_info = {
3773 			 .alg = CIPHER_ALG_3DES,
3774 			 .mode = CIPHER_MODE_ECB,
3775 			 },
3776 	 .auth_info = {
3777 		       .alg = HASH_ALG_NONE,
3778 		       .mode = HASH_MODE_NONE,
3779 		       },
3780 	 },
3781 	{
3782 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3783 	 .alg.crypto = {
3784 			.cra_name = "ofb(aes)",
3785 			.cra_driver_name = "ofb-aes-iproc",
3786 			.cra_blocksize = AES_BLOCK_SIZE,
3787 			.cra_ablkcipher = {
3788 					   .min_keysize = AES_MIN_KEY_SIZE,
3789 					   .max_keysize = AES_MAX_KEY_SIZE,
3790 					   .ivsize = AES_BLOCK_SIZE,
3791 					}
3792 			},
3793 	 .cipher_info = {
3794 			 .alg = CIPHER_ALG_AES,
3795 			 .mode = CIPHER_MODE_OFB,
3796 			 },
3797 	 .auth_info = {
3798 		       .alg = HASH_ALG_NONE,
3799 		       .mode = HASH_MODE_NONE,
3800 		       },
3801 	 },
3802 	{
3803 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3804 	 .alg.crypto = {
3805 			.cra_name = "cbc(aes)",
3806 			.cra_driver_name = "cbc-aes-iproc",
3807 			.cra_blocksize = AES_BLOCK_SIZE,
3808 			.cra_ablkcipher = {
3809 					   .min_keysize = AES_MIN_KEY_SIZE,
3810 					   .max_keysize = AES_MAX_KEY_SIZE,
3811 					   .ivsize = AES_BLOCK_SIZE,
3812 					}
3813 			},
3814 	 .cipher_info = {
3815 			 .alg = CIPHER_ALG_AES,
3816 			 .mode = CIPHER_MODE_CBC,
3817 			 },
3818 	 .auth_info = {
3819 		       .alg = HASH_ALG_NONE,
3820 		       .mode = HASH_MODE_NONE,
3821 		       },
3822 	 },
3823 	{
3824 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3825 	 .alg.crypto = {
3826 			.cra_name = "ecb(aes)",
3827 			.cra_driver_name = "ecb-aes-iproc",
3828 			.cra_blocksize = AES_BLOCK_SIZE,
3829 			.cra_ablkcipher = {
3830 					   .min_keysize = AES_MIN_KEY_SIZE,
3831 					   .max_keysize = AES_MAX_KEY_SIZE,
3832 					   .ivsize = 0,
3833 					}
3834 			},
3835 	 .cipher_info = {
3836 			 .alg = CIPHER_ALG_AES,
3837 			 .mode = CIPHER_MODE_ECB,
3838 			 },
3839 	 .auth_info = {
3840 		       .alg = HASH_ALG_NONE,
3841 		       .mode = HASH_MODE_NONE,
3842 		       },
3843 	 },
3844 	{
3845 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3846 	 .alg.crypto = {
3847 			.cra_name = "ctr(aes)",
3848 			.cra_driver_name = "ctr-aes-iproc",
3849 			.cra_blocksize = AES_BLOCK_SIZE,
3850 			.cra_ablkcipher = {
3851 					   .min_keysize = AES_MIN_KEY_SIZE,
3852 					   .max_keysize = AES_MAX_KEY_SIZE,
3853 					   .ivsize = AES_BLOCK_SIZE,
3854 					}
3855 			},
3856 	 .cipher_info = {
3857 			 .alg = CIPHER_ALG_AES,
3858 			 .mode = CIPHER_MODE_CTR,
3859 			 },
3860 	 .auth_info = {
3861 		       .alg = HASH_ALG_NONE,
3862 		       .mode = HASH_MODE_NONE,
3863 		       },
3864 	 },
	{
3866 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3867 	 .alg.crypto = {
3868 			.cra_name = "xts(aes)",
3869 			.cra_driver_name = "xts-aes-iproc",
3870 			.cra_blocksize = AES_BLOCK_SIZE,
3871 			.cra_ablkcipher = {
3872 				.min_keysize = 2 * AES_MIN_KEY_SIZE,
3873 				.max_keysize = 2 * AES_MAX_KEY_SIZE,
3874 				.ivsize = AES_BLOCK_SIZE,
3875 				}
3876 			},
3877 	 .cipher_info = {
3878 			 .alg = CIPHER_ALG_AES,
3879 			 .mode = CIPHER_MODE_XTS,
3880 			 },
3881 	 .auth_info = {
3882 		       .alg = HASH_ALG_NONE,
3883 		       .mode = HASH_MODE_NONE,
3884 		       },
3885 	 },
3886 
3887 /* AHASH algorithms. */
3888 	{
3889 	 .type = CRYPTO_ALG_TYPE_AHASH,
3890 	 .alg.hash = {
3891 		      .halg.digestsize = MD5_DIGEST_SIZE,
3892 		      .halg.base = {
3893 				    .cra_name = "md5",
3894 				    .cra_driver_name = "md5-iproc",
3895 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3896 				    .cra_flags = CRYPTO_ALG_ASYNC,
3897 				}
3898 		      },
3899 	 .cipher_info = {
3900 			 .alg = CIPHER_ALG_NONE,
3901 			 .mode = CIPHER_MODE_NONE,
3902 			 },
3903 	 .auth_info = {
3904 		       .alg = HASH_ALG_MD5,
3905 		       .mode = HASH_MODE_HASH,
3906 		       },
3907 	 },
3908 	{
3909 	 .type = CRYPTO_ALG_TYPE_AHASH,
3910 	 .alg.hash = {
3911 		      .halg.digestsize = MD5_DIGEST_SIZE,
3912 		      .halg.base = {
3913 				    .cra_name = "hmac(md5)",
3914 				    .cra_driver_name = "hmac-md5-iproc",
3915 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3916 				}
3917 		      },
3918 	 .cipher_info = {
3919 			 .alg = CIPHER_ALG_NONE,
3920 			 .mode = CIPHER_MODE_NONE,
3921 			 },
3922 	 .auth_info = {
3923 		       .alg = HASH_ALG_MD5,
3924 		       .mode = HASH_MODE_HMAC,
3925 		       },
3926 	 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3928 	 .alg.hash = {
3929 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3930 		      .halg.base = {
3931 				    .cra_name = "sha1",
3932 				    .cra_driver_name = "sha1-iproc",
3933 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3934 				}
3935 		      },
3936 	 .cipher_info = {
3937 			 .alg = CIPHER_ALG_NONE,
3938 			 .mode = CIPHER_MODE_NONE,
3939 			 },
3940 	 .auth_info = {
3941 		       .alg = HASH_ALG_SHA1,
3942 		       .mode = HASH_MODE_HASH,
3943 		       },
3944 	 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3946 	 .alg.hash = {
3947 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3948 		      .halg.base = {
3949 				    .cra_name = "hmac(sha1)",
3950 				    .cra_driver_name = "hmac-sha1-iproc",
3951 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3952 				}
3953 		      },
3954 	 .cipher_info = {
3955 			 .alg = CIPHER_ALG_NONE,
3956 			 .mode = CIPHER_MODE_NONE,
3957 			 },
3958 	 .auth_info = {
3959 		       .alg = HASH_ALG_SHA1,
3960 		       .mode = HASH_MODE_HMAC,
3961 		       },
3962 	 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3964 	 .alg.hash = {
		      .halg.digestsize = SHA224_DIGEST_SIZE,
		      .halg.base = {
				    .cra_name = "sha224",
				    .cra_driver_name = "sha224-iproc",
				    .cra_blocksize = SHA224_BLOCK_SIZE,
				}
3971 		      },
3972 	 .cipher_info = {
3973 			 .alg = CIPHER_ALG_NONE,
3974 			 .mode = CIPHER_MODE_NONE,
3975 			 },
3976 	 .auth_info = {
3977 		       .alg = HASH_ALG_SHA224,
3978 		       .mode = HASH_MODE_HASH,
3979 		       },
3980 	 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3982 	 .alg.hash = {
3983 		      .halg.digestsize = SHA224_DIGEST_SIZE,
3984 		      .halg.base = {
3985 				    .cra_name = "hmac(sha224)",
3986 				    .cra_driver_name = "hmac-sha224-iproc",
3987 				    .cra_blocksize = SHA224_BLOCK_SIZE,
3988 				}
3989 		      },
3990 	 .cipher_info = {
3991 			 .alg = CIPHER_ALG_NONE,
3992 			 .mode = CIPHER_MODE_NONE,
3993 			 },
3994 	 .auth_info = {
3995 		       .alg = HASH_ALG_SHA224,
3996 		       .mode = HASH_MODE_HMAC,
3997 		       },
3998 	 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
4000 	 .alg.hash = {
4001 		      .halg.digestsize = SHA256_DIGEST_SIZE,
4002 		      .halg.base = {
4003 				    .cra_name = "sha256",
4004 				    .cra_driver_name = "sha256-iproc",
4005 				    .cra_blocksize = SHA256_BLOCK_SIZE,
4006 				}
4007 		      },
4008 	 .cipher_info = {
4009 			 .alg = CIPHER_ALG_NONE,
4010 			 .mode = CIPHER_MODE_NONE,
4011 			 },
4012 	 .auth_info = {
4013 		       .alg = HASH_ALG_SHA256,
4014 		       .mode = HASH_MODE_HASH,
4015 		       },
4016 	 },
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
4018 	 .alg.hash = {
4019 		      .halg.digestsize = SHA256_DIGEST_SIZE,
4020 		      .halg.base = {
4021 				    .cra_name = "hmac(sha256)",
4022 				    .cra_driver_name = "hmac-sha256-iproc",
4023 				    .cra_blocksize = SHA256_BLOCK_SIZE,
4024 				}
4025 		      },
4026 	 .cipher_info = {
4027 			 .alg = CIPHER_ALG_NONE,
4028 			 .mode = CIPHER_MODE_NONE,
4029 			 },
4030 	 .auth_info = {
4031 		       .alg = HASH_ALG_SHA256,
4032 		       .mode = HASH_MODE_HMAC,
4033 		       },
4034 	 },
4035 	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
4037 	 .alg.hash = {
4038 		      .halg.digestsize = SHA384_DIGEST_SIZE,
4039 		      .halg.base = {
4040 				    .cra_name = "sha384",
4041 				    .cra_driver_name = "sha384-iproc",
4042 				    .cra_blocksize = SHA384_BLOCK_SIZE,
4043 				}
4044 		      },
4045 	 .cipher_info = {
4046 			 .alg = CIPHER_ALG_NONE,
4047 			 .mode = CIPHER_MODE_NONE,
4048 			 },
4049 	 .auth_info = {
4050 		       .alg = HASH_ALG_SHA384,
4051 		       .mode = HASH_MODE_HASH,
4052 		       },
4053 	 },
4054 	{
4055 	 .type = CRYPTO_ALG_TYPE_AHASH,
4056 	 .alg.hash = {
4057 		      .halg.digestsize = SHA384_DIGEST_SIZE,
4058 		      .halg.base = {
4059 				    .cra_name = "hmac(sha384)",
4060 				    .cra_driver_name = "hmac-sha384-iproc",
4061 				    .cra_blocksize = SHA384_BLOCK_SIZE,
4062 				}
4063 		      },
4064 	 .cipher_info = {
4065 			 .alg = CIPHER_ALG_NONE,
4066 			 .mode = CIPHER_MODE_NONE,
4067 			 },
4068 	 .auth_info = {
4069 		       .alg = HASH_ALG_SHA384,
4070 		       .mode = HASH_MODE_HMAC,
4071 		       },
4072 	 },
4073 	{
4074 	 .type = CRYPTO_ALG_TYPE_AHASH,
4075 	 .alg.hash = {
4076 		      .halg.digestsize = SHA512_DIGEST_SIZE,
4077 		      .halg.base = {
4078 				    .cra_name = "sha512",
4079 				    .cra_driver_name = "sha512-iproc",
4080 				    .cra_blocksize = SHA512_BLOCK_SIZE,
4081 				}
4082 		      },
4083 	 .cipher_info = {
4084 			 .alg = CIPHER_ALG_NONE,
4085 			 .mode = CIPHER_MODE_NONE,
4086 			 },
4087 	 .auth_info = {
4088 		       .alg = HASH_ALG_SHA512,
4089 		       .mode = HASH_MODE_HASH,
4090 		       },
4091 	 },
4092 	{
4093 	 .type = CRYPTO_ALG_TYPE_AHASH,
4094 	 .alg.hash = {
4095 		      .halg.digestsize = SHA512_DIGEST_SIZE,
4096 		      .halg.base = {
4097 				    .cra_name = "hmac(sha512)",
4098 				    .cra_driver_name = "hmac-sha512-iproc",
4099 				    .cra_blocksize = SHA512_BLOCK_SIZE,
4100 				}
4101 		      },
4102 	 .cipher_info = {
4103 			 .alg = CIPHER_ALG_NONE,
4104 			 .mode = CIPHER_MODE_NONE,
4105 			 },
4106 	 .auth_info = {
4107 		       .alg = HASH_ALG_SHA512,
4108 		       .mode = HASH_MODE_HMAC,
4109 		       },
4110 	 },
4111 	{
4112 	 .type = CRYPTO_ALG_TYPE_AHASH,
4113 	 .alg.hash = {
4114 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4115 		      .halg.base = {
4116 				    .cra_name = "sha3-224",
4117 				    .cra_driver_name = "sha3-224-iproc",
4118 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4119 				}
4120 		      },
4121 	 .cipher_info = {
4122 			 .alg = CIPHER_ALG_NONE,
4123 			 .mode = CIPHER_MODE_NONE,
4124 			 },
4125 	 .auth_info = {
4126 		       .alg = HASH_ALG_SHA3_224,
4127 		       .mode = HASH_MODE_HASH,
4128 		       },
4129 	 },
4130 	{
4131 	 .type = CRYPTO_ALG_TYPE_AHASH,
4132 	 .alg.hash = {
4133 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4134 		      .halg.base = {
4135 				    .cra_name = "hmac(sha3-224)",
4136 				    .cra_driver_name = "hmac-sha3-224-iproc",
4137 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4138 				}
4139 		      },
4140 	 .cipher_info = {
4141 			 .alg = CIPHER_ALG_NONE,
4142 			 .mode = CIPHER_MODE_NONE,
4143 			 },
4144 	 .auth_info = {
4145 		       .alg = HASH_ALG_SHA3_224,
		       .mode = HASH_MODE_HMAC,
4147 		       },
4148 	 },
4149 	{
4150 	 .type = CRYPTO_ALG_TYPE_AHASH,
4151 	 .alg.hash = {
4152 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4153 		      .halg.base = {
4154 				    .cra_name = "sha3-256",
4155 				    .cra_driver_name = "sha3-256-iproc",
4156 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4157 				}
4158 		      },
4159 	 .cipher_info = {
4160 			 .alg = CIPHER_ALG_NONE,
4161 			 .mode = CIPHER_MODE_NONE,
4162 			 },
4163 	 .auth_info = {
4164 		       .alg = HASH_ALG_SHA3_256,
4165 		       .mode = HASH_MODE_HASH,
4166 		       },
4167 	 },
4168 	{
4169 	 .type = CRYPTO_ALG_TYPE_AHASH,
4170 	 .alg.hash = {
4171 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4172 		      .halg.base = {
4173 				    .cra_name = "hmac(sha3-256)",
4174 				    .cra_driver_name = "hmac-sha3-256-iproc",
4175 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4176 				}
4177 		      },
4178 	 .cipher_info = {
4179 			 .alg = CIPHER_ALG_NONE,
4180 			 .mode = CIPHER_MODE_NONE,
4181 			 },
4182 	 .auth_info = {
4183 		       .alg = HASH_ALG_SHA3_256,
4184 		       .mode = HASH_MODE_HMAC,
4185 		       },
4186 	 },
4187 	{
4188 	 .type = CRYPTO_ALG_TYPE_AHASH,
4189 	 .alg.hash = {
4190 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4191 		      .halg.base = {
4192 				    .cra_name = "sha3-384",
4193 				    .cra_driver_name = "sha3-384-iproc",
				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4195 				}
4196 		      },
4197 	 .cipher_info = {
4198 			 .alg = CIPHER_ALG_NONE,
4199 			 .mode = CIPHER_MODE_NONE,
4200 			 },
4201 	 .auth_info = {
4202 		       .alg = HASH_ALG_SHA3_384,
4203 		       .mode = HASH_MODE_HASH,
4204 		       },
4205 	 },
4206 	{
4207 	 .type = CRYPTO_ALG_TYPE_AHASH,
4208 	 .alg.hash = {
4209 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4210 		      .halg.base = {
4211 				    .cra_name = "hmac(sha3-384)",
4212 				    .cra_driver_name = "hmac-sha3-384-iproc",
4213 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4214 				}
4215 		      },
4216 	 .cipher_info = {
4217 			 .alg = CIPHER_ALG_NONE,
4218 			 .mode = CIPHER_MODE_NONE,
4219 			 },
4220 	 .auth_info = {
4221 		       .alg = HASH_ALG_SHA3_384,
4222 		       .mode = HASH_MODE_HMAC,
4223 		       },
4224 	 },
4225 	{
4226 	 .type = CRYPTO_ALG_TYPE_AHASH,
4227 	 .alg.hash = {
4228 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4229 		      .halg.base = {
4230 				    .cra_name = "sha3-512",
4231 				    .cra_driver_name = "sha3-512-iproc",
4232 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4233 				}
4234 		      },
4235 	 .cipher_info = {
4236 			 .alg = CIPHER_ALG_NONE,
4237 			 .mode = CIPHER_MODE_NONE,
4238 			 },
4239 	 .auth_info = {
4240 		       .alg = HASH_ALG_SHA3_512,
4241 		       .mode = HASH_MODE_HASH,
4242 		       },
4243 	 },
4244 	{
4245 	 .type = CRYPTO_ALG_TYPE_AHASH,
4246 	 .alg.hash = {
4247 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4248 		      .halg.base = {
4249 				    .cra_name = "hmac(sha3-512)",
4250 				    .cra_driver_name = "hmac-sha3-512-iproc",
4251 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4252 				}
4253 		      },
4254 	 .cipher_info = {
4255 			 .alg = CIPHER_ALG_NONE,
4256 			 .mode = CIPHER_MODE_NONE,
4257 			 },
4258 	 .auth_info = {
4259 		       .alg = HASH_ALG_SHA3_512,
4260 		       .mode = HASH_MODE_HMAC,
4261 		       },
4262 	 },
4263 	{
4264 	 .type = CRYPTO_ALG_TYPE_AHASH,
4265 	 .alg.hash = {
4266 		      .halg.digestsize = AES_BLOCK_SIZE,
4267 		      .halg.base = {
4268 				    .cra_name = "xcbc(aes)",
4269 				    .cra_driver_name = "xcbc-aes-iproc",
4270 				    .cra_blocksize = AES_BLOCK_SIZE,
4271 				}
4272 		      },
4273 	 .cipher_info = {
4274 			 .alg = CIPHER_ALG_NONE,
4275 			 .mode = CIPHER_MODE_NONE,
4276 			 },
4277 	 .auth_info = {
4278 		       .alg = HASH_ALG_AES,
4279 		       .mode = HASH_MODE_XCBC,
4280 		       },
4281 	 },
4282 	{
4283 	 .type = CRYPTO_ALG_TYPE_AHASH,
4284 	 .alg.hash = {
4285 		      .halg.digestsize = AES_BLOCK_SIZE,
4286 		      .halg.base = {
4287 				    .cra_name = "cmac(aes)",
4288 				    .cra_driver_name = "cmac-aes-iproc",
4289 				    .cra_blocksize = AES_BLOCK_SIZE,
4290 				}
4291 		      },
4292 	 .cipher_info = {
4293 			 .alg = CIPHER_ALG_NONE,
4294 			 .mode = CIPHER_MODE_NONE,
4295 			 },
4296 	 .auth_info = {
4297 		       .alg = HASH_ALG_AES,
4298 		       .mode = HASH_MODE_CMAC,
4299 		       },
4300 	 },
4301 };
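
/*
 * Usage note (illustrative sketch only, not part of this driver): once the
 * table above has been registered, kernel clients reach these
 * implementations through the regular crypto API. A cra_driver_name pins
 * this driver explicitly, e.g.:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("sha256-iproc", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_ahash(tfm);
 *
 * Requesting the generic cra_name ("sha256") instead selects among all
 * registered implementations by cra_priority.
 */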
4302 
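/**
 * generic_cra_init() - Initialization common to all transform types. Cache
 * the cipher and auth parameters from the driver_algs entry in the context,
 * and ask the SPU for the max payload a request on this tfm may carry.
 * @tfm:        the transform being initialized
 * @cipher_alg: driver_algs entry the transform was instantiated from
 *
 * Return: 0 (always succeeds)
 */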
4303 static int generic_cra_init(struct crypto_tfm *tfm,
4304 			    struct iproc_alg_s *cipher_alg)
4305 {
4306 	struct spu_hw *spu = &iproc_priv.spu;
4307 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4308 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4309 
4310 	flow_log("%s()\n", __func__);
4311 
4312 	ctx->alg = cipher_alg;
4313 	ctx->cipher = cipher_alg->cipher_info;
4314 	ctx->auth = cipher_alg->auth_info;
4315 	ctx->auth_first = cipher_alg->auth_first;
4316 	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4317 						    ctx->cipher.mode,
4318 						    blocksize);
4319 	ctx->fallback_cipher = NULL;
4320 
4321 	ctx->enckeylen = 0;
4322 	ctx->authkeylen = 0;
4323 
4324 	atomic_inc(&iproc_priv.stream_count);
4325 	atomic_inc(&iproc_priv.session_count);
4326 
4327 	return 0;
4328 }
4329 
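/**
 * ablkcipher_cra_init() - ablkcipher-specific init. Set the request context
 * size, then do the common initialization.
 * @tfm: the transform being initialized
 *
 * Return: 0 if successful
 */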
4330 static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4331 {
4332 	struct crypto_alg *alg = tfm->__crt_alg;
4333 	struct iproc_alg_s *cipher_alg;
4334 
4335 	flow_log("%s()\n", __func__);
4336 
4337 	tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4338 
4339 	cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4340 	return generic_cra_init(tfm, cipher_alg);
4341 }
4342 
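/**
 * ahash_cra_init() - ahash-specific init. Do the common initialization,
 * then set the request context size.
 * @tfm: the transform being initialized
 *
 * Return: 0 if successful
 */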
4343 static int ahash_cra_init(struct crypto_tfm *tfm)
4344 {
4345 	int err;
4346 	struct crypto_alg *alg = tfm->__crt_alg;
4347 	struct iproc_alg_s *cipher_alg;
4348 
4349 	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4350 				  alg.hash);
4351 
4352 	err = generic_cra_init(tfm, cipher_alg);
4353 	flow_log("%s()\n", __func__);
4354 
4355 	/*
4356 	 * export state size has to be < 512 bytes. So don't include msg bufs
4357 	 * in state size.
4358 	 */
4359 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4360 				 sizeof(struct iproc_reqctx_s));
4361 
4362 	return err;
4363 }
4364 
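/**
 * aead_cra_init() - AEAD-specific init. Do the common initialization,
 * generate an initial random IV, and, for algorithms flagged
 * CRYPTO_ALG_NEED_FALLBACK, allocate a software fallback tfm for requests
 * the hardware cannot handle.
 * @aead: the AEAD transform being initialized
 *
 * Return: 0 if successful
 */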
4365 static int aead_cra_init(struct crypto_aead *aead)
4366 {
4367 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4368 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4369 	struct crypto_alg *alg = tfm->__crt_alg;
4370 	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4371 	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4372 						      alg.aead);
4373 
4374 	int err = generic_cra_init(tfm, cipher_alg);
4375 
4376 	flow_log("%s()\n", __func__);
4377 
4378 	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4379 	ctx->is_esp = false;
4380 	ctx->salt_len = 0;
4381 	ctx->salt_offset = 0;
4382 
4383 	/* random first IV */
4384 	get_random_bytes(ctx->iv, MAX_IV_SIZE);
4385 	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
4386 
4387 	if (!err) {
4388 		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4389 			flow_log("%s() creating fallback cipher\n", __func__);
4390 
4391 			ctx->fallback_cipher =
4392 			    crypto_alloc_aead(alg->cra_name, 0,
4393 					      CRYPTO_ALG_ASYNC |
4394 					      CRYPTO_ALG_NEED_FALLBACK);
4395 			if (IS_ERR(ctx->fallback_cipher)) {
4396 				pr_err("%s() Error: failed to allocate fallback for %s\n",
4397 				       __func__, alg->cra_name);
4398 				return PTR_ERR(ctx->fallback_cipher);
4399 			}
4400 		}
4401 	}
4402 
4403 	return err;
4404 }
4405 
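/* Common transform exit: drop the session count taken in generic_cra_init() */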
4406 static void generic_cra_exit(struct crypto_tfm *tfm)
4407 {
4408 	atomic_dec(&iproc_priv.session_count);
4409 }
4410 
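/* AEAD exit: do the common exit, then free the fallback tfm, if any */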
4411 static void aead_cra_exit(struct crypto_aead *aead)
4412 {
4413 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4414 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4415 
4416 	generic_cra_exit(tfm);
4417 
4418 	if (ctx->fallback_cipher) {
4419 		crypto_free_aead(ctx->fallback_cipher);
4420 		ctx->fallback_cipher = NULL;
4421 	}
4422 }
4423 
4424 /**
4425  * spu_functions_register() - Specify hardware-specific SPU functions based on
4426  * SPU type read from device tree.
4427  * @dev:	device structure
4428  * @spu_type:	SPU hardware generation
4429  * @spu_subtype: SPU hardware version
4430  */
4431 static void spu_functions_register(struct device *dev,
4432 				   enum spu_spu_type spu_type,
4433 				   enum spu_spu_subtype spu_subtype)
4434 {
4435 	struct spu_hw *spu = &iproc_priv.spu;
4436 
4437 	if (spu_type == SPU_TYPE_SPUM) {
		dev_dbg(dev, "Registering SPUM functions\n");
4439 		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4440 		spu->spu_payload_length = spum_payload_length;
4441 		spu->spu_response_hdr_len = spum_response_hdr_len;
4442 		spu->spu_hash_pad_len = spum_hash_pad_len;
4443 		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4444 		spu->spu_assoc_resp_len = spum_assoc_resp_len;
4445 		spu->spu_aead_ivlen = spum_aead_ivlen;
4446 		spu->spu_hash_type = spum_hash_type;
4447 		spu->spu_digest_size = spum_digest_size;
4448 		spu->spu_create_request = spum_create_request;
4449 		spu->spu_cipher_req_init = spum_cipher_req_init;
4450 		spu->spu_cipher_req_finish = spum_cipher_req_finish;
4451 		spu->spu_request_pad = spum_request_pad;
4452 		spu->spu_tx_status_len = spum_tx_status_len;
4453 		spu->spu_rx_status_len = spum_rx_status_len;
4454 		spu->spu_status_process = spum_status_process;
4455 		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4456 		spu->spu_ccm_update_iv = spum_ccm_update_iv;
4457 		spu->spu_wordalign_padlen = spum_wordalign_padlen;
4458 		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4459 			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4460 		else
4461 			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4462 	} else {
		dev_dbg(dev, "Registering SPU2 functions\n");
4464 		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4465 		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4466 		spu->spu_payload_length = spu2_payload_length;
4467 		spu->spu_response_hdr_len = spu2_response_hdr_len;
4468 		spu->spu_hash_pad_len = spu2_hash_pad_len;
4469 		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4470 		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4471 		spu->spu_aead_ivlen = spu2_aead_ivlen;
4472 		spu->spu_hash_type = spu2_hash_type;
4473 		spu->spu_digest_size = spu2_digest_size;
4474 		spu->spu_create_request = spu2_create_request;
4475 		spu->spu_cipher_req_init = spu2_cipher_req_init;
4476 		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4477 		spu->spu_request_pad = spu2_request_pad;
4478 		spu->spu_tx_status_len = spu2_tx_status_len;
4479 		spu->spu_rx_status_len = spu2_rx_status_len;
4480 		spu->spu_status_process = spu2_status_process;
4481 		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4482 		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4483 		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4484 	}
4485 }
4486 
4487 /**
4488  * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
4489  * channel for the SPU being probed.
4490  * @dev:  SPU driver device structure
4491  *
4492  * Return: 0 if successful
4493  *	   < 0 otherwise
4494  */
4495 static int spu_mb_init(struct device *dev)
4496 {
4497 	struct mbox_client *mcl = &iproc_priv.mcl;
4498 	int err, i;
4499 
4500 	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4501 				  sizeof(struct mbox_chan *), GFP_KERNEL);
4502 	if (!iproc_priv.mbox)
4503 		return -ENOMEM;
4504 
4505 	mcl->dev = dev;
4506 	mcl->tx_block = false;
4507 	mcl->tx_tout = 0;
4508 	mcl->knows_txdone = true;
4509 	mcl->rx_callback = spu_rx_callback;
4510 	mcl->tx_done = NULL;
4511 
4512 	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4513 		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4514 		if (IS_ERR(iproc_priv.mbox[i])) {
			err = PTR_ERR(iproc_priv.mbox[i]);
			dev_err(dev,
				"Mbox channel %d request failed with err %d\n",
				i, err);
4519 			iproc_priv.mbox[i] = NULL;
4520 			goto free_channels;
4521 		}
4522 	}
4523 
4524 	return 0;
4525 free_channels:
4526 	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4527 		if (iproc_priv.mbox[i])
4528 			mbox_free_channel(iproc_priv.mbox[i]);
4529 	}
4530 
4531 	return err;
4532 }
4533 
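/**
 * spu_mb_release() - Free the mailbox channels acquired in spu_mb_init().
 * @pdev: Platform device structure
 */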
4534 static void spu_mb_release(struct platform_device *pdev)
4535 {
4536 	int i;
4537 
4538 	for (i = 0; i < iproc_priv.spu.num_chan; i++)
4539 		mbox_free_channel(iproc_priv.mbox[i]);
4540 }
4541 
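/*
 * Reset the statistics and debug counters exposed through debugfs, and seed
 * the round-robin channel selector.
 */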
4542 static void spu_counters_init(void)
4543 {
4544 	int i;
4545 	int j;
4546 
4547 	atomic_set(&iproc_priv.session_count, 0);
4548 	atomic_set(&iproc_priv.stream_count, 0);
4549 	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4550 	atomic64_set(&iproc_priv.bytes_in, 0);
4551 	atomic64_set(&iproc_priv.bytes_out, 0);
4552 	for (i = 0; i < SPU_OP_NUM; i++) {
4553 		atomic_set(&iproc_priv.op_counts[i], 0);
4554 		atomic_set(&iproc_priv.setkey_cnt[i], 0);
4555 	}
4556 	for (i = 0; i < CIPHER_ALG_LAST; i++)
4557 		for (j = 0; j < CIPHER_MODE_LAST; j++)
4558 			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4559 
4560 	for (i = 0; i < HASH_ALG_LAST; i++) {
4561 		atomic_set(&iproc_priv.hash_cnt[i], 0);
4562 		atomic_set(&iproc_priv.hmac_cnt[i], 0);
4563 	}
4564 	for (i = 0; i < AEAD_TYPE_LAST; i++)
4565 		atomic_set(&iproc_priv.aead_cnt[i], 0);
4566 
4567 	atomic_set(&iproc_priv.mb_no_spc, 0);
4568 	atomic_set(&iproc_priv.mb_send_fail, 0);
4569 	atomic_set(&iproc_priv.bad_icv, 0);
4570 }
4571 
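/**
 * spu_register_ablkcipher() - Register an ablkcipher algorithm from the
 * driver_algs table, filling in the fields common to all entries first.
 * @driver_alg: the driver_algs entry to register
 *
 * Return: 0 if the algorithm was registered or intentionally skipped
 *	   < 0 otherwise
 */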
4572 static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
4573 {
4574 	struct spu_hw *spu = &iproc_priv.spu;
4575 	struct crypto_alg *crypto = &driver_alg->alg.crypto;
4576 	int err;
4577 
4578 	/* SPU2 does not support RC4 */
4579 	if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4580 	    (spu->spu_type == SPU_TYPE_SPU2))
4581 		return 0;
4582 
4583 	crypto->cra_module = THIS_MODULE;
4584 	crypto->cra_priority = cipher_pri;
4585 	crypto->cra_alignmask = 0;
4586 	crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4587 
4588 	crypto->cra_init = ablkcipher_cra_init;
4589 	crypto->cra_exit = generic_cra_exit;
4590 	crypto->cra_type = &crypto_ablkcipher_type;
4591 	crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4592 				CRYPTO_ALG_KERN_DRIVER_ONLY;
4593 
4594 	crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4595 	crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4596 	crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4597 
4598 	err = crypto_register_alg(crypto);
4599 	/* Mark alg as having been registered, if successful */
4600 	if (err == 0)
4601 		driver_alg->registered = true;
4602 	pr_debug("  registered ablkcipher %s\n", crypto->cra_driver_name);
4603 	return err;
4604 }
4605 
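/**
 * spu_register_ahash() - Register an ahash algorithm from the driver_algs
 * table, selecting plain-hash or HMAC operations as appropriate. Algorithms
 * the probed SPU generation cannot handle are silently skipped.
 * @driver_alg: the driver_algs entry to register
 *
 * Return: 0 if the algorithm was registered or intentionally skipped
 *	   < 0 otherwise
 */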
4606 static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4607 {
4608 	struct spu_hw *spu = &iproc_priv.spu;
4609 	struct ahash_alg *hash = &driver_alg->alg.hash;
4610 	int err;
4611 
4612 	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
4613 	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4614 	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4615 	    (spu->spu_type == SPU_TYPE_SPUM))
4616 		return 0;
4617 
	/* SHA3 algorithm variants are only supported on SPU2, version 2 */
4619 	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4620 	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4621 		return 0;
4622 
4623 	hash->halg.base.cra_module = THIS_MODULE;
4624 	hash->halg.base.cra_priority = hash_pri;
4625 	hash->halg.base.cra_alignmask = 0;
4626 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4627 	hash->halg.base.cra_init = ahash_cra_init;
4628 	hash->halg.base.cra_exit = generic_cra_exit;
4629 	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4630 	hash->halg.statesize = sizeof(struct spu_hash_export_s);
4631 
4632 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4633 		hash->init = ahash_init;
4634 		hash->update = ahash_update;
4635 		hash->final = ahash_final;
4636 		hash->finup = ahash_finup;
4637 		hash->digest = ahash_digest;
4638 		if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4639 		    ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4640 		    (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4641 			hash->setkey = ahash_setkey;
4642 		}
4643 	} else {
4644 		hash->setkey = ahash_hmac_setkey;
4645 		hash->init = ahash_hmac_init;
4646 		hash->update = ahash_hmac_update;
4647 		hash->final = ahash_hmac_final;
4648 		hash->finup = ahash_hmac_finup;
4649 		hash->digest = ahash_hmac_digest;
4650 	}
4651 	hash->export = ahash_export;
4652 	hash->import = ahash_import;
4653 
4654 	err = crypto_register_ahash(hash);
4655 	/* Mark alg as having been registered, if successful */
4656 	if (err == 0)
4657 		driver_alg->registered = true;
4658 	pr_debug("  registered ahash %s\n",
4659 		 hash->halg.base.cra_driver_name);
4660 	return err;
4661 }
4662 
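/**
 * spu_register_aead() - Register an AEAD algorithm from the driver_algs
 * table. The per-entry setkey handler is already set in the table itself.
 * @driver_alg: the driver_algs entry to register
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */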
4663 static int spu_register_aead(struct iproc_alg_s *driver_alg)
4664 {
4665 	struct aead_alg *aead = &driver_alg->alg.aead;
4666 	int err;
4667 
4668 	aead->base.cra_module = THIS_MODULE;
4669 	aead->base.cra_priority = aead_pri;
4670 	aead->base.cra_alignmask = 0;
4671 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4672 
4673 	aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
4674 	/* setkey set in alg initialization */
4675 	aead->setauthsize = aead_setauthsize;
4676 	aead->encrypt = aead_encrypt;
4677 	aead->decrypt = aead_decrypt;
4678 	aead->init = aead_cra_init;
4679 	aead->exit = aead_cra_exit;
4680 
4681 	err = crypto_register_aead(aead);
4682 	/* Mark alg as having been registered, if successful */
4683 	if (err == 0)
4684 		driver_alg->registered = true;
4685 	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
4686 	return err;
4687 }
4688 
/**
 * spu_algs_register() - Register the crypto algorithms the device supports.
 * @dev: device structure
 *
 * Return: 0 if all supported algorithms registered successfully;
 *	   < 0 on the first failure, after unwinding any earlier
 *	   registrations
 */
4690 static int spu_algs_register(struct device *dev)
4691 {
4692 	int i, j;
4693 	int err;
4694 
4695 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4696 		switch (driver_algs[i].type) {
4697 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4698 			err = spu_register_ablkcipher(&driver_algs[i]);
4699 			break;
4700 		case CRYPTO_ALG_TYPE_AHASH:
4701 			err = spu_register_ahash(&driver_algs[i]);
4702 			break;
4703 		case CRYPTO_ALG_TYPE_AEAD:
4704 			err = spu_register_aead(&driver_algs[i]);
4705 			break;
4706 		default:
4707 			dev_err(dev,
				"iproc-crypto: unknown alg type: %d\n",
4709 				driver_algs[i].type);
4710 			err = -EINVAL;
4711 		}
4712 
4713 		if (err) {
4714 			dev_err(dev, "alg registration failed with error %d\n",
4715 				err);
4716 			goto err_algs;
4717 		}
4718 	}
4719 
4720 	return 0;
4721 
4722 err_algs:
4723 	for (j = 0; j < i; j++) {
4724 		/* Skip any algorithm not registered */
4725 		if (!driver_algs[j].registered)
4726 			continue;
4727 		switch (driver_algs[j].type) {
4728 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4729 			crypto_unregister_alg(&driver_algs[j].alg.crypto);
4730 			driver_algs[j].registered = false;
4731 			break;
4732 		case CRYPTO_ALG_TYPE_AHASH:
4733 			crypto_unregister_ahash(&driver_algs[j].alg.hash);
4734 			driver_algs[j].registered = false;
4735 			break;
4736 		case CRYPTO_ALG_TYPE_AEAD:
4737 			crypto_unregister_aead(&driver_algs[j].alg.aead);
4738 			driver_algs[j].registered = false;
4739 			break;
4740 		}
4741 	}
4742 	return err;
4743 }
4744 
4745 /* ==================== Kernel Platform API ==================== */
4746 
4747 static struct spu_type_subtype spum_ns2_types = {
4748 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
4749 };
4750 
4751 static struct spu_type_subtype spum_nsp_types = {
4752 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
4753 };
4754 
4755 static struct spu_type_subtype spu2_types = {
4756 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
4757 };
4758 
4759 static struct spu_type_subtype spu2_v2_types = {
4760 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
4761 };
4762 
4763 static const struct of_device_id bcm_spu_dt_ids[] = {
4764 	{
4765 		.compatible = "brcm,spum-crypto",
4766 		.data = &spum_ns2_types,
4767 	},
4768 	{
4769 		.compatible = "brcm,spum-nsp-crypto",
4770 		.data = &spum_nsp_types,
4771 	},
4772 	{
4773 		.compatible = "brcm,spu2-crypto",
4774 		.data = &spu2_types,
4775 	},
4776 	{
4777 		.compatible = "brcm,spu2-v2-crypto",
4778 		.data = &spu2_v2_types,
4779 	},
4780 	{ /* sentinel */ }
4781 };
4782 
4783 MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
4784 
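/**
 * spu_dt_read() - Read the device tree node for the SPU being probed: count
 * the mailbox channels, determine the hardware type and subtype from the
 * matched compatible string, and map the SPU control register blocks.
 * @pdev: Platform device structure
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */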
4785 static int spu_dt_read(struct platform_device *pdev)
4786 {
4787 	struct device *dev = &pdev->dev;
4788 	struct spu_hw *spu = &iproc_priv.spu;
4789 	struct resource *spu_ctrl_regs;
4790 	const struct spu_type_subtype *matched_spu_type;
4791 	struct device_node *dn = pdev->dev.of_node;
4792 	int err, i;
4793 
4794 	/* Count number of mailbox channels */
4795 	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
4796 
4797 	matched_spu_type = of_device_get_match_data(dev);
4798 	if (!matched_spu_type) {
4799 		dev_err(&pdev->dev, "Failed to match device\n");
4800 		return -ENODEV;
4801 	}
4802 
4803 	spu->spu_type = matched_spu_type->type;
4804 	spu->spu_subtype = matched_spu_type->subtype;
4805 
4807 	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
4808 		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
4809 
4810 		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
4811 		if (IS_ERR(spu->reg_vbase[i])) {
4812 			err = PTR_ERR(spu->reg_vbase[i]);
4813 			dev_err(&pdev->dev, "Failed to map registers: %d\n",
4814 				err);
4815 			spu->reg_vbase[i] = NULL;
4816 			return err;
4817 		}
4818 	}
4819 	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs\n", spu->num_spu);
4821 
4822 	return 0;
4823 }
4824 
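/**
 * bcm_spu_probe() - Bind the driver to an SPU device: read the device tree,
 * acquire mailbox channels, hook up the generation-specific SPU functions,
 * and register the supported algorithms with the crypto API.
 * @pdev: Platform device structure
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */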
4825 static int bcm_spu_probe(struct platform_device *pdev)
4826 {
4827 	struct device *dev = &pdev->dev;
4828 	struct spu_hw *spu = &iproc_priv.spu;
4829 	int err = 0;
4830 
4831 	iproc_priv.pdev  = pdev;
4832 	platform_set_drvdata(iproc_priv.pdev,
4833 			     &iproc_priv);
4834 
4835 	err = spu_dt_read(pdev);
4836 	if (err < 0)
4837 		goto failure;
4838 
4839 	err = spu_mb_init(&pdev->dev);
4840 	if (err < 0)
4841 		goto failure;
4842 
4843 	if (spu->spu_type == SPU_TYPE_SPUM)
4844 		iproc_priv.bcm_hdr_len = 8;
4845 	else if (spu->spu_type == SPU_TYPE_SPU2)
4846 		iproc_priv.bcm_hdr_len = 0;
4847 
4848 	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
4849 
4850 	spu_counters_init();
4851 
4852 	spu_setup_debugfs();
4853 
4854 	err = spu_algs_register(dev);
4855 	if (err < 0)
4856 		goto fail_reg;
4857 
4858 	return 0;
4859 
fail_reg:
	spu_free_debugfs();
	spu_mb_release(pdev);
failure:
	/*
	 * spu_dt_read() and spu_mb_init() clean up after themselves on
	 * failure, so only log here; releasing mailbox channels that were
	 * never acquired (or were already freed) would crash or double free.
	 */
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
4867 }
4868 
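/**
 * bcm_spu_remove() - Unbind the driver: unregister every algorithm that was
 * actually registered in probe, then tear down debugfs and release the
 * mailbox channels.
 * @pdev: Platform device structure
 *
 * Return: 0 always
 */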
4869 static int bcm_spu_remove(struct platform_device *pdev)
4870 {
4871 	int i;
4872 	struct device *dev = &pdev->dev;
4873 	char *cdn;
4874 
4875 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4876 		/*
4877 		 * Not all algorithms were registered, depending on whether
4878 		 * hardware is SPU or SPU2.  So here we make sure to skip
4879 		 * those algorithms that were not previously registered.
4880 		 */
4881 		if (!driver_algs[i].registered)
4882 			continue;
4883 
4884 		switch (driver_algs[i].type) {
4885 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4886 			crypto_unregister_alg(&driver_algs[i].alg.crypto);
4887 			dev_dbg(dev, "  unregistered cipher %s\n",
4888 				driver_algs[i].alg.crypto.cra_driver_name);
4889 			driver_algs[i].registered = false;
4890 			break;
4891 		case CRYPTO_ALG_TYPE_AHASH:
4892 			crypto_unregister_ahash(&driver_algs[i].alg.hash);
4893 			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4894 			dev_dbg(dev, "  unregistered hash %s\n", cdn);
4895 			driver_algs[i].registered = false;
4896 			break;
4897 		case CRYPTO_ALG_TYPE_AEAD:
4898 			crypto_unregister_aead(&driver_algs[i].alg.aead);
4899 			dev_dbg(dev, "  unregistered aead %s\n",
4900 				driver_algs[i].alg.aead.base.cra_driver_name);
4901 			driver_algs[i].registered = false;
4902 			break;
4903 		}
4904 	}
4905 	spu_free_debugfs();
4906 	spu_mb_release(pdev);
4907 	return 0;
4908 }
4909 
4910 /* ===== Kernel Module API ===== */
4911 
4912 static struct platform_driver bcm_spu_pdriver = {
4913 	.driver = {
4914 		   .name = "brcm-spu-crypto",
4915 		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
4916 		   },
4917 	.probe = bcm_spu_probe,
4918 	.remove = bcm_spu_remove,
4919 };
4920 module_platform_driver(bcm_spu_pdriver);
4921 
4922 MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4923 MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4924 MODULE_LICENSE("GPL v2");
4925