xref: /linux/drivers/crypto/chelsio/chcr_algo.c (revision 9ffd93852fc66f2bdf61a574db107f106e9fb2ff)
1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/ctr.h>
59 #include <crypto/gf128mul.h>
60 #include <crypto/internal/aead.h>
61 #include <crypto/null.h>
62 #include <crypto/internal/skcipher.h>
63 #include <crypto/aead.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/internal/hash.h>
66 
67 #include "t4fw_api.h"
68 #include "t4_msg.h"
69 #include "chcr_core.h"
70 #include "chcr_algo.h"
71 #include "chcr_crypto.h"
72 
73 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
74 {
75 	return ctx->crypto_ctx->aeadctx;
76 }
77 
78 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
79 {
80 	return ctx->crypto_ctx->ablkctx;
81 }
82 
83 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
84 {
85 	return ctx->crypto_ctx->hmacctx;
86 }
87 
88 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
89 {
90 	return gctx->ctx->gcm;
91 }
92 
93 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
94 {
95 	return gctx->ctx->authenc;
96 }
97 
98 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
99 {
100 	return ctx->dev->u_ctx;
101 }
102 
103 static inline int is_ofld_imm(const struct sk_buff *skb)
104 {
105 	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
106 }
107 
108 /*
109  *	sgl_len - calculates the size of an SGL of the given capacity
110  *	@n: the number of SGL entries
111  *	Calculates the number of flits needed for a scatter/gather list that
112  *	can hold the given number of entries.
113  */
114 static inline unsigned int sgl_len(unsigned int n)
115 {
116 	n--;
117 	return (3 * n) / 2 + (n & 1) + 2;
118 }
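/*
 * Worked example: for n = 8 entries the formula gives (3 * 7) / 2 +
 * (7 & 1) + 2 = 13 flits (a flit is 8 bytes), and the minimum case
 * n = 1 costs 2 flits.
 */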
119 
120 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
121 {
122 	u8 temp[SHA512_DIGEST_SIZE];
123 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
124 	int authsize = crypto_aead_authsize(tfm);
125 	struct cpl_fw6_pld *fw6_pld;
126 	int cmp = 0;
127 
128 	fw6_pld = (struct cpl_fw6_pld *)input;
129 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
130 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
131 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
132 	} else {
133 
134 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
135 				authsize, req->assoclen +
136 				req->cryptlen - authsize);
137 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
138 	}
139 	if (cmp)
140 		*err = -EBADMSG;
141 	else
142 		*err = 0;
143 }
144 
145 /*
146  *	chcr_handle_resp - unmap the DMA buffers, copy back the result and
147  *	complete the request
 *	@req: crypto request
148  */
149 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
150 			 int err)
151 {
152 	struct crypto_tfm *tfm = req->tfm;
153 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
154 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
155 	struct chcr_req_ctx ctx_req;
156 	unsigned int digestsize, updated_digestsize;
157 	struct adapter *adap = padap(ctx->dev);
158 
159 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
160 	case CRYPTO_ALG_TYPE_AEAD:
161 		ctx_req.req.aead_req = aead_request_cast(req);
162 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
163 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
164 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
165 		if (ctx_req.ctx.reqctx->skb) {
166 			kfree_skb(ctx_req.ctx.reqctx->skb);
167 			ctx_req.ctx.reqctx->skb = NULL;
168 		}
169 		free_new_sg(ctx_req.ctx.reqctx->newdstsg);
170 		ctx_req.ctx.reqctx->newdstsg = NULL;
171 		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
172 			chcr_verify_tag(ctx_req.req.aead_req, input,
173 					&err);
174 			ctx_req.ctx.reqctx->verify = VERIFY_HW;
175 		}
176 		ctx_req.req.aead_req->base.complete(req, err);
177 		break;
178 
179 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
180 		 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
181 					       input, err);
182 		break;
183 
184 	case CRYPTO_ALG_TYPE_AHASH:
185 		ctx_req.req.ahash_req = ahash_request_cast(req);
186 		ctx_req.ctx.ahash_ctx =
187 			ahash_request_ctx(ctx_req.req.ahash_req);
188 		digestsize =
189 			crypto_ahash_digestsize(crypto_ahash_reqtfm(
190 							ctx_req.req.ahash_req));
191 		updated_digestsize = digestsize;
192 		if (digestsize == SHA224_DIGEST_SIZE)
193 			updated_digestsize = SHA256_DIGEST_SIZE;
194 		else if (digestsize == SHA384_DIGEST_SIZE)
195 			updated_digestsize = SHA512_DIGEST_SIZE;
196 		if (ctx_req.ctx.ahash_ctx->skb) {
197 			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
198 			ctx_req.ctx.ahash_ctx->skb = NULL;
199 		}
200 		if (ctx_req.ctx.ahash_ctx->result == 1) {
201 			ctx_req.ctx.ahash_ctx->result = 0;
202 			memcpy(ctx_req.req.ahash_req->result, input +
203 			       sizeof(struct cpl_fw6_pld),
204 			       digestsize);
205 		} else {
206 			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
207 			       sizeof(struct cpl_fw6_pld),
208 			       updated_digestsize);
209 		}
210 		ctx_req.req.ahash_req->base.complete(req, err);
211 		break;
212 	}
213 	atomic_inc(&adap->chcr_stats.complete);
214 	return err;
215 }
216 
217 /*
218  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
219  *	@skb: the packet
220  *	Returns the number of flits needed for the given offload packet.
221  *	These packets are already fully constructed and no additional headers
222  *	will be added.
223  */
224 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
225 {
226 	unsigned int flits, cnt;
227 
228 	if (is_ofld_imm(skb))
229 		return DIV_ROUND_UP(skb->len, 8);
230 
231 	flits = skb_transport_offset(skb) / 8;   /* headers */
232 	cnt = skb_shinfo(skb)->nr_frags;
233 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
234 		cnt++;
235 	return flits + sgl_len(cnt);
236 }
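/*
 * Worked example: an immediate packet with skb->len = 100 needs
 * DIV_ROUND_UP(100, 8) = 13 flits; a non-immediate packet with 40 bytes of
 * headers and 4 page fragments needs 40 / 8 + sgl_len(5) = 5 + 8 = 13 flits,
 * the extra SGL entry accounting for any linear tail beyond the transport
 * header.
 */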
237 
238 static inline void get_aes_decrypt_key(unsigned char *dec_key,
239 				       const unsigned char *key,
240 				       unsigned int keylength)
241 {
242 	u32 temp;
243 	u32 w_ring[MAX_NK];
244 	int i, j, k;
245 	u8  nr, nk;
246 
247 	switch (keylength) {
248 	case AES_KEYLENGTH_128BIT:
249 		nk = KEYLENGTH_4BYTES;
250 		nr = NUMBER_OF_ROUNDS_10;
251 		break;
252 	case AES_KEYLENGTH_192BIT:
253 		nk = KEYLENGTH_6BYTES;
254 		nr = NUMBER_OF_ROUNDS_12;
255 		break;
256 	case AES_KEYLENGTH_256BIT:
257 		nk = KEYLENGTH_8BYTES;
258 		nr = NUMBER_OF_ROUNDS_14;
259 		break;
260 	default:
261 		return;
262 	}
263 	for (i = 0; i < nk; i++)
264 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
265 
266 	i = 0;
267 	temp = w_ring[nk - 1];
268 	while (i + nk < (nr + 1) * 4) {
269 		if (!(i % nk)) {
270 			/* RotWord(temp) */
271 			temp = (temp << 8) | (temp >> 24);
272 			temp = aes_ks_subword(temp);
273 			temp ^= round_constant[i / nk];
274 		} else if (nk == 8 && (i % 4 == 0)) {
275 			temp = aes_ks_subword(temp);
276 		}
277 		w_ring[i % nk] ^= temp;
278 		temp = w_ring[i % nk];
279 		i++;
280 	}
281 	i--;
282 	for (k = 0, j = i % nk; k < nk; k++) {
283 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
284 		j--;
285 		if (j < 0)
286 			j += nk;
287 	}
288 }
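/*
 * For reference: the loop above runs the standard AES key expansion while
 * keeping only the most recent nk words in w_ring, so when it terminates the
 * ring holds the last round key (w[40..43] for a 128-bit key, i.e. round key
 * 10). The final loop copies those words out in reverse order, with htonl()
 * restoring big-endian byte order, to form the decryption key written to
 * dec_key.
 */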
289 
290 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
291 {
292 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
293 
294 	switch (ds) {
295 	case SHA1_DIGEST_SIZE:
296 		base_hash = crypto_alloc_shash("sha1", 0, 0);
297 		break;
298 	case SHA224_DIGEST_SIZE:
299 		base_hash = crypto_alloc_shash("sha224", 0, 0);
300 		break;
301 	case SHA256_DIGEST_SIZE:
302 		base_hash = crypto_alloc_shash("sha256", 0, 0);
303 		break;
304 	case SHA384_DIGEST_SIZE:
305 		base_hash = crypto_alloc_shash("sha384", 0, 0);
306 		break;
307 	case SHA512_DIGEST_SIZE:
308 		base_hash = crypto_alloc_shash("sha512", 0, 0);
309 		break;
310 	}
311 
312 	return base_hash;
313 }
314 
315 static int chcr_compute_partial_hash(struct shash_desc *desc,
316 				     char *iopad, char *result_hash,
317 				     int digest_size)
318 {
319 	struct sha1_state sha1_st;
320 	struct sha256_state sha256_st;
321 	struct sha512_state sha512_st;
322 	int error;
323 
324 	if (digest_size == SHA1_DIGEST_SIZE) {
325 		error = crypto_shash_init(desc) ?:
326 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
327 			crypto_shash_export(desc, (void *)&sha1_st);
328 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
329 	} else if (digest_size == SHA224_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha256_st);
333 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334 
335 	} else if (digest_size == SHA256_DIGEST_SIZE) {
336 		error = crypto_shash_init(desc) ?:
337 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
338 			crypto_shash_export(desc, (void *)&sha256_st);
339 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
340 
341 	} else if (digest_size == SHA384_DIGEST_SIZE) {
342 		error = crypto_shash_init(desc) ?:
343 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344 			crypto_shash_export(desc, (void *)&sha512_st);
345 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346 
347 	} else if (digest_size == SHA512_DIGEST_SIZE) {
348 		error = crypto_shash_init(desc) ?:
349 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
350 			crypto_shash_export(desc, (void *)&sha512_st);
351 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
352 	} else {
353 		error = -EINVAL;
354 		pr_err("Unknown digest size %d\n", digest_size);
355 	}
356 	return error;
357 }
358 
359 static void chcr_change_order(char *buf, int ds)
360 {
361 	int i;
362 
363 	if (ds == SHA512_DIGEST_SIZE) {
364 		for (i = 0; i < (ds / sizeof(u64)); i++)
365 			*((__be64 *)buf + i) =
366 				cpu_to_be64(*((u64 *)buf + i));
367 	} else {
368 		for (i = 0; i < (ds / sizeof(u32)); i++)
369 			*((__be32 *)buf + i) =
370 				cpu_to_be32(*((u32 *)buf + i));
371 	}
372 }
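/*
 * Example: the exported SHA-256 state keeps its words in host order, so on a
 * little-endian machine H0 = 0x6a09e667 sits in memory as 67 e6 09 6a; the
 * loop above rewrites it as the big-endian sequence 6a 09 e6 67 that the key
 * context expects.
 */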
373 
374 static inline int is_hmac(struct crypto_tfm *tfm)
375 {
376 	struct crypto_alg *alg = tfm->__crt_alg;
377 	struct chcr_alg_template *chcr_crypto_alg =
378 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
379 			     alg.hash);
380 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
381 		return 1;
382 	return 0;
383 }
384 
385 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
386 			   struct scatterlist *sg,
387 			   struct phys_sge_parm *sg_param)
388 {
389 	struct phys_sge_pairs *to;
390 	unsigned int len = 0, left_size = sg_param->obsize;
391 	unsigned int nents = sg_param->nents, i, j = 0;
392 
393 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
394 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
395 	phys_cpl->pcirlxorder_to_noofsgentr =
396 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
397 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
398 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
400 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
401 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
402 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
403 	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
404 	phys_cpl->rss_hdr_int.hash_val = 0;
405 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
406 				       sizeof(struct cpl_rx_phys_dsgl));
407 	for (i = 0; nents && left_size; to++) {
408 		for (j = 0; j < 8 && nents && left_size; j++, nents--) {
409 			len = min(left_size, sg_dma_len(sg));
410 			to->len[j] = htons(len);
411 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
412 			left_size -= len;
413 			sg = sg_next(sg);
414 		}
415 	}
416 }
417 
418 static inline int map_writesg_phys_cpl(struct device *dev,
419 					struct cpl_rx_phys_dsgl *phys_cpl,
420 					struct scatterlist *sg,
421 					struct phys_sge_parm *sg_param)
422 {
423 	if (!sg || !sg_param->nents)
424 		return -EINVAL;
425 
426 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
427 	if (sg_param->nents == 0) {
428 		pr_err("CHCR : DMA mapping failed\n");
429 		return -EINVAL;
430 	}
431 	write_phys_cpl(phys_cpl, sg, sg_param);
432 	return 0;
433 }
434 
435 static inline int get_aead_subtype(struct crypto_aead *aead)
436 {
437 	struct aead_alg *alg = crypto_aead_alg(aead);
438 	struct chcr_alg_template *chcr_crypto_alg =
439 		container_of(alg, struct chcr_alg_template, alg.aead);
440 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
441 }
442 
443 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
444 {
445 	struct crypto_alg *alg = tfm->__crt_alg;
446 	struct chcr_alg_template *chcr_crypto_alg =
447 		container_of(alg, struct chcr_alg_template, alg.crypto);
448 
449 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
450 }
451 
452 static inline void write_buffer_to_skb(struct sk_buff *skb,
453 					unsigned int *frags,
454 					char *bfr,
455 					u8 bfr_len)
456 {
457 	skb->len += bfr_len;
458 	skb->data_len += bfr_len;
459 	skb->truesize += bfr_len;
460 	get_page(virt_to_page(bfr));
461 	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
462 			   offset_in_page(bfr), bfr_len);
463 	(*frags)++;
464 }
465 
466 
467 static inline void
468 write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
469 			struct scatterlist *sg, unsigned int count)
470 {
471 	struct page *spage;
472 	unsigned int page_len;
473 
474 	skb->len += count;
475 	skb->data_len += count;
476 	skb->truesize += count;
477 
478 	while (count > 0) {
479 		if (!sg || (!(sg->length)))
480 			break;
481 		spage = sg_page(sg);
482 		get_page(spage);
483 		page_len = min(sg->length, count);
484 		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
485 		(*frags)++;
486 		count -= page_len;
487 		sg = sg_next(sg);
488 	}
489 }
490 
491 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
492 {
493 	struct adapter *adap = netdev2adap(dev);
494 	struct sge_uld_txq_info *txq_info =
495 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
496 	struct sge_uld_txq *txq;
497 	int ret = 0;
498 
499 	local_bh_disable();
500 	txq = &txq_info->uldtxq[idx];
501 	spin_lock(&txq->sendq.lock);
502 	if (txq->full)
503 		ret = -1;
504 	spin_unlock(&txq->sendq.lock);
505 	local_bh_enable();
506 	return ret;
507 }
508 
509 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
510 			       struct _key_ctx *key_ctx)
511 {
512 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
513 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
514 	} else {
515 		memcpy(key_ctx->key,
516 		       ablkctx->key + (ablkctx->enckey_len >> 1),
517 		       ablkctx->enckey_len >> 1);
518 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
519 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
520 	}
521 	return 0;
522 }
523 static int chcr_sg_ent_in_wr(struct scatterlist *src,
524 			     struct scatterlist *dst,
525 			     unsigned int minsg,
526 			     unsigned int space,
527 			     short int *sent,
528 			     short int *dent)
529 {
530 	int srclen = 0, dstlen = 0;
531 	int srcsg = minsg, dstsg = 0;
532 
533 	*sent = 0;
534 	*dent = 0;
535 	while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
536 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
537 		srclen += src->length;
538 		srcsg++;
539 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
540 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
541 			if (srclen <= dstlen)
542 				break;
543 			dstlen += dst->length;
544 			dst = sg_next(dst);
545 			dstsg++;
546 		}
547 		src = sg_next(src);
548 	}
549 	*sent = srcsg - minsg;
550 	*dent = dstsg;
551 	return min(srclen, dstlen);
552 }
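/*
 * chcr_sg_ent_in_wr() walks the source and destination lists and reports,
 * via *sent and *dent, how many SG entries of each fit in the remaining WR
 * space (minsg source slots are treated as already reserved); the return
 * value is the number of payload bytes those entries can carry, i.e. the
 * smaller of the source and destination byte totals walked.
 */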
553 
554 static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
555 				u32 flags,
556 				struct scatterlist *src,
557 				struct scatterlist *dst,
558 				unsigned int nbytes,
559 				u8 *iv,
560 				unsigned short op_type)
561 {
562 	int err;
563 
564 	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
565 	skcipher_request_set_tfm(subreq, cipher);
566 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
567 	skcipher_request_set_crypt(subreq, src, dst,
568 				   nbytes, iv);
569 
570 	err = op_type ? crypto_skcipher_decrypt(subreq) :
571 		crypto_skcipher_encrypt(subreq);
572 	skcipher_request_zero(subreq);
573 
574 	return err;
575 
576 }
577 static inline void create_wreq(struct chcr_context *ctx,
578 			       struct chcr_wr *chcr_req,
579 			       void *req, struct sk_buff *skb,
580 			       int kctx_len, int hash_sz,
581 			       int is_iv,
582 			       unsigned int sc_len,
583 			       unsigned int lcb)
584 {
585 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
586 	int iv_loc = IV_DSGL;
587 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
588 	unsigned int immdatalen = 0, nr_frags = 0;
589 
590 	if (is_ofld_imm(skb)) {
591 		immdatalen = skb->data_len;
592 		iv_loc = IV_IMMEDIATE;
593 	} else {
594 		nr_frags = skb_shinfo(skb)->nr_frags;
595 	}
596 
597 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
598 				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
599 	chcr_req->wreq.pld_size_hash_size =
600 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
601 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
602 	chcr_req->wreq.len16_pkd =
603 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
604 				    (calc_tx_flits_ofld(skb) * 8), 16)));
605 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
606 	chcr_req->wreq.rx_chid_to_rx_q_id =
607 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
608 				is_iv ? iv_loc : IV_NOP, !!lcb,
609 				ctx->tx_qidx);
610 
611 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
612 						       qid);
613 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
614 					16) - ((sizeof(chcr_req->wreq)) >> 4)));
615 
616 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
617 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
618 				   sizeof(chcr_req->key_ctx) +
619 				   kctx_len + sc_len + immdatalen);
620 }
621 
622 /**
623  *	create_cipher_wr - form the WR for cipher operations
624  *	@wrparam: cipher WR parameters, carrying the ablkcipher request, the
625  *		  ingress qid where the response of this WR should be
626  *		  received, the source scatterlist and the number of bytes to
627  *		  process; encryption vs decryption is taken from reqctx->op.
628  */
629 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
630 {
631 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
632 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
633 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
634 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
635 	struct sk_buff *skb = NULL;
636 	struct chcr_wr *chcr_req;
637 	struct cpl_rx_phys_dsgl *phys_cpl;
638 	struct chcr_blkcipher_req_ctx *reqctx =
639 		ablkcipher_request_ctx(wrparam->req);
640 	struct phys_sge_parm sg_param;
641 	unsigned int frags = 0, transhdr_len, phys_dsgl;
642 	int error;
643 	unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
644 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
645 			GFP_KERNEL : GFP_ATOMIC;
646 	struct adapter *adap = padap(ctx->dev);
647 
648 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
649 
650 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
651 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
652 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
653 	if (!skb) {
654 		error = -ENOMEM;
655 		goto err;
656 	}
657 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
658 	chcr_req = __skb_put_zero(skb, transhdr_len);
659 	chcr_req->sec_cpl.op_ivinsrtofst =
660 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
661 
662 	chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
663 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
664 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
665 
666 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
667 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
668 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
669 							 ablkctx->ciph_mode,
670 							 0, 0, ivsize >> 1);
671 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
672 							  0, 1, phys_dsgl);
673 
674 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
675 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
676 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
677 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
678 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
679 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
680 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
681 	} else {
682 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
683 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
684 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
685 			       ablkctx->enckey_len);
686 		} else {
687 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
688 			       (ablkctx->enckey_len >> 1),
689 			       ablkctx->enckey_len >> 1);
690 			memcpy(chcr_req->key_ctx.key +
691 			       (ablkctx->enckey_len >> 1),
692 			       ablkctx->key,
693 			       ablkctx->enckey_len >> 1);
694 		}
695 	}
696 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
697 	sg_param.nents = reqctx->dst_nents;
698 	sg_param.obsize =  wrparam->bytes;
699 	sg_param.qid = wrparam->qid;
700 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
701 				       reqctx->dst, &sg_param);
702 	if (error)
703 		goto map_fail1;
704 
705 	skb_set_transport_header(skb, transhdr_len);
706 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
707 	write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
708 	atomic_inc(&adap->chcr_stats.cipher_rqst);
709 	create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
710 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
711 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
712 	reqctx->skb = skb;
713 	skb_get(skb);
714 	return skb;
715 map_fail1:
716 	kfree_skb(skb);
717 err:
718 	return ERR_PTR(error);
719 }
720 
721 static inline int chcr_keyctx_ck_size(unsigned int keylen)
722 {
723 	int ck_size = 0;
724 
725 	if (keylen == AES_KEYSIZE_128)
726 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
727 	else if (keylen == AES_KEYSIZE_192)
728 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
729 	else if (keylen == AES_KEYSIZE_256)
730 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
731 	else
732 		ck_size = 0;
733 
734 	return ck_size;
735 }
736 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
737 				       const u8 *key,
738 				       unsigned int keylen)
739 {
740 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
741 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
742 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
743 	int err = 0;
744 
745 	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
746 	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
747 				  CRYPTO_TFM_REQ_MASK);
748 	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
749 	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
750 	tfm->crt_flags |=
751 		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
752 		CRYPTO_TFM_RES_MASK;
753 	return err;
754 }
755 
756 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
757 			       const u8 *key,
758 			       unsigned int keylen)
759 {
760 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
761 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
762 	unsigned int ck_size, context_size;
763 	u16 alignment = 0;
764 	int err;
765 
766 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
767 	if (err)
768 		goto badkey_err;
769 
770 	ck_size = chcr_keyctx_ck_size(keylen);
771 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
772 	memcpy(ablkctx->key, key, keylen);
773 	ablkctx->enckey_len = keylen;
774 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
775 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
776 			keylen + alignment) >> 4;
777 
778 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
779 						0, 0, context_size);
780 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
781 	return 0;
782 badkey_err:
783 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
784 	ablkctx->enckey_len = 0;
785 
786 	return err;
787 }
788 
789 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
790 				   const u8 *key,
791 				   unsigned int keylen)
792 {
793 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
794 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
795 	unsigned int ck_size, context_size;
796 	u16 alignment = 0;
797 	int err;
798 
799 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
800 	if (err)
801 		goto badkey_err;
802 	ck_size = chcr_keyctx_ck_size(keylen);
803 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
804 	memcpy(ablkctx->key, key, keylen);
805 	ablkctx->enckey_len = keylen;
806 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
807 			keylen + alignment) >> 4;
808 
809 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
810 						0, 0, context_size);
811 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
812 
813 	return 0;
814 badkey_err:
815 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
816 	ablkctx->enckey_len = 0;
817 
818 	return err;
819 }
820 
821 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
822 				   const u8 *key,
823 				   unsigned int keylen)
824 {
825 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
826 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
827 	unsigned int ck_size, context_size;
828 	u16 alignment = 0;
829 	int err;
830 
831 	if (keylen < CTR_RFC3686_NONCE_SIZE)
832 		return -EINVAL;
833 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
834 	       CTR_RFC3686_NONCE_SIZE);
835 
836 	keylen -= CTR_RFC3686_NONCE_SIZE;
837 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
838 	if (err)
839 		goto badkey_err;
840 
841 	ck_size = chcr_keyctx_ck_size(keylen);
842 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
843 	memcpy(ablkctx->key, key, keylen);
844 	ablkctx->enckey_len = keylen;
845 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
846 			keylen + alignment) >> 4;
847 
848 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
849 						0, 0, context_size);
850 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
851 
852 	return 0;
853 badkey_err:
854 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
855 	ablkctx->enckey_len = 0;
856 
857 	return err;
858 }
859 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
860 {
861 	unsigned int size = AES_BLOCK_SIZE;
862 	__be32 *b = (__be32 *)(dstiv + size);
863 	u32 c, prev;
864 
865 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
866 	for (; size >= 4; size -= 4) {
867 		prev = be32_to_cpu(*--b);
868 		c = prev + add;
869 		*b = cpu_to_be32(c);
870 		if (prev < c)
871 			break;
872 		add = 1;
873 	}
874 
875 }
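/*
 * Example of the carry handling above: with a counter block whose last two
 * 32-bit words are 00000001 ffffffff and add = 2, the last word wraps to
 * 00000001 (prev > c, so no break) and the next word is then incremented by
 * one, giving 00000002 00000001.
 */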
876 
877 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
878 {
879 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
880 	u64 c;
881 	u32 temp = be32_to_cpu(*--b);
882 
883 	temp = ~temp;
884 	c = (u64)temp +  1; // No of block can processed withou overflow
885 	if ((bytes / AES_BLOCK_SIZE) > c)
886 		bytes = c * AES_BLOCK_SIZE;
887 	return bytes;
888 }
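/*
 * Example: if the last 32-bit word of the counter is 0xfffffffe, then
 * ~temp + 1 = 2, so at most two AES blocks (32 bytes) can be sent in this
 * request before the 32-bit counter wraps; the remainder is carried by a
 * follow-up request with an updated IV.
 */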
889 
890 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
891 {
892 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
893 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
894 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
895 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
896 	struct crypto_cipher *cipher;
897 	int ret, i;
898 	u8 *key;
899 	unsigned int keylen;
900 
901 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
902 	memcpy(iv, req->info, AES_BLOCK_SIZE);
903 
904 	if (IS_ERR(cipher)) {
905 		ret = PTR_ERR(cipher);
906 		goto out;
907 	}
908 	keylen = ablkctx->enckey_len / 2;
909 	key = ablkctx->key + keylen;
910 	ret = crypto_cipher_setkey(cipher, key, keylen);
911 	if (ret)
912 		goto out1;
913 
914 	crypto_cipher_encrypt_one(cipher, iv, iv);
915 	for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
916 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
917 
918 	crypto_cipher_decrypt_one(cipher, iv, iv);
919 out1:
920 	crypto_free_cipher(cipher);
921 out:
922 	return ret;
923 }
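/*
 * The helper above advances the XTS tweak in software: it computes E_K2(IV),
 * multiplies it by x^(processed / 16) in GF(2^128) and then decrypts the
 * result with K2, presumably so that when the hardware re-encrypts the
 * returned IV with the tweak key it obtains the tweak for the next block to
 * be processed.
 */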
924 
925 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
926 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
927 {
928 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
929 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
930 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
931 	int ret = 0;
932 
933 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
934 		ctr_add_iv(iv, req->info, (reqctx->processed /
935 			   AES_BLOCK_SIZE));
936 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
937 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
938 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
939 						AES_BLOCK_SIZE) + 1);
940 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
941 		ret = chcr_update_tweak(req, iv);
942 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
943 		if (reqctx->op)
944 			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
945 					   16,
946 					   reqctx->processed - AES_BLOCK_SIZE);
947 		else
948 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
949 	}
950 
951 	return ret;
952 
953 }
954 
955 /* We need a separate function for the final IV because in RFC 3686 the
956  * initial counter starts from 1 and the IV buffer is only 8 bytes, which
957  * remains constant across subsequent update requests.
958  */
959 
960 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
961 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
962 {
963 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
964 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
965 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
966 	int ret = 0;
967 
968 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
969 		ctr_add_iv(iv, req->info, (reqctx->processed /
970 			   AES_BLOCK_SIZE));
971 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
972 		ret = chcr_update_tweak(req, iv);
973 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
974 		if (reqctx->op)
975 			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
976 					   16,
977 					   reqctx->processed - AES_BLOCK_SIZE);
978 		else
979 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
980 
981 	}
982 	return ret;
983 
984 }
985 
986 
987 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
988 				   unsigned char *input, int err)
989 {
990 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
991 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
992 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
993 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
994 	struct sk_buff *skb;
995 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
996 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
997 	struct  cipher_wr_param wrparam;
998 	int bytes;
999 
1000 	dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
1001 		     DMA_FROM_DEVICE);
1002 
1003 	if (reqctx->skb) {
1004 		kfree_skb(reqctx->skb);
1005 		reqctx->skb = NULL;
1006 	}
1007 	if (err)
1008 		goto complete;
1009 
1010 	if (req->nbytes == reqctx->processed) {
1011 		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1012 		goto complete;
1013 	}
1014 
1015 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1016 					    ctx->tx_qidx))) {
1017 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1018 			err = -EBUSY;
1019 			goto complete;
1020 		}
1021 
1022 	}
1023 	wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
1024 				       reqctx->processed);
1025 	reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
1026 					 reqctx->processed);
1027 	if (!wrparam.srcsg || !reqctx->dst) {
1028 		pr_err("Input sg list length less that nbytes\n");
1029 		err = -EINVAL;
1030 		goto complete;
1031 	}
1032 	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
1033 				 SPACE_LEFT(ablkctx->enckey_len),
1034 				 &wrparam.snent, &reqctx->dst_nents);
1035 	if ((bytes + reqctx->processed) >= req->nbytes)
1036 		bytes  = req->nbytes - reqctx->processed;
1037 	else
1038 		bytes = ROUND_16(bytes);
1039 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1040 	if (err)
1041 		goto complete;
1042 
1043 	if (unlikely(bytes == 0)) {
1044 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1045 				     req->base.flags,
1046 				     wrparam.srcsg,
1047 				     reqctx->dst,
1048 				     req->nbytes - reqctx->processed,
1049 				     reqctx->iv,
1050 				     reqctx->op);
1051 		goto complete;
1052 	}
1053 
1054 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1055 	    CRYPTO_ALG_SUB_TYPE_CTR)
1056 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1057 	reqctx->processed += bytes;
1058 	wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
1059 	wrparam.req = req;
1060 	wrparam.bytes = bytes;
1061 	skb = create_cipher_wr(&wrparam);
1062 	if (IS_ERR(skb)) {
1063 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1064 		err = PTR_ERR(skb);
1065 		goto complete;
1066 	}
1067 	skb->dev = u_ctx->lldi.ports[0];
1068 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1069 	chcr_send_wr(skb);
1070 	return 0;
1071 complete:
1072 	free_new_sg(reqctx->newdstsg);
1073 	reqctx->newdstsg = NULL;
1074 	req->base.complete(&req->base, err);
1075 	return err;
1076 }
1077 
1078 static int process_cipher(struct ablkcipher_request *req,
1079 				  unsigned short qid,
1080 				  struct sk_buff **skb,
1081 				  unsigned short op_type)
1082 {
1083 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1084 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1085 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1086 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1087 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1088 	struct	cipher_wr_param wrparam;
1089 	int bytes, nents, err = -EINVAL;
1090 
1091 	reqctx->newdstsg = NULL;
1092 	reqctx->processed = 0;
1093 	if (!req->info)
1094 		goto error;
1095 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1096 	    (req->nbytes == 0) ||
1097 	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1098 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1099 		       ablkctx->enckey_len, req->nbytes, ivsize);
1100 		goto error;
1101 	}
1102 	wrparam.srcsg = req->src;
1103 	if (is_newsg(req->dst, &nents)) {
1104 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1105 		if (IS_ERR(reqctx->newdstsg))
1106 			return PTR_ERR(reqctx->newdstsg);
1107 		reqctx->dstsg = reqctx->newdstsg;
1108 	} else {
1109 		reqctx->dstsg = req->dst;
1110 	}
1111 	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
1112 				 SPACE_LEFT(ablkctx->enckey_len),
1113 				 &wrparam.snent,
1114 				 &reqctx->dst_nents);
1115 	if ((bytes + reqctx->processed) >= req->nbytes)
1116 		bytes  = req->nbytes - reqctx->processed;
1117 	else
1118 		bytes = ROUND_16(bytes);
1119 	if (unlikely(bytes > req->nbytes))
1120 		bytes = req->nbytes;
1121 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1122 				  CRYPTO_ALG_SUB_TYPE_CTR) {
1123 		bytes = adjust_ctr_overflow(req->info, bytes);
1124 	}
1125 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1126 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1127 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1128 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1129 				CTR_RFC3686_IV_SIZE);
1130 
1131 		/* initialize counter portion of counter block */
1132 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1133 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
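		/*
		 * The resulting 16-byte counter block is laid out as
		 * nonce[4] | IV[8] | counter[4], with the counter starting
		 * at 1 as RFC 3686 requires; only the nonce (from setkey)
		 * and the per-request IV come from the caller.
		 */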
1134 
1135 	} else {
1136 
1137 		memcpy(reqctx->iv, req->info, ivsize);
1138 	}
1139 	if (unlikely(bytes == 0)) {
1140 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1141 					   req->base.flags,
1142 					   req->src,
1143 					   req->dst,
1144 					   req->nbytes,
1145 					   req->info,
1146 					   op_type);
1147 		goto error;
1148 	}
1149 	reqctx->processed = bytes;
1150 	reqctx->dst = reqctx->dstsg;
1151 	reqctx->op = op_type;
1152 	wrparam.qid = qid;
1153 	wrparam.req = req;
1154 	wrparam.bytes = bytes;
1155 	*skb = create_cipher_wr(&wrparam);
1156 	if (IS_ERR(*skb)) {
1157 		err = PTR_ERR(*skb);
1158 		goto error;
1159 	}
1160 
1161 	return 0;
1162 error:
1163 	free_new_sg(reqctx->newdstsg);
1164 	reqctx->newdstsg = NULL;
1165 	return err;
1166 }
1167 
1168 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1169 {
1170 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1171 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1172 	struct sk_buff *skb = NULL;
1173 	int err;
1174 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1175 
1176 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1177 					    ctx->tx_qidx))) {
1178 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1179 			return -EBUSY;
1180 	}
1181 
1182 	err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1183 			       CHCR_ENCRYPT_OP);
1184 	if (err || !skb)
1185 		return  err;
1186 	skb->dev = u_ctx->lldi.ports[0];
1187 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1188 	chcr_send_wr(skb);
1189 	return -EINPROGRESS;
1190 }
1191 
1192 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1193 {
1194 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1195 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1196 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1197 	struct sk_buff *skb = NULL;
1198 	int err;
1199 
1200 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1201 					    ctx->tx_qidx))) {
1202 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1203 			return -EBUSY;
1204 	}
1205 
1206 	 err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1207 			       CHCR_DECRYPT_OP);
1208 	if (err || !skb)
1209 		return err;
1210 	skb->dev = u_ctx->lldi.ports[0];
1211 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1212 	chcr_send_wr(skb);
1213 	return -EINPROGRESS;
1214 }
1215 
1216 static int chcr_device_init(struct chcr_context *ctx)
1217 {
1218 	struct uld_ctx *u_ctx = NULL;
1219 	struct adapter *adap;
1220 	unsigned int id;
1221 	int txq_perchan, txq_idx, ntxq;
1222 	int err = 0, rxq_perchan, rxq_idx;
1223 
1224 	id = smp_processor_id();
1225 	if (!ctx->dev) {
1226 		u_ctx = assign_chcr_device();
1227 		if (!u_ctx) {
1228 			pr_err("chcr device assignment failed\n");
			err = -ENXIO;
1229 			goto out;
1230 		}
1231 		ctx->dev = u_ctx->dev;
1232 		adap = padap(ctx->dev);
1233 		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1234 				    adap->vres.ncrypto_fc);
1235 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1236 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1237 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1238 		rxq_idx += id % rxq_perchan;
1239 		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1240 		txq_idx += id % txq_perchan;
1241 		spin_lock(&ctx->dev->lock_chcr_dev);
1242 		ctx->rx_qidx = rxq_idx;
1243 		ctx->tx_qidx = txq_idx;
1244 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1245 		ctx->dev->rx_channel_id = 0;
1246 		spin_unlock(&ctx->dev->lock_chcr_dev);
1247 	}
1248 out:
1249 	return err;
1250 }
1251 
1252 static int chcr_cra_init(struct crypto_tfm *tfm)
1253 {
1254 	struct crypto_alg *alg = tfm->__crt_alg;
1255 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1256 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1257 
1258 	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1259 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1260 	if (IS_ERR(ablkctx->sw_cipher)) {
1261 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1262 		return PTR_ERR(ablkctx->sw_cipher);
1263 	}
1264 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1265 	return chcr_device_init(crypto_tfm_ctx(tfm));
1266 }
1267 
1268 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1269 {
1270 	struct crypto_alg *alg = tfm->__crt_alg;
1271 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1272 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1273 
1274 	/* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1275 	 * cannot be used as the fallback in chcr_handle_cipher_resp()
1276 	 */
1277 	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1278 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1279 	if (IS_ERR(ablkctx->sw_cipher)) {
1280 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1281 		return PTR_ERR(ablkctx->sw_cipher);
1282 	}
1283 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1284 	return chcr_device_init(crypto_tfm_ctx(tfm));
1285 }
1286 
1287 
1288 static void chcr_cra_exit(struct crypto_tfm *tfm)
1289 {
1290 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1291 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1292 
1293 	crypto_free_skcipher(ablkctx->sw_cipher);
1294 }
1295 
1296 static int get_alg_config(struct algo_param *params,
1297 			  unsigned int auth_size)
1298 {
1299 	switch (auth_size) {
1300 	case SHA1_DIGEST_SIZE:
1301 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1302 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1303 		params->result_size = SHA1_DIGEST_SIZE;
1304 		break;
1305 	case SHA224_DIGEST_SIZE:
1306 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1307 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1308 		params->result_size = SHA256_DIGEST_SIZE;
1309 		break;
1310 	case SHA256_DIGEST_SIZE:
1311 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1312 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1313 		params->result_size = SHA256_DIGEST_SIZE;
1314 		break;
1315 	case SHA384_DIGEST_SIZE:
1316 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1317 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1318 		params->result_size = SHA512_DIGEST_SIZE;
1319 		break;
1320 	case SHA512_DIGEST_SIZE:
1321 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1322 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1323 		params->result_size = SHA512_DIGEST_SIZE;
1324 		break;
1325 	default:
1326 		pr_err("chcr : ERROR, unsupported digest size\n");
1327 		return -EINVAL;
1328 	}
1329 	return 0;
1330 }
1331 
1332 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1333 {
1334 	crypto_free_shash(base_hash);
1335 }
1336 
1337 /**
1338  *	create_hash_wr - Create hash work request
1339  *	@req: ahash request
 *	@param: hash work request parameters
1340  */
1341 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1342 				      struct hash_wr_param *param)
1343 {
1344 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1345 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1346 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1347 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1348 	struct sk_buff *skb = NULL;
1349 	struct chcr_wr *chcr_req;
1350 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
1351 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1352 	unsigned int kctx_len = 0;
1353 	u8 hash_size_in_response = 0;
1354 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1355 		GFP_ATOMIC;
1356 	struct adapter *adap = padap(ctx->dev);
1357 
1358 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
1359 	kctx_len = param->alg_prm.result_size + iopad_alignment;
1360 	if (param->opad_needed)
1361 		kctx_len += param->alg_prm.result_size + iopad_alignment;
1362 
1363 	if (req_ctx->result)
1364 		hash_size_in_response = digestsize;
1365 	else
1366 		hash_size_in_response = param->alg_prm.result_size;
1367 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
1368 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1369 	if (!skb)
1370 		return skb;
1371 
1372 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1373 	chcr_req = __skb_put_zero(skb, transhdr_len);
1374 
1375 	chcr_req->sec_cpl.op_ivinsrtofst =
1376 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
1377 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1378 
1379 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1380 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1381 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1382 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1383 	chcr_req->sec_cpl.seqno_numivs =
1384 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1385 					 param->opad_needed, 0);
1386 
1387 	chcr_req->sec_cpl.ivgen_hdrlen =
1388 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1389 
1390 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1391 	       param->alg_prm.result_size);
1392 
1393 	if (param->opad_needed)
1394 		memcpy(chcr_req->key_ctx.key +
1395 		       ((param->alg_prm.result_size <= 32) ? 32 :
1396 			CHCR_HASH_MAX_DIGEST_SIZE),
1397 		       hmacctx->opad, param->alg_prm.result_size);
1398 
1399 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1400 					    param->alg_prm.mk_size, 0,
1401 					    param->opad_needed,
1402 					    ((kctx_len +
1403 					     sizeof(chcr_req->key_ctx)) >> 4));
1404 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1405 
1406 	skb_set_transport_header(skb, transhdr_len);
1407 	if (param->bfr_len != 0)
1408 		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
1409 				    param->bfr_len);
1410 	if (param->sg_len != 0)
1411 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
1412 	atomic_inc(&adap->chcr_stats.digest_rqst);
1413 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
1414 		    hash_size_in_response, 0, DUMMY_BYTES, 0);
1415 	req_ctx->skb = skb;
1416 	skb_get(skb);
1417 	return skb;
1418 }
1419 
1420 static int chcr_ahash_update(struct ahash_request *req)
1421 {
1422 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1423 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1424 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1425 	struct uld_ctx *u_ctx = NULL;
1426 	struct sk_buff *skb;
1427 	u8 remainder = 0, bs;
1428 	unsigned int nbytes = req->nbytes;
1429 	struct hash_wr_param params;
1430 
1431 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1432 
1433 	u_ctx = ULD_CTX(ctx);
1434 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1435 					    ctx->tx_qidx))) {
1436 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1437 			return -EBUSY;
1438 	}
1439 
1440 	if (nbytes + req_ctx->reqlen >= bs) {
1441 		remainder = (nbytes + req_ctx->reqlen) % bs;
1442 		nbytes = nbytes + req_ctx->reqlen - remainder;
1443 	} else {
1444 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1445 				   + req_ctx->reqlen, nbytes, 0);
1446 		req_ctx->reqlen += nbytes;
1447 		return 0;
1448 	}
1449 
1450 	params.opad_needed = 0;
1451 	params.more = 1;
1452 	params.last = 0;
1453 	params.sg_len = nbytes - req_ctx->reqlen;
1454 	params.bfr_len = req_ctx->reqlen;
1455 	params.scmd1 = 0;
1456 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1457 	req_ctx->result = 0;
1458 	req_ctx->data_len += params.sg_len + params.bfr_len;
1459 	skb = create_hash_wr(req, &params);
1460 	if (!skb)
1461 		return -ENOMEM;
1462 
1463 	if (remainder) {
1464 		u8 *temp;
1465 		/* Swap buffers */
1466 		temp = req_ctx->reqbfr;
1467 		req_ctx->reqbfr = req_ctx->skbfr;
1468 		req_ctx->skbfr = temp;
1469 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1470 				   req_ctx->reqbfr, remainder, req->nbytes -
1471 				   remainder);
1472 	}
1473 	req_ctx->reqlen = remainder;
1474 	skb->dev = u_ctx->lldi.ports[0];
1475 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1476 	chcr_send_wr(skb);
1477 
1478 	return -EINPROGRESS;
1479 }
1480 
1481 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1482 {
1483 	memset(bfr_ptr, 0, bs);
1484 	*bfr_ptr = 0x80;
1485 	if (bs == 64)
1486 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1487 	else
1488 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1489 }
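/*
 * The buffer written above is the final MD-style padding block: 0x80,
 * zeroes, then the total message length in bits (scmd1 << 3) as a big-endian
 * 64-bit value at offset 56 for 64-byte block hashes (SHA-1/224/256) or at
 * offset 120 for 128-byte ones (SHA-384/512). For example, a data length of
 * 64 bytes yields a length field of 512.
 */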
1490 
1491 static int chcr_ahash_final(struct ahash_request *req)
1492 {
1493 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1494 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1495 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1496 	struct hash_wr_param params;
1497 	struct sk_buff *skb;
1498 	struct uld_ctx *u_ctx = NULL;
1499 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1500 
1501 	u_ctx = ULD_CTX(ctx);
1502 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1503 		params.opad_needed = 1;
1504 	else
1505 		params.opad_needed = 0;
1506 	params.sg_len = 0;
1507 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1508 	req_ctx->result = 1;
1509 	params.bfr_len = req_ctx->reqlen;
1510 	req_ctx->data_len += params.bfr_len + params.sg_len;
1511 	if (req_ctx->reqlen == 0) {
1512 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1513 		params.last = 0;
1514 		params.more = 1;
1515 		params.scmd1 = 0;
1516 		params.bfr_len = bs;
1517 
1518 	} else {
1519 		params.scmd1 = req_ctx->data_len;
1520 		params.last = 1;
1521 		params.more = 0;
1522 	}
1523 	skb = create_hash_wr(req, &params);
1524 	if (!skb)
1525 		return -ENOMEM;
1526 
1527 	skb->dev = u_ctx->lldi.ports[0];
1528 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1529 	chcr_send_wr(skb);
1530 	return -EINPROGRESS;
1531 }
1532 
1533 static int chcr_ahash_finup(struct ahash_request *req)
1534 {
1535 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1536 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1537 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1538 	struct uld_ctx *u_ctx = NULL;
1539 	struct sk_buff *skb;
1540 	struct hash_wr_param params;
1541 	u8  bs;
1542 
1543 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1544 	u_ctx = ULD_CTX(ctx);
1545 
1546 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1547 					    ctx->tx_qidx))) {
1548 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1549 			return -EBUSY;
1550 	}
1551 
1552 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1553 		params.opad_needed = 1;
1554 	else
1555 		params.opad_needed = 0;
1556 
1557 	params.sg_len = req->nbytes;
1558 	params.bfr_len = req_ctx->reqlen;
1559 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1560 	req_ctx->data_len += params.bfr_len + params.sg_len;
1561 	req_ctx->result = 1;
1562 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1563 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1564 		params.last = 0;
1565 		params.more = 1;
1566 		params.scmd1 = 0;
1567 		params.bfr_len = bs;
1568 	} else {
1569 		params.scmd1 = req_ctx->data_len;
1570 		params.last = 1;
1571 		params.more = 0;
1572 	}
1573 
1574 	skb = create_hash_wr(req, &params);
1575 	if (!skb)
1576 		return -ENOMEM;
1577 
1578 	skb->dev = u_ctx->lldi.ports[0];
1579 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1580 	chcr_send_wr(skb);
1581 
1582 	return -EINPROGRESS;
1583 }
1584 
1585 static int chcr_ahash_digest(struct ahash_request *req)
1586 {
1587 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1588 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1589 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1590 	struct uld_ctx *u_ctx = NULL;
1591 	struct sk_buff *skb;
1592 	struct hash_wr_param params;
1593 	u8  bs;
1594 
1595 	rtfm->init(req);
1596 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1597 
1598 	u_ctx = ULD_CTX(ctx);
1599 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1600 					    ctx->tx_qidx))) {
1601 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1602 			return -EBUSY;
1603 	}
1604 
1605 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1606 		params.opad_needed = 1;
1607 	else
1608 		params.opad_needed = 0;
1609 
1610 	params.last = 0;
1611 	params.more = 0;
1612 	params.sg_len = req->nbytes;
1613 	params.bfr_len = 0;
1614 	params.scmd1 = 0;
1615 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1616 	req_ctx->result = 1;
1617 	req_ctx->data_len += params.bfr_len + params.sg_len;
1618 
1619 	if (req->nbytes == 0) {
1620 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1621 		params.more = 1;
1622 		params.bfr_len = bs;
1623 	}
1624 
1625 	skb = create_hash_wr(req, &params);
1626 	if (!skb)
1627 		return -ENOMEM;
1628 
1629 	skb->dev = u_ctx->lldi.ports[0];
1630 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1631 	chcr_send_wr(skb);
1632 	return -EINPROGRESS;
1633 }
1634 
1635 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1636 {
1637 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1638 	struct chcr_ahash_req_ctx *state = out;
1639 
1640 	state->reqlen = req_ctx->reqlen;
1641 	state->data_len = req_ctx->data_len;
1642 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1643 	memcpy(state->partial_hash, req_ctx->partial_hash,
1644 	       CHCR_HASH_MAX_DIGEST_SIZE);
1645 	return 0;
1646 }
1647 
1648 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1649 {
1650 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1651 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1652 
1653 	req_ctx->reqlen = state->reqlen;
1654 	req_ctx->data_len = state->data_len;
1655 	req_ctx->reqbfr = req_ctx->bfr1;
1656 	req_ctx->skbfr = req_ctx->bfr2;
1657 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1658 	memcpy(req_ctx->partial_hash, state->partial_hash,
1659 	       CHCR_HASH_MAX_DIGEST_SIZE);
1660 	return 0;
1661 }
1662 
1663 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1664 			     unsigned int keylen)
1665 {
1666 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1667 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1668 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1669 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1670 	unsigned int i, err = 0, updated_digestsize;
1671 
1672 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1673 
1674 	/* Use the key to calculate the ipad and opad. The ipad is sent with
1675 	 * the first request's data and the opad with the final hash result;
1676 	 * they are kept in hmacctx->ipad and hmacctx->opad respectively.
1677 	 */
1678 	shash->tfm = hmacctx->base_hash;
1679 	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1680 	if (keylen > bs) {
1681 		err = crypto_shash_digest(shash, key, keylen,
1682 					  hmacctx->ipad);
1683 		if (err)
1684 			goto out;
1685 		keylen = digestsize;
1686 	} else {
1687 		memcpy(hmacctx->ipad, key, keylen);
1688 	}
1689 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
1690 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
1691 
1692 	for (i = 0; i < bs / sizeof(int); i++) {
1693 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1694 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1695 	}
1696 
1697 	updated_digestsize = digestsize;
1698 	if (digestsize == SHA224_DIGEST_SIZE)
1699 		updated_digestsize = SHA256_DIGEST_SIZE;
1700 	else if (digestsize == SHA384_DIGEST_SIZE)
1701 		updated_digestsize = SHA512_DIGEST_SIZE;
1702 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1703 					hmacctx->ipad, digestsize);
1704 	if (err)
1705 		goto out;
1706 	chcr_change_order(hmacctx->ipad, updated_digestsize);
1707 
1708 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
1709 					hmacctx->opad, digestsize);
1710 	if (err)
1711 		goto out;
1712 	chcr_change_order(hmacctx->opad, updated_digestsize);
1713 out:
1714 	return err;
1715 }
1716 
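/*
 * XTS setkey: the supplied key holds two AES keys back to back. Program
 * the software fallback, keep a copy of the key, derive the reverse-round
 * decrypt key (key_len << 2 is the bit width of one half of the double
 * length key) and fill in the key-context header.
 */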
1717 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1718 			       unsigned int key_len)
1719 {
1720 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
1721 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1722 	unsigned short context_size = 0;
1723 	int err;
1724 
1725 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
1726 	if (err)
1727 		goto badkey_err;
1728 
1729 	memcpy(ablkctx->key, key, key_len);
1730 	ablkctx->enckey_len = key_len;
1731 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1732 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1733 	ablkctx->key_ctx_hdr =
1734 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1735 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1736 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1737 				 CHCR_KEYCTX_NO_KEY, 1,
1738 				 0, context_size);
1739 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1740 	return 0;
1741 badkey_err:
1742 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1743 	ablkctx->enckey_len = 0;
1744 
1745 	return err;
1746 }
1747 
1748 static int chcr_sha_init(struct ahash_request *areq)
1749 {
1750 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1751 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1752 	int digestsize =  crypto_ahash_digestsize(tfm);
1753 
1754 	req_ctx->data_len = 0;
1755 	req_ctx->reqlen = 0;
1756 	req_ctx->reqbfr = req_ctx->bfr1;
1757 	req_ctx->skbfr = req_ctx->bfr2;
1758 	req_ctx->skb = NULL;
1759 	req_ctx->result = 0;
1760 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
1761 	return 0;
1762 }
1763 
1764 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1765 {
1766 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1767 				 sizeof(struct chcr_ahash_req_ctx));
1768 	return chcr_device_init(crypto_tfm_ctx(tfm));
1769 }
1770 
1771 static int chcr_hmac_init(struct ahash_request *areq)
1772 {
1773 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1774 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1775 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1776 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1777 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1778 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1779 
1780 	chcr_sha_init(areq);
1781 	req_ctx->data_len = bs;
1782 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1783 		if (digestsize == SHA224_DIGEST_SIZE)
1784 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1785 			       SHA256_DIGEST_SIZE);
1786 		else if (digestsize == SHA384_DIGEST_SIZE)
1787 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1788 			       SHA512_DIGEST_SIZE);
1789 		else
1790 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1791 			       digestsize);
1792 	}
1793 	return 0;
1794 }
1795 
1796 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1797 {
1798 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1799 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1800 	unsigned int digestsize =
1801 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1802 
1803 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1804 				 sizeof(struct chcr_ahash_req_ctx));
1805 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
1806 	if (IS_ERR(hmacctx->base_hash))
1807 		return PTR_ERR(hmacctx->base_hash);
1808 	return chcr_device_init(crypto_tfm_ctx(tfm));
1809 }
1810 
1811 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1812 {
1813 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1814 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1815 
1816 	if (hmacctx->base_hash) {
1817 		chcr_free_shash(hmacctx->base_hash);
1818 		hmacctx->base_hash = NULL;
1819 	}
1820 }
1821 
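/*
 * Helpers for scatterlists with entries larger than CHCR_SG_SIZE:
 * is_newsg() reports whether a split is needed and how many entries the
 * split list requires, alloc_new_sg() builds that list page by page and
 * free_new_sg() releases it. The AEAD paths apply this to the destination
 * list before mapping it into the CPL_RX_PHYS_DSGL.
 */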
1822 static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
1823 {
1824 	int nents = 0;
1825 	int ret = 0;
1826 
1827 	while (sgl) {
1828 		if (sgl->length > CHCR_SG_SIZE)
1829 			ret = 1;
1830 		nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
1831 		sgl = sg_next(sgl);
1832 	}
1833 	*newents = nents;
1834 	return ret;
1835 }
1836 
1837 static inline void free_new_sg(struct scatterlist *sgl)
1838 {
1839 	kfree(sgl);
1840 }
1841 
1842 static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
1843 				       unsigned int nents)
1844 {
1845 	struct scatterlist *newsg, *sg;
1846 	int i, len, processed = 0;
1847 	struct page *spage;
1848 	int offset;
1849 
1850 	newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
1851 	if (!newsg)
1852 		return ERR_PTR(-ENOMEM);
1853 	sg = newsg;
1854 	sg_init_table(sg, nents);
1855 	offset = sgl->offset;
1856 	spage = sg_page(sgl);
1857 	for (i = 0; i < nents; i++) {
1858 		len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
1859 		sg_set_page(sg, spage, len, offset);
1860 		processed += len;
1861 		offset += len;
1862 		if (offset >= PAGE_SIZE) {
1863 			offset = offset % PAGE_SIZE;
1864 			spage++;
1865 		}
1866 		if (processed == sgl->length) {
1867 			processed = 0;
1868 			sgl = sg_next(sgl);
1869 			if (!sgl)
1870 				break;
1871 			spage = sg_page(sgl);
1872 			offset = sgl->offset;
1873 		}
1874 		sg = sg_next(sg);
1875 	}
1876 	return newsg;
1877 }
1878 
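/*
 * For out-of-place requests, copy the associated data from source to
 * destination using the null skcipher so the destination buffer already
 * holds the AAD before the hardware writes the cipher result.
 */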
1879 static int chcr_copy_assoc(struct aead_request *req,
1880 				struct chcr_aead_ctx *ctx)
1881 {
1882 	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1883 
1884 	skcipher_request_set_tfm(skreq, ctx->null);
1885 	skcipher_request_set_callback(skreq, aead_request_flags(req),
1886 			NULL, NULL);
1887 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1888 			NULL);
1889 
1890 	return crypto_skcipher_encrypt(skreq);
1891 }
1892 static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
1893 				   int aadmax, int wrlen,
1894 				   unsigned short op_type)
1895 {
1896 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
1897 
1898 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
1899 	    (req->assoclen > aadmax) ||
1900 	    (src_nent > MAX_SKB_FRAGS) ||
1901 	    (wrlen > MAX_WR_SIZE))
1902 		return 1;
1903 	return 0;
1904 }
1905 
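/*
 * Replay the request on the software AEAD allocated at init time, keeping
 * the caller's completion callback.
 */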
1906 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
1907 {
1908 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1909 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1910 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1911 	struct aead_request *subreq = aead_request_ctx(req);
1912 
1913 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
1914 	aead_request_set_callback(subreq, req->base.flags,
1915 				  req->base.complete, req->base.data);
1916 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
1917 			       req->iv);
1918 	aead_request_set_ad(subreq, req->assoclen);
1919 	return op_type ? crypto_aead_decrypt(subreq) :
1920 		crypto_aead_encrypt(subreq);
1921 }
1922 
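/*
 * Build the work request for authenc (AES-CBC + HMAC) and null-auth
 * requests: validate lengths, split oversized destination SG entries when
 * needed, map the destination into a CPL_RX_PHYS_DSGL, fill the SEC_CPL
 * and key context (encrypt key or reverse-round decrypt key followed by
 * the precomputed ipad/opad digests), then append AAD, IV and payload as
 * skb fragments. Requests the hardware cannot take are diverted to the
 * software fallback.
 */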
1923 static struct sk_buff *create_authenc_wr(struct aead_request *req,
1924 					 unsigned short qid,
1925 					 int size,
1926 					 unsigned short op_type)
1927 {
1928 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1929 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1930 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1931 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1932 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1933 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1934 	struct sk_buff *skb = NULL;
1935 	struct chcr_wr *chcr_req;
1936 	struct cpl_rx_phys_dsgl *phys_cpl;
1937 	struct phys_sge_parm sg_param;
1938 	struct scatterlist *src;
1939 	unsigned int frags = 0, transhdr_len;
1940 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1941 	unsigned int   kctx_len = 0, nents;
1942 	unsigned short stop_offset = 0;
1943 	unsigned int  assoclen = req->assoclen;
1944 	unsigned int  authsize = crypto_aead_authsize(tfm);
1945 	int error = -EINVAL, src_nent;
1946 	int null = 0;
1947 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1948 		GFP_ATOMIC;
1949 	struct adapter *adap = padap(ctx->dev);
1950 
1951 	reqctx->newdstsg = NULL;
1952 	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
1953 						   authsize);
1954 	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
1955 		goto err;
1956 
1957 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1958 		goto err;
1959 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1960 	if (src_nent < 0)
1961 		goto err;
1962 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1963 
1964 	if (req->src != req->dst) {
1965 		error = chcr_copy_assoc(req, aeadctx);
1966 		if (error)
1967 			return ERR_PTR(error);
1968 	}
1969 	if (dst_size && is_newsg(req->dst, &nents)) {
1970 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1971 		if (IS_ERR(reqctx->newdstsg))
1972 			return ERR_CAST(reqctx->newdstsg);
1973 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1974 					       reqctx->newdstsg, req->assoclen);
1975 	} else {
1976 		if (req->src == req->dst)
1977 			reqctx->dst = src;
1978 		else
1979 			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1980 						       req->dst, req->assoclen);
1981 	}
1982 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1983 		null = 1;
1984 		assoclen = 0;
1985 	}
1986 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1987 					     (op_type ? -authsize : authsize));
1988 	if (reqctx->dst_nents < 0) {
1989 		pr_err("AUTHENC:Invalid Destination sg entries\n");
1990 		error = -EINVAL;
1991 		goto err;
1992 	}
1993 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1994 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
1995 		- sizeof(chcr_req->key_ctx);
1996 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1997 	if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
1998 			T6_MAX_AAD_SIZE,
1999 			transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
2000 				op_type)) {
2001 		atomic_inc(&adap->chcr_stats.fallback);
2002 		free_new_sg(reqctx->newdstsg);
2003 		reqctx->newdstsg = NULL;
2004 		return ERR_PTR(chcr_aead_fallback(req, op_type));
2005 	}
2006 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2007 	if (!skb) {
2008 		error = -ENOMEM;
2009 		goto err;
2010 	}
2011 
2012 	/* LLD is going to write the sge hdr. */
2013 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2014 
2015 	/* Write WR */
2016 	chcr_req = __skb_put_zero(skb, transhdr_len);
2017 
2018 	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2019 
2020 	/*
2021 	 * Input order is AAD, IV and Payload, where the IV should be included
2022 	 * as part of the authdata. All other fields should be filled according
2023 	 * to the hardware spec.
2024 	 */
2025 	chcr_req->sec_cpl.op_ivinsrtofst =
2026 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
2027 				       (ivsize ? (assoclen + 1) : 0));
2028 	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
2029 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2030 					assoclen ? 1 : 0, assoclen,
2031 					assoclen + ivsize + 1,
2032 					(stop_offset & 0x1F0) >> 4);
2033 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2034 					stop_offset & 0xF,
2035 					null ? 0 : assoclen + ivsize + 1,
2036 					stop_offset, stop_offset);
2037 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2038 					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2039 					CHCR_SCMD_CIPHER_MODE_AES_CBC,
2040 					actx->auth_mode, aeadctx->hmac_ctrl,
2041 					ivsize >> 1);
2042 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2043 					 0, 1, dst_size);
2044 
2045 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2046 	if (op_type == CHCR_ENCRYPT_OP)
2047 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2048 		       aeadctx->enckey_len);
2049 	else
2050 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2051 		       aeadctx->enckey_len);
2052 
2053 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
2054 					4), actx->h_iopad, kctx_len -
2055 				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2056 
2057 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2058 	sg_param.nents = reqctx->dst_nents;
2059 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2060 	sg_param.qid = qid;
2061 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2062 					reqctx->dst, &sg_param);
2063 	if (error)
2064 		goto dstmap_fail;
2065 
2066 	skb_set_transport_header(skb, transhdr_len);
2067 
2068 	if (assoclen) {
2069 		/* AAD buffer in */
2070 		write_sg_to_skb(skb, &frags, req->src, assoclen);
2071 
2072 	}
2073 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
2074 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
2075 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2076 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2077 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2078 	reqctx->skb = skb;
2079 	skb_get(skb);
2080 
2081 	return skb;
2082 dstmap_fail:
2083 	/* ivmap_fail: */
2084 	kfree_skb(skb);
2085 err:
2086 	free_new_sg(reqctx->newdstsg);
2087 	reqctx->newdstsg = NULL;
2088 	return ERR_PTR(error);
2089 }
2090 
2091 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2092 {
2093 	__be32 data;
2094 
2095 	memset(block, 0, csize);
2096 	block += csize;
2097 
2098 	if (csize >= 4)
2099 		csize = 4;
2100 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2101 		return -EOVERFLOW;
2102 
2103 	data = cpu_to_be32(msglen);
2104 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2105 
2106 	return 0;
2107 }
2108 
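/*
 * Build the CCM B0 block in reqctx->scratch_pad: start from the formatted
 * IV, encode the tag length into the flags byte, set the Adata bit when
 * AAD is present and store the message length in the trailing L bytes.
 */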
2109 static void generate_b0(struct aead_request *req,
2110 			struct chcr_aead_ctx *aeadctx,
2111 			unsigned short op_type)
2112 {
2113 	unsigned int l, lp, m;
2114 	int rc;
2115 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2116 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2117 	u8 *b0 = reqctx->scratch_pad;
2118 
2119 	m = crypto_aead_authsize(aead);
2120 
2121 	memcpy(b0, reqctx->iv, 16);
2122 
2123 	lp = b0[0];
2124 	l = lp + 1;
2125 
2126 	/* set m, bits 3-5 */
2127 	*b0 |= (8 * ((m - 2) / 2));
2128 
2129 	/* set adata, bit 6, if associated data is used */
2130 	if (req->assoclen)
2131 		*b0 |= 64;
2132 	rc = set_msg_len(b0 + 16 - l,
2133 			 (op_type == CHCR_DECRYPT_OP) ?
2134 			 req->cryptlen - m : req->cryptlen, l);
2135 }
2136 
2137 static inline int crypto_ccm_check_iv(const u8 *iv)
2138 {
2139 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2140 	if (iv[0] < 1 || iv[0] > 7)
2141 		return -EINVAL;
2142 
2143 	return 0;
2144 }
2145 
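/*
 * Format the CCM nonce in reqctx->iv (for RFC4309: flags byte, 3-byte salt
 * and the 8-byte IV from the request), store the encoded AAD length right
 * after B0 in the scratch pad, then clear the counter bytes of the IV.
 */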
2146 static int ccm_format_packet(struct aead_request *req,
2147 			     struct chcr_aead_ctx *aeadctx,
2148 			     unsigned int sub_type,
2149 			     unsigned short op_type)
2150 {
2151 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2152 	int rc = 0;
2153 
2154 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2155 		reqctx->iv[0] = 3;
2156 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2157 		memcpy(reqctx->iv + 4, req->iv, 8);
2158 		memset(reqctx->iv + 12, 0, 4);
2159 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2160 			htons(req->assoclen - 8);
2161 	} else {
2162 		memcpy(reqctx->iv, req->iv, 16);
2163 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2164 			htons(req->assoclen);
2165 	}
2166 	generate_b0(req, aeadctx, op_type);
2167 	/* zero the ctr value */
2168 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2169 	return rc;
2170 }
2171 
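/*
 * Fill the CPL_TX_SEC_PDU for CCM. All offsets are shifted by ccm_xtra,
 * which accounts for the B0 block and, when AAD is present, the encoded
 * AAD-length field that precede the real AAD in the payload.
 */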
2172 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2173 				  unsigned int dst_size,
2174 				  struct aead_request *req,
2175 				  unsigned short op_type,
2176 					  struct chcr_context *chcrctx)
2177 {
2178 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2179 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2180 	unsigned int ivsize = AES_BLOCK_SIZE;
2181 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2182 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2183 	unsigned int c_id = chcrctx->dev->rx_channel_id;
2184 	unsigned int ccm_xtra;
2185 	unsigned char tag_offset = 0, auth_offset = 0;
2186 	unsigned int assoclen;
2187 
2188 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2189 		assoclen = req->assoclen - 8;
2190 	else
2191 		assoclen = req->assoclen;
2192 	ccm_xtra = CCM_B0_SIZE +
2193 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2194 
2195 	auth_offset = req->cryptlen ?
2196 		(assoclen + ivsize + 1 + ccm_xtra) : 0;
2197 	if (op_type == CHCR_DECRYPT_OP) {
2198 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2199 			tag_offset = crypto_aead_authsize(tfm);
2200 		else
2201 			auth_offset = 0;
2202 	}
2203 
2204 
2205 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2206 					 2, (ivsize ?  (assoclen + 1) :  0) +
2207 					 ccm_xtra);
2208 	sec_cpl->pldlen =
2209 		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
2210 	/* For CCM, B0 is always present, so AAD start is always 1 */
2211 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2212 					1, assoclen + ccm_xtra, assoclen
2213 					+ ivsize + 1 + ccm_xtra, 0);
2214 
2215 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2216 					auth_offset, tag_offset,
2217 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2218 					crypto_aead_authsize(tfm));
2219 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2220 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2221 					cipher_mode, mac_mode,
2222 					aeadctx->hmac_ctrl, ivsize >> 1);
2223 
2224 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2225 					1, dst_size);
2226 }
2227 
2228 int aead_ccm_validate_input(unsigned short op_type,
2229 			    struct aead_request *req,
2230 			    struct chcr_aead_ctx *aeadctx,
2231 			    unsigned int sub_type)
2232 {
2233 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2234 		if (crypto_ccm_check_iv(req->iv)) {
2235 			pr_err("CCM: IV check fails\n");
2236 			return -EINVAL;
2237 		}
2238 	} else {
2239 		if (req->assoclen != 16 && req->assoclen != 20) {
2240 			pr_err("RFC4309: Invalid AAD length %d\n",
2241 			       req->assoclen);
2242 			return -EINVAL;
2243 		}
2244 	}
2245 	if (aeadctx->enckey_len == 0) {
2246 		pr_err("CCM: Encryption key not set\n");
2247 		return -EINVAL;
2248 	}
2249 	return 0;
2250 }
2251 
2252 unsigned int fill_aead_req_fields(struct sk_buff *skb,
2253 				  struct aead_request *req,
2254 				  struct scatterlist *src,
2255 				  unsigned int ivsize,
2256 				  struct chcr_aead_ctx *aeadctx)
2257 {
2258 	unsigned int frags = 0;
2259 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2260 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2261 	/* B0 and AAD length (if AAD is present) */
2262 
2263 	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
2264 				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
2265 	if (req->assoclen) {
2266 		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2267 			write_sg_to_skb(skb, &frags, req->src,
2268 					req->assoclen - 8);
2269 		else
2270 			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
2271 	}
2272 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
2273 	if (req->cryptlen)
2274 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
2275 
2276 	return frags;
2277 }
2278 
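/*
 * Build the work request for CCM and RFC4309: same overall flow as
 * create_authenc_wr(), but the key context carries the AES key twice,
 * the SEC_CPL is filled by fill_sec_cpl_for_aead() and B0/AAD-length
 * formatting is done by ccm_format_packet().
 */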
2279 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2280 					  unsigned short qid,
2281 					  int size,
2282 					  unsigned short op_type)
2283 {
2284 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2285 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2286 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2287 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2288 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2289 	struct sk_buff *skb = NULL;
2290 	struct chcr_wr *chcr_req;
2291 	struct cpl_rx_phys_dsgl *phys_cpl;
2292 	struct phys_sge_parm sg_param;
2293 	struct scatterlist *src;
2294 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
2295 	unsigned int dst_size = 0, kctx_len, nents;
2296 	unsigned int sub_type;
2297 	unsigned int authsize = crypto_aead_authsize(tfm);
2298 	int error = -EINVAL, src_nent;
2299 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2300 		GFP_ATOMIC;
2301 	struct adapter *adap = padap(ctx->dev);
2302 
2303 	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
2304 						   authsize);
2305 	reqctx->newdstsg = NULL;
2306 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2307 		goto err;
2308 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
2309 	if (src_nent < 0)
2310 		goto err;
2311 
2312 	sub_type = get_aead_subtype(tfm);
2313 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
2314 	if (req->src != req->dst) {
2315 		error = chcr_copy_assoc(req, aeadctx);
2316 		if (error) {
2317 			pr_err("AAD copy to destination buffer fails\n");
2318 			return ERR_PTR(error);
2319 		}
2320 	}
2321 	if (dst_size && is_newsg(req->dst, &nents)) {
2322 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2323 		if (IS_ERR(reqctx->newdstsg))
2324 			return ERR_CAST(reqctx->newdstsg);
2325 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2326 					       reqctx->newdstsg, req->assoclen);
2327 	} else {
2328 		if (req->src == req->dst)
2329 			reqctx->dst = src;
2330 		else
2331 			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2332 						       req->dst, req->assoclen);
2333 	}
2334 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2335 					     (op_type ? -authsize : authsize));
2336 	if (reqctx->dst_nents < 0) {
2337 		pr_err("CCM:Invalid Destination sg entries\n");
2338 		error = -EINVAL;
2339 		goto err;
2340 	}
2341 	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2342 	if (error)
2343 		goto err;
2344 
2345 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2346 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
2347 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2348 	if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
2349 			    T6_MAX_AAD_SIZE - 18,
2350 			    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
2351 			    op_type)) {
2352 		atomic_inc(&adap->chcr_stats.fallback);
2353 		free_new_sg(reqctx->newdstsg);
2354 		reqctx->newdstsg = NULL;
2355 		return ERR_PTR(chcr_aead_fallback(req, op_type));
2356 	}
2357 
2358 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
2359 
2360 	if (!skb) {
2361 		error = -ENOMEM;
2362 		goto err;
2363 	}
2364 
2365 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2366 
2367 	chcr_req = __skb_put_zero(skb, transhdr_len);
2368 
2369 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
2370 
2371 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2372 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2373 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2374 					16), aeadctx->key, aeadctx->enckey_len);
2375 
2376 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2377 	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2378 	if (error)
2379 		goto dstmap_fail;
2380 
2381 	sg_param.nents = reqctx->dst_nents;
2382 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2383 	sg_param.qid = qid;
2384 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2385 				 reqctx->dst, &sg_param);
2386 	if (error)
2387 		goto dstmap_fail;
2388 
2389 	skb_set_transport_header(skb, transhdr_len);
2390 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
2391 	atomic_inc(&adap->chcr_stats.aead_rqst);
2392 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
2393 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2394 	reqctx->skb = skb;
2395 	skb_get(skb);
2396 	return skb;
2397 dstmap_fail:
2398 	kfree_skb(skb);
2399 err:
2400 	free_new_sg(reqctx->newdstsg);
2401 	reqctx->newdstsg = NULL;
2402 	return ERR_PTR(error);
2403 }
2404 
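/*
 * Build the work request for GCM and RFC4106: the key context carries the
 * AES key followed by the precomputed GHASH subkey H, and the 16-byte IV
 * passed to the hardware is salt | IV | 0x00000001 for RFC4106 or the
 * 12-byte request IV with the 32-bit counter appended.
 */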
2405 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2406 				     unsigned short qid,
2407 				     int size,
2408 				     unsigned short op_type)
2409 {
2410 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2411 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2412 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2413 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2414 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2415 	struct sk_buff *skb = NULL;
2416 	struct chcr_wr *chcr_req;
2417 	struct cpl_rx_phys_dsgl *phys_cpl;
2418 	struct phys_sge_parm sg_param;
2419 	struct scatterlist *src;
2420 	unsigned int frags = 0, transhdr_len;
2421 	unsigned int ivsize = AES_BLOCK_SIZE;
2422 	unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
2423 	unsigned char tag_offset = 0;
2424 	unsigned int authsize = crypto_aead_authsize(tfm);
2425 	int error = -EINVAL, src_nent;
2426 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2427 		GFP_ATOMIC;
2428 	struct adapter *adap = padap(ctx->dev);
2429 
2430 	reqctx->newdstsg = NULL;
2431 	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
2432 						    authsize);
2433 	/* validate key size */
2434 	if (aeadctx->enckey_len == 0)
2435 		goto err;
2436 
2437 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2438 		goto err;
2439 	src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
2440 	if (src_nent < 0)
2441 		goto err;
2442 
2443 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
2444 	if (req->src != req->dst) {
2445 		error = chcr_copy_assoc(req, aeadctx);
2446 		if (error)
2447 			return	ERR_PTR(error);
2448 	}
2449 
2450 	if (dst_size && is_newsg(req->dst, &nents)) {
2451 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2452 		if (IS_ERR(reqctx->newdstsg))
2453 			return ERR_CAST(reqctx->newdstsg);
2454 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2455 					       reqctx->newdstsg, assoclen);
2456 	} else {
2457 		if (req->src == req->dst)
2458 			reqctx->dst = src;
2459 		else
2460 			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2461 						       req->dst, assoclen);
2462 	}
2463 
2464 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2465 					     (op_type ? -authsize : authsize));
2466 	if (reqctx->dst_nents < 0) {
2467 		pr_err("GCM:Invalid Destination sg entries\n");
2468 		error = -EINVAL;
2469 		goto err;
2470 	}
2471 
2472 
2473 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2474 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
2475 		AEAD_H_SIZE;
2476 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2477 	if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
2478 			    T6_MAX_AAD_SIZE,
2479 			    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
2480 			    op_type)) {
2481 		atomic_inc(&adap->chcr_stats.fallback);
2482 		free_new_sg(reqctx->newdstsg);
2483 		reqctx->newdstsg = NULL;
2484 		return ERR_PTR(chcr_aead_fallback(req, op_type));
2485 	}
2486 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2487 	if (!skb) {
2488 		error = -ENOMEM;
2489 		goto err;
2490 	}
2491 
2492 	/* NIC driver is going to write the sge hdr. */
2493 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2494 
2495 	chcr_req = __skb_put_zero(skb, transhdr_len);
2496 
2497 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2498 		assoclen = req->assoclen - 8;
2499 
2500 	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2501 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2502 					ctx->dev->rx_channel_id, 2, (ivsize ?
2503 					(assoclen + 1) : 0));
2504 	chcr_req->sec_cpl.pldlen =
2505 		htonl(assoclen + ivsize + req->cryptlen);
2506 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2507 					assoclen ? 1 : 0, assoclen,
2508 					assoclen + ivsize + 1, 0);
2509 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
2510 		FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
2511 					tag_offset, tag_offset);
2512 	chcr_req->sec_cpl.seqno_numivs =
2513 		FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
2514 				CHCR_ENCRYPT_OP) ? 1 : 0,
2515 				CHCR_SCMD_CIPHER_MODE_AES_GCM,
2516 				CHCR_SCMD_AUTH_MODE_GHASH,
2517 				aeadctx->hmac_ctrl, ivsize >> 1);
2518 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2519 					0, 1, dst_size);
2520 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2521 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2522 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2523 				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2524 
2525 	/* prepare a 16 byte iv */
2526 	/* S   A   L  T |  IV | 0x00000001 */
2527 	if (get_aead_subtype(tfm) ==
2528 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2529 		memcpy(reqctx->iv, aeadctx->salt, 4);
2530 		memcpy(reqctx->iv + 4, req->iv, 8);
2531 	} else {
2532 		memcpy(reqctx->iv, req->iv, 12);
2533 	}
2534 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
2535 
2536 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2537 	sg_param.nents = reqctx->dst_nents;
2538 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2539 	sg_param.qid = qid;
2540 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2541 					  reqctx->dst, &sg_param);
2542 	if (error)
2543 		goto dstmap_fail;
2544 
2545 	skb_set_transport_header(skb, transhdr_len);
2546 	write_sg_to_skb(skb, &frags, req->src, assoclen);
2547 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
2548 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
2549 	atomic_inc(&adap->chcr_stats.aead_rqst);
2550 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2551 			sizeof(struct cpl_rx_phys_dsgl) + dst_size,
2552 			reqctx->verify);
2553 	reqctx->skb = skb;
2554 	skb_get(skb);
2555 	return skb;
2556 
2557 dstmap_fail:
2558 	/* ivmap_fail: */
2559 	kfree_skb(skb);
2560 err:
2561 	free_new_sg(reqctx->newdstsg);
2562 	reqctx->newdstsg = NULL;
2563 	return ERR_PTR(error);
2564 }
2565 
2566 
2567 
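/*
 * Allocate the software fallback AEAD and take a reference on the default
 * null skcipher used by chcr_copy_assoc(); size the request context so
 * that either the hardware path or a fallback sub-request fits in it.
 */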
2568 static int chcr_aead_cra_init(struct crypto_aead *tfm)
2569 {
2570 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2571 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2572 	struct aead_alg *alg = crypto_aead_alg(tfm);
2573 
2574 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
2575 					       CRYPTO_ALG_NEED_FALLBACK |
2576 					       CRYPTO_ALG_ASYNC);
2577 	if  (IS_ERR(aeadctx->sw_cipher))
2578 		return PTR_ERR(aeadctx->sw_cipher);
2579 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
2580 				 sizeof(struct aead_request) +
2581 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
2582 	aeadctx->null = crypto_get_default_null_skcipher();
2583 	if (IS_ERR(aeadctx->null))
2584 		return PTR_ERR(aeadctx->null);
2585 	return chcr_device_init(ctx);
2586 }
2587 
2588 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
2589 {
2590 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2591 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2592 
2593 	crypto_put_default_null_skcipher();
2594 	crypto_free_aead(aeadctx->sw_cipher);
2595 }
2596 
2597 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
2598 					unsigned int authsize)
2599 {
2600 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2601 
2602 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2603 	aeadctx->mayverify = VERIFY_HW;
2604 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2605 }
2606 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2607 				    unsigned int authsize)
2608 {
2609 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2610 	u32 maxauth = crypto_aead_maxauthsize(tfm);
2611 
2612 	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
2613 	 * does not hold for SHA1, so the authsize == 12 check must come before
2614 	 * the authsize == (maxauth >> 1) check.
2615 	 */
2616 	if (authsize == ICV_4) {
2617 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2618 		aeadctx->mayverify = VERIFY_HW;
2619 	} else if (authsize == ICV_6) {
2620 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2621 		aeadctx->mayverify = VERIFY_HW;
2622 	} else if (authsize == ICV_10) {
2623 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2624 		aeadctx->mayverify = VERIFY_HW;
2625 	} else if (authsize == ICV_12) {
2626 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2627 		aeadctx->mayverify = VERIFY_HW;
2628 	} else if (authsize == ICV_14) {
2629 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2630 		aeadctx->mayverify = VERIFY_HW;
2631 	} else if (authsize == (maxauth >> 1)) {
2632 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2633 		aeadctx->mayverify = VERIFY_HW;
2634 	} else if (authsize == maxauth) {
2635 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2636 		aeadctx->mayverify = VERIFY_HW;
2637 	} else {
2638 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2639 		aeadctx->mayverify = VERIFY_SW;
2640 	}
2641 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2642 }
2643 
2644 
2645 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2646 {
2647 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2648 
2649 	switch (authsize) {
2650 	case ICV_4:
2651 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2652 		aeadctx->mayverify = VERIFY_HW;
2653 		break;
2654 	case ICV_8:
2655 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2656 		aeadctx->mayverify = VERIFY_HW;
2657 		break;
2658 	case ICV_12:
2659 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2660 		aeadctx->mayverify = VERIFY_HW;
2661 		break;
2662 	case ICV_14:
2663 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2664 		aeadctx->mayverify = VERIFY_HW;
2665 		break;
2666 	case ICV_16:
2667 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2668 		aeadctx->mayverify = VERIFY_HW;
2669 		break;
2670 	case ICV_13:
2671 	case ICV_15:
2672 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2673 		aeadctx->mayverify = VERIFY_SW;
2674 		break;
2675 	default:
2677 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2678 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2679 		return -EINVAL;
2680 	}
2681 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2682 }
2683 
2684 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2685 					  unsigned int authsize)
2686 {
2687 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2688 
2689 	switch (authsize) {
2690 	case ICV_8:
2691 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2692 		aeadctx->mayverify = VERIFY_HW;
2693 		break;
2694 	case ICV_12:
2695 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2696 		aeadctx->mayverify = VERIFY_HW;
2697 		break;
2698 	case ICV_16:
2699 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2700 		aeadctx->mayverify = VERIFY_HW;
2701 		break;
2702 	default:
2703 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2704 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2705 		return -EINVAL;
2706 	}
2707 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2708 }
2709 
2710 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2711 				unsigned int authsize)
2712 {
2713 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2714 
2715 	switch (authsize) {
2716 	case ICV_4:
2717 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2718 		aeadctx->mayverify = VERIFY_HW;
2719 		break;
2720 	case ICV_6:
2721 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2722 		aeadctx->mayverify = VERIFY_HW;
2723 		break;
2724 	case ICV_8:
2725 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2726 		aeadctx->mayverify = VERIFY_HW;
2727 		break;
2728 	case ICV_10:
2729 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2730 		aeadctx->mayverify = VERIFY_HW;
2731 		break;
2732 	case ICV_12:
2733 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2734 		aeadctx->mayverify = VERIFY_HW;
2735 		break;
2736 	case ICV_14:
2737 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2738 		aeadctx->mayverify = VERIFY_HW;
2739 		break;
2740 	case ICV_16:
2741 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2742 		aeadctx->mayverify = VERIFY_HW;
2743 		break;
2744 	default:
2745 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2746 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2747 		return -EINVAL;
2748 	}
2749 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2750 }
2751 
2752 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2753 				const u8 *key,
2754 				unsigned int keylen)
2755 {
2756 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2757 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2758 	unsigned char ck_size, mk_size;
2759 	int key_ctx_size = 0;
2760 
2761 	key_ctx_size = sizeof(struct _key_ctx) +
2762 		((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
2763 	if (keylen == AES_KEYSIZE_128) {
2764 		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2765 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2766 	} else if (keylen == AES_KEYSIZE_192) {
2767 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2768 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2769 	} else if (keylen == AES_KEYSIZE_256) {
2770 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2771 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2772 	} else {
2773 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2774 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2775 		aeadctx->enckey_len = 0;
2776 		return	-EINVAL;
2777 	}
2778 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2779 						key_ctx_size >> 4);
2780 	memcpy(aeadctx->key, key, keylen);
2781 	aeadctx->enckey_len = keylen;
2782 
2783 	return 0;
2784 }
2785 
2786 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2787 				const u8 *key,
2788 				unsigned int keylen)
2789 {
2790 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2791 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2792 	int error;
2793 
2794 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2795 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2796 			      CRYPTO_TFM_REQ_MASK);
2797 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2798 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2799 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2800 			      CRYPTO_TFM_RES_MASK);
2801 	if (error)
2802 		return error;
2803 	return chcr_ccm_common_setkey(aead, key, keylen);
2804 }
2805 
2806 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2807 				    unsigned int keylen)
2808 {
2809 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2810 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2811 	int error;
2812 
2813 	if (keylen < 3) {
2814 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2815 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2816 		aeadctx->enckey_len = 0;
2817 		return	-EINVAL;
2818 	}
2819 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2820 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2821 			      CRYPTO_TFM_REQ_MASK);
2822 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2823 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2824 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2825 			      CRYPTO_TFM_RES_MASK);
2826 	if (error)
2827 		return error;
2828 	keylen -= 3;
2829 	memcpy(aeadctx->salt, key + keylen, 3);
2830 	return chcr_ccm_common_setkey(aead, key, keylen);
2831 }
2832 
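/*
 * GCM/RFC4106 setkey: program the software fallback, store the key (and,
 * for RFC4106, the trailing 4-byte salt), fill the key-context header and
 * precompute the GHASH subkey H = AES_K(0^128) with a temporary software
 * AES cipher.
 */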
2833 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2834 			   unsigned int keylen)
2835 {
2836 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2837 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2838 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2839 	struct crypto_cipher *cipher;
2840 	unsigned int ck_size;
2841 	int ret = 0, key_ctx_size = 0;
2842 
2843 	aeadctx->enckey_len = 0;
2844 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2845 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
2846 			      & CRYPTO_TFM_REQ_MASK);
2847 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2848 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2849 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2850 			      CRYPTO_TFM_RES_MASK);
2851 	if (ret)
2852 		goto out;
2853 
2854 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2855 	    keylen > 3) {
2856 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2857 		memcpy(aeadctx->salt, key + keylen, 4);
2858 	}
2859 	if (keylen == AES_KEYSIZE_128) {
2860 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2861 	} else if (keylen == AES_KEYSIZE_192) {
2862 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2863 	} else if (keylen == AES_KEYSIZE_256) {
2864 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2865 	} else {
2866 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2867 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2868 		pr_err("GCM: Invalid key length %d\n", keylen);
2869 		ret = -EINVAL;
2870 		goto out;
2871 	}
2872 
2873 	memcpy(aeadctx->key, key, keylen);
2874 	aeadctx->enckey_len = keylen;
2875 	key_ctx_size = sizeof(struct _key_ctx) +
2876 		((DIV_ROUND_UP(keylen, 16)) << 4) +
2877 		AEAD_H_SIZE;
2878 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2879 					CHCR_KEYCTX_MAC_KEY_SIZE_128,
2880 					0, 0,
2881 					key_ctx_size >> 4);
2882 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
2883 	 * It will go in key context
2884 	 */
2885 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
2886 	if (IS_ERR(cipher)) {
2887 		aeadctx->enckey_len = 0;
2888 		ret = -ENOMEM;
2889 		goto out;
2890 	}
2891 
2892 	ret = crypto_cipher_setkey(cipher, key, keylen);
2893 	if (ret) {
2894 		aeadctx->enckey_len = 0;
2895 		goto out1;
2896 	}
2897 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2898 	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2899 
2900 out1:
2901 	crypto_free_cipher(cipher);
2902 out:
2903 	return ret;
2904 }
2905 
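/*
 * authenc setkey: split the combined key, program the software fallback,
 * store the cipher key and derive its reverse-round decrypt key, then use
 * a software shash to precompute the ipad and opad partial digests that
 * are stored in actx->h_iopad for the hardware key context.
 */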
2906 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2907 				   unsigned int keylen)
2908 {
2909 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2910 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2911 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2912 	/* it contains auth and cipher key both*/
2913 	/* the key contains both the auth and the cipher key */
2914 	unsigned int bs;
2915 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2916 	int err = 0, i, key_ctx_len = 0;
2917 	unsigned char ck_size = 0;
2918 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2919 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2920 	struct algo_param param;
2921 	int align;
2922 	u8 *o_ptr = NULL;
2923 
2924 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2925 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2926 			      & CRYPTO_TFM_REQ_MASK);
2927 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2928 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2929 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2930 			      & CRYPTO_TFM_RES_MASK);
2931 	if (err)
2932 		goto out;
2933 
2934 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2935 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2936 		goto out;
2937 	}
2938 
2939 	if (get_alg_config(&param, max_authsize)) {
2940 		pr_err("chcr : Unsupported digest size\n");
2941 		goto out;
2942 	}
2943 	if (keys.enckeylen == AES_KEYSIZE_128) {
2944 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2945 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2946 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2947 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2948 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2949 	} else {
2950 		pr_err("chcr : Unsupported cipher key\n");
2951 		goto out;
2952 	}
2953 
2954 	/* Copy only the encryption key. We use the authkey to generate h(ipad)
2955 	 * and h(opad), so the authkey itself is not needed again; below it is
2956 	 * reduced to the hash digest size if it is longer than the block size.
2957 	 */
2958 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2959 	aeadctx->enckey_len = keys.enckeylen;
2960 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2961 			    aeadctx->enckey_len << 3);
2962 
2963 	base_hash  = chcr_alloc_shash(max_authsize);
2964 	if (IS_ERR(base_hash)) {
2965 		pr_err("chcr : Base driver cannot be loaded\n");
2966 		aeadctx->enckey_len = 0;
2967 		return -EINVAL;
2968 	}
2969 	{
2970 		SHASH_DESC_ON_STACK(shash, base_hash);
2971 		shash->tfm = base_hash;
2972 		shash->flags = crypto_shash_get_flags(base_hash);
2973 		bs = crypto_shash_blocksize(base_hash);
2974 		align = KEYCTX_ALIGN_PAD(max_authsize);
2975 		o_ptr =  actx->h_iopad + param.result_size + align;
2976 
2977 		if (keys.authkeylen > bs) {
2978 			err = crypto_shash_digest(shash, keys.authkey,
2979 						  keys.authkeylen,
2980 						  o_ptr);
2981 			if (err) {
2982 				pr_err("chcr : Base driver cannot be loaded\n");
2983 				goto out;
2984 			}
2985 			keys.authkeylen = max_authsize;
2986 		} else
2987 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
2988 
2989 		/* Compute the ipad-digest*/
2990 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2991 		memcpy(pad, o_ptr, keys.authkeylen);
2992 		for (i = 0; i < bs >> 2; i++)
2993 			*((unsigned int *)pad + i) ^= IPAD_DATA;
2994 
2995 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
2996 					      max_authsize))
2997 			goto out;
2998 		/* Compute the opad-digest */
2999 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3000 		memcpy(pad, o_ptr, keys.authkeylen);
3001 		for (i = 0; i < bs >> 2; i++)
3002 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3003 
3004 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3005 			goto out;
3006 
3007 		/* convert the ipad and opad digest to network order */
3008 		chcr_change_order(actx->h_iopad, param.result_size);
3009 		chcr_change_order(o_ptr, param.result_size);
3010 		key_ctx_len = sizeof(struct _key_ctx) +
3011 			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
3012 			(param.result_size + align) * 2;
3013 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3014 						0, 1, key_ctx_len >> 4);
3015 		actx->auth_mode = param.auth_mode;
3016 		chcr_free_shash(base_hash);
3017 
3018 		return 0;
3019 	}
3020 out:
3021 	aeadctx->enckey_len = 0;
3022 	if (!IS_ERR(base_hash))
3023 		chcr_free_shash(base_hash);
3024 	return -EINVAL;
3025 }
3026 
3027 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3028 					const u8 *key, unsigned int keylen)
3029 {
3030 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
3031 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3032 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3033 	struct crypto_authenc_keys keys;
3034 	int err;
3035 	/* it contains auth and cipher key both*/
3036 	/* the key contains both the auth and the cipher key */
3037 	unsigned char ck_size = 0;
3038 
3039 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3040 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3041 			      & CRYPTO_TFM_REQ_MASK);
3042 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3043 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3044 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3045 			      & CRYPTO_TFM_RES_MASK);
3046 	if (err)
3047 		goto out;
3048 
3049 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3050 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3051 		goto out;
3052 	}
3053 	if (keys.enckeylen == AES_KEYSIZE_128) {
3054 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3055 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3056 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3057 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3058 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3059 	} else {
3060 		pr_err("chcr : Unsupported cipher key\n");
3061 		goto out;
3062 	}
3063 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3064 	aeadctx->enckey_len = keys.enckeylen;
3065 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3066 				    aeadctx->enckey_len << 3);
3067 	key_ctx_len =  sizeof(struct _key_ctx)
3068 		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3069 
3070 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3071 						0, key_ctx_len >> 4);
3072 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3073 	return 0;
3074 out:
3075 	aeadctx->enckey_len = 0;
3076 	return -EINVAL;
3077 }
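
/*
 * Dispatch to the work-request builder matching the AEAD subtype
 * (authenc/null, CCM/RFC4309 or GCM/RFC4106). On decrypt, when the tag is
 * verified in software, the maximum auth size is passed down as extra
 * size.
 */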
3078 static int chcr_aead_encrypt(struct aead_request *req)
3079 {
3080 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3081 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3082 
3083 	reqctx->verify = VERIFY_HW;
3084 
3085 	switch (get_aead_subtype(tfm)) {
3086 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3087 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3088 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3089 				    create_authenc_wr);
3090 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3091 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3092 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3093 				    create_aead_ccm_wr);
3094 	default:
3095 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3096 				    create_gcm_wr);
3097 	}
3098 }
3099 
3100 static int chcr_aead_decrypt(struct aead_request *req)
3101 {
3102 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3103 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
3104 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3105 	int size;
3106 
3107 	if (aeadctx->mayverify == VERIFY_SW) {
3108 		size = crypto_aead_maxauthsize(tfm);
3109 		reqctx->verify = VERIFY_SW;
3110 	} else {
3111 		size = 0;
3112 		reqctx->verify = VERIFY_HW;
3113 	}
3114 
3115 	switch (get_aead_subtype(tfm)) {
3116 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3117 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3118 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3119 				    create_authenc_wr);
3120 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3121 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3122 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3123 				    create_aead_ccm_wr);
3124 	default:
3125 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3126 				    create_gcm_wr);
3127 	}
3128 }
3129 
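/*
 * Common AEAD submission path: check that a device is bound and that the
 * crypto queue has room (honouring CRYPTO_TFM_REQ_MAY_BACKLOG), build the
 * work request with the supplied constructor and post it; completion is
 * reported asynchronously, hence -EINPROGRESS.
 */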
3130 static int chcr_aead_op(struct aead_request *req,
3131 			  unsigned short op_type,
3132 			  int size,
3133 			  create_wr_t create_wr_fn)
3134 {
3135 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3136 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
3137 	struct uld_ctx *u_ctx;
3138 	struct sk_buff *skb;
3139 
3140 	if (!ctx->dev) {
3141 		pr_err("chcr : %s : No crypto device.\n", __func__);
3142 		return -ENXIO;
3143 	}
3144 	u_ctx = ULD_CTX(ctx);
3145 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3146 				   ctx->tx_qidx)) {
3147 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3148 			return -EBUSY;
3149 	}
3150 
3151 	/* Form a WR from req */
3152 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
3153 			   op_type);
3154 
3155 	if (IS_ERR(skb) || !skb)
3156 		return PTR_ERR(skb);
3157 
3158 	skb->dev = u_ctx->lldi.ports[0];
3159 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
3160 	chcr_send_wr(skb);
3161 	return -EINPROGRESS;
3162 }
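
/*
 * Algorithms advertised to the crypto API. .type combines the crypto type
 * with the driver subtype used for dispatch; .is_registered tracks whether
 * the entry is currently registered.
 */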
3163 static struct chcr_alg_template driver_algs[] = {
3164 	/* AES-CBC */
3165 	{
3166 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3167 		.is_registered = 0,
3168 		.alg.crypto = {
3169 			.cra_name		= "cbc(aes)",
3170 			.cra_driver_name	= "cbc-aes-chcr",
3171 			.cra_blocksize		= AES_BLOCK_SIZE,
3172 			.cra_init		= chcr_cra_init,
3173 			.cra_exit		= chcr_cra_exit,
3174 			.cra_u.ablkcipher	= {
3175 				.min_keysize	= AES_MIN_KEY_SIZE,
3176 				.max_keysize	= AES_MAX_KEY_SIZE,
3177 				.ivsize		= AES_BLOCK_SIZE,
3178 				.setkey			= chcr_aes_cbc_setkey,
3179 				.encrypt		= chcr_aes_encrypt,
3180 				.decrypt		= chcr_aes_decrypt,
3181 			}
3182 		}
3183 	},
3184 	{
3185 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3186 		.is_registered = 0,
3187 		.alg.crypto =   {
3188 			.cra_name		= "xts(aes)",
3189 			.cra_driver_name	= "xts-aes-chcr",
3190 			.cra_blocksize		= AES_BLOCK_SIZE,
3191 			.cra_init		= chcr_cra_init,
3192 			.cra_exit		= NULL,
3193 			.cra_u .ablkcipher = {
3194 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
3195 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
3196 					.ivsize		= AES_BLOCK_SIZE,
3197 					.setkey		= chcr_aes_xts_setkey,
3198 					.encrypt	= chcr_aes_encrypt,
3199 					.decrypt	= chcr_aes_decrypt,
3200 				}
3201 			}
3202 	},
3203 	{
3204 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3205 		.is_registered = 0,
3206 		.alg.crypto = {
3207 			.cra_name		= "ctr(aes)",
3208 			.cra_driver_name	= "ctr-aes-chcr",
3209 			.cra_blocksize		= 1,
3210 			.cra_init		= chcr_cra_init,
3211 			.cra_exit		= chcr_cra_exit,
3212 			.cra_u.ablkcipher	= {
3213 				.min_keysize	= AES_MIN_KEY_SIZE,
3214 				.max_keysize	= AES_MAX_KEY_SIZE,
3215 				.ivsize		= AES_BLOCK_SIZE,
3216 				.setkey		= chcr_aes_ctr_setkey,
3217 				.encrypt	= chcr_aes_encrypt,
3218 				.decrypt	= chcr_aes_decrypt,
3219 			}
3220 		}
3221 	},
3222 	{
3223 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3224 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3225 		.is_registered = 0,
3226 		.alg.crypto = {
3227 			.cra_name		= "rfc3686(ctr(aes))",
3228 			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3229 			.cra_blocksize		= 1,
3230 			.cra_init		= chcr_rfc3686_init,
3231 			.cra_exit		= chcr_cra_exit,
3232 			.cra_u.ablkcipher	= {
3233 				.min_keysize	= AES_MIN_KEY_SIZE +
3234 					CTR_RFC3686_NONCE_SIZE,
3235 				.max_keysize	= AES_MAX_KEY_SIZE +
3236 					CTR_RFC3686_NONCE_SIZE,
3237 				.ivsize		= CTR_RFC3686_IV_SIZE,
3238 				.setkey		= chcr_aes_rfc3686_setkey,
3239 				.encrypt	= chcr_aes_encrypt,
3240 				.decrypt	= chcr_aes_decrypt,
3241 				.geniv          = "seqiv",
3242 			}
3243 		}
3244 	},
3245 	/* SHA */
3246 	{
3247 		.type = CRYPTO_ALG_TYPE_AHASH,
3248 		.is_registered = 0,
3249 		.alg.hash = {
3250 			.halg.digestsize = SHA1_DIGEST_SIZE,
3251 			.halg.base = {
3252 				.cra_name = "sha1",
3253 				.cra_driver_name = "sha1-chcr",
3254 				.cra_blocksize = SHA1_BLOCK_SIZE,
3255 			}
3256 		}
3257 	},
3258 	{
3259 		.type = CRYPTO_ALG_TYPE_AHASH,
3260 		.is_registered = 0,
3261 		.alg.hash = {
3262 			.halg.digestsize = SHA256_DIGEST_SIZE,
3263 			.halg.base = {
3264 				.cra_name = "sha256",
3265 				.cra_driver_name = "sha256-chcr",
3266 				.cra_blocksize = SHA256_BLOCK_SIZE,
3267 			}
3268 		}
3269 	},
3270 	{
3271 		.type = CRYPTO_ALG_TYPE_AHASH,
3272 		.is_registered = 0,
3273 		.alg.hash = {
3274 			.halg.digestsize = SHA224_DIGEST_SIZE,
3275 			.halg.base = {
3276 				.cra_name = "sha224",
3277 				.cra_driver_name = "sha224-chcr",
3278 				.cra_blocksize = SHA224_BLOCK_SIZE,
3279 			}
3280 		}
3281 	},
3282 	{
3283 		.type = CRYPTO_ALG_TYPE_AHASH,
3284 		.is_registered = 0,
3285 		.alg.hash = {
3286 			.halg.digestsize = SHA384_DIGEST_SIZE,
3287 			.halg.base = {
3288 				.cra_name = "sha384",
3289 				.cra_driver_name = "sha384-chcr",
3290 				.cra_blocksize = SHA384_BLOCK_SIZE,
3291 			}
3292 		}
3293 	},
3294 	{
3295 		.type = CRYPTO_ALG_TYPE_AHASH,
3296 		.is_registered = 0,
3297 		.alg.hash = {
3298 			.halg.digestsize = SHA512_DIGEST_SIZE,
3299 			.halg.base = {
3300 				.cra_name = "sha512",
3301 				.cra_driver_name = "sha512-chcr",
3302 				.cra_blocksize = SHA512_BLOCK_SIZE,
3303 			}
3304 		}
3305 	},
3306 	/* HMAC */
3307 	{
3308 		.type = CRYPTO_ALG_TYPE_HMAC,
3309 		.is_registered = 0,
3310 		.alg.hash = {
3311 			.halg.digestsize = SHA1_DIGEST_SIZE,
3312 			.halg.base = {
3313 				.cra_name = "hmac(sha1)",
3314 				.cra_driver_name = "hmac-sha1-chcr",
3315 				.cra_blocksize = SHA1_BLOCK_SIZE,
3316 			}
3317 		}
3318 	},
3319 	{
3320 		.type = CRYPTO_ALG_TYPE_HMAC,
3321 		.is_registered = 0,
3322 		.alg.hash = {
3323 			.halg.digestsize = SHA224_DIGEST_SIZE,
3324 			.halg.base = {
3325 				.cra_name = "hmac(sha224)",
3326 				.cra_driver_name = "hmac-sha224-chcr",
3327 				.cra_blocksize = SHA224_BLOCK_SIZE,
3328 			}
3329 		}
3330 	},
3331 	{
3332 		.type = CRYPTO_ALG_TYPE_HMAC,
3333 		.is_registered = 0,
3334 		.alg.hash = {
3335 			.halg.digestsize = SHA256_DIGEST_SIZE,
3336 			.halg.base = {
3337 				.cra_name = "hmac(sha256)",
3338 				.cra_driver_name = "hmac-sha256-chcr",
3339 				.cra_blocksize = SHA256_BLOCK_SIZE,
3340 			}
3341 		}
3342 	},
3343 	{
3344 		.type = CRYPTO_ALG_TYPE_HMAC,
3345 		.is_registered = 0,
3346 		.alg.hash = {
3347 			.halg.digestsize = SHA384_DIGEST_SIZE,
3348 			.halg.base = {
3349 				.cra_name = "hmac(sha384)",
3350 				.cra_driver_name = "hmac-sha384-chcr",
3351 				.cra_blocksize = SHA384_BLOCK_SIZE,
3352 			}
3353 		}
3354 	},
3355 	{
3356 		.type = CRYPTO_ALG_TYPE_HMAC,
3357 		.is_registered = 0,
3358 		.alg.hash = {
3359 			.halg.digestsize = SHA512_DIGEST_SIZE,
3360 			.halg.base = {
3361 				.cra_name = "hmac(sha512)",
3362 				.cra_driver_name = "hmac-sha512-chcr",
3363 				.cra_blocksize = SHA512_BLOCK_SIZE,
3364 			}
3365 		}
3366 	},
3367 	/* Add AEAD Algorithms */
3368 	{
3369 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3370 		.is_registered = 0,
3371 		.alg.aead = {
3372 			.base = {
3373 				.cra_name = "gcm(aes)",
3374 				.cra_driver_name = "gcm-aes-chcr",
3375 				.cra_blocksize	= 1,
3376 				.cra_priority = CHCR_AEAD_PRIORITY,
3377 				.cra_ctxsize =	sizeof(struct chcr_context) +
3378 						sizeof(struct chcr_aead_ctx) +
3379 						sizeof(struct chcr_gcm_ctx),
3380 			},
3381 			.ivsize = 12,
3382 			.maxauthsize = GHASH_DIGEST_SIZE,
3383 			.setkey = chcr_gcm_setkey,
3384 			.setauthsize = chcr_gcm_setauthsize,
3385 		}
3386 	},
3387 	{
3388 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3389 		.is_registered = 0,
3390 		.alg.aead = {
3391 			.base = {
3392 				.cra_name = "rfc4106(gcm(aes))",
3393 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
3394 				.cra_blocksize	 = 1,
3395 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3396 				.cra_ctxsize =	sizeof(struct chcr_context) +
3397 						sizeof(struct chcr_aead_ctx) +
3398 						sizeof(struct chcr_gcm_ctx),
3400 			},
3401 			.ivsize = 8,
3402 			.maxauthsize	= GHASH_DIGEST_SIZE,
3403 			.setkey = chcr_gcm_setkey,
3404 			.setauthsize	= chcr_4106_4309_setauthsize,
3405 		}
3406 	},
3407 	{
3408 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3409 		.is_registered = 0,
3410 		.alg.aead = {
3411 			.base = {
3412 				.cra_name = "ccm(aes)",
3413 				.cra_driver_name = "ccm-aes-chcr",
3414 				.cra_blocksize	 = 1,
3415 				.cra_priority = CHCR_AEAD_PRIORITY,
3416 				.cra_ctxsize =	sizeof(struct chcr_context) +
3417 						sizeof(struct chcr_aead_ctx),
3419 			},
3420 			.ivsize = AES_BLOCK_SIZE,
3421 			.maxauthsize	= GHASH_DIGEST_SIZE,
3422 			.setkey = chcr_aead_ccm_setkey,
3423 			.setauthsize	= chcr_ccm_setauthsize,
3424 		}
3425 	},
3426 	{
3427 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3428 		.is_registered = 0,
3429 		.alg.aead = {
3430 			.base = {
3431 				.cra_name = "rfc4309(ccm(aes))",
3432 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
3433 				.cra_blocksize	 = 1,
3434 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3435 				.cra_ctxsize =	sizeof(struct chcr_context) +
3436 						sizeof(struct chcr_aead_ctx),
3438 			},
3439 			.ivsize = 8,
3440 			.maxauthsize	= GHASH_DIGEST_SIZE,
3441 			.setkey = chcr_aead_rfc4309_setkey,
3442 			.setauthsize = chcr_4106_4309_setauthsize,
3443 		}
3444 	},
3445 	{
3446 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3447 		.is_registered = 0,
3448 		.alg.aead = {
3449 			.base = {
3450 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3451 				.cra_driver_name =
3452 					"authenc-hmac-sha1-cbc-aes-chcr",
3453 				.cra_blocksize	 = AES_BLOCK_SIZE,
3454 				.cra_priority = CHCR_AEAD_PRIORITY,
3455 				.cra_ctxsize =	sizeof(struct chcr_context) +
3456 						sizeof(struct chcr_aead_ctx) +
3457 						sizeof(struct chcr_authenc_ctx),
3459 			},
3460 			.ivsize = AES_BLOCK_SIZE,
3461 			.maxauthsize = SHA1_DIGEST_SIZE,
3462 			.setkey = chcr_authenc_setkey,
3463 			.setauthsize = chcr_authenc_setauthsize,
3464 		}
3465 	},
3466 	{
3467 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3468 		.is_registered = 0,
3469 		.alg.aead = {
3470 			.base = {
3472 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3473 				.cra_driver_name =
3474 					"authenc-hmac-sha256-cbc-aes-chcr",
3475 				.cra_blocksize	 = AES_BLOCK_SIZE,
3476 				.cra_priority = CHCR_AEAD_PRIORITY,
3477 				.cra_ctxsize =	sizeof(struct chcr_context) +
3478 						sizeof(struct chcr_aead_ctx) +
3479 						sizeof(struct chcr_authenc_ctx),
3481 			},
3482 			.ivsize = AES_BLOCK_SIZE,
3483 			.maxauthsize	= SHA256_DIGEST_SIZE,
3484 			.setkey = chcr_authenc_setkey,
3485 			.setauthsize = chcr_authenc_setauthsize,
3486 		}
3487 	},
3488 	{
3489 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3490 		.is_registered = 0,
3491 		.alg.aead = {
3492 			.base = {
3493 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
3494 				.cra_driver_name =
3495 					"authenc-hmac-sha224-cbc-aes-chcr",
3496 				.cra_blocksize	 = AES_BLOCK_SIZE,
3497 				.cra_priority = CHCR_AEAD_PRIORITY,
3498 				.cra_ctxsize =	sizeof(struct chcr_context) +
3499 						sizeof(struct chcr_aead_ctx) +
3500 						sizeof(struct chcr_authenc_ctx),
3501 			},
3502 			.ivsize = AES_BLOCK_SIZE,
3503 			.maxauthsize = SHA224_DIGEST_SIZE,
3504 			.setkey = chcr_authenc_setkey,
3505 			.setauthsize = chcr_authenc_setauthsize,
3506 		}
3507 	},
3508 	{
3509 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3510 		.is_registered = 0,
3511 		.alg.aead = {
3512 			.base = {
3513 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
3514 				.cra_driver_name =
3515 					"authenc-hmac-sha384-cbc-aes-chcr",
3516 				.cra_blocksize	 = AES_BLOCK_SIZE,
3517 				.cra_priority = CHCR_AEAD_PRIORITY,
3518 				.cra_ctxsize =	sizeof(struct chcr_context) +
3519 						sizeof(struct chcr_aead_ctx) +
3520 						sizeof(struct chcr_authenc_ctx),
3522 			},
3523 			.ivsize = AES_BLOCK_SIZE,
3524 			.maxauthsize = SHA384_DIGEST_SIZE,
3525 			.setkey = chcr_authenc_setkey,
3526 			.setauthsize = chcr_authenc_setauthsize,
3527 		}
3528 	},
3529 	{
3530 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3531 		.is_registered = 0,
3532 		.alg.aead = {
3533 			.base = {
3534 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
3535 				.cra_driver_name =
3536 					"authenc-hmac-sha512-cbc-aes-chcr",
3537 				.cra_blocksize	 = AES_BLOCK_SIZE,
3538 				.cra_priority = CHCR_AEAD_PRIORITY,
3539 				.cra_ctxsize =	sizeof(struct chcr_context) +
3540 						sizeof(struct chcr_aead_ctx) +
3541 						sizeof(struct chcr_authenc_ctx),
3543 			},
3544 			.ivsize = AES_BLOCK_SIZE,
3545 			.maxauthsize = SHA512_DIGEST_SIZE,
3546 			.setkey = chcr_authenc_setkey,
3547 			.setauthsize = chcr_authenc_setauthsize,
3548 		}
3549 	},
3550 	{
3551 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
3552 		.is_registered = 0,
3553 		.alg.aead = {
3554 			.base = {
3555 				.cra_name = "authenc(digest_null,cbc(aes))",
3556 				.cra_driver_name =
3557 					"authenc-digest_null-cbc-aes-chcr",
3558 				.cra_blocksize	 = AES_BLOCK_SIZE,
3559 				.cra_priority = CHCR_AEAD_PRIORITY,
3560 				.cra_ctxsize =	sizeof(struct chcr_context) +
3561 						sizeof(struct chcr_aead_ctx) +
3562 						sizeof(struct chcr_authenc_ctx),
3564 			},
3565 			.ivsize  = AES_BLOCK_SIZE,
3566 			.maxauthsize = 0,
3567 			.setkey  = chcr_aead_digest_null_setkey,
3568 			.setauthsize = chcr_authenc_null_setauthsize,
3569 		}
3570 	},
3571 };
3572 
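/*
 * Editor's illustrative sketch (not part of the driver): once the table
 * above is registered by chcr_register_alg() below, consumers reach these
 * transforms through the generic crypto API by cra_name, and the crypto
 * core picks the chcr implementation whenever its cra_priority is the
 * highest available.  The helper name chcr_example_get_sha256() is
 * hypothetical and exists only to show the lookup.
 */
static inline int chcr_example_get_sha256(void)
{
	struct crypto_ahash *tfm;

	/* "sha256" may resolve to the "sha256-chcr" entry above. */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	pr_debug("picked %s, digest size %u\n",
		 crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)),
		 crypto_ahash_digestsize(tfm));

	crypto_free_ahash(tfm);
	return 0;
}
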
3573 /*
3574  *	chcr_unregister_alg - Deregister the driver's crypto algorithms
3575  *	from the kernel crypto framework.
3576  */
3577 static int chcr_unregister_alg(void)
3578 {
3579 	int i;
3580 
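	/*
	 * Walk the whole table and drop anything that is currently
	 * registered; this runs both on normal teardown (stop_crypto())
	 * and when chcr_register_alg() has to unwind a partially
	 * completed registration.
	 */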
3581 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3582 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
3583 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
3584 			if (driver_algs[i].is_registered)
3585 				crypto_unregister_alg(
3586 						&driver_algs[i].alg.crypto);
3587 			break;
3588 		case CRYPTO_ALG_TYPE_AEAD:
3589 			if (driver_algs[i].is_registered)
3590 				crypto_unregister_aead(
3591 						&driver_algs[i].alg.aead);
3592 			break;
3593 		case CRYPTO_ALG_TYPE_AHASH:
3594 			if (driver_algs[i].is_registered)
3595 				crypto_unregister_ahash(
3596 						&driver_algs[i].alg.hash);
3597 			break;
3598 		}
3599 		driver_algs[i].is_registered = 0;
3600 	}
3601 	return 0;
3602 }
3603 
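/*
 * Per-tfm context sizes handed to the ahash framework below: plain SHA
 * transforms only need the base chcr_context, while HMAC transforms
 * additionally carry a struct hmac_ctx for the keyed state.
 */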
3604 #define SZ_AHASH_CTX sizeof(struct chcr_context)
3605 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
3606 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
3607 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
3608 
3609 /*
3610  *	chcr_register_alg - Register crypto algorithms with kernel framework.
3611  */
3612 static int chcr_register_alg(void)
3613 {
3615 	struct ahash_alg *a_hash;
3616 	int err = 0, i;
3617 	char *name = NULL;
3618 
3619 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3620 		if (driver_algs[i].is_registered)
3621 			continue;
3622 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
3623 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
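			/*
			 * Fields common to every cipher entry (priority,
			 * module, flags, context size) are filled in here,
			 * so the table above only carries the per-algorithm
			 * differences.
			 */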
3624 			driver_algs[i].alg.crypto.cra_priority =
3625 				CHCR_CRA_PRIORITY;
3626 			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
3627 			driver_algs[i].alg.crypto.cra_flags =
3628 				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
3629 				CRYPTO_ALG_NEED_FALLBACK;
3630 			driver_algs[i].alg.crypto.cra_ctxsize =
3631 				sizeof(struct chcr_context) +
3632 				sizeof(struct ablk_ctx);
3633 			driver_algs[i].alg.crypto.cra_alignmask = 0;
3634 			driver_algs[i].alg.crypto.cra_type =
3635 				&crypto_ablkcipher_type;
3636 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
3637 			name = driver_algs[i].alg.crypto.cra_driver_name;
3638 			break;
3639 		case CRYPTO_ALG_TYPE_AEAD:
3640 			driver_algs[i].alg.aead.base.cra_flags =
3641 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
3642 				CRYPTO_ALG_NEED_FALLBACK;
3643 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
3644 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
3645 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
3646 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
3647 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
3648 			err = crypto_register_aead(&driver_algs[i].alg.aead);
3649 			name = driver_algs[i].alg.aead.base.cra_driver_name;
3650 			break;
3651 		case CRYPTO_ALG_TYPE_AHASH:
3652 			a_hash = &driver_algs[i].alg.hash;
3653 			a_hash->update = chcr_ahash_update;
3654 			a_hash->final = chcr_ahash_final;
3655 			a_hash->finup = chcr_ahash_finup;
3656 			a_hash->digest = chcr_ahash_digest;
3657 			a_hash->export = chcr_ahash_export;
3658 			a_hash->import = chcr_ahash_import;
3659 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
3660 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
3661 			a_hash->halg.base.cra_module = THIS_MODULE;
3662 			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
3663 			a_hash->halg.base.cra_alignmask = 0;
3664 			a_hash->halg.base.cra_exit = NULL;
3665 			a_hash->halg.base.cra_type = &crypto_ahash_type;
3666 
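			/*
			 * HMAC and plain-hash entries share the ahash ops
			 * set up above; they differ in their init/exit and
			 * setkey handlers and in the per-tfm context size.
			 */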
3667 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
3668 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
3669 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
3670 				a_hash->init = chcr_hmac_init;
3671 				a_hash->setkey = chcr_ahash_setkey;
3672 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
3673 			} else {
3674 				a_hash->init = chcr_sha_init;
3675 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
3676 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
3677 			}
3678 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
3679 			name = a_hash->halg.base.cra_driver_name;
3681 			break;
3682 		}
3683 		if (err) {
3684 			pr_err("%s: algorithm registration failed\n", name);
3686 			goto register_err;
3687 		} else {
3688 			driver_algs[i].is_registered = 1;
3689 		}
3690 	}
3691 	return 0;
3692 
3693 register_err:
3694 	chcr_unregister_alg();
3695 	return err;
3696 }
3697 
3698 /*
3699  *	start_crypto - Register the crypto algorithms.
3700  *	This should be called once when the first device comes up. After this,
3701  *	the kernel will start calling the driver APIs for crypto operations.
3702  */
3703 int start_crypto(void)
3704 {
3705 	return chcr_register_alg();
3706 }
3707 
3708 /*
3709  *	stop_crypto - Deregister all the crypto algorithms from the kernel.
3710  *	This should be called once when the last device goes down. After this,
3711  *	the kernel will not call the driver APIs for crypto operations.
3712  */
3713 int stop_crypto(void)
3714 {
3715 	chcr_unregister_alg();
3716 	return 0;
3717 }
3718
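/*
 * Editor's illustrative sketch (not part of the driver): the AEAD entries
 * in driver_algs are consumed through the generic AEAD API in the same
 * way.  The function name chcr_example_get_gcm() and the all-zero key are
 * purely illustrative.
 */
static inline int chcr_example_get_gcm(void)
{
	static const u8 key[16]; /* illustrative all-zero 128-bit AES key */
	struct crypto_aead *tfm;
	int err;

	/* "gcm(aes)" may resolve to the "gcm-aes-chcr" entry above. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The tag length must not exceed .maxauthsize (16 bytes for GCM). */
	err = crypto_aead_setauthsize(tfm, 16);
	if (!err)
		err = crypto_aead_setkey(tfm, key, sizeof(key));

	crypto_free_aead(tfm);
	return err;
}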