xref: /linux/drivers/crypto/chelsio/chcr_algo.c (revision bb1c928df78ee6e3665a0d013e74108cc9abf34b)
1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/ctr.h>
59 #include <crypto/gf128mul.h>
60 #include <crypto/internal/aead.h>
61 #include <crypto/null.h>
62 #include <crypto/internal/skcipher.h>
63 #include <crypto/aead.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/internal/hash.h>
66 
67 #include "t4fw_api.h"
68 #include "t4_msg.h"
69 #include "chcr_core.h"
70 #include "chcr_algo.h"
71 #include "chcr_crypto.h"
72 
73 static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
74 {
75 	return ctx->crypto_ctx->aeadctx;
76 }
77 
78 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
79 {
80 	return ctx->crypto_ctx->ablkctx;
81 }
82 
83 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
84 {
85 	return ctx->crypto_ctx->hmacctx;
86 }
87 
88 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
89 {
90 	return gctx->ctx->gcm;
91 }
92 
93 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
94 {
95 	return gctx->ctx->authenc;
96 }
97 
98 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
99 {
100 	return ctx->dev->u_ctx;
101 }
102 
103 static inline int is_ofld_imm(const struct sk_buff *skb)
104 {
105 	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
106 }
107 
108 /*
109  *	sgl_len - calculates the size of an SGL of the given capacity
110  *	@n: the number of SGL entries
111  *	Calculates the number of flits needed for a scatter/gather list that
112  *	can hold the given number of entries.
113  */
114 static inline unsigned int sgl_len(unsigned int n)
115 {
116 	n--;
117 	return (3 * n) / 2 + (n & 1) + 2;
118 }
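/*
 *	Worked example for the formula above: n = 5 entries gives, after
 *	the n--, (3 * 4) / 2 + (4 & 1) + 2 = 8 flits (a flit is 8 bytes).
 *	The first entry costs 2 flits (SGL header word plus one address);
 *	every further pair costs 3 flits (two addresses plus one flit
 *	shared by their lengths), and an odd leftover entry costs 2 more.
 */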
119 
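/*
 *	chcr_verify_tag - verify an AEAD authentication tag in software.
 *	Compares the tag computed by hardware (carried in the CPL_FW6_PLD
 *	payload) against the expected tag and reports -EBADMSG through
 *	*err on mismatch; crypto_memneq() keeps the compare constant-time.
 */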
120 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
121 {
122 	u8 temp[SHA512_DIGEST_SIZE];
123 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
124 	int authsize = crypto_aead_authsize(tfm);
125 	struct cpl_fw6_pld *fw6_pld;
126 	int cmp = 0;
127 
128 	fw6_pld = (struct cpl_fw6_pld *)input;
129 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
130 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
131 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
132 	} else {
133 
134 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
135 				authsize, req->assoclen +
136 				req->cryptlen - authsize);
137 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
138 	}
139 	if (cmp)
140 		*err = -EBADMSG;
141 	else
142 		*err = 0;
143 }
144 
145 /*
146  *	chcr_handle_resp - unmap the request's DMA buffers and complete it
147  *	@req: crypto request
148  */
149 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
150 			 int err)
151 {
152 	struct crypto_tfm *tfm = req->tfm;
153 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
154 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
155 	struct chcr_req_ctx ctx_req;
156 	unsigned int digestsize, updated_digestsize;
157 	struct adapter *adap = padap(ctx->dev);
158 
159 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
160 	case CRYPTO_ALG_TYPE_AEAD:
161 		ctx_req.req.aead_req = aead_request_cast(req);
162 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
163 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
164 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
165 		if (ctx_req.ctx.reqctx->skb) {
166 			kfree_skb(ctx_req.ctx.reqctx->skb);
167 			ctx_req.ctx.reqctx->skb = NULL;
168 		}
169 		free_new_sg(ctx_req.ctx.reqctx->newdstsg);
170 		ctx_req.ctx.reqctx->newdstsg = NULL;
171 		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
172 			chcr_verify_tag(ctx_req.req.aead_req, input,
173 					&err);
174 			ctx_req.ctx.reqctx->verify = VERIFY_HW;
175 		}
176 		ctx_req.req.aead_req->base.complete(req, err);
177 		break;
178 
179 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
180 		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
181 					       input, err);
182 		break;
183 
184 	case CRYPTO_ALG_TYPE_AHASH:
185 		ctx_req.req.ahash_req = ahash_request_cast(req);
186 		ctx_req.ctx.ahash_ctx =
187 			ahash_request_ctx(ctx_req.req.ahash_req);
188 		digestsize =
189 			crypto_ahash_digestsize(crypto_ahash_reqtfm(
190 							ctx_req.req.ahash_req));
191 		updated_digestsize = digestsize;
192 		if (digestsize == SHA224_DIGEST_SIZE)
193 			updated_digestsize = SHA256_DIGEST_SIZE;
194 		else if (digestsize == SHA384_DIGEST_SIZE)
195 			updated_digestsize = SHA512_DIGEST_SIZE;
196 		if (ctx_req.ctx.ahash_ctx->skb) {
197 			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
198 			ctx_req.ctx.ahash_ctx->skb = NULL;
199 		}
200 		if (ctx_req.ctx.ahash_ctx->result == 1) {
201 			ctx_req.ctx.ahash_ctx->result = 0;
202 			memcpy(ctx_req.req.ahash_req->result, input +
203 			       sizeof(struct cpl_fw6_pld),
204 			       digestsize);
205 		} else {
206 			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
207 			       sizeof(struct cpl_fw6_pld),
208 			       updated_digestsize);
209 		}
210 		ctx_req.req.ahash_req->base.complete(req, err);
211 		break;
212 	}
213 	atomic_inc(&adap->chcr_stats.complete);
214 	return err;
215 }
216 
217 /*
218  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
219  *	@skb: the packet
220  *	Returns the number of flits needed for the given offload packet.
221  *	These packets are already fully constructed and no additional headers
222  *	will be added.
223  */
224 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
225 {
226 	unsigned int flits, cnt;
227 
228 	if (is_ofld_imm(skb))
229 		return DIV_ROUND_UP(skb->len, 8);
230 
231 	flits = skb_transport_offset(skb) / 8;   /* headers */
232 	cnt = skb_shinfo(skb)->nr_frags;
233 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
234 		cnt++;
235 	return flits + sgl_len(cnt);
236 }
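/*
 *	Example: a 100-byte immediate packet needs DIV_ROUND_UP(100, 8) =
 *	13 flits.  A non-immediate packet with 40 bytes of headers, three
 *	page fragments and tail data past the transport header counts
 *	cnt = 4 gather entries, i.e. 40 / 8 + sgl_len(4) = 5 + 7 = 12 flits.
 */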
237 
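/*
 *	get_aes_decrypt_key - derive the "reverse round key" for AES.
 *	Runs the standard AES key expansion while keeping only a rolling
 *	window of the last Nk schedule words, then stores those final
 *	words in reverse order; decryption starts from the last round
 *	key, so this is the key material loaded for decrypt operations.
 */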
238 static inline void get_aes_decrypt_key(unsigned char *dec_key,
239 				       const unsigned char *key,
240 				       unsigned int keylength)
241 {
242 	u32 temp;
243 	u32 w_ring[MAX_NK];
244 	int i, j, k;
245 	u8  nr, nk;
246 
247 	switch (keylength) {
248 	case AES_KEYLENGTH_128BIT:
249 		nk = KEYLENGTH_4BYTES;
250 		nr = NUMBER_OF_ROUNDS_10;
251 		break;
252 	case AES_KEYLENGTH_192BIT:
253 		nk = KEYLENGTH_6BYTES;
254 		nr = NUMBER_OF_ROUNDS_12;
255 		break;
256 	case AES_KEYLENGTH_256BIT:
257 		nk = KEYLENGTH_8BYTES;
258 		nr = NUMBER_OF_ROUNDS_14;
259 		break;
260 	default:
261 		return;
262 	}
263 	for (i = 0; i < nk; i++)
264 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
265 
266 	i = 0;
267 	temp = w_ring[nk - 1];
268 	while (i + nk < (nr + 1) * 4) {
269 		if (!(i % nk)) {
270 			/* RotWord(temp) */
271 			temp = (temp << 8) | (temp >> 24);
272 			temp = aes_ks_subword(temp);
273 			temp ^= round_constant[i / nk];
274 		} else if (nk == 8 && (i % 4 == 0)) {
275 			temp = aes_ks_subword(temp);
276 		}
277 		w_ring[i % nk] ^= temp;
278 		temp = w_ring[i % nk];
279 		i++;
280 	}
281 	i--;
282 	for (k = 0, j = i % nk; k < nk; k++) {
283 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
284 		j--;
285 		if (j < 0)
286 			j += nk;
287 	}
288 }
289 
290 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
291 {
292 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
293 
294 	switch (ds) {
295 	case SHA1_DIGEST_SIZE:
296 		base_hash = crypto_alloc_shash("sha1", 0, 0);
297 		break;
298 	case SHA224_DIGEST_SIZE:
299 		base_hash = crypto_alloc_shash("sha224", 0, 0);
300 		break;
301 	case SHA256_DIGEST_SIZE:
302 		base_hash = crypto_alloc_shash("sha256", 0, 0);
303 		break;
304 	case SHA384_DIGEST_SIZE:
305 		base_hash = crypto_alloc_shash("sha384", 0, 0);
306 		break;
307 	case SHA512_DIGEST_SIZE:
308 		base_hash = crypto_alloc_shash("sha512", 0, 0);
309 		break;
310 	}
311 
312 	return base_hash;
313 }
314 
315 static int chcr_compute_partial_hash(struct shash_desc *desc,
316 				     char *iopad, char *result_hash,
317 				     int digest_size)
318 {
319 	struct sha1_state sha1_st;
320 	struct sha256_state sha256_st;
321 	struct sha512_state sha512_st;
322 	int error;
323 
324 	if (digest_size == SHA1_DIGEST_SIZE) {
325 		error = crypto_shash_init(desc) ?:
326 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
327 			crypto_shash_export(desc, (void *)&sha1_st);
328 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
329 	} else if (digest_size == SHA224_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha256_st);
333 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334 
335 	} else if (digest_size == SHA256_DIGEST_SIZE) {
336 		error = crypto_shash_init(desc) ?:
337 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
338 			crypto_shash_export(desc, (void *)&sha256_st);
339 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
340 
341 	} else if (digest_size == SHA384_DIGEST_SIZE) {
342 		error = crypto_shash_init(desc) ?:
343 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344 			crypto_shash_export(desc, (void *)&sha512_st);
345 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346 
347 	} else if (digest_size == SHA512_DIGEST_SIZE) {
348 		error = crypto_shash_init(desc) ?:
349 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
350 			crypto_shash_export(desc, (void *)&sha512_st);
351 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
352 	} else {
353 		error = -EINVAL;
354 		pr_err("Unknown digest size %d\n", digest_size);
355 	}
356 	return error;
357 }
358 
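/*
 * chcr_change_order - convert an exported partial hash to big-endian.
 * crypto_shash_export() returns the state words in host byte order,
 * while the key context carries the partial digest big-endian: 64-bit
 * words for SHA-384/512 state, 32-bit words for the other digests.
 */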
359 static void chcr_change_order(char *buf, int ds)
360 {
361 	int i;
362 
363 	if (ds == SHA512_DIGEST_SIZE) {
364 		for (i = 0; i < (ds / sizeof(u64)); i++)
365 			*((__be64 *)buf + i) =
366 				cpu_to_be64(*((u64 *)buf + i));
367 	} else {
368 		for (i = 0; i < (ds / sizeof(u32)); i++)
369 			*((__be32 *)buf + i) =
370 				cpu_to_be32(*((u32 *)buf + i));
371 	}
372 }
373 
374 static inline int is_hmac(struct crypto_tfm *tfm)
375 {
376 	struct crypto_alg *alg = tfm->__crt_alg;
377 	struct chcr_alg_template *chcr_crypto_alg =
378 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
379 			     alg.hash);
380 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
381 		return 1;
382 	return 0;
383 }
384 
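/*
 * write_phys_cpl - build a CPL_RX_PHYS_DSGL destination gather list:
 * a cpl_rx_phys_dsgl header (opcode, response qid, number of entries)
 * followed by phys_sge_pairs blocks that each pack eight 16-bit
 * lengths and eight 64-bit DMA addresses for the result data.
 */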
385 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
386 			   struct scatterlist *sg,
387 			   struct phys_sge_parm *sg_param)
388 {
389 	struct phys_sge_pairs *to;
390 	unsigned int len = 0, left_size = sg_param->obsize;
391 	unsigned int nents = sg_param->nents, i, j = 0;
392 
393 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
394 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
395 	phys_cpl->pcirlxorder_to_noofsgentr =
396 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
397 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
398 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
400 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
401 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
402 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
403 	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
404 	phys_cpl->rss_hdr_int.hash_val = 0;
405 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
406 				       sizeof(struct cpl_rx_phys_dsgl));
407 	for (i = 0; nents && left_size; to++) {
408 		for (j = 0; j < 8 && nents && left_size; j++, nents--) {
409 			len = min(left_size, sg_dma_len(sg));
410 			to->len[j] = htons(len);
411 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
412 			left_size -= len;
413 			sg = sg_next(sg);
414 		}
415 	}
416 }
417 
418 static inline int map_writesg_phys_cpl(struct device *dev,
419 					struct cpl_rx_phys_dsgl *phys_cpl,
420 					struct scatterlist *sg,
421 					struct phys_sge_parm *sg_param)
422 {
423 	if (!sg || !sg_param->nents)
424 		return -EINVAL;
425 
426 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
427 	if (sg_param->nents == 0) {
428 		pr_err("CHCR : DMA mapping failed\n");
429 		return -EINVAL;
430 	}
431 	write_phys_cpl(phys_cpl, sg, sg_param);
432 	return 0;
433 }
434 
435 static inline int get_aead_subtype(struct crypto_aead *aead)
436 {
437 	struct aead_alg *alg = crypto_aead_alg(aead);
438 	struct chcr_alg_template *chcr_crypto_alg =
439 		container_of(alg, struct chcr_alg_template, alg.aead);
440 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
441 }
442 
443 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
444 {
445 	struct crypto_alg *alg = tfm->__crt_alg;
446 	struct chcr_alg_template *chcr_crypto_alg =
447 		container_of(alg, struct chcr_alg_template, alg.crypto);
448 
449 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
450 }
451 
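/*
 * The two helpers below append payload to the skb without copying:
 * each takes a reference on the backing page and attaches it as a page
 * fragment, growing skb->len, data_len and truesize by hand.
 */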
452 static inline void write_buffer_to_skb(struct sk_buff *skb,
453 					unsigned int *frags,
454 					char *bfr,
455 					u8 bfr_len)
456 {
457 	skb->len += bfr_len;
458 	skb->data_len += bfr_len;
459 	skb->truesize += bfr_len;
460 	get_page(virt_to_page(bfr));
461 	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
462 			   offset_in_page(bfr), bfr_len);
463 	(*frags)++;
464 }
465 
466 
467 static inline void
468 write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
469 			struct scatterlist *sg, unsigned int count)
470 {
471 	struct page *spage;
472 	unsigned int page_len;
473 
474 	skb->len += count;
475 	skb->data_len += count;
476 	skb->truesize += count;
477 
478 	while (count > 0) {
479 		if (!sg || (!(sg->length)))
480 			break;
481 		spage = sg_page(sg);
482 		get_page(spage);
483 		page_len = min(sg->length, count);
484 		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
485 		(*frags)++;
486 		count -= page_len;
487 		sg = sg_next(sg);
488 	}
489 }
490 
491 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
492 {
493 	struct adapter *adap = netdev2adap(dev);
494 	struct sge_uld_txq_info *txq_info =
495 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
496 	struct sge_uld_txq *txq;
497 	int ret = 0;
498 
499 	local_bh_disable();
500 	txq = &txq_info->uldtxq[idx];
501 	spin_lock(&txq->sendq.lock);
502 	if (txq->full)
503 		ret = -1;
504 	spin_unlock(&txq->sendq.lock);
505 	local_bh_enable();
506 	return ret;
507 }
508 
509 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
510 			       struct _key_ctx *key_ctx)
511 {
512 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
513 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
514 	} else {
515 		memcpy(key_ctx->key,
516 		       ablkctx->key + (ablkctx->enckey_len >> 1),
517 		       ablkctx->enckey_len >> 1);
518 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
519 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
520 	}
521 	return 0;
522 }
523 static int chcr_sg_ent_in_wr(struct scatterlist *src,
524 			     struct scatterlist *dst,
525 			     unsigned int minsg,
526 			     unsigned int space,
527 			     short int *sent,
528 			     short int *dent)
529 {
530 	int srclen = 0, dstlen = 0;
531 	int srcsg = minsg, dstsg = 0;
532 
533 	*sent = 0;
534 	*dent = 0;
535 	while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
536 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
537 		srclen += src->length;
538 		srcsg++;
539 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
540 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
541 			if (srclen <= dstlen)
542 				break;
543 			dstlen += dst->length;
544 			dst = sg_next(dst);
545 			dstsg++;
546 		}
547 		src = sg_next(src);
548 	}
549 	*sent = srcsg - minsg;
550 	*dent = dstsg;
551 	return min(srclen, dstlen);
552 }
553 
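/*
 * chcr_cipher_fallback - run a request through the software skcipher.
 * Used when the hardware path cannot make progress, e.g. when
 * chcr_sg_ent_in_wr() finds no usable chunk that fits in a single WR.
 */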
554 static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
555 				u32 flags,
556 				struct scatterlist *src,
557 				struct scatterlist *dst,
558 				unsigned int nbytes,
559 				u8 *iv,
560 				unsigned short op_type)
561 {
562 	int err;
563 
564 	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
565 	skcipher_request_set_tfm(subreq, cipher);
566 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
567 	skcipher_request_set_crypt(subreq, src, dst,
568 				   nbytes, iv);
569 
570 	err = op_type ? crypto_skcipher_decrypt(subreq) :
571 		crypto_skcipher_encrypt(subreq);
572 	skcipher_request_zero(subreq);
573 
574 	return err;
575 
576 }
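/*
 * create_wreq - fill the FW_CRYPTO_LOOKASIDE_WR header shared by the
 * cipher, hash and AEAD paths: immediate versus fragment data layout,
 * total length in 16-byte units, the response (rx) queue id, and the
 * ULPTX sub-command length covering key context and SC entries.
 */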
577 static inline void create_wreq(struct chcr_context *ctx,
578 			       struct chcr_wr *chcr_req,
579 			       void *req, struct sk_buff *skb,
580 			       int kctx_len, int hash_sz,
581 			       int is_iv,
582 			       unsigned int sc_len,
583 			       unsigned int lcb)
584 {
585 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
586 	int iv_loc = IV_DSGL;
587 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
588 	unsigned int immdatalen = 0, nr_frags = 0;
589 
590 	if (is_ofld_imm(skb)) {
591 		immdatalen = skb->data_len;
592 		iv_loc = IV_IMMEDIATE;
593 	} else {
594 		nr_frags = skb_shinfo(skb)->nr_frags;
595 	}
596 
597 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
598 				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
599 	chcr_req->wreq.pld_size_hash_size =
600 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
601 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
602 	chcr_req->wreq.len16_pkd =
603 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
604 				    (calc_tx_flits_ofld(skb) * 8), 16)));
605 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
606 	chcr_req->wreq.rx_chid_to_rx_q_id =
607 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
608 				is_iv ? iv_loc : IV_NOP, !!lcb,
609 				ctx->tx_qidx);
610 
611 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
612 						       qid);
613 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
614 					16) - ((sizeof(chcr_req->wreq)) >> 4)));
615 
616 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
617 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
618 				   sizeof(chcr_req->key_ctx) +
619 				   kctx_len + sc_len + immdatalen);
620 }
621 
622 /**
623  *	create_cipher_wr - form the WR for cipher operations
624  *	@wrparam: work request parameters, bundling the cipher request,
625  *		  the number of bytes to process in this WR and the
626  *		  ingress qid where the response of this WR should be
627  *		  received
628  */
629 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
630 {
631 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
632 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
633 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
634 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
635 	struct sk_buff *skb = NULL;
636 	struct chcr_wr *chcr_req;
637 	struct cpl_rx_phys_dsgl *phys_cpl;
638 	struct chcr_blkcipher_req_ctx *reqctx =
639 		ablkcipher_request_ctx(wrparam->req);
640 	struct phys_sge_parm sg_param;
641 	unsigned int frags = 0, transhdr_len, phys_dsgl;
642 	int error;
643 	unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
644 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
645 			GFP_KERNEL : GFP_ATOMIC;
646 	struct adapter *adap = padap(ctx->dev);
647 
648 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
649 
650 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
651 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
652 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
653 	if (!skb) {
654 		error = -ENOMEM;
655 		goto err;
656 	}
657 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
658 	chcr_req = __skb_put_zero(skb, transhdr_len);
659 	chcr_req->sec_cpl.op_ivinsrtofst =
660 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
661 
662 	chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
663 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
664 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
665 
666 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
667 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
668 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
669 							 ablkctx->ciph_mode,
670 							 0, 0, ivsize >> 1);
671 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
672 							  0, 1, phys_dsgl);
673 
674 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
675 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
676 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
677 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
678 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
679 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
680 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
681 	} else {
682 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
683 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
684 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
685 			       ablkctx->enckey_len);
686 		} else {
687 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
688 			       (ablkctx->enckey_len >> 1),
689 			       ablkctx->enckey_len >> 1);
690 			memcpy(chcr_req->key_ctx.key +
691 			       (ablkctx->enckey_len >> 1),
692 			       ablkctx->key,
693 			       ablkctx->enckey_len >> 1);
694 		}
695 	}
696 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
697 	sg_param.nents = reqctx->dst_nents;
698 	sg_param.obsize =  wrparam->bytes;
699 	sg_param.qid = wrparam->qid;
700 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
701 				       reqctx->dst, &sg_param);
702 	if (error)
703 		goto map_fail1;
704 
705 	skb_set_transport_header(skb, transhdr_len);
706 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
707 	write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
708 	atomic_inc(&adap->chcr_stats.cipher_rqst);
709 	create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
710 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
711 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
712 	reqctx->skb = skb;
713 	skb_get(skb);
714 	return skb;
715 map_fail1:
716 	kfree_skb(skb);
717 err:
718 	return ERR_PTR(error);
719 }
720 
721 static inline int chcr_keyctx_ck_size(unsigned int keylen)
722 {
723 	int ck_size = 0;
724 
725 	if (keylen == AES_KEYSIZE_128)
726 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
727 	else if (keylen == AES_KEYSIZE_192)
728 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
729 	else if (keylen == AES_KEYSIZE_256)
730 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
731 	else
732 		ck_size = 0;
733 
734 	return ck_size;
735 }
736 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
737 				       const u8 *key,
738 				       unsigned int keylen)
739 {
740 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
741 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
742 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
743 	int err = 0;
744 
745 	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
746 	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
747 				  CRYPTO_TFM_REQ_MASK);
748 	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
749 	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
750 	tfm->crt_flags |=
751 		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
752 		CRYPTO_TFM_RES_MASK;
753 	return err;
754 }
755 
756 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
757 			       const u8 *key,
758 			       unsigned int keylen)
759 {
760 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
761 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
762 	unsigned int ck_size, context_size;
763 	u16 alignment = 0;
764 	int err;
765 
766 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
767 	if (err)
768 		goto badkey_err;
769 
770 	ck_size = chcr_keyctx_ck_size(keylen);
771 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
772 	memcpy(ablkctx->key, key, keylen);
773 	ablkctx->enckey_len = keylen;
774 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
775 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
776 			keylen + alignment) >> 4;
777 
778 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
779 						0, 0, context_size);
780 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
781 	return 0;
782 badkey_err:
783 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
784 	ablkctx->enckey_len = 0;
785 
786 	return err;
787 }
788 
789 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
790 				   const u8 *key,
791 				   unsigned int keylen)
792 {
793 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
794 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
795 	unsigned int ck_size, context_size;
796 	u16 alignment = 0;
797 	int err;
798 
799 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
800 	if (err)
801 		goto badkey_err;
802 	ck_size = chcr_keyctx_ck_size(keylen);
803 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
804 	memcpy(ablkctx->key, key, keylen);
805 	ablkctx->enckey_len = keylen;
806 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
807 			keylen + alignment) >> 4;
808 
809 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
810 						0, 0, context_size);
811 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
812 
813 	return 0;
814 badkey_err:
815 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
816 	ablkctx->enckey_len = 0;
817 
818 	return err;
819 }
820 
821 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
822 				   const u8 *key,
823 				   unsigned int keylen)
824 {
825 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
826 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
827 	unsigned int ck_size, context_size;
828 	u16 alignment = 0;
829 	int err;
830 
831 	if (keylen < CTR_RFC3686_NONCE_SIZE)
832 		return -EINVAL;
833 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
834 	       CTR_RFC3686_NONCE_SIZE);
835 
836 	keylen -= CTR_RFC3686_NONCE_SIZE;
837 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
838 	if (err)
839 		goto badkey_err;
840 
841 	ck_size = chcr_keyctx_ck_size(keylen);
842 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
843 	memcpy(ablkctx->key, key, keylen);
844 	ablkctx->enckey_len = keylen;
845 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
846 			keylen + alignment) >> 4;
847 
848 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
849 						0, 0, context_size);
850 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
851 
852 	return 0;
853 badkey_err:
854 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
855 	ablkctx->enckey_len = 0;
856 
857 	return err;
858 }
859 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
860 {
861 	unsigned int size = AES_BLOCK_SIZE;
862 	__be32 *b = (__be32 *)(dstiv + size);
863 	u32 c, prev;
864 
865 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
866 	for (; size >= 4; size -= 4) {
867 		prev = be32_to_cpu(*--b);
868 		c = prev + add;
869 		*b = cpu_to_be32(c);
870 		if (prev < c)
871 			break;
872 		add = 1;
873 	}
874 
875 }
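/*
 * ctr_add_iv() treats the IV as a 128-bit big-endian counter.  For
 * example, an IV whose last 32-bit word is 0xffffffff, incremented by
 * add = 1, wraps that word to 0 and carries 1 into the next more
 * significant word, exactly as an AES-CTR counter would.
 */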
876 
877 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
878 {
879 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
880 	u64 c;
881 	u32 temp = be32_to_cpu(*--b);
882 
883 	temp = ~temp;
884 	c = (u64)temp + 1; /* number of blocks we can process without overflow */
885 	if ((bytes / AES_BLOCK_SIZE) > c)
886 		bytes = c * AES_BLOCK_SIZE;
887 	return bytes;
888 }
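/*
 * Example: if the low 32-bit counter word is 0xfffffffe then only
 * c = ~0xfffffffe + 1 = 2 blocks fit before that word wraps, so a 1 KB
 * request is trimmed to 2 * AES_BLOCK_SIZE = 32 bytes here and the
 * remainder goes out in a follow-up WR after the carry is applied.
 */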
889 
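/*
 * chcr_update_tweak - recompute the XTS tweak at the resume point.
 * The tweak for block n is E_K2(IV) * x^n in GF(2^128): encrypt the IV
 * with the second half of the key, multiply by x once per block
 * already processed, then decrypt the result back so the hardware,
 * which applies its own E_K2, can be handed a plain IV.
 */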
890 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
891 {
892 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
893 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
894 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
895 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
896 	struct crypto_cipher *cipher;
897 	int ret, i;
898 	u8 *key;
899 	unsigned int keylen;
900 
901 	cipher = ablkctx->aes_generic;
902 	memcpy(iv, req->info, AES_BLOCK_SIZE);
903 
904 	keylen = ablkctx->enckey_len / 2;
905 	key = ablkctx->key + keylen;
906 	ret = crypto_cipher_setkey(cipher, key, keylen);
907 	if (ret)
908 		goto out;
909 
910 	crypto_cipher_encrypt_one(cipher, iv, iv);
911 	for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
912 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
913 
914 	crypto_cipher_decrypt_one(cipher, iv, iv);
915 out:
916 	return ret;
917 }
918 
919 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
920 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
921 {
922 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
923 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
924 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
925 	int ret = 0;
926 
927 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
928 		ctr_add_iv(iv, req->info, (reqctx->processed /
929 			   AES_BLOCK_SIZE));
930 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
931 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
932 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
933 						AES_BLOCK_SIZE) + 1);
934 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
935 		ret = chcr_update_tweak(req, iv);
936 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
937 		if (reqctx->op)
938 			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
939 					   16,
940 					   reqctx->processed - AES_BLOCK_SIZE);
941 		else
942 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
943 	}
944 
945 	return ret;
946 
947 }
948 
949 /* A separate function is needed for the final IV because in RFC3686 the
950  * initial counter starts from 1 and the IV buffer is only 8 bytes, which
951  * remains constant across subsequent update requests.
952  */
953 
954 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
955 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
956 {
957 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
958 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
959 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
960 	int ret = 0;
961 
962 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
963 		ctr_add_iv(iv, req->info, (reqctx->processed /
964 			   AES_BLOCK_SIZE));
965 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
966 		ret = chcr_update_tweak(req, iv);
967 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
968 		if (reqctx->op)
969 			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
970 					   16,
971 					   reqctx->processed - AES_BLOCK_SIZE);
972 		else
973 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
974 
975 	}
976 	return ret;
977 
978 }
979 
980 
981 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
982 				   unsigned char *input, int err)
983 {
984 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
985 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
986 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
987 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
988 	struct sk_buff *skb;
989 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
990 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
991 	struct  cipher_wr_param wrparam;
992 	int bytes;
993 
994 	dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
995 		     DMA_FROM_DEVICE);
996 
997 	if (reqctx->skb) {
998 		kfree_skb(reqctx->skb);
999 		reqctx->skb = NULL;
1000 	}
1001 	if (err)
1002 		goto complete;
1003 
1004 	if (req->nbytes == reqctx->processed) {
1005 		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1006 		goto complete;
1007 	}
1008 
1009 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1010 					    ctx->tx_qidx))) {
1011 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1012 			err = -EBUSY;
1013 			goto complete;
1014 		}
1015 
1016 	}
1017 	wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
1018 				       reqctx->processed);
1019 	reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
1020 					 reqctx->processed);
1021 	if (!wrparam.srcsg || !reqctx->dst) {
1022 		pr_err("Input sg list length less than nbytes\n");
1023 		err = -EINVAL;
1024 		goto complete;
1025 	}
1026 	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
1027 				 SPACE_LEFT(ablkctx->enckey_len),
1028 				 &wrparam.snent, &reqctx->dst_nents);
1029 	if ((bytes + reqctx->processed) >= req->nbytes)
1030 		bytes  = req->nbytes - reqctx->processed;
1031 	else
1032 		bytes = ROUND_16(bytes);
1033 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1034 	if (err)
1035 		goto complete;
1036 
1037 	if (unlikely(bytes == 0)) {
1038 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1039 				     req->base.flags,
1040 				     wrparam.srcsg,
1041 				     reqctx->dst,
1042 				     req->nbytes - reqctx->processed,
1043 				     reqctx->iv,
1044 				     reqctx->op);
1045 		goto complete;
1046 	}
1047 
1048 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1049 	    CRYPTO_ALG_SUB_TYPE_CTR)
1050 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1051 	reqctx->processed += bytes;
1052 	wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
1053 	wrparam.req = req;
1054 	wrparam.bytes = bytes;
1055 	skb = create_cipher_wr(&wrparam);
1056 	if (IS_ERR(skb)) {
1057 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1058 		err = PTR_ERR(skb);
1059 		goto complete;
1060 	}
1061 	skb->dev = u_ctx->lldi.ports[0];
1062 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1063 	chcr_send_wr(skb);
1064 	return 0;
1065 complete:
1066 	free_new_sg(reqctx->newdstsg);
1067 	reqctx->newdstsg = NULL;
1068 	req->base.complete(&req->base, err);
1069 	return err;
1070 }
1071 
1072 static int process_cipher(struct ablkcipher_request *req,
1073 				  unsigned short qid,
1074 				  struct sk_buff **skb,
1075 				  unsigned short op_type)
1076 {
1077 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1078 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1079 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1080 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1081 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1082 	struct	cipher_wr_param wrparam;
1083 	int bytes, nents, err = -EINVAL;
1084 
1085 	reqctx->newdstsg = NULL;
1086 	reqctx->processed = 0;
1087 	if (!req->info)
1088 		goto error;
1089 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1090 	    (req->nbytes == 0) ||
1091 	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1092 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1093 		       ablkctx->enckey_len, req->nbytes, ivsize);
1094 		goto error;
1095 	}
1096 	wrparam.srcsg = req->src;
1097 	if (is_newsg(req->dst, &nents)) {
1098 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1099 		if (IS_ERR(reqctx->newdstsg))
1100 			return PTR_ERR(reqctx->newdstsg);
1101 		reqctx->dstsg = reqctx->newdstsg;
1102 	} else {
1103 		reqctx->dstsg = req->dst;
1104 	}
1105 	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
1106 				 SPACE_LEFT(ablkctx->enckey_len),
1107 				 &wrparam.snent,
1108 				 &reqctx->dst_nents);
1109 	if ((bytes + reqctx->processed) >= req->nbytes)
1110 		bytes  = req->nbytes - reqctx->processed;
1111 	else
1112 		bytes = ROUND_16(bytes);
1113 	if (unlikely(bytes > req->nbytes))
1114 		bytes = req->nbytes;
1115 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1116 				  CRYPTO_ALG_SUB_TYPE_CTR) {
1117 		bytes = adjust_ctr_overflow(req->info, bytes);
1118 	}
1119 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1120 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1121 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1122 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1123 				CTR_RFC3686_IV_SIZE);
1124 
1125 		/* initialize counter portion of counter block */
1126 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1127 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1128 
1129 	} else {
1130 
1131 		memcpy(reqctx->iv, req->info, ivsize);
1132 	}
1133 	if (unlikely(bytes == 0)) {
1134 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1135 					   req->base.flags,
1136 					   req->src,
1137 					   req->dst,
1138 					   req->nbytes,
1139 					   req->info,
1140 					   op_type);
1141 		goto error;
1142 	}
1143 	reqctx->processed = bytes;
1144 	reqctx->dst = reqctx->dstsg;
1145 	reqctx->op = op_type;
1146 	wrparam.qid = qid;
1147 	wrparam.req = req;
1148 	wrparam.bytes = bytes;
1149 	*skb = create_cipher_wr(&wrparam);
1150 	if (IS_ERR(*skb)) {
1151 		err = PTR_ERR(*skb);
1152 		goto error;
1153 	}
1154 
1155 	return 0;
1156 error:
1157 	free_new_sg(reqctx->newdstsg);
1158 	reqctx->newdstsg = NULL;
1159 	return err;
1160 }
1161 
1162 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1163 {
1164 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1165 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1166 	struct sk_buff *skb = NULL;
1167 	int err;
1168 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1169 
1170 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1171 					    ctx->tx_qidx))) {
1172 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1173 			return -EBUSY;
1174 	}
1175 
1176 	err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1177 			       CHCR_ENCRYPT_OP);
1178 	if (err || !skb)
1179 		return  err;
1180 	skb->dev = u_ctx->lldi.ports[0];
1181 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1182 	chcr_send_wr(skb);
1183 	return -EINPROGRESS;
1184 }
1185 
1186 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1187 {
1188 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1189 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1190 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1191 	struct sk_buff *skb = NULL;
1192 	int err;
1193 
1194 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1195 					    ctx->tx_qidx))) {
1196 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1197 			return -EBUSY;
1198 	}
1199 
1200 	err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1201 			       CHCR_DECRYPT_OP);
1202 	if (err || !skb)
1203 		return err;
1204 	skb->dev = u_ctx->lldi.ports[0];
1205 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1206 	chcr_send_wr(skb);
1207 	return -EINPROGRESS;
1208 }
1209 
1210 static int chcr_device_init(struct chcr_context *ctx)
1211 {
1212 	struct uld_ctx *u_ctx = NULL;
1213 	struct adapter *adap;
1214 	unsigned int id;
1215 	int txq_perchan, txq_idx, ntxq;
1216 	int err = 0, rxq_perchan, rxq_idx;
1217 
1218 	id = smp_processor_id();
1219 	if (!ctx->dev) {
1220 		u_ctx = assign_chcr_device();
1221 		if (!u_ctx) {
1222 			pr_err("chcr device assignment failed\n");
1223 			goto out;
1224 		}
1225 		ctx->dev = u_ctx->dev;
1226 		adap = padap(ctx->dev);
1227 		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1228 				    adap->vres.ncrypto_fc);
1229 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1230 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1231 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1232 		rxq_idx += id % rxq_perchan;
1233 		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1234 		txq_idx += id % txq_perchan;
1235 		spin_lock(&ctx->dev->lock_chcr_dev);
1236 		ctx->rx_qidx = rxq_idx;
1237 		ctx->tx_qidx = txq_idx;
1238 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1239 		ctx->dev->rx_channel_id = 0;
1240 		spin_unlock(&ctx->dev->lock_chcr_dev);
1241 	}
1242 out:
1243 	return err;
1244 }
1245 
1246 static int chcr_cra_init(struct crypto_tfm *tfm)
1247 {
1248 	struct crypto_alg *alg = tfm->__crt_alg;
1249 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1250 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1251 
1252 	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1253 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1254 	if (IS_ERR(ablkctx->sw_cipher)) {
1255 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1256 		return PTR_ERR(ablkctx->sw_cipher);
1257 	}
1258 
1259 	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1260 		/* To update the tweak */
1261 		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1262 		if (IS_ERR(ablkctx->aes_generic)) {
1263 			pr_err("failed to allocate aes cipher for tweak\n");
1264 			return PTR_ERR(ablkctx->aes_generic);
1265 		}
1266 	} else
1267 		ablkctx->aes_generic = NULL;
1268 
1269 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1270 	return chcr_device_init(crypto_tfm_ctx(tfm));
1271 }
1272 
1273 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1274 {
1275 	struct crypto_alg *alg = tfm->__crt_alg;
1276 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1277 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1278 
1279 	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1280 	 * cannot be used as the fallback in chcr_handle_cipher_resp.
1281 	 */
1282 	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1283 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1284 	if (IS_ERR(ablkctx->sw_cipher)) {
1285 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1286 		return PTR_ERR(ablkctx->sw_cipher);
1287 	}
1288 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1289 	return chcr_device_init(crypto_tfm_ctx(tfm));
1290 }
1291 
1292 
1293 static void chcr_cra_exit(struct crypto_tfm *tfm)
1294 {
1295 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1296 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1297 
1298 	crypto_free_skcipher(ablkctx->sw_cipher);
1299 	if (ablkctx->aes_generic)
1300 		crypto_free_cipher(ablkctx->aes_generic);
1301 }
1302 
1303 static int get_alg_config(struct algo_param *params,
1304 			  unsigned int auth_size)
1305 {
1306 	switch (auth_size) {
1307 	case SHA1_DIGEST_SIZE:
1308 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1309 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1310 		params->result_size = SHA1_DIGEST_SIZE;
1311 		break;
1312 	case SHA224_DIGEST_SIZE:
1313 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1314 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1315 		params->result_size = SHA256_DIGEST_SIZE;
1316 		break;
1317 	case SHA256_DIGEST_SIZE:
1318 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1319 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1320 		params->result_size = SHA256_DIGEST_SIZE;
1321 		break;
1322 	case SHA384_DIGEST_SIZE:
1323 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1324 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1325 		params->result_size = SHA512_DIGEST_SIZE;
1326 		break;
1327 	case SHA512_DIGEST_SIZE:
1328 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1329 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1330 		params->result_size = SHA512_DIGEST_SIZE;
1331 		break;
1332 	default:
1333 		pr_err("chcr : ERROR, unsupported digest size\n");
1334 		return -EINVAL;
1335 	}
1336 	return 0;
1337 }
1338 
1339 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1340 {
1341 		crypto_free_shash(base_hash);
1342 }
1343 
1344 /**
1345  *	create_hash_wr - Create hash work request
1346  *	@req - ahash request
 *	@param - hash work request parameters
1347  */
1348 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1349 				      struct hash_wr_param *param)
1350 {
1351 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1352 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1353 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1354 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1355 	struct sk_buff *skb = NULL;
1356 	struct chcr_wr *chcr_req;
1357 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
1358 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1359 	unsigned int kctx_len = 0;
1360 	u8 hash_size_in_response = 0;
1361 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1362 		GFP_ATOMIC;
1363 	struct adapter *adap = padap(ctx->dev);
1364 
1365 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
1366 	kctx_len = param->alg_prm.result_size + iopad_alignment;
1367 	if (param->opad_needed)
1368 		kctx_len += param->alg_prm.result_size + iopad_alignment;
1369 
1370 	if (req_ctx->result)
1371 		hash_size_in_response = digestsize;
1372 	else
1373 		hash_size_in_response = param->alg_prm.result_size;
1374 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
1375 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1376 	if (!skb)
1377 		return skb;
1378 
1379 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1380 	chcr_req = __skb_put_zero(skb, transhdr_len);
1381 
1382 	chcr_req->sec_cpl.op_ivinsrtofst =
1383 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
1384 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1385 
1386 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1387 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1388 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1389 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1390 	chcr_req->sec_cpl.seqno_numivs =
1391 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1392 					 param->opad_needed, 0);
1393 
1394 	chcr_req->sec_cpl.ivgen_hdrlen =
1395 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1396 
1397 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1398 	       param->alg_prm.result_size);
1399 
1400 	if (param->opad_needed)
1401 		memcpy(chcr_req->key_ctx.key +
1402 		       ((param->alg_prm.result_size <= 32) ? 32 :
1403 			CHCR_HASH_MAX_DIGEST_SIZE),
1404 		       hmacctx->opad, param->alg_prm.result_size);
1405 
1406 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1407 					    param->alg_prm.mk_size, 0,
1408 					    param->opad_needed,
1409 					    ((kctx_len +
1410 					     sizeof(chcr_req->key_ctx)) >> 4));
1411 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1412 
1413 	skb_set_transport_header(skb, transhdr_len);
1414 	if (param->bfr_len != 0)
1415 		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
1416 				    param->bfr_len);
1417 	if (param->sg_len != 0)
1418 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
1419 	atomic_inc(&adap->chcr_stats.digest_rqst);
1420 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
1421 		    hash_size_in_response, 0, DUMMY_BYTES, 0);
1422 	req_ctx->skb = skb;
1423 	skb_get(skb);
1424 	return skb;
1425 }
1426 
1427 static int chcr_ahash_update(struct ahash_request *req)
1428 {
1429 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1430 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1431 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1432 	struct uld_ctx *u_ctx = NULL;
1433 	struct sk_buff *skb;
1434 	u8 remainder = 0, bs;
1435 	unsigned int nbytes = req->nbytes;
1436 	struct hash_wr_param params;
1437 
1438 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1439 
1440 	u_ctx = ULD_CTX(ctx);
1441 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1442 					    ctx->tx_qidx))) {
1443 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1444 			return -EBUSY;
1445 	}
1446 
1447 	if (nbytes + req_ctx->reqlen >= bs) {
1448 		remainder = (nbytes + req_ctx->reqlen) % bs;
1449 		nbytes = nbytes + req_ctx->reqlen - remainder;
1450 	} else {
1451 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1452 				   + req_ctx->reqlen, nbytes, 0);
1453 		req_ctx->reqlen += nbytes;
1454 		return 0;
1455 	}
1456 
1457 	params.opad_needed = 0;
1458 	params.more = 1;
1459 	params.last = 0;
1460 	params.sg_len = nbytes - req_ctx->reqlen;
1461 	params.bfr_len = req_ctx->reqlen;
1462 	params.scmd1 = 0;
1463 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1464 	req_ctx->result = 0;
1465 	req_ctx->data_len += params.sg_len + params.bfr_len;
1466 	skb = create_hash_wr(req, &params);
1467 	if (!skb)
1468 		return -ENOMEM;
1469 
1470 	if (remainder) {
1471 		u8 *temp;
1472 		/* Swap buffers */
1473 		temp = req_ctx->reqbfr;
1474 		req_ctx->reqbfr = req_ctx->skbfr;
1475 		req_ctx->skbfr = temp;
1476 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1477 				   req_ctx->reqbfr, remainder, req->nbytes -
1478 				   remainder);
1479 	}
1480 	req_ctx->reqlen = remainder;
1481 	skb->dev = u_ctx->lldi.ports[0];
1482 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1483 	chcr_send_wr(skb);
1484 
1485 	return -EINPROGRESS;
1486 }
1487 
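/*
 * create_last_hash_block - build an MD-style final padding block:
 * 0x80 followed by zeroes, with the total message length in bits
 * (scmd1 << 3) stored big-endian in the last 8 bytes: offset 56 for
 * 64-byte blocks, offset 120 for 128-byte blocks.
 */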
1488 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1489 {
1490 	memset(bfr_ptr, 0, bs);
1491 	*bfr_ptr = 0x80;
1492 	if (bs == 64)
1493 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1494 	else
1495 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1496 }
1497 
1498 static int chcr_ahash_final(struct ahash_request *req)
1499 {
1500 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1501 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1502 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1503 	struct hash_wr_param params;
1504 	struct sk_buff *skb;
1505 	struct uld_ctx *u_ctx = NULL;
1506 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1507 
1508 	u_ctx = ULD_CTX(ctx);
1509 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1510 		params.opad_needed = 1;
1511 	else
1512 		params.opad_needed = 0;
1513 	params.sg_len = 0;
1514 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1515 	req_ctx->result = 1;
1516 	params.bfr_len = req_ctx->reqlen;
1517 	req_ctx->data_len += params.bfr_len + params.sg_len;
1518 	if (req_ctx->reqlen == 0) {
1519 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1520 		params.last = 0;
1521 		params.more = 1;
1522 		params.scmd1 = 0;
1523 		params.bfr_len = bs;
1524 
1525 	} else {
1526 		params.scmd1 = req_ctx->data_len;
1527 		params.last = 1;
1528 		params.more = 0;
1529 	}
1530 	skb = create_hash_wr(req, &params);
1531 	if (!skb)
1532 		return -ENOMEM;
1533 
1534 	skb->dev = u_ctx->lldi.ports[0];
1535 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1536 	chcr_send_wr(skb);
1537 	return -EINPROGRESS;
1538 }
1539 
1540 static int chcr_ahash_finup(struct ahash_request *req)
1541 {
1542 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1543 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1544 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1545 	struct uld_ctx *u_ctx = NULL;
1546 	struct sk_buff *skb;
1547 	struct hash_wr_param params;
1548 	u8  bs;
1549 
1550 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1551 	u_ctx = ULD_CTX(ctx);
1552 
1553 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1554 					    ctx->tx_qidx))) {
1555 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1556 			return -EBUSY;
1557 	}
1558 
1559 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1560 		params.opad_needed = 1;
1561 	else
1562 		params.opad_needed = 0;
1563 
1564 	params.sg_len = req->nbytes;
1565 	params.bfr_len = req_ctx->reqlen;
1566 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1567 	req_ctx->data_len += params.bfr_len + params.sg_len;
1568 	req_ctx->result = 1;
1569 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1570 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1571 		params.last = 0;
1572 		params.more = 1;
1573 		params.scmd1 = 0;
1574 		params.bfr_len = bs;
1575 	} else {
1576 		params.scmd1 = req_ctx->data_len;
1577 		params.last = 1;
1578 		params.more = 0;
1579 	}
1580 
1581 	skb = create_hash_wr(req, &params);
1582 	if (!skb)
1583 		return -ENOMEM;
1584 
1585 	skb->dev = u_ctx->lldi.ports[0];
1586 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1587 	chcr_send_wr(skb);
1588 
1589 	return -EINPROGRESS;
1590 }
1591 
1592 static int chcr_ahash_digest(struct ahash_request *req)
1593 {
1594 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1595 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1596 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1597 	struct uld_ctx *u_ctx = NULL;
1598 	struct sk_buff *skb;
1599 	struct hash_wr_param params;
1600 	u8  bs;
1601 
1602 	rtfm->init(req);
1603 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1604 
1605 	u_ctx = ULD_CTX(ctx);
1606 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1607 					    ctx->tx_qidx))) {
1608 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1609 			return -EBUSY;
1610 	}
1611 
1612 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1613 		params.opad_needed = 1;
1614 	else
1615 		params.opad_needed = 0;
1616 
1617 	params.last = 0;
1618 	params.more = 0;
1619 	params.sg_len = req->nbytes;
1620 	params.bfr_len = 0;
1621 	params.scmd1 = 0;
1622 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1623 	req_ctx->result = 1;
1624 	req_ctx->data_len += params.bfr_len + params.sg_len;
1625 
1626 	if (req->nbytes == 0) {
1627 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1628 		params.more = 1;
1629 		params.bfr_len = bs;
1630 	}
1631 
1632 	skb = create_hash_wr(req, &params);
1633 	if (!skb)
1634 		return -ENOMEM;
1635 
1636 	skb->dev = u_ctx->lldi.ports[0];
1637 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1638 	chcr_send_wr(skb);
1639 	return -EINPROGRESS;
1640 }
1641 
1642 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1643 {
1644 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1645 	struct chcr_ahash_req_ctx *state = out;
1646 
1647 	state->reqlen = req_ctx->reqlen;
1648 	state->data_len = req_ctx->data_len;
1649 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1650 	memcpy(state->partial_hash, req_ctx->partial_hash,
1651 	       CHCR_HASH_MAX_DIGEST_SIZE);
1652 	return 0;
1653 }
1654 
1655 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1656 {
1657 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1658 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1659 
1660 	req_ctx->reqlen = state->reqlen;
1661 	req_ctx->data_len = state->data_len;
1662 	req_ctx->reqbfr = req_ctx->bfr1;
1663 	req_ctx->skbfr = req_ctx->bfr2;
1664 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1665 	memcpy(req_ctx->partial_hash, state->partial_hash,
1666 	       CHCR_HASH_MAX_DIGEST_SIZE);
1667 	return 0;
1668 }
1669 
1670 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1671 			     unsigned int keylen)
1672 {
1673 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1674 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1675 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1676 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1677 	unsigned int i, err = 0, updated_digestsize;
1678 
1679 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1680 
1681 	/* Use the key to calculate the ipad and opad. The ipad will be sent
1682 	 * with the first request's data; the opad will be sent with the final
1683 	 * hash result. They are kept in hmacctx->ipad and hmacctx->opad.
1684 	 */
1685 	shash->tfm = hmacctx->base_hash;
1686 	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1687 	if (keylen > bs) {
1688 		err = crypto_shash_digest(shash, key, keylen,
1689 					  hmacctx->ipad);
1690 		if (err)
1691 			goto out;
1692 		keylen = digestsize;
1693 	} else {
1694 		memcpy(hmacctx->ipad, key, keylen);
1695 	}
1696 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
1697 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
1698 
1699 	for (i = 0; i < bs / sizeof(int); i++) {
1700 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1701 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1702 	}
1703 
1704 	updated_digestsize = digestsize;
1705 	if (digestsize == SHA224_DIGEST_SIZE)
1706 		updated_digestsize = SHA256_DIGEST_SIZE;
1707 	else if (digestsize == SHA384_DIGEST_SIZE)
1708 		updated_digestsize = SHA512_DIGEST_SIZE;
1709 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1710 					hmacctx->ipad, digestsize);
1711 	if (err)
1712 		goto out;
1713 	chcr_change_order(hmacctx->ipad, updated_digestsize);
1714 
1715 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
1716 					hmacctx->opad, digestsize);
1717 	if (err)
1718 		goto out;
1719 	chcr_change_order(hmacctx->opad, updated_digestsize);
1720 out:
1721 	return err;
1722 }
1723 
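/*
 * XTS setkey: the supplied key is two AES keys concatenated, so
 * key_len << 2 (i.e. (key_len / 2) * 8) is the single-key length in
 * bits used when deriving the reverse-round decryption key.
 */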
1724 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1725 			       unsigned int key_len)
1726 {
1727 	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
1728 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1729 	unsigned short context_size = 0;
1730 	int err;
1731 
1732 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
1733 	if (err)
1734 		goto badkey_err;
1735 
1736 	memcpy(ablkctx->key, key, key_len);
1737 	ablkctx->enckey_len = key_len;
1738 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1739 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1740 	ablkctx->key_ctx_hdr =
1741 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1742 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1743 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1744 				 CHCR_KEYCTX_NO_KEY, 1,
1745 				 0, context_size);
1746 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1747 	return 0;
1748 badkey_err:
1749 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1750 	ablkctx->enckey_len = 0;
1751 
1752 	return err;
1753 }
1754 
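/*
 * Reset the per-request state and seed partial_hash with the standard
 * initial hash values for this digest size.
 */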
1755 static int chcr_sha_init(struct ahash_request *areq)
1756 {
1757 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1758 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1759 	int digestsize =  crypto_ahash_digestsize(tfm);
1760 
1761 	req_ctx->data_len = 0;
1762 	req_ctx->reqlen = 0;
1763 	req_ctx->reqbfr = req_ctx->bfr1;
1764 	req_ctx->skbfr = req_ctx->bfr2;
1765 	req_ctx->skb = NULL;
1766 	req_ctx->result = 0;
1767 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
1768 	return 0;
1769 }
1770 
1771 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1772 {
1773 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1774 				 sizeof(struct chcr_ahash_req_ctx));
1775 	return chcr_device_init(crypto_tfm_ctx(tfm));
1776 }
1777 
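/*
 * HMAC init: as chcr_sha_init(), but seed partial_hash with the
 * precomputed ipad digest and account for the implicit first block.
 */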
1778 static int chcr_hmac_init(struct ahash_request *areq)
1779 {
1780 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1781 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1782 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1783 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1784 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1785 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1786 
1787 	chcr_sha_init(areq);
1788 	req_ctx->data_len = bs;
1789 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1790 		if (digestsize == SHA224_DIGEST_SIZE)
1791 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1792 			       SHA256_DIGEST_SIZE);
1793 		else if (digestsize == SHA384_DIGEST_SIZE)
1794 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1795 			       SHA512_DIGEST_SIZE);
1796 		else
1797 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1798 			       digestsize);
1799 	}
1800 	return 0;
1801 }
1802 
1803 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1804 {
1805 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1806 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1807 	unsigned int digestsize =
1808 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1809 
1810 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1811 				 sizeof(struct chcr_ahash_req_ctx));
1812 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
1813 	if (IS_ERR(hmacctx->base_hash))
1814 		return PTR_ERR(hmacctx->base_hash);
1815 	return chcr_device_init(crypto_tfm_ctx(tfm));
1816 }
1817 
1818 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1819 {
1820 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1821 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1822 
1823 	if (hmacctx->base_hash) {
1824 		chcr_free_shash(hmacctx->base_hash);
1825 		hmacctx->base_hash = NULL;
1826 	}
1827 }
1828 
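/*
 * Return 1 if any scatterlist entry exceeds CHCR_SG_SIZE (the largest
 * chunk handled per entry); *newents receives the entry count after
 * splitting oversized entries.
 */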
1829 static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
1830 {
1831 	int nents = 0;
1832 	int ret = 0;
1833 
1834 	while (sgl) {
1835 		if (sgl->length > CHCR_SG_SIZE)
1836 			ret = 1;
1837 		nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
1838 		sgl = sg_next(sgl);
1839 	}
1840 	*newents = nents;
1841 	return ret;
1842 }
1843 
1844 static inline void free_new_sg(struct scatterlist *sgl)
1845 {
1846 	kfree(sgl);
1847 }
1848 
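/*
 * Build a new scatterlist in which no entry is larger than CHCR_SG_SIZE,
 * stepping through the pages of the original list as needed.
 */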
1849 static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
1850 				       unsigned int nents)
1851 {
1852 	struct scatterlist *newsg, *sg;
1853 	int i, len, processed = 0;
1854 	struct page *spage;
1855 	int offset;
1856 
1857 	newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
1858 	if (!newsg)
1859 		return ERR_PTR(-ENOMEM);
1860 	sg = newsg;
1861 	sg_init_table(sg, nents);
1862 	offset = sgl->offset;
1863 	spage = sg_page(sgl);
1864 	for (i = 0; i < nents; i++) {
1865 		len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
1866 		sg_set_page(sg, spage, len, offset);
1867 		processed += len;
1868 		offset += len;
1869 		if (offset >= PAGE_SIZE) {
1870 			offset = offset % PAGE_SIZE;
1871 			spage++;
1872 		}
1873 		if (processed == sgl->length) {
1874 			processed = 0;
1875 			sgl = sg_next(sgl);
1876 			if (!sgl)
1877 				break;
1878 			spage = sg_page(sgl);
1879 			offset = sgl->offset;
1880 		}
1881 		sg = sg_next(sg);
1882 	}
1883 	return newsg;
1884 }
1885 
1886 static int chcr_copy_assoc(struct aead_request *req,
1887 				struct chcr_aead_ctx *ctx)
1888 {
1889 	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1890 
1891 	skcipher_request_set_tfm(skreq, ctx->null);
1892 	skcipher_request_set_callback(skreq, aead_request_flags(req),
1893 			NULL, NULL);
1894 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1895 			NULL);
1896 
1897 	return crypto_skcipher_encrypt(skreq);
1898 }
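
/*
 * Check whether the request exceeds what one work request can carry
 * (AAD size, SKB fragment count, total WR length); such requests are
 * handed to the software fallback cipher instead.
 */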
1899 static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
1900 				   int aadmax, int wrlen,
1901 				   unsigned short op_type)
1902 {
1903 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
1904 
1905 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
1906 	    (req->assoclen > aadmax) ||
1907 	    (src_nent > MAX_SKB_FRAGS) ||
1908 	    (wrlen > MAX_WR_SIZE))
1909 		return 1;
1910 	return 0;
1911 }
1912 
1913 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
1914 {
1915 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1916 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1917 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1918 	struct aead_request *subreq = aead_request_ctx(req);
1919 
1920 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
1921 	aead_request_set_callback(subreq, req->base.flags,
1922 				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
1926 	return op_type ? crypto_aead_decrypt(subreq) :
1927 		crypto_aead_encrypt(subreq);
1928 }
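/*
 * Build the work request for authenc (AES-CBC + HMAC) and null-cipher
 * AEADs: fill the SEC CPL, load the cipher key (or its decrypt form)
 * and the precomputed ipad/opad digests into the key context, map the
 * destination DSGL and append AAD, IV and payload to the skb.
 */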
1929 
1930 static struct sk_buff *create_authenc_wr(struct aead_request *req,
1931 					 unsigned short qid,
1932 					 int size,
1933 					 unsigned short op_type)
1934 {
1935 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1936 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1937 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1938 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1939 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1940 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1941 	struct sk_buff *skb = NULL;
1942 	struct chcr_wr *chcr_req;
1943 	struct cpl_rx_phys_dsgl *phys_cpl;
1944 	struct phys_sge_parm sg_param;
1945 	struct scatterlist *src;
1946 	unsigned int frags = 0, transhdr_len;
1947 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1948 	unsigned int   kctx_len = 0, nents;
1949 	unsigned short stop_offset = 0;
1950 	unsigned int  assoclen = req->assoclen;
1951 	unsigned int  authsize = crypto_aead_authsize(tfm);
1952 	int error = -EINVAL, src_nent;
1953 	int null = 0;
1954 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1955 		GFP_ATOMIC;
1956 	struct adapter *adap = padap(ctx->dev);
1957 
1958 	reqctx->newdstsg = NULL;
1959 	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
1960 						   authsize);
1961 	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
1962 		goto err;
1963 
1964 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1965 		goto err;
1966 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1967 	if (src_nent < 0)
1968 		goto err;
1969 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1970 
1971 	if (req->src != req->dst) {
1972 		error = chcr_copy_assoc(req, aeadctx);
1973 		if (error)
1974 			return ERR_PTR(error);
1975 	}
1976 	if (dst_size && is_newsg(req->dst, &nents)) {
1977 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1978 		if (IS_ERR(reqctx->newdstsg))
1979 			return ERR_CAST(reqctx->newdstsg);
1980 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1981 					       reqctx->newdstsg, req->assoclen);
1982 	} else {
1983 		if (req->src == req->dst)
1984 			reqctx->dst = src;
1985 		else
1986 			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1987 						       req->dst, req->assoclen);
1988 	}
1989 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1990 		null = 1;
1991 		assoclen = 0;
1992 	}
1993 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1994 					     (op_type ? -authsize : authsize));
1995 	if (reqctx->dst_nents < 0) {
1996 		pr_err("AUTHENC:Invalid Destination sg entries\n");
1997 		error = -EINVAL;
1998 		goto err;
1999 	}
2000 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2001 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2002 		- sizeof(chcr_req->key_ctx);
2003 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2004 	if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
2005 			T6_MAX_AAD_SIZE,
2006 			transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
2007 				op_type)) {
2008 		atomic_inc(&adap->chcr_stats.fallback);
2009 		free_new_sg(reqctx->newdstsg);
2010 		reqctx->newdstsg = NULL;
2011 		return ERR_PTR(chcr_aead_fallback(req, op_type));
2012 	}
2013 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2014 	if (!skb) {
2015 		error = -ENOMEM;
2016 		goto err;
2017 	}
2018 
2019 	/* LLD is going to write the sge hdr. */
2020 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2021 
2022 	/* Write WR */
2023 	chcr_req = __skb_put_zero(skb, transhdr_len);
2024 
2025 	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2026 
	/*
	 * Input order is AAD, IV and payload, where the IV is counted as
	 * part of the auth data. All other fields are filled according to
	 * the hardware spec.
	 */
2032 	chcr_req->sec_cpl.op_ivinsrtofst =
2033 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
2034 				       (ivsize ? (assoclen + 1) : 0));
2035 	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
2036 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2037 					assoclen ? 1 : 0, assoclen,
2038 					assoclen + ivsize + 1,
2039 					(stop_offset & 0x1F0) >> 4);
2040 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2041 					stop_offset & 0xF,
2042 					null ? 0 : assoclen + ivsize + 1,
2043 					stop_offset, stop_offset);
2044 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2045 					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2046 					CHCR_SCMD_CIPHER_MODE_AES_CBC,
2047 					actx->auth_mode, aeadctx->hmac_ctrl,
2048 					ivsize >> 1);
2049 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2050 					 0, 1, dst_size);
2051 
2052 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2053 	if (op_type == CHCR_ENCRYPT_OP)
2054 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2055 		       aeadctx->enckey_len);
2056 	else
2057 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2058 		       aeadctx->enckey_len);
2059 
2060 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
2061 					4), actx->h_iopad, kctx_len -
2062 				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2063 
2064 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2065 	sg_param.nents = reqctx->dst_nents;
2066 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2067 	sg_param.qid = qid;
2068 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2069 					reqctx->dst, &sg_param);
2070 	if (error)
2071 		goto dstmap_fail;
2072 
2073 	skb_set_transport_header(skb, transhdr_len);
2074 
2075 	if (assoclen) {
		/* AAD goes into the skb first */
		write_sg_to_skb(skb, &frags, req->src, assoclen);
2079 	}
2080 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
2081 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
2082 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2083 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2084 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2085 	reqctx->skb = skb;
2086 	skb_get(skb);
2087 
2088 	return skb;
2089 dstmap_fail:
2090 	/* ivmap_fail: */
2091 	kfree_skb(skb);
2092 err:
2093 	free_new_sg(reqctx->newdstsg);
2094 	reqctx->newdstsg = NULL;
2095 	return ERR_PTR(error);
2096 }
2097 
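/* Write msglen big-endian, right-aligned, into a csize-byte length field. */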
2098 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2099 {
2100 	__be32 data;
2101 
2102 	memset(block, 0, csize);
2103 	block += csize;
2104 
2105 	if (csize >= 4)
2106 		csize = 4;
2107 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2108 		return -EOVERFLOW;
2109 
2110 	data = cpu_to_be32(msglen);
2111 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2112 
2113 	return 0;
2114 }
2115 
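/*
 * Construct the CCM B0 block (RFC 3610): flags byte encoding the tag
 * length M and length-field size L, the nonce, and the message length
 * in the trailing L bytes.
 */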
static int generate_b0(struct aead_request *req,
2117 			struct chcr_aead_ctx *aeadctx,
2118 			unsigned short op_type)
2119 {
2120 	unsigned int l, lp, m;
2121 	int rc;
2122 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2123 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2124 	u8 *b0 = reqctx->scratch_pad;
2125 
2126 	m = crypto_aead_authsize(aead);
2127 
2128 	memcpy(b0, reqctx->iv, 16);
2129 
2130 	lp = b0[0];
2131 	l = lp + 1;
2132 
2133 	/* set m, bits 3-5 */
2134 	*b0 |= (8 * ((m - 2) / 2));
2135 
2136 	/* set adata, bit 6, if associated data is used */
2137 	if (req->assoclen)
2138 		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}
2143 
2144 static inline int crypto_ccm_check_iv(const u8 *iv)
2145 {
2146 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2147 	if (iv[0] < 1 || iv[0] > 7)
2148 		return -EINVAL;
2149 
2150 	return 0;
2151 }
2152 
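/*
 * Lay out the CCM IV and encoded AAD length: RFC4309 rebuilds the IV
 * from the 3-byte salt plus the 8-byte per-request IV, plain CCM takes
 * the caller's IV; the AAD length field is staged after B0 in the
 * scratch pad.
 */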
2153 static int ccm_format_packet(struct aead_request *req,
2154 			     struct chcr_aead_ctx *aeadctx,
2155 			     unsigned int sub_type,
2156 			     unsigned short op_type)
2157 {
2158 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2159 	int rc = 0;
2160 
2161 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2162 		reqctx->iv[0] = 3;
2163 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2164 		memcpy(reqctx->iv + 4, req->iv, 8);
2165 		memset(reqctx->iv + 12, 0, 4);
2166 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2167 			htons(req->assoclen - 8);
2168 	} else {
2169 		memcpy(reqctx->iv, req->iv, 16);
2170 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2171 			htons(req->assoclen);
2172 	}
	rc = generate_b0(req, aeadctx, op_type);
2174 	/* zero the ctr value */
2175 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2176 	return rc;
2177 }
2178 
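/*
 * Fill the SEC CPL for CCM: all offsets must account for the extra B0
 * block (and AAD length field) preceding the payload, hence the
 * ccm_xtra adjustments.
 */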
2179 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2180 				  unsigned int dst_size,
2181 				  struct aead_request *req,
2182 				  unsigned short op_type,
2183 					  struct chcr_context *chcrctx)
2184 {
2185 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2186 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2187 	unsigned int ivsize = AES_BLOCK_SIZE;
2188 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2189 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2190 	unsigned int c_id = chcrctx->dev->rx_channel_id;
2191 	unsigned int ccm_xtra;
2192 	unsigned char tag_offset = 0, auth_offset = 0;
2193 	unsigned int assoclen;
2194 
2195 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2196 		assoclen = req->assoclen - 8;
2197 	else
2198 		assoclen = req->assoclen;
2199 	ccm_xtra = CCM_B0_SIZE +
2200 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2201 
2202 	auth_offset = req->cryptlen ?
2203 		(assoclen + ivsize + 1 + ccm_xtra) : 0;
2204 	if (op_type == CHCR_DECRYPT_OP) {
2205 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2206 			tag_offset = crypto_aead_authsize(tfm);
2207 		else
2208 			auth_offset = 0;
2209 	}
2210 
2212 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2213 					 2, (ivsize ?  (assoclen + 1) :  0) +
2214 					 ccm_xtra);
2215 	sec_cpl->pldlen =
2216 		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD always starts at 1 */
2218 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2219 					1, assoclen + ccm_xtra, assoclen
2220 					+ ivsize + 1 + ccm_xtra, 0);
2221 
2222 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2223 					auth_offset, tag_offset,
2224 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2225 					crypto_aead_authsize(tfm));
2226 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2227 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2228 					cipher_mode, mac_mode,
2229 					aeadctx->hmac_ctrl, ivsize >> 1);
2230 
2231 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2232 					1, dst_size);
2233 }
2234 
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
2239 {
2240 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2241 		if (crypto_ccm_check_iv(req->iv)) {
2242 			pr_err("CCM: IV check fails\n");
2243 			return -EINVAL;
2244 		}
2245 	} else {
2246 		if (req->assoclen != 16 && req->assoclen != 20) {
2247 			pr_err("RFC4309: Invalid AAD length %d\n",
2248 			       req->assoclen);
2249 			return -EINVAL;
2250 		}
2251 	}
2252 	if (aeadctx->enckey_len == 0) {
2253 		pr_err("CCM: Encryption key not set\n");
2254 		return -EINVAL;
2255 	}
2256 	return 0;
2257 }
2258 
static unsigned int fill_aead_req_fields(struct sk_buff *skb,
					 struct aead_request *req,
					 struct scatterlist *src,
					 unsigned int ivsize,
					 struct chcr_aead_ctx *aeadctx)
2264 {
2265 	unsigned int frags = 0;
2266 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2267 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	/* B0 and AAD length field (if present) */
2269 
2270 	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
2271 				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
2272 	if (req->assoclen) {
2273 		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2274 			write_sg_to_skb(skb, &frags, req->src,
2275 					req->assoclen - 8);
2276 		else
2277 			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
2278 	}
2279 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
2280 	if (req->cryptlen)
2281 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
2282 
2283 	return frags;
2284 }
2285 
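/*
 * Build the CCM work request: validate the input, format B0/IV and load
 * the AES key into both the cipher- and MAC-key slots of the key
 * context (CCM uses the same key for CTR and CBC-MAC).
 */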
2286 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2287 					  unsigned short qid,
2288 					  int size,
2289 					  unsigned short op_type)
2290 {
2291 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2292 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2293 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2294 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2295 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2296 	struct sk_buff *skb = NULL;
2297 	struct chcr_wr *chcr_req;
2298 	struct cpl_rx_phys_dsgl *phys_cpl;
2299 	struct phys_sge_parm sg_param;
2300 	struct scatterlist *src;
2301 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
2302 	unsigned int dst_size = 0, kctx_len, nents;
2303 	unsigned int sub_type;
2304 	unsigned int authsize = crypto_aead_authsize(tfm);
2305 	int error = -EINVAL, src_nent;
2306 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2307 		GFP_ATOMIC;
2308 	struct adapter *adap = padap(ctx->dev);
2309 
2310 	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
2311 						   authsize);
2312 	reqctx->newdstsg = NULL;
2313 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2314 		goto err;
2315 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
2316 	if (src_nent < 0)
2317 		goto err;
2318 
2319 	sub_type = get_aead_subtype(tfm);
2320 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
2321 	if (req->src != req->dst) {
2322 		error = chcr_copy_assoc(req, aeadctx);
2323 		if (error) {
2324 			pr_err("AAD copy to destination buffer fails\n");
2325 			return ERR_PTR(error);
2326 		}
2327 	}
2328 	if (dst_size && is_newsg(req->dst, &nents)) {
2329 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2330 		if (IS_ERR(reqctx->newdstsg))
2331 			return ERR_CAST(reqctx->newdstsg);
2332 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2333 					       reqctx->newdstsg, req->assoclen);
2334 	} else {
2335 		if (req->src == req->dst)
2336 			reqctx->dst = src;
2337 		else
2338 			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2339 						       req->dst, req->assoclen);
2340 	}
2341 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2342 					     (op_type ? -authsize : authsize));
2343 	if (reqctx->dst_nents < 0) {
2344 		pr_err("CCM:Invalid Destination sg entries\n");
2345 		error = -EINVAL;
2346 		goto err;
2347 	}
2348 	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2349 	if (error)
2350 		goto err;
2351 
2352 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2353 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
2354 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2355 	if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
2356 			    T6_MAX_AAD_SIZE - 18,
2357 			    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
2358 			    op_type)) {
2359 		atomic_inc(&adap->chcr_stats.fallback);
2360 		free_new_sg(reqctx->newdstsg);
2361 		reqctx->newdstsg = NULL;
2362 		return ERR_PTR(chcr_aead_fallback(req, op_type));
2363 	}
2364 
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2366 
2367 	if (!skb) {
2368 		error = -ENOMEM;
2369 		goto err;
2370 	}
2371 
2372 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2373 
2374 	chcr_req = __skb_put_zero(skb, transhdr_len);
2375 
2376 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
2377 
2378 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2379 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2380 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2381 					16), aeadctx->key, aeadctx->enckey_len);
2382 
2383 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2384 	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2385 	if (error)
2386 		goto dstmap_fail;
2387 
2388 	sg_param.nents = reqctx->dst_nents;
2389 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2390 	sg_param.qid = qid;
2391 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2392 				 reqctx->dst, &sg_param);
2393 	if (error)
2394 		goto dstmap_fail;
2395 
2396 	skb_set_transport_header(skb, transhdr_len);
2397 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
2398 	atomic_inc(&adap->chcr_stats.aead_rqst);
2399 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
2400 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2401 	reqctx->skb = skb;
2402 	skb_get(skb);
2403 	return skb;
2404 dstmap_fail:
2405 	kfree_skb(skb);
2406 err:
2407 	free_new_sg(reqctx->newdstsg);
2408 	reqctx->newdstsg = NULL;
2409 	return ERR_PTR(error);
2410 }
2411 
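/*
 * Build the GCM work request: the key context holds the AES key
 * followed by the precomputed GHASH subkey H; the 16-byte IV is
 * salt|IV|0x00000001 for RFC4106, or the caller's 12-byte IV with the
 * initial 32-bit counter appended.
 */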
2412 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2413 				     unsigned short qid,
2414 				     int size,
2415 				     unsigned short op_type)
2416 {
2417 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2418 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2419 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2420 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2421 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2422 	struct sk_buff *skb = NULL;
2423 	struct chcr_wr *chcr_req;
2424 	struct cpl_rx_phys_dsgl *phys_cpl;
2425 	struct phys_sge_parm sg_param;
2426 	struct scatterlist *src;
2427 	unsigned int frags = 0, transhdr_len;
2428 	unsigned int ivsize = AES_BLOCK_SIZE;
2429 	unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
2430 	unsigned char tag_offset = 0;
2431 	unsigned int authsize = crypto_aead_authsize(tfm);
2432 	int error = -EINVAL, src_nent;
2433 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2434 		GFP_ATOMIC;
2435 	struct adapter *adap = padap(ctx->dev);
2436 
2437 	reqctx->newdstsg = NULL;
2438 	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
2439 						    authsize);
2440 	/* validate key size */
2441 	if (aeadctx->enckey_len == 0)
2442 		goto err;
2443 
2444 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2445 		goto err;
2446 	src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
2447 	if (src_nent < 0)
2448 		goto err;
2449 
2450 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
2451 	if (req->src != req->dst) {
2452 		error = chcr_copy_assoc(req, aeadctx);
2453 		if (error)
2454 			return	ERR_PTR(error);
2455 	}
2456 
2457 	if (dst_size && is_newsg(req->dst, &nents)) {
2458 		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2459 		if (IS_ERR(reqctx->newdstsg))
2460 			return ERR_CAST(reqctx->newdstsg);
2461 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2462 					       reqctx->newdstsg, assoclen);
2463 	} else {
2464 		if (req->src == req->dst)
2465 			reqctx->dst = src;
2466 		else
2467 			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2468 						       req->dst, assoclen);
2469 	}
2470 
2471 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2472 					     (op_type ? -authsize : authsize));
2473 	if (reqctx->dst_nents < 0) {
2474 		pr_err("GCM:Invalid Destination sg entries\n");
2475 		error = -EINVAL;
2476 		goto err;
2477 	}
2478 
2480 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2481 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
2482 		AEAD_H_SIZE;
2483 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2484 	if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
2485 			    T6_MAX_AAD_SIZE,
2486 			    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
2487 			    op_type)) {
2488 		atomic_inc(&adap->chcr_stats.fallback);
2489 		free_new_sg(reqctx->newdstsg);
2490 		reqctx->newdstsg = NULL;
2491 		return ERR_PTR(chcr_aead_fallback(req, op_type));
2492 	}
2493 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2494 	if (!skb) {
2495 		error = -ENOMEM;
2496 		goto err;
2497 	}
2498 
2499 	/* NIC driver is going to write the sge hdr. */
2500 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2501 
2502 	chcr_req = __skb_put_zero(skb, transhdr_len);
2503 
2504 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2505 		assoclen = req->assoclen - 8;
2506 
2507 	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2508 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2509 					ctx->dev->rx_channel_id, 2, (ivsize ?
2510 					(assoclen + 1) : 0));
2511 	chcr_req->sec_cpl.pldlen =
2512 		htonl(assoclen + ivsize + req->cryptlen);
2513 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2514 					assoclen ? 1 : 0, assoclen,
2515 					assoclen + ivsize + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
					tag_offset, tag_offset);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
				CHCR_ENCRYPT_OP) ? 1 : 0,
				CHCR_SCMD_CIPHER_MODE_AES_GCM,
				CHCR_SCMD_AUTH_MODE_GHASH,
				aeadctx->hmac_ctrl, ivsize >> 1);
2525 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2526 					0, 1, dst_size);
2527 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2528 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2529 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2530 				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2531 
2532 	/* prepare a 16 byte iv */
2533 	/* S   A   L  T |  IV | 0x00000001 */
2534 	if (get_aead_subtype(tfm) ==
2535 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2536 		memcpy(reqctx->iv, aeadctx->salt, 4);
2537 		memcpy(reqctx->iv + 4, req->iv, 8);
2538 	} else {
2539 		memcpy(reqctx->iv, req->iv, 12);
2540 	}
2541 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
2542 
2543 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2544 	sg_param.nents = reqctx->dst_nents;
2545 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2546 	sg_param.qid = qid;
2547 	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2548 					  reqctx->dst, &sg_param);
2549 	if (error)
2550 		goto dstmap_fail;
2551 
2552 	skb_set_transport_header(skb, transhdr_len);
2553 	write_sg_to_skb(skb, &frags, req->src, assoclen);
2554 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
2555 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
2556 	atomic_inc(&adap->chcr_stats.aead_rqst);
2557 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2558 			sizeof(struct cpl_rx_phys_dsgl) + dst_size,
2559 			reqctx->verify);
2560 	reqctx->skb = skb;
2561 	skb_get(skb);
2562 	return skb;
2563 
2564 dstmap_fail:
2565 	/* ivmap_fail: */
2566 	kfree_skb(skb);
2567 err:
2568 	free_new_sg(reqctx->newdstsg);
2569 	reqctx->newdstsg = NULL;
2570 	return ERR_PTR(error);
2571 }
2572 
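/*
 * Allocate the software fallback AEAD and the null skcipher (used by
 * chcr_copy_assoc() to copy AAD when src != dst) at transform init.
 */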
2575 static int chcr_aead_cra_init(struct crypto_aead *tfm)
2576 {
2577 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2578 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2579 	struct aead_alg *alg = crypto_aead_alg(tfm);
2580 
2581 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
2582 					       CRYPTO_ALG_NEED_FALLBACK |
2583 					       CRYPTO_ALG_ASYNC);
2584 	if  (IS_ERR(aeadctx->sw_cipher))
2585 		return PTR_ERR(aeadctx->sw_cipher);
2586 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
2587 				 sizeof(struct aead_request) +
2588 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
2589 	aeadctx->null = crypto_get_default_null_skcipher();
2590 	if (IS_ERR(aeadctx->null))
2591 		return PTR_ERR(aeadctx->null);
2592 	return chcr_device_init(ctx);
2593 }
2594 
2595 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
2596 {
2597 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2598 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2599 
2600 	crypto_put_default_null_skcipher();
2601 	crypto_free_aead(aeadctx->sw_cipher);
2602 }
2603 
2604 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
2605 					unsigned int authsize)
2606 {
2607 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2608 
2609 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2610 	aeadctx->mayverify = VERIFY_HW;
2611 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2612 }

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2614 				    unsigned int authsize)
2615 {
2616 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2617 	u32 maxauth = crypto_aead_maxauthsize(tfm);
2618 
	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. authsize ==
	 * maxauthsize / 2 does not hold for SHA1, so the authsize == 12
	 * check must come before the authsize == (maxauth >> 1) check.
	 */
2623 	if (authsize == ICV_4) {
2624 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2625 		aeadctx->mayverify = VERIFY_HW;
2626 	} else if (authsize == ICV_6) {
2627 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2628 		aeadctx->mayverify = VERIFY_HW;
2629 	} else if (authsize == ICV_10) {
2630 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2631 		aeadctx->mayverify = VERIFY_HW;
2632 	} else if (authsize == ICV_12) {
2633 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2634 		aeadctx->mayverify = VERIFY_HW;
2635 	} else if (authsize == ICV_14) {
2636 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2637 		aeadctx->mayverify = VERIFY_HW;
2638 	} else if (authsize == (maxauth >> 1)) {
2639 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2640 		aeadctx->mayverify = VERIFY_HW;
2641 	} else if (authsize == maxauth) {
2642 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2643 		aeadctx->mayverify = VERIFY_HW;
2644 	} else {
2645 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2646 		aeadctx->mayverify = VERIFY_SW;
2647 	}
2648 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2649 }
2650 
2652 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2653 {
2654 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2655 
2656 	switch (authsize) {
2657 	case ICV_4:
2658 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2659 		aeadctx->mayverify = VERIFY_HW;
2660 		break;
2661 	case ICV_8:
2662 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2663 		aeadctx->mayverify = VERIFY_HW;
2664 		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
2672 		break;
2673 	case ICV_16:
2674 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2675 		aeadctx->mayverify = VERIFY_HW;
2676 		break;
2677 	case ICV_13:
2678 	case ICV_15:
2679 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2680 		aeadctx->mayverify = VERIFY_SW;
2681 		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2686 		return -EINVAL;
2687 	}
2688 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2689 }
2690 
2691 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2692 					  unsigned int authsize)
2693 {
2694 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2695 
2696 	switch (authsize) {
2697 	case ICV_8:
2698 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2699 		aeadctx->mayverify = VERIFY_HW;
2700 		break;
2701 	case ICV_12:
2702 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2703 		aeadctx->mayverify = VERIFY_HW;
2704 		break;
2705 	case ICV_16:
2706 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2707 		aeadctx->mayverify = VERIFY_HW;
2708 		break;
2709 	default:
2710 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2711 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2712 		return -EINVAL;
2713 	}
2714 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2715 }
2716 
2717 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2718 				unsigned int authsize)
2719 {
2720 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2721 
2722 	switch (authsize) {
2723 	case ICV_4:
2724 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2725 		aeadctx->mayverify = VERIFY_HW;
2726 		break;
2727 	case ICV_6:
2728 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2729 		aeadctx->mayverify = VERIFY_HW;
2730 		break;
2731 	case ICV_8:
2732 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2733 		aeadctx->mayverify = VERIFY_HW;
2734 		break;
2735 	case ICV_10:
2736 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2737 		aeadctx->mayverify = VERIFY_HW;
2738 		break;
2739 	case ICV_12:
2740 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2741 		aeadctx->mayverify = VERIFY_HW;
2742 		break;
2743 	case ICV_14:
2744 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2745 		aeadctx->mayverify = VERIFY_HW;
2746 		break;
2747 	case ICV_16:
2748 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2749 		aeadctx->mayverify = VERIFY_HW;
2750 		break;
2751 	default:
2752 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2753 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2754 		return -EINVAL;
2755 	}
2756 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2757 }
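/*
 * Common CCM setkey: size the key context for two copies of the key
 * and encode the AES key length in the context header.
 */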
2758 
2759 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2760 				const u8 *key,
2761 				unsigned int keylen)
2762 {
2763 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2764 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2765 	unsigned char ck_size, mk_size;
2766 	int key_ctx_size = 0;
2767 
2768 	key_ctx_size = sizeof(struct _key_ctx) +
2769 		((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
2770 	if (keylen == AES_KEYSIZE_128) {
2771 		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2772 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2773 	} else if (keylen == AES_KEYSIZE_192) {
2774 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2775 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2776 	} else if (keylen == AES_KEYSIZE_256) {
2777 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2778 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2779 	} else {
2780 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2781 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2782 		aeadctx->enckey_len = 0;
2783 		return	-EINVAL;
2784 	}
2785 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2786 						key_ctx_size >> 4);
2787 	memcpy(aeadctx->key, key, keylen);
2788 	aeadctx->enckey_len = keylen;
2789 
2790 	return 0;
2791 }
2792 
2793 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2794 				const u8 *key,
2795 				unsigned int keylen)
2796 {
2797 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2798 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2799 	int error;
2800 
2801 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2802 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2803 			      CRYPTO_TFM_REQ_MASK);
2804 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2805 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2806 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2807 			      CRYPTO_TFM_RES_MASK);
2808 	if (error)
2809 		return error;
2810 	return chcr_ccm_common_setkey(aead, key, keylen);
2811 }
2812 
2813 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2814 				    unsigned int keylen)
2815 {
2816 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2817 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2818 	int error;
2819 
2820 	if (keylen < 3) {
2821 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2822 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2823 		aeadctx->enckey_len = 0;
2824 		return	-EINVAL;
2825 	}
2826 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2827 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2828 			      CRYPTO_TFM_REQ_MASK);
2829 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2830 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2831 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2832 			      CRYPTO_TFM_RES_MASK);
2833 	if (error)
2834 		return error;
2835 	keylen -= 3;
2836 	memcpy(aeadctx->salt, key + keylen, 3);
2837 	return chcr_ccm_common_setkey(aead, key, keylen);
2838 }
2839 
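/*
 * GCM setkey: mirror the key into the fallback tfm, save the RFC4106
 * nonce salt if present, and precompute the GHASH subkey
 * H = E(K, 0^128) with a software AES cipher.
 */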
2840 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2841 			   unsigned int keylen)
2842 {
2843 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2844 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2845 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2846 	struct crypto_cipher *cipher;
2847 	unsigned int ck_size;
2848 	int ret = 0, key_ctx_size = 0;
2849 
2850 	aeadctx->enckey_len = 0;
2851 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2852 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
2853 			      & CRYPTO_TFM_REQ_MASK);
2854 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2855 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2856 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2857 			      CRYPTO_TFM_RES_MASK);
2858 	if (ret)
2859 		goto out;
2860 
2861 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2862 	    keylen > 3) {
2863 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2864 		memcpy(aeadctx->salt, key + keylen, 4);
2865 	}
2866 	if (keylen == AES_KEYSIZE_128) {
2867 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2868 	} else if (keylen == AES_KEYSIZE_192) {
2869 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2870 	} else if (keylen == AES_KEYSIZE_256) {
2871 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2872 	} else {
2873 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2874 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2875 		pr_err("GCM: Invalid key length %d\n", keylen);
2876 		ret = -EINVAL;
2877 		goto out;
2878 	}
2879 
2880 	memcpy(aeadctx->key, key, keylen);
2881 	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		((DIV_ROUND_UP(keylen, 16)) << 4) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
	 * key context.
	 */
2892 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
2893 	if (IS_ERR(cipher)) {
2894 		aeadctx->enckey_len = 0;
2895 		ret = -ENOMEM;
2896 		goto out;
2897 	}
2898 
2899 	ret = crypto_cipher_setkey(cipher, key, keylen);
2900 	if (ret) {
2901 		aeadctx->enckey_len = 0;
2902 		goto out1;
2903 	}
2904 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2905 	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2906 
2907 out1:
2908 	crypto_free_cipher(cipher);
2909 out:
2910 	return ret;
2911 }
2912 
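/*
 * authenc setkey: split the combined key, keep the cipher key (plus
 * its decrypt form) and precompute h(ipad)/h(opad) from the auth key
 * with a software shash.
 */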
2913 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2914 				   unsigned int keylen)
2915 {
2916 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2917 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2918 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* the key contains both the auth and cipher keys */
2920 	struct crypto_authenc_keys keys;
2921 	unsigned int bs;
2922 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2923 	int err = 0, i, key_ctx_len = 0;
2924 	unsigned char ck_size = 0;
2925 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2926 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2927 	struct algo_param param;
2928 	int align;
2929 	u8 *o_ptr = NULL;
2930 
2931 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2932 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2933 			      & CRYPTO_TFM_REQ_MASK);
2934 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2935 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2936 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2937 			      & CRYPTO_TFM_RES_MASK);
2938 	if (err)
2939 		goto out;
2940 
2941 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2942 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2943 		goto out;
2944 	}
2945 
2946 	if (get_alg_config(&param, max_authsize)) {
2947 		pr_err("chcr : Unsupported digest size\n");
2948 		goto out;
2949 	}
2950 	if (keys.enckeylen == AES_KEYSIZE_128) {
2951 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2952 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2953 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2954 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2955 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2956 	} else {
2957 		pr_err("chcr : Unsupported cipher key\n");
2958 		goto out;
2959 	}
2960 
	/* Copy only the encryption key. The authkey is used here to generate
	 * h(ipad) and h(opad), so it is not needed again; authkeylen equals
	 * the hash digest size.
	 */
2965 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2966 	aeadctx->enckey_len = keys.enckeylen;
2967 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2968 			    aeadctx->enckey_len << 3);
2969 
2970 	base_hash  = chcr_alloc_shash(max_authsize);
2971 	if (IS_ERR(base_hash)) {
2972 		pr_err("chcr : Base driver cannot be loaded\n");
2973 		aeadctx->enckey_len = 0;
2974 		return -EINVAL;
2975 	}
2976 	{
2977 		SHASH_DESC_ON_STACK(shash, base_hash);
2978 		shash->tfm = base_hash;
2979 		shash->flags = crypto_shash_get_flags(base_hash);
2980 		bs = crypto_shash_blocksize(base_hash);
2981 		align = KEYCTX_ALIGN_PAD(max_authsize);
2982 		o_ptr =  actx->h_iopad + param.result_size + align;
2983 
2984 		if (keys.authkeylen > bs) {
2985 			err = crypto_shash_digest(shash, keys.authkey,
2986 						  keys.authkeylen,
2987 						  o_ptr);
2988 			if (err) {
2989 				pr_err("chcr : Base driver cannot be loaded\n");
2990 				goto out;
2991 			}
2992 			keys.authkeylen = max_authsize;
2993 		} else
2994 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
2995 
2996 		/* Compute the ipad-digest*/
2997 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2998 		memcpy(pad, o_ptr, keys.authkeylen);
2999 		for (i = 0; i < bs >> 2; i++)
3000 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3001 
3002 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3003 					      max_authsize))
3004 			goto out;
3005 		/* Compute the opad-digest */
3006 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3007 		memcpy(pad, o_ptr, keys.authkeylen);
3008 		for (i = 0; i < bs >> 2; i++)
3009 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3010 
3011 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3012 			goto out;
3013 
3014 		/* convert the ipad and opad digest to network order */
3015 		chcr_change_order(actx->h_iopad, param.result_size);
3016 		chcr_change_order(o_ptr, param.result_size);
3017 		key_ctx_len = sizeof(struct _key_ctx) +
3018 			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
3019 			(param.result_size + align) * 2;
3020 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3021 						0, 1, key_ctx_len >> 4);
3022 		actx->auth_mode = param.auth_mode;
3023 		chcr_free_shash(base_hash);
3024 
3025 		return 0;
3026 	}
3027 out:
3028 	aeadctx->enckey_len = 0;
3029 	if (!IS_ERR(base_hash))
3030 		chcr_free_shash(base_hash);
3031 	return -EINVAL;
3032 }
3033 
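/*
 * Setkey for the digest_null authenc variants: only the cipher key
 * (and its decrypt form) is kept; authentication is a NOP.
 */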
3034 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3035 					const u8 *key, unsigned int keylen)
3036 {
3037 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
3038 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3039 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3040 	struct crypto_authenc_keys keys;
3041 	int err;
	/* the key contains both the auth and cipher keys */
3043 	int key_ctx_len = 0;
3044 	unsigned char ck_size = 0;
3045 
3046 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3047 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3048 			      & CRYPTO_TFM_REQ_MASK);
3049 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3050 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3051 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3052 			      & CRYPTO_TFM_RES_MASK);
3053 	if (err)
3054 		goto out;
3055 
3056 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3057 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3058 		goto out;
3059 	}
3060 	if (keys.enckeylen == AES_KEYSIZE_128) {
3061 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3062 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3063 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3064 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3065 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3066 	} else {
3067 		pr_err("chcr : Unsupported cipher key\n");
3068 		goto out;
3069 	}
3070 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3071 	aeadctx->enckey_len = keys.enckeylen;
3072 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3073 				    aeadctx->enckey_len << 3);
3074 	key_ctx_len =  sizeof(struct _key_ctx)
3075 		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3076 
3077 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3078 						0, key_ctx_len >> 4);
3079 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3080 	return 0;
3081 out:
3082 	aeadctx->enckey_len = 0;
3083 	return -EINVAL;
3084 }

static int chcr_aead_encrypt(struct aead_request *req)
3086 {
3087 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3088 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3089 
3090 	reqctx->verify = VERIFY_HW;
3091 
3092 	switch (get_aead_subtype(tfm)) {
3093 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3094 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3095 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3096 				    create_authenc_wr);
3097 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3098 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3099 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3100 				    create_aead_ccm_wr);
3101 	default:
3102 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3103 				    create_gcm_wr);
3104 	}
3105 }
3106 
3107 static int chcr_aead_decrypt(struct aead_request *req)
3108 {
3109 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3110 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
3111 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3112 	int size;
3113 
3114 	if (aeadctx->mayverify == VERIFY_SW) {
3115 		size = crypto_aead_maxauthsize(tfm);
3116 		reqctx->verify = VERIFY_SW;
3117 	} else {
3118 		size = 0;
3119 		reqctx->verify = VERIFY_HW;
3120 	}
3121 
3122 	switch (get_aead_subtype(tfm)) {
3123 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3124 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3125 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3126 				    create_authenc_wr);
3127 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3128 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3129 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3130 				    create_aead_ccm_wr);
3131 	default:
3132 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3133 				    create_gcm_wr);
3134 	}
3135 }
3136 
3137 static int chcr_aead_op(struct aead_request *req,
3138 			  unsigned short op_type,
3139 			  int size,
3140 			  create_wr_t create_wr_fn)
3141 {
3142 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3143 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
3144 	struct uld_ctx *u_ctx;
3145 	struct sk_buff *skb;
3146 
3147 	if (!ctx->dev) {
3148 		pr_err("chcr : %s : No crypto device.\n", __func__);
3149 		return -ENXIO;
3150 	}
3151 	u_ctx = ULD_CTX(ctx);
3152 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3153 				   ctx->tx_qidx)) {
3154 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3155 			return -EBUSY;
3156 	}
3157 
3158 	/* Form a WR from req */
3159 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
3160 			   op_type);
3161 
	if (IS_ERR_OR_NULL(skb))
		return skb ? PTR_ERR(skb) : -ENOMEM;
3164 
3165 	skb->dev = u_ctx->lldi.ports[0];
3166 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
3167 	chcr_send_wr(skb);
3168 	return -EINPROGRESS;
3169 }
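
/* Table of algorithms this driver registers with the crypto API. */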
3170 static struct chcr_alg_template driver_algs[] = {
3171 	/* AES-CBC */
3172 	{
3173 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3174 		.is_registered = 0,
3175 		.alg.crypto = {
3176 			.cra_name		= "cbc(aes)",
3177 			.cra_driver_name	= "cbc-aes-chcr",
3178 			.cra_blocksize		= AES_BLOCK_SIZE,
3179 			.cra_init		= chcr_cra_init,
3180 			.cra_exit		= chcr_cra_exit,
3181 			.cra_u.ablkcipher	= {
3182 				.min_keysize	= AES_MIN_KEY_SIZE,
3183 				.max_keysize	= AES_MAX_KEY_SIZE,
3184 				.ivsize		= AES_BLOCK_SIZE,
3185 				.setkey			= chcr_aes_cbc_setkey,
3186 				.encrypt		= chcr_aes_encrypt,
3187 				.decrypt		= chcr_aes_decrypt,
3188 			}
3189 		}
3190 	},
3191 	{
3192 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3193 		.is_registered = 0,
3194 		.alg.crypto =   {
3195 			.cra_name		= "xts(aes)",
3196 			.cra_driver_name	= "xts-aes-chcr",
3197 			.cra_blocksize		= AES_BLOCK_SIZE,
3198 			.cra_init		= chcr_cra_init,
3199 			.cra_exit		= NULL,
3200 			.cra_u .ablkcipher = {
3201 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
3202 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
3203 					.ivsize		= AES_BLOCK_SIZE,
3204 					.setkey		= chcr_aes_xts_setkey,
3205 					.encrypt	= chcr_aes_encrypt,
3206 					.decrypt	= chcr_aes_decrypt,
3207 				}
3208 			}
3209 	},
3210 	{
3211 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3212 		.is_registered = 0,
3213 		.alg.crypto = {
3214 			.cra_name		= "ctr(aes)",
3215 			.cra_driver_name	= "ctr-aes-chcr",
3216 			.cra_blocksize		= 1,
3217 			.cra_init		= chcr_cra_init,
3218 			.cra_exit		= chcr_cra_exit,
3219 			.cra_u.ablkcipher	= {
3220 				.min_keysize	= AES_MIN_KEY_SIZE,
3221 				.max_keysize	= AES_MAX_KEY_SIZE,
3222 				.ivsize		= AES_BLOCK_SIZE,
3223 				.setkey		= chcr_aes_ctr_setkey,
3224 				.encrypt	= chcr_aes_encrypt,
3225 				.decrypt	= chcr_aes_decrypt,
3226 			}
3227 		}
3228 	},
3229 	{
3230 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3231 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3232 		.is_registered = 0,
3233 		.alg.crypto = {
3234 			.cra_name		= "rfc3686(ctr(aes))",
3235 			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3236 			.cra_blocksize		= 1,
3237 			.cra_init		= chcr_rfc3686_init,
3238 			.cra_exit		= chcr_cra_exit,
3239 			.cra_u.ablkcipher	= {
3240 				.min_keysize	= AES_MIN_KEY_SIZE +
3241 					CTR_RFC3686_NONCE_SIZE,
3242 				.max_keysize	= AES_MAX_KEY_SIZE +
3243 					CTR_RFC3686_NONCE_SIZE,
3244 				.ivsize		= CTR_RFC3686_IV_SIZE,
3245 				.setkey		= chcr_aes_rfc3686_setkey,
3246 				.encrypt	= chcr_aes_encrypt,
3247 				.decrypt	= chcr_aes_decrypt,
3248 				.geniv          = "seqiv",
3249 			}
3250 		}
3251 	},
3252 	/* SHA */
3253 	{
3254 		.type = CRYPTO_ALG_TYPE_AHASH,
3255 		.is_registered = 0,
3256 		.alg.hash = {
3257 			.halg.digestsize = SHA1_DIGEST_SIZE,
3258 			.halg.base = {
3259 				.cra_name = "sha1",
3260 				.cra_driver_name = "sha1-chcr",
3261 				.cra_blocksize = SHA1_BLOCK_SIZE,
3262 			}
3263 		}
3264 	},
3265 	{
3266 		.type = CRYPTO_ALG_TYPE_AHASH,
3267 		.is_registered = 0,
3268 		.alg.hash = {
3269 			.halg.digestsize = SHA256_DIGEST_SIZE,
3270 			.halg.base = {
3271 				.cra_name = "sha256",
3272 				.cra_driver_name = "sha256-chcr",
3273 				.cra_blocksize = SHA256_BLOCK_SIZE,
3274 			}
3275 		}
3276 	},
3277 	{
3278 		.type = CRYPTO_ALG_TYPE_AHASH,
3279 		.is_registered = 0,
3280 		.alg.hash = {
3281 			.halg.digestsize = SHA224_DIGEST_SIZE,
3282 			.halg.base = {
3283 				.cra_name = "sha224",
3284 				.cra_driver_name = "sha224-chcr",
3285 				.cra_blocksize = SHA224_BLOCK_SIZE,
3286 			}
3287 		}
3288 	},
3289 	{
3290 		.type = CRYPTO_ALG_TYPE_AHASH,
3291 		.is_registered = 0,
3292 		.alg.hash = {
3293 			.halg.digestsize = SHA384_DIGEST_SIZE,
3294 			.halg.base = {
3295 				.cra_name = "sha384",
3296 				.cra_driver_name = "sha384-chcr",
3297 				.cra_blocksize = SHA384_BLOCK_SIZE,
3298 			}
3299 		}
3300 	},
3301 	{
3302 		.type = CRYPTO_ALG_TYPE_AHASH,
3303 		.is_registered = 0,
3304 		.alg.hash = {
3305 			.halg.digestsize = SHA512_DIGEST_SIZE,
3306 			.halg.base = {
3307 				.cra_name = "sha512",
3308 				.cra_driver_name = "sha512-chcr",
3309 				.cra_blocksize = SHA512_BLOCK_SIZE,
3310 			}
3311 		}
3312 	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* AEAD */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 12,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
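
/*
 * Example (illustrative sketch, not part of the driver): kernel
 * consumers request an algorithm by its generic cra_name, and the
 * crypto core selects the highest-priority registered implementation.
 * Once the table above has been registered, a request for "gcm(aes)"
 * is expected to resolve to "gcm-aes-chcr".  The helper name is
 * hypothetical and error handling is abbreviated.
 */
static int __maybe_unused chcr_example_alloc_gcm(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Should print "gcm-aes-chcr" while this driver is loaded. */
	pr_debug("selected %s\n",
		 crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));

	crypto_free_aead(tfm);
	return 0;
}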

/*
 *	chcr_unregister_alg - Deregister crypto algorithms from the
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
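
/*
 * Context sizing for the ahash entries: plain SHA transforms need only
 * the common chcr_context, while HMAC transforms carry an additional
 * hmac_ctx for the precomputed ipad/opad state.  The request context
 * also serves as the export/import state (halg.statesize below).
 */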
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

/*
 *	chcr_register_alg - Register crypto algorithms with the kernel
 *	framework.
 */
static int chcr_register_alg(void)
{
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
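		/*
		 * Fill in the fields common to each algorithm class here,
		 * at registration time, so the static table above only
		 * carries per-algorithm data.  CRYPTO_ALG_NEED_FALLBACK
		 * advertises that the driver may punt requests it cannot
		 * offload to a software implementation.
		 */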
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			name = a_hash->halg.base.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s : Algorithm registration failed\n",
			       name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once, when the first device comes up. After
 *	this the kernel will start calling the driver APIs for crypto
 *	operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}
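
/*
 * A minimal sketch of the intended call discipline (hypothetical
 * helper, not the actual chcr_core code): register the algorithms
 * exactly once, when the first device comes up.
 */
static int __maybe_unused chcr_example_dev_up(atomic_t *dev_count)
{
	/* Only the first device triggers algorithm registration. */
	if (atomic_inc_return(dev_count) == 1)
		return start_crypto();
	return 0;
}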

/*
 *	stop_crypto - Deregister all the crypto algorithms from the kernel.
 *	This should be called once, when the last device goes down. After
 *	this the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
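
/*
 * The matching teardown sketch (hypothetical helper, mirroring the one
 * above): deregister only when the last device goes down.
 */
static int __maybe_unused chcr_example_dev_down(atomic_t *dev_count)
{
	/* Only the last device going away tears the algorithms down. */
	if (atomic_dec_and_test(dev_count))
		return stop_crypto();
	return 0;
}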