/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

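/*
 * Worst-case size, in bytes, of the descriptor needed to hold N
 * scatter/gather entries: sgl_ent_len[] for a ULPTX source SGL and
 * dsgl_ent_len[] for a PHYS_DSGL destination list, indexed by entry
 * count.  The WR-sizing code below uses these tables to check cheaply
 * whether one more entry still fits in the remaining work-request space.
 */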
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

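/*
 * AES key-schedule round constants (Rcon), kept in the most significant
 * byte to match the big-endian key words used in get_aes_decrypt_key().
 */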
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

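/*
 * Count the hardware SG entries needed to cover @reqlen bytes of @sg when
 * a single entry can map at most @entlen bytes, after skipping the first
 * @skip bytes of the list.
 */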
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				authsize, req->assoclen +
				req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	aead_request_complete(req, err);

	return err;
}

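/*
 * Expand the AES key schedule for @key and emit the last Nk round-key
 * words, in reverse order, into @dec_key.  This is the "reverse round
 * key" material that the hardware uses to run AES decryption.
 */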
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

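/*
 * Hash one ipad/opad block with the software shash and export the raw
 * internal state into @result_hash.  This partial state, not a finished
 * digest, is what gets loaded into the hardware key context for HMAC.
 */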
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

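/*
 * Byte-swap the exported hash state into the big-endian layout the
 * hardware expects: 64-bit words for SHA-384/512, 32-bit words otherwise.
 */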
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

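/*
 * Return how many source bytes, starting @srcskip bytes into @src, fit in
 * one hash work request when @space bytes of SGL room remain and @minsg
 * entries are already accounted for.
 */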
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

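/*
 * Walk @src and @dst together and return the number of payload bytes that
 * can go into one cipher work request: progress is bounded by @space bytes
 * of combined SGL/DSGL room and by MAX_DSGL_ENT destination entries.
 */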
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}

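/*
 * Fill the FW_CRYPTO_LOOKASIDE_WR header and ULPTX command fields common
 * to every cipher/hash/AEAD work request: queue ids, lengths in 16-byte
 * units, the completion cookie, and whether the payload is immediate.
 */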
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid, portno;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	portno = rxqidx / ctx->rxq_perchan;
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: Container for create_cipher_wr()'s parameters
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
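
/*
 * Add @add to the 128-bit big-endian counter in @srciv, propagating the
 * carry, and write the result to @dstiv.
 */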
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that fit without overflow */
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}

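/*
 * Recompute the XTS tweak for the next chunk of a multi-WR request:
 * encrypt the IV with the tweak key, then advance it by the number of
 * blocks already processed via multiplications by x in GF(2^128).  For a
 * non-final chunk the tweak is decrypted back, since the hardware expects
 * the pre-encryption value.
 */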
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192-bit key, remove the padding zeroes that were
	 * added in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */

static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

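/*
 * Completion handler for a cipher WR: refresh the IV, issue the next WR
 * if the request was split across several, fall back to the software
 * cipher when no forward progress is possible, and complete the request
 * once everything has been processed.
 */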
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	skcipher_request_complete(req, err);
	return err;
}

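/*
 * Build and validate the first WR for a cipher request: sanity-check key,
 * IV and length, map the buffers for DMA, decide between immediate data
 * and SGL mode, and fall back to the software cipher when the request
 * cannot make progress in hardware.
 */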
static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					    AES_MIN_KEY_SIZE +
					    sizeof(struct cpl_rx_phys_dsgl) +
					/* Min dsgl size */
					    32))) {
		/* Can be sent as immediate data */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);

	} else {

		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		err = -ENOSPC;
		goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}

static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
		return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment failed\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}

static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
					 crypto_skcipher_reqsize(ablkctx->sw_cipher));

	return chcr_device_init(ctx);
}

static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp()
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
}

static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 *	create_hash_wr - Create hash work request
 *	@req: Hash request base
 *	@param: Container for create_hash_wr()'s parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((param->kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap may be freed. Holding
	 * the inflight count for dev guarantees that lldi and padap remain
	 * valid.
	 */
1692 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1693 						req_ctx->txqidx) &&
1694 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1695 			error = -ENOSPC;
1696 			goto err;
1697 	}
1698 
1699 	chcr_init_hctx_per_wr(req_ctx);
1700 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1701 	if (error) {
1702 		error = -ENOMEM;
1703 		goto err;
1704 	}
1705 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1708 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1709 	if (params.sg_len > req->nbytes)
1710 		params.sg_len = req->nbytes;
1711 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1712 			req_ctx->reqlen;
1713 	params.opad_needed = 0;
1714 	params.more = 1;
1715 	params.last = 0;
1716 	params.bfr_len = req_ctx->reqlen;
1717 	params.scmd1 = 0;
1718 	req_ctx->hctx_wr.srcsg = req->src;
1719 
1720 	params.hash_size = params.alg_prm.result_size;
1721 	req_ctx->data_len += params.sg_len + params.bfr_len;
1722 	skb = create_hash_wr(req, &params);
1723 	if (IS_ERR(skb)) {
1724 		error = PTR_ERR(skb);
1725 		goto unmap;
1726 	}
1727 
1728 	req_ctx->hctx_wr.processed += params.sg_len;
1729 	if (remainder) {
1730 		/* Swap buffers */
1731 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1732 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1733 				   req_ctx->reqbfr, remainder, req->nbytes -
1734 				   remainder);
1735 	}
1736 	req_ctx->reqlen = remainder;
1737 	skb->dev = u_ctx->lldi.ports[0];
1738 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1739 	chcr_send_wr(skb);
1740 	return -EINPROGRESS;
1741 unmap:
1742 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1743 err:
1744 	chcr_dec_wrcount(dev);
1745 	return error;
1746 }
1747 
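/*
 * Hand-build the final block, MD-style: a 0x80 byte, zero padding, and
 * the total length in bits (hence scmd1 << 3) stored big-endian in the
 * last eight bytes of the 64-byte (SHA-1/SHA-256 family) or 128-byte
 * (SHA-384/SHA-512) block.
 */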
1748 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1749 {
1750 	memset(bfr_ptr, 0, bs);
1751 	*bfr_ptr = 0x80;
1752 	if (bs == 64)
1753 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1754 	else
1755 		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1756 }
1757 
1758 static int chcr_ahash_final(struct ahash_request *req)
1759 {
1760 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1761 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1762 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763 	struct hash_wr_param params;
1764 	struct sk_buff *skb;
1765 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1766 	struct chcr_context *ctx = h_ctx(rtfm);
1767 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1768 	int error;
1769 	unsigned int cpu;
1770 
1771 	cpu = get_cpu();
1772 	req_ctx->txqidx = cpu % ctx->ntxq;
1773 	req_ctx->rxqidx = cpu % ctx->nrxq;
1774 	put_cpu();
1775 
1776 	error = chcr_inc_wrcount(dev);
1777 	if (error)
1778 		return -ENXIO;
1779 
1780 	chcr_init_hctx_per_wr(req_ctx);
1781 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1782 		params.opad_needed = 1;
1783 	else
1784 		params.opad_needed = 0;
1785 	params.sg_len = 0;
1786 	req_ctx->hctx_wr.isfinal = 1;
1787 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1788 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1789 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1790 		params.opad_needed = 1;
1791 		params.kctx_len *= 2;
1792 	} else {
1793 		params.opad_needed = 0;
1794 	}
1795 
1796 	req_ctx->hctx_wr.result = 1;
1797 	params.bfr_len = req_ctx->reqlen;
1798 	req_ctx->data_len += params.bfr_len + params.sg_len;
1799 	req_ctx->hctx_wr.srcsg = req->src;
1800 	if (req_ctx->reqlen == 0) {
1801 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1802 		params.last = 0;
1803 		params.more = 1;
1804 		params.scmd1 = 0;
1805 		params.bfr_len = bs;
1806 
1807 	} else {
1808 		params.scmd1 = req_ctx->data_len;
1809 		params.last = 1;
1810 		params.more = 0;
1811 	}
1812 	params.hash_size = crypto_ahash_digestsize(rtfm);
1813 	skb = create_hash_wr(req, &params);
1814 	if (IS_ERR(skb)) {
1815 		error = PTR_ERR(skb);
1816 		goto err;
1817 	}
1818 	req_ctx->reqlen = 0;
1819 	skb->dev = u_ctx->lldi.ports[0];
1820 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1821 	chcr_send_wr(skb);
1822 	return -EINPROGRESS;
1823 err:
1824 	chcr_dec_wrcount(dev);
1825 	return error;
1826 }
1827 
1828 static int chcr_ahash_finup(struct ahash_request *req)
1829 {
1830 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1831 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1832 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1833 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1834 	struct chcr_context *ctx = h_ctx(rtfm);
1835 	struct sk_buff *skb;
1836 	struct hash_wr_param params;
1837 	u8  bs;
1838 	int error;
1839 	unsigned int cpu;
1840 
1841 	cpu = get_cpu();
1842 	req_ctx->txqidx = cpu % ctx->ntxq;
1843 	req_ctx->rxqidx = cpu % ctx->nrxq;
1844 	put_cpu();
1845 
1846 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1847 	error = chcr_inc_wrcount(dev);
1848 	if (error)
1849 		return -ENXIO;
1850 
1851 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1852 						req_ctx->txqidx) &&
1853 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1854 		error = -ENOSPC;
1855 		goto err;
1856 	}
1857 	chcr_init_hctx_per_wr(req_ctx);
1858 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859 	if (error) {
1860 		error = -ENOMEM;
1861 		goto err;
1862 	}
1863 
1864 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867 		params.kctx_len *= 2;
1868 		params.opad_needed = 1;
1869 	} else {
1870 		params.opad_needed = 0;
1871 	}
1872 
1873 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1874 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1875 	if (params.sg_len < req->nbytes) {
1876 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1877 			params.kctx_len /= 2;
1878 			params.opad_needed = 0;
1879 		}
1880 		params.last = 0;
1881 		params.more = 1;
1882 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1883 					- req_ctx->reqlen;
1884 		params.hash_size = params.alg_prm.result_size;
1885 		params.scmd1 = 0;
1886 	} else {
1887 		params.last = 1;
1888 		params.more = 0;
1889 		params.sg_len = req->nbytes;
1890 		params.hash_size = crypto_ahash_digestsize(rtfm);
1891 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1892 				params.sg_len;
1893 	}
1894 	params.bfr_len = req_ctx->reqlen;
1895 	req_ctx->data_len += params.bfr_len + params.sg_len;
1896 	req_ctx->hctx_wr.result = 1;
1897 	req_ctx->hctx_wr.srcsg = req->src;
1898 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1899 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1900 		params.last = 0;
1901 		params.more = 1;
1902 		params.scmd1 = 0;
1903 		params.bfr_len = bs;
1904 	}
1905 	skb = create_hash_wr(req, &params);
1906 	if (IS_ERR(skb)) {
1907 		error = PTR_ERR(skb);
1908 		goto unmap;
1909 	}
1910 	req_ctx->reqlen = 0;
1911 	req_ctx->hctx_wr.processed += params.sg_len;
1912 	skb->dev = u_ctx->lldi.ports[0];
1913 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1914 	chcr_send_wr(skb);
1915 	return -EINPROGRESS;
1916 unmap:
1917 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1918 err:
1919 	chcr_dec_wrcount(dev);
1920 	return error;
1921 }
1922 
1923 static int chcr_ahash_digest(struct ahash_request *req)
1924 {
1925 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1926 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1927 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1928 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1929 	struct chcr_context *ctx = h_ctx(rtfm);
1930 	struct sk_buff *skb;
1931 	struct hash_wr_param params;
1932 	u8  bs;
1933 	int error;
1934 	unsigned int cpu;
1935 
1936 	cpu = get_cpu();
1937 	req_ctx->txqidx = cpu % ctx->ntxq;
1938 	req_ctx->rxqidx = cpu % ctx->nrxq;
1939 	put_cpu();
1940 
1941 	rtfm->init(req);
1942 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1943 	error = chcr_inc_wrcount(dev);
1944 	if (error)
1945 		return -ENXIO;
1946 
1947 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1948 						req_ctx->txqidx) &&
1949 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1950 		error = -ENOSPC;
1951 		goto err;
1952 	}
1953 
1954 	chcr_init_hctx_per_wr(req_ctx);
1955 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1956 	if (error) {
1957 		error = -ENOMEM;
1958 		goto err;
1959 	}
1960 
1961 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1962 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1963 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1964 		params.kctx_len *= 2;
1965 		params.opad_needed = 1;
1966 	} else {
1967 		params.opad_needed = 0;
1968 	}
1969 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1970 				HASH_SPACE_LEFT(params.kctx_len), 0);
1971 	if (params.sg_len < req->nbytes) {
1972 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1973 			params.kctx_len /= 2;
1974 			params.opad_needed = 0;
1975 		}
1976 		params.last = 0;
1977 		params.more = 1;
1978 		params.scmd1 = 0;
1979 		params.sg_len = rounddown(params.sg_len, bs);
1980 		params.hash_size = params.alg_prm.result_size;
1981 	} else {
1982 		params.sg_len = req->nbytes;
1983 		params.hash_size = crypto_ahash_digestsize(rtfm);
1984 		params.last = 1;
1985 		params.more = 0;
1986 		params.scmd1 = req->nbytes + req_ctx->data_len;
1987 
1988 	}
1989 	params.bfr_len = 0;
1990 	req_ctx->hctx_wr.result = 1;
1991 	req_ctx->hctx_wr.srcsg = req->src;
1992 	req_ctx->data_len += params.bfr_len + params.sg_len;
1993 
1994 	if (req->nbytes == 0) {
1995 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1996 		params.more = 1;
1997 		params.bfr_len = bs;
1998 	}
1999 
2000 	skb = create_hash_wr(req, &params);
2001 	if (IS_ERR(skb)) {
2002 		error = PTR_ERR(skb);
2003 		goto unmap;
2004 	}
2005 	req_ctx->hctx_wr.processed += params.sg_len;
2006 	skb->dev = u_ctx->lldi.ports[0];
2007 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2008 	chcr_send_wr(skb);
2009 	return -EINPROGRESS;
2010 unmap:
2011 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2012 err:
2013 	chcr_dec_wrcount(dev);
2014 	return error;
2015 }
2016 
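/*
 * Issue the next work request for a hash request that spans multiple
 * WRs; called from the response handler until either the source data
 * is consumed or the final digest has been produced.
 */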
2017 static int chcr_ahash_continue(struct ahash_request *req)
2018 {
2019 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2020 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2021 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2022 	struct chcr_context *ctx = h_ctx(rtfm);
2023 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2024 	struct sk_buff *skb;
2025 	struct hash_wr_param params;
2026 	u8  bs;
2027 	int error;
2028 	unsigned int cpu;
2029 
2030 	cpu = get_cpu();
2031 	reqctx->txqidx = cpu % ctx->ntxq;
2032 	reqctx->rxqidx = cpu % ctx->nrxq;
2033 	put_cpu();
2034 
2035 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2036 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2037 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2038 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2039 		params.kctx_len *= 2;
2040 		params.opad_needed = 1;
2041 	} else {
2042 		params.opad_needed = 0;
2043 	}
2044 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2045 					    HASH_SPACE_LEFT(params.kctx_len),
2046 					    hctx_wr->src_ofst);
2047 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2048 		params.sg_len = req->nbytes - hctx_wr->processed;
2049 	if (!hctx_wr->result ||
2050 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2051 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2052 			params.kctx_len /= 2;
2053 			params.opad_needed = 0;
2054 		}
2055 		params.last = 0;
2056 		params.more = 1;
2057 		params.sg_len = rounddown(params.sg_len, bs);
2058 		params.hash_size = params.alg_prm.result_size;
2059 		params.scmd1 = 0;
2060 	} else {
2061 		params.last = 1;
2062 		params.more = 0;
2063 		params.hash_size = crypto_ahash_digestsize(rtfm);
2064 		params.scmd1 = reqctx->data_len + params.sg_len;
2065 	}
2066 	params.bfr_len = 0;
2067 	reqctx->data_len += params.sg_len;
2068 	skb = create_hash_wr(req, &params);
2069 	if (IS_ERR(skb)) {
2070 		error = PTR_ERR(skb);
2071 		goto err;
2072 	}
2073 	hctx_wr->processed += params.sg_len;
2074 	skb->dev = u_ctx->lldi.ports[0];
2075 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2076 	chcr_send_wr(skb);
2077 	return 0;
2078 err:
2079 	return error;
2080 }
2081 
2082 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2083 					  unsigned char *input,
2084 					  int err)
2085 {
2086 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2087 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2088 	int digestsize, updated_digestsize;
2089 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2090 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2091 	struct chcr_dev *dev = h_ctx(tfm)->dev;
2092 
2093 	if (input == NULL)
2094 		goto out;
2095 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2096 	updated_digestsize = digestsize;
2097 	if (digestsize == SHA224_DIGEST_SIZE)
2098 		updated_digestsize = SHA256_DIGEST_SIZE;
2099 	else if (digestsize == SHA384_DIGEST_SIZE)
2100 		updated_digestsize = SHA512_DIGEST_SIZE;
2101 
2102 	if (hctx_wr->dma_addr) {
2103 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2104 				 hctx_wr->dma_len, DMA_TO_DEVICE);
2105 		hctx_wr->dma_addr = 0;
2106 	}
2107 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2108 				 req->nbytes)) {
2109 		if (hctx_wr->result == 1) {
2110 			hctx_wr->result = 0;
2111 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2112 			       digestsize);
2113 		} else {
2114 			memcpy(reqctx->partial_hash,
2115 			       input + sizeof(struct cpl_fw6_pld),
2116 			       updated_digestsize);
2117 
2118 		}
2119 		goto unmap;
2120 	}
2121 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2122 	       updated_digestsize);
2123 
2124 	err = chcr_ahash_continue(req);
2125 	if (err)
2126 		goto unmap;
2127 	return;
2128 unmap:
2129 	if (hctx_wr->is_sg_map)
2130 		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2131 
2133 out:
2134 	chcr_dec_wrcount(dev);
2135 	ahash_request_complete(req, err);
2136 }
2137 
2138 /*
2139  *	chcr_handle_resp - handle completion and unmap the DMA buffers
2140  *	@req: crypto request
2141  */
2142 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2143 			 int err)
2144 {
2145 	struct crypto_tfm *tfm = req->tfm;
2146 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2147 	struct adapter *adap = padap(ctx->dev);
2148 
2149 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2150 	case CRYPTO_ALG_TYPE_AEAD:
2151 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2152 		break;
2153 
2154 	case CRYPTO_ALG_TYPE_SKCIPHER:
2155 		chcr_handle_cipher_resp(skcipher_request_cast(req),
2156 					input, err);
2157 		break;
2158 	case CRYPTO_ALG_TYPE_AHASH:
2159 		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2160 	}
2161 	atomic_inc(&adap->chcr_stats.complete);
2162 	return err;
2163 }

2164 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2165 {
2166 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2167 	struct chcr_ahash_req_ctx *state = out;
2168 
2169 	state->reqlen = req_ctx->reqlen;
2170 	state->data_len = req_ctx->data_len;
2171 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2172 	memcpy(state->partial_hash, req_ctx->partial_hash,
2173 	       CHCR_HASH_MAX_DIGEST_SIZE);
2174 	chcr_init_hctx_per_wr(state);
2175 	return 0;
2176 }
2177 
2178 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2179 {
2180 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2181 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2182 
2183 	req_ctx->reqlen = state->reqlen;
2184 	req_ctx->data_len = state->data_len;
2185 	req_ctx->reqbfr = req_ctx->bfr1;
2186 	req_ctx->skbfr = req_ctx->bfr2;
2187 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2188 	memcpy(req_ctx->partial_hash, state->partial_hash,
2189 	       CHCR_HASH_MAX_DIGEST_SIZE);
2190 	chcr_init_hctx_per_wr(req_ctx);
2191 	return 0;
2192 }
2193 
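/*
 * HMAC setkey: a key longer than the block size is first digested; the
 * (zero-padded) key is then XORed with the RFC 2104 pad bytes
 * (IPAD_DATA/OPAD_DATA carry the usual 0x36/0x5c patterns replicated
 * across a word), and partial hashes of both padded blocks are
 * precomputed so the hardware can resume from those states.
 */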
2194 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2195 			     unsigned int keylen)
2196 {
2197 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2198 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2199 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2200 	unsigned int i, err = 0, updated_digestsize;
2201 
2202 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2203 
2204 	/* Use the key to calculate the ipad and opad. The ipad will be sent
2205 	 * with the first request's data, the opad with the final hash result.
2206 	 * They are kept in hmacctx->ipad and hmacctx->opad respectively.
2207 	 */
2208 	shash->tfm = hmacctx->base_hash;
2209 	if (keylen > bs) {
2210 		err = crypto_shash_digest(shash, key, keylen,
2211 					  hmacctx->ipad);
2212 		if (err)
2213 			goto out;
2214 		keylen = digestsize;
2215 	} else {
2216 		memcpy(hmacctx->ipad, key, keylen);
2217 	}
2218 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2219 	unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
2220 		      "fortified memcpy causes -Wrestrict warning");
2221 
2222 	for (i = 0; i < bs / sizeof(int); i++) {
2223 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2224 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2225 	}
2226 
2227 	updated_digestsize = digestsize;
2228 	if (digestsize == SHA224_DIGEST_SIZE)
2229 		updated_digestsize = SHA256_DIGEST_SIZE;
2230 	else if (digestsize == SHA384_DIGEST_SIZE)
2231 		updated_digestsize = SHA512_DIGEST_SIZE;
2232 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2233 					hmacctx->ipad, digestsize);
2234 	if (err)
2235 		goto out;
2236 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2237 
2238 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2239 					hmacctx->opad, digestsize);
2240 	if (err)
2241 		goto out;
2242 	chcr_change_order(hmacctx->opad, updated_digestsize);
2243 out:
2244 	return err;
2245 }
2246 
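/*
 * XTS setkey: key_len covers both XTS keys back to back, so 32 bytes
 * (AES_KEYSIZE_128 * 2 == AES_KEYSIZE_256) means two AES-128 keys, and
 * key_len << 2 is the size in bits of a single key as expected by
 * get_aes_decrypt_key().
 */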
2247 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2248 			       unsigned int key_len)
2249 {
2250 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2251 	unsigned short context_size = 0;
2252 	int err;
2253 
2254 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2255 	if (err)
2256 		goto badkey_err;
2257 
2258 	memcpy(ablkctx->key, key, key_len);
2259 	ablkctx->enckey_len = key_len;
2260 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2261 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2262 	/* Both XTS keys must be aligned to a 16-byte boundary by zero
2263 	 * padding, so each 24-byte key gets 8 bytes of zeros appended.
2264 	 */
2265 	if (key_len == 48) {
2266 		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2267 				+ 16) >> 4;
2268 		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2269 		memset(ablkctx->key + 24, 0, 8);
2270 		memset(ablkctx->key + 56, 0, 8);
2271 		ablkctx->enckey_len = 64;
2272 		ablkctx->key_ctx_hdr =
2273 			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2274 					 CHCR_KEYCTX_NO_KEY, 1,
2275 					 0, context_size);
2276 	} else {
2277 		ablkctx->key_ctx_hdr =
2278 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2279 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2280 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2281 				 CHCR_KEYCTX_NO_KEY, 1,
2282 				 0, context_size);
2283 	}
2284 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2285 	return 0;
2286 badkey_err:
2287 	ablkctx->enckey_len = 0;
2288 
2289 	return err;
2290 }
2291 
2292 static int chcr_sha_init(struct ahash_request *areq)
2293 {
2294 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2295 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2296 	int digestsize =  crypto_ahash_digestsize(tfm);
2297 
2298 	req_ctx->data_len = 0;
2299 	req_ctx->reqlen = 0;
2300 	req_ctx->reqbfr = req_ctx->bfr1;
2301 	req_ctx->skbfr = req_ctx->bfr2;
2302 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2303 
2304 	return 0;
2305 }
2306 
2307 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2308 {
2309 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2310 				 sizeof(struct chcr_ahash_req_ctx));
2311 	return chcr_device_init(crypto_tfm_ctx(tfm));
2312 }
2313 
2314 static int chcr_hmac_init(struct ahash_request *areq)
2315 {
2316 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2317 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2318 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2319 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2320 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2321 
2322 	chcr_sha_init(areq);
2323 	req_ctx->data_len = bs;
2324 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2325 		if (digestsize == SHA224_DIGEST_SIZE)
2326 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2327 			       SHA256_DIGEST_SIZE);
2328 		else if (digestsize == SHA384_DIGEST_SIZE)
2329 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2330 			       SHA512_DIGEST_SIZE);
2331 		else
2332 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2333 			       digestsize);
2334 	}
2335 	return 0;
2336 }
2337 
2338 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2339 {
2340 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2341 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2342 	unsigned int digestsize =
2343 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2344 
2345 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2346 				 sizeof(struct chcr_ahash_req_ctx));
2347 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2348 	if (IS_ERR(hmacctx->base_hash))
2349 		return PTR_ERR(hmacctx->base_hash);
2350 	return chcr_device_init(crypto_tfm_ctx(tfm));
2351 }
2352 
2353 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2354 {
2355 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2356 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2357 
2358 	if (hmacctx->base_hash) {
2359 		chcr_free_shash(hmacctx->base_hash);
2360 		hmacctx->base_hash = NULL;
2361 	}
2362 }
2363 
2364 inline void chcr_aead_common_exit(struct aead_request *req)
2365 {
2366 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2367 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2368 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2369 
2370 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2371 }
2372 
2373 static int chcr_aead_common_init(struct aead_request *req)
2374 {
2375 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2376 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2377 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2378 	unsigned int authsize = crypto_aead_authsize(tfm);
2379 	int error = -EINVAL;
2380 
2381 	/* validate key size */
2382 	if (aeadctx->enckey_len == 0)
2383 		goto err;
2384 	if (reqctx->op && req->cryptlen < authsize)
2385 		goto err;
2386 	if (reqctx->b0_len)
2387 		reqctx->scratch_pad = reqctx->iv + IV;
2388 	else
2389 		reqctx->scratch_pad = NULL;
2390 
2391 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2392 				  reqctx->op);
2393 	if (error) {
2394 		error = -ENOMEM;
2395 		goto err;
2396 	}
2397 
2398 	return 0;
2399 err:
2400 	return error;
2401 }
2402 
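/*
 * Decide whether a request must be handed to the software fallback
 * cipher: a zero-length payload, too many destination SGL entries,
 * more AAD than the engine accepts, or a work request that would
 * exceed SGE_MAX_WR_LEN.
 */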
2403 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2404 				   int aadmax, int wrlen,
2405 				   unsigned short op_type)
2406 {
2407 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2408 
2409 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2410 	    dst_nents > MAX_DSGL_ENT ||
2411 	    (req->assoclen > aadmax) ||
2412 	    (wrlen > SGE_MAX_WR_LEN))
2413 		return 1;
2414 	return 0;
2415 }
2416 
2417 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2418 {
2419 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2420 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2421 	struct aead_request *subreq = aead_request_ctx_dma(req);
2422 
2423 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2424 	aead_request_set_callback(subreq, req->base.flags,
2425 				  req->base.complete, req->base.data);
2426 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2427 				 req->iv);
2428 	aead_request_set_ad(subreq, req->assoclen);
2429 	return op_type ? crypto_aead_decrypt(subreq) :
2430 		crypto_aead_encrypt(subreq);
2431 }
2432 
2433 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2434 					 unsigned short qid,
2435 					 int size)
2436 {
2437 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2438 	struct chcr_context *ctx = a_ctx(tfm);
2439 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2440 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2441 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2442 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2443 	struct sk_buff *skb = NULL;
2444 	struct chcr_wr *chcr_req;
2445 	struct cpl_rx_phys_dsgl *phys_cpl;
2446 	struct ulptx_sgl *ulptx;
2447 	unsigned int transhdr_len;
2448 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2449 	unsigned int   kctx_len = 0, dnents, snents;
2450 	unsigned int  authsize = crypto_aead_authsize(tfm);
2451 	int error = -EINVAL;
2452 	u8 *ivptr;
2453 	int null = 0;
2454 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2455 		GFP_ATOMIC;
2456 	struct adapter *adap = padap(ctx->dev);
2457 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2458 
2459 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2460 	if (req->cryptlen == 0)
2461 		return NULL;
2462 
2463 	reqctx->b0_len = 0;
2464 	error = chcr_aead_common_init(req);
2465 	if (error)
2466 		return ERR_PTR(error);
2467 
2468 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2469 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2470 		null = 1;
2471 	}
2472 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2473 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2474 	dnents += MIN_AUTH_SG; // For IV
2475 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2476 			       CHCR_SRC_SG_SIZE, 0);
2477 	dst_size = get_space_for_phys_dsgl(dnents);
2478 	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2479 		- sizeof(chcr_req->key_ctx);
2480 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2481 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2482 			SGE_MAX_WR_LEN;
2483 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2484 			: (sgl_len(snents) * 8);
2485 	transhdr_len += temp;
2486 	transhdr_len = roundup(transhdr_len, 16);
2487 
2488 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2489 				    transhdr_len, reqctx->op)) {
2490 		atomic_inc(&adap->chcr_stats.fallback);
2491 		chcr_aead_common_exit(req);
2492 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2493 	}
2494 	skb = alloc_skb(transhdr_len, flags);
2495 	if (!skb) {
2496 		error = -ENOMEM;
2497 		goto err;
2498 	}
2499 
2500 	chcr_req = __skb_put_zero(skb, transhdr_len);
2501 
2502 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2503 
2504 	/*
2505 	 * Input order is AAD, IV and payload, where the IV is included as
2506 	 * part of the authdata. All other fields should be filled according
2507 	 * to the hardware spec.
2508 	 */
2509 	chcr_req->sec_cpl.op_ivinsrtofst =
2510 				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2511 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2512 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2513 					null ? 0 : 1 + IV,
2514 					null ? 0 : IV + req->assoclen,
2515 					req->assoclen + IV + 1,
2516 					(temp & 0x1F0) >> 4);
2517 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2518 					temp & 0xF,
2519 					null ? 0 : req->assoclen + IV + 1,
2520 					temp, temp);
2521 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2522 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2523 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2524 	else
2525 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2526 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2527 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2528 					temp,
2529 					actx->auth_mode, aeadctx->hmac_ctrl,
2530 					IV >> 1);
2531 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2532 					 0, 0, dst_size);
2533 
2534 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2535 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2536 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2537 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2538 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2539 		       aeadctx->enckey_len);
2540 	else
2541 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2542 		       aeadctx->enckey_len);
2543 
2544 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2545 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2546 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2547 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2548 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2549 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2550 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2551 		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2552 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2553 				CTR_RFC3686_IV_SIZE);
2554 		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2555 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2556 	} else {
2557 		memcpy(ivptr, req->iv, IV);
2558 	}
2559 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2560 	chcr_add_aead_src_ent(req, ulptx);
2561 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2562 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2563 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2564 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2565 		   transhdr_len, temp, 0);
2566 	reqctx->skb = skb;
2567 
2568 	return skb;
2569 err:
2570 	chcr_aead_common_exit(req);
2571 
2572 	return ERR_PTR(error);
2573 }
2574 
2575 int chcr_aead_dma_map(struct device *dev,
2576 		      struct aead_request *req,
2577 		      unsigned short op_type)
2578 {
2579 	int error;
2580 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2581 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2582 	unsigned int authsize = crypto_aead_authsize(tfm);
2583 	int src_len, dst_len;
2584 
2585 	/* Calculate and handle the src and dst sg lengths separately
2586 	 * for in-place and out-of-place operations.
2587 	 */
2588 	if (req->src == req->dst) {
2589 		src_len = req->assoclen + req->cryptlen + (op_type ?
2590 							0 : authsize);
2591 		dst_len = src_len;
2592 	} else {
2593 		src_len = req->assoclen + req->cryptlen;
2594 		dst_len = req->assoclen + req->cryptlen + (op_type ?
2595 							-authsize : authsize);
2596 	}
2597 
2598 	if (!req->cryptlen || !src_len || !dst_len)
2599 		return 0;
2600 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2601 					DMA_BIDIRECTIONAL);
2602 	if (dma_mapping_error(dev, reqctx->iv_dma))
2603 		return -ENOMEM;
2604 	if (reqctx->b0_len)
2605 		reqctx->b0_dma = reqctx->iv_dma + IV;
2606 	else
2607 		reqctx->b0_dma = 0;
2608 	if (req->src == req->dst) {
2609 		error = dma_map_sg(dev, req->src,
2610 				sg_nents_for_len(req->src, src_len),
2611 					DMA_BIDIRECTIONAL);
2612 		if (!error)
2613 			goto err;
2614 	} else {
2615 		error = dma_map_sg(dev, req->src,
2616 				   sg_nents_for_len(req->src, src_len),
2617 				   DMA_TO_DEVICE);
2618 		if (!error)
2619 			goto err;
2620 		error = dma_map_sg(dev, req->dst,
2621 				   sg_nents_for_len(req->dst, dst_len),
2622 				   DMA_FROM_DEVICE);
2623 		if (!error) {
2624 			dma_unmap_sg(dev, req->src,
2625 				     sg_nents_for_len(req->src, src_len),
2626 				     DMA_TO_DEVICE);
2627 			goto err;
2628 		}
2629 	}
2630 
2631 	return 0;
2632 err:
2633 	dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len, DMA_BIDIRECTIONAL);
2634 	return -ENOMEM;
2635 }
2636 
2637 void chcr_aead_dma_unmap(struct device *dev,
2638 			 struct aead_request *req,
2639 			 unsigned short op_type)
2640 {
2641 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2642 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2643 	unsigned int authsize = crypto_aead_authsize(tfm);
2644 	int src_len, dst_len;
2645 
2646 	/* Calculate and handle the src and dst sg lengths separately
2647 	 * for in-place and out-of-place operations.
2648 	 */
2649 	if (req->src == req->dst) {
2650 		src_len = req->assoclen + req->cryptlen + (op_type ?
2651 							0 : authsize);
2652 		dst_len = src_len;
2653 	} else {
2654 		src_len = req->assoclen + req->cryptlen;
2655 		dst_len = req->assoclen + req->cryptlen + (op_type ?
2656 						-authsize : authsize);
2657 	}
2658 
2659 	if (!req->cryptlen || !src_len || !dst_len)
2660 		return;
2661 
2662 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2663 					DMA_BIDIRECTIONAL);
2664 	if (req->src == req->dst) {
2665 		dma_unmap_sg(dev, req->src,
2666 			     sg_nents_for_len(req->src, src_len),
2667 			     DMA_BIDIRECTIONAL);
2668 	} else {
2669 		dma_unmap_sg(dev, req->src,
2670 			     sg_nents_for_len(req->src, src_len),
2671 			     DMA_TO_DEVICE);
2672 		dma_unmap_sg(dev, req->dst,
2673 			     sg_nents_for_len(req->dst, dst_len),
2674 			     DMA_FROM_DEVICE);
2675 	}
2676 }
2677 
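/*
 * Source data is either copied inline into the work request
 * (reqctx->imm, small requests) or described by a ULPTX SGL for the
 * hardware to DMA from; the CCM B0 block, when present, goes first in
 * both cases.
 */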
2678 void chcr_add_aead_src_ent(struct aead_request *req,
2679 			   struct ulptx_sgl *ulptx)
2680 {
2681 	struct ulptx_walk ulp_walk;
2682 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2683 
2684 	if (reqctx->imm) {
2685 		u8 *buf = (u8 *)ulptx;
2686 
2687 		if (reqctx->b0_len) {
2688 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2689 			buf += reqctx->b0_len;
2690 		}
2691 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2692 				   buf, req->cryptlen + req->assoclen, 0);
2693 	} else {
2694 		ulptx_walk_init(&ulp_walk, ulptx);
2695 		if (reqctx->b0_len)
2696 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2697 					    reqctx->b0_dma);
2698 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2699 				  req->assoclen, 0);
2700 		ulptx_walk_end(&ulp_walk);
2701 	}
2702 }
2703 
2704 void chcr_add_aead_dst_ent(struct aead_request *req,
2705 			   struct cpl_rx_phys_dsgl *phys_cpl,
2706 			   unsigned short qid)
2707 {
2708 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2709 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2710 	struct dsgl_walk dsgl_walk;
2711 	unsigned int authsize = crypto_aead_authsize(tfm);
2712 	struct chcr_context *ctx = a_ctx(tfm);
2713 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2714 	u32 temp;
2715 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2716 
2717 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2718 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2719 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2720 	temp = req->assoclen + req->cryptlen +
2721 		(reqctx->op ? -authsize : authsize);
2722 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2723 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2724 }
2725 
2726 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2727 			     void *ulptx,
2728 			     struct  cipher_wr_param *wrparam)
2729 {
2730 	struct ulptx_walk ulp_walk;
2731 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2732 	u8 *buf = ulptx;
2733 
2734 	memcpy(buf, reqctx->iv, IV);
2735 	buf += IV;
2736 	if (reqctx->imm) {
2737 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2738 				   buf, wrparam->bytes, reqctx->processed);
2739 	} else {
2740 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2741 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2742 				  reqctx->src_ofst);
2743 		reqctx->srcsg = ulp_walk.last_sg;
2744 		reqctx->src_ofst = ulp_walk.last_sg_len;
2745 		ulptx_walk_end(&ulp_walk);
2746 	}
2747 }
2748 
2749 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2750 			     struct cpl_rx_phys_dsgl *phys_cpl,
2751 			     struct  cipher_wr_param *wrparam,
2752 			     unsigned short qid)
2753 {
2754 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2755 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2756 	struct chcr_context *ctx = c_ctx(tfm);
2757 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2758 	struct dsgl_walk dsgl_walk;
2759 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2760 
2761 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2762 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2763 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2764 			 reqctx->dst_ofst);
2765 	reqctx->dstsg = dsgl_walk.last_sg;
2766 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2767 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2768 }
2769 
2770 void chcr_add_hash_src_ent(struct ahash_request *req,
2771 			   struct ulptx_sgl *ulptx,
2772 			   struct hash_wr_param *param)
2773 {
2774 	struct ulptx_walk ulp_walk;
2775 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2776 
2777 	if (reqctx->hctx_wr.imm) {
2778 		u8 *buf = (u8 *)ulptx;
2779 
2780 		if (param->bfr_len) {
2781 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2782 			buf += param->bfr_len;
2783 		}
2784 
2785 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2786 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2787 				   param->sg_len, 0);
2788 	} else {
2789 		ulptx_walk_init(&ulp_walk, ulptx);
2790 		if (param->bfr_len)
2791 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2792 					    reqctx->hctx_wr.dma_addr);
2793 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2794 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2795 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2796 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2797 		ulptx_walk_end(&ulp_walk);
2798 	}
2799 }
2800 
2801 int chcr_hash_dma_map(struct device *dev,
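/*
 * Note: dma_map_sg() returns the number of mapped entries and 0 on
 * failure, so the "if (!error)" check below really tests for a failed
 * mapping.
 */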
2802 		      struct ahash_request *req)
2803 {
2804 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2805 	int error = 0;
2806 
2807 	if (!req->nbytes)
2808 		return 0;
2809 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2810 			   DMA_TO_DEVICE);
2811 	if (!error)
2812 		return -ENOMEM;
2813 	req_ctx->hctx_wr.is_sg_map = 1;
2814 	return 0;
2815 }
2816 
2817 void chcr_hash_dma_unmap(struct device *dev,
2818 			 struct ahash_request *req)
2819 {
2820 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2821 
2822 	if (!req->nbytes)
2823 		return;
2824 
2825 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2826 			   DMA_TO_DEVICE);
2827 	req_ctx->hctx_wr.is_sg_map = 0;
2829 }
2830 
2831 int chcr_cipher_dma_map(struct device *dev,
2832 			struct skcipher_request *req)
2833 {
2834 	int error;
2835 
2836 	if (req->src == req->dst) {
2837 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2838 				   DMA_BIDIRECTIONAL);
2839 		if (!error)
2840 			goto err;
2841 	} else {
2842 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2843 				   DMA_TO_DEVICE);
2844 		if (!error)
2845 			goto err;
2846 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2847 				   DMA_FROM_DEVICE);
2848 		if (!error) {
2849 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2850 				   DMA_TO_DEVICE);
2851 			goto err;
2852 		}
2853 	}
2854 
2855 	return 0;
2856 err:
2857 	return -ENOMEM;
2858 }
2859 
2860 void chcr_cipher_dma_unmap(struct device *dev,
2861 			   struct skcipher_request *req)
2862 {
2863 	if (req->src == req->dst) {
2864 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2865 				   DMA_BIDIRECTIONAL);
2866 	} else {
2867 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2868 				   DMA_TO_DEVICE);
2869 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2870 				   DMA_FROM_DEVICE);
2871 	}
2872 }
2873 
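/*
 * Encode the CCM message length big-endian into the last csize bytes
 * of the length field, as laid out in RFC 3610; e.g. csize == 4 and
 * msglen == 0x0102 leaves the block ending in 00 00 01 02.
 */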
2874 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2875 {
2876 	__be32 data;
2877 
2878 	memset(block, 0, csize);
2879 	block += csize;
2880 
2881 	if (csize >= 4)
2882 		csize = 4;
2883 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2884 		return -EOVERFLOW;
2885 
2886 	data = cpu_to_be32(msglen);
2887 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2888 
2889 	return 0;
2890 }
2891 
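/*
 * Fill in the CCM B0 flags byte per RFC 3610: bits 0-2 already carry
 * L' (copied from iv[0]), bits 3-5 encode the tag length as
 * (authsize - 2) / 2, and bit 6 is set when associated data is
 * present.
 */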
2892 static int generate_b0(struct aead_request *req, u8 *ivptr,
2893 			unsigned short op_type)
2894 {
2895 	unsigned int l, lp, m;
2896 	int rc;
2897 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2898 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2899 	u8 *b0 = reqctx->scratch_pad;
2900 
2901 	m = crypto_aead_authsize(aead);
2902 
2903 	memcpy(b0, ivptr, 16);
2904 
2905 	lp = b0[0];
2906 	l = lp + 1;
2907 
2908 	/* set m, bits 3-5 */
2909 	*b0 |= (8 * ((m - 2) / 2));
2910 
2911 	/* set adata, bit 6, if associated data is used */
2912 	if (req->assoclen)
2913 		*b0 |= 64;
2914 	rc = set_msg_len(b0 + 16 - l,
2915 			 (op_type == CHCR_DECRYPT_OP) ?
2916 			 req->cryptlen - m : req->cryptlen, l);
2917 
2918 	return rc;
2919 }
2920 
2921 static inline int crypto_ccm_check_iv(const u8 *iv)
2922 {
2923 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2924 	if (iv[0] < 1 || iv[0] > 7)
2925 		return -EINVAL;
2926 
2927 	return 0;
2928 }
2929 
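/*
 * Build the 16-byte CCM counter block: RFC 4309 mode uses a fixed
 * L' of 3 with the 3-byte salt plus the caller's 8-byte IV, while
 * plain CCM copies the caller's IV as-is; the 2-byte AAD length that
 * prefixes B1 lands right after B0 in the scratch pad.
 */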
2930 static int ccm_format_packet(struct aead_request *req,
2931 			     u8 *ivptr,
2932 			     unsigned int sub_type,
2933 			     unsigned short op_type,
2934 			     unsigned int assoclen)
2935 {
2936 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2937 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2938 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2939 	int rc = 0;
2940 
2941 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2942 		ivptr[0] = 3;
2943 		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2944 		memcpy(ivptr + 4, req->iv, 8);
2945 		memset(ivptr + 12, 0, 4);
2946 	} else {
2947 		memcpy(ivptr, req->iv, 16);
2948 	}
2949 	if (assoclen)
2950 		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2951 
2952 	rc = generate_b0(req, ivptr, op_type);
2953 	/* zero the L'+1 trailing counter bytes of the IV */
2954 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2955 	return rc;
2956 }
2957 
2958 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2959 				  unsigned int dst_size,
2960 				  struct aead_request *req,
2961 				  unsigned short op_type)
2962 {
2963 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2964 	struct chcr_context *ctx = a_ctx(tfm);
2965 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2966 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2967 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2968 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2969 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2970 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2971 	unsigned int ccm_xtra;
2972 	unsigned int tag_offset = 0, auth_offset = 0;
2973 	unsigned int assoclen;
2974 
2975 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2976 
2977 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2978 		assoclen = req->assoclen - 8;
2979 	else
2980 		assoclen = req->assoclen;
2981 	ccm_xtra = CCM_B0_SIZE +
2982 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2983 
2984 	auth_offset = req->cryptlen ?
2985 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2986 	if (op_type == CHCR_DECRYPT_OP) {
2987 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2988 			tag_offset = crypto_aead_authsize(tfm);
2989 		else
2990 			auth_offset = 0;
2991 	}
2992 
2993 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2994 	sec_cpl->pldlen =
2995 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2996 	/* For CCM there will always be a B0, so AAD start is always 1 */
2997 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2998 				1 + IV,	IV + assoclen + ccm_xtra,
2999 				req->assoclen + IV + 1 + ccm_xtra, 0);
3000 
3001 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
3002 					auth_offset, tag_offset,
3003 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
3004 					crypto_aead_authsize(tfm));
3005 	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
3006 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3007 					cipher_mode, mac_mode,
3008 					aeadctx->hmac_ctrl, IV >> 1);
3009 
3010 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3011 					0, dst_size);
3012 }
3013 
3014 static int aead_ccm_validate_input(unsigned short op_type,
3015 				   struct aead_request *req,
3016 				   struct chcr_aead_ctx *aeadctx,
3017 				   unsigned int sub_type)
3018 {
3019 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3020 		if (crypto_ccm_check_iv(req->iv)) {
3021 			pr_err("CCM: IV check fails\n");
3022 			return -EINVAL;
3023 		}
3024 	} else {
3025 		if (req->assoclen != 16 && req->assoclen != 20) {
3026 			pr_err("RFC4309: Invalid AAD length %d\n",
3027 			pr_err("RFC4309: Invalid AAD length %u\n",
3028 			return -EINVAL;
3029 		}
3030 	}
3031 	return 0;
3032 }
3033 
3034 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3035 					  unsigned short qid,
3036 					  int size)
3037 {
3038 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3039 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3040 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3041 	struct sk_buff *skb = NULL;
3042 	struct chcr_wr *chcr_req;
3043 	struct cpl_rx_phys_dsgl *phys_cpl;
3044 	struct ulptx_sgl *ulptx;
3045 	unsigned int transhdr_len;
3046 	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3047 	unsigned int sub_type, assoclen = req->assoclen;
3048 	unsigned int authsize = crypto_aead_authsize(tfm);
3049 	int error = -EINVAL;
3050 	u8 *ivptr;
3051 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3052 		GFP_ATOMIC;
3053 	struct adapter *adap = padap(a_ctx(tfm)->dev);
3054 
3055 	sub_type = get_aead_subtype(tfm);
3056 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3057 		assoclen -= 8;
3058 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3059 	error = chcr_aead_common_init(req);
3060 	if (error)
3061 		return ERR_PTR(error);
3062 
3063 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3064 	if (error)
3065 		goto err;
3066 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3067 			+ (reqctx->op ? -authsize : authsize),
3068 			CHCR_DST_SG_SIZE, 0);
3069 	dnents += MIN_CCM_SG; // For IV and B0
3070 	dst_size = get_space_for_phys_dsgl(dnents);
3071 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3072 			       CHCR_SRC_SG_SIZE, 0);
3073 	snents += MIN_CCM_SG; //For B0
3074 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3075 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3076 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3077 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3078 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3079 				     reqctx->b0_len, 16) :
3080 		(sgl_len(snents) * 8);
3081 	transhdr_len += temp;
3082 	transhdr_len = roundup(transhdr_len, 16);
3083 
3084 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3085 				reqctx->b0_len, transhdr_len, reqctx->op)) {
3086 		atomic_inc(&adap->chcr_stats.fallback);
3087 		chcr_aead_common_exit(req);
3088 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3089 	}
3090 	skb = alloc_skb(transhdr_len, flags);
3091 
3092 	if (!skb) {
3093 		error = -ENOMEM;
3094 		goto err;
3095 	}
3096 
3097 	chcr_req = __skb_put_zero(skb, transhdr_len);
3098 
3099 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3100 
3101 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3102 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3103 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3104 			aeadctx->key, aeadctx->enckey_len);
3105 
3106 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3107 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3108 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3109 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3110 	if (error)
3111 		goto dstmap_fail;
3112 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3113 	chcr_add_aead_src_ent(req, ulptx);
3114 
3115 	atomic_inc(&adap->chcr_stats.aead_rqst);
3116 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3117 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3118 		reqctx->b0_len) : 0);
3119 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3120 		    transhdr_len, temp, 0);
3121 	reqctx->skb = skb;
3122 
3123 	return skb;
3124 dstmap_fail:
3125 	kfree_skb(skb);
3126 err:
3127 	chcr_aead_common_exit(req);
3128 	return ERR_PTR(error);
3129 }
3130 
3131 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3132 				     unsigned short qid,
3133 				     int size)
3134 {
3135 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3136 	struct chcr_context *ctx = a_ctx(tfm);
3137 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3138 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3139 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3140 	struct sk_buff *skb = NULL;
3141 	struct chcr_wr *chcr_req;
3142 	struct cpl_rx_phys_dsgl *phys_cpl;
3143 	struct ulptx_sgl *ulptx;
3144 	unsigned int transhdr_len, dnents = 0, snents;
3145 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3146 	unsigned int authsize = crypto_aead_authsize(tfm);
3147 	int error = -EINVAL;
3148 	u8 *ivptr;
3149 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3150 		GFP_ATOMIC;
3151 	struct adapter *adap = padap(ctx->dev);
3152 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3153 
3154 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3155 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3156 		assoclen = req->assoclen - 8;
3157 
3158 	reqctx->b0_len = 0;
3159 	error = chcr_aead_common_init(req);
3160 	if (error)
3161 		return ERR_PTR(error);
3162 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3163 				(reqctx->op ? -authsize : authsize),
3164 				CHCR_DST_SG_SIZE, 0);
3165 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3166 			       CHCR_SRC_SG_SIZE, 0);
3167 	dnents += MIN_GCM_SG; // For IV
3168 	dst_size = get_space_for_phys_dsgl(dnents);
3169 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3170 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3171 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3172 			SGE_MAX_WR_LEN;
3173 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3174 		(sgl_len(snents) * 8);
3175 	transhdr_len += temp;
3176 	transhdr_len = roundup(transhdr_len, 16);
3177 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3178 			    transhdr_len, reqctx->op)) {
3179 
3180 		atomic_inc(&adap->chcr_stats.fallback);
3181 		chcr_aead_common_exit(req);
3182 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3183 	}
3184 	skb = alloc_skb(transhdr_len, flags);
3185 	if (!skb) {
3186 		error = -ENOMEM;
3187 		goto err;
3188 	}
3189 
3190 	chcr_req = __skb_put_zero(skb, transhdr_len);
3191 
3192 	/* Offset of the tag from the end */
3193 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3194 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3195 						rx_channel_id, 2, 1);
3196 	chcr_req->sec_cpl.pldlen =
3197 		htonl(req->assoclen + IV + req->cryptlen);
3198 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3199 					assoclen ? 1 + IV : 0,
3200 					assoclen ? IV + assoclen : 0,
3201 					req->assoclen + IV + 1, 0);
3202 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3203 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3204 						temp, temp);
3205 	chcr_req->sec_cpl.seqno_numivs =
3206 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3207 					CHCR_ENCRYPT_OP) ? 1 : 0,
3208 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3209 					CHCR_SCMD_AUTH_MODE_GHASH,
3210 					aeadctx->hmac_ctrl, IV >> 1);
3211 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3212 					0, 0, dst_size);
3213 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3214 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3215 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3216 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3217 
3218 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3219 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3220 	/* Prepare a 16-byte IV: SALT | IV | 0x00000001 */
3222 	if (get_aead_subtype(tfm) ==
3223 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3224 		memcpy(ivptr, aeadctx->salt, 4);
3225 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3226 	} else {
3227 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3228 	}
3229 	put_unaligned_be32(0x01, &ivptr[12]);
3230 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3231 
3232 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3233 	chcr_add_aead_src_ent(req, ulptx);
3234 	atomic_inc(&adap->chcr_stats.aead_rqst);
3235 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3236 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3237 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3238 		    transhdr_len, temp, reqctx->verify);
3239 	reqctx->skb = skb;
3240 	return skb;
3241 
3242 err:
3243 	chcr_aead_common_exit(req);
3244 	return ERR_PTR(error);
3245 }
3248 
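/*
 * Each AEAD tfm also instantiates a software implementation of the
 * same algorithm to fall back on, and the request size is chosen so
 * the context can hold either the chcr state or a fallback
 * sub-request.
 */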
3249 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3250 {
3251 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3252 	struct aead_alg *alg = crypto_aead_alg(tfm);
3253 
3254 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3255 					       CRYPTO_ALG_NEED_FALLBACK |
3256 					       CRYPTO_ALG_ASYNC);
3257 	if  (IS_ERR(aeadctx->sw_cipher))
3258 		return PTR_ERR(aeadctx->sw_cipher);
3259 	crypto_aead_set_reqsize_dma(
3260 		tfm, max(sizeof(struct chcr_aead_reqctx),
3261 			 sizeof(struct aead_request) +
3262 			 crypto_aead_reqsize(aeadctx->sw_cipher)));
3263 	return chcr_device_init(a_ctx(tfm));
3264 }
3265 
3266 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3267 {
3268 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3269 
3270 	crypto_free_aead(aeadctx->sw_cipher);
3271 }
3272 
3273 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3274 					unsigned int authsize)
3275 {
3276 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3277 
3278 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3279 	aeadctx->mayverify = VERIFY_HW;
3280 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3281 }

3282 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3283 				    unsigned int authsize)
3284 {
3285 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3286 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3287 
3288 	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3289 	 * does not hold for SHA1, so the authsize == 12 check must come
3290 	 * before the authsize == (maxauth >> 1) check.
3291 	 */
3292 	if (authsize == ICV_4) {
3293 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3294 		aeadctx->mayverify = VERIFY_HW;
3295 	} else if (authsize == ICV_6) {
3296 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3297 		aeadctx->mayverify = VERIFY_HW;
3298 	} else if (authsize == ICV_10) {
3299 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3300 		aeadctx->mayverify = VERIFY_HW;
3301 	} else if (authsize == ICV_12) {
3302 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3303 		aeadctx->mayverify = VERIFY_HW;
3304 	} else if (authsize == ICV_14) {
3305 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3306 		aeadctx->mayverify = VERIFY_HW;
3307 	} else if (authsize == (maxauth >> 1)) {
3308 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3309 		aeadctx->mayverify = VERIFY_HW;
3310 	} else if (authsize == maxauth) {
3311 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3312 		aeadctx->mayverify = VERIFY_HW;
3313 	} else {
3314 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3315 		aeadctx->mayverify = VERIFY_SW;
3316 	}
3317 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3318 }
3320 
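/*
 * Map the requested GCM tag length onto the hardware truncation
 * controls; 13- and 15-byte tags, which the engine cannot emit
 * directly, keep CHCR_SCMD_HMAC_CTRL_NO_TRUNC and are left to
 * software verification (VERIFY_SW).
 */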
3321 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3322 {
3323 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3324 
3325 	switch (authsize) {
3326 	case ICV_4:
3327 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3328 		aeadctx->mayverify = VERIFY_HW;
3329 		break;
3330 	case ICV_8:
3331 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3332 		aeadctx->mayverify = VERIFY_HW;
3333 		break;
3334 	case ICV_12:
3335 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3336 		aeadctx->mayverify = VERIFY_HW;
3337 		break;
3338 	case ICV_14:
3339 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3340 		aeadctx->mayverify = VERIFY_HW;
3341 		break;
3342 	case ICV_16:
3343 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3344 		aeadctx->mayverify = VERIFY_HW;
3345 		break;
3346 	case ICV_13:
3347 	case ICV_15:
3348 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3349 		aeadctx->mayverify = VERIFY_SW;
3350 		break;
3351 	default:
3352 		return -EINVAL;
3353 	}
3354 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3355 }
3356 
3357 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3358 					  unsigned int authsize)
3359 {
3360 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3361 
3362 	switch (authsize) {
3363 	case ICV_8:
3364 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3365 		aeadctx->mayverify = VERIFY_HW;
3366 		break;
3367 	case ICV_12:
3368 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3369 		aeadctx->mayverify = VERIFY_HW;
3370 		break;
3371 	case ICV_16:
3372 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3373 		aeadctx->mayverify = VERIFY_HW;
3374 		break;
3375 	default:
3376 		return -EINVAL;
3377 	}
3378 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3379 }
3380 
3381 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3382 				unsigned int authsize)
3383 {
3384 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3385 
3386 	switch (authsize) {
3387 	case ICV_4:
3388 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3389 		aeadctx->mayverify = VERIFY_HW;
3390 		break;
3391 	case ICV_6:
3392 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3393 		aeadctx->mayverify = VERIFY_HW;
3394 		break;
3395 	case ICV_8:
3396 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3397 		aeadctx->mayverify = VERIFY_HW;
3398 		break;
3399 	case ICV_10:
3400 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3401 		aeadctx->mayverify = VERIFY_HW;
3402 		break;
3403 	case ICV_12:
3404 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3405 		aeadctx->mayverify = VERIFY_HW;
3406 		break;
3407 	case ICV_14:
3408 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3409 		aeadctx->mayverify = VERIFY_HW;
3410 		break;
3411 	case ICV_16:
3412 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3413 		aeadctx->mayverify = VERIFY_HW;
3414 		break;
3415 	default:
3416 		return -EINVAL;
3417 	}
3418 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3419 }
3420 
3421 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3422 				const u8 *key,
3423 				unsigned int keylen)
3424 {
3425 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3426 	unsigned char ck_size, mk_size;
3427 	int key_ctx_size = 0;
3428 
3429 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3430 	if (keylen == AES_KEYSIZE_128) {
3431 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3432 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3433 	} else if (keylen == AES_KEYSIZE_192) {
3434 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3435 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3436 	} else if (keylen == AES_KEYSIZE_256) {
3437 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3438 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3439 	} else {
3440 		aeadctx->enckey_len = 0;
3441 		return	-EINVAL;
3442 	}
3443 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3444 						key_ctx_size >> 4);
3445 	memcpy(aeadctx->key, key, keylen);
3446 	aeadctx->enckey_len = keylen;
3447 
3448 	return 0;
3449 }
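
/*
 * Sketch (illustrative, not driver code) of the key-context sizing done
 * in chcr_ccm_common_setkey() above: the key is stored twice after the
 * _key_ctx header, each copy padded to a 16-byte boundary, and the
 * header field carries the total size in 16-byte units (hence the >> 4).
 */
static unsigned int __maybe_unused chcr_example_ccm_key_ctx_units(unsigned int keylen)
{
	unsigned int key_ctx_size;

	/* header plus two 16-byte-aligned copies of the key */
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	return key_ctx_size >> 4;	/* size in 16-byte units */
}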
3450 
3451 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3452 				const u8 *key,
3453 				unsigned int keylen)
3454 {
3455 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3456 	int error;
3457 
3458 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3459 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3460 			      CRYPTO_TFM_REQ_MASK);
3461 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3462 	if (error)
3463 		return error;
3464 	return chcr_ccm_common_setkey(aead, key, keylen);
3465 }
3466 
3467 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3468 				    unsigned int keylen)
3469 {
3470 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3471 	int error;
3472 
3473 	if (keylen < 3) {
3474 		aeadctx->enckey_len = 0;
3475 		return	-EINVAL;
3476 	}
3477 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3478 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3479 			      CRYPTO_TFM_REQ_MASK);
3480 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3481 	if (error)
3482 		return error;
3483 	keylen -= 3;
3484 	memcpy(aeadctx->salt, key + keylen, 3);
3485 	return chcr_ccm_common_setkey(aead, key, keylen);
3486 }
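
/*
 * Sketch of the RFC 4309 key blob layout consumed above (illustrative
 * only; the function name is an assumption): the last 3 bytes of the
 * setkey() blob are the implicit nonce salt and the rest is the raw AES
 * key, so e.g. a 19-byte blob carries an AES-128 key.
 */
static void __maybe_unused chcr_example_rfc4309_layout(const u8 *blob,
						       unsigned int bloblen,
						       u8 *salt,
						       unsigned int *aes_keylen)
{
	*aes_keylen = bloblen - 3;		/* AES key precedes the salt */
	memcpy(salt, blob + *aes_keylen, 3);	/* trailing 3-byte salt */
}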
3487 
3488 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3489 			   unsigned int keylen)
3490 {
3491 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3492 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3493 	unsigned int ck_size;
3494 	int ret = 0, key_ctx_size = 0;
3495 	struct crypto_aes_ctx aes;
3496 
3497 	aeadctx->enckey_len = 0;
3498 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3499 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3500 			      & CRYPTO_TFM_REQ_MASK);
3501 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3502 	if (ret)
3503 		goto out;
3504 
3505 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3506 	    keylen > 3) {
3507 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3508 		memcpy(aeadctx->salt, key + keylen, 4);
3509 	}
3510 	if (keylen == AES_KEYSIZE_128) {
3511 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3512 	} else if (keylen == AES_KEYSIZE_192) {
3513 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3514 	} else if (keylen == AES_KEYSIZE_256) {
3515 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3516 	} else {
3517 		pr_err("GCM: Invalid key length %u\n", keylen);
3518 		ret = -EINVAL;
3519 		goto out;
3520 	}
3521 
3522 	memcpy(aeadctx->key, key, keylen);
3523 	aeadctx->enckey_len = keylen;
3524 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3525 		AEAD_H_SIZE;
3526 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3527 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3528 						0, 0,
3529 						key_ctx_size >> 4);
3530 	/* Compute the hash subkey H = CIPH(K, 0^128); it is stored in the
3531 	 * key context after the cipher key.
3532 	 */
3533 	ret = aes_expandkey(&aes, key, keylen);
3534 	if (ret) {
3535 		aeadctx->enckey_len = 0;
3536 		goto out;
3537 	}
3538 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3539 	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3540 	memzero_explicit(&aes, sizeof(aes));
3541 
3542 out:
3543 	return ret;
3544 }
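
/*
 * Standalone sketch (not called by the driver) of the GHASH subkey
 * derivation performed in chcr_gcm_setkey() above, H = CIPH(K, 0^128),
 * using the same generic AES library helpers the driver relies on.
 */
static int __maybe_unused chcr_example_ghash_subkey(const u8 *key,
						    unsigned int keylen,
						    u8 ghash_h[AEAD_H_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	memset(ghash_h, 0, AEAD_H_SIZE);	/* the all-zero block */
	aes_encrypt(&aes, ghash_h, ghash_h);	/* H = AES_K(0^128) */
	memzero_explicit(&aes, sizeof(aes));	/* scrub the round keys */
	return 0;
}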
3545 
3546 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3547 				   unsigned int keylen)
3548 {
3549 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3550 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3551 	/* keys holds both the authentication key and the cipher key */
3552 	struct crypto_authenc_keys keys;
3553 	unsigned int bs, subtype;
3554 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3555 	int err = 0, i, key_ctx_len = 0;
3556 	unsigned char ck_size = 0;
3557 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3558 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3559 	struct algo_param param;
3560 	int align;
3561 	u8 *o_ptr = NULL;
3562 
3563 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3564 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3565 			      & CRYPTO_TFM_REQ_MASK);
3566 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3567 	if (err)
3568 		goto out;
3569 
3570 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3571 		goto out;
3572 
3573 	if (get_alg_config(&param, max_authsize)) {
3574 		pr_err("Unsupported digest size\n");
3575 		goto out;
3576 	}
3577 	subtype = get_aead_subtype(authenc);
3578 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3579 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3580 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3581 			goto out;
3582 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3583 		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3584 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3585 	}
3586 	if (keys.enckeylen == AES_KEYSIZE_128) {
3587 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3588 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3589 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3590 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3591 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3592 	} else {
3593 		pr_err("Unsupported cipher key\n");
3594 		goto out;
3595 	}
3596 
3597 	/* Copy only the encryption key. The auth key is only used to
3598 	 * generate h(ipad) and h(opad), so it is not needed again;
3599 	 * oversized auth keys are first digested down to the digest size.
3600 	 */
3601 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3602 	aeadctx->enckey_len = keys.enckeylen;
3603 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3604 		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3605 
3606 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3607 			    aeadctx->enckey_len << 3);
3608 	}
3609 	base_hash = chcr_alloc_shash(max_authsize);
3610 	if (IS_ERR(base_hash)) {
3611 		pr_err("Base driver cannot be loaded\n");
3612 		goto out;
3613 	}
3614 	{
3615 		SHASH_DESC_ON_STACK(shash, base_hash);
3616 
3617 		shash->tfm = base_hash;
3618 		bs = crypto_shash_blocksize(base_hash);
3619 		align = KEYCTX_ALIGN_PAD(max_authsize);
3620 		o_ptr = actx->h_iopad + param.result_size + align;
3621 
3622 		if (keys.authkeylen > bs) {
3623 			err = crypto_shash_digest(shash, keys.authkey,
3624 						  keys.authkeylen,
3625 						  o_ptr);
3626 			if (err) {
3627 				pr_err("Hashing of auth key failed\n");
3628 				goto out;
3629 			}
3630 			keys.authkeylen = max_authsize;
3631 		} else
3632 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3633 
3634 		/* Compute the ipad-digest */
3635 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3636 		memcpy(pad, o_ptr, keys.authkeylen);
3637 		for (i = 0; i < bs >> 2; i++)
3638 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3639 
3640 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3641 					      max_authsize))
3642 			goto out;
3643 		/* Compute the opad-digest */
3644 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3645 		memcpy(pad, o_ptr, keys.authkeylen);
3646 		for (i = 0; i < bs >> 2; i++)
3647 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3648 
3649 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3650 			goto out;
3651 
3652 		/* convert the ipad and opad digest to network order */
3653 		chcr_change_order(actx->h_iopad, param.result_size);
3654 		chcr_change_order(o_ptr, param.result_size);
3655 		key_ctx_len = sizeof(struct _key_ctx) +
3656 			roundup(keys.enckeylen, 16) +
3657 			(param.result_size + align) * 2;
3658 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3659 						0, 1, key_ctx_len >> 4);
3660 		actx->auth_mode = param.auth_mode;
3661 		chcr_free_shash(base_hash);
3662 
3663 		memzero_explicit(&keys, sizeof(keys));
3664 		return 0;
3665 	}
3666 out:
3667 	aeadctx->enckey_len = 0;
3668 	memzero_explicit(&keys, sizeof(keys));
3669 	if (!IS_ERR(base_hash))
3670 		chcr_free_shash(base_hash);
3671 	return -EINVAL;
3672 }
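
/*
 * Sketch of the RFC 2104 pad derivation used by chcr_authenc_setkey()
 * above: the (possibly pre-digested) auth key is zero-padded to the hash
 * block size and XORed word-wise with the ipad/opad constant.  The
 * function name is an assumption; 'bs' is a multiple of 4 for the SHA
 * family, as the word-wise XOR requires.
 */
static void __maybe_unused chcr_example_hmac_pad(u8 *pad, const u8 *authkey,
						 unsigned int authkeylen,
						 unsigned int bs, u32 pattern)
{
	int i;

	memcpy(pad, authkey, authkeylen);
	memset(pad + authkeylen, 0, bs - authkeylen);
	for (i = 0; i < bs >> 2; i++)	/* pattern: IPAD_DATA or OPAD_DATA */
		*((u32 *)pad + i) ^= pattern;
}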
3673 
3674 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3675 					const u8 *key, unsigned int keylen)
3676 {
3677 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3678 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3679 	/* keys holds both the authentication key and the cipher key */
3680 	struct crypto_authenc_keys keys;
3681 	int err;
3682 	unsigned int subtype;
3683 	int key_ctx_len = 0;
3684 	unsigned char ck_size = 0;
3685 
3686 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3687 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3688 			      & CRYPTO_TFM_REQ_MASK);
3689 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3690 	if (err)
3691 		goto out;
3692 
3693 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3694 		goto out;
3695 
3696 	subtype = get_aead_subtype(authenc);
3697 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3698 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3699 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3700 			goto out;
3701 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3702 			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3703 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3704 	}
3705 	if (keys.enckeylen == AES_KEYSIZE_128) {
3706 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3707 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3708 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3709 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3710 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3711 	} else {
3712 		pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3713 		goto out;
3714 	}
3715 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3716 	aeadctx->enckey_len = keys.enckeylen;
3717 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3718 	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3719 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3720 				aeadctx->enckey_len << 3);
3721 	}
3722 	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3723 
3724 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3725 						0, key_ctx_len >> 4);
3726 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3727 	memzero_explicit(&keys, sizeof(keys));
3728 	return 0;
3729 out:
3730 	aeadctx->enckey_len = 0;
3731 	memzero_explicit(&keys, sizeof(keys));
3732 	return -EINVAL;
3733 }
3734 
3735 static int chcr_aead_op(struct aead_request *req,
3736 			int size,
3737 			create_wr_t create_wr_fn)
3738 {
3739 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3740 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3741 	struct chcr_context *ctx = a_ctx(tfm);
3742 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3743 	struct sk_buff *skb;
3744 	struct chcr_dev *cdev;
3745 
3746 	cdev = a_ctx(tfm)->dev;
3747 	if (!cdev) {
3748 		pr_err("%s : No crypto device.\n", __func__);
3749 		return -ENXIO;
3750 	}
3751 
3752 	if (chcr_inc_wrcount(cdev)) {
3753 	/* Detached state for CHCR means lldi or padap has been freed;
3754 	 * the request cannot be queued to hardware, so use the fallback.
3755 	 */
3756 		return chcr_aead_fallback(req, reqctx->op);
3757 	}
3758 
3759 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3760 					reqctx->txqidx) &&
3761 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3762 			chcr_dec_wrcount(cdev);
3763 			return -ENOSPC;
3764 	}
3765 
3766 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3767 	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3768 		pr_err("RFC4106: Invalid value of assoclen %u\n", req->assoclen);
3769 		chcr_dec_wrcount(cdev);
3770 		return -EINVAL;
3771 	}
3772 
3773 	/* Build a work request (WR) from req */
3774 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3775 
3776 	if (IS_ERR_OR_NULL(skb)) {
3777 		chcr_dec_wrcount(cdev);
3778 		return PTR_ERR_OR_ZERO(skb);
3779 	}
3780 
3781 	skb->dev = u_ctx->lldi.ports[0];
3782 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3783 	chcr_send_wr(skb);
3784 	return -EINPROGRESS;
3785 }
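
/*
 * Sketch of how a caller would handle chcr_aead_op()'s return values
 * (illustrative; the function and flow are assumptions, not driver
 * code): -EINPROGRESS means the WR was posted and the completion
 * callback will run later, while -ENOSPC means the queue was full and
 * CRYPTO_TFM_REQ_MAY_BACKLOG was not set, so the caller must retry.
 */
static int __maybe_unused chcr_example_submit(struct aead_request *req)
{
	int err = crypto_aead_encrypt(req);

	if (err == -EINPROGRESS)
		return 0;	/* posted; wait for the async callback */
	if (err == -ENOSPC)
		return err;	/* queue full; back off and resubmit */
	return err;		/* synchronous completion or hard error */
}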
3786 
3787 static int chcr_aead_encrypt(struct aead_request *req)
3788 {
3789 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3790 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3791 	struct chcr_context *ctx = a_ctx(tfm);
3792 	unsigned int cpu;
3793 
3794 	cpu = get_cpu();
3795 	reqctx->txqidx = cpu % ctx->ntxq;
3796 	reqctx->rxqidx = cpu % ctx->nrxq;
3797 	put_cpu();
3798 
3799 	reqctx->verify = VERIFY_HW;
3800 	reqctx->op = CHCR_ENCRYPT_OP;
3801 
3802 	switch (get_aead_subtype(tfm)) {
3803 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3804 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3805 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3806 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3807 		return chcr_aead_op(req, 0, create_authenc_wr);
3808 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3809 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3810 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3811 	default:
3812 		return chcr_aead_op(req, 0, create_gcm_wr);
3813 	}
3814 }
3815 
3816 static int chcr_aead_decrypt(struct aead_request *req)
3817 {
3818 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3819 	struct chcr_context *ctx = a_ctx(tfm);
3820 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3821 	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3822 	int size;
3823 	unsigned int cpu;
3824 
3825 	cpu = get_cpu();
3826 	reqctx->txqidx = cpu % ctx->ntxq;
3827 	reqctx->rxqidx = cpu % ctx->nrxq;
3828 	put_cpu();
3829 
3830 	if (aeadctx->mayverify == VERIFY_SW) {
3831 		size = crypto_aead_maxauthsize(tfm);
3832 		reqctx->verify = VERIFY_SW;
3833 	} else {
3834 		size = 0;
3835 		reqctx->verify = VERIFY_HW;
3836 	}
3837 	reqctx->op = CHCR_DECRYPT_OP;
3838 	switch (get_aead_subtype(tfm)) {
3839 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3840 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3841 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3842 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3843 		return chcr_aead_op(req, size, create_authenc_wr);
3844 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3845 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3846 		return chcr_aead_op(req, size, create_aead_ccm_wr);
3847 	default:
3848 		return chcr_aead_op(req, size, create_gcm_wr);
3849 	}
3850 }
3851 
3852 static struct chcr_alg_template driver_algs[] = {
3853 	/* AES-CBC */
3854 	{
3855 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3856 		.is_registered = 0,
3857 		.alg.skcipher = {
3858 			.base.cra_name		= "cbc(aes)",
3859 			.base.cra_driver_name	= "cbc-aes-chcr",
3860 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3861 
3862 			.init			= chcr_init_tfm,
3863 			.exit			= chcr_exit_tfm,
3864 			.min_keysize		= AES_MIN_KEY_SIZE,
3865 			.max_keysize		= AES_MAX_KEY_SIZE,
3866 			.ivsize			= AES_BLOCK_SIZE,
3867 			.setkey			= chcr_aes_cbc_setkey,
3868 			.encrypt		= chcr_aes_encrypt,
3869 			.decrypt		= chcr_aes_decrypt,
3870 			}
3871 	},
3872 	{
3873 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3874 		.is_registered = 0,
3875 		.alg.skcipher = {
3876 			.base.cra_name		= "xts(aes)",
3877 			.base.cra_driver_name	= "xts-aes-chcr",
3878 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3879 
3880 			.init			= chcr_init_tfm,
3881 			.exit			= chcr_exit_tfm,
3882 			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3883 			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3884 			.ivsize			= AES_BLOCK_SIZE,
3885 			.setkey			= chcr_aes_xts_setkey,
3886 			.encrypt		= chcr_aes_encrypt,
3887 			.decrypt		= chcr_aes_decrypt,
3888 			}
3889 	},
3890 	{
3891 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3892 		.is_registered = 0,
3893 		.alg.skcipher = {
3894 			.base.cra_name		= "ctr(aes)",
3895 			.base.cra_driver_name	= "ctr-aes-chcr",
3896 			.base.cra_blocksize	= 1,
3897 
3898 			.init			= chcr_init_tfm,
3899 			.exit			= chcr_exit_tfm,
3900 			.min_keysize		= AES_MIN_KEY_SIZE,
3901 			.max_keysize		= AES_MAX_KEY_SIZE,
3902 			.ivsize			= AES_BLOCK_SIZE,
3903 			.setkey			= chcr_aes_ctr_setkey,
3904 			.encrypt		= chcr_aes_encrypt,
3905 			.decrypt		= chcr_aes_decrypt,
3906 		}
3907 	},
3908 	{
3909 		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3910 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3911 		.is_registered = 0,
3912 		.alg.skcipher = {
3913 			.base.cra_name		= "rfc3686(ctr(aes))",
3914 			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3915 			.base.cra_blocksize	= 1,
3916 
3917 			.init			= chcr_rfc3686_init,
3918 			.exit			= chcr_exit_tfm,
3919 			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3920 			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3921 			.ivsize			= CTR_RFC3686_IV_SIZE,
3922 			.setkey			= chcr_aes_rfc3686_setkey,
3923 			.encrypt		= chcr_aes_encrypt,
3924 			.decrypt		= chcr_aes_decrypt,
3925 		}
3926 	},
3927 	/* SHA */
3928 	{
3929 		.type = CRYPTO_ALG_TYPE_AHASH,
3930 		.is_registered = 0,
3931 		.alg.hash = {
3932 			.halg.digestsize = SHA1_DIGEST_SIZE,
3933 			.halg.base = {
3934 				.cra_name = "sha1",
3935 				.cra_driver_name = "sha1-chcr",
3936 				.cra_blocksize = SHA1_BLOCK_SIZE,
3937 			}
3938 		}
3939 	},
3940 	{
3941 		.type = CRYPTO_ALG_TYPE_AHASH,
3942 		.is_registered = 0,
3943 		.alg.hash = {
3944 			.halg.digestsize = SHA256_DIGEST_SIZE,
3945 			.halg.base = {
3946 				.cra_name = "sha256",
3947 				.cra_driver_name = "sha256-chcr",
3948 				.cra_blocksize = SHA256_BLOCK_SIZE,
3949 			}
3950 		}
3951 	},
3952 	{
3953 		.type = CRYPTO_ALG_TYPE_AHASH,
3954 		.is_registered = 0,
3955 		.alg.hash = {
3956 			.halg.digestsize = SHA224_DIGEST_SIZE,
3957 			.halg.base = {
3958 				.cra_name = "sha224",
3959 				.cra_driver_name = "sha224-chcr",
3960 				.cra_blocksize = SHA224_BLOCK_SIZE,
3961 			}
3962 		}
3963 	},
3964 	{
3965 		.type = CRYPTO_ALG_TYPE_AHASH,
3966 		.is_registered = 0,
3967 		.alg.hash = {
3968 			.halg.digestsize = SHA384_DIGEST_SIZE,
3969 			.halg.base = {
3970 				.cra_name = "sha384",
3971 				.cra_driver_name = "sha384-chcr",
3972 				.cra_blocksize = SHA384_BLOCK_SIZE,
3973 			}
3974 		}
3975 	},
3976 	{
3977 		.type = CRYPTO_ALG_TYPE_AHASH,
3978 		.is_registered = 0,
3979 		.alg.hash = {
3980 			.halg.digestsize = SHA512_DIGEST_SIZE,
3981 			.halg.base = {
3982 				.cra_name = "sha512",
3983 				.cra_driver_name = "sha512-chcr",
3984 				.cra_blocksize = SHA512_BLOCK_SIZE,
3985 			}
3986 		}
3987 	},
3988 	/* HMAC */
3989 	{
3990 		.type = CRYPTO_ALG_TYPE_HMAC,
3991 		.is_registered = 0,
3992 		.alg.hash = {
3993 			.halg.digestsize = SHA1_DIGEST_SIZE,
3994 			.halg.base = {
3995 				.cra_name = "hmac(sha1)",
3996 				.cra_driver_name = "hmac-sha1-chcr",
3997 				.cra_blocksize = SHA1_BLOCK_SIZE,
3998 			}
3999 		}
4000 	},
4001 	{
4002 		.type = CRYPTO_ALG_TYPE_HMAC,
4003 		.is_registered = 0,
4004 		.alg.hash = {
4005 			.halg.digestsize = SHA224_DIGEST_SIZE,
4006 			.halg.base = {
4007 				.cra_name = "hmac(sha224)",
4008 				.cra_driver_name = "hmac-sha224-chcr",
4009 				.cra_blocksize = SHA224_BLOCK_SIZE,
4010 			}
4011 		}
4012 	},
4013 	{
4014 		.type = CRYPTO_ALG_TYPE_HMAC,
4015 		.is_registered = 0,
4016 		.alg.hash = {
4017 			.halg.digestsize = SHA256_DIGEST_SIZE,
4018 			.halg.base = {
4019 				.cra_name = "hmac(sha256)",
4020 				.cra_driver_name = "hmac-sha256-chcr",
4021 				.cra_blocksize = SHA256_BLOCK_SIZE,
4022 			}
4023 		}
4024 	},
4025 	{
4026 		.type = CRYPTO_ALG_TYPE_HMAC,
4027 		.is_registered = 0,
4028 		.alg.hash = {
4029 			.halg.digestsize = SHA384_DIGEST_SIZE,
4030 			.halg.base = {
4031 				.cra_name = "hmac(sha384)",
4032 				.cra_driver_name = "hmac-sha384-chcr",
4033 				.cra_blocksize = SHA384_BLOCK_SIZE,
4034 			}
4035 		}
4036 	},
4037 	{
4038 		.type = CRYPTO_ALG_TYPE_HMAC,
4039 		.is_registered = 0,
4040 		.alg.hash = {
4041 			.halg.digestsize = SHA512_DIGEST_SIZE,
4042 			.halg.base = {
4043 				.cra_name = "hmac(sha512)",
4044 				.cra_driver_name = "hmac-sha512-chcr",
4045 				.cra_blocksize = SHA512_BLOCK_SIZE,
4046 			}
4047 		}
4048 	},
4049 	/* Add AEAD Algorithms */
4050 	{
4051 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4052 		.is_registered = 0,
4053 		.alg.aead = {
4054 			.base = {
4055 				.cra_name = "gcm(aes)",
4056 				.cra_driver_name = "gcm-aes-chcr",
4057 				.cra_blocksize	= 1,
4058 				.cra_priority = CHCR_AEAD_PRIORITY,
4059 				.cra_ctxsize =	sizeof(struct chcr_context) +
4060 						sizeof(struct chcr_aead_ctx) +
4061 						sizeof(struct chcr_gcm_ctx),
4062 			},
4063 			.ivsize = GCM_AES_IV_SIZE,
4064 			.maxauthsize = GHASH_DIGEST_SIZE,
4065 			.setkey = chcr_gcm_setkey,
4066 			.setauthsize = chcr_gcm_setauthsize,
4067 		}
4068 	},
4069 	{
4070 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4071 		.is_registered = 0,
4072 		.alg.aead = {
4073 			.base = {
4074 				.cra_name = "rfc4106(gcm(aes))",
4075 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4076 				.cra_blocksize	 = 1,
4077 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4078 				.cra_ctxsize =	sizeof(struct chcr_context) +
4079 						sizeof(struct chcr_aead_ctx) +
4080 						sizeof(struct chcr_gcm_ctx),
4081 
4082 			},
4083 			.ivsize = GCM_RFC4106_IV_SIZE,
4084 			.maxauthsize	= GHASH_DIGEST_SIZE,
4085 			.setkey = chcr_gcm_setkey,
4086 			.setauthsize	= chcr_4106_4309_setauthsize,
4087 		}
4088 	},
4089 	{
4090 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4091 		.is_registered = 0,
4092 		.alg.aead = {
4093 			.base = {
4094 				.cra_name = "ccm(aes)",
4095 				.cra_driver_name = "ccm-aes-chcr",
4096 				.cra_blocksize	 = 1,
4097 				.cra_priority = CHCR_AEAD_PRIORITY,
4098 				.cra_ctxsize =	sizeof(struct chcr_context) +
4099 						sizeof(struct chcr_aead_ctx),
4100 
4101 			},
4102 			.ivsize = AES_BLOCK_SIZE,
4103 			.maxauthsize	= GHASH_DIGEST_SIZE,
4104 			.setkey = chcr_aead_ccm_setkey,
4105 			.setauthsize	= chcr_ccm_setauthsize,
4106 		}
4107 	},
4108 	{
4109 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4110 		.is_registered = 0,
4111 		.alg.aead = {
4112 			.base = {
4113 				.cra_name = "rfc4309(ccm(aes))",
4114 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4115 				.cra_blocksize	 = 1,
4116 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4117 				.cra_ctxsize =	sizeof(struct chcr_context) +
4118 						sizeof(struct chcr_aead_ctx),
4119 
4120 			},
4121 			.ivsize = 8,
4122 			.maxauthsize	= GHASH_DIGEST_SIZE,
4123 			.setkey = chcr_aead_rfc4309_setkey,
4124 			.setauthsize = chcr_4106_4309_setauthsize,
4125 		}
4126 	},
4127 	{
4128 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4129 		.is_registered = 0,
4130 		.alg.aead = {
4131 			.base = {
4132 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4133 				.cra_driver_name =
4134 					"authenc-hmac-sha1-cbc-aes-chcr",
4135 				.cra_blocksize	 = AES_BLOCK_SIZE,
4136 				.cra_priority = CHCR_AEAD_PRIORITY,
4137 				.cra_ctxsize =	sizeof(struct chcr_context) +
4138 						sizeof(struct chcr_aead_ctx) +
4139 						sizeof(struct chcr_authenc_ctx),
4140 
4141 			},
4142 			.ivsize = AES_BLOCK_SIZE,
4143 			.maxauthsize = SHA1_DIGEST_SIZE,
4144 			.setkey = chcr_authenc_setkey,
4145 			.setauthsize = chcr_authenc_setauthsize,
4146 		}
4147 	},
4148 	{
4149 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4150 		.is_registered = 0,
4151 		.alg.aead = {
4152 			.base = {
4153 
4154 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4155 				.cra_driver_name =
4156 					"authenc-hmac-sha256-cbc-aes-chcr",
4157 				.cra_blocksize	 = AES_BLOCK_SIZE,
4158 				.cra_priority = CHCR_AEAD_PRIORITY,
4159 				.cra_ctxsize =	sizeof(struct chcr_context) +
4160 						sizeof(struct chcr_aead_ctx) +
4161 						sizeof(struct chcr_authenc_ctx),
4162 
4163 			},
4164 			.ivsize = AES_BLOCK_SIZE,
4165 			.maxauthsize	= SHA256_DIGEST_SIZE,
4166 			.setkey = chcr_authenc_setkey,
4167 			.setauthsize = chcr_authenc_setauthsize,
4168 		}
4169 	},
4170 	{
4171 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4172 		.is_registered = 0,
4173 		.alg.aead = {
4174 			.base = {
4175 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4176 				.cra_driver_name =
4177 					"authenc-hmac-sha224-cbc-aes-chcr",
4178 				.cra_blocksize	 = AES_BLOCK_SIZE,
4179 				.cra_priority = CHCR_AEAD_PRIORITY,
4180 				.cra_ctxsize =	sizeof(struct chcr_context) +
4181 						sizeof(struct chcr_aead_ctx) +
4182 						sizeof(struct chcr_authenc_ctx),
4183 			},
4184 			.ivsize = AES_BLOCK_SIZE,
4185 			.maxauthsize = SHA224_DIGEST_SIZE,
4186 			.setkey = chcr_authenc_setkey,
4187 			.setauthsize = chcr_authenc_setauthsize,
4188 		}
4189 	},
4190 	{
4191 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4192 		.is_registered = 0,
4193 		.alg.aead = {
4194 			.base = {
4195 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4196 				.cra_driver_name =
4197 					"authenc-hmac-sha384-cbc-aes-chcr",
4198 				.cra_blocksize	 = AES_BLOCK_SIZE,
4199 				.cra_priority = CHCR_AEAD_PRIORITY,
4200 				.cra_ctxsize =	sizeof(struct chcr_context) +
4201 						sizeof(struct chcr_aead_ctx) +
4202 						sizeof(struct chcr_authenc_ctx),
4203 
4204 			},
4205 			.ivsize = AES_BLOCK_SIZE,
4206 			.maxauthsize = SHA384_DIGEST_SIZE,
4207 			.setkey = chcr_authenc_setkey,
4208 			.setauthsize = chcr_authenc_setauthsize,
4209 		}
4210 	},
4211 	{
4212 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4213 		.is_registered = 0,
4214 		.alg.aead = {
4215 			.base = {
4216 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4217 				.cra_driver_name =
4218 					"authenc-hmac-sha512-cbc-aes-chcr",
4219 				.cra_blocksize	 = AES_BLOCK_SIZE,
4220 				.cra_priority = CHCR_AEAD_PRIORITY,
4221 				.cra_ctxsize =	sizeof(struct chcr_context) +
4222 						sizeof(struct chcr_aead_ctx) +
4223 						sizeof(struct chcr_authenc_ctx),
4224 
4225 			},
4226 			.ivsize = AES_BLOCK_SIZE,
4227 			.maxauthsize = SHA512_DIGEST_SIZE,
4228 			.setkey = chcr_authenc_setkey,
4229 			.setauthsize = chcr_authenc_setauthsize,
4230 		}
4231 	},
4232 	{
4233 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4234 		.is_registered = 0,
4235 		.alg.aead = {
4236 			.base = {
4237 				.cra_name = "authenc(digest_null,cbc(aes))",
4238 				.cra_driver_name =
4239 					"authenc-digest_null-cbc-aes-chcr",
4240 				.cra_blocksize	 = AES_BLOCK_SIZE,
4241 				.cra_priority = CHCR_AEAD_PRIORITY,
4242 				.cra_ctxsize =	sizeof(struct chcr_context) +
4243 						sizeof(struct chcr_aead_ctx) +
4244 						sizeof(struct chcr_authenc_ctx),
4245 
4246 			},
4247 			.ivsize  = AES_BLOCK_SIZE,
4248 			.maxauthsize = 0,
4249 			.setkey  = chcr_aead_digest_null_setkey,
4250 			.setauthsize = chcr_authenc_null_setauthsize,
4251 		}
4252 	},
4253 	{
4254 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4255 		.is_registered = 0,
4256 		.alg.aead = {
4257 			.base = {
4258 				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4259 				.cra_driver_name =
4260 				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4261 				.cra_blocksize	 = 1,
4262 				.cra_priority = CHCR_AEAD_PRIORITY,
4263 				.cra_ctxsize =	sizeof(struct chcr_context) +
4264 						sizeof(struct chcr_aead_ctx) +
4265 						sizeof(struct chcr_authenc_ctx),
4266 
4267 			},
4268 			.ivsize = CTR_RFC3686_IV_SIZE,
4269 			.maxauthsize = SHA1_DIGEST_SIZE,
4270 			.setkey = chcr_authenc_setkey,
4271 			.setauthsize = chcr_authenc_setauthsize,
4272 		}
4273 	},
4274 	{
4275 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4276 		.is_registered = 0,
4277 		.alg.aead = {
4278 			.base = {
4279 
4280 				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4281 				.cra_driver_name =
4282 				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4283 				.cra_blocksize	 = 1,
4284 				.cra_priority = CHCR_AEAD_PRIORITY,
4285 				.cra_ctxsize =	sizeof(struct chcr_context) +
4286 						sizeof(struct chcr_aead_ctx) +
4287 						sizeof(struct chcr_authenc_ctx),
4288 
4289 			},
4290 			.ivsize = CTR_RFC3686_IV_SIZE,
4291 			.maxauthsize	= SHA256_DIGEST_SIZE,
4292 			.setkey = chcr_authenc_setkey,
4293 			.setauthsize = chcr_authenc_setauthsize,
4294 		}
4295 	},
4296 	{
4297 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4298 		.is_registered = 0,
4299 		.alg.aead = {
4300 			.base = {
4301 				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4302 				.cra_driver_name =
4303 				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4304 				.cra_blocksize	 = 1,
4305 				.cra_priority = CHCR_AEAD_PRIORITY,
4306 				.cra_ctxsize =	sizeof(struct chcr_context) +
4307 						sizeof(struct chcr_aead_ctx) +
4308 						sizeof(struct chcr_authenc_ctx),
4309 			},
4310 			.ivsize = CTR_RFC3686_IV_SIZE,
4311 			.maxauthsize = SHA224_DIGEST_SIZE,
4312 			.setkey = chcr_authenc_setkey,
4313 			.setauthsize = chcr_authenc_setauthsize,
4314 		}
4315 	},
4316 	{
4317 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4318 		.is_registered = 0,
4319 		.alg.aead = {
4320 			.base = {
4321 				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4322 				.cra_driver_name =
4323 				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4324 				.cra_blocksize	 = 1,
4325 				.cra_priority = CHCR_AEAD_PRIORITY,
4326 				.cra_ctxsize =	sizeof(struct chcr_context) +
4327 						sizeof(struct chcr_aead_ctx) +
4328 						sizeof(struct chcr_authenc_ctx),
4329 
4330 			},
4331 			.ivsize = CTR_RFC3686_IV_SIZE,
4332 			.maxauthsize = SHA384_DIGEST_SIZE,
4333 			.setkey = chcr_authenc_setkey,
4334 			.setauthsize = chcr_authenc_setauthsize,
4335 		}
4336 	},
4337 	{
4338 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4339 		.is_registered = 0,
4340 		.alg.aead = {
4341 			.base = {
4342 				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4343 				.cra_driver_name =
4344 				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4345 				.cra_blocksize	 = 1,
4346 				.cra_priority = CHCR_AEAD_PRIORITY,
4347 				.cra_ctxsize =	sizeof(struct chcr_context) +
4348 						sizeof(struct chcr_aead_ctx) +
4349 						sizeof(struct chcr_authenc_ctx),
4350 
4351 			},
4352 			.ivsize = CTR_RFC3686_IV_SIZE,
4353 			.maxauthsize = SHA512_DIGEST_SIZE,
4354 			.setkey = chcr_authenc_setkey,
4355 			.setauthsize = chcr_authenc_setauthsize,
4356 		}
4357 	},
4358 	{
4359 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4360 		.is_registered = 0,
4361 		.alg.aead = {
4362 			.base = {
4363 				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4364 				.cra_driver_name =
4365 				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4366 				.cra_blocksize	 = 1,
4367 				.cra_priority = CHCR_AEAD_PRIORITY,
4368 				.cra_ctxsize =	sizeof(struct chcr_context) +
4369 						sizeof(struct chcr_aead_ctx) +
4370 						sizeof(struct chcr_authenc_ctx),
4371 
4372 			},
4373 			.ivsize  = CTR_RFC3686_IV_SIZE,
4374 			.maxauthsize = 0,
4375 			.setkey  = chcr_aead_digest_null_setkey,
4376 			.setauthsize = chcr_authenc_null_setauthsize,
4377 		}
4378 	},
4379 };
4380 
4381 /*
4382  *	chcr_unregister_alg - Deregister crypto algorithms from the
4383  *	kernel framework.
4384  */
4385 static int chcr_unregister_alg(void)
4386 {
4387 	int i;
4388 
4389 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4390 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4391 		case CRYPTO_ALG_TYPE_SKCIPHER:
4392 			if (driver_algs[i].is_registered && refcount_read(
4393 			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
4394 			    == 1) {
4395 				crypto_unregister_skcipher(
4396 						&driver_algs[i].alg.skcipher);
4397 				driver_algs[i].is_registered = 0;
4398 			}
4399 			break;
4400 		case CRYPTO_ALG_TYPE_AEAD:
4401 			if (driver_algs[i].is_registered && refcount_read(
4402 			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4403 				crypto_unregister_aead(
4404 						&driver_algs[i].alg.aead);
4405 				driver_algs[i].is_registered = 0;
4406 			}
4407 			break;
4408 		case CRYPTO_ALG_TYPE_AHASH:
4409 			if (driver_algs[i].is_registered && refcount_read(
4410 			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4411 			    == 1) {
4412 				crypto_unregister_ahash(
4413 						&driver_algs[i].alg.hash);
4414 				driver_algs[i].is_registered = 0;
4415 			}
4416 			break;
4417 		}
4418 	}
4419 	return 0;
4420 }
4421 
4422 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4423 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4424 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4425 
4426 /*
4427  *	chcr_register_alg - Register crypto algorithms with kernel framework.
4428  */
4429 static int chcr_register_alg(void)
4430 {
4431 	struct crypto_alg ai;
4432 	struct ahash_alg *a_hash;
4433 	int err = 0, i;
4434 	char *name = NULL;
4435 
4436 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4437 		if (driver_algs[i].is_registered)
4438 			continue;
4439 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4440 		case CRYPTO_ALG_TYPE_SKCIPHER:
4441 			driver_algs[i].alg.skcipher.base.cra_priority =
4442 				CHCR_CRA_PRIORITY;
4443 			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4444 			driver_algs[i].alg.skcipher.base.cra_flags =
4445 				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4446 				CRYPTO_ALG_ALLOCATES_MEMORY |
4447 				CRYPTO_ALG_NEED_FALLBACK;
4448 			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4449 				sizeof(struct chcr_context) +
4450 				sizeof(struct ablk_ctx);
4451 			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4452 
4453 			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4454 			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4455 			break;
4456 		case CRYPTO_ALG_TYPE_AEAD:
4457 			driver_algs[i].alg.aead.base.cra_flags =
4458 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4459 				CRYPTO_ALG_ALLOCATES_MEMORY;
4460 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4461 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4462 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4463 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4464 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4465 			err = crypto_register_aead(&driver_algs[i].alg.aead);
4466 			name = driver_algs[i].alg.aead.base.cra_driver_name;
4467 			break;
4468 		case CRYPTO_ALG_TYPE_AHASH:
4469 			a_hash = &driver_algs[i].alg.hash;
4470 			a_hash->update = chcr_ahash_update;
4471 			a_hash->final = chcr_ahash_final;
4472 			a_hash->finup = chcr_ahash_finup;
4473 			a_hash->digest = chcr_ahash_digest;
4474 			a_hash->export = chcr_ahash_export;
4475 			a_hash->import = chcr_ahash_import;
4476 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4477 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4478 			a_hash->halg.base.cra_module = THIS_MODULE;
4479 			a_hash->halg.base.cra_flags =
4480 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4481 			a_hash->halg.base.cra_alignmask = 0;
4482 			a_hash->halg.base.cra_exit = NULL;
4483 
4484 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4485 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4486 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4487 				a_hash->init = chcr_hmac_init;
4488 				a_hash->setkey = chcr_ahash_setkey;
4489 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4490 			} else {
4491 				a_hash->init = chcr_sha_init;
4492 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4493 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4494 			}
4495 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4496 			ai = driver_algs[i].alg.hash.halg.base;
4497 			name = ai.cra_driver_name;
4498 			break;
4499 		}
4500 		if (err) {
4501 			pr_err("%s : Algorithm registration failed\n", name);
4502 			goto register_err;
4503 		} else {
4504 			driver_algs[i].is_registered = 1;
4505 		}
4506 	}
4507 	return 0;
4508 
4509 register_err:
4510 	chcr_unregister_alg();
4511 	return err;
4512 }
4513 
4514 /*
4515  *	start_crypto - Register the crypto algorithms.
4516  *	This should be called once when the first device comes up. After
4517  *	this the kernel will start calling driver APIs for crypto operations.
4518  */
4519 int start_crypto(void)
4520 {
4521 	return chcr_register_alg();
4522 }
4523 
4524 /*
4525  *	stop_crypto - Deregister all the crypto algorithms with kernel.
4526  *	This should be called once when the last device goes down. After
4527  *	this the kernel will no longer call driver APIs for crypto operations.
4528  */
4529 int stop_crypto(void)
4530 {
4531 	chcr_unregister_alg();
4532 	return 0;
4533 }
4534