xref: /linux/drivers/crypto/chelsio/chcr_algo.c (revision 0408c58be5a475c99b271f08d85859f7b59ec767)
1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/internal/aead.h>
59 #include <crypto/null.h>
60 #include <crypto/internal/skcipher.h>
61 #include <crypto/aead.h>
62 #include <crypto/scatterwalk.h>
63 #include <crypto/internal/hash.h>
64 
65 #include "t4fw_api.h"
66 #include "t4_msg.h"
67 #include "chcr_core.h"
68 #include "chcr_algo.h"
69 #include "chcr_crypto.h"
70 
71 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
72 {
73 	return ctx->crypto_ctx->aeadctx;
74 }
75 
76 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
77 {
78 	return ctx->crypto_ctx->ablkctx;
79 }
80 
81 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
82 {
83 	return ctx->crypto_ctx->hmacctx;
84 }
85 
86 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
87 {
88 	return gctx->ctx->gcm;
89 }
90 
91 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
92 {
93 	return gctx->ctx->authenc;
94 }
95 
96 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
97 {
98 	return ctx->dev->u_ctx;
99 }
100 
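/* True when the whole request fits into the WR as immediate data. */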
101 static inline int is_ofld_imm(const struct sk_buff *skb)
102 {
103 	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
104 }
105 
106 /*
107  *	sgl_len - calculates the size of an SGL of the given capacity
108  *	@n: the number of SGL entries
109  *	Calculates the number of flits needed for a scatter/gather list that
110  *	can hold the given number of entries.
111  */
112 static inline unsigned int sgl_len(unsigned int n)
113 {
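	/*
	 * i.e. 2 flits for the header and the first address/length pair,
	 * 3 flits for each further pair of entries and 2 flits for a
	 * trailing odd entry: sgl_len(3) = 5, sgl_len(4) = 7.
	 */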
114 	n--;
115 	return (3 * n) / 2 + (n & 1) + 2;
116 }
117 
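/*
 * chcr_verify_tag - software verification of the AEAD tag on decryption.
 * For GCM/RFC4106 the two tag values are compared directly from the
 * CPL_FW6_PLD response; for other modes the expected tag is copied from
 * the tail of req->src and compared with the tag following the CPL header.
 */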
118 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
119 {
120 	u8 temp[SHA512_DIGEST_SIZE];
121 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
122 	int authsize = crypto_aead_authsize(tfm);
123 	struct cpl_fw6_pld *fw6_pld;
124 	int cmp = 0;
125 
126 	fw6_pld = (struct cpl_fw6_pld *)input;
127 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
128 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
129 		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
130 	} else {
132 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
133 				authsize, req->assoclen +
134 				req->cryptlen - authsize);
135 		cmp = memcmp(temp, (fw6_pld + 1), authsize);
136 	}
137 	if (cmp)
138 		*err = -EBADMSG;
139 	else
140 		*err = 0;
141 }
142 
143 /*
144  *	chcr_handle_resp - copy results and unmap the DMA buffers of a completed request
145  *	@req: crypto request
146  */
147 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
148 			 int err)
149 {
150 	struct crypto_tfm *tfm = req->tfm;
151 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
152 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
153 	struct chcr_req_ctx ctx_req;
154 	struct cpl_fw6_pld *fw6_pld;
155 	unsigned int digestsize, updated_digestsize;
156 
157 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
158 	case CRYPTO_ALG_TYPE_AEAD:
159 		ctx_req.req.aead_req = (struct aead_request *)req;
160 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
161 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
162 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
163 		if (ctx_req.ctx.reqctx->skb) {
164 			kfree_skb(ctx_req.ctx.reqctx->skb);
165 			ctx_req.ctx.reqctx->skb = NULL;
166 		}
167 		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
168 			chcr_verify_tag(ctx_req.req.aead_req, input,
169 					&err);
170 			ctx_req.ctx.reqctx->verify = VERIFY_HW;
171 		}
172 		break;
173 
174 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
175 		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
176 		ctx_req.ctx.ablk_ctx =
177 			ablkcipher_request_ctx(ctx_req.req.ablk_req);
178 		if (!err) {
179 			fw6_pld = (struct cpl_fw6_pld *)input;
180 			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
181 			       AES_BLOCK_SIZE);
182 		}
183 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
184 			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
185 		if (ctx_req.ctx.ablk_ctx->skb) {
186 			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
187 			ctx_req.ctx.ablk_ctx->skb = NULL;
188 		}
189 		break;
190 
191 	case CRYPTO_ALG_TYPE_AHASH:
192 		ctx_req.req.ahash_req = (struct ahash_request *)req;
193 		ctx_req.ctx.ahash_ctx =
194 			ahash_request_ctx(ctx_req.req.ahash_req);
195 		digestsize =
196 			crypto_ahash_digestsize(crypto_ahash_reqtfm(
197 							ctx_req.req.ahash_req));
198 		updated_digestsize = digestsize;
199 		if (digestsize == SHA224_DIGEST_SIZE)
200 			updated_digestsize = SHA256_DIGEST_SIZE;
201 		else if (digestsize == SHA384_DIGEST_SIZE)
202 			updated_digestsize = SHA512_DIGEST_SIZE;
203 		if (ctx_req.ctx.ahash_ctx->skb) {
204 			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
205 			ctx_req.ctx.ahash_ctx->skb = NULL;
206 		}
207 		if (ctx_req.ctx.ahash_ctx->result == 1) {
208 			ctx_req.ctx.ahash_ctx->result = 0;
209 			memcpy(ctx_req.req.ahash_req->result, input +
210 			       sizeof(struct cpl_fw6_pld),
211 			       digestsize);
212 		} else {
213 			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
214 			       sizeof(struct cpl_fw6_pld),
215 			       updated_digestsize);
216 		}
217 		break;
218 	}
219 	return err;
220 }
221 
222 /*
223  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
224  *	@skb: the packet
225  *	Returns the number of flits needed for the given offload packet.
226  *	These packets are already fully constructed and no additional headers
227  *	will be added.
228  */
229 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
230 {
231 	unsigned int flits, cnt;
232 
233 	if (is_ofld_imm(skb))
234 		return DIV_ROUND_UP(skb->len, 8);
235 
236 	flits = skb_transport_offset(skb) / 8;   /* headers */
237 	cnt = skb_shinfo(skb)->nr_frags;
238 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
239 		cnt++;
240 	return flits + sgl_len(cnt);
241 }
242 
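/*
 * get_aes_decrypt_key - expand the AES key schedule in software and store
 * the final Nk round-key words in reverse order, which is the form the
 * hardware expects for decryption.
 */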
243 static inline void get_aes_decrypt_key(unsigned char *dec_key,
244 				       const unsigned char *key,
245 				       unsigned int keylength)
246 {
247 	u32 temp;
248 	u32 w_ring[MAX_NK];
249 	int i, j, k;
250 	u8  nr, nk;
251 
252 	switch (keylength) {
253 	case AES_KEYLENGTH_128BIT:
254 		nk = KEYLENGTH_4BYTES;
255 		nr = NUMBER_OF_ROUNDS_10;
256 		break;
257 	case AES_KEYLENGTH_192BIT:
258 		nk = KEYLENGTH_6BYTES;
259 		nr = NUMBER_OF_ROUNDS_12;
260 		break;
261 	case AES_KEYLENGTH_256BIT:
262 		nk = KEYLENGTH_8BYTES;
263 		nr = NUMBER_OF_ROUNDS_14;
264 		break;
265 	default:
266 		return;
267 	}
268 	for (i = 0; i < nk; i++)
269 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
270 
271 	i = 0;
272 	temp = w_ring[nk - 1];
273 	while (i + nk < (nr + 1) * 4) {
274 		if (!(i % nk)) {
275 			/* RotWord(temp) */
276 			temp = (temp << 8) | (temp >> 24);
277 			temp = aes_ks_subword(temp);
278 			temp ^= round_constant[i / nk];
279 		} else if (nk == 8 && (i % 4 == 0)) {
280 			temp = aes_ks_subword(temp);
281 		}
282 		w_ring[i % nk] ^= temp;
283 		temp = w_ring[i % nk];
284 		i++;
285 	}
286 	i--;
287 	for (k = 0, j = i % nk; k < nk; k++) {
288 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
289 		j--;
290 		if (j < 0)
291 			j += nk;
292 	}
293 }
294 
295 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
296 {
297 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
298 
299 	switch (ds) {
300 	case SHA1_DIGEST_SIZE:
301 		base_hash = crypto_alloc_shash("sha1", 0, 0);
302 		break;
303 	case SHA224_DIGEST_SIZE:
304 		base_hash = crypto_alloc_shash("sha224", 0, 0);
305 		break;
306 	case SHA256_DIGEST_SIZE:
307 		base_hash = crypto_alloc_shash("sha256", 0, 0);
308 		break;
309 	case SHA384_DIGEST_SIZE:
310 		base_hash = crypto_alloc_shash("sha384", 0, 0);
311 		break;
312 	case SHA512_DIGEST_SIZE:
313 		base_hash = crypto_alloc_shash("sha512", 0, 0);
314 		break;
315 	}
316 
317 	return base_hash;
318 }
319 
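/*
 * chcr_compute_partial_hash - hash a single block (the HMAC ipad or opad)
 * with the software shash and export its raw internal state; the exported
 * state is later programmed into the hardware as the initial digest.
 */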
320 static int chcr_compute_partial_hash(struct shash_desc *desc,
321 				     char *iopad, char *result_hash,
322 				     int digest_size)
323 {
324 	struct sha1_state sha1_st;
325 	struct sha256_state sha256_st;
326 	struct sha512_state sha512_st;
327 	int error;
328 
329 	if (digest_size == SHA1_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha1_st);
333 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
334 	} else if (digest_size == SHA224_DIGEST_SIZE) {
335 		error = crypto_shash_init(desc) ?:
336 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
337 			crypto_shash_export(desc, (void *)&sha256_st);
338 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
339 
340 	} else if (digest_size == SHA256_DIGEST_SIZE) {
341 		error = crypto_shash_init(desc) ?:
342 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
343 			crypto_shash_export(desc, (void *)&sha256_st);
344 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
345 
346 	} else if (digest_size == SHA384_DIGEST_SIZE) {
347 		error = crypto_shash_init(desc) ?:
348 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
349 			crypto_shash_export(desc, (void *)&sha512_st);
350 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
351 
352 	} else if (digest_size == SHA512_DIGEST_SIZE) {
353 		error = crypto_shash_init(desc) ?:
354 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
355 			crypto_shash_export(desc, (void *)&sha512_st);
356 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
357 	} else {
358 		error = -EINVAL;
359 		pr_err("Unknown digest size %d\n", digest_size);
360 	}
361 	return error;
362 }
363 
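/* Convert the exported hash state words from host order to big endian. */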
364 static void chcr_change_order(char *buf, int ds)
365 {
366 	int i;
367 
368 	if (ds == SHA512_DIGEST_SIZE) {
369 		for (i = 0; i < (ds / sizeof(u64)); i++)
370 			*((__be64 *)buf + i) =
371 				cpu_to_be64(*((u64 *)buf + i));
372 	} else {
373 		for (i = 0; i < (ds / sizeof(u32)); i++)
374 			*((__be32 *)buf + i) =
375 				cpu_to_be32(*((u32 *)buf + i));
376 	}
377 }
378 
379 static inline int is_hmac(struct crypto_tfm *tfm)
380 {
381 	struct crypto_alg *alg = tfm->__crt_alg;
382 	struct chcr_alg_template *chcr_crypto_alg =
383 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
384 			     alg.hash);
385 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
386 		return 1;
387 	return 0;
388 }
389 
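/*
 * write_phys_cpl - build the CPL_RX_PHYS_DSGL that tells the hardware where
 * to write the result: fill the header, then pack up to eight address/length
 * pairs per phys_sge_pairs entry; any shortfall between the mapped length
 * and the expected output size is folded into the last entry.
 */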
390 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
391 			   struct scatterlist *sg,
392 			   struct phys_sge_parm *sg_param)
393 {
394 	struct phys_sge_pairs *to;
395 	int out_buf_size = sg_param->obsize;
396 	unsigned int nents = sg_param->nents, i, j = 0;
397 
398 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
399 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
400 	phys_cpl->pcirlxorder_to_noofsgentr =
401 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
402 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
403 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
404 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
405 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
406 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
407 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
408 	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
409 	phys_cpl->rss_hdr_int.hash_val = 0;
410 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
411 				       sizeof(struct cpl_rx_phys_dsgl));
412 
413 	for (i = 0; nents; to++) {
414 		for (j = 0; j < 8 && nents; j++, nents--) {
415 			out_buf_size -= sg_dma_len(sg);
416 			to->len[j] = htons(sg_dma_len(sg));
417 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
418 			sg = sg_next(sg);
419 		}
420 	}
421 	if (out_buf_size) {
422 		j--;
423 		to--;
424 		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
425 	}
426 }
427 
428 static inline int map_writesg_phys_cpl(struct device *dev,
429 					struct cpl_rx_phys_dsgl *phys_cpl,
430 					struct scatterlist *sg,
431 					struct phys_sge_parm *sg_param)
432 {
433 	if (!sg || !sg_param->nents)
434 		return 0;
435 
436 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
437 	if (sg_param->nents == 0) {
438 		pr_err("CHCR : DMA mapping failed\n");
439 		return -EINVAL;
440 	}
441 	write_phys_cpl(phys_cpl, sg, sg_param);
442 	return 0;
443 }
444 
445 static inline int get_aead_subtype(struct crypto_aead *aead)
446 {
447 	struct aead_alg *alg = crypto_aead_alg(aead);
448 	struct chcr_alg_template *chcr_crypto_alg =
449 		container_of(alg, struct chcr_alg_template, alg.aead);
450 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
451 }
452 
453 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
454 {
455 	struct crypto_alg *alg = tfm->__crt_alg;
456 	struct chcr_alg_template *chcr_crypto_alg =
457 		container_of(alg, struct chcr_alg_template, alg.crypto);
458 
459 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
460 }
461 
462 static inline void write_buffer_to_skb(struct sk_buff *skb,
463 					unsigned int *frags,
464 					char *bfr,
465 					u8 bfr_len)
466 {
467 	skb->len += bfr_len;
468 	skb->data_len += bfr_len;
469 	skb->truesize += bfr_len;
470 	get_page(virt_to_page(bfr));
471 	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
472 			   offset_in_page(bfr), bfr_len);
473 	(*frags)++;
474 }
475
477 static inline void
478 write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
479 			struct scatterlist *sg, unsigned int count)
480 {
481 	struct page *spage;
482 	unsigned int page_len;
483 
484 	skb->len += count;
485 	skb->data_len += count;
486 	skb->truesize += count;
487 
488 	while (count > 0) {
489 		if (!sg || (!(sg->length)))
490 			break;
491 		spage = sg_page(sg);
492 		get_page(spage);
493 		page_len = min(sg->length, count);
494 		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
495 		(*frags)++;
496 		count -= page_len;
497 		sg = sg_next(sg);
498 	}
499 }
500 
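/*
 * generate_copy_rrkey - populate the key context for decryption.  CBC uses
 * the reverse-round key directly; other modes keep the second half of the
 * key as-is and substitute the reverse-round key for the first half.
 */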
501 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
502 			       struct _key_ctx *key_ctx)
503 {
504 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
505 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
506 	} else {
507 		memcpy(key_ctx->key,
508 		       ablkctx->key + (ablkctx->enckey_len >> 1),
509 		       ablkctx->enckey_len >> 1);
510 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
511 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
512 	}
513 	return 0;
514 }
515 
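/*
 * create_wreq - fill the FW_CRYPTO_LOOKASIDE_WR, ULPTX and sc_imm headers
 * common to cipher, hash and AEAD work requests.  Small requests are
 * carried as immediate data in the WR; larger ones are described by the
 * skb fragments.
 */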
516 static inline void create_wreq(struct chcr_context *ctx,
517 			       struct chcr_wr *chcr_req,
518 			       void *req, struct sk_buff *skb,
519 			       int kctx_len, int hash_sz,
520 			       int is_iv,
521 			       unsigned int sc_len)
522 {
523 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
524 	int iv_loc = IV_DSGL;
525 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
526 	unsigned int immdatalen = 0, nr_frags = 0;
527 
528 	if (is_ofld_imm(skb)) {
529 		immdatalen = skb->data_len;
530 		iv_loc = IV_IMMEDIATE;
531 	} else {
532 		nr_frags = skb_shinfo(skb)->nr_frags;
533 	}
534 
535 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
536 				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
537 	chcr_req->wreq.pld_size_hash_size =
538 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
539 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
540 	chcr_req->wreq.len16_pkd =
541 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
542 				    (calc_tx_flits_ofld(skb) * 8), 16)));
543 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
544 	chcr_req->wreq.rx_chid_to_rx_q_id =
545 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
546 				is_iv ? iv_loc : IV_NOP, ctx->tx_qidx);
547 
548 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
549 						       qid);
550 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
551 					16) - ((sizeof(chcr_req->wreq)) >> 4)));
552 
553 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
554 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
555 				   sizeof(chcr_req->key_ctx) +
556 				   kctx_len + sc_len + immdatalen);
557 }
558 
559 /**
560  *	create_cipher_wr - form the WR for cipher operations
561  *	@req: cipher request
563  *	@qid: ingress qid where response of this WR should be received.
564  *	@op_type:	encryption or decryption
565  */
566 static struct sk_buff
567 *create_cipher_wr(struct ablkcipher_request *req,
568 		  unsigned short qid,
569 		  unsigned short op_type)
570 {
571 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
572 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
573 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
574 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
575 	struct sk_buff *skb = NULL;
576 	struct chcr_wr *chcr_req;
577 	struct cpl_rx_phys_dsgl *phys_cpl;
578 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
579 	struct phys_sge_parm sg_param;
580 	unsigned int frags = 0, transhdr_len, phys_dsgl;
581 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
582 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
583 			GFP_ATOMIC;
584 
585 	if (!req->info)
586 		return ERR_PTR(-EINVAL);
587 	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
588 	if (reqctx->dst_nents <= 0) {
589 		pr_err("AES:Invalid Destination sg lists\n");
590 		return ERR_PTR(-EINVAL);
591 	}
592 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
593 	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
594 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
595 		       ablkctx->enckey_len, req->nbytes, ivsize);
596 		return ERR_PTR(-EINVAL);
597 	}
598 
599 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
600 
601 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
602 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
603 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
604 	if (!skb)
605 		return ERR_PTR(-ENOMEM);
606 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
607 	chcr_req = __skb_put_zero(skb, transhdr_len);
608 	chcr_req->sec_cpl.op_ivinsrtofst =
609 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
610 
611 	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
612 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
613 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
614 
615 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
616 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
617 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
618 							 ablkctx->ciph_mode,
619 							 0, 0, ivsize >> 1);
620 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
621 							  0, 1, phys_dsgl);
622 
623 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
624 	if (op_type == CHCR_DECRYPT_OP) {
625 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
626 	} else {
627 		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
628 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
629 			       ablkctx->enckey_len);
630 		} else {
631 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
632 			       (ablkctx->enckey_len >> 1),
633 			       ablkctx->enckey_len >> 1);
634 			memcpy(chcr_req->key_ctx.key +
635 			       (ablkctx->enckey_len >> 1),
636 			       ablkctx->key,
637 			       ablkctx->enckey_len >> 1);
638 		}
639 	}
640 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
641 	sg_param.nents = reqctx->dst_nents;
642 	sg_param.obsize = req->nbytes;
643 	sg_param.qid = qid;
644 	sg_param.align = 1;
645 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
646 				 &sg_param))
647 		goto map_fail1;
648 
649 	skb_set_transport_header(skb, transhdr_len);
650 	memcpy(reqctx->iv, req->info, ivsize);
651 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
652 	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
653 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
654 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
655 	reqctx->skb = skb;
656 	skb_get(skb);
657 	return skb;
658 map_fail1:
659 	kfree_skb(skb);
660 	return ERR_PTR(-ENOMEM);
661 }
662 
663 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
664 			       unsigned int keylen)
665 {
666 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
667 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
668 	unsigned int ck_size, context_size;
669 	u16 alignment = 0;
670 
671 	if (keylen == AES_KEYSIZE_128) {
672 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
673 	} else if (keylen == AES_KEYSIZE_192) {
674 		alignment = 8;
675 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
676 	} else if (keylen == AES_KEYSIZE_256) {
677 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
678 	} else {
679 		goto badkey_err;
680 	}
681 	memcpy(ablkctx->key, key, keylen);
682 	ablkctx->enckey_len = keylen;
683 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
684 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
685 			keylen + alignment) >> 4;
686 
687 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
688 						0, 0, context_size);
689 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
690 	return 0;
691 badkey_err:
692 	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
693 	ablkctx->enckey_len = 0;
694 	return -EINVAL;
695 }
696 
697 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
698 {
699 	struct adapter *adap = netdev2adap(dev);
700 	struct sge_uld_txq_info *txq_info =
701 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
702 	struct sge_uld_txq *txq;
703 	int ret = 0;
704 
705 	local_bh_disable();
706 	txq = &txq_info->uldtxq[idx];
707 	spin_lock(&txq->sendq.lock);
708 	if (txq->full)
709 		ret = -1;
710 	spin_unlock(&txq->sendq.lock);
711 	local_bh_enable();
712 	return ret;
713 }
714 
715 static int chcr_aes_encrypt(struct ablkcipher_request *req)
716 {
717 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
718 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
719 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
720 	struct sk_buff *skb;
721 
722 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
723 					    ctx->tx_qidx))) {
724 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
725 			return -EBUSY;
726 	}
727 
728 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
729 			       CHCR_ENCRYPT_OP);
730 	if (IS_ERR(skb)) {
731 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
732 		return  PTR_ERR(skb);
733 	}
734 	skb->dev = u_ctx->lldi.ports[0];
735 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
736 	chcr_send_wr(skb);
737 	return -EINPROGRESS;
738 }
739 
740 static int chcr_aes_decrypt(struct ablkcipher_request *req)
741 {
742 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
743 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
744 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
745 	struct sk_buff *skb;
746 
747 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
748 					    ctx->tx_qidx))) {
749 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
750 			return -EBUSY;
751 	}
752 
753 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
754 			       CHCR_DECRYPT_OP);
755 	if (IS_ERR(skb)) {
756 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
757 		return PTR_ERR(skb);
758 	}
759 	skb->dev = u_ctx->lldi.ports[0];
760 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
761 	chcr_send_wr(skb);
762 	return -EINPROGRESS;
763 }
764 
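/*
 * chcr_device_init - bind the transform context to a chcr device and pick
 * its rx/tx queue indices, toggling tx_channel_id so that contexts are
 * spread across the adapter's channels.
 */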
765 static int chcr_device_init(struct chcr_context *ctx)
766 {
767 	struct uld_ctx *u_ctx;
768 	struct adapter *adap;
769 	unsigned int id;
770 	int txq_perchan, txq_idx, ntxq;
771 	int err = 0, rxq_perchan, rxq_idx;
772 
773 	id = smp_processor_id();
774 	if (!ctx->dev) {
775 		err = assign_chcr_device(&ctx->dev);
776 		if (err) {
777 			pr_err("chcr device assignment fails\n");
778 			goto out;
779 		}
780 		u_ctx = ULD_CTX(ctx);
781 		adap = padap(ctx->dev);
782 		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
783 				    adap->vres.ncrypto_fc);
784 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
785 		txq_perchan = ntxq / u_ctx->lldi.nchan;
786 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
787 		rxq_idx += id % rxq_perchan;
788 		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
789 		txq_idx += id % txq_perchan;
790 		spin_lock(&ctx->dev->lock_chcr_dev);
791 		ctx->rx_qidx = rxq_idx;
792 		ctx->tx_qidx = txq_idx;
793 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
794 		ctx->dev->rx_channel_id = 0;
795 		spin_unlock(&ctx->dev->lock_chcr_dev);
796 	}
797 out:
798 	return err;
799 }
800 
801 static int chcr_cra_init(struct crypto_tfm *tfm)
802 {
803 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
804 	return chcr_device_init(crypto_tfm_ctx(tfm));
805 }
806 
807 static int get_alg_config(struct algo_param *params,
808 			  unsigned int auth_size)
809 {
810 	switch (auth_size) {
811 	case SHA1_DIGEST_SIZE:
812 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
813 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
814 		params->result_size = SHA1_DIGEST_SIZE;
815 		break;
816 	case SHA224_DIGEST_SIZE:
817 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
818 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
819 		params->result_size = SHA256_DIGEST_SIZE;
820 		break;
821 	case SHA256_DIGEST_SIZE:
822 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
823 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
824 		params->result_size = SHA256_DIGEST_SIZE;
825 		break;
826 	case SHA384_DIGEST_SIZE:
827 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
828 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
829 		params->result_size = SHA512_DIGEST_SIZE;
830 		break;
831 	case SHA512_DIGEST_SIZE:
832 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
833 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
834 		params->result_size = SHA512_DIGEST_SIZE;
835 		break;
836 	default:
837 		pr_err("chcr : ERROR, unsupported digest size\n");
838 		return -EINVAL;
839 	}
840 	return 0;
841 }
842 
843 static inline void chcr_free_shash(struct crypto_shash *base_hash)
844 {
845 		crypto_free_shash(base_hash);
846 	crypto_free_shash(base_hash);
847 
848 /**
849  *	create_hash_wr - Create hash work request
850  *	@req: hash request
 *	@param: hash work request parameters
851  */
852 static struct sk_buff *create_hash_wr(struct ahash_request *req,
853 				      struct hash_wr_param *param)
854 {
855 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
856 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
857 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
858 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
859 	struct sk_buff *skb = NULL;
860 	struct chcr_wr *chcr_req;
861 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
862 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
863 	unsigned int kctx_len = 0;
864 	u8 hash_size_in_response = 0;
865 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
866 		GFP_ATOMIC;
867 
868 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
869 	kctx_len = param->alg_prm.result_size + iopad_alignment;
870 	if (param->opad_needed)
871 		kctx_len += param->alg_prm.result_size + iopad_alignment;
872 
873 	if (req_ctx->result)
874 		hash_size_in_response = digestsize;
875 	else
876 		hash_size_in_response = param->alg_prm.result_size;
877 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
878 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
879 	if (!skb)
880 		return skb;
881 
882 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
883 	chcr_req = __skb_put_zero(skb, transhdr_len);
884 
885 	chcr_req->sec_cpl.op_ivinsrtofst =
886 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
887 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
888 
889 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
890 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
891 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
892 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
893 	chcr_req->sec_cpl.seqno_numivs =
894 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
895 					 param->opad_needed, 0);
896 
897 	chcr_req->sec_cpl.ivgen_hdrlen =
898 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
899 
900 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
901 	       param->alg_prm.result_size);
902 
903 	if (param->opad_needed)
904 		memcpy(chcr_req->key_ctx.key +
905 		       ((param->alg_prm.result_size <= 32) ? 32 :
906 			CHCR_HASH_MAX_DIGEST_SIZE),
907 		       hmacctx->opad, param->alg_prm.result_size);
908 
909 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
910 					    param->alg_prm.mk_size, 0,
911 					    param->opad_needed,
912 					    ((kctx_len +
913 					     sizeof(chcr_req->key_ctx)) >> 4));
914 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
915 
916 	skb_set_transport_header(skb, transhdr_len);
917 	if (param->bfr_len != 0)
918 		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
919 				    param->bfr_len);
920 	if (param->sg_len != 0)
921 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
922 
923 	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
924 			DUMMY_BYTES);
925 	req_ctx->skb = skb;
926 	skb_get(skb);
927 	return skb;
928 }
929 
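/*
 * chcr_ahash_update - buffer sub-block-sized data; once at least one full
 * block is available, send the block-aligned portion to the hardware and
 * keep the remainder in the swap buffer for the next request.
 */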
930 static int chcr_ahash_update(struct ahash_request *req)
931 {
932 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
933 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
934 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
935 	struct uld_ctx *u_ctx = NULL;
936 	struct sk_buff *skb;
937 	u8 remainder = 0, bs;
938 	unsigned int nbytes = req->nbytes;
939 	struct hash_wr_param params;
940 
941 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
942 
943 	u_ctx = ULD_CTX(ctx);
944 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
945 					    ctx->tx_qidx))) {
946 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
947 			return -EBUSY;
948 	}
949 
950 	if (nbytes + req_ctx->reqlen >= bs) {
951 		remainder = (nbytes + req_ctx->reqlen) % bs;
952 		nbytes = nbytes + req_ctx->reqlen - remainder;
953 	} else {
954 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
955 				   + req_ctx->reqlen, nbytes, 0);
956 		req_ctx->reqlen += nbytes;
957 		return 0;
958 	}
959 
960 	params.opad_needed = 0;
961 	params.more = 1;
962 	params.last = 0;
963 	params.sg_len = nbytes - req_ctx->reqlen;
964 	params.bfr_len = req_ctx->reqlen;
965 	params.scmd1 = 0;
966 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
967 	req_ctx->result = 0;
968 	req_ctx->data_len += params.sg_len + params.bfr_len;
969 	skb = create_hash_wr(req, &params);
970 	if (!skb)
971 		return -ENOMEM;
972 
973 	if (remainder) {
974 		u8 *temp;
975 		/* Swap buffers */
976 		temp = req_ctx->reqbfr;
977 		req_ctx->reqbfr = req_ctx->skbfr;
978 		req_ctx->skbfr = temp;
979 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
980 				   req_ctx->reqbfr, remainder, req->nbytes -
981 				   remainder);
982 	}
983 	req_ctx->reqlen = remainder;
984 	skb->dev = u_ctx->lldi.ports[0];
985 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
986 	chcr_send_wr(skb);
987 
988 	return -EINPROGRESS;
989 }
990 
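/*
 * create_last_hash_block - build a padding-only final block: the 0x80
 * terminator followed by the total message length in bits in the last
 * eight bytes (offset 56 for 64-byte blocks, 120 for 128-byte blocks).
 */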
991 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
992 {
993 	memset(bfr_ptr, 0, bs);
994 	*bfr_ptr = 0x80;
995 	if (bs == 64)
996 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
997 	else
998 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
999 }
1000 
1001 static int chcr_ahash_final(struct ahash_request *req)
1002 {
1003 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1004 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1005 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1006 	struct hash_wr_param params;
1007 	struct sk_buff *skb;
1008 	struct uld_ctx *u_ctx = NULL;
1009 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1010 
1011 	u_ctx = ULD_CTX(ctx);
1012 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1013 		params.opad_needed = 1;
1014 	else
1015 		params.opad_needed = 0;
1016 	params.sg_len = 0;
1017 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1018 	req_ctx->result = 1;
1019 	params.bfr_len = req_ctx->reqlen;
1020 	req_ctx->data_len += params.bfr_len + params.sg_len;
1021 	if (req_ctx->reqlen == 0) {
1022 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1023 		params.last = 0;
1024 		params.more = 1;
1025 		params.scmd1 = 0;
1026 		params.bfr_len = bs;
1027 
1028 	} else {
1029 		params.scmd1 = req_ctx->data_len;
1030 		params.last = 1;
1031 		params.more = 0;
1032 	}
1033 	skb = create_hash_wr(req, &params);
1034 	if (!skb)
1035 		return -ENOMEM;
1036 
1037 	skb->dev = u_ctx->lldi.ports[0];
1038 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1039 	chcr_send_wr(skb);
1040 	return -EINPROGRESS;
1041 }
1042 
1043 static int chcr_ahash_finup(struct ahash_request *req)
1044 {
1045 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1046 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1047 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1048 	struct uld_ctx *u_ctx = NULL;
1049 	struct sk_buff *skb;
1050 	struct hash_wr_param params;
1051 	u8  bs;
1052 
1053 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1054 	u_ctx = ULD_CTX(ctx);
1055 
1056 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1057 					    ctx->tx_qidx))) {
1058 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1059 			return -EBUSY;
1060 	}
1061 
1062 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1063 		params.opad_needed = 1;
1064 	else
1065 		params.opad_needed = 0;
1066 
1067 	params.sg_len = req->nbytes;
1068 	params.bfr_len = req_ctx->reqlen;
1069 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1070 	req_ctx->data_len += params.bfr_len + params.sg_len;
1071 	req_ctx->result = 1;
1072 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1073 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1074 		params.last = 0;
1075 		params.more = 1;
1076 		params.scmd1 = 0;
1077 		params.bfr_len = bs;
1078 	} else {
1079 		params.scmd1 = req_ctx->data_len;
1080 		params.last = 1;
1081 		params.more = 0;
1082 	}
1083 
1084 	skb = create_hash_wr(req, &params);
1085 	if (!skb)
1086 		return -ENOMEM;
1087 
1088 	skb->dev = u_ctx->lldi.ports[0];
1089 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1090 	chcr_send_wr(skb);
1091 
1092 	return -EINPROGRESS;
1093 }
1094 
1095 static int chcr_ahash_digest(struct ahash_request *req)
1096 {
1097 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1098 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1099 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1100 	struct uld_ctx *u_ctx = NULL;
1101 	struct sk_buff *skb;
1102 	struct hash_wr_param params;
1103 	u8  bs;
1104 
1105 	rtfm->init(req);
1106 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1107 
1108 	u_ctx = ULD_CTX(ctx);
1109 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1110 					    ctx->tx_qidx))) {
1111 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1112 			return -EBUSY;
1113 	}
1114 
1115 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1116 		params.opad_needed = 1;
1117 	else
1118 		params.opad_needed = 0;
1119 
1120 	params.last = 0;
1121 	params.more = 0;
1122 	params.sg_len = req->nbytes;
1123 	params.bfr_len = 0;
1124 	params.scmd1 = 0;
1125 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1126 	req_ctx->result = 1;
1127 	req_ctx->data_len += params.bfr_len + params.sg_len;
1128 
1129 	if (req->nbytes == 0) {
1130 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1131 		params.more = 1;
1132 		params.bfr_len = bs;
1133 	}
1134 
1135 	skb = create_hash_wr(req, &params);
1136 	if (!skb)
1137 		return -ENOMEM;
1138 
1139 	skb->dev = u_ctx->lldi.ports[0];
1140 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1141 	chcr_send_wr(skb);
1142 	return -EINPROGRESS;
1143 }
1144 
1145 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1146 {
1147 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1148 	struct chcr_ahash_req_ctx *state = out;
1149 
1150 	state->reqlen = req_ctx->reqlen;
1151 	state->data_len = req_ctx->data_len;
1152 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1153 	memcpy(state->partial_hash, req_ctx->partial_hash,
1154 	       CHCR_HASH_MAX_DIGEST_SIZE);
1155 	return 0;
1156 }
1157 
1158 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1159 {
1160 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1161 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1162 
1163 	req_ctx->reqlen = state->reqlen;
1164 	req_ctx->data_len = state->data_len;
1165 	req_ctx->reqbfr = req_ctx->bfr1;
1166 	req_ctx->skbfr = req_ctx->bfr2;
1167 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1168 	memcpy(req_ctx->partial_hash, state->partial_hash,
1169 	       CHCR_HASH_MAX_DIGEST_SIZE);
1170 	return 0;
1171 }
1172 
1173 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1174 			     unsigned int keylen)
1175 {
1176 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1177 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1178 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1179 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1180 	unsigned int i, err = 0, updated_digestsize;
1181 
1182 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1183 
1184 	/* Use the key to calculate the ipad and opad. The ipad is sent with the
1185 	 * first request's data and the opad with the final hash result; they
1186 	 * are stored in hmacctx->ipad and hmacctx->opad respectively.
1187 	 */
1188 	shash->tfm = hmacctx->base_hash;
1189 	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1190 	if (keylen > bs) {
1191 		err = crypto_shash_digest(shash, key, keylen,
1192 					  hmacctx->ipad);
1193 		if (err)
1194 			goto out;
1195 		keylen = digestsize;
1196 	} else {
1197 		memcpy(hmacctx->ipad, key, keylen);
1198 	}
1199 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
1200 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
1201 
1202 	for (i = 0; i < bs / sizeof(int); i++) {
1203 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1204 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1205 	}
1206 
1207 	updated_digestsize = digestsize;
1208 	if (digestsize == SHA224_DIGEST_SIZE)
1209 		updated_digestsize = SHA256_DIGEST_SIZE;
1210 	else if (digestsize == SHA384_DIGEST_SIZE)
1211 		updated_digestsize = SHA512_DIGEST_SIZE;
1212 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1213 					hmacctx->ipad, digestsize);
1214 	if (err)
1215 		goto out;
1216 	chcr_change_order(hmacctx->ipad, updated_digestsize);
1217 
1218 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
1219 					hmacctx->opad, digestsize);
1220 	if (err)
1221 		goto out;
1222 	chcr_change_order(hmacctx->opad, updated_digestsize);
1223 out:
1224 	return err;
1225 }
1226 
1227 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1228 			       unsigned int key_len)
1229 {
1230 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1231 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1232 	unsigned short context_size = 0;
1233 
1234 	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
1235 	    (key_len != (AES_KEYSIZE_256 << 1))) {
1236 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
1237 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
1238 		ablkctx->enckey_len = 0;
1239 		return -EINVAL;
1240 
1241 	}
1242 
1243 	memcpy(ablkctx->key, key, key_len);
1244 	ablkctx->enckey_len = key_len;
1245 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1246 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1247 	ablkctx->key_ctx_hdr =
1248 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1249 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1250 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1251 				 CHCR_KEYCTX_NO_KEY, 1,
1252 				 0, context_size);
1253 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1254 	return 0;
1255 }
1256 
1257 static int chcr_sha_init(struct ahash_request *areq)
1258 {
1259 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1260 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1261 	int digestsize =  crypto_ahash_digestsize(tfm);
1262 
1263 	req_ctx->data_len = 0;
1264 	req_ctx->reqlen = 0;
1265 	req_ctx->reqbfr = req_ctx->bfr1;
1266 	req_ctx->skbfr = req_ctx->bfr2;
1267 	req_ctx->skb = NULL;
1268 	req_ctx->result = 0;
1269 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
1270 	return 0;
1271 }
1272 
1273 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1274 {
1275 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1276 				 sizeof(struct chcr_ahash_req_ctx));
1277 	return chcr_device_init(crypto_tfm_ctx(tfm));
1278 }
1279 
1280 static int chcr_hmac_init(struct ahash_request *areq)
1281 {
1282 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1283 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1284 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1285 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1286 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1287 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1288 
1289 	chcr_sha_init(areq);
1290 	req_ctx->data_len = bs;
1291 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1292 		if (digestsize == SHA224_DIGEST_SIZE)
1293 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1294 			       SHA256_DIGEST_SIZE);
1295 		else if (digestsize == SHA384_DIGEST_SIZE)
1296 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1297 			       SHA512_DIGEST_SIZE);
1298 		else
1299 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1300 			       digestsize);
1301 	}
1302 	return 0;
1303 }
1304 
1305 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1306 {
1307 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1308 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1309 	unsigned int digestsize =
1310 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1311 
1312 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1313 				 sizeof(struct chcr_ahash_req_ctx));
1314 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
1315 	if (IS_ERR(hmacctx->base_hash))
1316 		return PTR_ERR(hmacctx->base_hash);
1317 	return chcr_device_init(crypto_tfm_ctx(tfm));
1318 }
1319 
1320 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1321 {
1322 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1323 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1324 
1325 	if (hmacctx->base_hash) {
1326 		chcr_free_shash(hmacctx->base_hash);
1327 		hmacctx->base_hash = NULL;
1328 	}
1329 }
1330 
1331 static int chcr_copy_assoc(struct aead_request *req,
1332 				struct chcr_aead_ctx *ctx)
1333 {
1334 	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1335 
1336 	skcipher_request_set_tfm(skreq, ctx->null);
1337 	skcipher_request_set_callback(skreq, aead_request_flags(req),
1338 			NULL, NULL);
1339 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1340 			NULL);
1341 
1342 	return crypto_skcipher_encrypt(skreq);
1343 }
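
/*
 * chcr_aead_need_fallback - decide whether to hand the request to the
 * software fallback: zero-length payload, AAD beyond the hardware limit,
 * too many source fragments, or a work request larger than MAX_WR_SIZE.
 */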
1344 static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
1345 				   int aadmax, int wrlen,
1346 				   unsigned short op_type)
1347 {
1348 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
1349 
1350 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
1351 	    (req->assoclen > aadmax) ||
1352 	    (src_nent > MAX_SKB_FRAGS) ||
1353 	    (wrlen > MAX_WR_SIZE))
1354 		return 1;
1355 	return 0;
1356 }
1357 
1358 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
1359 {
1360 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1361 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1362 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1363 	struct aead_request *subreq = aead_request_ctx(req);
1364 
1365 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
1366 	aead_request_set_callback(subreq, req->base.flags,
1367 				  req->base.complete, req->base.data);
1368 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
1369 			       req->iv);
1370 	aead_request_set_ad(subreq, req->assoclen);
1371 	return op_type ? crypto_aead_decrypt(subreq) :
1372 		crypto_aead_encrypt(subreq);
1373 }
1374 
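/*
 * create_authenc_wr - build the work request for the authenc AEADs
 * (AES-CBC with HMAC or null authentication): fill the SEC CPL, copy the
 * cipher key (or reverse-round key for decryption) and the precomputed
 * ipad/opad, map the destination DSGL and append AAD, IV and payload.
 */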
1375 static struct sk_buff *create_authenc_wr(struct aead_request *req,
1376 					 unsigned short qid,
1377 					 int size,
1378 					 unsigned short op_type)
1379 {
1380 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1381 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1382 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1383 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1384 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1385 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1386 	struct sk_buff *skb = NULL;
1387 	struct chcr_wr *chcr_req;
1388 	struct cpl_rx_phys_dsgl *phys_cpl;
1389 	struct phys_sge_parm sg_param;
1390 	struct scatterlist *src;
1391 	unsigned int frags = 0, transhdr_len;
1392 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1393 	unsigned int   kctx_len = 0;
1394 	unsigned short stop_offset = 0;
1395 	unsigned int  assoclen = req->assoclen;
1396 	unsigned int  authsize = crypto_aead_authsize(tfm);
1397 	int err = -EINVAL, src_nent;
1398 	int null = 0;
1399 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1400 		GFP_ATOMIC;
1401 
1402 	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
1403 		goto err;
1404 
1405 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1406 		goto err;
1407 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1408 	if (src_nent < 0)
1409 		goto err;
1410 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1411 	reqctx->dst = src;
1412 
1413 	if (req->src != req->dst) {
1414 		err = chcr_copy_assoc(req, aeadctx);
1415 		if (err)
1416 			return ERR_PTR(err);
1417 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1418 					       req->assoclen);
1419 	}
1420 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1421 		null = 1;
1422 		assoclen = 0;
1423 	}
1424 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1425 					     (op_type ? -authsize : authsize));
1426 	if (reqctx->dst_nents < 0) {
1427 		pr_err("AUTHENC:Invalid Destination sg entries\n");
1428 		goto err;
1429 	}
1430 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1431 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
1432 		- sizeof(chcr_req->key_ctx);
1433 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1434 	if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
1435 			T6_MAX_AAD_SIZE,
1436 			transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
1437 				op_type)) {
1438 		return ERR_PTR(chcr_aead_fallback(req, op_type));
1439 	}
1440 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1441 	if (!skb)
1442 		goto err;
1443 
1444 	/* LLD is going to write the sge hdr. */
1445 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1446 
1447 	/* Write WR */
1448 	chcr_req = __skb_put_zero(skb, transhdr_len);
1449 
1450 	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1451 
1452 	/*
1453 	 * Input order is AAD, IV and Payload, where the IV is included as
1454 	 * part of the authenticated data. All other fields are filled
1455 	 * according to the hardware spec.
1456 	 */
1457 	chcr_req->sec_cpl.op_ivinsrtofst =
1458 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
1459 				       (ivsize ? (assoclen + 1) : 0));
1460 	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
1461 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1462 					assoclen ? 1 : 0, assoclen,
1463 					assoclen + ivsize + 1,
1464 					(stop_offset & 0x1F0) >> 4);
1465 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
1466 					stop_offset & 0xF,
1467 					null ? 0 : assoclen + ivsize + 1,
1468 					stop_offset, stop_offset);
1469 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1470 					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
1471 					CHCR_SCMD_CIPHER_MODE_AES_CBC,
1472 					actx->auth_mode, aeadctx->hmac_ctrl,
1473 					ivsize >> 1);
1474 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1475 					 0, 1, dst_size);
1476 
1477 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1478 	if (op_type == CHCR_ENCRYPT_OP)
1479 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
1480 		       aeadctx->enckey_len);
1481 	else
1482 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
1483 		       aeadctx->enckey_len);
1484 
1485 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
1486 					4), actx->h_iopad, kctx_len -
1487 				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
1488 
1489 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1490 	sg_param.nents = reqctx->dst_nents;
1491 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1492 	sg_param.qid = qid;
1493 	sg_param.align = 0;
1494 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1495 				  &sg_param))
1496 		goto dstmap_fail;
1497 
1498 	skb_set_transport_header(skb, transhdr_len);
1499 
1500 	if (assoclen) {
1501 		/* AAD buffer in */
1502 		write_sg_to_skb(skb, &frags, req->src, assoclen);
1503 
1504 	}
1505 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
1506 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
1507 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1508 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1509 	reqctx->skb = skb;
1510 	skb_get(skb);
1511 
1512 	return skb;
1513 dstmap_fail:
1514 	/* ivmap_fail: */
1515 	kfree_skb(skb);
1516 err:
1517 	return ERR_PTR(-EINVAL);
1518 }
1519 
1520 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
1521 {
1522 	__be32 data;
1523 
1524 	memset(block, 0, csize);
1525 	block += csize;
1526 
1527 	if (csize >= 4)
1528 		csize = 4;
1529 	else if (msglen > (unsigned int)(1 << (8 * csize)))
1530 		return -EOVERFLOW;
1531 
1532 	data = cpu_to_be32(msglen);
1533 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1534 
1535 	return 0;
1536 }
1537 
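/*
 * generate_b0 - build the CCM B0 block (RFC 3610): the flags byte encodes
 * the tag length M and the length-field size L, followed by the nonce and
 * the message length in the final L bytes.
 */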
1538 static void generate_b0(struct aead_request *req,
1539 			struct chcr_aead_ctx *aeadctx,
1540 			unsigned short op_type)
1541 {
1542 	unsigned int l, lp, m;
1543 	int rc;
1544 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1545 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1546 	u8 *b0 = reqctx->scratch_pad;
1547 
1548 	m = crypto_aead_authsize(aead);
1549 
1550 	memcpy(b0, reqctx->iv, 16);
1551 
1552 	lp = b0[0];
1553 	l = lp + 1;
1554 
1555 	/* set m, bits 3-5 */
1556 	*b0 |= (8 * ((m - 2) / 2));
1557 
1558 	/* set adata, bit 6, if associated data is used */
1559 	if (req->assoclen)
1560 		*b0 |= 64;
1561 	rc = set_msg_len(b0 + 16 - l,
1562 			 (op_type == CHCR_DECRYPT_OP) ?
1563 			 req->cryptlen - m : req->cryptlen, l);
1564 }
1565 
1566 static inline int crypto_ccm_check_iv(const u8 *iv)
1567 {
1568 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1569 	if (iv[0] < 1 || iv[0] > 7)
1570 		return -EINVAL;
1571 
1572 	return 0;
1573 }
1574 
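/*
 * ccm_format_packet - set up the CCM nonce/counter block and the AAD
 * length field.  For RFC4309 the nonce is built from the 3-byte salt and
 * the 8-byte per-request IV with a flags byte of 3; otherwise the caller's
 * 16-byte IV is used as-is.
 */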
1575 static int ccm_format_packet(struct aead_request *req,
1576 			     struct chcr_aead_ctx *aeadctx,
1577 			     unsigned int sub_type,
1578 			     unsigned short op_type)
1579 {
1580 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1581 	int rc = 0;
1582 
1583 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1584 		reqctx->iv[0] = 3;
1585 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
1586 		memcpy(reqctx->iv + 4, req->iv, 8);
1587 		memset(reqctx->iv + 12, 0, 4);
1588 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1589 			htons(req->assoclen - 8);
1590 	} else {
1591 		memcpy(reqctx->iv, req->iv, 16);
1592 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1593 			htons(req->assoclen);
1594 	}
1595 	generate_b0(req, aeadctx, op_type);
1596 	/* zero the ctr value */
1597 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
1598 	return rc;
1599 }
1600 
1601 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
1602 				  unsigned int dst_size,
1603 				  struct aead_request *req,
1604 				  unsigned short op_type,
1605 					  struct chcr_context *chcrctx)
1606 {
1607 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1608 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1609 	unsigned int ivsize = AES_BLOCK_SIZE;
1610 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
1611 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
1612 	unsigned int c_id = chcrctx->dev->rx_channel_id;
1613 	unsigned int ccm_xtra;
1614 	unsigned char tag_offset = 0, auth_offset = 0;
1615 	unsigned int assoclen;
1616 
1617 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1618 		assoclen = req->assoclen - 8;
1619 	else
1620 		assoclen = req->assoclen;
1621 	ccm_xtra = CCM_B0_SIZE +
1622 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
1623 
1624 	auth_offset = req->cryptlen ?
1625 		(assoclen + ivsize + 1 + ccm_xtra) : 0;
1626 	if (op_type == CHCR_DECRYPT_OP) {
1627 		if (crypto_aead_authsize(tfm) != req->cryptlen)
1628 			tag_offset = crypto_aead_authsize(tfm);
1629 		else
1630 			auth_offset = 0;
1631 	}
1632
1634 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
1635 					 2, (ivsize ?  (assoclen + 1) :  0) +
1636 					 ccm_xtra);
1637 	sec_cpl->pldlen =
1638 		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
1639 	/* For CCM there will always be a B0 block, so AAD start is always 1 */
1640 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1641 					1, assoclen + ccm_xtra, assoclen
1642 					+ ivsize + 1 + ccm_xtra, 0);
1643 
1644 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
1645 					auth_offset, tag_offset,
1646 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
1647 					crypto_aead_authsize(tfm));
1648 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1649 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
1650 					cipher_mode, mac_mode,
1651 					aeadctx->hmac_ctrl, ivsize >> 1);
1652 
1653 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1654 					1, dst_size);
1655 }
1656 
1657 int aead_ccm_validate_input(unsigned short op_type,
1658 			    struct aead_request *req,
1659 			    struct chcr_aead_ctx *aeadctx,
1660 			    unsigned int sub_type)
1661 {
1662 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1663 		if (crypto_ccm_check_iv(req->iv)) {
1664 			pr_err("CCM: IV check fails\n");
1665 			return -EINVAL;
1666 		}
1667 	} else {
1668 		if (req->assoclen != 16 && req->assoclen != 20) {
1669 			pr_err("RFC4309: Invalid AAD length %u\n",
1670 			       req->assoclen);
1671 			return -EINVAL;
1672 		}
1673 	}
1674 	if (aeadctx->enckey_len == 0) {
1675 		pr_err("CCM: Encryption key not set\n");
1676 		return -EINVAL;
1677 	}
1678 	return 0;
1679 }
1680 
1681 unsigned int fill_aead_req_fields(struct sk_buff *skb,
1682 				  struct aead_request *req,
1683 				  struct scatterlist *src,
1684 				  unsigned int ivsize,
1685 				  struct chcr_aead_ctx *aeadctx)
1686 {
1687 	unsigned int frags = 0;
1688 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1689 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1690 	/* B0 and the AAD length field (if associated data is present) */
1691 
1692 	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
1693 				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
1694 	if (req->assoclen) {
1695 		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1696 			write_sg_to_skb(skb, &frags, req->src,
1697 					req->assoclen - 8);
1698 		else
1699 			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1700 	}
1701 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1702 	if (req->cryptlen)
1703 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
1704 
1705 	return frags;
1706 }
1707 
1708 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1709 					  unsigned short qid,
1710 					  int size,
1711 					  unsigned short op_type)
1712 {
1713 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1714 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1715 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1716 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1717 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1718 	struct sk_buff *skb = NULL;
1719 	struct chcr_wr *chcr_req;
1720 	struct cpl_rx_phys_dsgl *phys_cpl;
1721 	struct phys_sge_parm sg_param;
1722 	struct scatterlist *src;
1723 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
1724 	unsigned int dst_size = 0, kctx_len;
1725 	unsigned int sub_type;
1726 	unsigned int authsize = crypto_aead_authsize(tfm);
1727 	int err = -EINVAL, src_nent;
1728 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1729 		GFP_ATOMIC;
1730 
1731 
1732 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1733 		goto err;
1734 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1735 	if (src_nent < 0)
1736 		goto err;
1737 
1738 	sub_type = get_aead_subtype(tfm);
1739 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1740 	reqctx->dst = src;
1741 
1742 	if (req->src != req->dst) {
1743 		err = chcr_copy_assoc(req, aeadctx);
1744 		if (err) {
1745 			pr_err("AAD copy to destination buffer failed\n");
1746 			return ERR_PTR(err);
1747 		}
1748 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1749 					       req->assoclen);
1750 	}
1751 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1752 					     (op_type ? -authsize : authsize));
1753 	if (reqctx->dst_nents < 0) {
1754 		pr_err("CCM: Invalid destination sg entries\n");
1755 		goto err;
1756 	}
1757 
1758 
1759 	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
1760 		goto err;
1761 
1762 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1763 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
1764 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1765 	if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
1766 			    T6_MAX_AAD_SIZE - 18,
1767 			    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
1768 			    op_type)) {
1769 		return ERR_PTR(chcr_aead_fallback(req, op_type));
1770 	}
1771 
1772 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
1773 
1774 	if (!skb)
1775 		goto err;
1776 
1777 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1778 
1779 	chcr_req = __skb_put_zero(skb, transhdr_len);
1780 
1781 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
1782 
1783 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1784 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1785 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1786 					16), aeadctx->key, aeadctx->enckey_len);
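	/* The AES key is written twice: CCM uses the same key for the
	 * CBC-MAC and the CTR encryption, and chcr_ccm_common_setkey()
	 * sizes the key context for two copies accordingly.
	 */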
1787 
1788 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1789 	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
1790 		goto dstmap_fail;
1791 
1792 	sg_param.nents = reqctx->dst_nents;
1793 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1794 	sg_param.qid = qid;
1795 	sg_param.align = 0;
1796 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1797 				  &sg_param))
1798 		goto dstmap_fail;
1799 
1800 	skb_set_transport_header(skb, transhdr_len);
1801 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
1802 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
1803 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1804 	reqctx->skb = skb;
1805 	skb_get(skb);
1806 	return skb;
1807 dstmap_fail:
1808 	kfree_skb(skb);
1809 	skb = NULL;
1810 err:
1811 	return ERR_PTR(-EINVAL);
1812 }
1813 
1814 static struct sk_buff *create_gcm_wr(struct aead_request *req,
1815 				     unsigned short qid,
1816 				     int size,
1817 				     unsigned short op_type)
1818 {
1819 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1820 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1821 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1822 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1823 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
1824 	struct sk_buff *skb = NULL;
1825 	struct chcr_wr *chcr_req;
1826 	struct cpl_rx_phys_dsgl *phys_cpl;
1827 	struct phys_sge_parm sg_param;
1828 	struct scatterlist *src;
1829 	unsigned int frags = 0, transhdr_len;
1830 	unsigned int ivsize = AES_BLOCK_SIZE;
1831 	unsigned int dst_size = 0, kctx_len;
1832 	unsigned char tag_offset = 0;
1833 	unsigned int crypt_len = 0;
1834 	unsigned int authsize = crypto_aead_authsize(tfm);
1835 	int err = -EINVAL, src_nent;
1836 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1837 		GFP_ATOMIC;
1838 
1839 	/* validate key size */
1840 	if (aeadctx->enckey_len == 0)
1841 		goto err;
1842 
1843 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1844 		goto err;
1845 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1846 	if (src_nent < 0)
1847 		goto err;
1848 
1849 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1850 	reqctx->dst = src;
1851 	if (req->src != req->dst) {
1852 		err = chcr_copy_assoc(req, aeadctx);
1853 		if (err)
1854 			return	ERR_PTR(err);
1855 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1856 					       req->assoclen);
1857 	}
1858 
1859 	if (!req->cryptlen)
1860 		/* A null payload is not supported by the hardware,
1861 		 * so software submits one AES block instead.
1862 		 */
1863 		crypt_len = AES_BLOCK_SIZE;
1864 	else
1865 		crypt_len = req->cryptlen;
1866 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1867 					     (op_type ? -authsize : authsize));
1868 	if (reqctx->dst_nents < 0) {
1869 		pr_err("GCM: Invalid destination sg entries\n");
1870 		goto err;
1871 	}
1872 
1873 
1874 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1875 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
1876 		AEAD_H_SIZE;
1877 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1878 	if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
1879 			    T6_MAX_AAD_SIZE,
1880 			    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
1881 			    op_type)) {
1882 		return ERR_PTR(chcr_aead_fallback(req, op_type));
1883 	}
1884 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1885 	if (!skb)
1886 		goto err;
1887 
1888 	/* The NIC driver will write the SGE header here. */
1889 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1890 
1891 	chcr_req = __skb_put_zero(skb, transhdr_len);
1892 
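	/* For RFC 4106 the caller passes the 8-byte explicit IV as the tail
	 * of the associated data (assoclen is 16 or 20), but it is not part
	 * of the authenticated AAD, so it is trimmed off here.
	 */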
1893 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
1894 		req->assoclen -= 8;
1895 
1896 	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1897 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
1898 					ctx->dev->rx_channel_id, 2, (ivsize ?
1899 					(req->assoclen + 1) : 0));
1900 	chcr_req->sec_cpl.pldlen =
1901 		htonl(req->assoclen + ivsize + req->cryptlen);
1902 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1903 					req->assoclen ? 1 : 0, req->assoclen,
1904 					req->assoclen + ivsize + 1, 0);
1905 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1906 		FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
1907 					tag_offset, tag_offset);
1908 	chcr_req->sec_cpl.seqno_numivs =
1909 		FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
1910 				CHCR_ENCRYPT_OP) ? 1 : 0,
1911 				CHCR_SCMD_CIPHER_MODE_AES_GCM,
1912 				CHCR_SCMD_AUTH_MODE_GHASH,
1913 				aeadctx->hmac_ctrl, ivsize >> 1);
1914 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1915 					0, 1, dst_size);
1916 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1917 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1918 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1919 				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
1920 
1921 	/* prepare a 16 byte iv */
1922 	/* S   A   L  T |  IV | 0x00000001 */
1923 	if (get_aead_subtype(tfm) ==
1924 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
1925 		memcpy(reqctx->iv, aeadctx->salt, 4);
1926 		memcpy(reqctx->iv + 4, req->iv, 8);
1927 	} else {
1928 		memcpy(reqctx->iv, req->iv, 12);
1929 	}
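	/* With a 96-bit IV, NIST SP 800-38D defines the pre-counter block
	 * J0 as IV || 0x00000001, which is what gets programmed below.
	 */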
1930 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
1931 
1932 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1933 	sg_param.nents = reqctx->dst_nents;
1934 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1935 	sg_param.qid = qid;
1936 	sg_param.align = 0;
1937 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1938 				  &sg_param))
1939 		goto dstmap_fail;
1940 
1941 	skb_set_transport_header(skb, transhdr_len);
1942 
1943 	write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1944 
1945 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1946 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
1947 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1948 			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1949 	reqctx->skb = skb;
1950 	skb_get(skb);
1951 	return skb;
1952 
1953 dstmap_fail:
1954 	/* ivmap_fail: */
1955 	kfree_skb(skb);
1956 	skb = NULL;
1957 err:
1958 	return skb;
1959 }
1960 
1961 
1962 
1963 static int chcr_aead_cra_init(struct crypto_aead *tfm)
1964 {
1965 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1966 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1967 	struct aead_alg *alg = crypto_aead_alg(tfm);
1968 
1969 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
1970 					       CRYPTO_ALG_NEED_FALLBACK);
1971 	if  (IS_ERR(aeadctx->sw_cipher))
1972 		return PTR_ERR(aeadctx->sw_cipher);
1973 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
1974 				 sizeof(struct aead_request) +
1975 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
1976 	aeadctx->null = crypto_get_default_null_skcipher();
1977 	if (IS_ERR(aeadctx->null))
1978 		return PTR_ERR(aeadctx->null);
1979 	return chcr_device_init(ctx);
1980 }
1981 
1982 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
1983 {
1984 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1985 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1986 
1987 	crypto_put_default_null_skcipher();
1988 	crypto_free_aead(aeadctx->sw_cipher);
1989 }
1990 
1991 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
1992 					unsigned int authsize)
1993 {
1994 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1995 
1996 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
1997 	aeadctx->mayverify = VERIFY_HW;
1998 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
1999 }
2000 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2001 				    unsigned int authsize)
2002 {
2003 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2004 	u32 maxauth = crypto_aead_maxauthsize(tfm);
2005 
2006 	/* The SHA1 authsize used in IPsec is 12 bytes, not 10, i.e.
2007 	 * maxauthsize / 2 does not hold for SHA1, so the authsize == 12
2008 	 * check must come before the authsize == (maxauth >> 1) check.
2009 	 */
2010 	if (authsize == ICV_4) {
2011 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2012 		aeadctx->mayverify = VERIFY_HW;
2013 	} else if (authsize == ICV_6) {
2014 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2015 		aeadctx->mayverify = VERIFY_HW;
2016 	} else if (authsize == ICV_10) {
2017 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2018 		aeadctx->mayverify = VERIFY_HW;
2019 	} else if (authsize == ICV_12) {
2020 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2021 		aeadctx->mayverify = VERIFY_HW;
2022 	} else if (authsize == ICV_14) {
2023 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2024 		aeadctx->mayverify = VERIFY_HW;
2025 	} else if (authsize == (maxauth >> 1)) {
2026 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2027 		aeadctx->mayverify = VERIFY_HW;
2028 	} else if (authsize == maxauth) {
2029 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2030 		aeadctx->mayverify = VERIFY_HW;
2031 	} else {
2032 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2033 		aeadctx->mayverify = VERIFY_SW;
2034 	}
2035 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2036 }
2037 
2038 
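/* 13- and 15-byte GCM tags are not among the truncation lengths the
 * hardware can produce, so those cases are marked VERIFY_SW and the
 * truncated tag is checked outside the hardware path.
 */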
2039 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2040 {
2041 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2042 
2043 	switch (authsize) {
2044 	case ICV_4:
2045 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2046 		aeadctx->mayverify = VERIFY_HW;
2047 		break;
2048 	case ICV_8:
2049 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2050 		aeadctx->mayverify = VERIFY_HW;
2051 		break;
2052 	case ICV_12:
2053 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2054 		aeadctx->mayverify = VERIFY_HW;
2055 		break;
2056 	case ICV_14:
2057 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2058 		aeadctx->mayverify = VERIFY_HW;
2059 		break;
2060 	case ICV_16:
2061 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2062 		aeadctx->mayverify = VERIFY_HW;
2063 		break;
2064 	case ICV_13:
2065 	case ICV_15:
2066 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2067 		aeadctx->mayverify = VERIFY_SW;
2068 		break;
2069 	default:
2070 
2071 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2072 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2073 		return -EINVAL;
2074 	}
2075 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2076 }
2077 
2078 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2079 					  unsigned int authsize)
2080 {
2081 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2082 
2083 	switch (authsize) {
2084 	case ICV_8:
2085 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2086 		aeadctx->mayverify = VERIFY_HW;
2087 		break;
2088 	case ICV_12:
2089 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2090 		aeadctx->mayverify = VERIFY_HW;
2091 		break;
2092 	case ICV_16:
2093 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2094 		aeadctx->mayverify = VERIFY_HW;
2095 		break;
2096 	default:
2097 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2098 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2099 		return -EINVAL;
2100 	}
2101 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2102 }
2103 
2104 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2105 				unsigned int authsize)
2106 {
2107 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2108 
2109 	switch (authsize) {
2110 	case ICV_4:
2111 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2112 		aeadctx->mayverify = VERIFY_HW;
2113 		break;
2114 	case ICV_6:
2115 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2116 		aeadctx->mayverify = VERIFY_HW;
2117 		break;
2118 	case ICV_8:
2119 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2120 		aeadctx->mayverify = VERIFY_HW;
2121 		break;
2122 	case ICV_10:
2123 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2124 		aeadctx->mayverify = VERIFY_HW;
2125 		break;
2126 	case ICV_12:
2127 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2128 		aeadctx->mayverify = VERIFY_HW;
2129 		break;
2130 	case ICV_14:
2131 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2132 		aeadctx->mayverify = VERIFY_HW;
2133 		break;
2134 	case ICV_16:
2135 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2136 		aeadctx->mayverify = VERIFY_HW;
2137 		break;
2138 	default:
2139 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2140 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2141 		return -EINVAL;
2142 	}
2143 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2144 }
2145 
2146 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2147 				const u8 *key,
2148 				unsigned int keylen)
2149 {
2150 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2151 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2152 	unsigned char ck_size, mk_size;
2153 	int key_ctx_size = 0;
2154 
2155 	key_ctx_size = sizeof(struct _key_ctx) +
2156 		((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
2157 	if (keylen == AES_KEYSIZE_128) {
2158 		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2159 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2160 	} else if (keylen == AES_KEYSIZE_192) {
2161 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2162 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2163 	} else if (keylen == AES_KEYSIZE_256) {
2164 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2165 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2166 	} else {
2167 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2168 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2169 		aeadctx->enckey_len = 0;
2170 		return	-EINVAL;
2171 	}
2172 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2173 						key_ctx_size >> 4);
2174 	memcpy(aeadctx->key, key, keylen);
2175 	aeadctx->enckey_len = keylen;
2176 
2177 	return 0;
2178 }
2179 
2180 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2181 				const u8 *key,
2182 				unsigned int keylen)
2183 {
2184 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2185 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2186 	int error;
2187 
2188 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2189 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2190 			      CRYPTO_TFM_REQ_MASK);
2191 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2192 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2193 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2194 			      CRYPTO_TFM_RES_MASK);
2195 	if (error)
2196 		return error;
2197 	return chcr_ccm_common_setkey(aead, key, keylen);
2198 }
2199 
2200 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2201 				    unsigned int keylen)
2202 {
2203 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2204 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2205 
2206 	if (keylen < 3) {
2207 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2208 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2209 		aeadctx->enckey_len = 0;
2210 		return	-EINVAL;
2211 	}
2212 	keylen -= 3;
2213 	memcpy(aeadctx->salt, key + keylen, 3);
2214 	return chcr_ccm_common_setkey(aead, key, keylen);
2215 }
2216 
2217 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2218 			   unsigned int keylen)
2219 {
2220 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2221 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2222 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2223 	struct crypto_cipher *cipher;
2224 	unsigned int ck_size;
2225 	int ret = 0, key_ctx_size = 0;
2226 
2227 	aeadctx->enckey_len = 0;
2228 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2229 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
2230 			      & CRYPTO_TFM_REQ_MASK);
2231 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2232 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2233 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2234 			      CRYPTO_TFM_RES_MASK);
2235 	if (ret)
2236 		goto out;
2237 
2238 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2239 	    keylen > 3) {
2240 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2241 		memcpy(aeadctx->salt, key + keylen, 4);
2242 	}
2243 	if (keylen == AES_KEYSIZE_128) {
2244 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2245 	} else if (keylen == AES_KEYSIZE_192) {
2246 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2247 	} else if (keylen == AES_KEYSIZE_256) {
2248 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2249 	} else {
2250 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2251 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2252 		pr_err("GCM: Invalid key length %d\n", keylen);
2253 		ret = -EINVAL;
2254 		goto out;
2255 	}
2256 
2257 	memcpy(aeadctx->key, key, keylen);
2258 	aeadctx->enckey_len = keylen;
2259 	key_ctx_size = sizeof(struct _key_ctx) +
2260 		((DIV_ROUND_UP(keylen, 16)) << 4) +
2261 		AEAD_H_SIZE;
2262 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2263 					CHCR_KEYCTX_MAC_KEY_SIZE_128,
2264 					0, 0,
2265 					key_ctx_size >> 4);
2266 	/* Calculate the GHASH hash subkey H = CIPH(K, 0^128);
2267 	 * it is stored in the key context right after the AES key.
2268 	 */
2269 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
2270 	if (IS_ERR(cipher)) {
2271 		aeadctx->enckey_len = 0;
2272 		ret = -ENOMEM;
2273 		goto out;
2274 	}
2275 
2276 	ret = crypto_cipher_setkey(cipher, key, keylen);
2277 	if (ret) {
2278 		aeadctx->enckey_len = 0;
2279 		goto out1;
2280 	}
2281 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2282 	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2283 
2284 out1:
2285 	crypto_free_cipher(cipher);
2286 out:
2287 	return ret;
2288 }
2289 
2290 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2291 				   unsigned int keylen)
2292 {
2293 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2294 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2295 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2296 	/* the key blob contains both the auth and the cipher key */
2297 	struct crypto_authenc_keys keys;
2298 	unsigned int bs;
2299 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2300 	int err = 0, i, key_ctx_len = 0;
2301 	unsigned char ck_size = 0;
2302 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2303 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2304 	struct algo_param param;
2305 	int align;
2306 	u8 *o_ptr = NULL;
2307 
2308 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2309 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2310 			      & CRYPTO_TFM_REQ_MASK);
2311 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2312 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2313 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2314 			      & CRYPTO_TFM_RES_MASK);
2315 	if (err)
2316 		goto out;
2317 
2318 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2319 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2320 		goto out;
2321 	}
2322 
2323 	if (get_alg_config(&param, max_authsize)) {
2324 		pr_err("chcr : Unsupported digest size\n");
2325 		goto out;
2326 	}
2327 	if (keys.enckeylen == AES_KEYSIZE_128) {
2328 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2329 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2330 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2331 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2332 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2333 	} else {
2334 		pr_err("chcr : Unsupported cipher key\n");
2335 		goto out;
2336 	}
2337 
2338 	/* Copy only the encryption key. The auth key is used below to
2339 	 * generate h(ipad) and h(opad), so it is not needed again;
2340 	 * authkeylen is the size of the hash digest.
2341 	 */
2342 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2343 	aeadctx->enckey_len = keys.enckeylen;
2344 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2345 			    aeadctx->enckey_len << 3);
2346 
2347 	base_hash  = chcr_alloc_shash(max_authsize);
2348 	if (IS_ERR(base_hash)) {
2349 		pr_err("chcr : Base driver cannot be loaded\n");
2350 		aeadctx->enckey_len = 0;
2351 		return -EINVAL;
2352 	}
2353 	{
2354 		SHASH_DESC_ON_STACK(shash, base_hash);
2355 		shash->tfm = base_hash;
2356 		shash->flags = crypto_shash_get_flags(base_hash);
2357 		bs = crypto_shash_blocksize(base_hash);
2358 		align = KEYCTX_ALIGN_PAD(max_authsize);
2359 		o_ptr =  actx->h_iopad + param.result_size + align;
2360 
2361 		if (keys.authkeylen > bs) {
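		/* Standard HMAC key handling (RFC 2104): a key longer than
		 * the block size is first replaced by its digest; shorter
		 * keys are zero-padded to a full block before the ipad/opad
		 * XOR below.
		 */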
2362 			err = crypto_shash_digest(shash, keys.authkey,
2363 						  keys.authkeylen,
2364 						  o_ptr);
2365 			if (err) {
2366 				pr_err("chcr : Digest of the auth key failed\n");
2367 				goto out;
2368 			}
2369 			keys.authkeylen = max_authsize;
2370 		} else
2371 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
2372 
2373 		/* Compute the ipad digest */
2374 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2375 		memcpy(pad, o_ptr, keys.authkeylen);
2376 		for (i = 0; i < bs >> 2; i++)
2377 			*((unsigned int *)pad + i) ^= IPAD_DATA;
2378 
2379 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
2380 					      max_authsize))
2381 			goto out;
2382 		/* Compute the opad-digest */
2383 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2384 		memcpy(pad, o_ptr, keys.authkeylen);
2385 		for (i = 0; i < bs >> 2; i++)
2386 			*((unsigned int *)pad + i) ^= OPAD_DATA;
2387 
2388 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
2389 			goto out;
2390 
2391 		/* convert the ipad and opad digests to network order */
2392 		chcr_change_order(actx->h_iopad, param.result_size);
2393 		chcr_change_order(o_ptr, param.result_size);
2394 		key_ctx_len = sizeof(struct _key_ctx) +
2395 			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
2396 			(param.result_size + align) * 2;
2397 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
2398 						0, 1, key_ctx_len >> 4);
2399 		actx->auth_mode = param.auth_mode;
2400 		chcr_free_shash(base_hash);
2401 
2402 		return 0;
2403 	}
2404 out:
2405 	aeadctx->enckey_len = 0;
2406 	if (!IS_ERR(base_hash))
2407 		chcr_free_shash(base_hash);
2408 	return -EINVAL;
2409 }
2410 
2411 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
2412 					const u8 *key, unsigned int keylen)
2413 {
2414 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2415 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2416 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2417 	struct crypto_authenc_keys keys;
2418 	int err;
2419 	/* the key blob contains both the auth and the cipher key */
2420 	int key_ctx_len = 0;
2421 	unsigned char ck_size = 0;
2422 
2423 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2424 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2425 			      & CRYPTO_TFM_REQ_MASK);
2426 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2427 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2428 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2429 			      & CRYPTO_TFM_RES_MASK);
2430 	if (err)
2431 		goto out;
2432 
2433 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2434 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2435 		goto out;
2436 	}
2437 	if (keys.enckeylen == AES_KEYSIZE_128) {
2438 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2439 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2440 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2441 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2442 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2443 	} else {
2444 		pr_err("chcr : Unsupported cipher key\n");
2445 		goto out;
2446 	}
2447 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2448 	aeadctx->enckey_len = keys.enckeylen;
2449 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2450 				    aeadctx->enckey_len << 3);
2451 	key_ctx_len =  sizeof(struct _key_ctx)
2452 		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
2453 
2454 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
2455 						0, key_ctx_len >> 4);
2456 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
2457 	return 0;
2458 out:
2459 	aeadctx->enckey_len = 0;
2460 	return -EINVAL;
2461 }
2462 static int chcr_aead_encrypt(struct aead_request *req)
2463 {
2464 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2465 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2466 
2467 	reqctx->verify = VERIFY_HW;
2468 
2469 	switch (get_aead_subtype(tfm)) {
2470 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2471 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2472 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2473 				    create_authenc_wr);
2474 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2475 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2476 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2477 				    create_aead_ccm_wr);
2478 	default:
2479 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2480 				    create_gcm_wr);
2481 	}
2482 }
2483 
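/* On decrypt, mayverify == VERIFY_SW makes the request carry the full
 * maxauthsize tag (the extra "size" passed to the WR builder) so the
 * truncated tag can be checked by the driver on completion.
 */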
2484 static int chcr_aead_decrypt(struct aead_request *req)
2485 {
2486 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2487 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2488 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2489 	int size;
2490 
2491 	if (aeadctx->mayverify == VERIFY_SW) {
2492 		size = crypto_aead_maxauthsize(tfm);
2493 		reqctx->verify = VERIFY_SW;
2494 	} else {
2495 		size = 0;
2496 		reqctx->verify = VERIFY_HW;
2497 	}
2498 
2499 	switch (get_aead_subtype(tfm)) {
2500 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2501 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2502 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2503 				    create_authenc_wr);
2504 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2505 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2506 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2507 				    create_aead_ccm_wr);
2508 	default:
2509 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2510 				    create_gcm_wr);
2511 	}
2512 }
2513 
2514 static int chcr_aead_op(struct aead_request *req,
2515 			  unsigned short op_type,
2516 			  int size,
2517 			  create_wr_t create_wr_fn)
2518 {
2519 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2520 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2521 	struct uld_ctx *u_ctx;
2522 	struct sk_buff *skb;
2523 
2524 	if (!ctx->dev) {
2525 		pr_err("chcr : %s : No crypto device.\n", __func__);
2526 		return -ENXIO;
2527 	}
2528 	u_ctx = ULD_CTX(ctx);
2529 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2530 				   ctx->tx_qidx)) {
2531 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
2532 			return -EBUSY;
2533 	}
2534 
2535 	/* Form a WR from req */
2536 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
2537 			   op_type);
2538 
2539 	if (IS_ERR(skb) || !skb)
2540 		return PTR_ERR(skb);
2541 
2542 	skb->dev = u_ctx->lldi.ports[0];
2543 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
2544 	chcr_send_wr(skb);
2545 	return -EINPROGRESS;
2546 }
2547 static struct chcr_alg_template driver_algs[] = {
2548 	/* AES-CBC */
2549 	{
2550 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2551 		.is_registered = 0,
2552 		.alg.crypto = {
2553 			.cra_name		= "cbc(aes)",
2554 			.cra_driver_name	= "cbc-aes-chcr",
2555 			.cra_priority		= CHCR_CRA_PRIORITY,
2556 			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
2557 				CRYPTO_ALG_ASYNC,
2558 			.cra_blocksize		= AES_BLOCK_SIZE,
2559 			.cra_ctxsize		= sizeof(struct chcr_context)
2560 				+ sizeof(struct ablk_ctx),
2561 			.cra_alignmask		= 0,
2562 			.cra_type		= &crypto_ablkcipher_type,
2563 			.cra_module		= THIS_MODULE,
2564 			.cra_init		= chcr_cra_init,
2565 			.cra_exit		= NULL,
2566 			.cra_u.ablkcipher	= {
2567 				.min_keysize	= AES_MIN_KEY_SIZE,
2568 				.max_keysize	= AES_MAX_KEY_SIZE,
2569 				.ivsize		= AES_BLOCK_SIZE,
2570 				.setkey			= chcr_aes_cbc_setkey,
2571 				.encrypt		= chcr_aes_encrypt,
2572 				.decrypt		= chcr_aes_decrypt,
2573 			}
2574 		}
2575 	},
2576 	{
2577 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2578 		.is_registered = 0,
2579 		.alg.crypto =   {
2580 			.cra_name		= "xts(aes)",
2581 			.cra_driver_name	= "xts-aes-chcr",
2582 			.cra_priority		= CHCR_CRA_PRIORITY,
2583 			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
2584 				CRYPTO_ALG_ASYNC,
2585 			.cra_blocksize		= AES_BLOCK_SIZE,
2586 			.cra_ctxsize		= sizeof(struct chcr_context) +
2587 				sizeof(struct ablk_ctx),
2588 			.cra_alignmask		= 0,
2589 			.cra_type		= &crypto_ablkcipher_type,
2590 			.cra_module		= THIS_MODULE,
2591 			.cra_init		= chcr_cra_init,
2592 			.cra_exit		= NULL,
2593 			.cra_u = {
2594 				.ablkcipher = {
2595 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
2596 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
2597 					.ivsize		= AES_BLOCK_SIZE,
2598 					.setkey		= chcr_aes_xts_setkey,
2599 					.encrypt	= chcr_aes_encrypt,
2600 					.decrypt	= chcr_aes_decrypt,
2601 				}
2602 			}
2603 		}
2604 	},
2605 	/* SHA */
2606 	{
2607 		.type = CRYPTO_ALG_TYPE_AHASH,
2608 		.is_registered = 0,
2609 		.alg.hash = {
2610 			.halg.digestsize = SHA1_DIGEST_SIZE,
2611 			.halg.base = {
2612 				.cra_name = "sha1",
2613 				.cra_driver_name = "sha1-chcr",
2614 				.cra_blocksize = SHA1_BLOCK_SIZE,
2615 			}
2616 		}
2617 	},
2618 	{
2619 		.type = CRYPTO_ALG_TYPE_AHASH,
2620 		.is_registered = 0,
2621 		.alg.hash = {
2622 			.halg.digestsize = SHA256_DIGEST_SIZE,
2623 			.halg.base = {
2624 				.cra_name = "sha256",
2625 				.cra_driver_name = "sha256-chcr",
2626 				.cra_blocksize = SHA256_BLOCK_SIZE,
2627 			}
2628 		}
2629 	},
2630 	{
2631 		.type = CRYPTO_ALG_TYPE_AHASH,
2632 		.is_registered = 0,
2633 		.alg.hash = {
2634 			.halg.digestsize = SHA224_DIGEST_SIZE,
2635 			.halg.base = {
2636 				.cra_name = "sha224",
2637 				.cra_driver_name = "sha224-chcr",
2638 				.cra_blocksize = SHA224_BLOCK_SIZE,
2639 			}
2640 		}
2641 	},
2642 	{
2643 		.type = CRYPTO_ALG_TYPE_AHASH,
2644 		.is_registered = 0,
2645 		.alg.hash = {
2646 			.halg.digestsize = SHA384_DIGEST_SIZE,
2647 			.halg.base = {
2648 				.cra_name = "sha384",
2649 				.cra_driver_name = "sha384-chcr",
2650 				.cra_blocksize = SHA384_BLOCK_SIZE,
2651 			}
2652 		}
2653 	},
2654 	{
2655 		.type = CRYPTO_ALG_TYPE_AHASH,
2656 		.is_registered = 0,
2657 		.alg.hash = {
2658 			.halg.digestsize = SHA512_DIGEST_SIZE,
2659 			.halg.base = {
2660 				.cra_name = "sha512",
2661 				.cra_driver_name = "sha512-chcr",
2662 				.cra_blocksize = SHA512_BLOCK_SIZE,
2663 			}
2664 		}
2665 	},
2666 	/* HMAC */
2667 	{
2668 		.type = CRYPTO_ALG_TYPE_HMAC,
2669 		.is_registered = 0,
2670 		.alg.hash = {
2671 			.halg.digestsize = SHA1_DIGEST_SIZE,
2672 			.halg.base = {
2673 				.cra_name = "hmac(sha1)",
2674 				.cra_driver_name = "hmac-sha1-chcr",
2675 				.cra_blocksize = SHA1_BLOCK_SIZE,
2676 			}
2677 		}
2678 	},
2679 	{
2680 		.type = CRYPTO_ALG_TYPE_HMAC,
2681 		.is_registered = 0,
2682 		.alg.hash = {
2683 			.halg.digestsize = SHA224_DIGEST_SIZE,
2684 			.halg.base = {
2685 				.cra_name = "hmac(sha224)",
2686 				.cra_driver_name = "hmac-sha224-chcr",
2687 				.cra_blocksize = SHA224_BLOCK_SIZE,
2688 			}
2689 		}
2690 	},
2691 	{
2692 		.type = CRYPTO_ALG_TYPE_HMAC,
2693 		.is_registered = 0,
2694 		.alg.hash = {
2695 			.halg.digestsize = SHA256_DIGEST_SIZE,
2696 			.halg.base = {
2697 				.cra_name = "hmac(sha256)",
2698 				.cra_driver_name = "hmac-sha256-chcr",
2699 				.cra_blocksize = SHA256_BLOCK_SIZE,
2700 			}
2701 		}
2702 	},
2703 	{
2704 		.type = CRYPTO_ALG_TYPE_HMAC,
2705 		.is_registered = 0,
2706 		.alg.hash = {
2707 			.halg.digestsize = SHA384_DIGEST_SIZE,
2708 			.halg.base = {
2709 				.cra_name = "hmac(sha384)",
2710 				.cra_driver_name = "hmac-sha384-chcr",
2711 				.cra_blocksize = SHA384_BLOCK_SIZE,
2712 			}
2713 		}
2714 	},
2715 	{
2716 		.type = CRYPTO_ALG_TYPE_HMAC,
2717 		.is_registered = 0,
2718 		.alg.hash = {
2719 			.halg.digestsize = SHA512_DIGEST_SIZE,
2720 			.halg.base = {
2721 				.cra_name = "hmac(sha512)",
2722 				.cra_driver_name = "hmac-sha512-chcr",
2723 				.cra_blocksize = SHA512_BLOCK_SIZE,
2724 			}
2725 		}
2726 	},
2727 	/* Add AEAD Algorithms */
2728 	{
2729 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
2730 		.is_registered = 0,
2731 		.alg.aead = {
2732 			.base = {
2733 				.cra_name = "gcm(aes)",
2734 				.cra_driver_name = "gcm-aes-chcr",
2735 				.cra_blocksize	= 1,
2736 				.cra_priority = CHCR_AEAD_PRIORITY,
2737 				.cra_ctxsize =	sizeof(struct chcr_context) +
2738 						sizeof(struct chcr_aead_ctx) +
2739 						sizeof(struct chcr_gcm_ctx),
2740 			},
2741 			.ivsize = 12,
2742 			.maxauthsize = GHASH_DIGEST_SIZE,
2743 			.setkey = chcr_gcm_setkey,
2744 			.setauthsize = chcr_gcm_setauthsize,
2745 		}
2746 	},
2747 	{
2748 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
2749 		.is_registered = 0,
2750 		.alg.aead = {
2751 			.base = {
2752 				.cra_name = "rfc4106(gcm(aes))",
2753 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
2754 				.cra_blocksize	 = 1,
2755 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
2756 				.cra_ctxsize =	sizeof(struct chcr_context) +
2757 						sizeof(struct chcr_aead_ctx) +
2758 						sizeof(struct chcr_gcm_ctx),
2759 
2760 			},
2761 			.ivsize = 8,
2762 			.maxauthsize	= GHASH_DIGEST_SIZE,
2763 			.setkey = chcr_gcm_setkey,
2764 			.setauthsize	= chcr_4106_4309_setauthsize,
2765 		}
2766 	},
2767 	{
2768 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
2769 		.is_registered = 0,
2770 		.alg.aead = {
2771 			.base = {
2772 				.cra_name = "ccm(aes)",
2773 				.cra_driver_name = "ccm-aes-chcr",
2774 				.cra_blocksize	 = 1,
2775 				.cra_priority = CHCR_AEAD_PRIORITY,
2776 				.cra_ctxsize =	sizeof(struct chcr_context) +
2777 						sizeof(struct chcr_aead_ctx),
2778 
2779 			},
2780 			.ivsize = AES_BLOCK_SIZE,
2781 			.maxauthsize	= GHASH_DIGEST_SIZE,
2782 			.setkey = chcr_aead_ccm_setkey,
2783 			.setauthsize	= chcr_ccm_setauthsize,
2784 		}
2785 	},
2786 	{
2787 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
2788 		.is_registered = 0,
2789 		.alg.aead = {
2790 			.base = {
2791 				.cra_name = "rfc4309(ccm(aes))",
2792 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
2793 				.cra_blocksize	 = 1,
2794 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
2795 				.cra_ctxsize =	sizeof(struct chcr_context) +
2796 						sizeof(struct chcr_aead_ctx),
2797 
2798 			},
2799 			.ivsize = 8,
2800 			.maxauthsize	= GHASH_DIGEST_SIZE,
2801 			.setkey = chcr_aead_rfc4309_setkey,
2802 			.setauthsize = chcr_4106_4309_setauthsize,
2803 		}
2804 	},
2805 	{
2806 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2807 		.is_registered = 0,
2808 		.alg.aead = {
2809 			.base = {
2810 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2811 				.cra_driver_name =
2812 					"authenc-hmac-sha1-cbc-aes-chcr",
2813 				.cra_blocksize	 = AES_BLOCK_SIZE,
2814 				.cra_priority = CHCR_AEAD_PRIORITY,
2815 				.cra_ctxsize =	sizeof(struct chcr_context) +
2816 						sizeof(struct chcr_aead_ctx) +
2817 						sizeof(struct chcr_authenc_ctx),
2818 
2819 			},
2820 			.ivsize = AES_BLOCK_SIZE,
2821 			.maxauthsize = SHA1_DIGEST_SIZE,
2822 			.setkey = chcr_authenc_setkey,
2823 			.setauthsize = chcr_authenc_setauthsize,
2824 		}
2825 	},
2826 	{
2827 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2828 		.is_registered = 0,
2829 		.alg.aead = {
2830 			.base = {
2831 
2832 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2833 				.cra_driver_name =
2834 					"authenc-hmac-sha256-cbc-aes-chcr",
2835 				.cra_blocksize	 = AES_BLOCK_SIZE,
2836 				.cra_priority = CHCR_AEAD_PRIORITY,
2837 				.cra_ctxsize =	sizeof(struct chcr_context) +
2838 						sizeof(struct chcr_aead_ctx) +
2839 						sizeof(struct chcr_authenc_ctx),
2840 
2841 			},
2842 			.ivsize = AES_BLOCK_SIZE,
2843 			.maxauthsize	= SHA256_DIGEST_SIZE,
2844 			.setkey = chcr_authenc_setkey,
2845 			.setauthsize = chcr_authenc_setauthsize,
2846 		}
2847 	},
2848 	{
2849 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2850 		.is_registered = 0,
2851 		.alg.aead = {
2852 			.base = {
2853 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2854 				.cra_driver_name =
2855 					"authenc-hmac-sha224-cbc-aes-chcr",
2856 				.cra_blocksize	 = AES_BLOCK_SIZE,
2857 				.cra_priority = CHCR_AEAD_PRIORITY,
2858 				.cra_ctxsize =	sizeof(struct chcr_context) +
2859 						sizeof(struct chcr_aead_ctx) +
2860 						sizeof(struct chcr_authenc_ctx),
2861 			},
2862 			.ivsize = AES_BLOCK_SIZE,
2863 			.maxauthsize = SHA224_DIGEST_SIZE,
2864 			.setkey = chcr_authenc_setkey,
2865 			.setauthsize = chcr_authenc_setauthsize,
2866 		}
2867 	},
2868 	{
2869 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2870 		.is_registered = 0,
2871 		.alg.aead = {
2872 			.base = {
2873 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2874 				.cra_driver_name =
2875 					"authenc-hmac-sha384-cbc-aes-chcr",
2876 				.cra_blocksize	 = AES_BLOCK_SIZE,
2877 				.cra_priority = CHCR_AEAD_PRIORITY,
2878 				.cra_ctxsize =	sizeof(struct chcr_context) +
2879 						sizeof(struct chcr_aead_ctx) +
2880 						sizeof(struct chcr_authenc_ctx),
2881 
2882 			},
2883 			.ivsize = AES_BLOCK_SIZE,
2884 			.maxauthsize = SHA384_DIGEST_SIZE,
2885 			.setkey = chcr_authenc_setkey,
2886 			.setauthsize = chcr_authenc_setauthsize,
2887 		}
2888 	},
2889 	{
2890 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2891 		.is_registered = 0,
2892 		.alg.aead = {
2893 			.base = {
2894 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2895 				.cra_driver_name =
2896 					"authenc-hmac-sha512-cbc-aes-chcr",
2897 				.cra_blocksize	 = AES_BLOCK_SIZE,
2898 				.cra_priority = CHCR_AEAD_PRIORITY,
2899 				.cra_ctxsize =	sizeof(struct chcr_context) +
2900 						sizeof(struct chcr_aead_ctx) +
2901 						sizeof(struct chcr_authenc_ctx),
2902 
2903 			},
2904 			.ivsize = AES_BLOCK_SIZE,
2905 			.maxauthsize = SHA512_DIGEST_SIZE,
2906 			.setkey = chcr_authenc_setkey,
2907 			.setauthsize = chcr_authenc_setauthsize,
2908 		}
2909 	},
2910 	{
2911 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
2912 		.is_registered = 0,
2913 		.alg.aead = {
2914 			.base = {
2915 				.cra_name = "authenc(digest_null,cbc(aes))",
2916 				.cra_driver_name =
2917 					"authenc-digest_null-cbc-aes-chcr",
2918 				.cra_blocksize	 = AES_BLOCK_SIZE,
2919 				.cra_priority = CHCR_AEAD_PRIORITY,
2920 				.cra_ctxsize =	sizeof(struct chcr_context) +
2921 						sizeof(struct chcr_aead_ctx) +
2922 						sizeof(struct chcr_authenc_ctx),
2923 
2924 			},
2925 			.ivsize  = AES_BLOCK_SIZE,
2926 			.maxauthsize = 0,
2927 			.setkey  = chcr_aead_digest_null_setkey,
2928 			.setauthsize = chcr_authenc_null_setauthsize,
2929 		}
2930 	},
2931 };
2932 
2933 /*
2934  *	chcr_unregister_alg - Deregister crypto algorithms with
2935  *	the kernel framework.
2936  */
2937 static int chcr_unregister_alg(void)
2938 {
2939 	int i;
2940 
2941 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2942 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2943 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2944 			if (driver_algs[i].is_registered)
2945 				crypto_unregister_alg(
2946 						&driver_algs[i].alg.crypto);
2947 			break;
2948 		case CRYPTO_ALG_TYPE_AEAD:
2949 			if (driver_algs[i].is_registered)
2950 				crypto_unregister_aead(
2951 						&driver_algs[i].alg.aead);
2952 			break;
2953 		case CRYPTO_ALG_TYPE_AHASH:
2954 			if (driver_algs[i].is_registered)
2955 				crypto_unregister_ahash(
2956 						&driver_algs[i].alg.hash);
2957 			break;
2958 		}
2959 		driver_algs[i].is_registered = 0;
2960 	}
2961 	return 0;
2962 }
2963 
2964 #define SZ_AHASH_CTX sizeof(struct chcr_context)
2965 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
2966 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
2967 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
2968 
2969 /*
2970  *	chcr_register_alg - Register crypto algorithms with the kernel framework.
2971  */
2972 static int chcr_register_alg(void)
2973 {
2974 	struct crypto_alg ai;
2975 	struct ahash_alg *a_hash;
2976 	int err = 0, i;
2977 	char *name = NULL;
2978 
2979 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2980 		if (driver_algs[i].is_registered)
2981 			continue;
2982 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2983 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2984 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
2985 			name = driver_algs[i].alg.crypto.cra_driver_name;
2986 			break;
2987 		case CRYPTO_ALG_TYPE_AEAD:
2988 			driver_algs[i].alg.aead.base.cra_flags =
2989 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
2990 				CRYPTO_ALG_NEED_FALLBACK;
2991 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
2992 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
2993 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
2994 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
2995 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
2996 			err = crypto_register_aead(&driver_algs[i].alg.aead);
2997 			name = driver_algs[i].alg.aead.base.cra_driver_name;
2998 			break;
2999 		case CRYPTO_ALG_TYPE_AHASH:
3000 			a_hash = &driver_algs[i].alg.hash;
3001 			a_hash->update = chcr_ahash_update;
3002 			a_hash->final = chcr_ahash_final;
3003 			a_hash->finup = chcr_ahash_finup;
3004 			a_hash->digest = chcr_ahash_digest;
3005 			a_hash->export = chcr_ahash_export;
3006 			a_hash->import = chcr_ahash_import;
3007 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
3008 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
3009 			a_hash->halg.base.cra_module = THIS_MODULE;
3010 			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
3011 			a_hash->halg.base.cra_alignmask = 0;
3012 			a_hash->halg.base.cra_exit = NULL;
3013 			a_hash->halg.base.cra_type = &crypto_ahash_type;
3014 
3015 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
3016 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
3017 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
3018 				a_hash->init = chcr_hmac_init;
3019 				a_hash->setkey = chcr_ahash_setkey;
3020 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
3021 			} else {
3022 				a_hash->init = chcr_sha_init;
3023 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
3024 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
3025 			}
3026 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
3027 			ai = driver_algs[i].alg.hash.halg.base;
3028 			name = ai.cra_driver_name;
3029 			break;
3030 		}
3031 		if (err) {
3032 			pr_err("chcr : %s : Algorithm registration failed\n",
3033 			       name);
3034 			goto register_err;
3035 		} else {
3036 			driver_algs[i].is_registered = 1;
3037 		}
3038 	}
3039 	return 0;
3040 
3041 register_err:
3042 	chcr_unregister_alg();
3043 	return err;
3044 }
3045 
3046 /*
3047  *	start_crypto - Register the crypto algorithms.
3048  *	This should be called once when the first device comes up. After
3049  *	this the kernel will start calling driver APIs for crypto operations.
3050  */
3051 int start_crypto(void)
3052 {
3053 	return chcr_register_alg();
3054 }
3055 
3056 /*
3057  *	stop_crypto - Deregister all the crypto algorithms with the kernel.
3058  *	This should be called once when the last device goes down. After
3059  *	this the kernel will not call the driver API for crypto operations.
3060  */
3061 int stop_crypto(void)
3062 {
3063 	chcr_unregister_alg();
3064 	return 0;
3065 }
3066