xref: /linux/drivers/crypto/chelsio/chcr_algo.c (revision c1aac62f36c1e37ee81c9e09ee9ee733eef05dcb)
1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr: " fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/internal/aead.h>
59 #include <crypto/null.h>
60 #include <crypto/internal/skcipher.h>
61 #include <crypto/aead.h>
62 #include <crypto/scatterwalk.h>
63 #include <crypto/internal/hash.h>
64 
65 #include "t4fw_api.h"
66 #include "t4_msg.h"
67 #include "chcr_core.h"
68 #include "chcr_algo.h"
69 #include "chcr_crypto.h"
70 
71 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
72 {
73 	return ctx->crypto_ctx->aeadctx;
74 }
75 
76 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
77 {
78 	return ctx->crypto_ctx->ablkctx;
79 }
80 
81 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
82 {
83 	return ctx->crypto_ctx->hmacctx;
84 }
85 
86 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
87 {
88 	return gctx->ctx->gcm;
89 }
90 
91 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
92 {
93 	return gctx->ctx->authenc;
94 }
95 
96 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
97 {
98 	return ctx->dev->u_ctx;
99 }
100 
101 static inline int is_ofld_imm(const struct sk_buff *skb)
102 {
103 	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
104 }
105 
106 /*
107  *	sgl_len - calculates the size of an SGL of the given capacity
108  *	@n: the number of SGL entries
109  *	Calculates the number of flits needed for a scatter/gather list that
110  *	can hold the given number of entries.
111  */
112 static inline unsigned int sgl_len(unsigned int n)
113 {
114 	n--;
115 	return (3 * n) / 2 + (n & 1) + 2;
116 }
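
/*
 * Illustrative arithmetic for the formula above: the trailing "+ 2"
 * pays for the SGL command/len0 flit plus the first address flit, and
 * each further pair of entries costs three flits.  For n = 5 entries:
 * after n--, (3 * 4) / 2 = 6 flits, (4 & 1) = 0 padding flits, + 2
 * gives 8 flits (64 bytes) in total.
 */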
117 
118 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
119 {
120 	u8 temp[SHA512_DIGEST_SIZE];
121 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
122 	int authsize = crypto_aead_authsize(tfm);
123 	struct cpl_fw6_pld *fw6_pld;
124 	int cmp = 0;
125 
126 	fw6_pld = (struct cpl_fw6_pld *)input;
127 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
128 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
129 		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
130 	} else {
132 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
133 				authsize, req->assoclen +
134 				req->cryptlen - authsize);
135 		cmp = memcmp(temp, (fw6_pld + 1), authsize);
136 	}
137 	if (cmp)
138 		*err = -EBADMSG;
139 	else
140 		*err = 0;
141 }
142 
143 /*
144  *	chcr_handle_resp - unmap DMA buffers, copy back results and free the skb
145  *	@req: crypto request
146  */
147 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
148 			 int err)
149 {
150 	struct crypto_tfm *tfm = req->tfm;
151 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
152 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
153 	struct chcr_req_ctx ctx_req;
154 	struct cpl_fw6_pld *fw6_pld;
155 	unsigned int digestsize, updated_digestsize;
156 
157 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
158 	case CRYPTO_ALG_TYPE_AEAD:
159 		ctx_req.req.aead_req = (struct aead_request *)req;
160 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
161 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
162 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
163 		if (ctx_req.ctx.reqctx->skb) {
164 			kfree_skb(ctx_req.ctx.reqctx->skb);
165 			ctx_req.ctx.reqctx->skb = NULL;
166 		}
167 		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
168 			chcr_verify_tag(ctx_req.req.aead_req, input,
169 					&err);
170 			ctx_req.ctx.reqctx->verify = VERIFY_HW;
171 		}
172 		break;
173 
174 	case CRYPTO_ALG_TYPE_BLKCIPHER:
175 		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
176 		ctx_req.ctx.ablk_ctx =
177 			ablkcipher_request_ctx(ctx_req.req.ablk_req);
178 		if (!err) {
179 			fw6_pld = (struct cpl_fw6_pld *)input;
180 			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
181 			       AES_BLOCK_SIZE);
182 		}
183 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
184 			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
185 		if (ctx_req.ctx.ablk_ctx->skb) {
186 			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
187 			ctx_req.ctx.ablk_ctx->skb = NULL;
188 		}
189 		break;
190 
191 	case CRYPTO_ALG_TYPE_AHASH:
192 		ctx_req.req.ahash_req = (struct ahash_request *)req;
193 		ctx_req.ctx.ahash_ctx =
194 			ahash_request_ctx(ctx_req.req.ahash_req);
195 		digestsize =
196 			crypto_ahash_digestsize(crypto_ahash_reqtfm(
197 							ctx_req.req.ahash_req));
198 		updated_digestsize = digestsize;
199 		if (digestsize == SHA224_DIGEST_SIZE)
200 			updated_digestsize = SHA256_DIGEST_SIZE;
201 		else if (digestsize == SHA384_DIGEST_SIZE)
202 			updated_digestsize = SHA512_DIGEST_SIZE;
203 		if (ctx_req.ctx.ahash_ctx->skb) {
204 			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
205 			ctx_req.ctx.ahash_ctx->skb = NULL;
206 		}
207 		if (ctx_req.ctx.ahash_ctx->result == 1) {
208 			ctx_req.ctx.ahash_ctx->result = 0;
209 			memcpy(ctx_req.req.ahash_req->result, input +
210 			       sizeof(struct cpl_fw6_pld),
211 			       digestsize);
212 		} else {
213 			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
214 			       sizeof(struct cpl_fw6_pld),
215 			       updated_digestsize);
216 		}
217 		break;
218 	}
219 	return err;
220 }
221 
222 /*
223  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
224  *	@skb: the packet
225  *	Returns the number of flits needed for the given offload packet.
226  *	These packets are already fully constructed and no additional headers
227  *	will be added.
228  */
229 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
230 {
231 	unsigned int flits, cnt;
232 
233 	if (is_ofld_imm(skb))
234 		return DIV_ROUND_UP(skb->len, 8);
235 
236 	flits = skb_transport_offset(skb) / 8;   /* headers */
237 	cnt = skb_shinfo(skb)->nr_frags;
238 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
239 		cnt++;
240 	return flits + sgl_len(cnt);
241 }
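
/*
 * Illustrative numbers: a WR small enough for immediate data costs just
 * its length in flits, e.g. a fully built 200-byte WR needs
 * DIV_ROUND_UP(200, 8) = 25 flits.  A larger WR pays the header bytes
 * up to the transport header plus sgl_len() flits for its page
 * fragments, with one extra SGL entry if a linear tail follows the
 * headers.
 */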
242 
243 static inline void get_aes_decrypt_key(unsigned char *dec_key,
244 				       const unsigned char *key,
245 				       unsigned int keylength)
246 {
247 	u32 temp;
248 	u32 w_ring[MAX_NK];
249 	int i, j, k;
250 	u8  nr, nk;
251 
252 	switch (keylength) {
253 	case AES_KEYLENGTH_128BIT:
254 		nk = KEYLENGTH_4BYTES;
255 		nr = NUMBER_OF_ROUNDS_10;
256 		break;
257 	case AES_KEYLENGTH_192BIT:
258 		nk = KEYLENGTH_6BYTES;
259 		nr = NUMBER_OF_ROUNDS_12;
260 		break;
261 	case AES_KEYLENGTH_256BIT:
262 		nk = KEYLENGTH_8BYTES;
263 		nr = NUMBER_OF_ROUNDS_14;
264 		break;
265 	default:
266 		return;
267 	}
268 	for (i = 0; i < nk; i++)
269 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
270 
271 	i = 0;
272 	temp = w_ring[nk - 1];
273 	while (i + nk < (nr + 1) * 4) {
274 		if (!(i % nk)) {
275 			/* RotWord(temp) */
276 			temp = (temp << 8) | (temp >> 24);
277 			temp = aes_ks_subword(temp);
278 			temp ^= round_constant[i / nk];
279 		} else if (nk == 8 && (i % 4 == 0)) {
280 			temp = aes_ks_subword(temp);
281 		}
282 		w_ring[i % nk] ^= temp;
283 		temp = w_ring[i % nk];
284 		i++;
285 	}
286 	i--;
287 	for (k = 0, j = i % nk; k < nk; k++) {
288 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
289 		j--;
290 		if (j < 0)
291 			j += nk;
292 	}
293 }
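
/*
 * Sketch of what the loop above computes, in FIPS-197 terms: it runs
 * the forward key expansion but only ever keeps the last nk words in
 * w_ring[].  For AES-128 (nk = 4, nr = 10) the schedule has 44 words;
 * on exit w_ring[] holds words 40..43, which the copy-out loop emits
 * in reverse order (43, 42, 41, 40) so decryption can start from the
 * final round key.
 */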
294 
295 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
296 {
297 	struct crypto_shash *base_hash = NULL;
298 
299 	switch (ds) {
300 	case SHA1_DIGEST_SIZE:
301 		base_hash = crypto_alloc_shash("sha1", 0, 0);
302 		break;
303 	case SHA224_DIGEST_SIZE:
304 		base_hash = crypto_alloc_shash("sha224", 0, 0);
305 		break;
306 	case SHA256_DIGEST_SIZE:
307 		base_hash = crypto_alloc_shash("sha256", 0, 0);
308 		break;
309 	case SHA384_DIGEST_SIZE:
310 		base_hash = crypto_alloc_shash("sha384", 0, 0);
311 		break;
312 	case SHA512_DIGEST_SIZE:
313 		base_hash = crypto_alloc_shash("sha512", 0, 0);
314 		break;
315 	}
316 
317 	return base_hash;
318 }
319 
320 static int chcr_compute_partial_hash(struct shash_desc *desc,
321 				     char *iopad, char *result_hash,
322 				     int digest_size)
323 {
324 	struct sha1_state sha1_st;
325 	struct sha256_state sha256_st;
326 	struct sha512_state sha512_st;
327 	int error;
328 
329 	if (digest_size == SHA1_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha1_st);
333 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
334 	} else if (digest_size == SHA224_DIGEST_SIZE) {
335 		error = crypto_shash_init(desc) ?:
336 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
337 			crypto_shash_export(desc, (void *)&sha256_st);
338 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
339 
340 	} else if (digest_size == SHA256_DIGEST_SIZE) {
341 		error = crypto_shash_init(desc) ?:
342 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
343 			crypto_shash_export(desc, (void *)&sha256_st);
344 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
345 
346 	} else if (digest_size == SHA384_DIGEST_SIZE) {
347 		error = crypto_shash_init(desc) ?:
348 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
349 			crypto_shash_export(desc, (void *)&sha512_st);
350 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
351 
352 	} else if (digest_size == SHA512_DIGEST_SIZE) {
353 		error = crypto_shash_init(desc) ?:
354 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
355 			crypto_shash_export(desc, (void *)&sha512_st);
356 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
357 	} else {
358 		error = -EINVAL;
359 		pr_err("Unknown digest size %d\n", digest_size);
360 	}
361 	return error;
362 }
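
/*
 * Note on the truncated variants above: crypto_shash_export() returns
 * the full internal state, so SHA-224 yields all SHA256_DIGEST_SIZE
 * bytes of SHA-256 state and SHA-384 all SHA512_DIGEST_SIZE bytes of
 * SHA-512 state.  This is why callers elsewhere track an
 * "updated_digestsize" wider than the advertised digest size.
 */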
363 
364 static void chcr_change_order(char *buf, int ds)
365 {
366 	int i;
367 
368 	if (ds == SHA512_DIGEST_SIZE) {
369 		for (i = 0; i < (ds / sizeof(u64)); i++)
370 			*((__be64 *)buf + i) =
371 				cpu_to_be64(*((u64 *)buf + i));
372 	} else {
373 		for (i = 0; i < (ds / sizeof(u32)); i++)
374 			*((__be32 *)buf + i) =
375 				cpu_to_be32(*((u32 *)buf + i));
376 	}
377 }
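
/*
 * The exported state words are host-endian while the key context wants
 * them big-endian; e.g. on a little-endian host the SHA-256 initial
 * word 0x6a09e667 sits in memory as 67 e6 09 6a and must be swapped
 * per 32-bit word (per 64-bit word for SHA-384/512) before being
 * handed to the hardware.
 */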
378 
379 static inline int is_hmac(struct crypto_tfm *tfm)
380 {
381 	struct crypto_alg *alg = tfm->__crt_alg;
382 	struct chcr_alg_template *chcr_crypto_alg =
383 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
384 			     alg.hash);
385 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
386 		return 1;
387 	return 0;
388 }
389 
390 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
391 			   struct scatterlist *sg,
392 			   struct phys_sge_parm *sg_param)
393 {
394 	struct phys_sge_pairs *to;
395 	int out_buf_size = sg_param->obsize;
396 	unsigned int nents = sg_param->nents, j = 0;
397 
398 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
399 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
400 	phys_cpl->pcirlxorder_to_noofsgentr =
401 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
402 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
403 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
404 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
405 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
406 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
407 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
408 	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
409 	phys_cpl->rss_hdr_int.hash_val = 0;
410 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
411 				       sizeof(struct cpl_rx_phys_dsgl));
412 
413 	for (; nents; to++) {
414 		for (j = 0; j < 8 && nents; j++, nents--) {
415 			out_buf_size -= sg_dma_len(sg);
416 			to->len[j] = htons(sg_dma_len(sg));
417 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
418 			sg = sg_next(sg);
419 		}
420 	}
421 	if (out_buf_size) {
422 		j--;
423 		to--;
424 		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
425 	}
426 }
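
/*
 * Shape of the DSGL built above (a sketch): each phys_sge_pairs block
 * carries up to eight address/length entries, so e.g. ten entries span
 * two blocks with the second only partly filled.  Any difference
 * between the mapped lengths and obsize is folded into the last entry
 * so the list still advertises the full output buffer.
 */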
427 
428 static inline int map_writesg_phys_cpl(struct device *dev,
429 					struct cpl_rx_phys_dsgl *phys_cpl,
430 					struct scatterlist *sg,
431 					struct phys_sge_parm *sg_param)
432 {
433 	if (!sg || !sg_param->nents)
434 		return 0;
435 
436 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
437 	if (sg_param->nents == 0) {
438 		pr_err("DMA mapping failed\n");
439 		return -EINVAL;
440 	}
441 	write_phys_cpl(phys_cpl, sg, sg_param);
442 	return 0;
443 }
444 
445 static inline int get_aead_subtype(struct crypto_aead *aead)
446 {
447 	struct aead_alg *alg = crypto_aead_alg(aead);
448 	struct chcr_alg_template *chcr_crypto_alg =
449 		container_of(alg, struct chcr_alg_template, alg.aead);
450 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
451 }
452 
453 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
454 {
455 	struct crypto_alg *alg = tfm->__crt_alg;
456 	struct chcr_alg_template *chcr_crypto_alg =
457 		container_of(alg, struct chcr_alg_template, alg.crypto);
458 
459 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
460 }
461 
462 static inline void write_buffer_to_skb(struct sk_buff *skb,
463 					unsigned int *frags,
464 					char *bfr,
465 					u8 bfr_len)
466 {
467 	skb->len += bfr_len;
468 	skb->data_len += bfr_len;
469 	skb->truesize += bfr_len;
470 	get_page(virt_to_page(bfr));
471 	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
472 			   offset_in_page(bfr), bfr_len);
473 	(*frags)++;
474 }
475 
477 static inline void
478 write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
479 			struct scatterlist *sg, unsigned int count)
480 {
481 	struct page *spage;
482 	unsigned int page_len;
483 
484 	skb->len += count;
485 	skb->data_len += count;
486 	skb->truesize += count;
487 
488 	while (count > 0) {
489 		if (!sg || (!(sg->length)))
490 			break;
491 		spage = sg_page(sg);
492 		get_page(spage);
493 		page_len = min(sg->length, count);
494 		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
495 		(*frags)++;
496 		count -= page_len;
497 		sg = sg_next(sg);
498 	}
499 }
500 
501 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
502 			       struct _key_ctx *key_ctx)
503 {
504 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
505 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
506 	} else {
507 		memcpy(key_ctx->key,
508 		       ablkctx->key + (ablkctx->enckey_len >> 1),
509 		       ablkctx->enckey_len >> 1);
510 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
511 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
512 	}
513 	return 0;
514 }
515 
516 static inline void create_wreq(struct chcr_context *ctx,
517 			       struct chcr_wr *chcr_req,
518 			       void *req, struct sk_buff *skb,
519 			       int kctx_len, int hash_sz,
520 			       int is_iv,
521 			       unsigned int sc_len)
522 {
523 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
524 	int iv_loc = IV_DSGL;
525 	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
526 	unsigned int immdatalen = 0, nr_frags = 0;
527 
528 	if (is_ofld_imm(skb)) {
529 		immdatalen = skb->data_len;
530 		iv_loc = IV_IMMEDIATE;
531 	} else {
532 		nr_frags = skb_shinfo(skb)->nr_frags;
533 	}
534 
535 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
536 				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
537 	chcr_req->wreq.pld_size_hash_size =
538 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
539 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
540 	chcr_req->wreq.len16_pkd =
541 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
542 				    (calc_tx_flits_ofld(skb) * 8), 16)));
543 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
544 	chcr_req->wreq.rx_chid_to_rx_q_id =
545 		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
546 				is_iv ? iv_loc : IV_NOP);
547 
548 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
549 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
550 					16) - ((sizeof(chcr_req->wreq)) >> 4)));
551 
552 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
553 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
554 				   sizeof(chcr_req->key_ctx) +
555 				   kctx_len + sc_len + immdatalen);
556 }
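
/*
 * Unit check for the lengths above (illustrative): LEN16 counts
 * 16-byte units of the whole WR, so a request needing 11 flits is
 * DIV_ROUND_UP(11 * 8, 16) = 6 LEN16 units, and the ULPTX sub-command
 * then spans the same area minus sizeof(chcr_req->wreq) >> 4 units for
 * the FW_CRYPTO_LOOKASIDE_WR header.
 */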
557 
558 /**
559  *	create_cipher_wr - form the WR for cipher operations
560  *	@req: cipher request (the driver context is taken from its tfm)
561  *	@qid: ingress qid where the response to this WR should be received
562  *	@op_type: encryption or decryption
564  */
565 static struct sk_buff
566 *create_cipher_wr(struct ablkcipher_request *req,
567 		  unsigned short qid,
568 		  unsigned short op_type)
569 {
570 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
571 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
572 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
573 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
574 	struct sk_buff *skb = NULL;
575 	struct chcr_wr *chcr_req;
576 	struct cpl_rx_phys_dsgl *phys_cpl;
577 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
578 	struct phys_sge_parm sg_param;
579 	unsigned int frags = 0, transhdr_len, phys_dsgl;
580 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
581 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
582 			GFP_ATOMIC;
583 
584 	if (!req->info)
585 		return ERR_PTR(-EINVAL);
586 	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
587 	if (reqctx->dst_nents <= 0) {
588 		pr_err("AES: invalid destination sg list\n");
589 		return ERR_PTR(-EINVAL);
590 	}
591 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
592 	    (req->nbytes == 0) || (req->nbytes % AES_BLOCK_SIZE)) {
593 		pr_err("AES: invalid key len %u, nbytes %u or IV len %u\n",
594 		       ablkctx->enckey_len, req->nbytes, ivsize);
595 		return ERR_PTR(-EINVAL);
596 	}
597 
598 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
599 
600 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
601 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
602 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
603 	if (!skb)
604 		return ERR_PTR(-ENOMEM);
605 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
606 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
607 	memset(chcr_req, 0, transhdr_len);
608 	chcr_req->sec_cpl.op_ivinsrtofst =
609 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
610 
611 	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
612 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
613 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
614 
615 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
616 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
617 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
618 							 ablkctx->ciph_mode,
619 							 0, 0, ivsize >> 1);
620 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
621 							  0, 1, phys_dsgl);
622 
623 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
624 	if (op_type == CHCR_DECRYPT_OP) {
625 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
626 	} else {
627 		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
628 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
629 			       ablkctx->enckey_len);
630 		} else {
631 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
632 			       (ablkctx->enckey_len >> 1),
633 			       ablkctx->enckey_len >> 1);
634 			memcpy(chcr_req->key_ctx.key +
635 			       (ablkctx->enckey_len >> 1),
636 			       ablkctx->key,
637 			       ablkctx->enckey_len >> 1);
638 		}
639 	}
640 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
641 	sg_param.nents = reqctx->dst_nents;
642 	sg_param.obsize = req->nbytes;
643 	sg_param.qid = qid;
644 	sg_param.align = 1;
645 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
646 				 &sg_param))
647 		goto map_fail1;
648 
649 	skb_set_transport_header(skb, transhdr_len);
650 	memcpy(reqctx->iv, req->info, ivsize);
651 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
652 	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
653 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
654 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
655 	reqctx->skb = skb;
656 	skb_get(skb);
657 	return skb;
658 map_fail1:
659 	kfree_skb(skb);
660 	return ERR_PTR(-ENOMEM);
661 }
662 
663 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
664 			       unsigned int keylen)
665 {
666 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
667 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
668 	unsigned int ck_size, context_size;
669 	u16 alignment = 0;
670 
671 	if (keylen == AES_KEYSIZE_128) {
672 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
673 	} else if (keylen == AES_KEYSIZE_192) {
674 		alignment = 8;
675 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
676 	} else if (keylen == AES_KEYSIZE_256) {
677 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
678 	} else {
679 		goto badkey_err;
680 	}
681 	memcpy(ablkctx->key, key, keylen);
682 	ablkctx->enckey_len = keylen;
683 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
684 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
685 			keylen + alignment) >> 4;
686 
687 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
688 						0, 0, context_size);
689 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
690 	return 0;
691 badkey_err:
692 	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
693 	ablkctx->enckey_len = 0;
694 	return -EINVAL;
695 }
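
/*
 * Example of the sizing above (assuming KEY_CONTEXT_HDR_SALT_AND_PAD
 * covers the 16-byte-aligned header/salt region): an AES-192 key is 24
 * bytes, so 8 bytes of alignment padding are added before context_size
 * is computed in 16-byte units; AES-128 and AES-256 keys are already
 * 16-byte multiples and need no padding.
 */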
696 
697 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
698 {
699 	struct adapter *adap = netdev2adap(dev);
700 	struct sge_uld_txq_info *txq_info =
701 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
702 	struct sge_uld_txq *txq;
703 	int ret = 0;
704 
705 	local_bh_disable();
706 	txq = &txq_info->uldtxq[idx];
707 	spin_lock(&txq->sendq.lock);
708 	if (txq->full)
709 		ret = -1;
710 	spin_unlock(&txq->sendq.lock);
711 	local_bh_enable();
712 	return ret;
713 }
714 
715 static int chcr_aes_encrypt(struct ablkcipher_request *req)
716 {
717 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
718 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
719 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
720 	struct sk_buff *skb;
721 
722 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
723 					    ctx->tx_channel_id))) {
724 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
725 			return -EBUSY;
726 	}
727 
728 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
729 			       CHCR_ENCRYPT_OP);
730 	if (IS_ERR(skb)) {
731 		pr_err("%s: failed to form WR\n", __func__);
732 		return  PTR_ERR(skb);
733 	}
734 	skb->dev = u_ctx->lldi.ports[0];
735 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
736 	chcr_send_wr(skb);
737 	return -EINPROGRESS;
738 }
739 
740 static int chcr_aes_decrypt(struct ablkcipher_request *req)
741 {
742 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
743 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
744 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
745 	struct sk_buff *skb;
746 
747 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
748 					    ctx->tx_channel_id))) {
749 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
750 			return -EBUSY;
751 	}
752 
753 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
754 			       CHCR_DECRYPT_OP);
755 	if (IS_ERR(skb)) {
756 		pr_err("%s: failed to form WR\n", __func__);
757 		return PTR_ERR(skb);
758 	}
759 	skb->dev = u_ctx->lldi.ports[0];
760 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
761 	chcr_send_wr(skb);
762 	return -EINPROGRESS;
763 }
764 
765 static int chcr_device_init(struct chcr_context *ctx)
766 {
767 	struct uld_ctx *u_ctx;
768 	unsigned int id;
769 	int err = 0, rxq_perchan, rxq_idx;
770 
771 	id = smp_processor_id();
772 	if (!ctx->dev) {
773 		err = assign_chcr_device(&ctx->dev);
774 		if (err) {
775 			pr_err("device assignment failed\n");
776 			goto out;
777 		}
778 		u_ctx = ULD_CTX(ctx);
779 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
780 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
781 		rxq_idx += id % rxq_perchan;
782 		spin_lock(&ctx->dev->lock_chcr_dev);
783 		ctx->tx_channel_id = rxq_idx;
784 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
785 		spin_unlock(&ctx->dev->lock_chcr_dev);
786 	}
787 out:
788 	return err;
789 }
790 
791 static int chcr_cra_init(struct crypto_tfm *tfm)
792 {
793 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
794 	return chcr_device_init(crypto_tfm_ctx(tfm));
795 }
796 
797 static int get_alg_config(struct algo_param *params,
798 			  unsigned int auth_size)
799 {
800 	switch (auth_size) {
801 	case SHA1_DIGEST_SIZE:
802 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
803 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
804 		params->result_size = SHA1_DIGEST_SIZE;
805 		break;
806 	case SHA224_DIGEST_SIZE:
807 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
808 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
809 		params->result_size = SHA256_DIGEST_SIZE;
810 		break;
811 	case SHA256_DIGEST_SIZE:
812 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
813 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
814 		params->result_size = SHA256_DIGEST_SIZE;
815 		break;
816 	case SHA384_DIGEST_SIZE:
817 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
818 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
819 		params->result_size = SHA512_DIGEST_SIZE;
820 		break;
821 	case SHA512_DIGEST_SIZE:
822 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
823 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
824 		params->result_size = SHA512_DIGEST_SIZE;
825 		break;
826 	default:
827 		pr_err("unsupported digest size\n");
828 		return -EINVAL;
829 	}
830 	return 0;
831 }
832 
833 static inline void chcr_free_shash(struct crypto_shash *base_hash)
834 {
835 	crypto_free_shash(base_hash);
836 }
837 
838 /**
839  *	create_hash_wr - Create hash work request
840  *	@req: hash request; @param carries the per-WR hash parameters
841  */
842 static struct sk_buff *create_hash_wr(struct ahash_request *req,
843 				      struct hash_wr_param *param)
844 {
845 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
846 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
847 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
848 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
849 	struct sk_buff *skb = NULL;
850 	struct chcr_wr *chcr_req;
851 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
852 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
853 	unsigned int kctx_len = 0;
854 	u8 hash_size_in_response = 0;
855 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
856 		GFP_ATOMIC;
857 
858 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
859 	kctx_len = param->alg_prm.result_size + iopad_alignment;
860 	if (param->opad_needed)
861 		kctx_len += param->alg_prm.result_size + iopad_alignment;
862 
863 	if (req_ctx->result)
864 		hash_size_in_response = digestsize;
865 	else
866 		hash_size_in_response = param->alg_prm.result_size;
867 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
868 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
869 	if (!skb)
870 		return skb;
871 
872 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
873 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
874 	memset(chcr_req, 0, transhdr_len);
875 
876 	chcr_req->sec_cpl.op_ivinsrtofst =
877 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
878 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
879 
880 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
881 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
882 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
883 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
884 	chcr_req->sec_cpl.seqno_numivs =
885 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
886 					 param->opad_needed, 0);
887 
888 	chcr_req->sec_cpl.ivgen_hdrlen =
889 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
890 
891 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
892 	       param->alg_prm.result_size);
893 
894 	if (param->opad_needed)
895 		memcpy(chcr_req->key_ctx.key +
896 		       ((param->alg_prm.result_size <= 32) ? 32 :
897 			CHCR_HASH_MAX_DIGEST_SIZE),
898 		       hmacctx->opad, param->alg_prm.result_size);
899 
900 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
901 					    param->alg_prm.mk_size, 0,
902 					    param->opad_needed,
903 					    ((kctx_len +
904 					     sizeof(chcr_req->key_ctx)) >> 4));
905 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
906 
907 	skb_set_transport_header(skb, transhdr_len);
908 	if (param->bfr_len != 0)
909 		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
910 				    param->bfr_len);
911 	if (param->sg_len != 0)
912 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
913 
914 	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
915 			DUMMY_BYTES);
916 	req_ctx->skb = skb;
917 	skb_get(skb);
918 	return skb;
919 }
920 
921 static int chcr_ahash_update(struct ahash_request *req)
922 {
923 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
924 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
925 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
926 	struct uld_ctx *u_ctx = NULL;
927 	struct sk_buff *skb;
928 	u8 remainder = 0, bs;
929 	unsigned int nbytes = req->nbytes;
930 	struct hash_wr_param params;
931 
932 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
933 
934 	u_ctx = ULD_CTX(ctx);
935 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
936 					    ctx->tx_channel_id))) {
937 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
938 			return -EBUSY;
939 	}
940 
941 	if (nbytes + req_ctx->reqlen >= bs) {
942 		remainder = (nbytes + req_ctx->reqlen) % bs;
943 		nbytes = nbytes + req_ctx->reqlen - remainder;
944 	} else {
945 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
946 				   + req_ctx->reqlen, nbytes, 0);
947 		req_ctx->reqlen += nbytes;
948 		return 0;
949 	}
950 
951 	params.opad_needed = 0;
952 	params.more = 1;
953 	params.last = 0;
954 	params.sg_len = nbytes - req_ctx->reqlen;
955 	params.bfr_len = req_ctx->reqlen;
956 	params.scmd1 = 0;
957 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
958 	req_ctx->result = 0;
959 	req_ctx->data_len += params.sg_len + params.bfr_len;
960 	skb = create_hash_wr(req, &params);
961 	if (!skb)
962 		return -ENOMEM;
963 
964 	if (remainder) {
965 		u8 *temp;
966 		/* Swap buffers */
967 		temp = req_ctx->reqbfr;
968 		req_ctx->reqbfr = req_ctx->skbfr;
969 		req_ctx->skbfr = temp;
970 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
971 				   req_ctx->reqbfr, remainder, req->nbytes -
972 				   remainder);
973 	}
974 	req_ctx->reqlen = remainder;
975 	skb->dev = u_ctx->lldi.ports[0];
976 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
977 	chcr_send_wr(skb);
978 
979 	return -EINPROGRESS;
980 }
981 
982 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
983 {
984 	memset(bfr_ptr, 0, bs);
985 	*bfr_ptr = 0x80;
986 	if (bs == 64)
987 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
988 	else
989 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
990 }
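
/*
 * This mirrors FIPS-180 padding (sketch): an 0x80 terminator, zero
 * fill, then the message length in bits (scmd1 << 3) big-endian in the
 * last eight bytes - at offset 56 for 64-byte blocks (SHA-1/224/256)
 * or 120 for 128-byte blocks (SHA-384/512).  E.g. 3 bytes hashed so
 * far is encoded as the value 24.
 */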
991 
992 static int chcr_ahash_final(struct ahash_request *req)
993 {
994 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
995 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
996 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
997 	struct hash_wr_param params;
998 	struct sk_buff *skb;
999 	struct uld_ctx *u_ctx = NULL;
1000 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1001 
1002 	u_ctx = ULD_CTX(ctx);
1003 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1004 		params.opad_needed = 1;
1005 	else
1006 		params.opad_needed = 0;
1007 	params.sg_len = 0;
1008 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1009 	req_ctx->result = 1;
1010 	params.bfr_len = req_ctx->reqlen;
1011 	req_ctx->data_len += params.bfr_len + params.sg_len;
1012 	if (req_ctx->reqlen == 0) {
1013 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1014 		params.last = 0;
1015 		params.more = 1;
1016 		params.scmd1 = 0;
1017 		params.bfr_len = bs;
1018 
1019 	} else {
1020 		params.scmd1 = req_ctx->data_len;
1021 		params.last = 1;
1022 		params.more = 0;
1023 	}
1024 	skb = create_hash_wr(req, &params);
1025 	if (!skb)
1026 		return -ENOMEM;
1027 
1028 	skb->dev = u_ctx->lldi.ports[0];
1029 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1030 	chcr_send_wr(skb);
1031 	return -EINPROGRESS;
1032 }
1033 
1034 static int chcr_ahash_finup(struct ahash_request *req)
1035 {
1036 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1037 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1038 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1039 	struct uld_ctx *u_ctx = NULL;
1040 	struct sk_buff *skb;
1041 	struct hash_wr_param params;
1042 	u8  bs;
1043 
1044 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1045 	u_ctx = ULD_CTX(ctx);
1046 
1047 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1048 					    ctx->tx_channel_id))) {
1049 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1050 			return -EBUSY;
1051 	}
1052 
1053 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1054 		params.opad_needed = 1;
1055 	else
1056 		params.opad_needed = 0;
1057 
1058 	params.sg_len = req->nbytes;
1059 	params.bfr_len = req_ctx->reqlen;
1060 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1061 	req_ctx->data_len += params.bfr_len + params.sg_len;
1062 	req_ctx->result = 1;
1063 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1064 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1065 		params.last = 0;
1066 		params.more = 1;
1067 		params.scmd1 = 0;
1068 		params.bfr_len = bs;
1069 	} else {
1070 		params.scmd1 = req_ctx->data_len;
1071 		params.last = 1;
1072 		params.more = 0;
1073 	}
1074 
1075 	skb = create_hash_wr(req, &params);
1076 	if (!skb)
1077 		return -ENOMEM;
1078 
1079 	skb->dev = u_ctx->lldi.ports[0];
1080 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1081 	chcr_send_wr(skb);
1082 
1083 	return -EINPROGRESS;
1084 }
1085 
1086 static int chcr_ahash_digest(struct ahash_request *req)
1087 {
1088 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1089 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1090 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1091 	struct uld_ctx *u_ctx = NULL;
1092 	struct sk_buff *skb;
1093 	struct hash_wr_param params;
1094 	u8  bs;
1095 
1096 	rtfm->init(req);
1097 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1098 
1099 	u_ctx = ULD_CTX(ctx);
1100 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1101 					    ctx->tx_channel_id))) {
1102 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1103 			return -EBUSY;
1104 	}
1105 
1106 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1107 		params.opad_needed = 1;
1108 	else
1109 		params.opad_needed = 0;
1110 
1111 	params.last = 0;
1112 	params.more = 0;
1113 	params.sg_len = req->nbytes;
1114 	params.bfr_len = 0;
1115 	params.scmd1 = 0;
1116 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1117 	req_ctx->result = 1;
1118 	req_ctx->data_len += params.bfr_len + params.sg_len;
1119 
1120 	if (req->nbytes == 0) {
1121 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1122 		params.more = 1;
1123 		params.bfr_len = bs;
1124 	}
1125 
1126 	skb = create_hash_wr(req, &params);
1127 	if (!skb)
1128 		return -ENOMEM;
1129 
1130 	skb->dev = u_ctx->lldi.ports[0];
1131 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1132 	chcr_send_wr(skb);
1133 	return -EINPROGRESS;
1134 }
1135 
1136 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1137 {
1138 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1139 	struct chcr_ahash_req_ctx *state = out;
1140 
1141 	state->reqlen = req_ctx->reqlen;
1142 	state->data_len = req_ctx->data_len;
1143 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1144 	memcpy(state->partial_hash, req_ctx->partial_hash,
1145 	       CHCR_HASH_MAX_DIGEST_SIZE);
1146 	return 0;
1147 }
1148 
1149 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1150 {
1151 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1152 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1153 
1154 	req_ctx->reqlen = state->reqlen;
1155 	req_ctx->data_len = state->data_len;
1156 	req_ctx->reqbfr = req_ctx->bfr1;
1157 	req_ctx->skbfr = req_ctx->bfr2;
1158 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1159 	memcpy(req_ctx->partial_hash, state->partial_hash,
1160 	       CHCR_HASH_MAX_DIGEST_SIZE);
1161 	return 0;
1162 }
1163 
1164 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1165 			     unsigned int keylen)
1166 {
1167 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1168 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1169 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1170 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1171 	unsigned int i, err = 0, updated_digestsize;
1172 
1173 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1174 
1175 	/* Use the key to calculate the ipad and opad. The ipad will be sent
1176 	 * with the first request's data and the opad with the final hash
1177 	 * result; they are kept in hmacctx->ipad and hmacctx->opad.
1178 	 */
1179 	shash->tfm = hmacctx->base_hash;
1180 	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1181 	if (keylen > bs) {
1182 		err = crypto_shash_digest(shash, key, keylen,
1183 					  hmacctx->ipad);
1184 		if (err)
1185 			goto out;
1186 		keylen = digestsize;
1187 	} else {
1188 		memcpy(hmacctx->ipad, key, keylen);
1189 	}
1190 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
1191 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
1192 
1193 	for (i = 0; i < bs / sizeof(int); i++) {
1194 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1195 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1196 	}
1197 
1198 	updated_digestsize = digestsize;
1199 	if (digestsize == SHA224_DIGEST_SIZE)
1200 		updated_digestsize = SHA256_DIGEST_SIZE;
1201 	else if (digestsize == SHA384_DIGEST_SIZE)
1202 		updated_digestsize = SHA512_DIGEST_SIZE;
1203 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1204 					hmacctx->ipad, digestsize);
1205 	if (err)
1206 		goto out;
1207 	chcr_change_order(hmacctx->ipad, updated_digestsize);
1208 
1209 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
1210 					hmacctx->opad, digestsize);
1211 	if (err)
1212 		goto out;
1213 	chcr_change_order(hmacctx->opad, updated_digestsize);
1214 out:
1215 	return err;
1216 }
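
/*
 * The precompute above follows RFC 2104 (a sketch, assuming IPAD_DATA
 * and OPAD_DATA are the 0x36/0x5c pad bytes replicated across a word):
 * hmacctx->ipad ends up holding the partial state after hashing
 * (K ^ ipad) and hmacctx->opad the state after (K ^ opad), so each
 * request resumes from those states instead of re-hashing the key.
 */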
1217 
1218 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1219 			       unsigned int key_len)
1220 {
1221 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1222 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1223 	unsigned short context_size = 0;
1224 
1225 	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
1226 	    (key_len != (AES_KEYSIZE_256 << 1))) {
1227 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
1228 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
1229 		ablkctx->enckey_len = 0;
1230 		return -EINVAL;
1231 
1232 	}
1233 
1234 	memcpy(ablkctx->key, key, key_len);
1235 	ablkctx->enckey_len = key_len;
1236 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1237 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1238 	ablkctx->key_ctx_hdr =
1239 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1240 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1241 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1242 				 CHCR_KEYCTX_NO_KEY, 1,
1243 				 0, context_size);
1244 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1245 	return 0;
1246 }
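
/*
 * XTS sizing notes (a sketch): key_len covers both XTS key halves, so
 * key_len << 2 equals (key_len / 2) * 8, i.e. the single-key length in
 * bits used to derive the decrypt round keys, and a 32-byte total maps
 * to CHCR_KEYCTX_CIPHER_KEY_SIZE_128 because each half is 128 bits.
 */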
1247 
1248 static int chcr_sha_init(struct ahash_request *areq)
1249 {
1250 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1251 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1252 	int digestsize =  crypto_ahash_digestsize(tfm);
1253 
1254 	req_ctx->data_len = 0;
1255 	req_ctx->reqlen = 0;
1256 	req_ctx->reqbfr = req_ctx->bfr1;
1257 	req_ctx->skbfr = req_ctx->bfr2;
1258 	req_ctx->skb = NULL;
1259 	req_ctx->result = 0;
1260 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
1261 	return 0;
1262 }
1263 
1264 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1265 {
1266 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1267 				 sizeof(struct chcr_ahash_req_ctx));
1268 	return chcr_device_init(crypto_tfm_ctx(tfm));
1269 }
1270 
1271 static int chcr_hmac_init(struct ahash_request *areq)
1272 {
1273 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1274 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1275 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1276 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1277 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1278 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1279 
1280 	chcr_sha_init(areq);
1281 	req_ctx->data_len = bs;
1282 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1283 		if (digestsize == SHA224_DIGEST_SIZE)
1284 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1285 			       SHA256_DIGEST_SIZE);
1286 		else if (digestsize == SHA384_DIGEST_SIZE)
1287 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1288 			       SHA512_DIGEST_SIZE);
1289 		else
1290 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1291 			       digestsize);
1292 	}
1293 	return 0;
1294 }
1295 
1296 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1297 {
1298 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1299 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1300 	unsigned int digestsize =
1301 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1302 
1303 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1304 				 sizeof(struct chcr_ahash_req_ctx));
1305 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
1306 	if (IS_ERR(hmacctx->base_hash))
1307 		return PTR_ERR(hmacctx->base_hash);
1308 	return chcr_device_init(crypto_tfm_ctx(tfm));
1309 }
1310 
1311 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1312 {
1313 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1314 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1315 
1316 	if (hmacctx->base_hash) {
1317 		chcr_free_shash(hmacctx->base_hash);
1318 		hmacctx->base_hash = NULL;
1319 	}
1320 }
1321 
1322 static int chcr_copy_assoc(struct aead_request *req,
1323 				struct chcr_aead_ctx *ctx)
1324 {
1325 	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1326 
1327 	skcipher_request_set_tfm(skreq, ctx->null);
1328 	skcipher_request_set_callback(skreq, aead_request_flags(req),
1329 			NULL, NULL);
1330 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1331 			NULL);
1332 
1333 	return crypto_skcipher_encrypt(skreq);
1334 }
1335 
1336 static unsigned char get_hmac(unsigned int authsize)
1337 {
1338 	switch (authsize) {
1339 	case ICV_8:
1340 		return CHCR_SCMD_HMAC_CTRL_PL1;
1341 	case ICV_10:
1342 		return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
1343 	case ICV_12:
1344 		return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
1345 	}
1346 	return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
1347 }
1348 
1350 static struct sk_buff *create_authenc_wr(struct aead_request *req,
1351 					 unsigned short qid,
1352 					 int size,
1353 					 unsigned short op_type)
1354 {
1355 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1356 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1357 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1358 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1359 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1360 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1361 	struct sk_buff *skb = NULL;
1362 	struct chcr_wr *chcr_req;
1363 	struct cpl_rx_phys_dsgl *phys_cpl;
1364 	struct phys_sge_parm sg_param;
1365 	struct scatterlist *src;
1366 	unsigned int frags = 0, transhdr_len;
1367 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1368 	unsigned int   kctx_len = 0;
1369 	unsigned short stop_offset = 0;
1370 	unsigned int  assoclen = req->assoclen;
1371 	unsigned int  authsize = crypto_aead_authsize(tfm);
1372 	int err = 0;
1373 	int null = 0;
1374 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1375 		GFP_ATOMIC;
1376 
1377 	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
1378 		goto err;
1379 
1380 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1381 		goto err;
1382 
1383 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1384 		goto err;
1385 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1386 	reqctx->dst = src;
1387 
1388 	if (req->src != req->dst) {
1389 		err = chcr_copy_assoc(req, aeadctx);
1390 		if (err)
1391 			return ERR_PTR(err);
1392 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1393 					       req->assoclen);
1394 	}
1395 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1396 		null = 1;
1397 		assoclen = 0;
1398 	}
1399 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1400 					     (op_type ? -authsize : authsize));
1401 	if (reqctx->dst_nents <= 0) {
1402 		pr_err("AUTHENC: invalid destination sg entries\n");
1403 		goto err;
1404 	}
1405 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1406 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
1407 		- sizeof(chcr_req->key_ctx);
1408 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1409 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1410 	if (!skb)
1411 		goto err;
1412 
1413 	/* LLD is going to write the sge hdr. */
1414 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1415 
1416 	/* Write WR */
1417 	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
1418 	memset(chcr_req, 0, transhdr_len);
1419 
1420 	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1421 
1422 	/*
1423 	 * Input order is AAD, IV and Payload, where the IV is included as
1424 	 * part of the authdata. All other fields are filled according to
1425 	 * the hardware spec.
1426 	 */
1427 	chcr_req->sec_cpl.op_ivinsrtofst =
1428 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
1429 				       (ivsize ? (assoclen + 1) : 0));
1430 	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
1431 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1432 					assoclen ? 1 : 0, assoclen,
1433 					assoclen + ivsize + 1,
1434 					(stop_offset & 0x1F0) >> 4);
1435 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
1436 					stop_offset & 0xF,
1437 					null ? 0 : assoclen + ivsize + 1,
1438 					stop_offset, stop_offset);
1439 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1440 					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
1441 					CHCR_SCMD_CIPHER_MODE_AES_CBC,
1442 					actx->auth_mode, aeadctx->hmac_ctrl,
1443 					ivsize >> 1);
1444 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1445 					 0, 1, dst_size);
1446 
1447 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1448 	if (op_type == CHCR_ENCRYPT_OP)
1449 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
1450 		       aeadctx->enckey_len);
1451 	else
1452 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
1453 		       aeadctx->enckey_len);
1454 
1455 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
1456 					4), actx->h_iopad, kctx_len -
1457 				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
1458 
1459 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1460 	sg_param.nents = reqctx->dst_nents;
1461 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1462 	sg_param.qid = qid;
1463 	sg_param.align = 0;
1464 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1465 				  &sg_param))
1466 		goto dstmap_fail;
1467 
1468 	skb_set_transport_header(skb, transhdr_len);
1469 
1470 	if (assoclen) {
1471 		/* AAD buffer in */
1472 		write_sg_to_skb(skb, &frags, req->src, assoclen);
1473 
1474 	}
1475 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
1476 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
1477 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1478 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1479 	reqctx->skb = skb;
1480 	skb_get(skb);
1481 
1482 	return skb;
1483 dstmap_fail:
1484 	/* ivmap_fail: */
1485 	kfree_skb(skb);
1486 err:
1487 	return ERR_PTR(-EINVAL);
1488 }
1489 
1490 static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
1491 				  unsigned short offset)
1492 {
1493 	struct page *spage;
1494 	unsigned char *addr;
1495 
1496 	spage = sg_page(sg);
1497 	get_page(spage); /* so that it is not freed by NIC */
1498 	addr = kmap_atomic(spage);
1503 	memset(addr + sg->offset, 0, offset + 1);
1504 
1505 	kunmap_atomic(addr);
1506 }
1507 
1508 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
1509 {
1510 	__be32 data;
1511 
1512 	memset(block, 0, csize);
1513 	block += csize;
1514 
1515 	if (csize >= 4)
1516 		csize = 4;
1517 	else if (msglen > (unsigned int)(1 << (8 * csize)))
1518 		return -EOVERFLOW;
1519 
1520 	data = cpu_to_be32(msglen);
1521 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1522 
1523 	return 0;
1524 }
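
/*
 * Example of the CCM length encoding above: with csize = 3 (a 12-byte
 * nonce, since L + nonce length = 15) and msglen = 0x200,
 * cpu_to_be32() yields 00 00 02 00 and the last three bytes (00 02 00)
 * fill the L-byte big-endian length field that RFC 3610 expects.
 */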
1525 
1526 static void generate_b0(struct aead_request *req,
1527 			struct chcr_aead_ctx *aeadctx,
1528 			unsigned short op_type)
1529 {
1530 	unsigned int l, lp, m;
1532 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1533 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1534 	u8 *b0 = reqctx->scratch_pad;
1535 
1536 	m = crypto_aead_authsize(aead);
1537 
1538 	memcpy(b0, reqctx->iv, 16);
1539 
1540 	lp = b0[0];
1541 	l = lp + 1;
1542 
1543 	/* set m, bits 3-5 */
1544 	*b0 |= (8 * ((m - 2) / 2));
1545 
1546 	/* set adata, bit 6, if associated data is used */
1547 	if (req->assoclen)
1548 		*b0 |= 64;
1549 	/* note: set_msg_len() may report overflow; the result is unused here */
1550 	set_msg_len(b0 + 16 - l,
1551 		    (op_type == CHCR_DECRYPT_OP) ?
1552 		    req->cryptlen - m : req->cryptlen, l);
1552 }
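
/*
 * B0 flags recap per RFC 3610 (illustrative): bits 0-2 carry L - 1
 * (already present from iv[0]), bits 3-5 encode (M - 2) / 2 and bit 6
 * is set when AAD is present.  E.g. a 16-byte tag with AAD and a
 * 4-byte length field gives 0x03 | 0x38 | 0x40 = 0x7b.
 */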
1553 
1554 static inline int crypto_ccm_check_iv(const u8 *iv)
1555 {
1556 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1557 	if (iv[0] < 1 || iv[0] > 7)
1558 		return -EINVAL;
1559 
1560 	return 0;
1561 }
1562 
1563 static int ccm_format_packet(struct aead_request *req,
1564 			     struct chcr_aead_ctx *aeadctx,
1565 			     unsigned int sub_type,
1566 			     unsigned short op_type)
1567 {
1568 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1569 	int rc = 0;
1570 
1571 	if (req->assoclen > T5_MAX_AAD_SIZE) {
1572 		pr_err("CCM: unsupported AAD length; it must be <= %d\n",
1573 		       T5_MAX_AAD_SIZE);
1574 		return -EINVAL;
1575 	}
1576 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1577 		reqctx->iv[0] = 3;
1578 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
1579 		memcpy(reqctx->iv + 4, req->iv, 8);
1580 		memset(reqctx->iv + 12, 0, 4);
1581 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1582 			htons(req->assoclen - 8);
1583 	} else {
1584 		memcpy(reqctx->iv, req->iv, 16);
1585 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1586 			htons(req->assoclen);
1587 	}
1588 	generate_b0(req, aeadctx, op_type);
1589 	/* zero the ctr value */
1590 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
1591 	return rc;
1592 }
1593 
1594 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
1595 				  unsigned int dst_size,
1596 				  struct aead_request *req,
1597 				  unsigned short op_type,
1598 					  struct chcr_context *chcrctx)
1599 {
1600 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1601 	unsigned int ivsize = AES_BLOCK_SIZE;
1602 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
1603 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
1604 	unsigned int c_id = chcrctx->dev->tx_channel_id;
1605 	unsigned int ccm_xtra;
1606 	unsigned char tag_offset = 0, auth_offset = 0;
1607 	unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
1608 	unsigned int assoclen;
1609 
1610 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1611 		assoclen = req->assoclen - 8;
1612 	else
1613 		assoclen = req->assoclen;
1614 	ccm_xtra = CCM_B0_SIZE +
1615 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
1616 
1617 	auth_offset = req->cryptlen ?
1618 		(assoclen + ivsize + 1 + ccm_xtra) : 0;
1619 	if (op_type == CHCR_DECRYPT_OP) {
1620 		if (crypto_aead_authsize(tfm) != req->cryptlen)
1621 			tag_offset = crypto_aead_authsize(tfm);
1622 		else
1623 			auth_offset = 0;
1624 	}
1625 
1627 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
1628 					 2, (ivsize ?  (assoclen + 1) :  0) +
1629 					 ccm_xtra);
1630 	sec_cpl->pldlen =
1631 		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
1632 	/* For CCM there will be b0 always. So AAD start will be 1 always */
1633 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1634 					1, assoclen + ccm_xtra, assoclen
1635 					+ ivsize + 1 + ccm_xtra, 0);
1636 
1637 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
1638 					auth_offset, tag_offset,
1639 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
1640 					crypto_aead_authsize(tfm));
1641 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1642 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
1643 					cipher_mode, mac_mode, hmac_ctrl,
1644 					ivsize >> 1);
1645 
1646 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1647 					1, dst_size);
1648 }
1649 
1650 int aead_ccm_validate_input(unsigned short op_type,
1651 			    struct aead_request *req,
1652 			    struct chcr_aead_ctx *aeadctx,
1653 			    unsigned int sub_type)
1654 {
1655 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1656 		if (crypto_ccm_check_iv(req->iv)) {
1657 			pr_err("CCM: IV check failed\n");
1658 			return -EINVAL;
1659 		}
1660 	} else {
1661 		if (req->assoclen != 16 && req->assoclen != 20) {
1662 			pr_err("RFC4309: invalid AAD length %u\n",
1663 			       req->assoclen);
1664 			return -EINVAL;
1665 		}
1666 	}
1667 	if (aeadctx->enckey_len == 0) {
1668 		pr_err("CCM: Encryption key not set\n");
1669 		return -EINVAL;
1670 	}
1671 	return 0;
1672 }
1673 
1674 unsigned int fill_aead_req_fields(struct sk_buff *skb,
1675 				  struct aead_request *req,
1676 				  struct scatterlist *src,
1677 				  unsigned int ivsize,
1678 				  struct chcr_aead_ctx *aeadctx)
1679 {
1680 	unsigned int frags = 0;
1681 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1682 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

1683 	/* B0 and AAD length (if available) */
1685 	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
1686 				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
1687 	if (req->assoclen) {
1688 		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1689 			write_sg_to_skb(skb, &frags, req->src,
1690 					req->assoclen - 8);
1691 		else
1692 			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1693 	}
1694 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1695 	if (req->cryptlen)
1696 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
1697 
1698 	return frags;
1699 }
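
/*
 * The write order above (B0 [+ AAD length field], AAD, IV, payload)
 * must match the offsets programmed by fill_sec_cpl_for_aead(); the
 * returned value counts the fragments added to the skb.
 */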
1700 
1701 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1702 					  unsigned short qid,
1703 					  int size,
1704 					  unsigned short op_type)
1705 {
1706 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1707 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1708 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1709 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1710 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1711 	struct sk_buff *skb = NULL;
1712 	struct chcr_wr *chcr_req;
1713 	struct cpl_rx_phys_dsgl *phys_cpl;
1714 	struct phys_sge_parm sg_param;
1715 	struct scatterlist *src;
1716 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
1717 	unsigned int dst_size = 0, kctx_len;
1718 	unsigned int sub_type;
1719 	unsigned int authsize = crypto_aead_authsize(tfm);
1720 	int err = 0;
1721 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1722 		GFP_ATOMIC;
1723 
1725 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1726 		goto err;
1727 
1728 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1729 		goto err;
1730 	sub_type = get_aead_subtype(tfm);
1731 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1732 	reqctx->dst = src;
1733 
1734 	if (req->src != req->dst) {
1735 		err = chcr_copy_assoc(req, aeadctx);
1736 		if (err) {
1737 			pr_err("AAD copy to destination buffer failed\n");
1738 			return ERR_PTR(err);
1739 		}
1740 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1741 					       req->assoclen);
1742 	}
1743 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1744 					     (op_type ? -authsize : authsize));
1745 	if (reqctx->dst_nents <= 0) {
1746 		pr_err("CCM: Invalid destination sg entries\n");
1747 		goto err;
1748 	}
1749 
1751 	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
1752 		goto err;
1753 
1754 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1755 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
1756 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1757 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
1758 
1759 	if (!skb)
1760 		goto err;
1761 
1762 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1763 
1764 	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
1765 	memset(chcr_req, 0, transhdr_len);
1766 
1767 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
1768 
1769 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1770 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1771 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1772 					16), aeadctx->key, aeadctx->enckey_len);
1773 
1774 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1775 	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
1776 		goto dstmap_fail;
1777 
1778 	sg_param.nents = reqctx->dst_nents;
1779 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1780 	sg_param.qid = qid;
1781 	sg_param.align = 0;
1782 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1783 				  &sg_param))
1784 		goto dstmap_fail;
1785 
1786 	skb_set_transport_header(skb, transhdr_len);
1787 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
1788 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
1789 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1790 	reqctx->skb = skb;
1791 	skb_get(skb);
1792 	return skb;
1793 dstmap_fail:
1794 	kfree_skb(skb);
1795 	skb = NULL;
1796 err:
1797 	return ERR_PTR(-EINVAL);
1798 }
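
/*
 * For reference, a minimal sketch (not driver code; error handling
 * elided, key/iv/sg buffers assumed set up by the caller) of how a
 * kernel user would reach create_aead_ccm_wr() through the standard
 * crypto API:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	struct aead_request *areq;
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 8);
 *	areq = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_ad(areq, assoclen);
 *	aead_request_set_crypt(areq, src_sg, dst_sg, cryptlen, iv);
 *	err = crypto_aead_encrypt(areq);  /* -EINPROGRESS when queued */
 */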
1799 
1800 static struct sk_buff *create_gcm_wr(struct aead_request *req,
1801 				     unsigned short qid,
1802 				     int size,
1803 				     unsigned short op_type)
1804 {
1805 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1806 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1807 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1808 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1809 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
1810 	struct sk_buff *skb = NULL;
1811 	struct chcr_wr *chcr_req;
1812 	struct cpl_rx_phys_dsgl *phys_cpl;
1813 	struct phys_sge_parm sg_param;
1814 	struct scatterlist *src;
1815 	unsigned int frags = 0, transhdr_len;
1816 	unsigned int ivsize = AES_BLOCK_SIZE;
1817 	unsigned int dst_size = 0, kctx_len;
1818 	unsigned char tag_offset = 0;
1819 	unsigned int crypt_len = 0;
1820 	unsigned int authsize = crypto_aead_authsize(tfm);
1821 	unsigned char hmac_ctrl = get_hmac(authsize);
1822 	int err = 0;
1823 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1824 		GFP_ATOMIC;
1825 
1826 	/* validate key size */
1827 	if (aeadctx->enckey_len == 0)
1828 		goto err;
1829 
1830 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1831 		goto err;
1832 
1833 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1834 		goto err;
1835 
1836 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1837 	reqctx->dst = src;
1838 	if (req->src != req->dst) {
1839 		err = chcr_copy_assoc(req, aeadctx);
1840 		if (err)
1841 			return	ERR_PTR(err);
1842 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1843 					       req->assoclen);
1844 	}
1845 
1846 	if (!req->cryptlen)
1847 		/* A null payload is not supported by the hardware;
1848 		 * the software submits one block of padding instead.
1849 		 */
1850 		crypt_len = AES_BLOCK_SIZE;
1851 	else
1852 		crypt_len = req->cryptlen;
1853 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1854 					     (op_type ? -authsize : authsize));
1855 	if (reqctx->dst_nents <= 0) {
1856 		pr_err("GCM: Invalid destination sg entries\n");
1857 		goto err;
1858 	}
1859 
1861 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1862 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
1863 		AEAD_H_SIZE;
1864 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1865 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1866 	if (!skb)
1867 		goto err;
1868 
1869 	/* NIC driver is going to write the sge hdr. */
1870 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1871 
1872 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
1873 	memset(chcr_req, 0, transhdr_len);
1874 
1875 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
1876 		req->assoclen -= 8;
1877 
1878 	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1879 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
1880 					ctx->dev->tx_channel_id, 2, (ivsize ?
1881 					(req->assoclen + 1) : 0));
1882 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
1883 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1884 					req->assoclen ? 1 : 0, req->assoclen,
1885 					req->assoclen + ivsize + 1, 0);
1886 	if (req->cryptlen) {
1887 		chcr_req->sec_cpl.cipherstop_lo_authinsert =
1888 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
1889 						tag_offset, tag_offset);
1890 		chcr_req->sec_cpl.seqno_numivs =
1891 			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
1892 					CHCR_ENCRYPT_OP) ? 1 : 0,
1893 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
1894 					CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
1895 					ivsize >> 1);
1896 	} else {
1897 		chcr_req->sec_cpl.cipherstop_lo_authinsert =
1898 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
1899 		chcr_req->sec_cpl.seqno_numivs =
1900 			FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1901 					(op_type ==  CHCR_ENCRYPT_OP) ?
1902 					1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
1903 					0, 0, ivsize >> 1);
1904 	}
1905 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1906 					0, 1, dst_size);
1907 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1908 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1909 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1910 				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
1911 
1912 	/* Prepare the 16-byte IV: SALT | IV | 0x00000001 */
1914 	if (get_aead_subtype(tfm) ==
1915 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
1916 		memcpy(reqctx->iv, aeadctx->salt, 4);
1917 		memcpy(reqctx->iv + 4, req->iv, 8);
1918 	} else {
1919 		memcpy(reqctx->iv, req->iv, 12);
1920 	}
1921 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
1922 
1923 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1924 	sg_param.nents = reqctx->dst_nents;
1925 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1926 	sg_param.qid = qid;
1927 	sg_param.align = 0;
1928 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1929 				  &sg_param))
1930 		goto dstmap_fail;
1931 
1932 	skb_set_transport_header(skb, transhdr_len);
1933 
1934 	write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1935 
1936 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1937 
1938 	if (req->cryptlen) {
1939 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
1940 	} else {
1941 		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
1942 		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
1943 
1944 	}
1945 
1946 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1947 			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1948 	reqctx->skb = skb;
1949 	skb_get(skb);
1950 	return skb;
1951 
1952 dstmap_fail:
1954 	kfree_skb(skb);
1955 	skb = NULL;
1956 err:
1957 	return ERR_PTR(-EINVAL);
1958 }
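
/*
 * Note on the empty-payload path above: the hardware cannot process a
 * zero-length GCM payload, so the driver pads the destination with a
 * dummy block (aes_gcm_empty_pld_pad()) and programs the SCMD with
 * AES-CBC and authentication disabled instead of GCM/GHASH; crypt_len
 * was already bumped to AES_BLOCK_SIZE for this case earlier in the
 * function.
 */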
1959 
1962 static int chcr_aead_cra_init(struct crypto_aead *tfm)
1963 {
1964 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1965 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1966 
1967 	crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
1968 	aeadctx->null = crypto_get_default_null_skcipher();
1969 	if (IS_ERR(aeadctx->null))
1970 		return PTR_ERR(aeadctx->null);
1971 	return chcr_device_init(ctx);
1972 }
1973 
1974 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
1975 {
1976 	crypto_put_default_null_skcipher();
1977 }
1978 
1979 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
1980 					unsigned int authsize)
1981 {
1982 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1983 
1984 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
1985 	aeadctx->mayverify = VERIFY_HW;
1986 	return 0;
1987 }

1988 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
1989 				    unsigned int authsize)
1990 {
1991 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1992 	u32 maxauth = crypto_aead_maxauthsize(tfm);
1993 
1994 	/* The SHA1 authsize used in IPsec is 12, not 10, i.e. the
1995 	 * maxauthsize / 2 rule does not hold for SHA1, so the
1996 	 * authsize == 12 check must come before authsize == (maxauth >> 1).
1997 	 */
1998 	if (authsize == ICV_4) {
1999 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2000 		aeadctx->mayverify = VERIFY_HW;
2001 	} else if (authsize == ICV_6) {
2002 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2003 		aeadctx->mayverify = VERIFY_HW;
2004 	} else if (authsize == ICV_10) {
2005 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2006 		aeadctx->mayverify = VERIFY_HW;
2007 	} else if (authsize == ICV_12) {
2008 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2009 		aeadctx->mayverify = VERIFY_HW;
2010 	} else if (authsize == ICV_14) {
2011 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2012 		aeadctx->mayverify = VERIFY_HW;
2013 	} else if (authsize == (maxauth >> 1)) {
2014 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2015 		aeadctx->mayverify = VERIFY_HW;
2016 	} else if (authsize == maxauth) {
2017 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2018 		aeadctx->mayverify = VERIFY_HW;
2019 	} else {
2020 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2021 		aeadctx->mayverify = VERIFY_SW;
2022 	}
2023 	return 0;
2024 }
2025 
2027 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2028 {
2029 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2030 
2031 	switch (authsize) {
2032 	case ICV_4:
2033 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2034 		aeadctx->mayverify = VERIFY_HW;
2035 		break;
2036 	case ICV_8:
2037 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2038 		aeadctx->mayverify = VERIFY_HW;
2039 		break;
2040 	case ICV_12:
2041 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2042 		aeadctx->mayverify = VERIFY_HW;
2043 		break;
2044 	case ICV_14:
2045 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2046 		aeadctx->mayverify = VERIFY_HW;
2047 		break;
2048 	case ICV_16:
2049 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2050 		aeadctx->mayverify = VERIFY_HW;
2051 		break;
2052 	case ICV_13:
2053 	case ICV_15:
2054 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2055 		aeadctx->mayverify = VERIFY_SW;
2056 		break;
2057 	default:
2058 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2059 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2061 		return -EINVAL;
2062 	}
2063 	return 0;
2064 }
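
/*
 * Tag lengths the engine cannot emit directly (13 and 15 bytes above)
 * select VERIFY_SW: the hardware produces the full 16-byte tag and
 * chcr_aead_decrypt() requests it back (size = maxauthsize) so that
 * truncation and comparison can be done in software on completion.
 */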
2065 
2066 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2067 					  unsigned int authsize)
2068 {
2069 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2070 
2071 	switch (authsize) {
2072 	case ICV_8:
2073 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2074 		aeadctx->mayverify = VERIFY_HW;
2075 		break;
2076 	case ICV_12:
2077 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2078 		aeadctx->mayverify = VERIFY_HW;
2079 		break;
2080 	case ICV_16:
2081 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2082 		aeadctx->mayverify = VERIFY_HW;
2083 		break;
2084 	default:
2085 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2086 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2087 		return -EINVAL;
2088 	}
2089 	return 0;
2090 }
2091 
2092 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2093 				unsigned int authsize)
2094 {
2095 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2096 
2097 	switch (authsize) {
2098 	case ICV_4:
2099 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2100 		aeadctx->mayverify = VERIFY_HW;
2101 		break;
2102 	case ICV_6:
2103 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2104 		aeadctx->mayverify = VERIFY_HW;
2105 		break;
2106 	case ICV_8:
2107 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2108 		aeadctx->mayverify = VERIFY_HW;
2109 		break;
2110 	case ICV_10:
2111 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2112 		aeadctx->mayverify = VERIFY_HW;
2113 		break;
2114 	case ICV_12:
2115 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2116 		aeadctx->mayverify = VERIFY_HW;
2117 		break;
2118 	case ICV_14:
2119 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2120 		aeadctx->mayverify = VERIFY_HW;
2121 		break;
2122 	case ICV_16:
2123 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2124 		aeadctx->mayverify = VERIFY_HW;
2125 		break;
2126 	default:
2127 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2128 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2129 		return -EINVAL;
2130 	}
2131 	return 0;
2132 }
2133 
2134 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2135 				const u8 *key,
2136 				unsigned int keylen)
2137 {
2138 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2139 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2140 	unsigned char ck_size, mk_size;
2141 	int key_ctx_size = 0;
2142 
2143 	if (keylen == AES_KEYSIZE_128) {
2144 		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2145 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2146 	} else if (keylen == AES_KEYSIZE_192) {
2147 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2148 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2149 	} else if (keylen == AES_KEYSIZE_256) {
2150 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2151 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2152 	} else {
2153 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2154 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2155 		aeadctx->enckey_len = 0;
2156 		return -EINVAL;
2157 	}
2158 	/* Copy the key only after its length has been validated */
2159 	memcpy(aeadctx->key, key, keylen);
2160 	aeadctx->enckey_len = keylen;
2161 	key_ctx_size = sizeof(struct _key_ctx) +
		((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
2162 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2163 						key_ctx_size >> 4);
2164 	return 0;
2165 }
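
/*
 * The "* 2" in key_ctx_size above reserves room for two copies of the
 * AES key: create_aead_ccm_wr() writes the same key into both halves
 * of the key context, since CCM derives both the CTR cipher and the
 * CBC-MAC from one key.
 */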
2166 
2167 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2168 				    unsigned int keylen)
2169 {
2170 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2171 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2172 
2173 	if (keylen < 3) {
2174 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2175 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2176 		aeadctx->enckey_len = 0;
2177 		return	-EINVAL;
2178 	}
2179 	keylen -= 3;
2180 	memcpy(aeadctx->salt, key + keylen, 3);
2181 	return chcr_aead_ccm_setkey(aead, key, keylen);
2182 }
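
/*
 * Per RFC 4309, the key blob handed to setkey carries a trailing
 * 3-byte salt:
 *
 *	[ AES key (16/24/32 bytes) | salt (3 bytes) ]
 *
 * The salt saved here becomes bytes 1..3 of the CCM nonce built in
 * ccm_format_packet().
 */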
2183 
2184 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2185 			   unsigned int keylen)
2186 {
2187 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2188 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2189 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2190 	struct blkcipher_desc h_desc;
2191 	struct scatterlist src[1];
2192 	unsigned int ck_size;
2193 	int ret = 0, key_ctx_size = 0;
2194 
2195 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2196 	    keylen > 3) {
2197 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2198 		memcpy(aeadctx->salt, key + keylen, 4);
2199 	}
2200 	if (keylen == AES_KEYSIZE_128) {
2201 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2202 	} else if (keylen == AES_KEYSIZE_192) {
2203 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2204 	} else if (keylen == AES_KEYSIZE_256) {
2205 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2206 	} else {
2207 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2208 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2209 		aeadctx->enckey_len = 0;
2210 		pr_err("GCM: Invalid key length %d", keylen);
2211 		ret = -EINVAL;
2212 		goto out;
2213 	}
2214 
2215 	memcpy(aeadctx->key, key, keylen);
2216 	aeadctx->enckey_len = keylen;
2217 	key_ctx_size = sizeof(struct _key_ctx) +
2218 		((DIV_ROUND_UP(keylen, 16)) << 4) +
2219 		AEAD_H_SIZE;
2220 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2221 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
2222 						0, 0,
2223 						key_ctx_size >> 4);
2224 	/* Calculate H = CIPH(K, 0 repeated 16 times) with a synchronous
2225 	 * AES blkcipher; the result goes into the key context.
2226 	 */
2227 	h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
2228 	if (IS_ERR(h_desc.tfm)) {
2229 		aeadctx->enckey_len = 0;
2230 		ret = -ENOMEM;
2231 		goto out;
2232 	}
2233 	h_desc.flags = 0;
2234 	ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
2235 	if (ret) {
2236 		aeadctx->enckey_len = 0;
2237 		goto out1;
2238 	}
2239 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2240 	sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
2241 	ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
2242 
2243 out1:
2244 	crypto_free_blkcipher(h_desc.tfm);
2245 out:
2246 	return ret;
2247 }
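
/*
 * On the hash-subkey derivation above: GHASH's H is AES_K(0^128).
 * ghash_h is zeroed and then encrypted in place; since the freshly
 * allocated transform's IV is zero-initialized, one CBC block over
 * zeroes is equivalent to a single ECB encryption, so the buffer ends
 * up holding H for the key context.
 */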
2248 
2249 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2250 				   unsigned int keylen)
2251 {
2252 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2253 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2254 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2255 	/* contains both the auth and the cipher key */
2256 	struct crypto_authenc_keys keys;
2257 	unsigned int bs;
2258 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2259 	int err = 0, i, key_ctx_len = 0;
2260 	unsigned char ck_size = 0;
2261 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2262 	struct crypto_shash *base_hash = NULL;
2263 	struct algo_param param;
2264 	int align;
2265 	u8 *o_ptr = NULL;
2266 
2267 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2268 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2269 		goto out;
2270 	}
2271 
2272 	if (get_alg_config(&param, max_authsize)) {
2273 		pr_err("chcr : Unsupported digest size\n");
2274 		goto out;
2275 	}
2276 	if (keys.enckeylen == AES_KEYSIZE_128) {
2277 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2278 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2279 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2280 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2281 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2282 	} else {
2283 		pr_err("chcr : Unsupported cipher key\n");
2284 		goto out;
2285 	}
2286 
2287 	/* Copy only the encryption key. The auth key is used here to
2288 	 * generate h(ipad) and h(opad) and is not needed afterwards;
2289 	 * authkeylen is capped at the hash digest size.
2290 	 */
2291 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2292 	aeadctx->enckey_len = keys.enckeylen;
2293 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2294 			    aeadctx->enckey_len << 3);
2295 
2296 	base_hash  = chcr_alloc_shash(max_authsize);
2297 	if (IS_ERR(base_hash)) {
2298 		pr_err("chcr : Base driver cannot be loaded\n");
2299 		goto out;
2300 	}
2301 	{
2302 		SHASH_DESC_ON_STACK(shash, base_hash);
2303 		shash->tfm = base_hash;
2304 		shash->flags = crypto_shash_get_flags(base_hash);
2305 		bs = crypto_shash_blocksize(base_hash);
2306 		align = KEYCTX_ALIGN_PAD(max_authsize);
2307 		o_ptr =  actx->h_iopad + param.result_size + align;
2308 
2309 		if (keys.authkeylen > bs) {
2310 			err = crypto_shash_digest(shash, keys.authkey,
2311 						  keys.authkeylen,
2312 						  o_ptr);
2313 			if (err) {
2314 				pr_err("chcr : Base driver cannot be loaded\n");
2315 				goto out;
2316 			}
2317 			keys.authkeylen = max_authsize;
2318 		} else {
2319 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
		}
2320 
2321 		/* Compute the ipad-digest*/
2322 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2323 		memcpy(pad, o_ptr, keys.authkeylen);
2324 		for (i = 0; i < bs >> 2; i++)
2325 			*((unsigned int *)pad + i) ^= IPAD_DATA;
2326 
2327 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
2328 					      max_authsize))
2329 			goto out;
2330 		/* Compute the opad-digest */
2331 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2332 		memcpy(pad, o_ptr, keys.authkeylen);
2333 		for (i = 0; i < bs >> 2; i++)
2334 			*((unsigned int *)pad + i) ^= OPAD_DATA;
2335 
2336 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
2337 			goto out;
2338 
2339 		/* convert the ipad and opad digest to network order */
2340 		chcr_change_order(actx->h_iopad, param.result_size);
2341 		chcr_change_order(o_ptr, param.result_size);
2342 		key_ctx_len = sizeof(struct _key_ctx) +
2343 			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
2344 			(param.result_size + align) * 2;
2345 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
2346 						0, 1, key_ctx_len >> 4);
2347 		actx->auth_mode = param.auth_mode;
2348 		chcr_free_shash(base_hash);
2349 
2350 		return 0;
2351 	}
2352 out:
2353 	aeadctx->enckey_len = 0;
2354 	if (base_hash)
2355 		chcr_free_shash(base_hash);
2356 	return -EINVAL;
2357 }
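
/*
 * The ipad/opad work above precomputes the two partial HMAC states,
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * by running the compression function over one padded block each of
 * K ^ ipad and K ^ opad (chcr_compute_partial_hash()). Only these
 * partial digests go into the key context, so the raw auth key never
 * has to be kept around.
 */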
2358 
2359 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
2360 					const u8 *key, unsigned int keylen)
2361 {
2362 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2363 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2364 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2365 	struct crypto_authenc_keys keys;
2366 
2367 	/* it contains auth and cipher key both*/
2368 	int key_ctx_len = 0;
2369 	unsigned char ck_size = 0;
2370 
2371 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2372 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2373 		goto out;
2374 	}
2375 	if (keys.enckeylen == AES_KEYSIZE_128) {
2376 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2377 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2378 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2379 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2380 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2381 	} else {
2382 		pr_err("chcr : Unsupported cipher key\n");
2383 		goto out;
2384 	}
2385 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2386 	aeadctx->enckey_len = keys.enckeylen;
2387 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2388 				    aeadctx->enckey_len << 3);
2389 	key_ctx_len =  sizeof(struct _key_ctx)
2390 		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
2391 
2392 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
2393 						0, key_ctx_len >> 4);
2394 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
2395 	return 0;
2396 out:
2397 	aeadctx->enckey_len = 0;
2398 	return -EINVAL;
2399 }

2400 static int chcr_aead_encrypt(struct aead_request *req)
2401 {
2402 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2403 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2404 
2405 	reqctx->verify = VERIFY_HW;
2406 
2407 	switch (get_aead_subtype(tfm)) {
2408 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2409 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2410 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2411 				    create_authenc_wr);
2412 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2413 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2414 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2415 				    create_aead_ccm_wr);
2416 	default:
2417 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2418 				    create_gcm_wr);
2419 	}
2420 }
2421 
2422 static int chcr_aead_decrypt(struct aead_request *req)
2423 {
2424 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2425 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2426 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2427 	int size;
2428 
2429 	if (aeadctx->mayverify == VERIFY_SW) {
2430 		size = crypto_aead_maxauthsize(tfm);
2431 		reqctx->verify = VERIFY_SW;
2432 	} else {
2433 		size = 0;
2434 		reqctx->verify = VERIFY_HW;
2435 	}
2436 
2437 	switch (get_aead_subtype(tfm)) {
2438 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2439 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2440 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2441 				    create_authenc_wr);
2442 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2443 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2444 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2445 				    create_aead_ccm_wr);
2446 	default:
2447 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2448 				    create_gcm_wr);
2449 	}
2450 }
2451 
2452 static int chcr_aead_op(struct aead_request *req,
2453 			  unsigned short op_type,
2454 			  int size,
2455 			  create_wr_t create_wr_fn)
2456 {
2457 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2458 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2459 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2460 	struct sk_buff *skb;
2461 
2462 	if (ctx && !ctx->dev) {
2463 		pr_err("chcr : %s : No crypto device.\n", __func__);
2464 		return -ENXIO;
2465 	}
2466 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2467 				   ctx->tx_channel_id)) {
2468 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
2469 			return -EBUSY;
2470 	}
2471 
2472 	/* Form a WR from req */
2473 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
2474 			   op_type);
2475 
2476 	if (IS_ERR_OR_NULL(skb)) {
2477 		pr_err("chcr : %s : failed to form WR\n", __func__);
2478 		return skb ? PTR_ERR(skb) : -ENOMEM;
2479 	}
2480 
2481 	skb->dev = u_ctx->lldi.ports[0];
2482 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
2483 	chcr_send_wr(skb);
2484 	return -EINPROGRESS;
2485 }
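
/*
 * chcr_aead_op() follows the usual async-crypto contract: the work
 * request is posted to the transmit queue and -EINPROGRESS is returned
 * immediately; the caller's req->base.complete() callback fires from
 * the response path once the hardware answers. -EBUSY is returned up
 * front when the queue is full and backlogging was not requested.
 */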

2486 static struct chcr_alg_template driver_algs[] = {
2487 	/* AES-CBC */
2488 	{
2489 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2490 		.is_registered = 0,
2491 		.alg.crypto = {
2492 			.cra_name		= "cbc(aes)",
2493 			.cra_driver_name	= "cbc-aes-chcr",
2494 			.cra_priority		= CHCR_CRA_PRIORITY,
2495 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
2496 				CRYPTO_ALG_ASYNC,
2497 			.cra_blocksize		= AES_BLOCK_SIZE,
2498 			.cra_ctxsize		= sizeof(struct chcr_context)
2499 				+ sizeof(struct ablk_ctx),
2500 			.cra_alignmask		= 0,
2501 			.cra_type		= &crypto_ablkcipher_type,
2502 			.cra_module		= THIS_MODULE,
2503 			.cra_init		= chcr_cra_init,
2504 			.cra_exit		= NULL,
2505 			.cra_u.ablkcipher	= {
2506 				.min_keysize	= AES_MIN_KEY_SIZE,
2507 				.max_keysize	= AES_MAX_KEY_SIZE,
2508 				.ivsize		= AES_BLOCK_SIZE,
2509 				.setkey			= chcr_aes_cbc_setkey,
2510 				.encrypt		= chcr_aes_encrypt,
2511 				.decrypt		= chcr_aes_decrypt,
2512 			}
2513 		}
2514 	},
2515 	{
2516 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2517 		.is_registered = 0,
2518 		.alg.crypto =   {
2519 			.cra_name		= "xts(aes)",
2520 			.cra_driver_name	= "xts-aes-chcr",
2521 			.cra_priority		= CHCR_CRA_PRIORITY,
2522 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
2523 				CRYPTO_ALG_ASYNC,
2524 			.cra_blocksize		= AES_BLOCK_SIZE,
2525 			.cra_ctxsize		= sizeof(struct chcr_context) +
2526 				sizeof(struct ablk_ctx),
2527 			.cra_alignmask		= 0,
2528 			.cra_type		= &crypto_ablkcipher_type,
2529 			.cra_module		= THIS_MODULE,
2530 			.cra_init		= chcr_cra_init,
2531 			.cra_exit		= NULL,
2532 			.cra_u = {
2533 				.ablkcipher = {
2534 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
2535 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
2536 					.ivsize		= AES_BLOCK_SIZE,
2537 					.setkey		= chcr_aes_xts_setkey,
2538 					.encrypt	= chcr_aes_encrypt,
2539 					.decrypt	= chcr_aes_decrypt,
2540 				}
2541 			}
2542 		}
2543 	},
2544 	/* SHA */
2545 	{
2546 		.type = CRYPTO_ALG_TYPE_AHASH,
2547 		.is_registered = 0,
2548 		.alg.hash = {
2549 			.halg.digestsize = SHA1_DIGEST_SIZE,
2550 			.halg.base = {
2551 				.cra_name = "sha1",
2552 				.cra_driver_name = "sha1-chcr",
2553 				.cra_blocksize = SHA1_BLOCK_SIZE,
2554 			}
2555 		}
2556 	},
2557 	{
2558 		.type = CRYPTO_ALG_TYPE_AHASH,
2559 		.is_registered = 0,
2560 		.alg.hash = {
2561 			.halg.digestsize = SHA256_DIGEST_SIZE,
2562 			.halg.base = {
2563 				.cra_name = "sha256",
2564 				.cra_driver_name = "sha256-chcr",
2565 				.cra_blocksize = SHA256_BLOCK_SIZE,
2566 			}
2567 		}
2568 	},
2569 	{
2570 		.type = CRYPTO_ALG_TYPE_AHASH,
2571 		.is_registered = 0,
2572 		.alg.hash = {
2573 			.halg.digestsize = SHA224_DIGEST_SIZE,
2574 			.halg.base = {
2575 				.cra_name = "sha224",
2576 				.cra_driver_name = "sha224-chcr",
2577 				.cra_blocksize = SHA224_BLOCK_SIZE,
2578 			}
2579 		}
2580 	},
2581 	{
2582 		.type = CRYPTO_ALG_TYPE_AHASH,
2583 		.is_registered = 0,
2584 		.alg.hash = {
2585 			.halg.digestsize = SHA384_DIGEST_SIZE,
2586 			.halg.base = {
2587 				.cra_name = "sha384",
2588 				.cra_driver_name = "sha384-chcr",
2589 				.cra_blocksize = SHA384_BLOCK_SIZE,
2590 			}
2591 		}
2592 	},
2593 	{
2594 		.type = CRYPTO_ALG_TYPE_AHASH,
2595 		.is_registered = 0,
2596 		.alg.hash = {
2597 			.halg.digestsize = SHA512_DIGEST_SIZE,
2598 			.halg.base = {
2599 				.cra_name = "sha512",
2600 				.cra_driver_name = "sha512-chcr",
2601 				.cra_blocksize = SHA512_BLOCK_SIZE,
2602 			}
2603 		}
2604 	},
2605 	/* HMAC */
2606 	{
2607 		.type = CRYPTO_ALG_TYPE_HMAC,
2608 		.is_registered = 0,
2609 		.alg.hash = {
2610 			.halg.digestsize = SHA1_DIGEST_SIZE,
2611 			.halg.base = {
2612 				.cra_name = "hmac(sha1)",
2613 				.cra_driver_name = "hmac-sha1-chcr",
2614 				.cra_blocksize = SHA1_BLOCK_SIZE,
2615 			}
2616 		}
2617 	},
2618 	{
2619 		.type = CRYPTO_ALG_TYPE_HMAC,
2620 		.is_registered = 0,
2621 		.alg.hash = {
2622 			.halg.digestsize = SHA224_DIGEST_SIZE,
2623 			.halg.base = {
2624 				.cra_name = "hmac(sha224)",
2625 				.cra_driver_name = "hmac-sha224-chcr",
2626 				.cra_blocksize = SHA224_BLOCK_SIZE,
2627 			}
2628 		}
2629 	},
2630 	{
2631 		.type = CRYPTO_ALG_TYPE_HMAC,
2632 		.is_registered = 0,
2633 		.alg.hash = {
2634 			.halg.digestsize = SHA256_DIGEST_SIZE,
2635 			.halg.base = {
2636 				.cra_name = "hmac(sha256)",
2637 				.cra_driver_name = "hmac-sha256-chcr",
2638 				.cra_blocksize = SHA256_BLOCK_SIZE,
2639 			}
2640 		}
2641 	},
2642 	{
2643 		.type = CRYPTO_ALG_TYPE_HMAC,
2644 		.is_registered = 0,
2645 		.alg.hash = {
2646 			.halg.digestsize = SHA384_DIGEST_SIZE,
2647 			.halg.base = {
2648 				.cra_name = "hmac(sha384)",
2649 				.cra_driver_name = "hmac-sha384-chcr",
2650 				.cra_blocksize = SHA384_BLOCK_SIZE,
2651 			}
2652 		}
2653 	},
2654 	{
2655 		.type = CRYPTO_ALG_TYPE_HMAC,
2656 		.is_registered = 0,
2657 		.alg.hash = {
2658 			.halg.digestsize = SHA512_DIGEST_SIZE,
2659 			.halg.base = {
2660 				.cra_name = "hmac(sha512)",
2661 				.cra_driver_name = "hmac-sha512-chcr",
2662 				.cra_blocksize = SHA512_BLOCK_SIZE,
2663 			}
2664 		}
2665 	},
2666 	/* Add AEAD Algorithms */
2667 	{
2668 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
2669 		.is_registered = 0,
2670 		.alg.aead = {
2671 			.base = {
2672 				.cra_name = "gcm(aes)",
2673 				.cra_driver_name = "gcm-aes-chcr",
2674 				.cra_blocksize	= 1,
2675 				.cra_ctxsize =	sizeof(struct chcr_context) +
2676 						sizeof(struct chcr_aead_ctx) +
2677 						sizeof(struct chcr_gcm_ctx),
2678 			},
2679 			.ivsize = 12,
2680 			.maxauthsize = GHASH_DIGEST_SIZE,
2681 			.setkey = chcr_gcm_setkey,
2682 			.setauthsize = chcr_gcm_setauthsize,
2683 		}
2684 	},
2685 	{
2686 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
2687 		.is_registered = 0,
2688 		.alg.aead = {
2689 			.base = {
2690 				.cra_name = "rfc4106(gcm(aes))",
2691 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
2692 				.cra_blocksize	 = 1,
2693 				.cra_ctxsize =	sizeof(struct chcr_context) +
2694 						sizeof(struct chcr_aead_ctx) +
2695 						sizeof(struct chcr_gcm_ctx),
2696 
2697 			},
2698 			.ivsize = 8,
2699 			.maxauthsize	= GHASH_DIGEST_SIZE,
2700 			.setkey = chcr_gcm_setkey,
2701 			.setauthsize	= chcr_4106_4309_setauthsize,
2702 		}
2703 	},
2704 	{
2705 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
2706 		.is_registered = 0,
2707 		.alg.aead = {
2708 			.base = {
2709 				.cra_name = "ccm(aes)",
2710 				.cra_driver_name = "ccm-aes-chcr",
2711 				.cra_blocksize	 = 1,
2712 				.cra_ctxsize =	sizeof(struct chcr_context) +
2713 						sizeof(struct chcr_aead_ctx),
2714 
2715 			},
2716 			.ivsize = AES_BLOCK_SIZE,
2717 			.maxauthsize	= GHASH_DIGEST_SIZE,
2718 			.setkey = chcr_aead_ccm_setkey,
2719 			.setauthsize	= chcr_ccm_setauthsize,
2720 		}
2721 	},
2722 	{
2723 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
2724 		.is_registered = 0,
2725 		.alg.aead = {
2726 			.base = {
2727 				.cra_name = "rfc4309(ccm(aes))",
2728 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
2729 				.cra_blocksize	 = 1,
2730 				.cra_ctxsize =	sizeof(struct chcr_context) +
2731 						sizeof(struct chcr_aead_ctx),
2732 
2733 			},
2734 			.ivsize = 8,
2735 			.maxauthsize	= GHASH_DIGEST_SIZE,
2736 			.setkey = chcr_aead_rfc4309_setkey,
2737 			.setauthsize = chcr_4106_4309_setauthsize,
2738 		}
2739 	},
2740 	{
2741 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2742 		.is_registered = 0,
2743 		.alg.aead = {
2744 			.base = {
2745 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2746 				.cra_driver_name =
2747 					"authenc-hmac-sha1-cbc-aes-chcr",
2748 				.cra_blocksize	 = AES_BLOCK_SIZE,
2749 				.cra_ctxsize =	sizeof(struct chcr_context) +
2750 						sizeof(struct chcr_aead_ctx) +
2751 						sizeof(struct chcr_authenc_ctx),
2752 
2753 			},
2754 			.ivsize = AES_BLOCK_SIZE,
2755 			.maxauthsize = SHA1_DIGEST_SIZE,
2756 			.setkey = chcr_authenc_setkey,
2757 			.setauthsize = chcr_authenc_setauthsize,
2758 		}
2759 	},
2760 	{
2761 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2762 		.is_registered = 0,
2763 		.alg.aead = {
2764 			.base = {
2765 
2766 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2767 				.cra_driver_name =
2768 					"authenc-hmac-sha256-cbc-aes-chcr",
2769 				.cra_blocksize	 = AES_BLOCK_SIZE,
2770 				.cra_ctxsize =	sizeof(struct chcr_context) +
2771 						sizeof(struct chcr_aead_ctx) +
2772 						sizeof(struct chcr_authenc_ctx),
2773 
2774 			},
2775 			.ivsize = AES_BLOCK_SIZE,
2776 			.maxauthsize	= SHA256_DIGEST_SIZE,
2777 			.setkey = chcr_authenc_setkey,
2778 			.setauthsize = chcr_authenc_setauthsize,
2779 		}
2780 	},
2781 	{
2782 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2783 		.is_registered = 0,
2784 		.alg.aead = {
2785 			.base = {
2786 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2787 				.cra_driver_name =
2788 					"authenc-hmac-sha224-cbc-aes-chcr",
2789 				.cra_blocksize	 = AES_BLOCK_SIZE,
2790 				.cra_ctxsize =	sizeof(struct chcr_context) +
2791 						sizeof(struct chcr_aead_ctx) +
2792 						sizeof(struct chcr_authenc_ctx),
2793 			},
2794 			.ivsize = AES_BLOCK_SIZE,
2795 			.maxauthsize = SHA224_DIGEST_SIZE,
2796 			.setkey = chcr_authenc_setkey,
2797 			.setauthsize = chcr_authenc_setauthsize,
2798 		}
2799 	},
2800 	{
2801 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2802 		.is_registered = 0,
2803 		.alg.aead = {
2804 			.base = {
2805 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2806 				.cra_driver_name =
2807 					"authenc-hmac-sha384-cbc-aes-chcr",
2808 				.cra_blocksize	 = AES_BLOCK_SIZE,
2809 				.cra_ctxsize =	sizeof(struct chcr_context) +
2810 						sizeof(struct chcr_aead_ctx) +
2811 						sizeof(struct chcr_authenc_ctx),
2812 
2813 			},
2814 			.ivsize = AES_BLOCK_SIZE,
2815 			.maxauthsize = SHA384_DIGEST_SIZE,
2816 			.setkey = chcr_authenc_setkey,
2817 			.setauthsize = chcr_authenc_setauthsize,
2818 		}
2819 	},
2820 	{
2821 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2822 		.is_registered = 0,
2823 		.alg.aead = {
2824 			.base = {
2825 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2826 				.cra_driver_name =
2827 					"authenc-hmac-sha512-cbc-aes-chcr",
2828 				.cra_blocksize	 = AES_BLOCK_SIZE,
2829 				.cra_ctxsize =	sizeof(struct chcr_context) +
2830 						sizeof(struct chcr_aead_ctx) +
2831 						sizeof(struct chcr_authenc_ctx),
2832 
2833 			},
2834 			.ivsize = AES_BLOCK_SIZE,
2835 			.maxauthsize = SHA512_DIGEST_SIZE,
2836 			.setkey = chcr_authenc_setkey,
2837 			.setauthsize = chcr_authenc_setauthsize,
2838 		}
2839 	},
2840 	{
2841 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
2842 		.is_registered = 0,
2843 		.alg.aead = {
2844 			.base = {
2845 				.cra_name = "authenc(digest_null,cbc(aes))",
2846 				.cra_driver_name =
2847 					"authenc-digest_null-cbc-aes-chcr",
2848 				.cra_blocksize	 = AES_BLOCK_SIZE,
2849 				.cra_ctxsize =	sizeof(struct chcr_context) +
2850 						sizeof(struct chcr_aead_ctx) +
2851 						sizeof(struct chcr_authenc_ctx),
2852 
2853 			},
2854 			.ivsize  = AES_BLOCK_SIZE,
2855 			.maxauthsize = 0,
2856 			.setkey  = chcr_aead_digest_null_setkey,
2857 			.setauthsize = chcr_authenc_null_setauthsize,
2858 		}
2859 	},
2860 };
2861 
2862 /*
2863  *	chcr_unregister_alg - Deregister crypto algorithms from the
2864  *	kernel framework.
2865  */
2866 static int chcr_unregister_alg(void)
2867 {
2868 	int i;
2869 
2870 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2871 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2872 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2873 			if (driver_algs[i].is_registered)
2874 				crypto_unregister_alg(
2875 						&driver_algs[i].alg.crypto);
2876 			break;
2877 		case CRYPTO_ALG_TYPE_AEAD:
2878 			if (driver_algs[i].is_registered)
2879 				crypto_unregister_aead(
2880 						&driver_algs[i].alg.aead);
2881 			break;
2882 		case CRYPTO_ALG_TYPE_AHASH:
2883 			if (driver_algs[i].is_registered)
2884 				crypto_unregister_ahash(
2885 						&driver_algs[i].alg.hash);
2886 			break;
2887 		}
2888 		driver_algs[i].is_registered = 0;
2889 	}
2890 	return 0;
2891 }
2892 
2893 #define SZ_AHASH_CTX sizeof(struct chcr_context)
2894 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
2895 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
2896 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
2897 
2898 /*
2899  *	chcr_register_alg - Register crypto algorithms with the kernel framework.
2900  */
2901 static int chcr_register_alg(void)
2902 {
2903 	struct crypto_alg ai;
2904 	struct ahash_alg *a_hash;
2905 	int err = 0, i;
2906 	char *name = NULL;
2907 
2908 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2909 		if (driver_algs[i].is_registered)
2910 			continue;
2911 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2912 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2913 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
2914 			name = driver_algs[i].alg.crypto.cra_driver_name;
2915 			break;
2916 		case CRYPTO_ALG_TYPE_AEAD:
2917 			driver_algs[i].alg.aead.base.cra_priority =
2918 				CHCR_CRA_PRIORITY;
2919 			driver_algs[i].alg.aead.base.cra_flags =
2920 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
2921 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
2922 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
2923 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
2924 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
2925 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
2926 			err = crypto_register_aead(&driver_algs[i].alg.aead);
2927 			name = driver_algs[i].alg.aead.base.cra_driver_name;
2928 			break;
2929 		case CRYPTO_ALG_TYPE_AHASH:
2930 			a_hash = &driver_algs[i].alg.hash;
2931 			a_hash->update = chcr_ahash_update;
2932 			a_hash->final = chcr_ahash_final;
2933 			a_hash->finup = chcr_ahash_finup;
2934 			a_hash->digest = chcr_ahash_digest;
2935 			a_hash->export = chcr_ahash_export;
2936 			a_hash->import = chcr_ahash_import;
2937 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
2938 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
2939 			a_hash->halg.base.cra_module = THIS_MODULE;
2940 			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
2941 			a_hash->halg.base.cra_alignmask = 0;
2942 			a_hash->halg.base.cra_exit = NULL;
2943 			a_hash->halg.base.cra_type = &crypto_ahash_type;
2944 
2945 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
2946 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
2947 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
2948 				a_hash->init = chcr_hmac_init;
2949 				a_hash->setkey = chcr_ahash_setkey;
2950 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
2951 			} else {
2952 				a_hash->init = chcr_sha_init;
2953 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
2954 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
2955 			}
2956 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
2957 			ai = driver_algs[i].alg.hash.halg.base;
2958 			name = ai.cra_driver_name;
2959 			break;
2960 		}
2961 		if (err) {
2962 			pr_err("chcr : %s : Algorithm registration failed\n",
2963 			       name);
2964 			goto register_err;
2965 		}
2966 		driver_algs[i].is_registered = 1;
2968 	}
2969 	return 0;
2970 
2971 register_err:
2972 	chcr_unregister_alg();
2973 	return err;
2974 }
2975 
2976 /*
2977  *	start_crypto - Register the crypto algorithms.
2978  *	This should be called once, when the first device comes up. After
2979  *	this the kernel will start calling driver APIs for crypto operations.
2980  */
2981 int start_crypto(void)
2982 {
2983 	return chcr_register_alg();
2984 }
2985 
2986 /*
2987  *	stop_crypto - Deregister all the crypto algorithms with kernel.
2988  *	This should be called once, when the last device goes down. After
2989  *	this the kernel will not call the driver APIs for crypto operations.
2990  */
2991 int stop_crypto(void)
2992 {
2993 	chcr_unregister_alg();
2994 	return 0;
2995 }
2996