xref: /linux/drivers/crypto/hisilicon/sec2/sec_crypto.c (revision 576d7fed09c7edbae7600f29a8a3ed6c1ead904f)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/aead.h>
6 #include <crypto/algapi.h>
7 #include <crypto/authenc.h>
8 #include <crypto/des.h>
9 #include <crypto/hash.h>
10 #include <crypto/internal/aead.h>
11 #include <crypto/internal/des.h>
12 #include <crypto/sha1.h>
13 #include <crypto/sha2.h>
14 #include <crypto/skcipher.h>
15 #include <crypto/xts.h>
16 #include <linux/crypto.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/idr.h>
19 
20 #include "sec.h"
21 #include "sec_crypto.h"
22 
23 #define SEC_PRIORITY		4001
24 #define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
25 #define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
26 #define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
27 #define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
28 #define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)
29 
30 /* SEC SQE(BD) bit-field offsets and masks */
31 #define SEC_DE_OFFSET		1
32 #define SEC_CIPHER_OFFSET	4
33 #define SEC_SCENE_OFFSET	3
34 #define SEC_DST_SGL_OFFSET	2
35 #define SEC_SRC_SGL_OFFSET	7
36 #define SEC_CKEY_OFFSET		9
37 #define SEC_CMODE_OFFSET	12
38 #define SEC_AKEY_OFFSET         5
39 #define SEC_AEAD_ALG_OFFSET     11
40 #define SEC_AUTH_OFFSET		6
41 
42 #define SEC_DE_OFFSET_V3		9
43 #define SEC_SCENE_OFFSET_V3	5
44 #define SEC_CKEY_OFFSET_V3	13
45 #define SEC_CTR_CNT_OFFSET	25
46 #define SEC_CTR_CNT_ROLLOVER	2
47 #define SEC_SRC_SGL_OFFSET_V3	11
48 #define SEC_DST_SGL_OFFSET_V3	14
49 #define SEC_CALG_OFFSET_V3	4
50 #define SEC_AKEY_OFFSET_V3	9
51 #define SEC_MAC_OFFSET_V3	4
52 #define SEC_AUTH_ALG_OFFSET_V3	15
53 #define SEC_CIPHER_AUTH_V3	0xbf
54 #define SEC_AUTH_CIPHER_V3	0x40
55 #define SEC_FLAG_OFFSET		7
56 #define SEC_FLAG_MASK		0x0780
57 #define SEC_TYPE_MASK		0x0F
58 #define SEC_DONE_MASK		0x0001
59 #define SEC_ICV_MASK		0x000E
60 #define SEC_SQE_LEN_RATE_MASK	0x3
61 
62 #define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
63 #define SEC_SGL_SGE_NR		128
64 #define SEC_CIPHER_AUTH		0xfe
65 #define SEC_AUTH_CIPHER		0x1
66 #define SEC_MAX_MAC_LEN		64
67 #define SEC_MAX_AAD_LEN		65535
68 #define SEC_MAX_CCM_AAD_LEN	65279
69 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
70 
71 #define SEC_PBUF_SZ			512
72 #define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
73 #define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
74 #define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
75 			SEC_MAX_MAC_LEN * 2)
76 #define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
77 #define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
78 #define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
79 				SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
80 #define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
81 				SEC_PBUF_LEFT_SZ(depth))
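/*
 * A worked sizing example, assuming 4K pages and the 24-byte SEC_IV_SIZE
 * from sec.h: SEC_PBUF_PKG = 512 + 24 + 2 * 64 = 664 bytes, so
 * SEC_PBUF_NUM = 4096 / 664 = 6 packages fit in each page; a queue depth
 * of 256 then takes 42 full pages plus 4 leftover packages (2656 bytes).
 */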
82 
83 #define SEC_SQE_LEN_RATE	4
84 #define SEC_SQE_CFLAG		2
85 #define SEC_SQE_AEAD_FLAG	3
86 #define SEC_SQE_DONE		0x1
87 #define SEC_ICV_ERR		0x2
88 #define MIN_MAC_LEN		4
89 #define MAC_LEN_MASK		0x1U
90 #define MAX_INPUT_DATA_LEN	0xFFFE00
91 #define BITS_MASK		0xFF
92 #define BYTE_BITS		0x8
93 #define SEC_XTS_NAME_SZ		0x3
94 #define IV_CM_CAL_NUM		2
95 #define IV_CL_MASK		0x7
96 #define IV_CL_MIN		2
97 #define IV_CL_MID		4
98 #define IV_CL_MAX		8
99 #define IV_FLAGS_OFFSET	0x6
100 #define IV_CM_OFFSET		0x3
101 #define IV_LAST_BYTE1		1
102 #define IV_LAST_BYTE2		2
103 #define IV_LAST_BYTE_MASK	0xFF
104 #define IV_CTR_INIT		0x1
105 #define IV_BYTE_OFFSET		0x8
106 
107 static DEFINE_MUTEX(sec_algs_lock);
108 static unsigned int sec_available_devs;
109 
110 struct sec_skcipher {
111 	u64 alg_msk;
112 	struct skcipher_alg alg;
113 };
114 
115 struct sec_aead {
116 	u64 alg_msk;
117 	struct aead_alg alg;
118 };
119 
120 /* Cyclically pick a queue to balance load: encrypt requests use the lower half of the TFM's queues, decrypt requests the upper half */
121 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
122 {
123 	if (req->c_req.encrypt)
124 		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
125 				 ctx->hlf_q_num;
126 
127 	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
128 				 ctx->hlf_q_num;
129 }
130 
131 static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
132 {
133 	if (req->c_req.encrypt)
134 		atomic_dec(&ctx->enc_qcyclic);
135 	else
136 		atomic_dec(&ctx->dec_qcyclic);
137 }
138 
139 static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
140 {
141 	int req_id;
142 
143 	spin_lock_bh(&qp_ctx->req_lock);
144 	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
145 	spin_unlock_bh(&qp_ctx->req_lock);
146 	if (unlikely(req_id < 0)) {
147 		dev_err(req->ctx->dev, "alloc req id fail!\n");
148 		return req_id;
149 	}
150 
151 	req->qp_ctx = qp_ctx;
152 	qp_ctx->req_list[req_id] = req;
153 
154 	return req_id;
155 }
156 
157 static void sec_free_req_id(struct sec_req *req)
158 {
159 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
160 	int req_id = req->req_id;
161 
162 	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
163 		dev_err(req->ctx->dev, "free request id invalid!\n");
164 		return;
165 	}
166 
167 	qp_ctx->req_list[req_id] = NULL;
168 	req->qp_ctx = NULL;
169 
170 	spin_lock_bh(&qp_ctx->req_lock);
171 	idr_remove(&qp_ctx->req_idr, req_id);
172 	spin_unlock_bh(&qp_ctx->req_lock);
173 }
174 
175 static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
176 {
177 	struct sec_sqe *bd = resp;
178 
179 	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
180 	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
181 	status->flag = (le16_to_cpu(bd->type2.done_flag) &
182 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
183 	status->tag = le16_to_cpu(bd->type2.tag);
184 	status->err_type = bd->type2.error_type;
185 
186 	return bd->type_cipher_auth & SEC_TYPE_MASK;
187 }
188 
189 static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
190 {
191 	struct sec_sqe3 *bd3 = resp;
192 
193 	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
194 	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
195 	status->flag = (le16_to_cpu(bd3->done_flag) &
196 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
197 	status->tag = le64_to_cpu(bd3->tag);
198 	status->err_type = bd3->error_type;
199 
200 	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
201 }
202 
203 static int sec_cb_status_check(struct sec_req *req,
204 			       struct bd_status *status)
205 {
206 	struct sec_ctx *ctx = req->ctx;
207 
208 	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
209 		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
210 				    req->err_type, status->done);
211 		return -EIO;
212 	}
213 
214 	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
215 		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
216 			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
217 					    status->flag);
218 			return -EIO;
219 		}
220 	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
221 		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
222 			     status->icv == SEC_ICV_ERR)) {
223 			dev_err_ratelimited(ctx->dev,
224 					    "flag[%u], icv[%u]\n",
225 					    status->flag, status->icv);
226 			return -EBADMSG;
227 		}
228 	}
229 
230 	return 0;
231 }
232 
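/*
 * Completion callback. For v2 hardware (BD type2) the 16-bit BD tag is the
 * request id used to index req_list; for v3 (BD type3) the 64-bit tag
 * carries the request pointer itself.
 */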
233 static void sec_req_cb(struct hisi_qp *qp, void *resp)
234 {
235 	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
236 	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
237 	u8 type_supported = qp_ctx->ctx->type_supported;
238 	struct bd_status status;
239 	struct sec_ctx *ctx;
240 	struct sec_req *req;
241 	int err;
242 	u8 type;
243 
244 	if (type_supported == SEC_BD_TYPE2) {
245 		type = pre_parse_finished_bd(&status, resp);
246 		req = qp_ctx->req_list[status.tag];
247 	} else {
248 		type = pre_parse_finished_bd3(&status, resp);
249 		req = (void *)(uintptr_t)status.tag;
250 	}
251 
252 	if (unlikely(type != type_supported)) {
253 		atomic64_inc(&dfx->err_bd_cnt);
254 		pr_err("err bd type [%u]\n", type);
255 		return;
256 	}
257 
258 	if (unlikely(!req)) {
259 		atomic64_inc(&dfx->invalid_req_cnt);
260 		atomic_inc(&qp->qp_status.used);
261 		return;
262 	}
263 
264 	req->err_type = status.err_type;
265 	ctx = req->ctx;
266 	err = sec_cb_status_check(req, &status);
267 	if (err)
268 		atomic64_inc(&dfx->done_flag_cnt);
269 
270 	atomic64_inc(&dfx->recv_cnt);
271 
272 	ctx->req_op->buf_unmap(ctx, req);
273 
274 	ctx->req_op->callback(ctx, req, err);
275 }
276 
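/*
 * Queue one BD to the hardware. Once fake_req_limit requests are in flight,
 * requests without CRYPTO_TFM_REQ_MAY_BACKLOG are rejected with -EBUSY,
 * while MAY_BACKLOG requests are parked on qp_ctx->backlog and completed
 * with -EINPROGRESS from the callback path later.
 */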
277 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
278 {
279 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
280 	int ret;
281 
282 	if (ctx->fake_req_limit <=
283 	    atomic_read(&qp_ctx->qp->qp_status.used) &&
284 	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
285 		return -EBUSY;
286 
287 	spin_lock_bh(&qp_ctx->req_lock);
288 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
289 	if (ctx->fake_req_limit <=
290 	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
291 		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
292 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
293 		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
294 		spin_unlock_bh(&qp_ctx->req_lock);
295 		return -EBUSY;
296 	}
297 	spin_unlock_bh(&qp_ctx->req_lock);
298 
299 	if (unlikely(ret == -EBUSY))
300 		return -ENOBUFS;
301 
302 	if (likely(!ret)) {
303 		ret = -EINPROGRESS;
304 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
305 	}
306 
307 	return ret;
308 }
309 
310 /* Get DMA memory resources */
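/*
 * One coherent DMA allocation backs the whole queue: entry 0 of the res[]
 * array owns the buffer, and entries 1..depth-1 point at fixed-size slices
 * of it, as computed by the loops below.
 */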
311 static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
312 {
313 	u16 q_depth = res->depth;
314 	int i;
315 
316 	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
317 					 &res->c_ivin_dma, GFP_KERNEL);
318 	if (!res->c_ivin)
319 		return -ENOMEM;
320 
321 	for (i = 1; i < q_depth; i++) {
322 		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
323 		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
324 	}
325 
326 	return 0;
327 }
328 
329 static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
330 {
331 	if (res->c_ivin)
332 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
333 				  res->c_ivin, res->c_ivin_dma);
334 }
335 
336 static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
337 {
338 	u16 q_depth = res->depth;
339 	int i;
340 
341 	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
342 					 &res->a_ivin_dma, GFP_KERNEL);
343 	if (!res->a_ivin)
344 		return -ENOMEM;
345 
346 	for (i = 1; i < q_depth; i++) {
347 		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
348 		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
349 	}
350 
351 	return 0;
352 }
353 
354 static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
355 {
356 	if (res->a_ivin)
357 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
358 				  res->a_ivin, res->a_ivin_dma);
359 }
360 
361 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
362 {
363 	u16 q_depth = res->depth;
364 	int i;
365 
366 	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
367 					  &res->out_mac_dma, GFP_KERNEL);
368 	if (!res->out_mac)
369 		return -ENOMEM;
370 
371 	for (i = 1; i < q_depth; i++) {
372 		res[i].out_mac_dma = res->out_mac_dma +
373 				     i * (SEC_MAX_MAC_LEN << 1);
374 		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
375 	}
376 
377 	return 0;
378 }
379 
380 static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
381 {
382 	if (res->out_mac)
383 		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
384 				  res->out_mac, res->out_mac_dma);
385 }
386 
387 static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
388 {
389 	if (res->pbuf)
390 		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
391 				  res->pbuf, res->pbuf_dma);
392 }
393 
394 /*
395  * To improve performance, a pre-mapped pbuffer is used for small
396  * packets (< 512 bytes), avoiding per-request IOMMU translation.
397  */
398 static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
399 {
400 	u16 q_depth = res->depth;
401 	int size = SEC_PBUF_PAGE_NUM(q_depth);
402 	int pbuf_page_offset;
403 	int i, j, k;
404 
405 	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
406 				&res->pbuf_dma, GFP_KERNEL);
407 	if (!res->pbuf)
408 		return -ENOMEM;
409 
410 	/*
411 	 * Each SEC_PBUF_PKG holds the data pbuf, IV and
412 	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
413 	 * Every page holds SEC_PBUF_NUM packages, and the sec_qp_ctx
414 	 * needs one package per queue-depth entry, so SEC_PBUF_PAGE_NUM
415 	 * full pages plus the leftover bytes are needed
416 	 * for the SEC_TOTAL_PBUF_SZ.
417 	 */
418 	for (i = 0; i <= size; i++) {
419 		pbuf_page_offset = PAGE_SIZE * i;
420 		for (j = 0; j < SEC_PBUF_NUM; j++) {
421 			k = i * SEC_PBUF_NUM + j;
422 			if (k == q_depth)
423 				break;
424 			res[k].pbuf = res->pbuf +
425 				j * SEC_PBUF_PKG + pbuf_page_offset;
426 			res[k].pbuf_dma = res->pbuf_dma +
427 				j * SEC_PBUF_PKG + pbuf_page_offset;
428 		}
429 	}
430 
431 	return 0;
432 }
433 
434 static int sec_alg_resource_alloc(struct sec_ctx *ctx,
435 				  struct sec_qp_ctx *qp_ctx)
436 {
437 	struct sec_alg_res *res = qp_ctx->res;
438 	struct device *dev = ctx->dev;
439 	int ret;
440 
441 	ret = sec_alloc_civ_resource(dev, res);
442 	if (ret)
443 		return ret;
444 
445 	if (ctx->alg_type == SEC_AEAD) {
446 		ret = sec_alloc_aiv_resource(dev, res);
447 		if (ret)
448 			goto alloc_aiv_fail;
449 
450 		ret = sec_alloc_mac_resource(dev, res);
451 		if (ret)
452 			goto alloc_mac_fail;
453 	}
454 	if (ctx->pbuf_supported) {
455 		ret = sec_alloc_pbuf_resource(dev, res);
456 		if (ret) {
457 			dev_err(dev, "fail to alloc pbuf dma resource!\n");
458 			goto alloc_pbuf_fail;
459 		}
460 	}
461 
462 	return 0;
463 
464 alloc_pbuf_fail:
465 	if (ctx->alg_type == SEC_AEAD)
466 		sec_free_mac_resource(dev, qp_ctx->res);
467 alloc_mac_fail:
468 	if (ctx->alg_type == SEC_AEAD)
469 		sec_free_aiv_resource(dev, res);
470 alloc_aiv_fail:
471 	sec_free_civ_resource(dev, res);
472 	return ret;
473 }
474 
475 static void sec_alg_resource_free(struct sec_ctx *ctx,
476 				  struct sec_qp_ctx *qp_ctx)
477 {
478 	struct device *dev = ctx->dev;
479 
480 	sec_free_civ_resource(dev, qp_ctx->res);
481 
482 	if (ctx->pbuf_supported)
483 		sec_free_pbuf_resource(dev, qp_ctx->res);
484 	if (ctx->alg_type == SEC_AEAD)
485 		sec_free_mac_resource(dev, qp_ctx->res);
486 }
487 
488 static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
489 				     struct sec_qp_ctx *qp_ctx)
490 {
491 	u16 q_depth = qp_ctx->qp->sq_depth;
492 	struct device *dev = ctx->dev;
493 	int ret = -ENOMEM;
494 
495 	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
496 	if (!qp_ctx->req_list)
497 		return ret;
498 
499 	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
500 	if (!qp_ctx->res)
501 		goto err_free_req_list;
502 	qp_ctx->res->depth = q_depth;
503 
504 	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
505 	if (IS_ERR(qp_ctx->c_in_pool)) {
506 		dev_err(dev, "fail to create sgl pool for input!\n");
507 		goto err_free_res;
508 	}
509 
510 	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
511 	if (IS_ERR(qp_ctx->c_out_pool)) {
512 		dev_err(dev, "fail to create sgl pool for output!\n");
513 		goto err_free_c_in_pool;
514 	}
515 
516 	ret = sec_alg_resource_alloc(ctx, qp_ctx);
517 	if (ret)
518 		goto err_free_c_out_pool;
519 
520 	return 0;
521 
522 err_free_c_out_pool:
523 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
524 err_free_c_in_pool:
525 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
526 err_free_res:
527 	kfree(qp_ctx->res);
528 err_free_req_list:
529 	kfree(qp_ctx->req_list);
530 	return ret;
531 }
532 
533 static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
534 {
535 	struct device *dev = ctx->dev;
536 
537 	sec_alg_resource_free(ctx, qp_ctx);
538 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
539 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
540 	kfree(qp_ctx->res);
541 	kfree(qp_ctx->req_list);
542 }
543 
544 static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
545 			     int qp_ctx_id, int alg_type)
546 {
547 	struct sec_qp_ctx *qp_ctx;
548 	struct hisi_qp *qp;
549 	int ret;
550 
551 	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
552 	qp = ctx->qps[qp_ctx_id];
553 	qp->req_type = 0;
554 	qp->qp_ctx = qp_ctx;
555 	qp_ctx->qp = qp;
556 	qp_ctx->ctx = ctx;
557 
558 	qp->req_cb = sec_req_cb;
559 
560 	spin_lock_init(&qp_ctx->req_lock);
561 	idr_init(&qp_ctx->req_idr);
562 	INIT_LIST_HEAD(&qp_ctx->backlog);
563 
564 	ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
565 	if (ret)
566 		goto err_destroy_idr;
567 
568 	ret = hisi_qm_start_qp(qp, 0);
569 	if (ret < 0)
570 		goto err_resource_free;
571 
572 	return 0;
573 
574 err_resource_free:
575 	sec_free_qp_ctx_resource(ctx, qp_ctx);
576 err_destroy_idr:
577 	idr_destroy(&qp_ctx->req_idr);
578 	return ret;
579 }
580 
581 static void sec_release_qp_ctx(struct sec_ctx *ctx,
582 			       struct sec_qp_ctx *qp_ctx)
583 {
584 	hisi_qm_stop_qp(qp_ctx->qp);
585 	sec_free_qp_ctx_resource(ctx, qp_ctx);
586 	idr_destroy(&qp_ctx->req_idr);
587 }
588 
589 static int sec_ctx_base_init(struct sec_ctx *ctx)
590 {
591 	struct sec_dev *sec;
592 	int i, ret;
593 
594 	ctx->qps = sec_create_qps();
595 	if (!ctx->qps) {
596 		pr_err("Can not create sec qps!\n");
597 		return -ENODEV;
598 	}
599 
600 	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
601 	ctx->sec = sec;
602 	ctx->dev = &sec->qm.pdev->dev;
603 	ctx->hlf_q_num = sec->ctx_q_num >> 1;
604 
605 	ctx->pbuf_supported = ctx->sec->iommu_used;
606 
607 	/* Half of the queue depth serves as the fake-request (backlog) limit. */
608 	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
609 	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
610 			      GFP_KERNEL);
611 	if (!ctx->qp_ctx) {
612 		ret = -ENOMEM;
613 		goto err_destroy_qps;
614 	}
615 
616 	for (i = 0; i < sec->ctx_q_num; i++) {
617 		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
618 		if (ret)
619 			goto err_sec_release_qp_ctx;
620 	}
621 
622 	return 0;
623 
624 err_sec_release_qp_ctx:
625 	for (i = i - 1; i >= 0; i--)
626 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
627 	kfree(ctx->qp_ctx);
628 err_destroy_qps:
629 	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
630 	return ret;
631 }
632 
633 static void sec_ctx_base_uninit(struct sec_ctx *ctx)
634 {
635 	int i;
636 
637 	for (i = 0; i < ctx->sec->ctx_q_num; i++)
638 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
639 
640 	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
641 	kfree(ctx->qp_ctx);
642 }
643 
644 static int sec_cipher_init(struct sec_ctx *ctx)
645 {
646 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
647 
648 	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
649 					  &c_ctx->c_key_dma, GFP_KERNEL);
650 	if (!c_ctx->c_key)
651 		return -ENOMEM;
652 
653 	return 0;
654 }
655 
656 static void sec_cipher_uninit(struct sec_ctx *ctx)
657 {
658 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
659 
660 	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
661 	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
662 			  c_ctx->c_key, c_ctx->c_key_dma);
663 }
664 
665 static int sec_auth_init(struct sec_ctx *ctx)
666 {
667 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
668 
669 	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
670 					  &a_ctx->a_key_dma, GFP_KERNEL);
671 	if (!a_ctx->a_key)
672 		return -ENOMEM;
673 
674 	return 0;
675 }
676 
677 static void sec_auth_uninit(struct sec_ctx *ctx)
678 {
679 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
680 
681 	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
682 	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
683 			  a_ctx->a_key, a_ctx->a_key_dma);
684 }
685 
686 static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
687 {
688 	const char *alg = crypto_tfm_alg_name(&tfm->base);
689 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
690 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
691 
692 	c_ctx->fallback = false;
693 
694 	/* Currently, only XTS mode needs a fallback tfm when a 192-bit key is used */
695 	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
696 		return 0;
697 
698 	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
699 						  CRYPTO_ALG_NEED_FALLBACK);
700 	if (IS_ERR(c_ctx->fbtfm)) {
701 		pr_err("failed to alloc xts mode fallback tfm!\n");
702 		return PTR_ERR(c_ctx->fbtfm);
703 	}
704 
705 	return 0;
706 }
707 
708 static int sec_skcipher_init(struct crypto_skcipher *tfm)
709 {
710 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
711 	int ret;
712 
713 	ctx->alg_type = SEC_SKCIPHER;
714 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
715 	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
716 	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
717 		pr_err("get error skcipher iv size!\n");
718 		return -EINVAL;
719 	}
720 
721 	ret = sec_ctx_base_init(ctx);
722 	if (ret)
723 		return ret;
724 
725 	ret = sec_cipher_init(ctx);
726 	if (ret)
727 		goto err_cipher_init;
728 
729 	ret = sec_skcipher_fbtfm_init(tfm);
730 	if (ret)
731 		goto err_fbtfm_init;
732 
733 	return 0;
734 
735 err_fbtfm_init:
736 	sec_cipher_uninit(ctx);
737 err_cipher_init:
738 	sec_ctx_base_uninit(ctx);
739 	return ret;
740 }
741 
742 static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
743 {
744 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
745 
746 	if (ctx->c_ctx.fbtfm)
747 		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
748 
749 	sec_cipher_uninit(ctx);
750 	sec_ctx_base_uninit(ctx);
751 }
752 
753 static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
754 				    const u32 keylen,
755 				    const enum sec_cmode c_mode)
756 {
757 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
758 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
759 	int ret;
760 
761 	ret = verify_skcipher_des3_key(tfm, key);
762 	if (ret)
763 		return ret;
764 
765 	switch (keylen) {
766 	case SEC_DES3_2KEY_SIZE:
767 		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
768 		break;
769 	case SEC_DES3_3KEY_SIZE:
770 		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
771 		break;
772 	default:
773 		return -EINVAL;
774 	}
775 
776 	return 0;
777 }
778 
779 static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
780 				       const u32 keylen,
781 				       const enum sec_cmode c_mode)
782 {
783 	if (c_mode == SEC_CMODE_XTS) {
784 		switch (keylen) {
785 		case SEC_XTS_MIN_KEY_SIZE:
786 			c_ctx->c_key_len = SEC_CKEY_128BIT;
787 			break;
788 		case SEC_XTS_MID_KEY_SIZE:
789 			c_ctx->fallback = true;
790 			break;
791 		case SEC_XTS_MAX_KEY_SIZE:
792 			c_ctx->c_key_len = SEC_CKEY_256BIT;
793 			break;
794 		default:
795 			pr_err("hisi_sec2: xts mode key error!\n");
796 			return -EINVAL;
797 		}
798 	} else {
799 		if (c_ctx->c_alg == SEC_CALG_SM4 &&
800 		    keylen != AES_KEYSIZE_128) {
801 			pr_err("hisi_sec2: sm4 key error!\n");
802 			return -EINVAL;
803 		} else {
804 			switch (keylen) {
805 			case AES_KEYSIZE_128:
806 				c_ctx->c_key_len = SEC_CKEY_128BIT;
807 				break;
808 			case AES_KEYSIZE_192:
809 				c_ctx->c_key_len = SEC_CKEY_192BIT;
810 				break;
811 			case AES_KEYSIZE_256:
812 				c_ctx->c_key_len = SEC_CKEY_256BIT;
813 				break;
814 			default:
815 				pr_err("hisi_sec2: aes key error!\n");
816 				return -EINVAL;
817 			}
818 		}
819 	}
820 
821 	return 0;
822 }
823 
824 static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
825 			       const u32 keylen, const enum sec_calg c_alg,
826 			       const enum sec_cmode c_mode)
827 {
828 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
829 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
830 	struct device *dev = ctx->dev;
831 	int ret;
832 
833 	if (c_mode == SEC_CMODE_XTS) {
834 		ret = xts_verify_key(tfm, key, keylen);
835 		if (ret) {
836 			dev_err(dev, "xts mode key err!\n");
837 			return ret;
838 		}
839 	}
840 
841 	c_ctx->c_alg  = c_alg;
842 	c_ctx->c_mode = c_mode;
843 
844 	switch (c_alg) {
845 	case SEC_CALG_3DES:
846 		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
847 		break;
848 	case SEC_CALG_AES:
849 	case SEC_CALG_SM4:
850 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
851 		break;
852 	default:
853 		return -EINVAL;
854 	}
855 
856 	if (ret) {
857 		dev_err(dev, "set sec key err!\n");
858 		return ret;
859 	}
860 
861 	memcpy(c_ctx->c_key, key, keylen);
862 	if (c_ctx->fallback && c_ctx->fbtfm) {
863 		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
864 		if (ret) {
865 			dev_err(dev, "failed to set fallback skcipher key!\n");
866 			return ret;
867 		}
868 	}
869 	return 0;
870 }
871 
872 #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
873 static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
874 	u32 keylen)							\
875 {									\
876 	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
877 }
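/*
 * Each invocation below expands to a thin wrapper, e.g. sec_setkey_aes_cbc(),
 * that calls sec_skcipher_setkey() with the fixed algorithm and mode.
 */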
878 
879 GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
880 GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
881 GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
882 GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
883 GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
884 GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
885 GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
886 GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
887 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
888 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
889 GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
890 GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
891 GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
892 
893 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
894 			struct scatterlist *src)
895 {
896 	struct sec_aead_req *a_req = &req->aead_req;
897 	struct aead_request *aead_req = a_req->aead_req;
898 	struct sec_cipher_req *c_req = &req->c_req;
899 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
900 	struct device *dev = ctx->dev;
901 	int copy_size, pbuf_length;
902 	int req_id = req->req_id;
903 	struct crypto_aead *tfm;
904 	size_t authsize;
905 	u8 *mac_offset;
906 
907 	if (ctx->alg_type == SEC_AEAD)
908 		copy_size = aead_req->cryptlen + aead_req->assoclen;
909 	else
910 		copy_size = c_req->c_len;
911 
912 	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
913 			qp_ctx->res[req_id].pbuf, copy_size);
914 	if (unlikely(pbuf_length != copy_size)) {
915 		dev_err(dev, "copy src data to pbuf error!\n");
916 		return -EINVAL;
917 	}
918 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
919 		tfm = crypto_aead_reqtfm(aead_req);
920 		authsize = crypto_aead_authsize(tfm);
921 		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
922 		memcpy(a_req->out_mac, mac_offset, authsize);
923 	}
924 
925 	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
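	/* pbuf mode works in place: input and output share one DMA buffer */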
926 	c_req->c_out_dma = req->in_dma;
927 
928 	return 0;
929 }
930 
931 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
932 			struct scatterlist *dst)
933 {
934 	struct aead_request *aead_req = req->aead_req.aead_req;
935 	struct sec_cipher_req *c_req = &req->c_req;
936 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
937 	int copy_size, pbuf_length;
938 	int req_id = req->req_id;
939 
940 	if (ctx->alg_type == SEC_AEAD)
941 		copy_size = c_req->c_len + aead_req->assoclen;
942 	else
943 		copy_size = c_req->c_len;
944 
945 	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
946 			qp_ctx->res[req_id].pbuf, copy_size);
947 	if (unlikely(pbuf_length != copy_size))
948 		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
949 }
950 
951 static int sec_aead_mac_init(struct sec_aead_req *req)
952 {
953 	struct aead_request *aead_req = req->aead_req;
954 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
955 	size_t authsize = crypto_aead_authsize(tfm);
956 	u8 *mac_out = req->out_mac;
957 	struct scatterlist *sgl = aead_req->src;
958 	size_t copy_size;
959 	off_t skip_size;
960 
961 	/* Copy input mac */
962 	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
963 	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
964 				       authsize, skip_size);
965 	if (unlikely(copy_size != authsize))
966 		return -EINVAL;
967 
968 	return 0;
969 }
970 
971 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
972 			  struct scatterlist *src, struct scatterlist *dst)
973 {
974 	struct sec_cipher_req *c_req = &req->c_req;
975 	struct sec_aead_req *a_req = &req->aead_req;
976 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
977 	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
978 	struct device *dev = ctx->dev;
979 	int ret;
980 
981 	if (req->use_pbuf) {
982 		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
983 		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
984 		if (ctx->alg_type == SEC_AEAD) {
985 			a_req->a_ivin = res->a_ivin;
986 			a_req->a_ivin_dma = res->a_ivin_dma;
987 			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
988 			a_req->out_mac_dma = res->pbuf_dma +
989 					SEC_PBUF_MAC_OFFSET;
990 		}
991 		ret = sec_cipher_pbuf_map(ctx, req, src);
992 
993 		return ret;
994 	}
995 	c_req->c_ivin = res->c_ivin;
996 	c_req->c_ivin_dma = res->c_ivin_dma;
997 	if (ctx->alg_type == SEC_AEAD) {
998 		a_req->a_ivin = res->a_ivin;
999 		a_req->a_ivin_dma = res->a_ivin_dma;
1000 		a_req->out_mac = res->out_mac;
1001 		a_req->out_mac_dma = res->out_mac_dma;
1002 	}
1003 
1004 	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
1005 						qp_ctx->c_in_pool,
1006 						req->req_id,
1007 						&req->in_dma);
1008 	if (IS_ERR(req->in)) {
1009 		dev_err(dev, "fail to dma map input sgl buffers!\n");
1010 		return PTR_ERR(req->in);
1011 	}
1012 
1013 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
1014 		ret = sec_aead_mac_init(a_req);
1015 		if (unlikely(ret)) {
1016 			dev_err(dev, "fail to init mac data for ICV!\n");
1017 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1018 			return ret;
1019 		}
1020 	}
1021 
1022 	if (dst == src) {
1023 		c_req->c_out = req->in;
1024 		c_req->c_out_dma = req->in_dma;
1025 	} else {
1026 		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
1027 							     qp_ctx->c_out_pool,
1028 							     req->req_id,
1029 							     &c_req->c_out_dma);
1030 
1031 		if (IS_ERR(c_req->c_out)) {
1032 			dev_err(dev, "fail to dma map output sgl buffers!\n");
1033 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1034 			return PTR_ERR(c_req->c_out);
1035 		}
1036 	}
1037 
1038 	return 0;
1039 }
1040 
1041 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
1042 			     struct scatterlist *src, struct scatterlist *dst)
1043 {
1044 	struct sec_cipher_req *c_req = &req->c_req;
1045 	struct device *dev = ctx->dev;
1046 
1047 	if (req->use_pbuf) {
1048 		sec_cipher_pbuf_unmap(ctx, req, dst);
1049 	} else {
1050 		if (dst != src)
1051 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1052 
1053 		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
1054 	}
1055 }
1056 
1057 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1058 {
1059 	struct skcipher_request *sq = req->c_req.sk_req;
1060 
1061 	return sec_cipher_map(ctx, req, sq->src, sq->dst);
1062 }
1063 
1064 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1065 {
1066 	struct skcipher_request *sq = req->c_req.sk_req;
1067 
1068 	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
1069 }
1070 
1071 static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
1072 				struct crypto_authenc_keys *keys)
1073 {
1074 	switch (keys->enckeylen) {
1075 	case AES_KEYSIZE_128:
1076 		c_ctx->c_key_len = SEC_CKEY_128BIT;
1077 		break;
1078 	case AES_KEYSIZE_192:
1079 		c_ctx->c_key_len = SEC_CKEY_192BIT;
1080 		break;
1081 	case AES_KEYSIZE_256:
1082 		c_ctx->c_key_len = SEC_CKEY_256BIT;
1083 		break;
1084 	default:
1085 		pr_err("hisi_sec2: aead aes key error!\n");
1086 		return -EINVAL;
1087 	}
1088 	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
1089 
1090 	return 0;
1091 }
1092 
1093 static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
1094 				 struct crypto_authenc_keys *keys)
1095 {
1096 	struct crypto_shash *hash_tfm = ctx->hash_tfm;
1097 	int blocksize, digestsize, ret;
1098 
1099 	if (!keys->authkeylen) {
1100 		pr_err("hisi_sec2: aead auth key error!\n");
1101 		return -EINVAL;
1102 	}
1103 
1104 	blocksize = crypto_shash_blocksize(hash_tfm);
1105 	digestsize = crypto_shash_digestsize(hash_tfm);
1106 	if (keys->authkeylen > blocksize) {
1107 		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
1108 					      keys->authkeylen, ctx->a_key);
1109 		if (ret) {
1110 			pr_err("hisi_sec2: aead auth digest error!\n");
1111 			return -EINVAL;
1112 		}
1113 		ctx->a_key_len = digestsize;
1114 	} else {
1115 		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
1116 		ctx->a_key_len = keys->authkeylen;
1117 	}
1118 
1119 	return 0;
1120 }
1121 
1122 static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
1123 {
1124 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
1125 	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
1126 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1127 
1128 	if (unlikely(a_ctx->fallback_aead_tfm))
1129 		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
1130 
1131 	return 0;
1132 }
1133 
1134 static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
1135 				    struct crypto_aead *tfm, const u8 *key,
1136 				    unsigned int keylen)
1137 {
1138 	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
1139 	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
1140 			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
1141 	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
1142 }
1143 
1144 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1145 			   const u32 keylen, const enum sec_hash_alg a_alg,
1146 			   const enum sec_calg c_alg,
1147 			   const enum sec_mac_len mac_len,
1148 			   const enum sec_cmode c_mode)
1149 {
1150 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1151 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1152 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1153 	struct device *dev = ctx->dev;
1154 	struct crypto_authenc_keys keys;
1155 	int ret;
1156 
1157 	ctx->a_ctx.a_alg = a_alg;
1158 	ctx->c_ctx.c_alg = c_alg;
1159 	ctx->a_ctx.mac_len = mac_len;
1160 	c_ctx->c_mode = c_mode;
1161 
1162 	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
1163 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
1164 		if (ret) {
1165 			dev_err(dev, "set sec aes ccm cipher key err!\n");
1166 			return ret;
1167 		}
1168 		memcpy(c_ctx->c_key, key, keylen);
1169 
1170 		if (unlikely(a_ctx->fallback_aead_tfm)) {
1171 			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
1172 			if (ret)
1173 				return ret;
1174 		}
1175 
1176 		return 0;
1177 	}
1178 
1179 	if (crypto_authenc_extractkeys(&keys, key, keylen))
1180 		goto bad_key;
1181 
1182 	ret = sec_aead_aes_set_key(c_ctx, &keys);
1183 	if (ret) {
1184 		dev_err(dev, "set sec cipher key err!\n");
1185 		goto bad_key;
1186 	}
1187 
1188 	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
1189 	if (ret) {
1190 		dev_err(dev, "set sec auth key err!\n");
1191 		goto bad_key;
1192 	}
1193 
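	/*
	 * The SQE encodes both lengths divided by SEC_SQE_LEN_RATE (4),
	 * so they must be 4-byte multiples.
	 */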
1194 	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
1195 	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
1196 		dev_err(dev, "MAC or AUTH key length error!\n");
1197 		goto bad_key;
1198 	}
1199 
1200 	return 0;
1201 
1202 bad_key:
1203 	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
1204 	return -EINVAL;
1205 }
1206 
1208 #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
1209 static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
1210 	u32 keylen)							\
1211 {									\
1212 	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
1213 }
1214 
1215 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
1216 			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
1217 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
1218 			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
1219 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
1220 			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
1221 GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
1222 			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
1223 GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
1224 			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
1225 GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
1226 			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
1227 GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
1228 			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
1229 
1230 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1231 {
1232 	struct aead_request *aq = req->aead_req.aead_req;
1233 
1234 	return sec_cipher_map(ctx, req, aq->src, aq->dst);
1235 }
1236 
1237 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1238 {
1239 	struct aead_request *aq = req->aead_req.aead_req;
1240 
1241 	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
1242 }
1243 
1244 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
1245 {
1246 	int ret;
1247 
1248 	ret = ctx->req_op->buf_map(ctx, req);
1249 	if (unlikely(ret))
1250 		return ret;
1251 
1252 	ctx->req_op->do_transfer(ctx, req);
1253 
1254 	ret = ctx->req_op->bd_fill(ctx, req);
1255 	if (unlikely(ret))
1256 		goto unmap_req_buf;
1257 
1258 	return ret;
1259 
1260 unmap_req_buf:
1261 	ctx->req_op->buf_unmap(ctx, req);
1262 	return ret;
1263 }
1264 
1265 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
1266 {
1267 	ctx->req_op->buf_unmap(ctx, req);
1268 }
1269 
1270 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1271 {
1272 	struct skcipher_request *sk_req = req->c_req.sk_req;
1273 	struct sec_cipher_req *c_req = &req->c_req;
1274 
1275 	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
1276 }
1277 
1278 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1279 {
1280 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1281 	struct sec_cipher_req *c_req = &req->c_req;
1282 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1283 	u8 scene, sa_type, da_type;
1284 	u8 bd_type, cipher;
1285 	u8 de = 0;
1286 
1287 	memset(sec_sqe, 0, sizeof(struct sec_sqe));
1288 
1289 	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1290 	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1291 	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
1292 	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1293 
1294 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
1295 						SEC_CMODE_OFFSET);
1296 	sec_sqe->type2.c_alg = c_ctx->c_alg;
1297 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1298 						SEC_CKEY_OFFSET);
1299 
1300 	bd_type = SEC_BD_TYPE2;
1301 	if (c_req->encrypt)
1302 		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
1303 	else
1304 		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
1305 	sec_sqe->type_cipher_auth = bd_type | cipher;
1306 
1307 	/* Set destination and source address type */
1308 	if (req->use_pbuf) {
1309 		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
1310 		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
1311 	} else {
1312 		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
1313 		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
1314 	}
1315 
1316 	sec_sqe->sdm_addr_type |= da_type;
1317 	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
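	/* The DE bit marks an out-of-place operation (src and dst differ) */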
1318 	if (req->in_dma != c_req->c_out_dma)
1319 		de = 0x1 << SEC_DE_OFFSET;
1320 
1321 	sec_sqe->sds_sa_type = (de | scene | sa_type);
1322 
1323 	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
1324 	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
1325 
1326 	return 0;
1327 }
1328 
1329 static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1330 {
1331 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1332 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1333 	struct sec_cipher_req *c_req = &req->c_req;
1334 	u32 bd_param = 0;
1335 	u16 cipher;
1336 
1337 	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
1338 
1339 	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1340 	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1341 	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
1342 	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1343 
1344 	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
1345 						c_ctx->c_mode;
1346 	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1347 						SEC_CKEY_OFFSET_V3);
1348 
1349 	if (c_req->encrypt)
1350 		cipher = SEC_CIPHER_ENC;
1351 	else
1352 		cipher = SEC_CIPHER_DEC;
1353 	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
1354 
1355 	/* Set the CTR counter rollover width to 128 bits */
1356 	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
1357 					SEC_CTR_CNT_OFFSET);
1358 
1359 	if (req->use_pbuf) {
1360 		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
1361 		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
1362 	} else {
1363 		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
1364 		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
1365 	}
1366 
1367 	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
1368 	if (req->in_dma != c_req->c_out_dma)
1369 		bd_param |= 0x1 << SEC_DE_OFFSET_V3;
1370 
1371 	bd_param |= SEC_BD_TYPE3;
1372 	sec_sqe3->bd_param = cpu_to_le32(bd_param);
1373 
1374 	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
1375 	sec_sqe3->tag = cpu_to_le64((unsigned long)(uintptr_t)req);
1376 
1377 	return 0;
1378 }
1379 
1380 /* increment counter (128-bit int) */
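/*
 * Example: adding nums = 2 to a counter whose last two bytes are 0x00 0xff
 * carries into the next byte and yields 0x01 0x01.
 */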
1381 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
1382 {
1383 	do {
1384 		--bits;
1385 		nums += counter[bits];
1386 		counter[bits] = nums & BITS_MASK;
1387 		nums >>= BYTE_BITS;
1388 	} while (bits && nums);
1389 }
1390 
1391 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
1392 {
1393 	struct aead_request *aead_req = req->aead_req.aead_req;
1394 	struct skcipher_request *sk_req = req->c_req.sk_req;
1395 	u32 iv_size = req->ctx->c_ctx.ivsize;
1396 	struct scatterlist *sgl;
1397 	unsigned int cryptlen;
1398 	size_t sz;
1399 	u8 *iv;
1400 
1401 	if (req->c_req.encrypt)
1402 		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
1403 	else
1404 		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
1405 
1406 	if (alg_type == SEC_SKCIPHER) {
1407 		iv = sk_req->iv;
1408 		cryptlen = sk_req->cryptlen;
1409 	} else {
1410 		iv = aead_req->iv;
1411 		cryptlen = aead_req->cryptlen;
1412 	}
1413 
1414 	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
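		/* CBC: the next IV is the last ciphertext block */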
1415 		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
1416 					cryptlen - iv_size);
1417 		if (unlikely(sz != iv_size))
1418 			dev_err(req->ctx->dev, "copy output iv error!\n");
1419 	} else {
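		/* CTR: advance the counter by the number of blocks processed, rounded up */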
1420 		sz = cryptlen / iv_size;
1421 		if (cryptlen % iv_size)
1422 			sz += 1;
1423 		ctr_iv_inc(iv, iv_size, sz);
1424 	}
1425 }
1426 
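/* Pop one backlogged request once the queue drains below fake_req_limit */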
1427 static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
1428 				struct sec_qp_ctx *qp_ctx)
1429 {
1430 	struct sec_req *backlog_req = NULL;
1431 
1432 	spin_lock_bh(&qp_ctx->req_lock);
1433 	if (ctx->fake_req_limit >=
1434 	    atomic_read(&qp_ctx->qp->qp_status.used) &&
1435 	    !list_empty(&qp_ctx->backlog)) {
1436 		backlog_req = list_first_entry(&qp_ctx->backlog,
1437 				typeof(*backlog_req), backlog_head);
1438 		list_del(&backlog_req->backlog_head);
1439 	}
1440 	spin_unlock_bh(&qp_ctx->req_lock);
1441 
1442 	return backlog_req;
1443 }
1444 
1445 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1446 				  int err)
1447 {
1448 	struct skcipher_request *sk_req = req->c_req.sk_req;
1449 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1450 	struct skcipher_request *backlog_sk_req;
1451 	struct sec_req *backlog_req;
1452 
1453 	sec_free_req_id(req);
1454 
1455 	/* CBC/CTR mode outputs the next IV after encryption */
1456 	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1457 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
1458 		sec_update_iv(req, SEC_SKCIPHER);
1459 
1460 	while (1) {
1461 		backlog_req = sec_back_req_clear(ctx, qp_ctx);
1462 		if (!backlog_req)
1463 			break;
1464 
1465 		backlog_sk_req = backlog_req->c_req.sk_req;
1466 		skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
1467 		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
1468 	}
1469 
1470 	skcipher_request_complete(sk_req, err);
1471 }
1472 
1473 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
1474 {
1475 	struct aead_request *aead_req = req->aead_req.aead_req;
1476 	struct sec_cipher_req *c_req = &req->c_req;
1477 	struct sec_aead_req *a_req = &req->aead_req;
1478 	size_t authsize = ctx->a_ctx.mac_len;
1479 	u32 data_size = aead_req->cryptlen;
1480 	u8 flag = 0;
1481 	u8 cm, cl;
1482 
1483 	/* the specification has been checked in aead_iv_demension_check() */
1484 	cl = c_req->c_ivin[0] + 1;
1485 	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
1486 	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
1487 	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
1488 
1489 	/* the last 3 bits hold L' */
1490 	flag |= c_req->c_ivin[0] & IV_CL_MASK;
1491 
1492 	/* M' = (authsize - 2) / 2 occupies bits 3~5; the AAD flag is bit 6 */
1493 	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
1494 	flag |= cm << IV_CM_OFFSET;
1495 	if (aead_req->assoclen)
1496 		flag |= 0x01 << IV_FLAGS_OFFSET;
1497 
1498 	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
1499 	a_req->a_ivin[0] = flag;
1500 
1501 	/*
1502 	 * The last 32 bits hold the counter's initial value, but the
1503 	 * nonce occupies the first 16 of them; the tail 16 bits are
1504 	 * filled with the cipher text length.
1505 	 */
1506 	if (!c_req->encrypt)
1507 		data_size = aead_req->cryptlen - authsize;
1508 
1509 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
1510 			data_size & IV_LAST_BYTE_MASK;
1511 	data_size >>= IV_BYTE_OFFSET;
1512 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
1513 			data_size & IV_LAST_BYTE_MASK;
1514 }
1515 
1516 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
1517 {
1518 	struct aead_request *aead_req = req->aead_req.aead_req;
1519 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
1520 	size_t authsize = crypto_aead_authsize(tfm);
1521 	struct sec_cipher_req *c_req = &req->c_req;
1522 	struct sec_aead_req *a_req = &req->aead_req;
1523 
1524 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
1525 
1526 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
1527 		/*
1528 		 * CCM 16-byte Cipher_IV: {1B_Flag,13B_IV,2B_counter};
1529 		 * the counter must be set to 0x01
1530 		 */
1531 		ctx->a_ctx.mac_len = authsize;
1532 		/* CCM 16-byte Auth_IV: {1B_AFlag,13B_IV,2B_Ptext_length} */
1533 		set_aead_auth_iv(ctx, req);
1534 	}
1535 
1536 	/* GCM 12Byte Cipher_IV == Auth_IV */
1537 	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
1538 		ctx->a_ctx.mac_len = authsize;
1539 		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
1540 	}
1541 }
1542 
1543 static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
1544 				 struct sec_req *req, struct sec_sqe *sec_sqe)
1545 {
1546 	struct sec_aead_req *a_req = &req->aead_req;
1547 	struct aead_request *aq = a_req->aead_req;
1548 
1549 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1550 	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
1551 
1552 	/* in CCM/GCM mode, {A_Alg, AKey_Len, MAC_Len} need not be set */
1553 	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
1554 	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1555 	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
1556 
1557 	if (dir)
1558 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1559 	else
1560 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1561 
1562 	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
1563 	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
1564 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1565 
1566 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1567 }
1568 
1569 static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
1570 				    struct sec_req *req, struct sec_sqe3 *sqe3)
1571 {
1572 	struct sec_aead_req *a_req = &req->aead_req;
1573 	struct aead_request *aq = a_req->aead_req;
1574 
1575 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1576 	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
1577 
1578 	/* in CCM/GCM mode, {A_Alg, AKey_Len, MAC_Len} need not be set */
1579 	sqe3->a_key_addr = sqe3->c_key_addr;
1580 	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1581 	sqe3->auth_mac_key |= SEC_NO_AUTH;
1582 
1583 	if (dir)
1584 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1585 	else
1586 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1587 
1588 	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
1589 	sqe3->auth_src_offset = cpu_to_le16(0x0);
1590 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1591 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1592 }
1593 
1594 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
1595 			       struct sec_req *req, struct sec_sqe *sec_sqe)
1596 {
1597 	struct sec_aead_req *a_req = &req->aead_req;
1598 	struct sec_cipher_req *c_req = &req->c_req;
1599 	struct aead_request *aq = a_req->aead_req;
1600 
1601 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
1602 
1603 	sec_sqe->type2.mac_key_alg =
1604 			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
1605 
1606 	sec_sqe->type2.mac_key_alg |=
1607 			cpu_to_le32((u32)((ctx->a_key_len) /
1608 			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
1609 
1610 	sec_sqe->type2.mac_key_alg |=
1611 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
1612 
1613 	if (dir) {
1614 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
1615 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1616 	} else {
1617 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
1618 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1619 	}
1620 	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
1621 
1622 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1623 
1624 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1625 }
1626 
1627 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1628 {
1629 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1630 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1631 	int ret;
1632 
1633 	ret = sec_skcipher_bd_fill(ctx, req);
1634 	if (unlikely(ret)) {
1635 		dev_err(ctx->dev, "skcipher bd fill is error!\n");
1636 		return ret;
1637 	}
1638 
1639 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1640 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1641 		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1642 	else
1643 		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1644 
1645 	return 0;
1646 }
1647 
1648 static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
1649 				   struct sec_req *req, struct sec_sqe3 *sqe3)
1650 {
1651 	struct sec_aead_req *a_req = &req->aead_req;
1652 	struct sec_cipher_req *c_req = &req->c_req;
1653 	struct aead_request *aq = a_req->aead_req;
1654 
1655 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
1656 
1657 	sqe3->auth_mac_key |=
1658 			cpu_to_le32((u32)(ctx->mac_len /
1659 			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
1660 
1661 	sqe3->auth_mac_key |=
1662 			cpu_to_le32((u32)(ctx->a_key_len /
1663 			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
1664 
1665 	sqe3->auth_mac_key |=
1666 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
1667 
1668 	if (dir) {
1669 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
1670 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1671 	} else {
1672 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
1673 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1674 	}
1675 	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
1676 
1677 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1678 
1679 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1680 }
1681 
1682 static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1683 {
1684 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1685 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1686 	int ret;
1687 
1688 	ret = sec_skcipher_bd_fill_v3(ctx, req);
1689 	if (unlikely(ret)) {
1690 		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
1691 		return ret;
1692 	}
1693 
1694 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1695 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1696 		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
1697 					req, sec_sqe3);
1698 	else
1699 		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
1700 				       req, sec_sqe3);
1701 
1702 	return 0;
1703 }
1704 
1705 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
1706 {
1707 	struct aead_request *a_req = req->aead_req.aead_req;
1708 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
1709 	struct sec_aead_req *aead_req = &req->aead_req;
1710 	struct sec_cipher_req *c_req = &req->c_req;
1711 	size_t authsize = crypto_aead_authsize(tfm);
1712 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1713 	struct aead_request *backlog_aead_req;
1714 	struct sec_req *backlog_req;
1715 	size_t sz;
1716 
1717 	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
1718 		sec_update_iv(req, SEC_AEAD);
1719 
1720 	/* Copy output mac */
1721 	if (!err && c_req->encrypt) {
1722 		struct scatterlist *sgl = a_req->dst;
1723 
1724 		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
1725 					  aead_req->out_mac,
1726 					  authsize, a_req->cryptlen +
1727 					  a_req->assoclen);
1728 		if (unlikely(sz != authsize)) {
1729 			dev_err(c->dev, "copy out mac err!\n");
1730 			err = -EINVAL;
1731 		}
1732 	}
1733 
1734 	sec_free_req_id(req);
1735 
1736 	while (1) {
1737 		backlog_req = sec_back_req_clear(c, qp_ctx);
1738 		if (!backlog_req)
1739 			break;
1740 
1741 		backlog_aead_req = backlog_req->aead_req.aead_req;
1742 		aead_request_complete(backlog_aead_req, -EINPROGRESS);
1743 		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
1744 	}
1745 
1746 	aead_request_complete(a_req, err);
1747 }
1748 
1749 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
1750 {
1751 	sec_free_req_id(req);
1752 	sec_free_queue_id(ctx, req);
1753 }
1754 
1755 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1756 {
1757 	struct sec_qp_ctx *qp_ctx;
1758 	int queue_id;
1759 
1760 	/* Pick a queue to balance the load */
1761 	queue_id = sec_alloc_queue_id(ctx, req);
1762 	qp_ctx = &ctx->qp_ctx[queue_id];
1763 
1764 	req->req_id = sec_alloc_req_id(req, qp_ctx);
1765 	if (unlikely(req->req_id < 0)) {
1766 		sec_free_queue_id(ctx, req);
1767 		return req->req_id;
1768 	}
1769 
1770 	return 0;
1771 }
1772 
1773 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1774 {
1775 	struct sec_cipher_req *c_req = &req->c_req;
1776 	int ret;
1777 
1778 	ret = sec_request_init(ctx, req);
1779 	if (unlikely(ret))
1780 		return ret;
1781 
1782 	ret = sec_request_transfer(ctx, req);
1783 	if (unlikely(ret))
1784 		goto err_uninit_req;
1785 
1786 	/* For CBC/CTR decryption, capture the next IV from the input before it is overwritten */
1787 	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1788 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
1789 		sec_update_iv(req, ctx->alg_type);
1790 
1791 	ret = ctx->req_op->bd_send(ctx, req);
1792 	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
1793 		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1794 		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
1795 		goto err_send_req;
1796 	}
1797 
1798 	return ret;
1799 
1800 err_send_req:
1801 	/* On failure, restore the user-supplied IV */
1802 	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1803 		if (ctx->alg_type == SEC_SKCIPHER)
1804 			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
1805 			       ctx->c_ctx.ivsize);
1806 		else
1807 			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
1808 			       ctx->c_ctx.ivsize);
1809 	}
1810 
1811 	sec_request_untransfer(ctx, req);
1812 err_uninit_req:
1813 	sec_request_uninit(ctx, req);
1814 	return ret;
1815 }
1816 
1817 static const struct sec_req_op sec_skcipher_req_ops = {
1818 	.buf_map	= sec_skcipher_sgl_map,
1819 	.buf_unmap	= sec_skcipher_sgl_unmap,
1820 	.do_transfer	= sec_skcipher_copy_iv,
1821 	.bd_fill	= sec_skcipher_bd_fill,
1822 	.bd_send	= sec_bd_send,
1823 	.callback	= sec_skcipher_callback,
1824 	.process	= sec_process,
1825 };
1826 
1827 static const struct sec_req_op sec_aead_req_ops = {
1828 	.buf_map	= sec_aead_sgl_map,
1829 	.buf_unmap	= sec_aead_sgl_unmap,
1830 	.do_transfer	= sec_aead_set_iv,
1831 	.bd_fill	= sec_aead_bd_fill,
1832 	.bd_send	= sec_bd_send,
1833 	.callback	= sec_aead_callback,
1834 	.process	= sec_process,
1835 };
1836 
1837 static const struct sec_req_op sec_skcipher_req_ops_v3 = {
1838 	.buf_map	= sec_skcipher_sgl_map,
1839 	.buf_unmap	= sec_skcipher_sgl_unmap,
1840 	.do_transfer	= sec_skcipher_copy_iv,
1841 	.bd_fill	= sec_skcipher_bd_fill_v3,
1842 	.bd_send	= sec_bd_send,
1843 	.callback	= sec_skcipher_callback,
1844 	.process	= sec_process,
1845 };
1846 
1847 static const struct sec_req_op sec_aead_req_ops_v3 = {
1848 	.buf_map	= sec_aead_sgl_map,
1849 	.buf_unmap	= sec_aead_sgl_unmap,
1850 	.do_transfer	= sec_aead_set_iv,
1851 	.bd_fill	= sec_aead_bd_fill_v3,
1852 	.bd_send	= sec_bd_send,
1853 	.callback	= sec_aead_callback,
1854 	.process	= sec_process,
1855 };
1856 
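/*
 * Pick the BD format and request ops for the hardware generation:
 * type2 BDs before QM_HW_V3, type3 BDs from QM_HW_V3 onwards.
 */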
1857 static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
1858 {
1859 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1860 	int ret;
1861 
1862 	ret = sec_skcipher_init(tfm);
1863 	if (ret)
1864 		return ret;
1865 
1866 	if (ctx->sec->qm.ver < QM_HW_V3) {
1867 		ctx->type_supported = SEC_BD_TYPE2;
1868 		ctx->req_op = &sec_skcipher_req_ops;
1869 	} else {
1870 		ctx->type_supported = SEC_BD_TYPE3;
1871 		ctx->req_op = &sec_skcipher_req_ops_v3;
1872 	}
1873 
1874 	return ret;
1875 }
1876 
1877 static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
1878 {
1879 	sec_skcipher_uninit(tfm);
1880 }
1881 
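/*
 * Common AEAD context setup: validate the transform's IV size,
 * select the BD format for the hardware generation and initialise
 * the auth and cipher sub-contexts.
 */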
1882 static int sec_aead_init(struct crypto_aead *tfm)
1883 {
1884 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1885 	int ret;
1886 
1887 	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
1888 	ctx->alg_type = SEC_AEAD;
1889 	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
1890 	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
1891 	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
1892 		pr_err("invalid aead iv size!\n");
1893 		return -EINVAL;
1894 	}
1895 
1896 	ret = sec_ctx_base_init(ctx);
1897 	if (ret)
1898 		return ret;
1899 	if (ctx->sec->qm.ver < QM_HW_V3) {
1900 		ctx->type_supported = SEC_BD_TYPE2;
1901 		ctx->req_op = &sec_aead_req_ops;
1902 	} else {
1903 		ctx->type_supported = SEC_BD_TYPE3;
1904 		ctx->req_op = &sec_aead_req_ops_v3;
1905 	}
1906 
1907 	ret = sec_auth_init(ctx);
1908 	if (ret)
1909 		goto err_auth_init;
1910 
1911 	ret = sec_cipher_init(ctx);
1912 	if (ret)
1913 		goto err_cipher_init;
1914 
1915 	return ret;
1916 
1917 err_cipher_init:
1918 	sec_auth_uninit(ctx);
1919 err_auth_init:
1920 	sec_ctx_base_uninit(ctx);
1921 	return ret;
1922 }
1923 
1924 static void sec_aead_exit(struct crypto_aead *tfm)
1925 {
1926 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1927 
1928 	sec_cipher_uninit(ctx);
1929 	sec_auth_uninit(ctx);
1930 	sec_ctx_base_uninit(ctx);
1931 }
1932 
1933 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
1934 {
1935 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1936 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1937 	int ret;
1938 
1939 	ret = sec_aead_init(tfm);
1940 	if (ret) {
1941 		pr_err("hisi_sec2: aead init error!\n");
1942 		return ret;
1943 	}
1944 
1945 	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1946 	if (IS_ERR(auth_ctx->hash_tfm)) {
1947 		dev_err(ctx->dev, "aead alloc shash error!\n");
1948 		sec_aead_exit(tfm);
1949 		return PTR_ERR(auth_ctx->hash_tfm);
1950 	}
1951 
1952 	return 0;
1953 }
1954 
1955 static void sec_aead_ctx_exit(struct crypto_aead *tfm)
1956 {
1957 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1958 
1959 	crypto_free_shash(ctx->a_ctx.hash_tfm);
1960 	sec_aead_exit(tfm);
1961 }
1962 
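/*
 * CCM/GCM context setup: also allocate a software fallback tfm of
 * the same algorithm for inputs the hardware cannot handle.
 */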
1963 static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
1964 {
1965 	struct aead_alg *alg = crypto_aead_alg(tfm);
1966 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1967 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1968 	const char *aead_name = alg->base.cra_name;
1969 	int ret;
1970 
1971 	ret = sec_aead_init(tfm);
1972 	if (ret) {
1973 		pr_err("hisi_sec2: aead xcm init error!\n");
1974 		return ret;
1975 	}
1976 
1977 	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
1978 						     CRYPTO_ALG_NEED_FALLBACK |
1979 						     CRYPTO_ALG_ASYNC);
1980 	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
1981 		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
1982 		sec_aead_exit(tfm);
1983 		return PTR_ERR(a_ctx->fallback_aead_tfm);
1984 	}
1985 	a_ctx->fallback = false;
1986 
1987 	return 0;
1988 }
1989 
1990 static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
1991 {
1992 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1993 
1994 	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
1995 	sec_aead_exit(tfm);
1996 }
1997 
1998 static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
1999 {
2000 	return sec_aead_ctx_init(tfm, "sha1");
2001 }
2002 
2003 static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
2004 {
2005 	return sec_aead_ctx_init(tfm, "sha256");
2006 }
2007 
2008 static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
2009 {
2010 	return sec_aead_ctx_init(tfm, "sha512");
2011 }
2012 
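/*
 * Per-mode length rules: XTS needs at least one AES block, ECB/CBC
 * need block-aligned input, and CFB/OFB/CTR are only available from
 * QM_HW_V3 onwards.
 */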
2013 static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
2014 	struct sec_req *sreq)
2015 {
2016 	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
2017 	struct device *dev = ctx->dev;
2018 	u8 c_mode = ctx->c_ctx.c_mode;
2019 	int ret = 0;
2020 
2021 	switch (c_mode) {
2022 	case SEC_CMODE_XTS:
2023 		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
2024 			dev_err(dev, "skcipher XTS mode input length error!\n");
2025 			ret = -EINVAL;
2026 		}
2027 		break;
2028 	case SEC_CMODE_ECB:
2029 	case SEC_CMODE_CBC:
2030 		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
2031 			dev_err(dev, "skcipher AES input length error!\n");
2032 			ret = -EINVAL;
2033 		}
2034 		break;
2035 	case SEC_CMODE_CFB:
2036 	case SEC_CMODE_OFB:
2037 	case SEC_CMODE_CTR:
2038 		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
2039 			dev_err(dev, "skcipher mode is unsupported on this hardware version!\n");
2040 			ret = -EINVAL;
2041 		}
2042 		break;
2043 	default:
2044 		ret = -EINVAL;
2045 	}
2046 
2047 	return ret;
2048 }
2049 
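/*
 * Validate the skcipher request and decide whether the pbuf fast
 * path applies before checking the per-algorithm length rules.
 */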
2050 static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
2051 {
2052 	struct skcipher_request *sk_req = sreq->c_req.sk_req;
2053 	struct device *dev = ctx->dev;
2054 	u8 c_alg = ctx->c_ctx.c_alg;
2055 
2056 	if (unlikely(!sk_req->src || !sk_req->dst ||
2057 		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
2058 		dev_err(dev, "skcipher input param error!\n");
2059 		return -EINVAL;
2060 	}
2061 	sreq->c_req.c_len = sk_req->cryptlen;
2062 
2063 	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
2064 		sreq->use_pbuf = true;
2065 	else
2066 		sreq->use_pbuf = false;
2067 
2068 	if (c_alg == SEC_CALG_3DES) {
2069 		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
2070 			dev_err(dev, "skcipher 3des input length error!\n");
2071 			return -EINVAL;
2072 		}
2073 		return 0;
2074 	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
2075 		return sec_skcipher_cryptlen_check(ctx, sreq);
2076 	}
2077 
2078 	dev_err(dev, "skcipher algorithm error!\n");
2079 
2080 	return -EINVAL;
2081 }
2082 
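/*
 * Software fallback: run the request synchronously on the fallback
 * sync tfm using an on-stack subrequest.
 */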
2083 static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
2084 				    struct skcipher_request *sreq, bool encrypt)
2085 {
2086 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
2087 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
2088 	struct device *dev = ctx->dev;
2089 	int ret;
2090 
2091 	if (!c_ctx->fbtfm) {
2092 		dev_err_ratelimited(dev, "the software fallback tfm is unavailable on this system.\n");
2093 		return -EINVAL;
2094 	}
2095 
2096 	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
2097 
2098 	/* The software fallback runs synchronously, so no completion callback is set */
2099 	skcipher_request_set_callback(subreq, sreq->base.flags,
2100 				      NULL, NULL);
2101 	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
2102 				   sreq->cryptlen, sreq->iv);
2103 	if (encrypt)
2104 		ret = crypto_skcipher_encrypt(subreq);
2105 	else
2106 		ret = crypto_skcipher_decrypt(subreq);
2107 
2108 	skcipher_request_zero(subreq);
2109 
2110 	return ret;
2111 }
2112 
2113 static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
2114 {
2115 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
2116 	struct sec_req *req = skcipher_request_ctx(sk_req);
2117 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
2118 	int ret;
2119 
2120 	if (!sk_req->cryptlen) {
2121 		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
2122 			return -EINVAL;
2123 		return 0;
2124 	}
2125 
2126 	req->flag = sk_req->base.flags;
2127 	req->c_req.sk_req = sk_req;
2128 	req->c_req.encrypt = encrypt;
2129 	req->ctx = ctx;
2130 
2131 	ret = sec_skcipher_param_check(ctx, req);
2132 	if (unlikely(ret))
2133 		return -EINVAL;
2134 
2135 	if (unlikely(ctx->c_ctx.fallback))
2136 		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
2137 
2138 	return ctx->req_op->process(ctx, req);
2139 }
2140 
2141 static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
2142 {
2143 	return sec_skcipher_crypto(sk_req, true);
2144 }
2145 
2146 static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
2147 {
2148 	return sec_skcipher_crypto(sk_req, false);
2149 }
2150 
2151 #define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
2152 	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
2153 {\
2154 	.base = {\
2155 		.cra_name = sec_cra_name,\
2156 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2157 		.cra_priority = SEC_PRIORITY,\
2158 		.cra_flags = CRYPTO_ALG_ASYNC |\
2159 		 CRYPTO_ALG_NEED_FALLBACK,\
2160 		.cra_blocksize = blk_size,\
2161 		.cra_ctxsize = sizeof(struct sec_ctx),\
2162 		.cra_module = THIS_MODULE,\
2163 	},\
2164 	.init = ctx_init,\
2165 	.exit = ctx_exit,\
2166 	.setkey = sec_set_key,\
2167 	.decrypt = sec_skcipher_decrypt,\
2168 	.encrypt = sec_skcipher_encrypt,\
2169 	.min_keysize = sec_min_key_size,\
2170 	.max_keysize = sec_max_key_size,\
2171 	.ivsize = iv_size,\
2172 }
2173 
2174 #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
2175 	max_key_size, blk_size, iv_size) \
2176 	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
2177 	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
2178 
2179 static struct sec_skcipher sec_skciphers[] = {
2180 	{
2181 		.alg_msk = BIT(0),
2182 		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
2183 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
2184 	},
2185 	{
2186 		.alg_msk = BIT(1),
2187 		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
2188 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2189 	},
2190 	{
2191 		.alg_msk = BIT(2),
2192 		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
2193 					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2194 	},
2195 	{
2196 		.alg_msk = BIT(3),
2197 		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
2198 					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2199 	},
2200 	{
2201 		.alg_msk = BIT(4),
2202 		.alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
2203 					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2204 	},
2205 	{
2206 		.alg_msk = BIT(5),
2207 		.alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
2208 					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2209 	},
2210 	{
2211 		.alg_msk = BIT(12),
2212 		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
2213 					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2214 	},
2215 	{
2216 		.alg_msk = BIT(13),
2217 		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
2218 					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2219 	},
2220 	{
2221 		.alg_msk = BIT(14),
2222 		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
2223 					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2224 	},
2225 	{
2226 		.alg_msk = BIT(15),
2227 		.alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
2228 					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2229 	},
2230 	{
2231 		.alg_msk = BIT(16),
2232 		.alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
2233 					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2234 	},
2235 	{
2236 		.alg_msk = BIT(23),
2237 		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
2238 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
2239 	},
2240 	{
2241 		.alg_msk = BIT(24),
2242 		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
2243 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
2244 					DES3_EDE_BLOCK_SIZE),
2245 	},
2246 };
2247 
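/*
 * For CCM, iv[0] holds L - 1, where L is the byte width of the
 * message length field. L must lie in [2, 8], and for small L the
 * ciphertext length must fit into L bytes.
 */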
2248 static int aead_iv_dimension_check(struct aead_request *aead_req)
2249 {
2250 	u8 cl;
2251 
2252 	cl = aead_req->iv[0] + 1;
2253 	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
2254 		return -EINVAL;
2255 
2256 	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
2257 		return -EOVERFLOW;
2258 
2259 	return 0;
2260 }
2261 
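/*
 * Enforce the hardware limits on an AEAD request: overall input and
 * AAD lengths, the per-mode MAC length, the CCM IV layout and CBC
 * block alignment of the ciphertext length.
 */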
2262 static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
2263 {
2264 	struct aead_request *req = sreq->aead_req.aead_req;
2265 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2266 	size_t authsize = crypto_aead_authsize(tfm);
2267 	u8 c_mode = ctx->c_ctx.c_mode;
2268 	struct device *dev = ctx->dev;
2269 	int ret;
2270 
2271 	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2272 	    req->assoclen > SEC_MAX_AAD_LEN)) {
2273 		dev_err(dev, "aead input spec error!\n");
2274 		return -EINVAL;
2275 	}
2276 
2277 	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
2278 	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
2279 		authsize & MAC_LEN_MASK)))) {
2280 		dev_err(dev, "aead input mac length error!\n");
2281 		return -EINVAL;
2282 	}
2283 
2284 	if (c_mode == SEC_CMODE_CCM) {
2285 		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
2286 			dev_err_ratelimited(dev, "CCM input AAD is too long!\n");
2287 			return -EINVAL;
2288 		}
2289 		ret = aead_iv_dimension_check(req);
2290 		if (ret) {
2291 			dev_err(dev, "aead input iv param error!\n");
2292 			return ret;
2293 		}
2294 	}
2295 
2296 	if (sreq->c_req.encrypt)
2297 		sreq->c_req.c_len = req->cryptlen;
2298 	else
2299 		sreq->c_req.c_len = req->cryptlen - authsize;
2300 	if (c_mode == SEC_CMODE_CBC) {
2301 		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
2302 			dev_err(dev, "aead crypto length error!\n");
2303 			return -EINVAL;
2304 		}
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
2311 {
2312 	struct aead_request *req = sreq->aead_req.aead_req;
2313 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2314 	size_t authsize = crypto_aead_authsize(tfm);
2315 	struct device *dev = ctx->dev;
2316 	u8 c_alg = ctx->c_ctx.c_alg;
2317 
2318 	if (unlikely(!req->src || !req->dst)) {
2319 		dev_err(dev, "aead input param error!\n");
2320 		return -EINVAL;
2321 	}
2322 
2323 	if (ctx->sec->qm.ver == QM_HW_V2) {
2324 		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
2325 		    req->cryptlen <= authsize))) {
2326 			ctx->a_ctx.fallback = true;
2327 			return -EINVAL;
2328 		}
2329 	}
2330 
2331 	/* Only AES and SM4 are supported */
2332 	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
2333 		dev_err(dev, "aead crypto alg error!\n");
2334 		return -EINVAL;
2335 	}
2336 
2337 	if (unlikely(sec_aead_spec_check(ctx, sreq)))
2338 		return -EINVAL;
2339 
2340 	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
2341 		SEC_PBUF_SZ)
2342 		sreq->use_pbuf = true;
2343 	else
2344 		sreq->use_pbuf = false;
2345 
2346 	return 0;
2347 }
2348 
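/*
 * AEAD software fallback: forward the whole request to the
 * pre-allocated fallback tfm.
 */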
2349 static int sec_aead_soft_crypto(struct sec_ctx *ctx,
2350 				struct aead_request *aead_req,
2351 				bool encrypt)
2352 {
2353 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
2354 	struct device *dev = ctx->dev;
2355 	struct aead_request *subreq;
2356 	int ret;
2357 
2358 	/* Kunpeng920 AEAD hardware does not support zero-size input, hence this fallback */
2359 	if (!a_ctx->fallback_aead_tfm) {
2360 		dev_err(dev, "aead fallback tfm is NULL!\n");
2361 		return -EINVAL;
2362 	}
2363 
2364 	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
2365 	if (!subreq)
2366 		return -ENOMEM;
2367 
2368 	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
2369 	aead_request_set_callback(subreq, aead_req->base.flags,
2370 				  aead_req->base.complete, aead_req->base.data);
2371 	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
2372 			       aead_req->cryptlen, aead_req->iv);
2373 	aead_request_set_ad(subreq, aead_req->assoclen);
2374 
2375 	if (encrypt)
2376 		ret = crypto_aead_encrypt(subreq);
2377 	else
2378 		ret = crypto_aead_decrypt(subreq);
2379 	aead_request_free(subreq);
2380 
2381 	return ret;
2382 }
2383 
2384 static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
2385 {
2386 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
2387 	struct sec_req *req = aead_request_ctx(a_req);
2388 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2389 	int ret;
2390 
2391 	req->flag = a_req->base.flags;
2392 	req->aead_req.aead_req = a_req;
2393 	req->c_req.encrypt = encrypt;
2394 	req->ctx = ctx;
2395 
2396 	ret = sec_aead_param_check(ctx, req);
2397 	if (unlikely(ret)) {
2398 		if (ctx->a_ctx.fallback)
2399 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
2400 		return -EINVAL;
2401 	}
2402 
2403 	return ctx->req_op->process(ctx, req);
2404 }
2405 
2406 static int sec_aead_encrypt(struct aead_request *a_req)
2407 {
2408 	return sec_aead_crypto(a_req, true);
2409 }
2410 
2411 static int sec_aead_decrypt(struct aead_request *a_req)
2412 {
2413 	return sec_aead_crypto(a_req, false);
2414 }
2415 
2416 #define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
2417 			 ctx_exit, blk_size, iv_size, max_authsize)\
2418 {\
2419 	.base = {\
2420 		.cra_name = sec_cra_name,\
2421 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2422 		.cra_priority = SEC_PRIORITY,\
2423 		.cra_flags = CRYPTO_ALG_ASYNC |\
2424 		 CRYPTO_ALG_NEED_FALLBACK,\
2425 		.cra_blocksize = blk_size,\
2426 		.cra_ctxsize = sizeof(struct sec_ctx),\
2427 		.cra_module = THIS_MODULE,\
2428 	},\
2429 	.init = ctx_init,\
2430 	.exit = ctx_exit,\
2431 	.setkey = sec_set_key,\
2432 	.setauthsize = sec_aead_setauthsize,\
2433 	.decrypt = sec_aead_decrypt,\
2434 	.encrypt = sec_aead_encrypt,\
2435 	.ivsize = iv_size,\
2436 	.maxauthsize = max_authsize,\
2437 }
2438 
2439 static struct sec_aead sec_aeads[] = {
2440 	{
2441 		.alg_msk = BIT(6),
2442 		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
2443 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2444 				    AES_BLOCK_SIZE),
2445 	},
2446 	{
2447 		.alg_msk = BIT(7),
2448 		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
2449 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2450 				    AES_BLOCK_SIZE),
2451 	},
2452 	{
2453 		.alg_msk = BIT(17),
2454 		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
2455 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2456 				    AES_BLOCK_SIZE),
2457 	},
2458 	{
2459 		.alg_msk = BIT(18),
2460 		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
2461 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2462 				    AES_BLOCK_SIZE),
2463 	},
2464 	{
2465 		.alg_msk = BIT(43),
2466 		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
2467 				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2468 				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
2469 	},
2470 	{
2471 		.alg_msk = BIT(44),
2472 		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
2473 				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2474 				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
2475 	},
2476 	{
2477 		.alg_msk = BIT(45),
2478 		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
2479 				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2480 				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
2481 	},
2482 };
2483 
2484 static void sec_unregister_skcipher(u64 alg_mask, int end)
2485 {
2486 	int i;
2487 
2488 	for (i = 0; i < end; i++)
2489 		if (sec_skciphers[i].alg_msk & alg_mask)
2490 			crypto_unregister_skcipher(&sec_skciphers[i].alg);
2491 }
2492 
2493 static int sec_register_skcipher(u64 alg_mask)
2494 {
2495 	int i, ret, count;
2496 
2497 	count = ARRAY_SIZE(sec_skciphers);
2498 
2499 	for (i = 0; i < count; i++) {
2500 		if (!(sec_skciphers[i].alg_msk & alg_mask))
2501 			continue;
2502 
2503 		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
2504 		if (ret)
2505 			goto err;
2506 	}
2507 
2508 	return 0;
2509 
2510 err:
2511 	sec_unregister_skcipher(alg_mask, i);
2512 
2513 	return ret;
2514 }
2515 
2516 static void sec_unregister_aead(u64 alg_mask, int end)
2517 {
2518 	int i;
2519 
2520 	for (i = 0; i < end; i++)
2521 		if (sec_aeads[i].alg_msk & alg_mask)
2522 			crypto_unregister_aead(&sec_aeads[i].alg);
2523 }
2524 
2525 static int sec_register_aead(u64 alg_mask)
2526 {
2527 	int i, ret, count;
2528 
2529 	count = ARRAY_SIZE(sec_aeads);
2530 
2531 	for (i = 0; i < count; i++) {
2532 		if (!(sec_aeads[i].alg_msk & alg_mask))
2533 			continue;
2534 
2535 		ret = crypto_register_aead(&sec_aeads[i].alg);
2536 		if (ret)
2537 			goto err;
2538 	}
2539 
2540 	return 0;
2541 
2542 err:
2543 	sec_unregister_aead(alg_mask, i);
2544 
2545 	return ret;
2546 }
2547 
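/*
 * Algorithms are registered once, when the first SEC device is
 * probed; additional devices only bump the user count under
 * sec_algs_lock.
 */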
2548 int sec_register_to_crypto(struct hisi_qm *qm)
2549 {
2550 	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
2551 	int ret = 0;
2552 
2553 	mutex_lock(&sec_algs_lock);
2554 	if (sec_available_devs) {
2555 		sec_available_devs++;
2556 		goto unlock;
2557 	}
2558 
2559 	ret = sec_register_skcipher(alg_mask);
2560 	if (ret)
2561 		goto unlock;
2562 
2563 	ret = sec_register_aead(alg_mask);
2564 	if (ret)
2565 		goto unreg_skcipher;
2566 
2567 	sec_available_devs++;
2568 	mutex_unlock(&sec_algs_lock);
2569 
2570 	return 0;
2571 
2572 unreg_skcipher:
2573 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2574 unlock:
2575 	mutex_unlock(&sec_algs_lock);
2576 	return ret;
2577 }
2578 
2579 void sec_unregister_from_crypto(struct hisi_qm *qm)
2580 {
2581 	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
2582 
2583 	mutex_lock(&sec_algs_lock);
2584 	if (--sec_available_devs)
2585 		goto unlock;
2586 
2587 	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
2588 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2589 
2590 unlock:
2591 	mutex_unlock(&sec_algs_lock);
2592 }
2593