/* xref: /linux/drivers/crypto/hisilicon/zip/zip_crypto.c (revision 85ffc6e4ed3712f8b3fedb3fbe42afae644a699c) */
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <crypto/internal/acompress.h>
4 #include <linux/bitfield.h>
5 #include <linux/bitmap.h>
6 #include <linux/dma-mapping.h>
7 #include <linux/scatterlist.h>
8 #include "zip.h"
9 
/* hisi_zip_sqe dw3: low byte carries the BD completion status */
#define HZIP_BD_STATUS_M			GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M		GENMASK(23, 0)
#define HZIP_SQE_TYPE_M				GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M		GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M				GENMASK(7, 0)
#define HZIP_ALG_TYPE_DEFLATE			0x01
#define HZIP_BUF_TYPE_M				GENMASK(11, 8)
#define HZIP_SGL				0x1

#define HZIP_ALG_PRIORITY			300
/* Default number of SGEs per hw SGL (overridable via module parameter) */
#define HZIP_SGL_SGE_NR				10

#define HZIP_ALG_DEFLATE			GENMASK(5, 4)

/* Serializes algorithm (un)registration across probing devices. */
static DEFINE_MUTEX(zip_algs_lock);
/* Count of devices that have registered; protected by zip_algs_lock. */
static unsigned int zip_available_devs;

/* alg_type values programmed into the hw sqe: 0 = compress, 1 = decompress */
enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};

/* Indices of the two queue pairs each ctx owns: one comp, one decomp. */
enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};

/* Map a crypto algorithm name to the hw request type; 0 if unknown. */
#define COMP_NAME_TO_TYPE(alg_name)					\
	(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0)
44 
/* One in-flight (de)compression request tracked by the driver. */
struct hisi_zip_req {
	struct acomp_req *req;		/* originating crypto-layer request */
	struct hisi_acc_hw_sgl *hw_src;	/* hw SGL mapped from req->src */
	struct hisi_acc_hw_sgl *hw_dst;	/* hw SGL mapped from req->dst */
	dma_addr_t dma_src;		/* DMA address of hw_src */
	dma_addr_t dma_dst;		/* DMA address of hw_dst */
	u16 req_id;			/* slot index within hisi_zip_req_q */
};

/* Fixed-size cache of request slots for one queue pair. */
struct hisi_zip_req_q {
	struct hisi_zip_req *q;		/* array of 'size' request slots */
	unsigned long *req_bitmap;	/* set bit = slot in use */
	spinlock_t req_lock;		/* protects req_bitmap */
	u16 size;			/* equals the qp's sq depth */
};

/* Per-direction state: one hw queue pair plus its caches and pools. */
struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;
	struct hisi_zip_req_q req_q;
	struct hisi_acc_sgl_pool *sgl_pool;
	struct hisi_zip *zip_dev;
	struct hisi_zip_ctx *ctx;	/* back-pointer to the owning ctx */
};

/* Accessor set for building and parsing hardware sqes. */
struct hisi_zip_sqe_ops {
	u8 sqe_type;
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	u32 (*get_tag)(struct hisi_zip_sqe *sqe);
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};

/* Per-tfm context: a compress qp, a decompress qp, and the sqe ops. */
struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
};
86 
sgl_sge_nr_set(const char * val,const struct kernel_param * kp)87 static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
88 {
89 	int ret;
90 	u16 n;
91 
92 	if (!val)
93 		return -EINVAL;
94 
95 	ret = kstrtou16(val, 10, &n);
96 	if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
97 		return -EINVAL;
98 
99 	return param_set_ushort(val, kp);
100 }
101 
/* Custom set hook enforces the 1..HISI_ACC_SGL_SGE_NR_MAX range. */
static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};

/* Number of SGEs per hw SGL; read-only once loaded (perm 0444). */
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
110 
hisi_zip_create_req(struct hisi_zip_qp_ctx * qp_ctx,struct acomp_req * req)111 static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
112 						struct acomp_req *req)
113 {
114 	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
115 	struct hisi_zip_req *q = req_q->q;
116 	struct hisi_zip_req *req_cache;
117 	int req_id;
118 
119 	spin_lock(&req_q->req_lock);
120 
121 	req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
122 	if (req_id >= req_q->size) {
123 		spin_unlock(&req_q->req_lock);
124 		dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
125 		return ERR_PTR(-EAGAIN);
126 	}
127 	set_bit(req_id, req_q->req_bitmap);
128 
129 	spin_unlock(&req_q->req_lock);
130 
131 	req_cache = q + req_id;
132 	req_cache->req_id = req_id;
133 	req_cache->req = req;
134 
135 	return req_cache;
136 }
137 
/*
 * Release the request-cache slot held by @req so it can be reused by a
 * later request on the same queue pair.
 */
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
				struct hisi_zip_req *req)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;

	/* Bitmap is shared with hisi_zip_create_req(), so take the lock. */
	spin_lock(&req_q->req_lock);
	clear_bit(req->req_id, req_q->req_bitmap);
	spin_unlock(&req_q->req_lock);
}
147 
hisi_zip_fill_addr(struct hisi_zip_sqe * sqe,struct hisi_zip_req * req)148 static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
149 {
150 	sqe->source_addr_l = lower_32_bits(req->dma_src);
151 	sqe->source_addr_h = upper_32_bits(req->dma_src);
152 	sqe->dest_addr_l = lower_32_bits(req->dma_dst);
153 	sqe->dest_addr_h = upper_32_bits(req->dma_dst);
154 }
155 
hisi_zip_fill_buf_size(struct hisi_zip_sqe * sqe,struct hisi_zip_req * req)156 static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
157 {
158 	struct acomp_req *a_req = req->req;
159 
160 	sqe->input_data_length = a_req->slen;
161 	sqe->dest_avail_out = a_req->dlen;
162 }
163 
/* Set the buffer-type field in dw9 without disturbing the other bits. */
static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_BUF_TYPE_M) |
		   FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
}
172 
/* Set the request-type field in dw9 without disturbing the other bits. */
static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_REQ_TYPE_M) |
		   FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
}
181 
hisi_zip_fill_tag(struct hisi_zip_sqe * sqe,struct hisi_zip_req * req)182 static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
183 {
184 	sqe->dw26 = req->req_id;
185 }
186 
/* Set the sqe-type field in dw7 without disturbing the other bits. */
static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
{
	sqe->dw7 = (sqe->dw7 & ~HZIP_SQE_TYPE_M) |
		   FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
}
195 
/* Zero the sqe and populate every field through the ctx's accessor set. */
static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
			      u8 req_type, struct hisi_zip_req *req)
{
	const struct hisi_zip_sqe_ops *ops = ctx->ops;

	memset(sqe, 0, sizeof(*sqe));

	ops->fill_addr(sqe, req);
	ops->fill_buf_size(sqe, req);
	ops->fill_buf_type(sqe, HZIP_SGL);
	ops->fill_req_type(sqe, req_type);
	ops->fill_tag(sqe, req);
	ops->fill_sqe_type(sqe, ops->sqe_type);
}
210 
hisi_zip_do_work(struct hisi_zip_qp_ctx * qp_ctx,struct hisi_zip_req * req)211 static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
212 			    struct hisi_zip_req *req)
213 {
214 	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
215 	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
216 	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
217 	struct acomp_req *a_req = req->req;
218 	struct hisi_qp *qp = qp_ctx->qp;
219 	struct device *dev = &qp->qm->pdev->dev;
220 	struct hisi_zip_sqe zip_sqe;
221 	int ret;
222 
223 	if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
224 		return -EINVAL;
225 
226 	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
227 						    req->req_id << 1, &req->dma_src);
228 	if (IS_ERR(req->hw_src)) {
229 		dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
230 			PTR_ERR(req->hw_src));
231 		return PTR_ERR(req->hw_src);
232 	}
233 
234 	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
235 						    (req->req_id << 1) + 1,
236 						    &req->dma_dst);
237 	if (IS_ERR(req->hw_dst)) {
238 		ret = PTR_ERR(req->hw_dst);
239 		dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
240 			ret);
241 		goto err_unmap_input;
242 	}
243 
244 	hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
245 
246 	/* send command to start a task */
247 	atomic64_inc(&dfx->send_cnt);
248 	spin_lock_bh(&req_q->req_lock);
249 	ret = hisi_qp_send(qp, &zip_sqe);
250 	spin_unlock_bh(&req_q->req_lock);
251 	if (unlikely(ret < 0)) {
252 		atomic64_inc(&dfx->send_busy_cnt);
253 		ret = -EAGAIN;
254 		dev_dbg_ratelimited(dev, "failed to send request!\n");
255 		goto err_unmap_output;
256 	}
257 
258 	return -EINPROGRESS;
259 
260 err_unmap_output:
261 	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
262 err_unmap_input:
263 	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
264 	return ret;
265 }
266 
/* Tag (request-cache slot id) previously stored by hisi_zip_fill_tag(). */
static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe)
{
	return sqe->dw26;
}

/* Hardware completion status of the BD, from the low byte of dw3. */
static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
{
	return sqe->dw3 & HZIP_BD_STATUS_M;
}

/* Number of output bytes the hardware produced. */
static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
{
	return sqe->produced;
}
281 
/*
 * Completion callback invoked for each finished sqe on a zip queue pair:
 * checks the hardware status, unmaps the SGLs, propagates the produced
 * output length, completes the acomp request, then frees the slot.
 */
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
	struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe *sqe = data;
	u32 tag = ops->get_tag(sqe);
	struct hisi_zip_req *req = req_q->q + tag;
	struct acomp_req *acomp_req = req->req;
	int err = 0;
	u32 status;

	atomic64_inc(&dfx->recv_cnt);
	status = ops->get_status(sqe);
	/* HZIP_NC_ERR is deliberately not treated as a failure (see zip.h). */
	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
			sqe->produced);
		atomic64_inc(&dfx->err_bd_cnt);
		err = -EIO;
	}

	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);

	acomp_req->dlen = ops->get_dstlen(sqe);

	/* complete may be unset; only notify the crypto layer if it exists */
	if (acomp_req->base.complete)
		acomp_request_complete(acomp_req, err);

	/* Free the slot last: req/acomp_req must not be reused before this. */
	hisi_zip_remove_req(qp_ctx, req);
}
316 
hisi_zip_acompress(struct acomp_req * acomp_req)317 static int hisi_zip_acompress(struct acomp_req *acomp_req)
318 {
319 	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
320 	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
321 	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
322 	struct hisi_zip_req *req;
323 	int ret;
324 
325 	req = hisi_zip_create_req(qp_ctx, acomp_req);
326 	if (IS_ERR(req))
327 		return PTR_ERR(req);
328 
329 	ret = hisi_zip_do_work(qp_ctx, req);
330 	if (unlikely(ret != -EINPROGRESS)) {
331 		dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
332 		hisi_zip_remove_req(qp_ctx, req);
333 	}
334 
335 	return ret;
336 }
337 
hisi_zip_adecompress(struct acomp_req * acomp_req)338 static int hisi_zip_adecompress(struct acomp_req *acomp_req)
339 {
340 	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
341 	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
342 	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
343 	struct hisi_zip_req *req;
344 	int ret;
345 
346 	req = hisi_zip_create_req(qp_ctx, acomp_req);
347 	if (IS_ERR(req))
348 		return PTR_ERR(req);
349 
350 	ret = hisi_zip_do_work(qp_ctx, req);
351 	if (unlikely(ret != -EINPROGRESS)) {
352 		dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
353 				     ret);
354 		hisi_zip_remove_req(qp_ctx, req);
355 	}
356 
357 	return ret;
358 }
359 
hisi_zip_start_qp(struct hisi_qp * qp,struct hisi_zip_qp_ctx * qp_ctx,int alg_type,int req_type)360 static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
361 			     int alg_type, int req_type)
362 {
363 	struct device *dev = &qp->qm->pdev->dev;
364 	int ret;
365 
366 	qp->req_type = req_type;
367 	qp->alg_type = alg_type;
368 	qp->qp_ctx = qp_ctx;
369 
370 	ret = hisi_qm_start_qp(qp, 0);
371 	if (ret < 0) {
372 		dev_err(dev, "failed to start qp (%d)!\n", ret);
373 		return ret;
374 	}
375 
376 	qp_ctx->qp = qp;
377 
378 	return 0;
379 }
380 
/* Stop the queue pair and return it to the QM's free pool. */
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	hisi_qm_free_qps(&qp_ctx->qp, 1);
}
386 
/* Default sqe accessor set installed by hisi_zip_ctx_init(). */
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
	.sqe_type		= 0x3,
	.fill_addr		= hisi_zip_fill_addr,
	.fill_buf_size		= hisi_zip_fill_buf_size,
	.fill_buf_type		= hisi_zip_fill_buf_type,
	.fill_req_type		= hisi_zip_fill_req_type,
	.fill_tag		= hisi_zip_fill_tag,
	.fill_sqe_type		= hisi_zip_fill_sqe_type,
	.get_tag		= hisi_zip_get_tag,
	.get_status		= hisi_zip_get_status,
	.get_dstlen		= hisi_zip_get_dstlen,
};
399 
/*
 * Create and start the ctx's two queue pairs (index 0 = compress,
 * index 1 = decompress) on @node and install the sqe accessor set.
 *
 * On failure, already-started qps are stopped in reverse order and all
 * created qps are returned to the QM.
 */
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
	struct hisi_zip_qp_ctx *qp_ctx;
	struct hisi_zip *hisi_zip;
	int ret, i, j;

	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
	if (ret) {
		pr_err("failed to create zip qps (%d)!\n", ret);
		return -ENODEV;
	}

	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
		qp_ctx->ctx = hisi_zip_ctx;
		/* alg_type = 0 for compress, 1 for decompress in hw sqe */
		ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
		if (ret)
			goto err_stop_qps;

		qp_ctx->zip_dev = hisi_zip;
	}

	hisi_zip_ctx->ops = &hisi_zip_ops;

	return 0;

err_stop_qps:
	for (j = i - 1; j >= 0; j--)
		hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
	hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
	return ret;
}
435 
hisi_zip_ctx_exit(struct hisi_zip_ctx * hisi_zip_ctx)436 static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
437 {
438 	int i;
439 
440 	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
441 		hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
442 }
443 
/*
 * Allocate, for each queue pair, a request cache sized to the hardware
 * queue depth: a free-slot bitmap plus an array of hisi_zip_req entries.
 *
 * The unwind labels are asymmetric on purpose: a failure while setting
 * up queue 0 only undoes queue 0's partial state, while a failure on
 * queue 1 must also release everything queue 0 already allocated.
 */
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_req_q *req_q;
	int i, ret;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		req_q = &ctx->qp_ctx[i].req_q;
		req_q->size = q_depth;

		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
		if (!req_q->req_bitmap) {
			ret = -ENOMEM;
			/* Nothing allocated yet for queue 0: bail directly. */
			if (i == 0)
				return ret;

			goto err_free_comp_q;
		}
		spin_lock_init(&req_q->req_lock);

		req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
				   GFP_KERNEL);
		if (!req_q->q) {
			ret = -ENOMEM;
			if (i == 0)
				goto err_free_comp_bitmap;
			else
				goto err_free_decomp_bitmap;
		}
	}

	return 0;

err_free_decomp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_comp_q:
	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_comp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
	return ret;
}
485 
hisi_zip_release_req_q(struct hisi_zip_ctx * ctx)486 static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
487 {
488 	int i;
489 
490 	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
491 		kfree(ctx->qp_ctx[i].req_q.q);
492 		bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
493 	}
494 }
495 
hisi_zip_create_sgl_pool(struct hisi_zip_ctx * ctx)496 static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
497 {
498 	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
499 	struct hisi_zip_qp_ctx *tmp;
500 	struct device *dev;
501 	int i;
502 
503 	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
504 		tmp = &ctx->qp_ctx[i];
505 		dev = &tmp->qp->qm->pdev->dev;
506 		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
507 							 sgl_sge_nr);
508 		if (IS_ERR(tmp->sgl_pool)) {
509 			if (i == 1)
510 				goto err_free_sgl_pool0;
511 			return -ENOMEM;
512 		}
513 	}
514 
515 	return 0;
516 
517 err_free_sgl_pool0:
518 	hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
519 			       ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
520 	return -ENOMEM;
521 }
522 
hisi_zip_release_sgl_pool(struct hisi_zip_ctx * ctx)523 static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
524 {
525 	int i;
526 
527 	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
528 		hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
529 				       ctx->qp_ctx[i].sgl_pool);
530 }
531 
hisi_zip_set_acomp_cb(struct hisi_zip_ctx * ctx,void (* fn)(struct hisi_qp *,void *))532 static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
533 				  void (*fn)(struct hisi_qp *, void *))
534 {
535 	int i;
536 
537 	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
538 		ctx->qp_ctx[i].qp->req_cb = fn;
539 }
540 
hisi_zip_acomp_init(struct crypto_acomp * tfm)541 static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
542 {
543 	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
544 	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
545 	struct device *dev;
546 	int ret;
547 
548 	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
549 	if (ret) {
550 		pr_err("failed to init ctx (%d)!\n", ret);
551 		return ret;
552 	}
553 
554 	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;
555 
556 	ret = hisi_zip_create_req_q(ctx);
557 	if (ret) {
558 		dev_err(dev, "failed to create request queue (%d)!\n", ret);
559 		goto err_ctx_exit;
560 	}
561 
562 	ret = hisi_zip_create_sgl_pool(ctx);
563 	if (ret) {
564 		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
565 		goto err_release_req_q;
566 	}
567 
568 	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);
569 
570 	return 0;
571 
572 err_release_req_q:
573 	hisi_zip_release_req_q(ctx);
574 err_ctx_exit:
575 	hisi_zip_ctx_exit(ctx);
576 	return ret;
577 }
578 
/*
 * acomp .exit hook: tear down everything hisi_zip_acomp_init() set up,
 * in reverse order — callback first, then SGL pools, request caches and
 * finally the queue pairs.
 */
static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	hisi_zip_set_acomp_cb(ctx, NULL);
	hisi_zip_release_sgl_pool(ctx);
	hisi_zip_release_req_q(ctx);
	hisi_zip_ctx_exit(ctx);
}
588 
/* "deflate" acomp algorithm backed by the HiSilicon ZIP accelerator. */
static struct acomp_alg hisi_zip_acomp_deflate = {
	.init			= hisi_zip_acomp_init,
	.exit			= hisi_zip_acomp_exit,
	.compress		= hisi_zip_acompress,
	.decompress		= hisi_zip_adecompress,
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "hisi-deflate-acomp",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_module		= THIS_MODULE,
		.cra_priority		= HZIP_ALG_PRIORITY,
		.cra_ctxsize		= sizeof(struct hisi_zip_ctx),
	}
};
603 
hisi_zip_register_deflate(struct hisi_qm * qm)604 static int hisi_zip_register_deflate(struct hisi_qm *qm)
605 {
606 	int ret;
607 
608 	if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
609 		return 0;
610 
611 	ret = crypto_register_acomp(&hisi_zip_acomp_deflate);
612 	if (ret)
613 		dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret);
614 
615 	return ret;
616 }
617 
hisi_zip_unregister_deflate(struct hisi_qm * qm)618 static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
619 {
620 	if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
621 		return;
622 
623 	crypto_unregister_acomp(&hisi_zip_acomp_deflate);
624 }
625 
hisi_zip_register_to_crypto(struct hisi_qm * qm)626 int hisi_zip_register_to_crypto(struct hisi_qm *qm)
627 {
628 	int ret = 0;
629 
630 	mutex_lock(&zip_algs_lock);
631 	if (zip_available_devs++)
632 		goto unlock;
633 
634 	ret = hisi_zip_register_deflate(qm);
635 	if (ret)
636 		zip_available_devs--;
637 
638 unlock:
639 	mutex_unlock(&zip_algs_lock);
640 	return ret;
641 }
642 
hisi_zip_unregister_from_crypto(struct hisi_qm * qm)643 void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
644 {
645 	mutex_lock(&zip_algs_lock);
646 	if (--zip_available_devs)
647 		goto unlock;
648 
649 	hisi_zip_unregister_deflate(qm);
650 
651 unlock:
652 	mutex_unlock(&zip_algs_lock);
653 }
654