xref: /linux/drivers/crypto/hisilicon/zip/zip_crypto.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <crypto/internal/acompress.h>
4 #include <linux/bitfield.h>
5 #include <linux/bitmap.h>
6 #include <linux/dma-mapping.h>
7 #include <linux/scatterlist.h>
8 #include "zip.h"
9 
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M			GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M		GENMASK(23, 0)
#define HZIP_SQE_TYPE_M				GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M		GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M				GENMASK(7, 0)
#define HZIP_ALG_TYPE_DEFLATE			0x01
#define HZIP_ALG_TYPE_LZ4			0x04
#define HZIP_BUF_TYPE_M				GENMASK(11, 8)
#define HZIP_SGL				0x1	/* buffer described by a hardware SGL */
#define HZIP_WIN_SIZE_M				GENMASK(15, 12)
#define HZIP_16K_WINSZ				0x2	/* window-size field encoding used for all requests */

#define HZIP_ALG_PRIORITY			300
#define HZIP_SGL_SGE_NR				10	/* default for the sgl_sge_nr module parameter */

/* capability bits passed to hisi_zip_alg_support() */
#define HZIP_ALG_DEFLATE			GENMASK(5, 4)
#define HZIP_ALG_LZ4				BIT(8)

/* Serializes algorithm (un)registration across multiple zip devices. */
static DEFINE_MUTEX(zip_algs_lock);
/* Number of devices currently sharing the registered algs; guarded by zip_algs_lock. */
static unsigned int zip_available_devs;
34 
/* Hardware direction encoding used as the qp alg_type. */
enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};
39 
/* Indices into hisi_zip_ctx->qp_ctx[]: one queue per direction. */
enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};
45 
/* Rebuild the request pointer stashed in dw26/dw27 by ->fill_tag(). */
#define GET_REQ_FROM_SQE(sqe)	((u64)(sqe)->dw26 | (u64)(sqe)->dw27 << 32)
/* Map a crypto alg name to its hardware request-type code (0 if unknown). */
#define COMP_NAME_TO_TYPE(alg_name)					\
	(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE :	\
	(!strcmp((alg_name), "lz4") ? HZIP_ALG_TYPE_LZ4 : 0))
50 
/* Per-request bookkeeping: SGL mappings plus back-pointers for the callback. */
struct hisi_zip_req {
	struct acomp_req *req;		/* the acomp request being serviced */
	struct hisi_acc_hw_sgl *hw_src;	/* hardware SGL for the source buffer */
	struct hisi_acc_hw_sgl *hw_dst;	/* hardware SGL for the destination buffer */
	dma_addr_t dma_src;
	dma_addr_t dma_dst;
	struct hisi_zip_qp_ctx *qp_ctx;	/* owning queue context */
	u16 req_id;			/* slot index within the request cache */
};
60 
/* Fixed-size request cache; free slots are tracked in req_bitmap. */
struct hisi_zip_req_q {
	struct hisi_zip_req *q;		/* array of 'size' request slots */
	unsigned long *req_bitmap;	/* set bit == slot in use */
	spinlock_t req_lock;		/* protects req_bitmap */
	u16 size;			/* matches the qp's sq_depth */
};
67 
/* Per-direction queue context: one per entry in hisi_zip_ctx->qp_ctx[]. */
struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;
	struct hisi_zip_req_q req_q;
	struct hisi_acc_sgl_pool *sgl_pool;
	struct hisi_zip *zip_dev;
	struct hisi_zip_ctx *ctx;	/* back-pointer to the owning tfm context */
	u8 req_type;			/* hardware request type (deflate/lz4) */
};
76 
/*
 * SQE field accessors, abstracted so different SQE layouts can share
 * the submission/completion paths.
 */
struct hisi_zip_sqe_ops {
	u8 sqe_type;
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	void (*fill_win_size)(struct hisi_zip_sqe *sqe, u8 win_size);
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};
89 
/* Per-tfm context: a comp queue, a decomp queue, and the SQE ops table. */
struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
	bool fallback;	/* true: hardware setup failed, use software transform */
};
95 
sgl_sge_nr_set(const char * val,const struct kernel_param * kp)96 static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
97 {
98 	int ret;
99 	u16 n;
100 
101 	if (!val)
102 		return -EINVAL;
103 
104 	ret = kstrtou16(val, 10, &n);
105 	if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
106 		return -EINVAL;
107 
108 	return param_set_ushort(val, kp);
109 }
110 
/* Range-checked setter paired with the stock ushort getter. */
static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};

/* SGEs per hardware SGL; settable only at load time (sysfs mode 0444). */
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
119 
/*
 * Service a request with the software fallback transform instead of the
 * hardware. On success the produced length is copied back into the
 * original request; on failure the error is logged and propagated.
 */
static int hisi_zip_fallback_do_work(struct acomp_req *acomp_req, bool is_decompress)
{
	ACOMP_FBREQ_ON_STACK(fbreq, acomp_req);
	int ret;

	ret = is_decompress ? crypto_acomp_decompress(fbreq) :
			      crypto_acomp_compress(fbreq);
	if (ret) {
		pr_err("failed to do fallback work, ret=%d\n", ret);
		return ret;
	}

	acomp_req->dlen = fbreq->dlen;
	return 0;
}
137 
hisi_zip_create_req(struct hisi_zip_qp_ctx * qp_ctx,struct acomp_req * req)138 static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
139 						struct acomp_req *req)
140 {
141 	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
142 	struct hisi_zip_req *q = req_q->q;
143 	struct hisi_zip_req *req_cache;
144 	int req_id;
145 
146 	spin_lock(&req_q->req_lock);
147 
148 	req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
149 	if (req_id >= req_q->size) {
150 		spin_unlock(&req_q->req_lock);
151 		dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
152 		return ERR_PTR(-EAGAIN);
153 	}
154 	set_bit(req_id, req_q->req_bitmap);
155 
156 	spin_unlock(&req_q->req_lock);
157 
158 	req_cache = q + req_id;
159 	req_cache->req_id = req_id;
160 	req_cache->req = req;
161 	req_cache->qp_ctx = qp_ctx;
162 
163 	return req_cache;
164 }
165 
/*
 * Release a request slot back to the cache. The slot's contents are left
 * stale and are overwritten the next time the slot is claimed.
 */
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
				struct hisi_zip_req *req)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;

	spin_lock(&req_q->req_lock);
	clear_bit(req->req_id, req_q->req_bitmap);
	spin_unlock(&req_q->req_lock);
}
175 
hisi_zip_fill_addr(struct hisi_zip_sqe * sqe,struct hisi_zip_req * req)176 static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
177 {
178 	sqe->source_addr_l = lower_32_bits(req->dma_src);
179 	sqe->source_addr_h = upper_32_bits(req->dma_src);
180 	sqe->dest_addr_l = lower_32_bits(req->dma_dst);
181 	sqe->dest_addr_h = upper_32_bits(req->dma_dst);
182 }
183 
hisi_zip_fill_buf_size(struct hisi_zip_sqe * sqe,struct hisi_zip_req * req)184 static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
185 {
186 	struct acomp_req *a_req = req->req;
187 
188 	sqe->input_data_length = a_req->slen;
189 	sqe->dest_avail_out = a_req->dlen;
190 }
191 
/* Set the buffer-type field of dw9, preserving the other bits. */
static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_BUF_TYPE_M) |
		   FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
}
200 
/* Set the request-type field of dw9, preserving the other bits. */
static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_REQ_TYPE_M) |
		   FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
}
209 
/* Set the window-size field of dw9, preserving the other bits. */
static void hisi_zip_fill_win_size(struct hisi_zip_sqe *sqe, u8 win_size)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_WIN_SIZE_M) |
		   FIELD_PREP(HZIP_WIN_SIZE_M, win_size);
}
218 
hisi_zip_fill_tag(struct hisi_zip_sqe * sqe,struct hisi_zip_req * req)219 static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
220 {
221 	sqe->dw26 = lower_32_bits((u64)req);
222 	sqe->dw27 = upper_32_bits((u64)req);
223 }
224 
/* Set the SQE-type field of dw7, preserving the other bits. */
static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
{
	sqe->dw7 = (sqe->dw7 & ~HZIP_SQE_TYPE_M) |
		   FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
}
233 
/*
 * Build a complete SQE for one request: zero it, then populate every
 * field through the context's ops table. The individual fill helpers
 * touch disjoint fields, so their order does not matter.
 */
static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
			      u8 req_type, struct hisi_zip_req *req)
{
	const struct hisi_zip_sqe_ops *ops = ctx->ops;

	memset(sqe, 0, sizeof(*sqe));

	ops->fill_sqe_type(sqe, ops->sqe_type);
	ops->fill_addr(sqe, req);
	ops->fill_buf_size(sqe, req);
	ops->fill_buf_type(sqe, HZIP_SGL);
	ops->fill_req_type(sqe, req_type);
	ops->fill_win_size(sqe, HZIP_16K_WINSZ);
	ops->fill_tag(sqe, req);
}
249 
/*
 * Map the request's source/destination scatterlists to hardware SGLs and
 * submit one descriptor to the hardware queue.
 *
 * Returns -EINPROGRESS when the descriptor was queued (completion arrives
 * asynchronously via hisi_zip_acomp_cb), -EINVAL for an empty request, or
 * a negative errno with all mappings undone on failure. Note a busy queue
 * is reported as -EAGAIN so the caller can retry.
 */
static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
			    struct hisi_zip_req *req)
{
	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct acomp_req *a_req = req->req;
	struct hisi_qp *qp = qp_ctx->qp;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe zip_sqe;
	int ret;

	if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
		return -EINVAL;

	/* each request owns a pair of pool slots: 2*id for src, 2*id+1 for dst */
	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
						    req->req_id << 1, &req->dma_src,
						    DMA_TO_DEVICE);
	if (IS_ERR(req->hw_src)) {
		dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
			PTR_ERR(req->hw_src));
		return PTR_ERR(req->hw_src);
	}

	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
						    (req->req_id << 1) + 1,
						    &req->dma_dst, DMA_FROM_DEVICE);
	if (IS_ERR(req->hw_dst)) {
		ret = PTR_ERR(req->hw_dst);
		dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
			ret);
		goto err_unmap_input;
	}

	hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp_ctx->req_type, req);

	/* send command to start a task */
	atomic64_inc(&dfx->send_cnt);
	ret = hisi_qp_send(qp, &zip_sqe);
	if (unlikely(ret < 0)) {
		/* queue full: normalize to -EAGAIN so callers can resubmit */
		atomic64_inc(&dfx->send_busy_cnt);
		ret = -EAGAIN;
		dev_dbg_ratelimited(dev, "failed to send request!\n");
		goto err_unmap_output;
	}

	return -EINPROGRESS;

err_unmap_output:
	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst, DMA_FROM_DEVICE);
err_unmap_input:
	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src, DMA_TO_DEVICE);
	return ret;
}
303 
hisi_zip_get_status(struct hisi_zip_sqe * sqe)304 static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
305 {
306 	return sqe->dw3 & HZIP_BD_STATUS_M;
307 }
308 
/* Number of output bytes the hardware produced for this descriptor. */
static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
{
	return sqe->produced;
}
313 
/*
 * Hardware completion callback (installed as qp->req_cb).
 *
 * Recovers the request from the SQE tag, checks the BD status, unmaps the
 * DMA buffers, reports the produced length, completes the acomp request,
 * and finally frees the cache slot. The slot must be freed last: until
 * then the request memory may not be reused.
 */
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
	struct hisi_zip_sqe *sqe = data;
	struct hisi_zip_req *req = (struct hisi_zip_req *)GET_REQ_FROM_SQE(sqe);
	struct hisi_zip_qp_ctx *qp_ctx = req->qp_ctx;
	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct device *dev = &qp->qm->pdev->dev;
	struct acomp_req *acomp_req = req->req;
	int err = 0;
	u32 status;

	atomic64_inc(&dfx->recv_cnt);
	status = ops->get_status(sqe);
	/* HZIP_NC_ERR is tolerated; any other non-zero status is a failure */
	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
			sqe->produced);
		atomic64_inc(&dfx->err_bd_cnt);
		err = -EIO;
	}

	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst, DMA_FROM_DEVICE);
	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src, DMA_TO_DEVICE);

	acomp_req->dlen = ops->get_dstlen(sqe);

	/* complete only if the caller installed a completion handler */
	if (acomp_req->base.complete)
		acomp_request_complete(acomp_req, err);

	hisi_zip_remove_req(qp_ctx, req);
}
346 
hisi_zip_acompress(struct acomp_req * acomp_req)347 static int hisi_zip_acompress(struct acomp_req *acomp_req)
348 {
349 	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
350 	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
351 	struct hisi_zip_req *req;
352 	struct device *dev;
353 	int ret;
354 
355 	if (ctx->fallback)
356 		return hisi_zip_fallback_do_work(acomp_req, 0);
357 
358 	dev = &qp_ctx->qp->qm->pdev->dev;
359 
360 	req = hisi_zip_create_req(qp_ctx, acomp_req);
361 	if (IS_ERR(req))
362 		return PTR_ERR(req);
363 
364 	ret = hisi_zip_do_work(qp_ctx, req);
365 	if (unlikely(ret != -EINPROGRESS)) {
366 		dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
367 		hisi_zip_remove_req(qp_ctx, req);
368 	}
369 
370 	return ret;
371 }
372 
hisi_zip_adecompress(struct acomp_req * acomp_req)373 static int hisi_zip_adecompress(struct acomp_req *acomp_req)
374 {
375 	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
376 	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
377 	struct hisi_zip_req *req;
378 	struct device *dev;
379 	int ret;
380 
381 	if (ctx->fallback)
382 		return hisi_zip_fallback_do_work(acomp_req, 1);
383 
384 	dev = &qp_ctx->qp->qm->pdev->dev;
385 
386 	req = hisi_zip_create_req(qp_ctx, acomp_req);
387 	if (IS_ERR(req))
388 		return PTR_ERR(req);
389 
390 	ret = hisi_zip_do_work(qp_ctx, req);
391 	if (unlikely(ret != -EINPROGRESS)) {
392 		dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
393 				     ret);
394 		hisi_zip_remove_req(qp_ctx, req);
395 	}
396 
397 	return ret;
398 }
399 
/*
 * LZ4 ->decompress entry point: always serviced by the software fallback
 * transform (no hardware submission path here).
 */
static int hisi_zip_decompress(struct acomp_req *acomp_req)
{
	return hisi_zip_fallback_do_work(acomp_req, 1);
}
404 
/* SQE accessor table shared by both directions (SQE type 0x3). */
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
	.sqe_type		= 0x3,
	.fill_addr		= hisi_zip_fill_addr,
	.fill_buf_size		= hisi_zip_fill_buf_size,
	.fill_buf_type		= hisi_zip_fill_buf_type,
	.fill_req_type		= hisi_zip_fill_req_type,
	.fill_win_size		= hisi_zip_fill_win_size,
	.fill_tag		= hisi_zip_fill_tag,
	.fill_sqe_type		= hisi_zip_fill_sqe_type,
	.get_status		= hisi_zip_get_status,
	.get_dstlen		= hisi_zip_get_dstlen,
};
417 
/*
 * Allocate one qp per direction near @node and wire up the tfm context.
 *
 * Returns 0 on success or -ENODEV when no qps are available; on success
 * the qps are owned by the context until hisi_zip_ctx_exit().
 */
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
	struct hisi_zip_qp_ctx *qp_ctx;
	u8 alg_type[HZIP_CTX_Q_NUM];
	struct hisi_zip *hisi_zip;
	int ret, i;

	/* alg_type = 0 for compress, 1 for decompress in hw sqe */
	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		alg_type[i] = i;

	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node, alg_type);
	if (ret) {
		pr_err("failed to create zip qps (%d)!\n", ret);
		return -ENODEV;
	}

	/* both qps come from the same device; derive it from the first */
	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
		qp_ctx->ctx = hisi_zip_ctx;
		qp_ctx->zip_dev = hisi_zip;
		qp_ctx->req_type = req_type;
		qp_ctx->qp = qps[i];
	}

	hisi_zip_ctx->ops = &hisi_zip_ops;

	return 0;
}
450 
hisi_zip_ctx_exit(struct hisi_zip_ctx * hisi_zip_ctx)451 static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
452 {
453 	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
454 	int i;
455 
456 	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
457 		qps[i] = hisi_zip_ctx->qp_ctx[i].qp;
458 
459 	hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
460 }
461 
/*
 * Allocate a request cache (slot array + free-slot bitmap) for each of
 * the two queues, sized to the qp's sq_depth.
 *
 * The unwind ladder is asymmetric on purpose: which resources exist at
 * the point of failure depends on the loop index (i == 0 means nothing
 * from a previous iteration needs freeing).
 */
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_req_q *req_q;
	int i, ret;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		req_q = &ctx->qp_ctx[i].req_q;
		req_q->size = q_depth;

		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
		if (!req_q->req_bitmap) {
			ret = -ENOMEM;
			/* first iteration: nothing allocated yet */
			if (i == 0)
				return ret;

			/* second iteration: free the comp queue's resources */
			goto err_free_comp_q;
		}
		spin_lock_init(&req_q->req_lock);

		req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size);
		if (!req_q->q) {
			ret = -ENOMEM;
			if (i == 0)
				goto err_free_comp_bitmap;
			else
				goto err_free_decomp_bitmap;
		}
	}

	return 0;

err_free_decomp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_comp_q:
	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_comp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
	return ret;
}
502 
hisi_zip_release_req_q(struct hisi_zip_ctx * ctx)503 static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
504 {
505 	int i;
506 
507 	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
508 		kfree(ctx->qp_ctx[i].req_q.q);
509 		bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
510 	}
511 }
512 
/*
 * Create a hardware SGL pool per queue, holding 2 * sq_depth SGLs
 * (each request uses one SGL for src and one for dst).
 *
 * Returns 0 on success or -ENOMEM, freeing the first queue's pool when
 * only the second allocation failed (HZIP_CTX_Q_NUM is 2, so i == 1 is
 * the only case with a prior pool to undo).
 */
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_qp_ctx *tmp;
	struct device *dev;
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		tmp = &ctx->qp_ctx[i];
		dev = &tmp->qp->qm->pdev->dev;
		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
							 sgl_sge_nr);
		if (IS_ERR(tmp->sgl_pool)) {
			if (i == 1)
				goto err_free_sgl_pool0;
			return -ENOMEM;
		}
	}

	return 0;

err_free_sgl_pool0:
	hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
			       ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
	return -ENOMEM;
}
539 
hisi_zip_release_sgl_pool(struct hisi_zip_ctx * ctx)540 static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
541 {
542 	int i;
543 
544 	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
545 		hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
546 				       ctx->qp_ctx[i].sgl_pool);
547 }
548 
hisi_zip_set_acomp_cb(struct hisi_zip_ctx * ctx,void (* fn)(struct hisi_qp *,void *))549 static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
550 				  void (*fn)(struct hisi_qp *, void *))
551 {
552 	int i;
553 
554 	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
555 		ctx->qp_ctx[i].qp->req_cb = fn;
556 }
557 
/*
 * acomp ->init: set up qps, request caches, and SGL pools for the tfm.
 *
 * Never fails the tfm allocation: every hardware setup error unwinds what
 * was built and flips ctx->fallback so the software transform services
 * all requests, then returns 0.
 */
static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct device *dev;
	int ret;

	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
	if (ret) {
		pr_err("failed to init ctx (%d)!\n", ret);
		goto switch_to_soft;
	}

	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;

	ret = hisi_zip_create_req_q(ctx);
	if (ret) {
		dev_err(dev, "failed to create request queue (%d)!\n", ret);
		goto err_ctx_exit;
	}

	ret = hisi_zip_create_sgl_pool(ctx);
	if (ret) {
		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
		goto err_release_req_q;
	}

	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);

	return 0;

err_release_req_q:
	hisi_zip_release_req_q(ctx);
err_ctx_exit:
	hisi_zip_ctx_exit(ctx);
switch_to_soft:
	/* degrade gracefully: serve this tfm with the software fallback */
	ctx->fallback = true;
	return 0;
}
597 
hisi_zip_acomp_exit(struct crypto_acomp * tfm)598 static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
599 {
600 	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
601 
602 	if (ctx->fallback)
603 		return;
604 
605 	hisi_zip_release_sgl_pool(ctx);
606 	hisi_zip_release_req_q(ctx);
607 	hisi_zip_ctx_exit(ctx);
608 }
609 
/* Deflate via hardware for both directions; NEED_FALLBACK for soft mode. */
static struct acomp_alg hisi_zip_acomp_deflate = {
	.init			= hisi_zip_acomp_init,
	.exit			= hisi_zip_acomp_exit,
	.compress		= hisi_zip_acompress,
	.decompress		= hisi_zip_adecompress,
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "hisi-deflate-acomp",
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_module		= THIS_MODULE,
		.cra_priority		= HZIP_ALG_PRIORITY,
		.cra_ctxsize		= sizeof(struct hisi_zip_ctx),
	}
};
625 
hisi_zip_register_deflate(struct hisi_qm * qm)626 static int hisi_zip_register_deflate(struct hisi_qm *qm)
627 {
628 	int ret;
629 
630 	if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
631 		return 0;
632 
633 	ret = crypto_register_acomp(&hisi_zip_acomp_deflate);
634 	if (ret)
635 		dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret);
636 
637 	return ret;
638 }
639 
hisi_zip_unregister_deflate(struct hisi_qm * qm)640 static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
641 {
642 	if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
643 		return;
644 
645 	crypto_unregister_acomp(&hisi_zip_acomp_deflate);
646 }
647 
/*
 * LZ4: hardware compress path, software-fallback decompress path
 * (note .decompress is hisi_zip_decompress, not hisi_zip_adecompress).
 */
static struct acomp_alg hisi_zip_acomp_lz4 = {
	.init			= hisi_zip_acomp_init,
	.exit			= hisi_zip_acomp_exit,
	.compress		= hisi_zip_acompress,
	.decompress		= hisi_zip_decompress,
	.base			= {
		.cra_name		= "lz4",
		.cra_driver_name	= "hisi-lz4-acomp",
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_module		= THIS_MODULE,
		.cra_priority		= HZIP_ALG_PRIORITY,
		.cra_ctxsize		= sizeof(struct hisi_zip_ctx),
	}
};
663 
hisi_zip_register_lz4(struct hisi_qm * qm)664 static int hisi_zip_register_lz4(struct hisi_qm *qm)
665 {
666 	int ret;
667 
668 	if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
669 		return 0;
670 
671 	ret = crypto_register_acomp(&hisi_zip_acomp_lz4);
672 	if (ret)
673 		dev_err(&qm->pdev->dev, "failed to register to LZ4 (%d)!\n", ret);
674 
675 	return ret;
676 }
677 
hisi_zip_unregister_lz4(struct hisi_qm * qm)678 static void hisi_zip_unregister_lz4(struct hisi_qm *qm)
679 {
680 	if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
681 		return;
682 
683 	crypto_unregister_acomp(&hisi_zip_acomp_lz4);
684 }
685 
hisi_zip_register_to_crypto(struct hisi_qm * qm)686 int hisi_zip_register_to_crypto(struct hisi_qm *qm)
687 {
688 	int ret = 0;
689 
690 	mutex_lock(&zip_algs_lock);
691 	if (zip_available_devs) {
692 		zip_available_devs++;
693 		goto unlock;
694 	}
695 
696 	ret = hisi_zip_register_deflate(qm);
697 	if (ret)
698 		goto unlock;
699 
700 	ret = hisi_zip_register_lz4(qm);
701 	if (ret)
702 		goto unreg_deflate;
703 
704 	zip_available_devs++;
705 	mutex_unlock(&zip_algs_lock);
706 
707 	return 0;
708 
709 unreg_deflate:
710 	hisi_zip_unregister_deflate(qm);
711 unlock:
712 	mutex_unlock(&zip_algs_lock);
713 	return ret;
714 }
715 
hisi_zip_unregister_from_crypto(struct hisi_qm * qm)716 void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
717 {
718 	mutex_lock(&zip_algs_lock);
719 	if (--zip_available_devs)
720 		goto unlock;
721 
722 	hisi_zip_unregister_deflate(qm);
723 	hisi_zip_unregister_lz4(qm);
724 
725 unlock:
726 	mutex_unlock(&zip_algs_lock);
727 }
728