xref: /linux/drivers/crypto/intel/qat/qat_common/qat_bl.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"

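/**
 * qat_bl_free_bufl() - unmap and release a request's buffer lists
 * @accel_dev: QAT acceleration device the buffers were mapped for
 * @buf: per-request buffer bookkeeping filled in by qat_bl_sgl_to_bufl()
 *
 * Undoes qat_bl_sgl_to_bufl(): unmaps every flat buffer and the firmware
 * descriptor(s), then frees any descriptor that was allocated with kzalloc
 * rather than taken from the space preallocated in @buf.  For in-place
 * requests (blp == bloutp) only the source list exists.
 */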
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bl = buf->bl;
	struct qat_alg_buf_list *blout = buf->blout;
	dma_addr_t blp = buf->blp;
	dma_addr_t blpout = buf->bloutp;
	size_t sz = buf->sz;
	size_t sz_out = buf->sz_out;
	int bl_dma_dir;
	int i;

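	/* In-place requests (blp == blpout) mapped the source bidirectionally */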
	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->buffers[i].addr,
				 bl->buffers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!buf->sgl_src_valid)
		kfree(bl);

	if (blp != blpout) {
		for (i = 0; i < blout->num_mapped_bufs; i++) {
			dma_unmap_single(dev, blout->buffers[i].addr,
					 blout->buffers[i].len,
					 DMA_FROM_DEVICE);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!buf->sgl_dst_valid)
			kfree(blout);
	}
}

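/*
 * Translate source/destination scatterlists into the flat buffer-list
 * format consumed by the firmware.  Every non-empty scatterlist entry is
 * DMA mapped individually, and the resulting descriptor is itself mapped
 * so the device can fetch it.  @sskip/@dskip bytes are skipped at the
 * start of the source/destination data, and an optional caller-mapped
 * @extra_dst_buff of @sz_extra_dst_buff bytes is appended to the
 * destination list.
 */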
static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				unsigned int sskip,
				unsigned int dskip,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, buffers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	unsigned int left;
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = container_of(&buf->sgl_src.sgl_hdr,
				    struct qat_alg_buf_list, hdr);
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

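	/* For in-place requests (sgl == sglout) the device also writes the source */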
	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

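	/*
	 * Skip the first sskip bytes of the source data: entries wholly
	 * inside the skip region are dropped; a partially skipped entry is
	 * mapped from the offset with a correspondingly shortened length.
	 */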
	left = sskip;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		if (left >= sg->length) {
			left -= sg->length;
			continue;
		}
		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
						       sg->length - left,
						       bufl_dma_dir);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_in;
		sg_nctr++;
		if (left) {
			bufl->buffers[y].len -= left;
			left = 0;
		}
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *buffers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, buffers, n);
		left = dskip;

		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = container_of(&buf->sgl_dst.sgl_hdr,
					       struct qat_alg_buf_list, hdr);
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		buffers = buflout->buffers;
		for (i = 0; i < n; i++)
			buffers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			if (left >= sg->length) {
				left -= sg->length;
				continue;
			}
			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
							 sg->length - left,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
				goto err_out;
			buffers[y].len = sg->length;
			sg_nctr++;
			if (left) {
				buffers[y].len -= left;
				left = 0;
			}
		}
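		/*
		 * Append the caller-mapped extra buffer: it counts towards
		 * num_bufs but not num_mapped_bufs, since this function did
		 * not map it and must not unmap it.
		 */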
		if (extra_buff) {
			buffers[sg_nctr].addr = extra_dst_buff;
			buffers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr;
		buflout->num_bufs += extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;

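/*
 * Unwind in reverse order of construction.  Buffers that were never
 * mapped still hold DMA_MAPPING_ERROR and are skipped; the destination
 * loop stops at the caller-owned extra buffer, which must stay mapped.
 */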
err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		if (buflout->buffers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
			dma_unmap_single(dev, buflout->buffers[i].addr,
					 buflout->buffers[i].len,
					 DMA_FROM_DEVICE);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}

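/**
 * qat_bl_sgl_to_bufl() - map request scatterlists into firmware buffer lists
 * @accel_dev: QAT acceleration device to map for
 * @sgl: source scatterlist
 * @sglout: destination scatterlist; equal to @sgl for in-place requests
 * @buf: per-request buffer bookkeeping to fill in
 * @params: optional skip offsets and extra destination buffer, may be NULL
 * @flags: allocation flags used when a buffer list exceeds the preallocated
 *	   QAT_MAX_BUFF_DESC entries
 *
 * Return: 0 on success, -EINVAL if @sgl has no entries, -ENOMEM on
 * allocation or DMA mapping failure.
 */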
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = 0;
	size_t sz_extra_dst_buff = 0;
	unsigned int sskip = 0;
	unsigned int dskip = 0;

	if (params) {
		extra_dst_buff = params->extra_dst_buff;
		sz_extra_dst_buff = params->sz_extra_dst_buff;
		sskip = params->sskip;
		dskip = params->dskip;
	}

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    sskip, dskip, flags);
}
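
/*
 * Hypothetical caller sketch (illustrative only, not from this driver's
 * sources): a request path would typically pair the two entry points
 * above, e.g.
 *
 *	struct qat_sgl_to_bufl_params params = { .sskip = hdr_len };
 *	int ret;
 *
 *	ret = qat_bl_sgl_to_bufl(accel_dev, req->src, req->dst,
 *				 &qat_req->buf, &params, GFP_ATOMIC);
 *	if (ret)
 *		return ret;
 *
 * and, once the firmware response has been processed,
 *
 *	qat_bl_free_bufl(accel_dev, &qat_req->buf);
 *
 * "hdr_len", "req" and "qat_req" are assumed names for this sketch.
 */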
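/* Unmap every flat buffer of a list whose entries were mapped DMA_FROM_DEVICE */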
static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
			     struct qat_alg_buf_list *bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	int n = bl->num_bufs;
	int i;

	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bl->buffers[i].addr))
			dma_unmap_single(dev, bl->buffers[i].addr,
					 bl->buffers[i].len, DMA_FROM_DEVICE);
}

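/*
 * Allocate a buffer list for @sgl and DMA map every non-empty entry for
 * the device to write into.  On failure every mapping made so far is
 * undone, the list is freed and *bl is set to NULL.
 */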
static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
			  struct scatterlist *sgl,
			  struct qat_alg_buf_list **bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bufl;
	int node = dev_to_node(dev);
	struct scatterlist *sg;
	int n, i, sg_nctr;
	size_t sz;

	n = sg_nents(sgl);
	sz = struct_size(bufl, buffers, n);
	bufl = kzalloc_node(sz, GFP_KERNEL, node);
	if (unlikely(!bufl))
		return -ENOMEM;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	sg_nctr = 0;
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
						       sg->length,
						       DMA_FROM_DEVICE);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_map;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	bufl->num_mapped_bufs = sg_nctr;

	*bl = bufl;

	return 0;

err_map:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 DMA_FROM_DEVICE);
	kfree(bufl);
	*bl = NULL;

	return -ENOMEM;
}

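/*
 * Unmap a destination buffer list, optionally free it, and free its
 * scatterlist.  @free_bl is false when the list lives in preallocated
 * request storage and must not be passed to kfree().
 */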
static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
				  struct scatterlist *sgl,
				  struct qat_alg_buf_list *bl,
				  bool free_bl)
{
	if (bl) {
		qat_bl_sgl_unmap(accel_dev, bl);

		if (free_bl)
			kfree(bl);
	}
	if (sgl)
		sgl_free(sgl);
}

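/* Allocate a @dlen byte scatterlist and map it as a new destination list */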
static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
				struct scatterlist **sgl,
				struct qat_alg_buf_list **bl,
				unsigned int dlen,
				gfp_t gfp)
{
	struct scatterlist *dst;
	int ret;

	dst = sgl_alloc(dlen, gfp, NULL);
	if (!dst) {
		dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
		return -ENOMEM;
	}

	ret = qat_bl_sgl_map(accel_dev, dst, bl);
	if (ret)
		goto err;

	*sgl = dst;

	return 0;

err:
	sgl_free(dst);
	*sgl = NULL;
	return ret;
}

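/**
 * qat_bl_realloc_map_new_dst() - replace a request's destination buffer
 * @accel_dev: QAT acceleration device
 * @sg: in/out pointer to the destination scatterlist to replace
 * @dlen: size in bytes of the new destination
 * @qat_bufs: per-request buffer bookkeeping to update
 * @gfp: allocation flags
 *
 * Allocates and maps a new @dlen byte destination together with a new
 * firmware descriptor, then unmaps and frees the old destination recorded
 * in @qat_bufs.  On failure the original destination is left intact.
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure.
 */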
int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
			       struct scatterlist **sg,
			       unsigned int dlen,
			       struct qat_request_buffs *qat_bufs,
			       gfp_t gfp)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t new_blp = DMA_MAPPING_ERROR;
	struct qat_alg_buf_list *new_bl;
	struct scatterlist *new_sg;
	size_t new_bl_size;
	int ret;

	ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
	if (ret)
		return ret;

	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);

	/* Map new firmware SGL descriptor */
	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, new_blp)))
		goto err;

	/* Unmap old firmware SGL descriptor */
	dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);

	/* Free and unmap old scatterlist */
	qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
			      !qat_bufs->sgl_dst_valid);

	qat_bufs->sgl_dst_valid = false;
	qat_bufs->blout = new_bl;
	qat_bufs->bloutp = new_blp;
	qat_bufs->sz_out = new_bl_size;

	*sg = new_sg;

	return 0;
err:
	qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);

	if (!dma_mapping_error(dev, new_blp))
		dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);

	return -ENOMEM;
}