xref: /linux/drivers/fpga/dfl-afu-dma-region.c (revision 13845bdc869f136f92ad3d40ea09b867bb4ce467)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

#include "dfl-afu.h"

void afu_dma_region_init(struct dfl_feature_dev_data *fdata)
{
	struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);

	afu->dma_regions = RB_ROOT;
}

/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @fdata: feature dev data
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of given dfl_afu_dma_region.
 * Return 0 for success or negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata,
			     struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	struct device *dev = &fdata->dev->dev;
	int ret, pinned;

	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
	if (!region->pages) {
		ret = -ENOMEM;
		goto unlock_vm;
	}

	pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
				     region->pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		ret = -EFAULT;
		goto unpin_pages;
	}

	dev_dbg(dev, "%d pages pinned\n", pinned);

	return 0;

unpin_pages:
	unpin_user_pages(region->pages, pinned);
free_pages:
	kfree(region->pages);
unlock_vm:
	account_locked_vm(current->mm, npages, false);
	return ret;
}

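/*
 * Note on the accounting above: the pinned pages are charged to the caller's
 * mm via account_locked_vm() before the pin is attempted, and released on
 * every failure path. A sketch of the arithmetic, assuming 4 KiB pages
 * (values are illustrative only): a 2 MiB region gives
 * npages = 0x200000 >> PAGE_SHIFT = 512, and those 512 pages count against
 * the caller's locked-memory limit until afu_dma_unpin_pages() drops them.
 */
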
/**
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @fdata: feature dev data
 * @region: dma memory region to be unpinned
 *
 * Unpin all the pages of given dfl_afu_dma_region.
 */
static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata,
				struct dfl_afu_dma_region *region)
{
	long npages = region->length >> PAGE_SHIFT;
	struct device *dev = &fdata->dev->dev;

	unpin_user_pages(region->pages, npages);
	kfree(region->pages);
	account_locked_vm(current->mm, npages, false);

	dev_dbg(dev, "%ld pages unpinned\n", npages);
}

/**
 * afu_dma_check_continuous_pages - check if pages are physically contiguous
 * @region: dma memory region
 *
 * Return true if the pages of the given dma memory region have contiguous
 * physical addresses, otherwise return false.
 */
static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	int i;

	for (i = 0; i < npages - 1; i++)
		if (page_to_pfn(region->pages[i]) + 1 !=
				page_to_pfn(region->pages[i + 1]))
			return false;

	return true;
}

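/*
 * A quick worked example of the check above (illustrative PFNs only): pages
 * with PFNs 0x100, 0x101, 0x102 pass, because each PFN is exactly one more
 * than the previous; pages with PFNs 0x100, 0x101, 0x103 fail at the second
 * comparison, so afu_dma_map_region() below will reject that region with
 * -EINVAL.
 */
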
/**
 * dma_region_check_iova - check if memory area is fully contained in the region
 * @region: dma memory region
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * Compare the dma memory area defined by @iova and @size with given dma region.
 * Return true if memory area is fully contained in the region, otherwise false.
 */
static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
				  u64 iova, u64 size)
{
	if (!size && region->iova != iova)
		return false;

	return (region->iova <= iova) &&
		(region->length + region->iova >= iova + size);
}

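/*
 * Containment check, by example (hypothetical numbers): for a region with
 * iova = 0x100000 and length = 0x4000, the area (0x101000, size 0x2000) is
 * contained, since 0x100000 <= 0x101000 and 0x104000 >= 0x103000; the area
 * (0x103000, size 0x2000) is not, since it would end at 0x105000. With
 * @size == 0 the check degenerates to an exact match on the start iova,
 * which is what afu_dma_region_find_iova() relies on.
 */
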
/**
 * afu_dma_region_add - add given dma region to rbtree
 * @fdata: feature dev data
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.
 *
 * Needs to be called with fdata->lock held.
 */
static int afu_dma_region_add(struct dfl_feature_dev_data *fdata,
			      struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
	struct rb_node **new, *parent = NULL;

	dev_dbg(&fdata->dev->dev, "add region (iova = %llx)\n",
		(unsigned long long)region->iova);

	new = &afu->dma_regions.rb_node;

	while (*new) {
		struct dfl_afu_dma_region *this;

		this = container_of(*new, struct dfl_afu_dma_region, node);

		parent = *new;

		if (dma_region_check_iova(this, region->iova, region->length))
			return -EEXIST;

		if (region->iova < this->iova)
			new = &((*new)->rb_left);
		else if (region->iova > this->iova)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&region->node, parent, new);
	rb_insert_color(&region->node, &afu->dma_regions);

	return 0;
}

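/*
 * The rbtree is keyed on the region's start iova. During the descent above a
 * new region is rejected with -EEXIST either when its start iova equals that
 * of an existing node, or when dma_region_check_iova() finds the whole new
 * range already contained in a node visited on the search path.
 */
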
/**
 * afu_dma_region_remove - remove given dma region from rbtree
 * @fdata: feature dev data
 * @region: dma region to be removed
 *
 * Needs to be called with fdata->lock held.
 */
static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata,
				  struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu;

	dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
		(unsigned long long)region->iova);

	afu = dfl_fpga_fdata_get_private(fdata);
	rb_erase(&region->node, &afu->dma_regions);
}

/**
 * afu_dma_region_destroy - destroy all regions in rbtree
 * @fdata: feature dev data
 *
 * Needs to be called with fdata->lock held.
 */
void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata)
{
	struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
	struct rb_node *node = rb_first(&afu->dma_regions);
	struct dfl_afu_dma_region *region;

	while (node) {
		region = container_of(node, struct dfl_afu_dma_region, node);

		dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
			(unsigned long long)region->iova);

		rb_erase(node, &afu->dma_regions);

		if (region->iova)
			dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
				       region->iova, region->length,
				       DMA_BIDIRECTIONAL);

		if (region->pages)
			afu_dma_unpin_pages(fdata, region);

		node = rb_next(node);
		kfree(region);
	}
}

/**
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 * @fdata: feature dev data
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova+size)
 * Returns NULL if nothing matches.
 *
 * Needs to be called with fdata->lock held.
 */
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size)
{
	struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
	struct rb_node *node = afu->dma_regions.rb_node;
	struct device *dev = &fdata->dev->dev;

	while (node) {
		struct dfl_afu_dma_region *region;

		region = container_of(node, struct dfl_afu_dma_region, node);

		if (dma_region_check_iova(region, iova, size)) {
			dev_dbg(dev, "find region (iova = %llx)\n",
				(unsigned long long)region->iova);
			return region;
		}

		if (iova < region->iova)
			node = node->rb_left;
		else if (iova > region->iova)
			node = node->rb_right;
		else
			/* the iova region is not fully covered. */
			break;
	}

	dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
		(unsigned long long)iova, (unsigned long long)size);

	return NULL;
}

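/*
 * Lookup modes, by example (hypothetical numbers): with a mapped region at
 * iova 0x100000 of length 0x4000, afu_dma_region_find(fdata, 0x100000, 0)
 * returns that region (exact start match), and
 * afu_dma_region_find(fdata, 0x101000, 0x1000) also returns it because the
 * queried area lies entirely inside the region. A query such as
 * afu_dma_region_find(fdata, 0x100000, 0x8000) returns NULL, since no single
 * region fully contains the requested range.
 */
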
/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @fdata: feature dev data
 * @iova: address of the dma region
 *
 * Needs to be called with fdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova)
{
	return afu_dma_region_find(fdata, iova, 0);
}

/**
 * afu_dma_map_region - map memory region for dma
 * @fdata: feature dev data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer used to return the iova (dma address) of the mapped region
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
		       u64 user_addr, u64 length, u64 *iova)
{
	struct device *dev = &fdata->dev->dev;
	struct dfl_afu_dma_region *region;
	int ret;

	/*
	 * Check inputs: only accept a page-aligned user memory region with a
	 * valid (non-zero) length.
	 */
	if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
		return -EINVAL;

	/* Check for overflow of user_addr + length */
	if (user_addr + length < user_addr)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->user_addr = user_addr;
	region->length = length;

	/* Pin the user memory region */
	ret = afu_dma_pin_pages(fdata, region);
	if (ret) {
		dev_err(dev, "failed to pin memory region\n");
		goto free_region;
	}

	/* Only accept physically contiguous pages; otherwise return an error */
	if (!afu_dma_check_continuous_pages(region)) {
		dev_err(dev, "pages are not continuous\n");
		ret = -EINVAL;
		goto unpin_pages;
	}

	/* The pages are contiguous, so map the whole region for DMA in one go */
	region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata),
				    region->pages[0], 0,
				    region->length,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) {
		dev_err(dev, "failed to map for dma\n");
		ret = -EFAULT;
		goto unpin_pages;
	}

	*iova = region->iova;

	mutex_lock(&fdata->lock);
	ret = afu_dma_region_add(fdata, region);
	mutex_unlock(&fdata->lock);
	if (ret) {
		dev_err(dev, "failed to add dma region\n");
		goto unmap_dma;
	}

	return 0;

unmap_dma:
	dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
	afu_dma_unpin_pages(fdata, region);
free_region:
	kfree(region);
	return ret;
}

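/*
 * Why the contiguity requirement: the whole region is mapped with a single
 * dma_map_page() call on the first page, using region->length as the size,
 * so the buffer must be physically contiguous for the device to see it as
 * one linear DMA window. In practice (an observation, not something enforced
 * here) callers typically back the buffer with hugepages so that larger
 * regions have a chance of passing afu_dma_check_continuous_pages().
 */
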
/**
 * afu_dma_unmap_region - unmap dma memory region
 * @fdata: feature dev data
 * @iova: dma address of the region
 *
 * Unmap dma memory region based on @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova)
{
	struct dfl_afu_dma_region *region;

	mutex_lock(&fdata->lock);
	region = afu_dma_region_find_iova(fdata, iova);
	if (!region) {
		mutex_unlock(&fdata->lock);
		return -EINVAL;
	}

	if (region->in_use) {
		mutex_unlock(&fdata->lock);
		return -EBUSY;
	}

	afu_dma_region_remove(fdata, region);
	mutex_unlock(&fdata->lock);

	dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
	afu_dma_unpin_pages(fdata, region);
	kfree(region);

	return 0;
}
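
/*
 * How these entry points are typically exercised: afu_dma_map_region() and
 * afu_dma_unmap_region() sit behind the DFL port DMA map/unmap ioctls. The
 * sketch below is a minimal, illustrative userspace caller, assuming the
 * DFL_FPGA_PORT_DMA_MAP/UNMAP uapi from <linux/fpga-dfl.h> and a port device
 * node such as /dev/dfl-port.0 (the node name and buffer size are examples,
 * not something this file defines). A hugepage-backed buffer is used so the
 * pinned pages are likely to be physically contiguous.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	int dma_map_example(void)
 *	{
 *		struct dfl_fpga_port_dma_unmap unmap = { .argsz = sizeof(unmap) };
 *		struct dfl_fpga_port_dma_map map = { .argsz = sizeof(map) };
 *		size_t len = 2 * 1024 * 1024;	// one 2 MiB hugepage
 *		void *buf;
 *		int fd;
 *
 *		fd = open("/dev/dfl-port.0", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *		if (buf == MAP_FAILED)
 *			return -1;
 *
 *		map.user_addr = (__u64)buf;
 *		map.length = len;
 *		if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map))
 *			return -1;
 *
 *		// map.iova now holds the dma address to program into the AFU
 *
 *		unmap.iova = map.iova;
 *		return ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *	}
 */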