// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 */

#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

#include "dfl-afu.h"

void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);

        afu->dma_regions = RB_ROOT;
}
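
/*
 * All mapped regions are tracked in the per-AFU rbtree initialized above,
 * keyed by iova. Lookups and updates of the tree are serialized by
 * pdata->lock (see the "Needs to be called with pdata->lock held" notes
 * on the helpers below).
 */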

/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of the given dfl_afu_dma_region.
 * Return 0 for success or a negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
                             struct dfl_afu_dma_region *region)
{
        int npages = region->length >> PAGE_SHIFT;
        struct device *dev = &pdata->dev->dev;
        int ret, pinned;

        ret = account_locked_vm(current->mm, npages, true);
        if (ret)
                return ret;

        region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
        if (!region->pages) {
                ret = -ENOMEM;
                goto unlock_vm;
        }

        pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
                                     region->pages);
        if (pinned < 0) {
                ret = pinned;
                goto free_pages;
        } else if (pinned != npages) {
                ret = -EFAULT;
                goto unpin_pages;
        }

        dev_dbg(dev, "%d pages pinned\n", pinned);

        return 0;

unpin_pages:
        unpin_user_pages(region->pages, pinned);
free_pages:
        kfree(region->pages);
unlock_vm:
        account_locked_vm(current->mm, npages, false);
        return ret;
}
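
/*
 * Note on the error paths above: pin_user_pages_fast() may legitimately pin
 * fewer pages than requested. In that case the pages that were pinned are
 * released again and -EFAULT is returned, so callers never observe a
 * partially pinned region.
 */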

/**
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be unpinned
 *
 * Unpin all the pages of the given dfl_afu_dma_region.
 */
static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
                                struct dfl_afu_dma_region *region)
{
        long npages = region->length >> PAGE_SHIFT;
        struct device *dev = &pdata->dev->dev;

        unpin_user_pages(region->pages, npages);
        kfree(region->pages);
        account_locked_vm(current->mm, npages, false);

        dev_dbg(dev, "%ld pages unpinned\n", npages);
}

/**
 * afu_dma_check_continuous_pages - check if pages are physically contiguous
 * @region: dma memory region
 *
 * Return true if the pages of the given dma memory region have contiguous
 * physical addresses, otherwise return false.
 */
static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
{
        int npages = region->length >> PAGE_SHIFT;
        int i;

        for (i = 0; i < npages - 1; i++)
                if (page_to_pfn(region->pages[i]) + 1 !=
                                page_to_pfn(region->pages[i + 1]))
                        return false;

        return true;
}
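
/*
 * Example with illustrative pfns: pages with pfns 100, 101 and 102 pass this
 * check, while pfns 100, 102 and 103 fail it. The requirement exists because
 * afu_dma_map_region() maps the whole region with a single dma_map_page()
 * call on the first page, which is only valid when the backing pages are
 * physically contiguous.
 */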

/**
 * dma_region_check_iova - check if memory area is fully contained in the region
 * @region: dma memory region
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * Compare the dma memory area defined by @iova and @size with the given dma
 * region. Return true if the memory area is fully contained in the region,
 * otherwise false.
 */
static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
                                  u64 iova, u64 size)
{
        if (!size && region->iova != iova)
                return false;

        return (region->iova <= iova) &&
                (region->length + region->iova >= iova + size);
}
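
/*
 * Worked example with illustrative values: a region with iova 0x100000 and
 * length 0x4000 covers [0x100000, 0x104000). The check returns true for
 * iova = 0x101000, size = 0x2000 (fully inside) and false for
 * iova = 0x103000, size = 0x2000 (crosses the region end). With size == 0,
 * it returns true only when @iova equals the region's start address.
 */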

/**
 * afu_dma_region_add - add given dma region to rbtree
 * @pdata: feature device platform data
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.
 *
 * Needs to be called with pdata->lock held.
 */
static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
                              struct dfl_afu_dma_region *region)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
        struct rb_node **new, *parent = NULL;

        dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
                (unsigned long long)region->iova);

        new = &afu->dma_regions.rb_node;

        while (*new) {
                struct dfl_afu_dma_region *this;

                this = container_of(*new, struct dfl_afu_dma_region, node);

                parent = *new;

                if (dma_region_check_iova(this, region->iova, region->length))
                        return -EEXIST;

                if (region->iova < this->iova)
                        new = &((*new)->rb_left);
                else if (region->iova > this->iova)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        rb_link_node(&region->node, parent, new);
        rb_insert_color(&region->node, &afu->dma_regions);

        return 0;
}
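
/*
 * The descent above rejects two kinds of conflicts: a new region whose range
 * dma_region_check_iova() finds already fully contained in a region on the
 * search path, and a new region whose start iova collides with an existing
 * node's key.
 */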

/**
 * afu_dma_region_remove - remove given dma region from rbtree
 * @pdata: feature device platform data
 * @region: dma region to be removed
 *
 * Needs to be called with pdata->lock held.
 */
static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
                                  struct dfl_afu_dma_region *region)
{
        struct dfl_afu *afu;

        dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
                (unsigned long long)region->iova);

        afu = dfl_fpga_pdata_get_private(pdata);
        rb_erase(&region->node, &afu->dma_regions);
}

/**
 * afu_dma_region_destroy - destroy all regions in rbtree
 * @pdata: feature device platform data
 *
 * Needs to be called with pdata->lock held.
 */
void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
        struct rb_node *node = rb_first(&afu->dma_regions);
        struct dfl_afu_dma_region *region;

        while (node) {
                region = container_of(node, struct dfl_afu_dma_region, node);

                dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
                        (unsigned long long)region->iova);

                rb_erase(node, &afu->dma_regions);

                if (region->iova)
                        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                                       region->iova, region->length,
                                       DMA_BIDIRECTIONAL);

                if (region->pages)
                        afu_dma_unpin_pages(pdata, region);

                node = rb_next(node);
                kfree(region);
        }
}
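
/*
 * The teardown order above mirrors afu_dma_unmap_region(): each region is
 * removed from the tree, its DMA mapping is torn down, and only then are the
 * backing pages unpinned and the region freed.
 */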

/**
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 * @pdata: feature device platform data
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova + @size)
 * If nothing is matched, it returns NULL.
 *
 * Needs to be called with pdata->lock held.
 */
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
        struct rb_node *node = afu->dma_regions.rb_node;
        struct device *dev = &pdata->dev->dev;

        while (node) {
                struct dfl_afu_dma_region *region;

                region = container_of(node, struct dfl_afu_dma_region, node);

                if (dma_region_check_iova(region, iova, size)) {
                        dev_dbg(dev, "find region (iova = %llx)\n",
                                (unsigned long long)region->iova);
                        return region;
                }

                if (iova < region->iova)
                        node = node->rb_left;
                else if (iova > region->iova)
                        node = node->rb_right;
                else
                        /* the iova region is not fully covered. */
                        break;
        }

        dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
                (unsigned long long)iova, (unsigned long long)size);

        return NULL;
}

/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @pdata: feature device platform data
 * @iova: address of the dma region
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
        return afu_dma_region_find(pdata, iova, 0);
}

/**
 * afu_dma_map_region - map memory region for dma
 * @pdata: feature device platform data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer used to return the iova of the mapped region
 *
 * Map the memory region defined by @user_addr and @length, and return the dma
 * address of the mapped region via @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
                       u64 user_addr, u64 length, u64 *iova)
{
        struct dfl_afu_dma_region *region;
        int ret;

        /*
         * Check inputs: only accept a page-aligned user memory region with
         * valid length.
         */
        if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
                return -EINVAL;

        /* Check overflow */
        if (user_addr + length < user_addr)
                return -EINVAL;

        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        region->user_addr = user_addr;
        region->length = length;

        /* Pin the user memory region */
        ret = afu_dma_pin_pages(pdata, region);
        if (ret) {
                dev_err(&pdata->dev->dev, "failed to pin memory region\n");
                goto free_region;
        }

        /* Only accept physically contiguous pages; error out otherwise */
        if (!afu_dma_check_continuous_pages(region)) {
                dev_err(&pdata->dev->dev, "pages are not continuous\n");
                ret = -EINVAL;
                goto unpin_pages;
        }

        /* The pages are contiguous, so map the whole region in one go */
        region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
                                    region->pages[0], 0,
                                    region->length,
                                    DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
                dev_err(&pdata->dev->dev, "failed to map for dma\n");
                ret = -EFAULT;
                goto unpin_pages;
        }

        *iova = region->iova;

        mutex_lock(&pdata->lock);
        ret = afu_dma_region_add(pdata, region);
        mutex_unlock(&pdata->lock);
        if (ret) {
                dev_err(&pdata->dev->dev, "failed to add dma region\n");
                goto unmap_dma;
        }

        return 0;

unmap_dma:
        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
        afu_dma_unpin_pages(pdata, region);
free_region:
        kfree(region);
        return ret;
}

/**
 * afu_dma_unmap_region - unmap dma memory region
 * @pdata: feature device platform data
 * @iova: dma address of the region
 *
 * Unmap the dma memory region based on @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
{
        struct dfl_afu_dma_region *region;

        mutex_lock(&pdata->lock);
        region = afu_dma_region_find_iova(pdata, iova);
        if (!region) {
                mutex_unlock(&pdata->lock);
                return -EINVAL;
        }

        if (region->in_use) {
                mutex_unlock(&pdata->lock);
                return -EBUSY;
        }

        afu_dma_region_remove(pdata, region);
        mutex_unlock(&pdata->lock);

        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                       region->iova, region->length, DMA_BIDIRECTIONAL);
        afu_dma_unpin_pages(pdata, region);
        kfree(region);

        return 0;
}
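
/*
 * Hypothetical usage sketch, not part of this driver: the function name below
 * is illustrative only. It shows the expected map/use/unmap pairing around
 * the two exported entry points, with the returned iova handed to the AFU
 * (in the real driver this is driven by the DFL_FPGA_PORT_DMA_MAP and
 * DFL_FPGA_PORT_DMA_UNMAP ioctls).
 */
#if 0   /* illustration only */
static long example_dma_roundtrip(struct dfl_feature_platform_data *pdata,
                                  u64 user_addr, u64 length)
{
        u64 iova;
        int ret;

        ret = afu_dma_map_region(pdata, user_addr, length, &iova);
        if (ret)
                return ret;

        /* ... program iova into the AFU and run the accelerator ... */

        return afu_dma_unmap_region(pdata, iova);
}
#endif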