xref: /linux/drivers/accel/ivpu/ivpu_gem_userptr.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"

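/*
 * Userptr BOs are built by pinning the user pages and wrapping them in a
 * minimal dma-buf exporter, so the buffer can be imported through the
 * driver's regular prime path. The map callback below hands out the single
 * sg_table stored in dmabuf->priv, so this exporter assumes one importer
 * at a time (the ivpu device itself).
 */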
static struct sg_table *
ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment,
			    enum dma_data_direction direction)
{
	struct sg_table *sgt = attachment->dmabuf->priv;
	int ret;

	ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ERR_PTR(ret);

	return sgt;
}

static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment,
					  struct sg_table *sgt,
					  enum dma_data_direction direction)
{
	dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
}

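/*
 * Called when the last reference to the dma-buf is dropped. Note that
 * for_each_sgtable_page() visits every PAGE_SIZE chunk even when contiguous
 * pages were coalesced into a single sg entry, so each page pinned by
 * pin_user_pages_fast() is unpinned exactly once.
 */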
static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf)
{
	struct sg_table *sgt = dma_buf->priv;
	struct sg_page_iter page_iter;
	struct page *page;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		page = sg_page_iter_page(&page_iter);
		unpin_user_page(page);
	}

	sg_free_table(sgt);
	kfree(sgt);
}

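/* No mmap or vmap callbacks: CPU access through this dma-buf is not supported. */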
static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = {
	.map_dma_buf = ivpu_gem_userptr_dmabuf_map,
	.unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap,
	.release = ivpu_gem_userptr_dmabuf_release,
};

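/*
 * Pin @size bytes of user memory at @user_ptr and wrap the pages in a
 * dma-buf. FOLL_LONGTERM is required because the pins outlive the ioctl:
 * they are held until the dma-buf is released. On success the returned
 * dma-buf owns the page pins via its sg_table.
 */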
static struct dma_buf *
ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
			   size_t size, u32 flags)
{
	struct dma_buf_export_info exp_info = {};
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct page **pages;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret, i, pinned;

	/* Add FOLL_WRITE only if the BO is not read-only */
	if (!(flags & DRM_IVPU_BO_READ_ONLY))
		gup_flags |= FOLL_WRITE;

	pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages);
	if (pinned < 0) {
		ret = pinned;
		ivpu_warn(vdev, "Failed to pin user pages: %d\n", ret);
		goto free_pages_array;
	}

	if (pinned != nr_pages) {
		ivpu_warn(vdev, "Pinned %d pages, expected %lu\n", pinned, nr_pages);
		ret = -EFAULT;
		goto unpin_pages;
	}

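	/*
	 * The sg_table records the pinned pages (coalescing contiguous runs
	 * into single entries); it does not take extra page references. Once
	 * the export succeeds, the release callback owns the pins and the
	 * temporary pages array is no longer needed.
	 */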
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto unpin_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL);
	if (ret) {
		ivpu_warn(vdev, "Failed to create sg table: %d\n", ret);
		goto free_sgt;
	}

	exp_info.exp_name = "ivpu_userptr_dmabuf";
	exp_info.owner = THIS_MODULE;
	exp_info.ops = &ivpu_gem_userptr_dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR | O_CLOEXEC;
	exp_info.priv = sgt;

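	/* On successful export, release() takes over freeing sgt and unpinning the pages */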
	dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(dma_buf)) {
		ret = PTR_ERR(dma_buf);
		ivpu_warn(vdev, "Failed to export userptr dma-buf: %d\n", ret);
		goto free_sg_table;
	}

	kvfree(pages);
	return dma_buf;

free_sg_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
unpin_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages_array:
	kvfree(pages);
	return ERR_PTR(ret);
}

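/*
 * Build a BO backed by user memory: export the pinned pages as a dma-buf,
 * then import it through the driver's prime path. The import takes its own
 * reference on the dma-buf, so the local reference is dropped in both the
 * success and error paths; on error it is the last reference and the
 * release callback unpins the pages.
 */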
static struct ivpu_bo *
ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr,
			    size_t size, u32 flags)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;

	dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	obj = ivpu_gem_prime_import(&vdev->drm, dma_buf);
	if (IS_ERR(obj)) {
		dma_buf_put(dma_buf);
		return ERR_CAST(obj);
	}

	dma_buf_put(dma_buf);

	bo = to_ivpu_bo(obj);
	bo->flags = flags;

	return bo;
}

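/*
 * DRM_IVPU_BO_CREATE_FROM_USERPTR ioctl handler.
 *
 * A minimal userspace sketch, assuming a 4 KiB page size and the usual
 * DRM_IOCTL_IVPU_* naming for the request macro (both are assumptions,
 * not taken from this file):
 *
 *	struct drm_ivpu_bo_create_from_userptr args = {};
 *	void *buf;
 *
 *	if (posix_memalign(&buf, 4096, 16 * 4096))
 *		return -1;
 *	args.user_ptr = (uintptr_t)buf;
 *	args.size = 16 * 4096;
 *	args.flags = DRM_IVPU_BO_HIGH_MEM;
 *	if (ioctl(drm_fd, DRM_IOCTL_IVPU_BO_CREATE_FROM_USERPTR, &args))
 *		return -1;
 *	(on success args.handle and args.vpu_addr are filled in)
 */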
int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_create_from_userptr *args = data;
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	void __user *user_ptr = u64_to_user_ptr(args->user_ptr);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY))
		return -EINVAL;

	if (!args->user_ptr || !args->size)
		return -EINVAL;

	if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size))
		return -EINVAL;

	if (!access_ok(user_ptr, args->size))
		return -EFAULT;

	bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

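	/* The handle takes its own reference; the creation reference is dropped below */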
	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
	if (ret) {
		ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)\n",
			 ERR_PTR(ret), file_priv->ctx.id, args->size, args->flags);
	} else {
		ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n",
			 args->handle, bo->vpu_addr, args->size, bo->flags);
		args->vpu_addr = bo->vpu_addr;
	}

	drm_gem_object_put(&bo->base.base);

	return ret;
}