// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for an imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}
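
/*
 * Illustrative note (not driver code): the DRM core only ever hands out
 * the embedded base object, e.g. from a handle lookup, and the helper
 * above recovers the containing xen_gem_object:
 *
 *	struct drm_gem_object *gem_obj = drm_gem_object_lookup(file, handle);
 *	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
 */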

static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}
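
/*
 * Usage sketch (illustrative only, not driver code): the two helpers
 * above are always used as a pair, bracketing the lifetime of the
 * pages array:
 *
 *	if (gem_alloc_pages_array(xen_obj, size) < 0)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	gem_free_pages_array(xen_obj);
 */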

static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * the vm_pgoff (used as a fake buffer offset by DRM) to 0, as we
	 * want to map the whole buffer.
	 */
	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
	vma->vm_pgoff = 0;

	/*
	 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h),
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	/*
	 * The vm_operations_struct.fault handler is only invoked on CPU
	 * access to the VMA; GPU accesses never fault, because the CPU does
	 * not touch the memory then. Insert all pages now, so both CPU and
	 * GPU accesses work.
	 *
	 * FIXME: as all pages are inserted here, the .fault handler must
	 * never be called, so none is provided.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}
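
/*
 * For reference, a sketch of how userspace reaches the mmap callback
 * above (illustrative, not driver code): the buffer's fake offset is
 * obtained first and then passed to mmap(2) on the DRM node:
 *
 *	struct drm_mode_map_dumb arg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * drm_gem_mmap() then looks the object up by that offset and calls
 * gem_obj->funcs->mmap(), i.e. xen_drm_front_gem_object_mmap().
 */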

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_free_object_unlocked,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};
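
/*
 * Dispatch note (illustrative): the DRM core never calls these hooks by
 * name; it always goes through gem_obj->funcs. Dropping the last
 * reference, for example, ends up in the .free hook:
 *
 *	drm_gem_object_put(gem_obj);	// -> obj->funcs->free(obj)
 */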

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}
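
/*
 * Caller-side sketch (illustrative): gem_create_obj() follows the
 * kernel ERR_PTR convention, so callers must check with IS_ERR()
 * rather than for NULL:
 *
 *	xen_obj = gem_create_obj(dev, size);
 *	if (IS_ERR(xen_obj))
 *		return ERR_CAST(xen_obj);
 */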
134 
135 static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
136 {
137 	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
138 	struct xen_gem_object *xen_obj;
139 	int ret;
140 
141 	size = round_up(size, PAGE_SIZE);
142 	xen_obj = gem_create_obj(dev, size);
143 	if (IS_ERR(xen_obj))
144 		return xen_obj;
145 
146 	if (drm_info->front_info->cfg.be_alloc) {
147 		/*
148 		 * backend will allocate space for this buffer, so
149 		 * only allocate array of pointers to pages
150 		 */
151 		ret = gem_alloc_pages_array(xen_obj, size);
152 		if (ret < 0)
153 			goto fail;
154 
155 		/*
156 		 * allocate ballooned pages which will be used to map
157 		 * grant references provided by the backend
158 		 */
159 		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
160 					          xen_obj->pages);
161 		if (ret < 0) {
162 			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
163 				  xen_obj->num_pages, ret);
164 			gem_free_pages_array(xen_obj);
165 			goto fail;
166 		}
167 
168 		xen_obj->be_alloc = true;
169 		return xen_obj;
170 	}
171 	/*
172 	 * need to allocate backing pages now, so we can share those
173 	 * with the backend
174 	 */
175 	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
176 	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
177 	if (IS_ERR(xen_obj->pages)) {
178 		ret = PTR_ERR(xen_obj->pages);
179 		xen_obj->pages = NULL;
180 		goto fail;
181 	}
182 
183 	return xen_obj;
184 
185 fail:
186 	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
187 	return ERR_PTR(ret);
188 }

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}
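
/*
 * Call-site sketch (assumed, for illustration): the driver's dumb_create
 * hook is the typical caller, computing the buffer size from the
 * userspace-supplied geometry before delegating here:
 *
 *	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *	args->size = args->pitch * args->height;
 *	gem_obj = xen_drm_front_gem_create(dev, args->size);
 */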

void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}
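
/*
 * Export-path sketch (illustrative, simplified): when this buffer is
 * exported as a dma-buf, the PRIME core calls the hook above and maps
 * the resulting table for the importing device, roughly:
 *
 *	struct sg_table *sgt = gem_obj->funcs->get_sg_table(gem_obj);
 *
 *	dma_map_sgtable(attach->dev, sgt, dir, 0);
 */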

struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}
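
/*
 * Import-path sketch (illustrative, simplified): the PRIME core invokes
 * the function above while importing a foreign dma-buf, roughly:
 *
 *	attach = dma_buf_attach(dma_buf, dev->dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	gem_obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 */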

int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;
	/*
	 * Please see the comment in xen_drm_front_gem_object_mmap() on
	 * mapping and attributes.
	 */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}
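
/*
 * Usage sketch (illustrative; locking elided): kernel callers wanting
 * CPU access pair the two hooks above through the GEM helpers:
 *
 *	struct iosys_map map;
 *
 *	if (!drm_gem_vmap(gem_obj, &map)) {
 *		memset(map.vaddr, 0, gem_obj->size);
 *		drm_gem_vunmap(gem_obj, &map);
 *	}
 */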