/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include <drm/drm_print.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

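/**
 * virtio_gpu_resource_id_get - reserve a host-visible resource ID
 * @vgdev: virtio-gpu device
 * @resid: returned resource ID (always non-zero)
 *
 * With the virglrenderer workaround enabled (the default), IDs come from
 * a monotonically increasing counter and are never reused; otherwise they
 * are recycled through the device's resource IDA.
 *
 * Returns 0 on success or a negative errno from the IDA.
 */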
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
        if (virtio_gpu_virglrenderer_workaround) {
                /*
                 * Hack to avoid re-using resource IDs.
                 *
                 * virglrenderer versions up to (and including) 0.7.0
                 * can't deal with that.  virglrenderer commit
                 * "f91a9dd35715 Fix unlinking resources from hash
                 * table." (Feb 2019) fixes the bug.
                 */
                static atomic_t seqno = ATOMIC_INIT(0);
                int handle = atomic_inc_return(&seqno);
                *resid = handle + 1;
        } else {
                int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

                if (handle < 0)
                        return handle;
                *resid = handle + 1;
        }
        return 0;
}

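/*
 * Return @id to the resource IDA. This is a no-op while the virglrenderer
 * workaround is active, since sequence-counter IDs are never recycled.
 */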
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        if (!virtio_gpu_virglrenderer_workaround)
                ida_free(&vgdev->resource_ida, id - 1);
}

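/**
 * virtio_gpu_cleanup_object - final teardown of a virtio-gpu BO
 * @bo: buffer object to destroy
 *
 * Releases the hardware resource handle, then frees the object according
 * to its backing type: shmem objects go through the GEM shmem helper,
 * VRAM objects additionally drop their host-visible mm node, and plain
 * GEM objects are released directly.
 */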
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
        if (virtio_gpu_is_shmem(bo)) {
                drm_gem_shmem_free(&bo->base);
        } else if (virtio_gpu_is_vram(bo)) {
                struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

                spin_lock(&vgdev->host_visible_lock);
                if (drm_mm_node_allocated(&vram->vram_node))
                        drm_mm_remove_node(&vram->vram_node);

                spin_unlock(&vgdev->host_visible_lock);

                drm_gem_free_mmap_offset(&vram->base.base.base);
                drm_gem_object_release(&vram->base.base.base);
                kfree(vram);
        } else {
                drm_gem_object_release(&bo->base.base);
                kfree(bo);
        }
}

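/*
 * GEM .free callback. If the resource was created on the host, queue an
 * unref command and let its completion handler call
 * virtio_gpu_cleanup_object(); otherwise clean up immediately.
 */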
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        if (bo->created) {
                virtio_gpu_cmd_unref_resource(vgdev, bo);
                virtio_gpu_notify(vgdev);
                /* completion handler calls virtio_gpu_cleanup_object() */
                return;
        }
        virtio_gpu_cleanup_object(bo);
}

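/**
 * virtio_gpu_detach_object_fenced - detach backing pages from the host
 * @bo: buffer object to detach
 *
 * Issues a detach-backing command and synchronously waits on a fence, so
 * that on return the host is guaranteed to no longer access the pages.
 *
 * Returns 0 on success (or if nothing was attached), -ENOMEM if the
 * fence could not be allocated.
 */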
int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
{
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
        struct virtio_gpu_fence *fence;

        if (!bo->attached)
                return 0;

        fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
        if (!fence)
                return -ENOMEM;

        virtio_gpu_object_detach(vgdev, bo, fence);
        virtio_gpu_notify(vgdev);

        dma_fence_wait(&fence->f, false);
        dma_fence_put(&fence->f);

        return 0;
}

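/*
 * GEM object callbacks for shmem-backed objects. Most operations are
 * delegated to the generic drm_gem_shmem helpers; free, open/close and
 * export are virtio-gpu specific.
 */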
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
        .free = virtio_gpu_free_object,
        .open = virtio_gpu_gem_object_open,
        .close = virtio_gpu_gem_object_close,
        .print_info = drm_gem_shmem_object_print_info,
        .export = virtgpu_gem_prime_export,
        .pin = drm_gem_shmem_object_pin,
        .unpin = drm_gem_shmem_object_unpin,
        .get_sg_table = drm_gem_shmem_object_get_sg_table,
        .vmap = drm_gem_shmem_object_vmap,
        .vunmap = drm_gem_shmem_object_vunmap,
        .mmap = drm_gem_shmem_object_mmap,
        .vm_ops = &drm_gem_shmem_vm_ops,
};

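/* Object type is identified by the funcs pointer installed at creation. */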
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
        return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

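/**
 * virtio_gpu_create_object - driver hook for GEM object allocation
 * @dev: DRM device
 * @size: object size in bytes (not needed here; the GEM core sets up
 *        the object size after this hook returns)
 *
 * Allocates the virtio-gpu wrapper around a GEM shmem object and installs
 * virtio_gpu_shmem_funcs so later operations route through this driver.
 */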
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
                                                size_t size)
{
        struct virtio_gpu_object_shmem *shmem;
        struct drm_gem_shmem_object *dshmem;

        shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
        if (!shmem)
                return ERR_PTR(-ENOMEM);

        dshmem = &shmem->base.base;
        dshmem->base.funcs = &virtio_gpu_shmem_funcs;
        return &dshmem->base;
}

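/*
 * Pin the object's backing pages and build the virtio_gpu_mem_entry array
 * that describes them to the host. When the DMA API is in use the entries
 * carry DMA addresses from the mapped scatterlist; otherwise they carry
 * physical addresses.
 */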
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        struct virtio_gpu_mem_entry **ents,
                                        unsigned int *nents)
{
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct scatterlist *sg;
        struct sg_table *pages;
        int si;

        pages = drm_gem_shmem_get_pages_sgt(&bo->base);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        if (use_dma_api)
                *nents = pages->nents;
        else
                *nents = pages->orig_nents;

        *ents = kvmalloc_array(*nents,
                               sizeof(struct virtio_gpu_mem_entry),
                               GFP_KERNEL);
        if (!(*ents)) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        if (use_dma_api) {
                for_each_sgtable_dma_sg(pages, sg, si) {
                        (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
                        (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
                        (*ents)[si].padding = 0;
                }
        } else {
                for_each_sgtable_sg(pages, sg, si) {
                        (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
                        (*ents)[si].length = cpu_to_le32(sg->length);
                        (*ents)[si].padding = 0;
                }
        }

        return 0;
}

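/**
 * virtio_gpu_object_create - create and host-register a virtio-gpu BO
 * @vgdev: virtio-gpu device
 * @params: creation parameters (size, dumb/blob/virgl flags, ...)
 * @bo_ptr: returned buffer object
 * @fence: optional fence to attach to the creation command
 *
 * Allocates a shmem-backed GEM object, reserves a resource ID, describes
 * the backing pages, then issues the matching resource-create command
 * (blob, 3D, or 2D) and attaches the backing where the host needs it.
 * On failure everything is unwound in reverse order.
 */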
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object_params *params,
                             struct virtio_gpu_object **bo_ptr,
                             struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object_array *objs = NULL;
        struct drm_gem_shmem_object *shmem_obj;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_mem_entry *ents = NULL;
        unsigned int nents;
        int ret;

        *bo_ptr = NULL;

        params->size = roundup(params->size, PAGE_SIZE);
        shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
        if (IS_ERR(shmem_obj))
                return PTR_ERR(shmem_obj);
        bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

        ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
        if (ret < 0)
                goto err_free_gem;

        bo->dumb = params->dumb;

        ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
        if (ret != 0)
                goto err_put_id;

        if (fence) {
                ret = -ENOMEM;
                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        goto err_free_entry;
                virtio_gpu_array_add_obj(objs, &bo->base.base);

                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_objs;
        }

        if (params->blob) {
                if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
                        bo->guest_blob = true;

                virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
                                                    ents, nents);
        } else if (params->virgl) {
                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
                                                  objs, fence);
                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        } else {
                virtio_gpu_cmd_create_resource(vgdev, bo, params,
                                               objs, fence);
                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        }

        *bo_ptr = bo;
        return 0;

err_put_objs:
        virtio_gpu_array_put_free(objs);
err_free_entry:
        kvfree(ents);
err_put_id:
        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
        drm_gem_shmem_free(shmem_obj);
        return ret;
}