/* drivers/gpu/drm/virtio/virtgpu_gem.c */
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

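/*
 * Create a host-backed GEM object from @params and install a handle for
 * it in @file.  On success the handle holds the only reference: the
 * reference taken at allocation is dropped once the handle exists.
 */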
static int virtio_gpu_gem_create(struct drm_file *file,
				 struct drm_device *dev,
				 struct virtio_gpu_object_params *params,
				 struct drm_gem_object **obj_p,
				 uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&obj->base.base);

	*handle_p = handle;
	return 0;
}

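/*
 * DRM_IOCTL_MODE_CREATE_DUMB implementation.  Only 32 bpp is accepted;
 * when the device supports resource blobs but not virgl 3D, the buffer
 * is created as a shareable guest-memory blob.
 */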
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;

	if (vgdev->has_resource_blob && !vgdev->has_virgl_3d) {
		params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
		params.blob = true;
	}

	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}

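/*
 * Called whenever a GEM handle to this object is created in a DRM file.
 * With virgl 3D the object is attached to the file's host context, if
 * one exists; without 3D there is nothing to attach and only a notify
 * is sent.
 */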
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		goto out_notify;

	/* the context might still be missing when the first ioctl is
	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
	 */
	if (!vgdev->has_context_init)
		virtio_gpu_create_context(obj->dev, file);

	if (vfpriv->context_created) {
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return -ENOMEM;
		virtio_gpu_array_add_obj(objs, obj);

		virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
	}

out_notify:
	virtio_gpu_notify(vgdev);
	return 0;
}

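/*
 * Called when a GEM handle to this object is closed.  For virgl 3D the
 * object is detached from the file's host context again.
 */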
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	virtio_gpu_notify(vgdev);
}

/*
 * For drm panic: allocated with GFP_ATOMIC so it is usable from the
 * panic handler, where sleeping is not allowed.
 */
struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
{
	struct virtio_gpu_object_array *objs;

	/* room for exactly one entry in the flexible array, matching
	 * objs->total below
	 */
	objs = kmalloc(struct_size(objs, objs, 1), GFP_ATOMIC);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = 1;
	return objs;
}

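/*
 * Allocate an empty object array with room for @nents entries; entries
 * are added afterwards with virtio_gpu_array_add_obj().
 */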
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;

	objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

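/*
 * Build an object array from an array of GEM handles, taking a reference
 * on each looked-up object.  Returns NULL on allocation failure or if
 * any handle does not resolve; references taken so far are dropped.
 *
 * Hypothetical caller (sketch only, names made up):
 *
 *	buflist = virtio_gpu_array_from_handles(file, user_handles, cnt);
 *	if (!buflist)
 *		return -ENOENT;
 */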
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

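/*
 * Append @obj to the array, taking a reference.  Exceeding the capacity
 * passed to virtio_gpu_array_alloc() is a caller bug (WARN_ON_ONCE).
 */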
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

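/*
 * Lock the reservation objects of all buffers in the array and reserve
 * one fence slot per buffer; on failure, reservations taken so far are
 * released again.  A typical fencing sequence (sketch, simplified from
 * the command submission paths) is:
 *
 *	virtio_gpu_array_lock_resv(objs);
 *	... queue the command that uses objs ...
 *	virtio_gpu_array_add_fence(objs, fence);
 *	virtio_gpu_array_unlock_resv(objs);
 */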
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	unsigned int i;
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	if (ret)
		return ret;

	for (i = 0; i < objs->nents; ++i) {
		ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
		if (ret) {
			virtio_gpu_array_unlock_resv(objs);
			return ret;
		}
	}
	return ret;
}

void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

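/* Attach @fence, with write usage, to every buffer in the array. */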
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_fence(objs->objs[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
}

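/*
 * Drop the reference held on each buffer and free the array itself.
 * A NULL @objs is allowed.
 */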
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	if (!objs)
		return;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put(objs->objs[i]);
	virtio_gpu_array_free(objs);
}

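/*
 * Defer the final drm_gem_object_put() calls to the obj_free_work
 * worker, for callers that must not drop the last GEM reference
 * directly.
 */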
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}

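/*
 * Worker for obj_free_work: drains obj_free_list, releasing
 * obj_free_lock around each virtio_gpu_array_put_free() call so the
 * actual freeing does not happen under the spinlock.
 */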
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}
299