xref: /linux/drivers/gpu/drm/virtio/virtgpu_ioctl.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

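/*
 * Queue a DRM event so userspace can learn, by polling the DRM fd, when the
 * given fence signals.  Only rings that the context opted into via
 * VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK get an event; for any other ring
 * this is a no-op.
 */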
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 uint32_t ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	/* ring_idx_mask is 64 bits wide; a plain (1 << ring_idx) would
	 * overflow for ring indices above 31.
	 */
	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		return 0;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret)
		goto free;

	fence->e = e;
	return 0;
free:
	kfree(e);
	return ret;
}

/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_fpriv *vfpriv)
{
	char dbgname[TASK_COMM_LEN];

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      vfpriv->context_init, strlen(dbgname),
				      dbgname);

	vfpriv->context_created = true;
}

void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

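/*
 * VIRTGPU_MAP: look up a GEM handle and return the fake mmap offset that
 * userspace then passes to mmap(2) on the DRM fd.
 */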
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;
	uint64_t fence_ctx;
	uint32_t ring_idx;

	fence_ctx = vgdev->fence_drv.context;
	ring_idx = 0;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return -EINVAL;

		if (!vfpriv->base_fence_ctx)
			return -EINVAL;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
	if (ret)
		goto out_unresv;

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

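/*
 * Illustrative userspace sketch for VIRTGPU_EXECBUFFER, the ioctl above (not
 * part of this driver; error handling omitted, all values are placeholders):
 *
 *	struct drm_virtgpu_execbuffer exbuf = {
 *		.flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.size           = cmd_size,            // bytes in the command stream
 *		.command        = (uintptr_t)cmd,      // command stream for the host
 *		.bo_handles     = (uintptr_t)handles,  // GEM handles the stream uses
 *		.num_bo_handles = num_handles,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf);
 *	// on success exbuf.fence_fd holds a sync_file fd for this submission
 */
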
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CONTEXT_INIT:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
		value = vgdev->capset_id_mask;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

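/*
 * VIRTGPU_RESOURCE_CREATE: create a host resource and the guest GEM object
 * that refers to it.  With virgl the 3D texture parameters are passed
 * through; without it only plain 2D resources (target 2, no mips, no
 * multisampling) are accepted.
 */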
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object if userspace passed size 0 */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

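/* VIRTGPU_RESOURCE_INFO: report a BO's size, host resource id and blob type. */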
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (!gobj)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

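/*
 * VIRTGPU_TRANSFER_FROM_HOST: queue a fenced transfer from the host resource
 * into the guest shadow buffer.  virgl only; the stride arguments are
 * accepted for host3d blob resources only.
 */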
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (!objs)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

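/*
 * VIRTGPU_TRANSFER_TO_HOST: queue a transfer from the guest buffer to the
 * host resource; unfenced 2D without virgl, fenced 3D with it.
 */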
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (!objs)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
					       0);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

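/*
 * VIRTGPU_WAIT: wait (up to 15 seconds) for all fences on the BO's
 * reservation object to signal, or with VIRTGPU_WAIT_NOWAIT just report
 * -EBUSY when the BO is still busy.
 */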
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		ret = dma_resv_test_signaled(obj->resv, true);
	else
		ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

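/*
 * VIRTGPU_GET_CAPS: copy a capability set to userspace.  The first query
 * fetches the capset from the host; later queries for the same id/version
 * are served from cap_cache.
 */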
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

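/*
 * Validate a RESOURCE_CREATE_BLOB request against the advertised device
 * features and translate it into virtio_gpu_object_params, rejecting any
 * inconsistent combination of blob_mem, blob_flags, blob_id and cmd_size.
 */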
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
	    !rc_blob->blob_flags)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

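/*
 * VIRTGPU_RESOURCE_CREATE_BLOB: create a blob resource backed by guest
 * memory, by host memory exposed through the host-visible region, or by
 * both.  An optional command buffer is submitted first so the host can set
 * up the blob_id being referenced.
 */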
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}

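/*
 * VIRTGPU_CONTEXT_INIT: create the per-file host context with explicit
 * parameters (capset id, number of fence timeline rings, ring poll mask)
 * instead of the defaults that virtio_gpu_create_context() would use.
 */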
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
					 void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t num_params, i, param;
	/* value must stay 64-bit: POLL_RINGS_MASK can cover up to 64 rings */
	uint64_t valid_ring_mask, value;
	size_t len;
	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_context_init *args = data;

	num_params = args->num_params;
	len = num_params * sizeof(struct drm_virtgpu_context_set_param);

	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
		return -EINVAL;

	/* Number of unique parameters supported at this time. */
	if (num_params > 3)
		return -EINVAL;

	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
				     len);

	if (IS_ERR(ctx_set_params))
		return PTR_ERR(ctx_set_params);

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created) {
		ret = -EEXIST;
		goto out_unlock;
	}

	for (i = 0; i < num_params; i++) {
		param = ctx_set_params[i].param;
		value = ctx_set_params[i].value;

		switch (param) {
		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
			if (value > MAX_CAPSET_ID) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* Context capset ID already set */
			if (vfpriv->context_init &
			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->context_init |= value;
			break;
		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
			if (vfpriv->base_fence_ctx) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (value > MAX_RINGS) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
			vfpriv->num_rings = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
			if (vfpriv->ring_idx_mask) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->ring_idx_mask = value;
			break;
		default:
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (vfpriv->ring_idx_mask) {
		valid_ring_mask = 0;
		for (i = 0; i < vfpriv->num_rings; i++)
			valid_ring_mask |= 1ULL << i;

		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	virtio_gpu_create_context_locked(vgdev, vfpriv);
	virtio_gpu_notify(vgdev);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
	kfree(ctx_set_params);
	return ret;
}

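/*
 * Illustrative userspace sketch for VIRTGPU_CONTEXT_INIT (not part of this
 * driver; the capset id and ring count below are placeholders):
 *
 *	struct drm_virtgpu_context_set_param params[] = {
 *		{ VIRTGPU_CONTEXT_PARAM_CAPSET_ID, 1 },
 *		{ VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 2 },
 *	};
 *	struct drm_virtgpu_context_init init = {
 *		.num_params     = 2,
 *		.ctx_set_params = (uintptr_t)params,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
 */
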
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfers async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
			  DRM_RENDER_ALLOW),
};
879