/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

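/* All of the blob resource flags that userspace is allowed to set. */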
#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_fpriv *vfpriv)
{
	if (vfpriv->explicit_debug_name) {
		virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
					      vfpriv->context_init,
					      strlen(vfpriv->debug_name),
					      vfpriv->debug_name);
	} else {
		char dbgname[TASK_COMM_LEN];

		get_task_comm(dbgname, current);
		virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
					      vfpriv->context_init, strlen(dbgname),
					      dbgname);
	}

	vfpriv->context_created = true;
}

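/*
 * Lazily create the host rendering context for a DRM file.  Safe to call
 * repeatedly; only the first call actually issues the create command.
 */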
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

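/* Look up the mmap offset for a GEM handle and hand it back to userspace. */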
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

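/*
 * Report one device capability per call: userspace sets ->param to a
 * VIRTGPU_PARAM_* value and points ->value at an int that receives the
 * result.
 */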
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CONTEXT_INIT:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
		value = vgdev->capset_id_mask;
		break;
	case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

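/*
 * Create a host resource plus a GEM handle for it.  With virgl the full
 * 3D texture parameters are used; without it only simple 2D resources
 * are accepted.
 */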
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page sized object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;

	/*
	 * The handle owns the reference now.  But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}

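/* Query the size, host resource id and blob memory type of a GEM object. */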
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

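/* Copy (a box of) a resource from the host into guest memory; 3D only. */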
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

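/* Copy (a box of) a resource from guest memory to the host; 2D or 3D. */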
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
					       0);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

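/* Block (or poll, with VIRTGPU_WAIT_NOWAIT) until a BO's fences signal. */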
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	} else {
		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
					    true, timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

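/*
 * Fetch a capability set from the host, serving repeated requests from
 * the local capset cache where possible.
 */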
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host and guest caps sizes */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
					&cache_ent);
	if (ret)
		return ret;
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* The is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

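/* Sanity-check userspace blob parameters and fill in the object params. */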
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

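/*
 * Create a blob resource: guest blobs are backed by guest memory, pure
 * HOST3D blobs by the device's host-visible memory region.  An optional
 * command buffer may be submitted alongside creation.
 */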
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	/*
	 * The handle owns the reference now.  But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj.  Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put(obj);

	return 0;
}

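/*
 * One-shot per-file context configuration: capset id, number of fence
 * rings, poll ring mask and an optional debug name.  Fails with -EEXIST
 * once the context has been created.
 */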
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
					 void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t num_params, i;
	uint64_t valid_ring_mask, param, value;
	size_t len;
	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_context_init *args = data;

	num_params = args->num_params;
	len = num_params * sizeof(struct drm_virtgpu_context_set_param);

	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
		return -EINVAL;

	/* Number of unique parameters supported at this time. */
	if (num_params > 4)
		return -EINVAL;

	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
				     len);

	if (IS_ERR(ctx_set_params))
		return PTR_ERR(ctx_set_params);

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created) {
		ret = -EEXIST;
		goto out_unlock;
	}

	for (i = 0; i < num_params; i++) {
		param = ctx_set_params[i].param;
		value = ctx_set_params[i].value;

		switch (param) {
		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
			if (value > MAX_CAPSET_ID) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* Context capset ID already set */
			if (vfpriv->context_init &
			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->context_init |= value;
			break;
		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
			if (vfpriv->base_fence_ctx) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (value > MAX_RINGS) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
			vfpriv->num_rings = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
			if (vfpriv->ring_idx_mask) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->ring_idx_mask = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME:
			if (vfpriv->explicit_debug_name) {
				ret = -EINVAL;
				goto out_unlock;
			}

			ret = strncpy_from_user(vfpriv->debug_name,
						u64_to_user_ptr(value),
						DEBUG_NAME_MAX_LEN - 1);
			if (ret < 0)
				goto out_unlock;

			vfpriv->explicit_debug_name = true;
			ret = 0;
			break;
		default:
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (vfpriv->ring_idx_mask) {
		valid_ring_mask = 0;
		for (i = 0; i < vfpriv->num_rings; i++)
			valid_ring_mask |= 1ULL << i;

		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	virtio_gpu_create_context_locked(vgdev, vfpriv);
	virtio_gpu_notify(vgdev);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
	kfree(ctx_set_params);
	return ret;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* Make transfers async to the main ring?  Not sure; can we
	 * thread these in the underlying GL?
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
			  DRM_RENDER_ALLOW),
};