xref: /linux/drivers/gpu/drm/qxl/qxl_ioctl.c (revision b8d312aa075f33282565467662c4628dae0a2aff)
1 /*
2  * Copyright 2013 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Dave Airlie
23  *          Alon Levy
24  */
25 
26 #include <linux/pci.h>
27 #include <linux/uaccess.h>
28 
29 #include "qxl_drv.h"
30 #include "qxl_object.h"
31 
/*
 * TODO: we currently allocate a new gem object (qxl_bo) for each request.
 * This is wasteful since bo's are page aligned.
 */
36 static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
37 			   struct drm_file *file_priv)
38 {
39 	struct qxl_device *qdev = dev->dev_private;
40 	struct drm_qxl_alloc *qxl_alloc = data;
41 	int ret;
42 	struct qxl_bo *qobj;
43 	uint32_t handle;
44 	u32 domain = QXL_GEM_DOMAIN_VRAM;
45 
46 	if (qxl_alloc->size == 0) {
47 		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
48 		return -EINVAL;
49 	}
50 	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
51 						domain,
52 						qxl_alloc->size,
53 						NULL,
54 						&qobj, &handle);
55 	if (ret) {
56 		DRM_ERROR("%s: failed to create gem ret=%d\n",
57 			  __func__, ret);
58 		return -ENOMEM;
59 	}
60 	qxl_alloc->handle = handle;
61 	return 0;
62 }
63 
64 static int qxl_map_ioctl(struct drm_device *dev, void *data,
65 			 struct drm_file *file_priv)
66 {
67 	struct qxl_device *qdev = dev->dev_private;
68 	struct drm_qxl_map *qxl_map = data;
69 
70 	return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
71 				  &qxl_map->offset);
72 }
73 
/* One relocation to apply to a validated command: patch the word at
 * dst_bo+dst_offset to reference src_bo (address or surface id,
 * depending on type). */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo containing the slot to patch */
	uint32_t dst_offset;	/* byte offset of the slot in dst_bo */
	struct qxl_bo *src_bo;	/* bo being referenced; may be NULL for surf relocs */
	int src_offset;		/* byte offset inside src_bo */
};
81 
82 /*
83  * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
84  * are on vram).
85  * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
86  */
87 static void
88 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
89 {
90 	void *reloc_page;
91 
92 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
93 	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
94 											      info->src_bo,
95 											      info->src_offset);
96 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
97 }
98 
99 static void
100 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
101 {
102 	uint32_t id = 0;
103 	void *reloc_page;
104 
105 	if (info->src_bo && !info->src_bo->is_primary)
106 		id = info->src_bo->surface_id;
107 
108 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
109 	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
110 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
111 }
112 
113 /* return holding the reference to this object */
114 static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
115 			      struct qxl_release *release, struct qxl_bo **qbo_p)
116 {
117 	struct drm_gem_object *gobj;
118 	struct qxl_bo *qobj;
119 	int ret;
120 
121 	gobj = drm_gem_object_lookup(file_priv, handle);
122 	if (!gobj)
123 		return -EINVAL;
124 
125 	qobj = gem_to_qxl_bo(gobj);
126 
127 	ret = qxl_release_list_add(release, qobj);
128 	drm_gem_object_put_unlocked(gobj);
129 	if (ret)
130 		return ret;
131 
132 	*qbo_p = qobj;
133 	return 0;
134 }
135 
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial QXLReleaseInfo struct (the first sizeof(union qxl_release_info)
 * bytes).
 */
/*
 * Validate one userspace command, copy it into a freshly allocated
 * release bo, apply its relocations and push it onto the command ring.
 *
 * Returns 0 on success; on error all bo references taken so far are
 * dropped via qxl_release_free() and a negative errno is returned.
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;	/* bo holding the copied command */
	void *fb_cmd;
	int i, ret, num_relocs;
	int unwritten;

	/* Only drawables may come through execbuffer; surface and cursor
	 * commands are rejected here. */
	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
		break;
	}

	/* The command plus its release-info header must fit in one page,
	 * matching the single-page kmap below. */
	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	/* Allocate the release bo; release_offset is where the command
	 * data lives inside it. */
	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	/* Copy the user command just past the release-info header. */
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		struct qxl_drawable *draw = fb_cmd;

		/* Stamp the drawable with the device's current mm clock. */
		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	num_relocs = 0;
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			/* No dst handle: patch the command bo itself, so
			 * bias by the command's offset inside it. */
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}
		num_relocs++;

		/* reserve and validate the reloc dst bo */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
	if (ret)
		qxl_release_backoff_reserve_list(release);
	else
		qxl_release_fence_buffer_objects(release);

out_free_bos:
out_free_release:
	/* Freeing the release drops every bo reference taken above via
	 * qxlhw_handle_to_bo / qxl_release_list_add. */
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}
278 
279 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
280 				struct drm_file *file_priv)
281 {
282 	struct qxl_device *qdev = dev->dev_private;
283 	struct drm_qxl_execbuffer *execbuffer = data;
284 	struct drm_qxl_command user_cmd;
285 	int cmd_num;
286 	int ret;
287 
288 	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
289 
290 		struct drm_qxl_command __user *commands =
291 			u64_to_user_ptr(execbuffer->commands);
292 
293 		if (copy_from_user(&user_cmd, commands + cmd_num,
294 				       sizeof(user_cmd)))
295 			return -EFAULT;
296 
297 		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
298 		if (ret)
299 			return ret;
300 	}
301 	return 0;
302 }
303 
304 static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
305 				 struct drm_file *file)
306 {
307 	struct qxl_device *qdev = dev->dev_private;
308 	struct drm_qxl_update_area *update_area = data;
309 	struct qxl_rect area = {.left = update_area->left,
310 				.top = update_area->top,
311 				.right = update_area->right,
312 				.bottom = update_area->bottom};
313 	int ret;
314 	struct drm_gem_object *gobj = NULL;
315 	struct qxl_bo *qobj = NULL;
316 	struct ttm_operation_ctx ctx = { true, false };
317 
318 	if (update_area->left >= update_area->right ||
319 	    update_area->top >= update_area->bottom)
320 		return -EINVAL;
321 
322 	gobj = drm_gem_object_lookup(file, update_area->handle);
323 	if (gobj == NULL)
324 		return -ENOENT;
325 
326 	qobj = gem_to_qxl_bo(gobj);
327 
328 	ret = qxl_bo_reserve(qobj, false);
329 	if (ret)
330 		goto out;
331 
332 	if (!qobj->pin_count) {
333 		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
334 		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
335 		if (unlikely(ret))
336 			goto out;
337 	}
338 
339 	ret = qxl_bo_check_id(qdev, qobj);
340 	if (ret)
341 		goto out2;
342 	if (!qobj->surface_id)
343 		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
344 	ret = qxl_io_update_area(qdev, qobj, &area);
345 
346 out2:
347 	qxl_bo_unreserve(qobj);
348 
349 out:
350 	drm_gem_object_put_unlocked(gobj);
351 	return ret;
352 }
353 
354 static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
355 		       struct drm_file *file_priv)
356 {
357 	struct qxl_device *qdev = dev->dev_private;
358 	struct drm_qxl_getparam *param = data;
359 
360 	switch (param->param) {
361 	case QXL_PARAM_NUM_SURFACES:
362 		param->value = qdev->rom->n_surfaces;
363 		break;
364 	case QXL_PARAM_MAX_RELOCS:
365 		param->value = QXL_MAX_RES;
366 		break;
367 	default:
368 		return -EINVAL;
369 	}
370 	return 0;
371 }
372 
373 static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
374 				  struct drm_file *file_priv)
375 {
376 	struct qxl_device *qdev = dev->dev_private;
377 	struct drm_qxl_clientcap *param = data;
378 	int byte, idx;
379 
380 	byte = param->index / 8;
381 	idx = param->index % 8;
382 
383 	if (dev->pdev->revision < 4)
384 		return -ENOSYS;
385 
386 	if (byte >= 58)
387 		return -ENOSYS;
388 
389 	if (qdev->rom->client_capabilities[byte] & (1 << idx))
390 		return 0;
391 	return -ENOSYS;
392 }
393 
394 static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
395 				struct drm_file *file)
396 {
397 	struct qxl_device *qdev = dev->dev_private;
398 	struct drm_qxl_alloc_surf *param = data;
399 	struct qxl_bo *qobj;
400 	int handle;
401 	int ret;
402 	int size, actual_stride;
403 	struct qxl_surface surf;
404 
405 	/* work out size allocate bo with handle */
406 	actual_stride = param->stride < 0 ? -param->stride : param->stride;
407 	size = actual_stride * param->height + actual_stride;
408 
409 	surf.format = param->format;
410 	surf.width = param->width;
411 	surf.height = param->height;
412 	surf.stride = param->stride;
413 	surf.data = 0;
414 
415 	ret = qxl_gem_object_create_with_handle(qdev, file,
416 						QXL_GEM_DOMAIN_SURFACE,
417 						size,
418 						&surf,
419 						&qobj, &handle);
420 	if (ret) {
421 		DRM_ERROR("%s: failed to create gem ret=%d\n",
422 			  __func__, ret);
423 		return -ENOMEM;
424 	} else
425 		param->handle = handle;
426 	return ret;
427 }
428 
/* Driver-private ioctl table; all entries require DRM authentication.
 * Indexed from DRM_COMMAND_BASE by the DRM core. */
const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH),
};
446 
/* Number of entries in qxl_ioctls[], exported for the drm_driver setup. */
int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
448