xref: /linux/drivers/gpu/drm/qxl/qxl_ioctl.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 /*
2  * Copyright 2013 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Dave Airlie
23  *          Alon Levy
24  */
25 
26 #include <linux/pci.h>
27 #include <linux/uaccess.h>
28 
29 #include <drm/drm_print.h>
30 
31 #include "qxl_drv.h"
32 #include "qxl_object.h"
33 
/*
 * TODO: a new gem object (qxl_bo) is allocated for each request.
 * This is wasteful, since bo allocations are page aligned.
 */
qxl_alloc_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)38 int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
39 {
40 	struct qxl_device *qdev = to_qxl(dev);
41 	struct drm_qxl_alloc *qxl_alloc = data;
42 	int ret;
43 	uint32_t handle;
44 	u32 domain = QXL_GEM_DOMAIN_VRAM;
45 
46 	if (qxl_alloc->size == 0) {
47 		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
48 		return -EINVAL;
49 	}
50 	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
51 						domain,
52 						qxl_alloc->size,
53 						NULL,
54 						NULL, &handle);
55 	if (ret) {
56 		DRM_ERROR("%s: failed to create gem ret=%d\n",
57 			  __func__, ret);
58 		return -ENOMEM;
59 	}
60 	qxl_alloc->handle = handle;
61 	return 0;
62 }
63 
qxl_map_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)64 int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
65 {
66 	struct qxl_device *qdev = to_qxl(dev);
67 	struct drm_qxl_map *qxl_map = data;
68 
69 	return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
70 					   &qxl_map->offset);
71 }
72 
/*
 * Bookkeeping for one relocation entry copied in from userspace.
 * The field at dst_bo + dst_offset is patched with either the physical
 * address of (src_bo + src_offset) or src_bo's surface id, depending on
 * type (see apply_reloc()/apply_surf_reloc()).
 */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo whose contents get patched */
	uint32_t dst_offset;	/* byte offset of the patched field in dst_bo */
	struct qxl_bo *src_bo;	/* bo the patched value refers to (may be NULL) */
	int src_offset;		/* byte offset inside src_bo */
};
80 
81 /*
82  * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
83  * are on vram).
84  * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
85  */
86 static void
apply_reloc(struct qxl_device * qdev,struct qxl_reloc_info * info)87 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
88 {
89 	void *reloc_page;
90 
91 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
92 	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
93 											      info->src_bo,
94 											      info->src_offset);
95 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
96 }
97 
98 static void
apply_surf_reloc(struct qxl_device * qdev,struct qxl_reloc_info * info)99 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
100 {
101 	uint32_t id = 0;
102 	void *reloc_page;
103 
104 	if (info->src_bo && !info->src_bo->is_primary)
105 		id = info->src_bo->surface_id;
106 
107 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
108 	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
109 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
110 }
111 
112 /* return holding the reference to this object */
qxlhw_handle_to_bo(struct drm_file * file_priv,uint64_t handle,struct qxl_release * release,struct qxl_bo ** qbo_p)113 static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
114 			      struct qxl_release *release, struct qxl_bo **qbo_p)
115 {
116 	struct drm_gem_object *gobj;
117 	struct qxl_bo *qobj;
118 	int ret;
119 
120 	gobj = drm_gem_object_lookup(file_priv, handle);
121 	if (!gobj)
122 		return -EINVAL;
123 
124 	qobj = gem_to_qxl_bo(gobj);
125 
126 	ret = qxl_release_list_add(release, qobj);
127 	drm_gem_object_put(gobj);
128 	if (ret)
129 		return ret;
130 
131 	*qbo_p = qobj;
132 	return 0;
133 }
134 
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial QXLReleaseInfo struct (the first sizeof(union qxl_release_info)
 * bytes).
 */
/*
 * Validate and submit a single userspace draw command.
 *
 * The command body is copied into a freshly reserved release bo (after
 * the leading qxl_release_info), the relocation table is copied from
 * userspace and applied, the involved bos are reserved/validated and
 * fenced, and the command is pushed onto the device command ring.
 * Returns 0 on success or a negative errno; on failure after the
 * release was allocated, the release is freed again.
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;	/* bo backing the release/command data */
	void *fb_cmd;
	int i, ret;
	int unwritten;

	/* Only draw commands are accepted through execbuffer. */
	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
	}

	/* Release info + command body must fit within one page. */
	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	/* Cheap range check before the atomic nocache copy below. */
	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	/* One reloc_info slot per userspace relocation entry. */
	reloc_info = kmalloc_objs(struct qxl_reloc_info, cmd->relocs_num);
	if (!reloc_info)
		return -ENOMEM;

	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	/* Copy the command body right after the release info, through an
	 * atomic mapping of the page holding release_offset. */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		/* Stamp the drawable with the current device mm clock. */
		struct qxl_drawable *draw = fb_cmd;

		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	/* Nonzero means the atomic copy faulted part-way through. */
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		/* dst_handle 0 means "patch the command bo itself", offset
		 * relative to the release data. */
		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}

		/* reserve and validate the reloc dst bo */
		/* BO relocs always need a source; surf relocs only when a
		 * src_handle was supplied (0 -> primary surface, id 0). */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	/* NOTE(review): fences are attached before the push; if the push
	 * fails, the release is freed below — presumably safe because the
	 * fence signals on free, but confirm against qxl_release_free(). */
	qxl_release_fence_buffer_objects(release);
	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);

out_free_bos:
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}
270 
qxl_execbuffer_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)271 int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
272 {
273 	struct qxl_device *qdev = to_qxl(dev);
274 	struct drm_qxl_execbuffer *execbuffer = data;
275 	struct drm_qxl_command user_cmd;
276 	int cmd_num;
277 	int ret;
278 
279 	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
280 
281 		struct drm_qxl_command __user *commands =
282 			u64_to_user_ptr(execbuffer->commands);
283 
284 		if (copy_from_user(&user_cmd, commands + cmd_num,
285 				       sizeof(user_cmd)))
286 			return -EFAULT;
287 
288 		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
289 		if (ret)
290 			return ret;
291 	}
292 	return 0;
293 }
294 
qxl_update_area_ioctl(struct drm_device * dev,void * data,struct drm_file * file)295 int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
296 {
297 	struct qxl_device *qdev = to_qxl(dev);
298 	struct drm_qxl_update_area *update_area = data;
299 	struct qxl_rect area = {.left = update_area->left,
300 				.top = update_area->top,
301 				.right = update_area->right,
302 				.bottom = update_area->bottom};
303 	int ret;
304 	struct drm_gem_object *gobj = NULL;
305 	struct qxl_bo *qobj = NULL;
306 	struct ttm_operation_ctx ctx = { true, false };
307 
308 	if (update_area->left >= update_area->right ||
309 	    update_area->top >= update_area->bottom)
310 		return -EINVAL;
311 
312 	gobj = drm_gem_object_lookup(file, update_area->handle);
313 	if (gobj == NULL)
314 		return -ENOENT;
315 
316 	qobj = gem_to_qxl_bo(gobj);
317 
318 	ret = qxl_bo_reserve(qobj);
319 	if (ret)
320 		goto out;
321 
322 	if (!qobj->tbo.pin_count) {
323 		qxl_ttm_placement_from_domain(qobj, qobj->type);
324 		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
325 		if (unlikely(ret))
326 			goto out;
327 	}
328 
329 	ret = qxl_bo_check_id(qdev, qobj);
330 	if (ret)
331 		goto out2;
332 	if (!qobj->surface_id)
333 		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
334 	ret = qxl_io_update_area(qdev, qobj, &area);
335 
336 out2:
337 	qxl_bo_unreserve(qobj);
338 
339 out:
340 	drm_gem_object_put(gobj);
341 	return ret;
342 }
343 
qxl_getparam_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)344 int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
345 {
346 	struct qxl_device *qdev = to_qxl(dev);
347 	struct drm_qxl_getparam *param = data;
348 
349 	switch (param->param) {
350 	case QXL_PARAM_NUM_SURFACES:
351 		param->value = qdev->rom->n_surfaces;
352 		break;
353 	case QXL_PARAM_MAX_RELOCS:
354 		param->value = QXL_MAX_RES;
355 		break;
356 	default:
357 		return -EINVAL;
358 	}
359 	return 0;
360 }
361 
qxl_clientcap_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)362 int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
363 {
364 	struct qxl_device *qdev = to_qxl(dev);
365 	struct pci_dev *pdev = to_pci_dev(dev->dev);
366 	struct drm_qxl_clientcap *param = data;
367 	int byte, idx;
368 
369 	byte = param->index / 8;
370 	idx = param->index % 8;
371 
372 	if (pdev->revision < 4)
373 		return -ENOSYS;
374 
375 	if (byte >= 58)
376 		return -ENOSYS;
377 
378 	if (qdev->rom->client_capabilities[byte] & (1 << idx))
379 		return 0;
380 	return -ENOSYS;
381 }
382 
qxl_alloc_surf_ioctl(struct drm_device * dev,void * data,struct drm_file * file)383 int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
384 {
385 	struct qxl_device *qdev = to_qxl(dev);
386 	struct drm_qxl_alloc_surf *param = data;
387 	int handle;
388 	int ret;
389 	int size, actual_stride;
390 	struct qxl_surface surf;
391 
392 	/* work out size allocate bo with handle */
393 	actual_stride = param->stride < 0 ? -param->stride : param->stride;
394 	size = actual_stride * param->height + actual_stride;
395 
396 	surf.format = param->format;
397 	surf.width = param->width;
398 	surf.height = param->height;
399 	surf.stride = param->stride;
400 	surf.data = 0;
401 
402 	ret = qxl_gem_object_create_with_handle(qdev, file,
403 						QXL_GEM_DOMAIN_SURFACE,
404 						size,
405 						&surf,
406 						NULL, &handle);
407 	if (ret) {
408 		DRM_ERROR("%s: failed to create gem ret=%d\n",
409 			  __func__, ret);
410 		return -ENOMEM;
411 	} else
412 		param->handle = handle;
413 	return ret;
414 }
415