xref: /linux/drivers/gpu/drm/qxl/qxl_ioctl.c (revision 148f9bb87745ed45f7a11b2cbd3bc0f017d5d257)
1 /*
2  * Copyright 2013 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Dave Airlie
23  *          Alon Levy
24  */
25 
26 #include "qxl_drv.h"
27 #include "qxl_object.h"
28 
29 /*
30  * TODO: allocating a new gem(in qxl_bo) for each request.
31  * This is wasteful since bo's are page aligned.
32  */
33 static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34 			   struct drm_file *file_priv)
35 {
36 	struct qxl_device *qdev = dev->dev_private;
37 	struct drm_qxl_alloc *qxl_alloc = data;
38 	int ret;
39 	struct qxl_bo *qobj;
40 	uint32_t handle;
41 	u32 domain = QXL_GEM_DOMAIN_VRAM;
42 
43 	if (qxl_alloc->size == 0) {
44 		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
45 		return -EINVAL;
46 	}
47 	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48 						domain,
49 						qxl_alloc->size,
50 						NULL,
51 						&qobj, &handle);
52 	if (ret) {
53 		DRM_ERROR("%s: failed to create gem ret=%d\n",
54 			  __func__, ret);
55 		return -ENOMEM;
56 	}
57 	qxl_alloc->handle = handle;
58 	return 0;
59 }
60 
61 static int qxl_map_ioctl(struct drm_device *dev, void *data,
62 			 struct drm_file *file_priv)
63 {
64 	struct qxl_device *qdev = dev->dev_private;
65 	struct drm_qxl_map *qxl_map = data;
66 
67 	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68 				  &qxl_map->offset);
69 }
70 
71 /*
72  * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
73  * are on vram).
74  * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75  */
76 static void
77 apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
78 	    struct qxl_bo *src, uint64_t src_off)
79 {
80 	void *reloc_page;
81 
82 	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
83 	*(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
84 								     src, src_off);
85 	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
86 }
87 
88 static void
89 apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
90 		 struct qxl_bo *src)
91 {
92 	uint32_t id = 0;
93 	void *reloc_page;
94 
95 	if (src && !src->is_primary)
96 		id = src->surface_id;
97 
98 	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
99 	*(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
100 	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
101 }
102 
103 /* return holding the reference to this object */
104 static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105 					 struct drm_file *file_priv, uint64_t handle,
106 					 struct qxl_reloc_list *reloc_list)
107 {
108 	struct drm_gem_object *gobj;
109 	struct qxl_bo *qobj;
110 	int ret;
111 
112 	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113 	if (!gobj) {
114 		DRM_ERROR("bad bo handle %lld\n", handle);
115 		return NULL;
116 	}
117 	qobj = gem_to_qxl_bo(gobj);
118 
119 	ret = qxl_bo_list_add(reloc_list, qobj);
120 	if (ret)
121 		return NULL;
122 
123 	return qobj;
124 }
125 
126 /*
127  * Usage of execbuffer:
128  * Relocations need to take into account the full QXLDrawable size.
129  * However, the command as passed from user space must *not* contain the initial
130  * QXLReleaseInfo struct (first XXX bytes)
131  */
132 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
133 				struct drm_file *file_priv)
134 {
135 	struct qxl_device *qdev = dev->dev_private;
136 	struct drm_qxl_execbuffer *execbuffer = data;
137 	struct drm_qxl_command user_cmd;
138 	int cmd_num;
139 	struct qxl_bo *reloc_src_bo;
140 	struct qxl_bo *reloc_dst_bo;
141 	struct drm_qxl_reloc reloc;
142 	void *fb_cmd;
143 	int i, ret;
144 	struct qxl_reloc_list reloc_list;
145 	int unwritten;
146 	uint32_t reloc_dst_offset;
147 	INIT_LIST_HEAD(&reloc_list.bos);
148 
149 	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
150 		struct qxl_release *release;
151 		struct qxl_bo *cmd_bo;
152 		int release_type;
153 		struct drm_qxl_command *commands =
154 			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
155 
156 		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
157 				       sizeof(user_cmd)))
158 			return -EFAULT;
159 		switch (user_cmd.type) {
160 		case QXL_CMD_DRAW:
161 			release_type = QXL_RELEASE_DRAWABLE;
162 			break;
163 		case QXL_CMD_SURFACE:
164 		case QXL_CMD_CURSOR:
165 		default:
166 			DRM_DEBUG("Only draw commands in execbuffers\n");
167 			return -EINVAL;
168 			break;
169 		}
170 
171 		if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
172 			return -EINVAL;
173 
174 		if (!access_ok(VERIFY_READ,
175 			       (void *)(unsigned long)user_cmd.command,
176 			       user_cmd.command_size))
177 			return -EFAULT;
178 
179 		ret = qxl_alloc_release_reserved(qdev,
180 						 sizeof(union qxl_release_info) +
181 						 user_cmd.command_size,
182 						 release_type,
183 						 &release,
184 						 &cmd_bo);
185 		if (ret)
186 			return ret;
187 
188 		/* TODO copy slow path code from i915 */
189 		fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
190 		unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
191 
192 		{
193 			struct qxl_drawable *draw = fb_cmd;
194 
195 			draw->mm_time = qdev->rom->mm_clock;
196 		}
197 		qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
198 		if (unwritten) {
199 			DRM_ERROR("got unwritten %d\n", unwritten);
200 			qxl_release_unreserve(qdev, release);
201 			qxl_release_free(qdev, release);
202 			return -EFAULT;
203 		}
204 
205 		for (i = 0 ; i < user_cmd.relocs_num; ++i) {
206 			if (DRM_COPY_FROM_USER(&reloc,
207 					       &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
208 					       sizeof(reloc))) {
209 				qxl_bo_list_unreserve(&reloc_list, true);
210 				qxl_release_unreserve(qdev, release);
211 				qxl_release_free(qdev, release);
212 				return -EFAULT;
213 			}
214 
215 			/* add the bos to the list of bos to validate -
216 			   need to validate first then process relocs? */
217 			if (reloc.dst_handle) {
218 				reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
219 								  reloc.dst_handle, &reloc_list);
220 				if (!reloc_dst_bo) {
221 					qxl_bo_list_unreserve(&reloc_list, true);
222 					qxl_release_unreserve(qdev, release);
223 					qxl_release_free(qdev, release);
224 					return -EINVAL;
225 				}
226 				reloc_dst_offset = 0;
227 			} else {
228 				reloc_dst_bo = cmd_bo;
229 				reloc_dst_offset = release->release_offset;
230 			}
231 
232 			/* reserve and validate the reloc dst bo */
233 			if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
234 				reloc_src_bo =
235 					qxlhw_handle_to_bo(qdev, file_priv,
236 							   reloc.src_handle, &reloc_list);
237 				if (!reloc_src_bo) {
238 					if (reloc_dst_bo != cmd_bo)
239 						drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
240 					qxl_bo_list_unreserve(&reloc_list, true);
241 					qxl_release_unreserve(qdev, release);
242 					qxl_release_free(qdev, release);
243 					return -EINVAL;
244 				}
245 			} else
246 				reloc_src_bo = NULL;
247 			if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
248 				apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
249 					    reloc_src_bo, reloc.src_offset);
250 			} else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
251 				apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
252 			} else {
253 				DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
254 				return -EINVAL;
255 			}
256 
257 			if (reloc_src_bo && reloc_src_bo != cmd_bo) {
258 				qxl_release_add_res(qdev, release, reloc_src_bo);
259 				drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
260 			}
261 
262 			if (reloc_dst_bo != cmd_bo)
263 				drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
264 		}
265 		qxl_fence_releaseable(qdev, release);
266 
267 		ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
268 		if (ret == -ERESTARTSYS) {
269 			qxl_release_unreserve(qdev, release);
270 			qxl_release_free(qdev, release);
271 			qxl_bo_list_unreserve(&reloc_list, true);
272 			return ret;
273 		}
274 		qxl_release_unreserve(qdev, release);
275 	}
276 	qxl_bo_list_unreserve(&reloc_list, 0);
277 	return 0;
278 }
279 
280 static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
281 				 struct drm_file *file)
282 {
283 	struct qxl_device *qdev = dev->dev_private;
284 	struct drm_qxl_update_area *update_area = data;
285 	struct qxl_rect area = {.left = update_area->left,
286 				.top = update_area->top,
287 				.right = update_area->right,
288 				.bottom = update_area->bottom};
289 	int ret;
290 	struct drm_gem_object *gobj = NULL;
291 	struct qxl_bo *qobj = NULL;
292 
293 	if (update_area->left >= update_area->right ||
294 	    update_area->top >= update_area->bottom)
295 		return -EINVAL;
296 
297 	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
298 	if (gobj == NULL)
299 		return -ENOENT;
300 
301 	qobj = gem_to_qxl_bo(gobj);
302 
303 	ret = qxl_bo_reserve(qobj, false);
304 	if (ret)
305 		goto out;
306 
307 	if (!qobj->pin_count) {
308 		qxl_ttm_placement_from_domain(qobj, qobj->type);
309 		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
310 				      true, false);
311 		if (unlikely(ret))
312 			goto out;
313 	}
314 
315 	ret = qxl_bo_check_id(qdev, qobj);
316 	if (ret)
317 		goto out2;
318 	if (!qobj->surface_id)
319 		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
320 	ret = qxl_io_update_area(qdev, qobj, &area);
321 
322 out2:
323 	qxl_bo_unreserve(qobj);
324 
325 out:
326 	drm_gem_object_unreference_unlocked(gobj);
327 	return ret;
328 }
329 
330 static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
331 		       struct drm_file *file_priv)
332 {
333 	struct qxl_device *qdev = dev->dev_private;
334 	struct drm_qxl_getparam *param = data;
335 
336 	switch (param->param) {
337 	case QXL_PARAM_NUM_SURFACES:
338 		param->value = qdev->rom->n_surfaces;
339 		break;
340 	case QXL_PARAM_MAX_RELOCS:
341 		param->value = QXL_MAX_RES;
342 		break;
343 	default:
344 		return -EINVAL;
345 	}
346 	return 0;
347 }
348 
349 static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
350 				  struct drm_file *file_priv)
351 {
352 	struct qxl_device *qdev = dev->dev_private;
353 	struct drm_qxl_clientcap *param = data;
354 	int byte, idx;
355 
356 	byte = param->index / 8;
357 	idx = param->index % 8;
358 
359 	if (qdev->pdev->revision < 4)
360 		return -ENOSYS;
361 
362 	if (byte >= 58)
363 		return -ENOSYS;
364 
365 	if (qdev->rom->client_capabilities[byte] & (1 << idx))
366 		return 0;
367 	return -ENOSYS;
368 }
369 
370 static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
371 				struct drm_file *file)
372 {
373 	struct qxl_device *qdev = dev->dev_private;
374 	struct drm_qxl_alloc_surf *param = data;
375 	struct qxl_bo *qobj;
376 	int handle;
377 	int ret;
378 	int size, actual_stride;
379 	struct qxl_surface surf;
380 
381 	/* work out size allocate bo with handle */
382 	actual_stride = param->stride < 0 ? -param->stride : param->stride;
383 	size = actual_stride * param->height + actual_stride;
384 
385 	surf.format = param->format;
386 	surf.width = param->width;
387 	surf.height = param->height;
388 	surf.stride = param->stride;
389 	surf.data = 0;
390 
391 	ret = qxl_gem_object_create_with_handle(qdev, file,
392 						QXL_GEM_DOMAIN_SURFACE,
393 						size,
394 						&surf,
395 						&qobj, &handle);
396 	if (ret) {
397 		DRM_ERROR("%s: failed to create gem ret=%d\n",
398 			  __func__, ret);
399 		return -ENOMEM;
400 	} else
401 		param->handle = handle;
402 	return ret;
403 }
404 
/*
 * Ioctl table for the qxl driver.  Every entry requires an authenticated
 * client (DRM_AUTH) and runs without the legacy global DRM lock
 * (DRM_UNLOCKED).
 */
struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
};

/* number of entries above; exported for the drm_driver num_ioctls field */
int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
424