xref: /linux/drivers/gpu/drm/radeon/radeon_gem.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

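/*
 * GEM objects are embedded in radeon_bo and fully set up by
 * radeon_bo_create(), so this init hook must never be reached.
 */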
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

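/*
 * Tear down a GEM object: drop the drm-prime attachment if the BO was
 * imported, then drop the reference keeping the radeon_bo alive.
 */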
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

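/*
 * Allocate a BO through TTM, wrap it in a GEM object and put it on the
 * per-device object list.  -ERESTARTSYS is deliberately not logged, as an
 * interrupted allocation is not a driver failure.
 */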
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %d, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

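/*
 * The write domain, when given, wins over the read domain; a CPU domain
 * request additionally waits for the BO to go idle so the GPU is done
 * with it before userspace touches the memory.
 */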
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* do nothing */
		printk(KERN_WARNING "Set domain without a domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* asking for CPU access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create ioctl and the open ioctl.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}

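/*
 * On VM-capable chips (Cayman and newer) the BO may still be mapped in
 * this file's virtual address space; remove that mapping before the
 * handle goes away.
 */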
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	if (radeon_bo_reserve(rbo, false)) {
		dev_err(rdev->dev, "leaking bo va because we failed to reserve bo\n");
		return;
	}
	radeon_vm_bo_rmv(rdev, vm, rbo);
	radeon_bo_unreserve(rbo);
}

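/*
 * -EDEADLK from a wait means the GPU is locked up; attempt a reset and,
 * if it succeeds, return -EAGAIN so userspace restarts the ioctl.
 */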
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
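/*
 * Report the VRAM and GART space userspace can realistically use: visible
 * VRAM minus the stolen VGA and fbdev allocations, GART minus the ring
 * buffers and the IB pool.
 */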
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

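/*
 * Allocate a BO and return a handle for it.  A minimal sketch of a
 * userspace caller, assuming the usual libdrm entry points (not part of
 * this file):
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 64 * 1024,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				&req, sizeof(req)) == 0)
 *		bo_handle = req.handle;	/* GEM handle for later ioctls */
 */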
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a GEM object to contain this object */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop the reference from the allocation - the handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now, if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	/* use rdev here; the BO must not be touched once its reference is dropped */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

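/*
 * Translate a GEM handle into the fake offset userspace passes to mmap()
 * on the DRM fd in order to map the BO.
 */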
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

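/*
 * Non-blocking busy check: radeon_bo_wait() with no_wait set returns
 * -EBUSY while the GPU still uses the BO, and reports the BO's current
 * placement, which is translated back into a GEM domain for userspace.
 */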
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

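/*
 * Blocking wait for the BO to go idle.  The asic hook, when present, lets
 * the hardware flush its caches after the wait (e.g. an HDP flush on
 * newer chips) so the CPU sees the finished rendering.
 */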
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call back into the hw-specific wait function, if any */
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

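/*
 * Record the tiling flags and pitch on the BO; they describe the layout
 * for later validation and do not modify the BO's contents.
 */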
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

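/*
 * Map or unmap a BO in this file's GPU virtual address space.  Requests
 * are rejected unless the VM manager is enabled, vm_id is zero, the
 * offset lies outside the reserved area and the mapping is snooped.
 */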
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet; to be sure we don't end up with
	 * broken userspace, reject anyone trying to use a non-zero value.
	 * That way, moving forward, we can start using the field without
	 * breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove; we need to force userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be
	 * able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	switch (args->operation) {
	case RADEON_VA_MAP:
		bo_va = radeon_bo_va(rbo, &fpriv->vm);
		if (bo_va) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
				     args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

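/*
 * Dumb BOs back unaccelerated scanout buffers.  The pitch is the aligned
 * width times the bytes per pixel ((bpp + 1) / 8 for the usual 8/16/24/32
 * bit depths), and the size is padded to whole pages before the BO is
 * allocated in VRAM.
 */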
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop the reference from the allocation - the handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}