xref: /linux/drivers/gpu/drm/radeon/radeon_gem.c (revision 08ec212c0f92cbf30e3ecc7349f18151714041d6)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

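/*
 * Allocate a radeon BO and wrap it in a GEM object.  The requested alignment
 * is clamped to at least a page, and the new object is added to the device's
 * GEM object list so it can be forcibly torn down in radeon_gem_fini().
 */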
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

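/*
 * Validate a BO into the domain requested by userspace.  The write domain
 * takes precedence over the read domain; only the CPU domain is actually
 * handled here, by waiting for the BO to go idle so the CPU can touch it.
 */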
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to go idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is reached from both the
 * GEM create and the GEM open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

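/*
 * Counterpart of radeon_gem_object_open(): drop the per-VM reference taken
 * by the open callback and remove the VA mapping once the last reference is
 * gone.  Only relevant for chips with a per-process VM (Cayman and later).
 */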
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

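/*
 * -EDEADLK from the BO/fence code means the GPU is locked up.  Try a GPU
 * reset and, if it succeeds, return -EAGAIN so userspace retries the ioctl.
 */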
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
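/*
 * Rough userspace view of the allocate/map path (illustrative sketch only,
 * error handling omitted; see libdrm for the real sequence):
 *
 *	struct drm_radeon_gem_create create = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &create);
 *
 *	struct drm_radeon_gem_mmap map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_MMAP, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.addr_ptr);
 */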
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

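/*
 * Allocate a BO on behalf of userspace.  The size is rounded up to a whole
 * page, and the work is done under the exclusive_lock read side so it can't
 * race with a GPU reset; a lockup (-EDEADLK) is translated into a reset plus
 * -EAGAIN by radeon_gem_handle_lockup().
 */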
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

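/*
 * Look up the fake mmap offset of a BO.  Userspace passes this offset to
 * mmap() on the DRM file descriptor to get a CPU mapping of the buffer.
 */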
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

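/*
 * Non-blocking busy check.  radeon_bo_wait() with no_wait set reports
 * whether the BO is still in flight, and the current TTM placement is
 * translated back into a RADEON_GEM_DOMAIN_* value for userspace.
 */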
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		/* fall through */
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

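/*
 * Blocking wait for a BO to go idle.  Also gives the ASIC-specific
 * ioctl_wait_idle hook, if the ASIC provides one, a chance to run before
 * returning to userspace.
 */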
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call the hw specific ioctl_wait_idle hook, if any */
	if (rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

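/*
 * Per-process virtual address space management (Cayman and later only).
 * Userspace maps or unmaps a BO at a GPU virtual address in its own VM.
 * Only snooped (CPU-coherent) mappings outside the reserved low range are
 * accepted, and vm_id must be zero for now.
 */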
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet.  To be sure we don't end up with broken
	 * userspace, reject anyone trying to use a non-zero value; that way we
	 * can start using these fields later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

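/*
 * Create a "dumb" scanout buffer for unaccelerated KMS clients.  The pitch
 * is the hardware-aligned width in pixels times the bytes per pixel, and the
 * total size is rounded up to a whole page.  For example, a 1024x768, 32 bpp
 * dumb buffer ends up with a pitch of at least 4096 bytes and a page-aligned
 * size of at least 3 MiB (the exact alignment is ASIC dependent).
 */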
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}