xref: /linux/drivers/gpu/drm/radeon/radeon_object.c (revision 67638e4043083cdc6f10386a75fef87ba46eecb3)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

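/*
 * radeon_object wraps a TTM buffer object together with the per-buffer
 * state the driver tracks: the owning device, the backing GEM object (if
 * any), a cached kernel mapping, a pin refcount, the last computed GPU
 * address, and the tiling/surface-register bookkeeping.
 */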
struct radeon_object {
	struct ttm_buffer_object	tobj;
	struct list_head		list;
	struct radeon_device		*rdev;
	struct drm_gem_object		*gobj;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	uint64_t			gpu_addr;
	void				*kptr;
	bool				is_iomem;
	uint32_t			tiling_flags;
	uint32_t			pitch;
	int				surface_reg;
};

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
	ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
	struct radeon_object *robj;

	robj = container_of(tobj, struct radeon_object, tobj);
	list_del_init(&robj->list);
	radeon_object_clear_surface_reg(robj);
	kfree(robj);
}

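/*
 * Derive the GPU-visible address of a placed buffer: the offset of its
 * memory node within the aperture (mm_node->start is in pages, hence the
 * PAGE_SHIFT) plus the base of whichever aperture it landed in (VRAM or
 * GTT). Unplaced buffers and unknown placements get ~0 as a poison value.
 */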
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
	/* Default gpu address */
	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
	if (robj->tobj.mem.mm_node == NULL) {
		return;
	}
	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
	switch (robj->tobj.mem.mem_type) {
	case TTM_PL_VRAM:
		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
		return;
	}
}

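/*
 * Translate a RADEON_GEM_DOMAIN_* mask into TTM placement flags. VRAM
 * and GTT are mapped write-combined/uncached; CPU (and an empty mask,
 * as a fallback) maps to cacheable system memory.
 */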
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
	uint32_t flags = 0;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
	}
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		flags |= TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	}
	if (!flags) {
		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	}
	return flags;
}

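/*
 * Allocate and initialize a radeon_object of @size bytes backed by TTM.
 * @kernel selects a kernel-internal BO rather than a device (userspace)
 * one, @domain the initial placement, and @gobj an optional GEM object
 * to associate; GEM-backed objects are tracked on rdev->gem.objects.
 * TTM owns the object's lifetime and calls the destroy callback when
 * the last reference goes away.
 */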
int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr)
{
	struct radeon_object *robj;
	enum ttm_bo_type type;
	uint32_t flags;
	int r;

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*robj_ptr = NULL;
	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
	if (robj == NULL) {
		return -ENOMEM;
	}
	robj->rdev = rdev;
	robj->gobj = gobj;
	robj->surface_reg = -1;
	INIT_LIST_HEAD(&robj->list);

	flags = radeon_object_flags_from_domain(domain);
	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
				   0, 0, false, NULL, size,
				   &radeon_ttm_object_object_destroy);
	if (unlikely(r != 0)) {
		/* ttm has already called radeon_ttm_object_object_destroy on error */
		DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
			  size, flags, 0);
		return r;
	}
	*robj_ptr = robj;
	if (gobj) {
		list_add_tail(&robj->list, &rdev->gem.objects);
	}
	return 0;
}

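/*
 * Map the whole object into kernel address space, caching the mapping in
 * robj->kmap so repeated calls are cheap; the mapping persists until
 * radeon_object_kunmap(). On success *ptr (if non-NULL) receives the
 * CPU virtual address.
 */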
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
	int r;

	spin_lock(&robj->tobj.lock);
	if (robj->kptr) {
		if (ptr) {
			*ptr = robj->kptr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
	if (r) {
		return r;
	}
	spin_lock(&robj->tobj.lock);
	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
	spin_unlock(&robj->tobj.lock);
	if (ptr) {
		*ptr = robj->kptr;
	}
	return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
	spin_lock(&robj->tobj.lock);
	if (robj->kptr == NULL) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	robj->kptr = NULL;
	spin_unlock(&robj->tobj.lock);
	ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
	struct ttm_buffer_object *tobj;

	if ((*robj) == NULL) {
		return;
	}
	tobj = &((*robj)->tobj);
	ttm_bo_unref(&tobj);
	if (tobj == NULL) {
		*robj = NULL;
	}
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
	*offset = robj->tobj.addr_space_offset;
	return 0;
}

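/*
 * Pin the object into @domain, i.e. validate it there with
 * TTM_PL_FLAG_NO_EVICT set so TTM cannot move it, and return its GPU
 * address through @gpu_addr. Pins are refcounted: a buffer that is
 * already pinned just gets its count bumped and its cached address
 * returned.
 */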
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr)
{
	uint32_t flags;
	uint32_t tmp;
	int r;

	flags = radeon_object_flags_from_domain(domain);
	spin_lock(&robj->tobj.lock);
	if (robj->pin_count) {
		robj->pin_count++;
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for pinning.\n");
		return r;
	}
	tmp = robj->tobj.mem.placement;
	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to pin object.\n");
		radeon_object_unreserve(robj);
		return r;
	}
	/* Record the pin only once the buffer actually sits in the
	 * requested placement. */
	radeon_object_gpu_addr(robj);
	if (gpu_addr != NULL) {
		*gpu_addr = robj->gpu_addr;
	}
	robj->pin_count = 1;
	radeon_object_unreserve(robj);
	return 0;
}

void radeon_object_unpin(struct radeon_object *robj)
{
	uint32_t flags;
	int r;

	spin_lock(&robj->tobj.lock);
	if (!robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
		return;
	}
	robj->pin_count--;
	if (robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for unpinning.\n");
		return;
	}
	/* Drop NO_EVICT so TTM may move the buffer again. */
	flags = robj->tobj.mem.placement;
	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to unpin buffer.\n");
	}
	radeon_object_unreserve(robj);
}

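/*
 * Typical lifetime of a kernel-internal buffer, as an illustrative
 * sketch only (error handling abbreviated; the local variable names are
 * hypothetical, the calls are the ones defined in this file):
 *
 *	struct radeon_object *robj;
 *	uint64_t gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = radeon_object_create(rdev, NULL, size, true,
 *				 RADEON_GEM_DOMAIN_VRAM, true, &robj);
 *	r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	r = radeon_object_kmap(robj, &cpu_ptr);
 *	... CPU writes through cpu_ptr, GPU reads at gpu_addr ...
 *	radeon_object_kunmap(robj);
 *	radeon_object_unpin(robj);
 *	radeon_object_unref(&robj);
 */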
int radeon_object_wait(struct radeon_object *robj)
{
	int r = 0;

	/* FIXME: should use block reservation instead */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
	spin_lock(&robj->tobj.lock);
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, true, false);
	}
	spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_IGP) {
		/* Evicting VRAM is pointless on IGP chips: their "VRAM"
		 * is carved out of system memory. */
		return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
	struct radeon_object *robj, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	DRM_ERROR("Userspace still has active objects!\n");
	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = robj->gobj;
		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
			  gobj, robj, (unsigned long)gobj->size,
			  *((unsigned long *)&gobj->refcount));
		list_del_init(&robj->list);
		radeon_object_unref(&robj);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_object_init(struct radeon_device *rdev)
{
	return radeon_ttm_init(rdev);
}

void radeon_object_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

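/*
 * The list helpers below operate on a per-CS list of radeon_object_list
 * entries. Buffers with a write domain are queued at the head of the
 * list and are therefore visited first by radeon_object_list_validate().
 */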
void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_object_list_reserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;
	int r;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			r = radeon_object_reserve(lobj->robj, true);
			if (unlikely(r != 0)) {
				DRM_ERROR("radeon: failed to reserve object.\n");
				return r;
			}
		}
		/* Pinned buffers are skipped; they are not validated. */
	}
	return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			radeon_object_unreserve(lobj->robj);
		}
		/* Pinned buffers were never reserved to begin with. */
	}
}

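/*
 * Reserve every buffer on the list, validate the unpinned ones into
 * their requested domain (write domain wins over read domain), record
 * the resulting GPU offsets and tiling flags for the CS parser, and
 * attach @fence as each buffer's new sync object, dropping the old one.
 */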
int radeon_object_list_validate(struct list_head *head, void *fence)
{
	struct radeon_object_list *lobj;
	struct radeon_object *robj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;
	int r;

	r = radeon_object_list_reserve(head);
	if (unlikely(r != 0)) {
		radeon_object_list_unreserve(head);
		return r;
	}
	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		robj = lobj->robj;
		if (!robj->pin_count) {
			if (lobj->wdomain) {
				robj->tobj.proposed_placement =
					radeon_object_flags_from_domain(lobj->wdomain);
			} else {
				robj->tobj.proposed_placement =
					radeon_object_flags_from_domain(lobj->rdomain);
			}
			r = ttm_buffer_object_validate(&robj->tobj,
						       robj->tobj.proposed_placement,
						       true, false);
			if (unlikely(r)) {
				DRM_ERROR("radeon: failed to validate.\n");
				return r;
			}
			radeon_object_gpu_addr(robj);
		}
		lobj->gpu_offset = robj->gpu_addr;
		lobj->tiling_flags = robj->tiling_flags;
		if (fence) {
			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
			robj->tobj.sync_obj = radeon_fence_ref(fence);
			robj->tobj.sync_obj_arg = NULL;
		}
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
		lobj->robj->tobj.sync_obj = NULL;
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
	radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &robj->tobj);
}

unsigned long radeon_object_size(struct radeon_object *robj)
{
	return robj->tobj.num_pages << PAGE_SHIFT;
}

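/*
 * Allocate one of the RADEON_GEM_MAX_SURFACES hardware surface registers
 * for a tiled buffer: reuse the buffer's existing slot if it has one,
 * otherwise take the first free slot, and as a last resort steal a slot
 * from an unpinned buffer (unmapping it so its next fault reprograms a
 * register). The register is then programmed with the buffer's tiling
 * flags, pitch, and VRAM range.
 */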
int radeon_object_get_surface_reg(struct radeon_object *robj)
{
	struct radeon_device *rdev = robj->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_object *old_object;
	int steal;
	int i;

	if (!robj->tiling_flags)
		return 0;

	if (robj->surface_reg >= 0) {
		reg = &rdev->surface_regs[robj->surface_reg];
		i = robj->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->robj)
			break;

		old_object = reg->robj;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->robj;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tobj);
		old_object->surface_reg = -1;
		i = steal;
	}

	robj->surface_reg = i;
	reg->robj = robj;

out:
	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
			       robj->tobj.num_pages << PAGE_SHIFT);
	return 0;
}

void radeon_object_clear_surface_reg(struct radeon_object *robj)
{
	struct radeon_device *rdev = robj->rdev;
	struct radeon_surface_reg *reg;

	if (robj->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[robj->surface_reg];
	radeon_clear_surface_reg(rdev, robj->surface_reg);

	reg->robj = NULL;
	robj->surface_reg = -1;
}

void radeon_object_set_tiling_flags(struct radeon_object *robj,
				    uint32_t tiling_flags, uint32_t pitch)
{
	robj->tiling_flags = tiling_flags;
	robj->pitch = pitch;
}

void radeon_object_get_tiling_flags(struct radeon_object *robj,
				    uint32_t *tiling_flags,
				    uint32_t *pitch)
{
	if (tiling_flags)
		*tiling_flags = robj->tiling_flags;
	if (pitch)
		*pitch = robj->pitch;
}

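/*
 * Keep a buffer's surface register consistent with where it lives:
 * buffers without RADEON_TILING_SURFACE need nothing; @force_drop
 * releases the register unconditionally; a buffer that has left VRAM
 * gives its register up; a buffer in VRAM without a current register
 * gets one allocated.
 */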
int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
			       bool force_drop)
{
	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_object_clear_surface_reg(robj);
		return 0;
	}

	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (robj->surface_reg >= 0)
			radeon_object_clear_surface_reg(robj);
		return 0;
	}

	if ((robj->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_object_get_surface_reg(robj);
}

/* TTM move callback: the buffer is changing placement, so drop its
 * surface register; the fault path below re-acquires one if the buffer
 * is still tiled and ends up in VRAM. */
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
	radeon_object_check_tiling(robj, false, true);
}

void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
	radeon_object_check_tiling(robj, false, false);
}