/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude concurrent access to a BO we rely on bo_reserve exclusion,
 * as all functions below call it.
 */

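/*
 * TTM destroy callback: drop the BO from the device's GEM object list,
 * release its surface register and free the embedding radeon_bo.
 */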
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &radeon_ttm_bo_destroy;
}

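/*
 * Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list that
 * ttm_bo_validate() consumes; with no domain set, fall back to system memory.
 */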
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

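/*
 * Allocate a radeon_bo of @size bytes backed by @domain.  On success the
 * new BO is returned through @bo_ptr and, when it wraps a GEM object,
 * added to the device's GEM object list.
 */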
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
			unsigned long size, bool kernel, u32 domain,
			struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	bo->rdev = rdev;
	bo->gobj = gobj;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, 0, !kernel, NULL, size,
			&radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	if (gobj) {
		mutex_lock(&bo->rdev->gem.mutex);
		list_add_tail(&bo->list, &rdev->gem.objects);
		mutex_unlock(&bo->rdev->gem.mutex);
	}
	return 0;
}

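/*
 * Map the whole BO into the kernel address space; the mapping is cached
 * in bo->kptr so repeated calls are cheap.
 */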
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

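/*
 * Drop a reference on the BO; the caller's pointer is cleared once the
 * underlying TTM object is gone.
 */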
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}

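/*
 * Pin the BO into @domain so it can no longer be evicted, optionally
 * returning its GPU address; pinning nests via pin_count.
 */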
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force pinning into the CPU-visible part of VRAM */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

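/* Drop one pin reference; at zero the BO becomes evictable again. */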
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

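/* Evict everything out of VRAM, e.g. on suspend or driver teardown. */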
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

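/*
 * Last-resort cleanup at driver unload: free GEM objects that userspace
 * leaked, complaining loudly about each one.
 */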
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = bo->gobj;
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			gobj, bo, (unsigned long)gobj->size,
			*((unsigned long *)&gobj->refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		radeon_bo_unref(&bo);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

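/*
 * Queue a BO for validation: write-domain BOs go to the head of the
 * list, read-only BOs to the tail.
 */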
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_bo_list_reserve(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	int r;

	list_for_each_entry(lobj, head, list) {
		r = radeon_bo_reserve(lobj->bo, false);
		if (unlikely(r != 0))
			return r;
		lobj->reserved = true;
	}
	return 0;
}

void radeon_bo_list_unreserve(struct list_head *head)
{
	struct radeon_bo_list *lobj;

	list_for_each_entry(lobj, head, list) {
		/* only unreserve objects we successfully reserved */
		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
			radeon_bo_unreserve(lobj->bo);
	}
}

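/*
 * Reserve every BO on the list and validate each unpinned one into its
 * requested domain, recording the resulting GPU offset and tiling flags.
 */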
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	list_for_each_entry(lobj, head, list) {
		lobj->reserved = false;
	}
	r = radeon_bo_list_reserve(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, list) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			if (lobj->wdomain) {
				radeon_ttm_placement_from_domain(bo,
								lobj->wdomain);
			} else {
				radeon_ttm_placement_from_domain(bo,
								lobj->rdomain);
			}
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r))
				return r;
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

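/* Attach @fence to every BO on the list, releasing any previous fence. */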
void radeon_bo_list_fence(struct list_head *head, void *fence)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	struct radeon_fence *old_fence = NULL;

	list_for_each_entry(lobj, head, list) {
		bo = lobj->bo;
		spin_lock(&bo->tbo.lock);
		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
		bo->tbo.sync_obj = radeon_fence_ref(fence);
		bo->tbo.sync_obj_arg = NULL;
		spin_unlock(&bo->tbo.lock);
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

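/*
 * Find a surface register for a tiled BO, stealing one from an unpinned
 * BO when none is free, and program it with the BO's tiling parameters.
 */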
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

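/*
 * Keep the surface register in sync with the BO's placement: drop it
 * when @force_drop is set or the BO has left VRAM, (re)acquire it when
 * the BO sits in VRAM.
 */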
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
}

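/*
 * CPU page-fault hook: if the faulting BO lives in VRAM beyond the
 * CPU-visible aperture, migrate it into the visible window first.
 */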
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.mm_node->start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* the BO is outside the CPU-visible part of VRAM: move it */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.mm_node->start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}