xref: /linux/drivers/gpu/drm/radeon/radeon_object.c (revision a1c3be890440a1769ed6f822376a3e3ab0d42994)
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32 
33 #include <linux/io.h>
34 #include <linux/list.h>
35 #include <linux/slab.h>
36 
37 #include <drm/drm_cache.h>
38 #include <drm/drm_prime.h>
39 #include <drm/radeon_drm.h>
40 
41 #include "radeon.h"
42 #include "radeon_trace.h"
43 #include "radeon_ttm.h"
44 
45 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
46 
47 /*
48  * To exclude concurrent BO access we rely on bo_reserve exclusion, as all
49  * functions that touch a BO call it.
50  */
51 
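/**
 * radeon_update_memory_usage - track per-domain memory usage
 *
 * @bo: buffer object being accounted
 * @mem_type: TTM memory type the BO occupies (TTM_PL_TT or TTM_PL_VRAM)
 * @sign: +1 to add the BO size to the counter, -1 to subtract it
 *
 * Updates the device-wide gtt_usage / vram_usage counters by the size
 * of the buffer object.
 */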
52 static void radeon_update_memory_usage(struct radeon_bo *bo,
53 				       unsigned mem_type, int sign)
54 {
55 	struct radeon_device *rdev = bo->rdev;
56 
57 	switch (mem_type) {
58 	case TTM_PL_TT:
59 		if (sign > 0)
60 			atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
61 		else
62 			atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
63 		break;
64 	case TTM_PL_VRAM:
65 		if (sign > 0)
66 			atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
67 		else
68 			atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
69 		break;
70 	}
71 }
72 
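/**
 * radeon_ttm_bo_destroy - TTM destroy callback for radeon BOs
 *
 * @tbo: TTM buffer object being destroyed
 *
 * Drops the memory usage accounting, removes the BO from the GEM object
 * list, releases its surface register, tears down any dma-buf import and
 * frees the structure.
 */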
73 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
74 {
75 	struct radeon_bo *bo;
76 
77 	bo = container_of(tbo, struct radeon_bo, tbo);
78 
79 	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
80 
81 	mutex_lock(&bo->rdev->gem.mutex);
82 	list_del_init(&bo->list);
83 	mutex_unlock(&bo->rdev->gem.mutex);
84 	radeon_bo_clear_surface_reg(bo);
85 	WARN_ON_ONCE(!list_empty(&bo->va));
86 	if (bo->tbo.base.import_attach)
87 		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
88 	drm_gem_object_release(&bo->tbo.base);
89 	kfree(bo);
90 }
91 
92 bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
93 {
94 	if (bo->destroy == &radeon_ttm_bo_destroy)
95 		return true;
96 	return false;
97 }
98 
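/**
 * radeon_ttm_placement_from_domain - fill the TTM placements for a domain mask
 *
 * @rbo: buffer object to set up
 * @domain: mask of RADEON_GEM_DOMAIN_VRAM, _GTT and _CPU bits
 *
 * Translates the requested domains into TTM placements.  BOs flagged
 * RADEON_GEM_NO_CPU_ACCESS get an extra VRAM placement above the CPU
 * visible window, BOs flagged RADEON_GEM_CPU_ACCESS are limited to it,
 * and an empty mask falls back to system memory.
 */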
99 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
100 {
101 	u32 c = 0, i;
102 
103 	rbo->placement.placement = rbo->placements;
104 	rbo->placement.busy_placement = rbo->placements;
105 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
106 		/* Try placing BOs which don't need CPU access outside of the
107 		 * CPU-accessible part of VRAM
108 		 */
109 		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
110 		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
111 			rbo->placements[c].fpfn =
112 				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
113 			rbo->placements[c].mem_type = TTM_PL_VRAM;
114 			rbo->placements[c++].flags = 0;
115 		}
116 
117 		rbo->placements[c].fpfn = 0;
118 		rbo->placements[c].mem_type = TTM_PL_VRAM;
119 		rbo->placements[c++].flags = 0;
120 	}
121 
122 	if (domain & RADEON_GEM_DOMAIN_GTT) {
123 		rbo->placements[c].fpfn = 0;
124 		rbo->placements[c].mem_type = TTM_PL_TT;
125 		rbo->placements[c++].flags = 0;
126 	}
127 
128 	if (domain & RADEON_GEM_DOMAIN_CPU) {
129 		rbo->placements[c].fpfn = 0;
130 		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
131 		rbo->placements[c++].flags = 0;
132 	}
133 	if (!c) {
134 		rbo->placements[c].fpfn = 0;
135 		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
136 		rbo->placements[c++].flags = 0;
137 	}
138 
139 	rbo->placement.num_placement = c;
140 	rbo->placement.num_busy_placement = c;
141 
142 	for (i = 0; i < c; ++i) {
143 		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
144 		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
145 		    !rbo->placements[i].fpfn)
146 			rbo->placements[i].lpfn =
147 				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
148 		else
149 			rbo->placements[i].lpfn = 0;
150 	}
151 }
152 
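/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * @rdev: radeon device the BO belongs to
 * @size: requested size in bytes, rounded up to a full page
 * @byte_align: alignment of the BO in bytes
 * @kernel: true for a kernel-internal BO that userspace cannot map
 * @domain: initial placement domain(s)
 * @flags: RADEON_GEM_* creation flags
 * @sg: scatter/gather table for dma-buf imports, NULL otherwise
 * @resv: reservation object to reuse, NULL to allocate a private one
 * @bo_ptr: used to return the created BO
 *
 * Masks out the write-combining flags on chips and configurations where
 * WC GTT mappings are known to be broken, then hands the object to TTM
 * for placement.
 * Returns 0 on success, negative error code on failure.
 */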
153 int radeon_bo_create(struct radeon_device *rdev,
154 		     unsigned long size, int byte_align, bool kernel,
155 		     u32 domain, u32 flags, struct sg_table *sg,
156 		     struct dma_resv *resv,
157 		     struct radeon_bo **bo_ptr)
158 {
159 	struct radeon_bo *bo;
160 	enum ttm_bo_type type;
161 	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
162 	int r;
163 
164 	size = ALIGN(size, PAGE_SIZE);
165 
166 	if (kernel) {
167 		type = ttm_bo_type_kernel;
168 	} else if (sg) {
169 		type = ttm_bo_type_sg;
170 	} else {
171 		type = ttm_bo_type_device;
172 	}
173 	*bo_ptr = NULL;
174 
175 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
176 	if (bo == NULL)
177 		return -ENOMEM;
178 	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
179 	bo->rdev = rdev;
180 	bo->surface_reg = -1;
181 	INIT_LIST_HEAD(&bo->list);
182 	INIT_LIST_HEAD(&bo->va);
183 	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
184 				       RADEON_GEM_DOMAIN_GTT |
185 				       RADEON_GEM_DOMAIN_CPU);
186 
187 	bo->flags = flags;
188 	/* PCI GART is always snooped */
189 	if (!(rdev->flags & RADEON_IS_PCIE))
190 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
191 
192 	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
193 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
194 	 */
195 	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
196 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
197 
198 #ifdef CONFIG_X86_32
199 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
200 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
201 	 */
202 	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
203 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
204 	/* Don't try to enable write-combining when it can't work, or things
205 	 * may be slow
206 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
207 	 */
208 #ifndef CONFIG_COMPILE_TEST
209 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
210 	 thanks to write-combining
211 #endif
212 
213 	if (bo->flags & RADEON_GEM_GTT_WC)
214 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
215 			      "better performance thanks to write-combining\n");
216 	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
217 #else
218 	/* For architectures that don't support WC memory,
219 	 * mask out the WC flag from the BO
220 	 */
221 	if (!drm_arch_can_wc_memory())
222 		bo->flags &= ~RADEON_GEM_GTT_WC;
223 #endif
224 
225 	radeon_ttm_placement_from_domain(bo, domain);
226 	/* Kernel allocations are uninterruptible */
227 	down_read(&rdev->pm.mclk_lock);
228 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
229 			&bo->placement, page_align, !kernel, sg, resv,
230 			&radeon_ttm_bo_destroy);
231 	up_read(&rdev->pm.mclk_lock);
232 	if (unlikely(r != 0)) {
233 		return r;
234 	}
235 	*bo_ptr = bo;
236 
237 	trace_radeon_bo_create(bo);
238 
239 	return 0;
240 }
241 
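/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 *
 * @bo: buffer object to map
 * @ptr: used to return the kernel virtual address, may be NULL
 *
 * Maps the whole BO with ttm_bo_kmap() and caches the result in bo->kptr,
 * so repeated calls return the existing mapping.
 * Returns 0 on success, negative error code on failure.
 */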
242 int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
243 {
244 	bool is_iomem;
245 	int r;
246 
247 	if (bo->kptr) {
248 		if (ptr) {
249 			*ptr = bo->kptr;
250 		}
251 		return 0;
252 	}
253 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
254 	if (r) {
255 		return r;
256 	}
257 	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
258 	if (ptr) {
259 		*ptr = bo->kptr;
260 	}
261 	radeon_bo_check_tiling(bo, 0, 0);
262 	return 0;
263 }
264 
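/**
 * radeon_bo_kunmap - drop the kernel mapping of a buffer object
 *
 * @bo: buffer object to unmap
 *
 * Counterpart to radeon_bo_kmap(); does nothing if the BO is not mapped.
 */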
265 void radeon_bo_kunmap(struct radeon_bo *bo)
266 {
267 	if (bo->kptr == NULL)
268 		return;
269 	bo->kptr = NULL;
270 	radeon_bo_check_tiling(bo, 0, 0);
271 	ttm_bo_kunmap(&bo->kmap);
272 }
273 
274 struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
275 {
276 	if (bo == NULL)
277 		return NULL;
278 
279 	ttm_bo_get(&bo->tbo);
280 	return bo;
281 }
282 
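/**
 * radeon_bo_unref - drop a reference to a buffer object
 *
 * @bo: pointer to the BO pointer, cleared on return
 *
 * Drops one TTM reference and clears the caller's pointer; the BO is
 * destroyed once the last reference is gone.
 */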
283 void radeon_bo_unref(struct radeon_bo **bo)
284 {
285 	struct ttm_buffer_object *tbo;
286 
287 	if ((*bo) == NULL)
288 		return;
289 	tbo = &((*bo)->tbo);
290 	ttm_bo_put(tbo);
291 	*bo = NULL;
292 }
293 
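/**
 * radeon_bo_pin_restricted - pin a buffer object within an address range
 *
 * @bo: buffer object to pin
 * @domain: domain to pin the BO into
 * @max_offset: upper limit for the GPU offset, 0 for no limit
 * @gpu_addr: used to return the GPU address, may be NULL
 *
 * Validates the BO into @domain and pins it there.  Userptr BOs cannot be
 * pinned and dma-buf shared BOs cannot be pinned into VRAM.  If the BO is
 * already pinned only the pin count and, if requested, the GPU address
 * are updated.
 * Returns 0 on success, negative error code on failure.
 */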
294 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
295 			     u64 *gpu_addr)
296 {
297 	struct ttm_operation_ctx ctx = { false, false };
298 	int r, i;
299 
300 	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
301 		return -EPERM;
302 
303 	if (bo->tbo.pin_count) {
304 		ttm_bo_pin(&bo->tbo);
305 		if (gpu_addr)
306 			*gpu_addr = radeon_bo_gpu_offset(bo);
307 
308 		if (max_offset != 0) {
309 			u64 domain_start;
310 
311 			if (domain == RADEON_GEM_DOMAIN_VRAM)
312 				domain_start = bo->rdev->mc.vram_start;
313 			else
314 				domain_start = bo->rdev->mc.gtt_start;
315 			WARN_ON_ONCE(max_offset <
316 				     (radeon_bo_gpu_offset(bo) - domain_start));
317 		}
318 
319 		return 0;
320 	}
321 	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
322 		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
323 		return -EINVAL;
324 	}
325 
326 	radeon_ttm_placement_from_domain(bo, domain);
327 	for (i = 0; i < bo->placement.num_placement; i++) {
328 		/* force pinning into visible VRAM */
329 		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
330 		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
331 		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
332 			bo->placements[i].lpfn =
333 				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
334 		else
335 			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
336 	}
337 
338 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
339 	if (likely(r == 0)) {
340 		ttm_bo_pin(&bo->tbo);
341 		if (gpu_addr != NULL)
342 			*gpu_addr = radeon_bo_gpu_offset(bo);
343 		if (domain == RADEON_GEM_DOMAIN_VRAM)
344 			bo->rdev->vram_pin_size += radeon_bo_size(bo);
345 		else
346 			bo->rdev->gart_pin_size += radeon_bo_size(bo);
347 	} else {
348 		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
349 	}
350 	return r;
351 }
352 
353 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
354 {
355 	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
356 }
357 
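/**
 * radeon_bo_unpin - unpin a buffer object
 *
 * @bo: buffer object to unpin
 *
 * Decreases the pin count and, once it reaches zero, updates the
 * vram_pin_size / gart_pin_size accounting.
 */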
358 void radeon_bo_unpin(struct radeon_bo *bo)
359 {
360 	ttm_bo_unpin(&bo->tbo);
361 	if (!bo->tbo.pin_count) {
362 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
363 			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
364 		else
365 			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
366 	}
367 }
368 
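/**
 * radeon_bo_evict_vram - evict all buffer objects from VRAM
 *
 * @rdev: radeon device
 *
 * Used on suspend and hibernation.  Skipped on IGP chips without sideport
 * memory when hibernation support is not built in, since eviction gains
 * nothing there.
 * Returns 0 on success, negative error code on failure.
 */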
369 int radeon_bo_evict_vram(struct radeon_device *rdev)
370 {
371 	struct ttm_device *bdev = &rdev->mman.bdev;
372 	struct ttm_resource_manager *man;
373 
374 	/* late 2.6.33 fix for IGP hibernation - we need pm ops to do this correctly */
375 #ifndef CONFIG_HIBERNATION
376 	if (rdev->flags & RADEON_IS_IGP) {
377 		if (rdev->mc.igp_sideport_enabled == false)
378 			/* Useless to evict on IGP chips */
379 			return 0;
380 	}
381 #endif
382 	man = ttm_manager_type(bdev, TTM_PL_VRAM);
383 	return ttm_resource_manager_evict_all(bdev, man);
384 }
385 
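/**
 * radeon_bo_force_delete - release leaked GEM objects at teardown
 *
 * @rdev: radeon device
 *
 * Complains about every buffer object userspace still holds and drops the
 * remaining reference so the object gets freed.
 */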
386 void radeon_bo_force_delete(struct radeon_device *rdev)
387 {
388 	struct radeon_bo *bo, *n;
389 
390 	if (list_empty(&rdev->gem.objects)) {
391 		return;
392 	}
393 	dev_err(rdev->dev, "Userspace still has active objects!\n");
394 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
395 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
396 			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
397 			*((unsigned long *)&bo->tbo.base.refcount));
398 		mutex_lock(&bo->rdev->gem.mutex);
399 		list_del_init(&bo->list);
400 		mutex_unlock(&bo->rdev->gem.mutex);
401 		/* this should unref the ttm bo */
402 		drm_gem_object_put(&bo->tbo.base);
403 	}
404 }
405 
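/**
 * radeon_bo_init - initialize the radeon memory manager
 *
 * @rdev: radeon device
 *
 * Reserves the VRAM aperture for write-combined CPU mappings, adds an
 * MTRR for it if needed, prints the detected VRAM configuration and
 * initializes TTM.
 * Returns 0 on success, negative error code on failure.
 */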
406 int radeon_bo_init(struct radeon_device *rdev)
407 {
408 	/* reserve the VRAM aperture for write-combined CPU mappings */
409 	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
410 				   rdev->mc.aper_size);
411 
412 	/* Add an MTRR for the VRAM */
413 	if (!rdev->fastfb_working) {
414 		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
415 						      rdev->mc.aper_size);
416 	}
417 	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
418 		rdev->mc.mc_vram_size >> 20,
419 		(unsigned long long)rdev->mc.aper_size >> 20);
420 	DRM_INFO("RAM width %dbits %cDR\n",
421 			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
422 	return radeon_ttm_init(rdev);
423 }
424 
425 void radeon_bo_fini(struct radeon_device *rdev)
426 {
427 	radeon_ttm_fini(rdev);
428 	arch_phys_wc_del(rdev->mc.vram_mtrr);
429 	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
430 }
431 
432 /* Returns how many bytes TTM can move per IB.
433  */
434 static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
435 {
436 	u64 real_vram_size = rdev->mc.real_vram_size;
437 	u64 vram_usage = atomic64_read(&rdev->vram_usage);
438 
439 	/* This function is based on the current VRAM usage.
440 	 *
441 	 * - If all of VRAM is free, allow relocating the number of bytes that
442 	 *   is equal to 1/4 of the size of VRAM for this IB.
443 	 *
444 	 * - If more than one half of VRAM is occupied, only allow relocating
445 	 *   1 MB of data for this IB.
446 	 *
447 	 * - From 0 to one half of used VRAM, the threshold decreases
448 	 *   linearly.
449 	 *         __________________
450 	 * 1/4 of -|\               |
451 	 * VRAM    | \              |
452 	 *         |  \             |
453 	 *         |   \            |
454 	 *         |    \           |
455 	 *         |     \          |
456 	 *         |      \         |
457 	 *         |       \________|1 MB
458 	 *         |----------------|
459 	 *    VRAM 0 %             100 %
460 	 *         used            used
461 	 *
462 	 * Note: It's a threshold, not a limit. The threshold must be crossed
463 	 * for buffer relocations to stop, so any buffer of an arbitrary size
464 	 * can be moved as long as the threshold isn't crossed before
465 	 * the relocation takes place. We don't want to disable buffer
466 	 * relocations completely.
467 	 *
468 	 * The idea is that buffers should be placed in VRAM at creation time
469 	 * and TTM should only do a minimum number of relocations during
470 	 * command submission. In practice, you need to submit at least
471 	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
472 	 *
473 	 * Also, things can get pretty crazy under memory pressure and actual
474 	 * VRAM usage can change a lot, so playing it safe even at 50%
475 	 * consistently increases performance.
476 	 */
477 
478 	u64 half_vram = real_vram_size >> 1;
479 	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
480 	u64 bytes_moved_threshold = half_free_vram >> 1;
481 	return max(bytes_moved_threshold, 1024*1024ull);
482 }
483 
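/**
 * radeon_bo_list_validate - validate all BOs for a command submission
 *
 * @rdev: radeon device
 * @ticket: ww_acquire ticket used to reserve the buffers
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring the command submission is meant for
 *
 * Reserves all buffers and validates them into their preferred domains,
 * falling back to the allowed domains on failure.  Once the per-IB move
 * threshold from radeon_bo_get_threshold_for_moves() is exceeded, buffers
 * are left in their current domain.  Records the GPU offset and tiling
 * flags of every entry.
 * Returns 0 on success, negative error code on failure.
 */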
484 int radeon_bo_list_validate(struct radeon_device *rdev,
485 			    struct ww_acquire_ctx *ticket,
486 			    struct list_head *head, int ring)
487 {
488 	struct ttm_operation_ctx ctx = { true, false };
489 	struct radeon_bo_list *lobj;
490 	struct list_head duplicates;
491 	int r;
492 	u64 bytes_moved = 0, initial_bytes_moved;
493 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
494 
495 	INIT_LIST_HEAD(&duplicates);
496 	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
497 	if (unlikely(r != 0)) {
498 		return r;
499 	}
500 
501 	list_for_each_entry(lobj, head, tv.head) {
502 		struct radeon_bo *bo = lobj->robj;
503 		if (!bo->tbo.pin_count) {
504 			u32 domain = lobj->preferred_domains;
505 			u32 allowed = lobj->allowed_domains;
506 			u32 current_domain =
507 				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
508 
509 			/* Check if this buffer will be moved and don't move it
510 			 * if we have moved too many buffers for this IB already.
511 			 *
512 			 * Note that this allows moving at least one buffer of
513 			 * any size, because it doesn't take the current "bo"
514 			 * into account. We don't want to disallow buffer moves
515 			 * completely.
516 			 */
517 			if ((allowed & current_domain) != 0 &&
518 			    (domain & current_domain) == 0 && /* will be moved */
519 			    bytes_moved > bytes_moved_threshold) {
520 				/* don't move it */
521 				domain = current_domain;
522 			}
523 
524 		retry:
525 			radeon_ttm_placement_from_domain(bo, domain);
526 			if (ring == R600_RING_TYPE_UVD_INDEX)
527 				radeon_uvd_force_into_uvd_segment(bo, allowed);
528 
529 			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
530 			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
531 			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
532 				       initial_bytes_moved;
533 
534 			if (unlikely(r)) {
535 				if (r != -ERESTARTSYS &&
536 				    domain != lobj->allowed_domains) {
537 					domain = lobj->allowed_domains;
538 					goto retry;
539 				}
540 				ttm_eu_backoff_reservation(ticket, head);
541 				return r;
542 			}
543 		}
544 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
545 		lobj->tiling_flags = bo->tiling_flags;
546 	}
547 
548 	list_for_each_entry(lobj, &duplicates, tv.head) {
549 		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
550 		lobj->tiling_flags = lobj->robj->tiling_flags;
551 	}
552 
553 	return 0;
554 }
555 
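/**
 * radeon_bo_get_surface_reg - assign a surface register to a tiled BO
 *
 * @bo: buffer object to assign a surface register to
 *
 * Finds a free surface register, stealing one from an unpinned BO if
 * necessary, and programs it with the BO's tiling parameters.
 * Returns 0 on success (or when the BO has no tiling flags), -ENOMEM if
 * all surface registers are taken by pinned BOs.
 */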
556 int radeon_bo_get_surface_reg(struct radeon_bo *bo)
557 {
558 	struct radeon_device *rdev = bo->rdev;
559 	struct radeon_surface_reg *reg;
560 	struct radeon_bo *old_object;
561 	int steal;
562 	int i;
563 
564 	dma_resv_assert_held(bo->tbo.base.resv);
565 
566 	if (!bo->tiling_flags)
567 		return 0;
568 
569 	if (bo->surface_reg >= 0) {
570 		reg = &rdev->surface_regs[bo->surface_reg];
571 		i = bo->surface_reg;
572 		goto out;
573 	}
574 
575 	steal = -1;
576 	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
577 
578 		reg = &rdev->surface_regs[i];
579 		if (!reg->bo)
580 			break;
581 
582 		old_object = reg->bo;
583 		if (old_object->tbo.pin_count == 0)
584 			steal = i;
585 	}
586 
587 	/* all surface registers are in use */
588 	if (i == RADEON_GEM_MAX_SURFACES) {
589 		if (steal == -1)
590 			return -ENOMEM;
591 		/* find someone with a surface reg and nuke their BO */
592 		reg = &rdev->surface_regs[steal];
593 		old_object = reg->bo;
594 		/* blow away the mapping */
595 		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
596 		ttm_bo_unmap_virtual(&old_object->tbo);
597 		old_object->surface_reg = -1;
598 		i = steal;
599 	}
600 
601 	bo->surface_reg = i;
602 	reg->bo = bo;
603 
604 out:
605 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
606 			       bo->tbo.mem.start << PAGE_SHIFT,
607 			       bo->tbo.base.size);
608 	return 0;
609 }
610 
611 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
612 {
613 	struct radeon_device *rdev = bo->rdev;
614 	struct radeon_surface_reg *reg;
615 
616 	if (bo->surface_reg == -1)
617 		return;
618 
619 	reg = &rdev->surface_regs[bo->surface_reg];
620 	radeon_clear_surface_reg(rdev, bo->surface_reg);
621 
622 	reg->bo = NULL;
623 	bo->surface_reg = -1;
624 }
625 
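/**
 * radeon_bo_set_tiling_flags - set the tiling parameters of a BO
 *
 * @bo: buffer object
 * @tiling_flags: RADEON_TILING_* flags
 * @pitch: surface pitch
 *
 * On Evergreen and newer chips the bank width/height, macro tile aspect
 * and tile split fields are validated first; the flags and pitch are then
 * stored in the BO.
 * Returns 0 on success, -EINVAL for invalid tiling parameters, or the
 * error returned while reserving the BO.
 */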
626 int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
627 				uint32_t tiling_flags, uint32_t pitch)
628 {
629 	struct radeon_device *rdev = bo->rdev;
630 	int r;
631 
632 	if (rdev->family >= CHIP_CEDAR) {
633 		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
634 
635 		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
636 		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
637 		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
638 		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
639 		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
640 		switch (bankw) {
641 		case 0:
642 		case 1:
643 		case 2:
644 		case 4:
645 		case 8:
646 			break;
647 		default:
648 			return -EINVAL;
649 		}
650 		switch (bankh) {
651 		case 0:
652 		case 1:
653 		case 2:
654 		case 4:
655 		case 8:
656 			break;
657 		default:
658 			return -EINVAL;
659 		}
660 		switch (mtaspect) {
661 		case 0:
662 		case 1:
663 		case 2:
664 		case 4:
665 		case 8:
666 			break;
667 		default:
668 			return -EINVAL;
669 		}
670 		if (tilesplit > 6) {
671 			return -EINVAL;
672 		}
673 		if (stilesplit > 6) {
674 			return -EINVAL;
675 		}
676 	}
677 	r = radeon_bo_reserve(bo, false);
678 	if (unlikely(r != 0))
679 		return r;
680 	bo->tiling_flags = tiling_flags;
681 	bo->pitch = pitch;
682 	radeon_bo_unreserve(bo);
683 	return 0;
684 }
685 
686 void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
687 				uint32_t *tiling_flags,
688 				uint32_t *pitch)
689 {
690 	dma_resv_assert_held(bo->tbo.base.resv);
691 
692 	if (tiling_flags)
693 		*tiling_flags = bo->tiling_flags;
694 	if (pitch)
695 		*pitch = bo->pitch;
696 }
697 
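/**
 * radeon_bo_check_tiling - update the surface register of a tiled BO
 *
 * @bo: buffer object
 * @has_moved: true if the BO has just been moved
 * @force_drop: true to unconditionally release the surface register
 *
 * Makes sure a BO using a tiled surface holds a surface register while it
 * is placed in VRAM, and releases the register when the BO leaves VRAM or
 * when @force_drop is set.
 * Returns 0 on success, negative error code on failure.
 */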
698 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
699 				bool force_drop)
700 {
701 	if (!force_drop)
702 		dma_resv_assert_held(bo->tbo.base.resv);
703 
704 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
705 		return 0;
706 
707 	if (force_drop) {
708 		radeon_bo_clear_surface_reg(bo);
709 		return 0;
710 	}
711 
712 	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
713 		if (!has_moved)
714 			return 0;
715 
716 		if (bo->surface_reg >= 0)
717 			radeon_bo_clear_surface_reg(bo);
718 		return 0;
719 	}
720 
721 	if ((bo->surface_reg >= 0) && !has_moved)
722 		return 0;
723 
724 	return radeon_bo_get_surface_reg(bo);
725 }
726 
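/**
 * radeon_bo_move_notify - notification that a BO is being moved
 *
 * @bo: TTM buffer object that is moving
 * @evict: unused here
 * @new_mem: new placement of the BO, NULL when it is being destroyed
 *
 * Drops the surface register, invalidates the VM mappings of the BO and,
 * if there is a new placement, moves the memory usage accounting from the
 * old placement to the new one.
 */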
727 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
728 			   bool evict,
729 			   struct ttm_resource *new_mem)
730 {
731 	struct radeon_bo *rbo;
732 
733 	if (!radeon_ttm_bo_is_radeon_bo(bo))
734 		return;
735 
736 	rbo = container_of(bo, struct radeon_bo, tbo);
737 	radeon_bo_check_tiling(rbo, 0, 1);
738 	radeon_vm_bo_invalidate(rbo->rdev, rbo);
739 
740 	/* update statistics */
741 	if (!new_mem)
742 		return;
743 
744 	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
745 	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
746 }
747 
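/**
 * radeon_bo_fault_reserve_notify - fault callback for radeon BOs
 *
 * @bo: TTM buffer object that faulted
 *
 * Called before a CPU page fault on the BO is served.  If the BO lies in
 * VRAM that the CPU cannot reach, it is moved into the visible part of
 * VRAM, or into GTT when that fails.  A pinned BO outside visible VRAM
 * cannot be moved, so the fault is answered with VM_FAULT_SIGBUS.
 */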
748 vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
749 {
750 	struct ttm_operation_ctx ctx = { false, false };
751 	struct radeon_device *rdev;
752 	struct radeon_bo *rbo;
753 	unsigned long offset, size, lpfn;
754 	int i, r;
755 
756 	if (!radeon_ttm_bo_is_radeon_bo(bo))
757 		return 0;
758 	rbo = container_of(bo, struct radeon_bo, tbo);
759 	radeon_bo_check_tiling(rbo, 0, 0);
760 	rdev = rbo->rdev;
761 	if (bo->mem.mem_type != TTM_PL_VRAM)
762 		return 0;
763 
764 	size = bo->mem.num_pages << PAGE_SHIFT;
765 	offset = bo->mem.start << PAGE_SHIFT;
766 	if ((offset + size) <= rdev->mc.visible_vram_size)
767 		return 0;
768 
769 	/* Can't move a pinned BO to visible VRAM */
770 	if (rbo->tbo.pin_count > 0)
771 		return VM_FAULT_SIGBUS;
772 
773 	/* the memory is not CPU visible, try to move the BO into visible VRAM */
774 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
775 	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
776 	for (i = 0; i < rbo->placement.num_placement; i++) {
777 		/* Force into visible VRAM */
778 		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
779 		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
780 			rbo->placements[i].lpfn = lpfn;
781 	}
782 	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
783 	if (unlikely(r == -ENOMEM)) {
784 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
785 		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
786 	} else if (likely(!r)) {
787 		offset = bo->mem.start << PAGE_SHIFT;
788 		/* this should never happen */
789 		if ((offset + size) > rdev->mc.visible_vram_size)
790 			return VM_FAULT_SIGBUS;
791 	}
792 
793 	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
794 		return VM_FAULT_NOPAGE;
795 	else if (unlikely(r))
796 		return VM_FAULT_SIGBUS;
797 
798 	ttm_bo_move_to_lru_tail_unlocked(bo);
799 	return 0;
800 }
801 
802 /**
803  * radeon_bo_fence - add fence to buffer object
804  *
805  * @bo: buffer object in question
806  * @fence: fence to add
807  * @shared: true if fence should be added shared
808  * Adds @fence to the reservation object of @bo, as a shared or an exclusive fence.
809  */
810 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
811 		     bool shared)
812 {
813 	struct dma_resv *resv = bo->tbo.base.resv;
814 
815 	if (shared)
816 		dma_resv_add_shared_fence(resv, &fence->base);
817 	else
818 		dma_resv_add_excl_fence(resv, &fence->base);
819 }
820