xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

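/**
 * amdgpu_get_vis_part_size - how much of a buffer lies in visible VRAM
 *
 * @adev: amdgpu device
 * @mem: TTM memory region backing the buffer
 *
 * Returns the number of bytes of @mem that fall below
 * adev->mc.visible_vram_size, i.e. the CPU-accessible part of VRAM.
 */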
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;

	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			   adev->mc.visible_vram_size ?
			   adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			   mem->size;
	}
	return ret;
}

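/**
 * amdgpu_update_memory_usage - track per-domain memory usage
 *
 * @adev: amdgpu device
 * @old_mem: memory region the buffer is leaving (may be NULL)
 * @new_mem: memory region the buffer is entering (may be NULL)
 *
 * Adds @new_mem to and subtracts @old_mem from the GTT/VRAM usage
 * counters, including the CPU-visible share of VRAM.
 */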
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

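/* TTM destroy callback: drop usage accounting and free the amdgpu BO */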
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	mutex_lock(&bo->adev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->adev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &amdgpu_ttm_bo_destroy;
}

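/*
 * Translate an AMDGPU_GEM_DOMAIN_* mask and creation flags into a list
 * of TTM placements.  VRAM placements honour NO_CPU_ACCESS (preferring
 * space above the CPU-visible window) and CPU_ACCESS_REQUIRED
 * (restricted to it); GTT and CPU placements honour the USWC caching
 * flag.  Falls back to a cached system placement when no domain bit is
 * set.
 */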
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    (placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

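/**
 * amdgpu_ttm_placement_from_domain - refresh a BO's placement list
 *
 * @rbo: BO to update
 * @domain: AMDGPU_GEM_DOMAIN_* mask to place the BO in
 *
 * Rebuilds rbo->placements from @domain and the BO's creation flags.
 */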
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

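/* Copy a caller-supplied placement list into the BO's embedded arrays */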
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

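/**
 * amdgpu_bo_create_restricted - create a BO with an explicit placement
 *
 * @adev: amdgpu device
 * @size: requested size in bytes, rounded up to page granularity
 * @byte_align: byte alignment of the buffer
 * @kernel: true for kernel-internal allocations (uninterruptible,
 *	    not mappable to userspace)
 * @domain: AMDGPU_GEM_DOMAIN_* mask recorded as the initial domain
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the buffer
 * @placement: caller-supplied TTM placement list
 * @resv: optional reservation object to share
 * @bo_ptr: where to return the new BO
 *
 * Returns 0 on success, negative error code on failure.
 */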
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel)
		type = ttm_bo_type_kernel;
	else if (sg)
		type = ttm_bo_type_sg;
	else
		type = ttm_bo_type_device;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT |
				       AMDGPU_GEM_DOMAIN_CPU |
				       AMDGPU_GEM_DOMAIN_GDS |
				       AMDGPU_GEM_DOMAIN_GWS |
				       AMDGPU_GEM_DOMAIN_OA);

	bo->flags = flags;
	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}

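/**
 * amdgpu_bo_create - create a BO, deriving the placement from @domain
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @byte_align: byte alignment of the buffer
 * @kernel: true for kernel-internal allocations
 * @domain: AMDGPU_GEM_DOMAIN_* mask to place the BO in
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the buffer
 * @resv: optional reservation object to share
 * @bo_ptr: where to return the new BO
 *
 * Convenience wrapper around amdgpu_bo_create_restricted() that builds
 * the TTM placement list from @domain and @flags.
 */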
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					   domain, flags, sg, &placement,
					   resv, bo_ptr);
}

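/*
 * Illustrative sketch (not taken from in-tree code): a typical
 * kernel-internal allocation created, pinned and CPU-mapped with the
 * helpers in this file.  Error unwinding is elided, the 4096-byte size
 * is an arbitrary example value, and amdgpu_bo_reserve()/
 * amdgpu_bo_unreserve() are the reservation helpers declared in
 * amdgpu_object.h.
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create(adev, 4096, PAGE_SIZE, true,
 *			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (likely(r == 0)) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *		if (!r)
 *			r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */

/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map; must not have been created with
 *	AMDGPU_GEM_CREATE_NO_CPU_ACCESS
 * @ptr: optional location to store the kernel virtual address
 *
 * Returns 0 on success, negative error code on failure.
 */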
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

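/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: BO to pin; must be reserved by the caller
 * @domain: AMDGPU_GEM_DOMAIN_* domain to pin into
 * @min_offset: lowest acceptable offset within the domain
 * @max_offset: highest acceptable offset, or 0 for no upper bound
 * @gpu_addr: optional location to store the pinned GPU address
 *
 * Pinning an already-pinned BO just increments the pin count.  BOs
 * backed by userptr pages cannot be pinned.  Returns 0 on success,
 * negative error code on failure.
 */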
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (lpfn && lpfn < bo->placements[i].lpfn)
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

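/* Pin a BO anywhere within @domain; see amdgpu_bo_pin_restricted() */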
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

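/**
 * amdgpu_bo_unpin - decrement a BO's pin count, unpinning on zero
 *
 * @bo: BO to unpin; must be reserved by the caller
 *
 * When the pin count drops to zero the NO_EVICT flag is cleared and
 * the BO is revalidated so TTM may move it again.  Returns 0 on
 * success, negative error code on failure.
 */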
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo, *n;

	if (list_empty(&adev->gem.objects))
		return;

	dev_err(adev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
		dev_err(adev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->adev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->adev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits DDR\n", adev->mc.vram_width);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

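/**
 * amdgpu_bo_set_metadata - attach opaque metadata to a BO
 *
 * @bo: BO to attach the metadata to
 * @metadata: metadata to copy; may be NULL when @metadata_size is 0
 * @metadata_size: size of @metadata in bytes; 0 frees existing metadata
 * @flags: metadata flags stored alongside the buffer
 *
 * Returns 0 on success, negative error code on failure.
 */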
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

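/**
 * amdgpu_bo_get_metadata - query a BO's metadata
 *
 * @bo: BO to query
 * @buffer: optional buffer to copy the metadata into
 * @buffer_size: size of @buffer in bytes
 * @metadata_size: optional location to store the metadata size
 * @flags: optional location to store the metadata flags
 *
 * Returns -EINVAL if neither @buffer nor @metadata_size is supplied or
 * if @buffer is too small, 0 otherwise.
 */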
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

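/*
 * TTM move_notify callback: invalidate VM mappings of the BO and
 * update the usage counters.  Called before the actual move happens,
 * so the old placement is still in bo->mem.
 */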
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

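/*
 * TTM fault_reserve_notify callback: a CPU page fault hit a BO that
 * lives outside the CPU-visible part of VRAM.  Try to move it into
 * visible VRAM, falling back to GTT if VRAM is exhausted.
 */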
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
656