xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c (revision 9e1e9d660255d7216067193d774f338d08d8528d)
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
#include "amdgpu_vm.h"
#include "amdgpu_dma_buf.h"
46 
47 /**
48  * DOC: amdgpu_object
49  *
50  * This defines the interfaces to operate on an &amdgpu_bo buffer object which
51  * represents memory used by driver (VRAM, system memory, etc.). The driver
52  * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
53  * to create/destroy/set buffer object which are then managed by the kernel TTM
54  * memory manager.
55  * The interfaces are also used internally by kernel clients, including gfx,
56  * uvd, etc. for kernel managed allocations used by the GPU.
57  *
58  */
59 
/* TTM destroy callback for plain amdgpu BOs: tears down the kernel CPU
 * mapping, releases dma-buf import state and the embedded GEM object, and
 * finally frees the amdgpu_bo allocation itself.
 */
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	/* Drop any kernel mapping set up via amdgpu_bo_kmap() */
	amdgpu_bo_kunmap(bo);

	/* Imported dma-bufs carry an sg table that must be released first */
	if (drm_gem_is_imported(&bo->tbo.base))
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* Release the reference held on the parent BO, if any */
	amdgpu_bo_unref(&bo->parent);
	/* Allocated with kvzalloc() in amdgpu_bo_create() */
	kvfree(bo);
}
72 
73 static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
74 {
75 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
76 	struct amdgpu_bo_user *ubo;
77 
78 	ubo = to_amdgpu_bo_user(bo);
79 	kfree(ubo->metadata);
80 	amdgpu_bo_destroy(tbo);
81 }
82 
83 /**
84  * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
85  * @bo: buffer object to be checked
86  *
87  * Uses destroy function associated with the object to determine if this is
88  * an &amdgpu_bo.
89  *
90  * Returns:
91  * true if the object belongs to &amdgpu_bo, false if not.
92  */
93 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
94 {
95 	if (bo->destroy == &amdgpu_bo_destroy ||
96 	    bo->destroy == &amdgpu_bo_user_destroy)
97 		return true;
98 
99 	return false;
100 }
101 
102 /**
103  * amdgpu_bo_placement_from_domain - set buffer's placement
104  * @abo: &amdgpu_bo buffer object whose placement is to be set
105  * @domain: requested domain
106  *
107  * Sets buffer's placement according to requested domain and the buffer's
108  * flags.
109  */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;	/* number of placement entries filled in so far */

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);

		/* With memory partitioning, restrict VRAM to the BO's
		 * partition range; otherwise allow the whole aperture.
		 */
		if (adev->gmc.mem_partitions && mem_id >= 0) {
			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
			/*
			 * memory partition range lpfn is inclusive start + size - 1
			 * TTM place lpfn is exclusive start + size
			 */
			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
		} else {
			places[c].fpfn = 0;
			places[c].lpfn = 0;
		}
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		/* CPU-accessible BOs must stay below the CPU-visible limit;
		 * everything else is placed top-down.
		 */
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		/* Contiguous VRAM is only enforced here for kernel BOs */
		if (abo->tbo.type == ttm_bo_type_kernel &&
		    flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;

		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_DOORBELL;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		/* Preemptible BOs live in a dedicated TTM domain */
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		/*
		 * When GTT is just an alternative to VRAM make sure that we
		 * only use it as fallback and still try to fill up VRAM first.
		 */
		if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
		    domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
			places[c].flags |= TTM_PL_FLAG_FALLBACK;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	/* No recognized domain bit: fall back to system memory */
	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;
}
218 
219 /**
220  * amdgpu_bo_create_reserved - create reserved BO for kernel use
221  *
222  * @adev: amdgpu device object
223  * @size: size for the new BO
224  * @align: alignment for the new BO
225  * @domain: where to place it
226  * @bo_ptr: used to initialize BOs in structures
227  * @gpu_addr: GPU addr of the pinned BO
228  * @cpu_addr: optional CPU address mapping
229  *
230  * Allocates and pins a BO for kernel internal use, and returns it still
231  * reserved.
232  *
233  * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
234  *
235  * Returns:
236  * 0 on success, negative error code otherwise.
237  */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;	/* true when this call allocated *bo_ptr */
	int r;

	/* A zero size frees any existing BO and succeeds */
	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	/* Only request CPU access when a CPU mapping was asked for */
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	/* Only create a new BO if the caller didn't pass one in */
	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	/* Bind the BO into the GART so it gets a valid GPU offset */
	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	/* Success: the BO is returned pinned and still reserved */
	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	/* Only free the BO if it was allocated here, not caller-provided */
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
315 
316 /**
317  * amdgpu_bo_create_kernel - create BO for kernel use
318  *
319  * @adev: amdgpu device object
320  * @size: size for the new BO
321  * @align: alignment for the new BO
322  * @domain: where to place it
323  * @bo_ptr:  used to initialize BOs in structures
324  * @gpu_addr: GPU addr of the pinned BO
325  * @cpu_addr: optional CPU address mapping
326  *
327  * Allocates and pins a BO for kernel internal use.
328  *
329  * This function is exported to allow the V4L2 isp device
330  * external to drm device to create and access the kernel BO.
331  *
332  * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
333  *
334  * Returns:
335  * 0 on success, negative error code otherwise.
336  */
337 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
338 			    unsigned long size, int align,
339 			    u32 domain, struct amdgpu_bo **bo_ptr,
340 			    u64 *gpu_addr, void **cpu_addr)
341 {
342 	int r;
343 
344 	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
345 				      gpu_addr, cpu_addr);
346 
347 	if (r)
348 		return r;
349 
350 	if (*bo_ptr)
351 		amdgpu_bo_unreserve(*bo_ptr);
352 
353 	return 0;
354 }
355 
356 /**
357  * amdgpu_bo_create_isp_user - create user BO for isp
358  *
359  * @adev: amdgpu device object
360  * @dma_buf: DMABUF handle for isp buffer
361  * @domain: where to place it
362  * @bo:  used to initialize BOs in structures
363  * @gpu_addr: GPU addr of the pinned BO
364  *
365  * Imports isp DMABUF to allocate and pin a user BO for isp internal use. It does
366  * GART alloc to generate gpu_addr for BO to make it accessible through the
367  * GART aperture for ISP HW.
368  *
369  * This function is exported to allow the V4L2 isp device external to drm device
370  * to create and access the isp user BO.
371  *
372  * Returns:
373  * 0 on success, negative error code otherwise.
374  */
375 int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
376 			   struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
377 			   u64 *gpu_addr)
378 
379 {
380 	struct drm_gem_object *gem_obj;
381 	int r;
382 
383 	gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
384 	*bo = gem_to_amdgpu_bo(gem_obj);
385 	if (!(*bo)) {
386 		dev_err(adev->dev, "failed to get valid isp user bo\n");
387 		return -EINVAL;
388 	}
389 
390 	r = amdgpu_bo_reserve(*bo, false);
391 	if (r) {
392 		dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
393 		return r;
394 	}
395 
396 	r = amdgpu_bo_pin(*bo, domain);
397 	if (r) {
398 		dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
399 		goto error_unreserve;
400 	}
401 
402 	r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
403 	if (r) {
404 		dev_err(adev->dev, "%p bind failed\n", *bo);
405 		goto error_unpin;
406 	}
407 
408 	if (!WARN_ON(!gpu_addr))
409 		*gpu_addr = amdgpu_bo_gpu_offset(*bo);
410 
411 	amdgpu_bo_unreserve(*bo);
412 
413 	return 0;
414 
415 error_unpin:
416 	amdgpu_bo_unpin(*bo);
417 error_unreserve:
418 	amdgpu_bo_unreserve(*bo);
419 	amdgpu_bo_unref(bo);
420 
421 	return r;
422 }
423 
424 /**
425  * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
426  *
427  * @adev: amdgpu device object
428  * @offset: offset of the BO
429  * @size: size of the BO
430  * @bo_ptr:  used to initialize BOs in structures
431  * @cpu_addr: optional CPU address mapping
432  *
433  * Creates a kernel BO at a specific offset in VRAM.
434  *
435  * Returns:
436  * 0 on success, negative error code otherwise.
437  */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	/* Round the request down/up to whole pages */
	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	/* size == 0 makes amdgpu_bo_create_reserved() free the BO */
	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the request
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	/* Constrain every placement to exactly [offset, offset + size) */
	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	/* Re-establish the CPU mapping at the new location */
	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}
490 
491 /**
492  * amdgpu_bo_free_kernel - free BO for kernel use
493  *
494  * @bo: amdgpu BO to free
495  * @gpu_addr: pointer to where the BO's GPU memory space address was stored
496  * @cpu_addr: pointer to where the BO's CPU memory space address was stored
497  *
 * Unmaps and unpins a BO for kernel internal use.
499  *
500  * This function is exported to allow the V4L2 isp device
501  * external to drm device to free the kernel BO.
502  */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	/* Catch attempts to free kernel BOs while the device is suspending */
	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		/* Only unmap if the caller had requested a CPU mapping */
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	/* Clear the caller's cached addresses so they cannot be reused */
	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
526 
527 /**
528  * amdgpu_bo_free_isp_user - free BO for isp use
529  *
530  * @bo: amdgpu isp user BO to free
531  *
532  * unpin and unref BO for isp internal use.
533  *
534  * This function is exported to allow the V4L2 isp device
535  * external to drm device to free the isp user BO.
536  */
537 void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
538 {
539 	if (bo == NULL)
540 		return;
541 
542 	if (amdgpu_bo_reserve(bo, true) == 0) {
543 		amdgpu_bo_unpin(bo);
544 		amdgpu_bo_unreserve(bo);
545 	}
546 	amdgpu_bo_unref(&bo);
547 }
548 
/* Validate that the BO size fits within the total memory of the requested domain */
550 static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
551 					  unsigned long size, u32 domain)
552 {
553 	struct ttm_resource_manager *man = NULL;
554 
555 	/*
556 	 * If GTT is part of requested domains the check must succeed to
557 	 * allow fall back to GTT.
558 	 */
559 	if (domain & AMDGPU_GEM_DOMAIN_GTT)
560 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
561 	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
562 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
563 	else
564 		return true;
565 
566 	if (!man) {
567 		if (domain & AMDGPU_GEM_DOMAIN_GTT)
568 			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
569 		return false;
570 	}
571 
572 	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
573 	if (size < man->size)
574 		return true;
575 
576 	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
577 	return false;
578 }
579 
/*
 * amdgpu_bo_support_uswc - check whether write-combined (USWC) CPU mappings
 * can be used for a BO with the given creation flags on this platform and
 * kernel configuration. Returns false when WC must be masked off.
 */
bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	/* Tell the user once why their WC request is being ignored */
	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}
613 
614 /**
615  * amdgpu_bo_create - create an &amdgpu_bo buffer object
616  * @adev: amdgpu device object
617  * @bp: parameters to be used for the buffer object
618  * @bo_ptr: pointer to the buffer object pointer
619  *
620  * Creates an &amdgpu_bo buffer object.
621  *
622  * Returns:
623  * 0 for success or a negative error code on failure.
624  */
int amdgpu_bo_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		/* Kernel BOs are placed non-interruptibly and may not evict
		 * other reserved BOs.
		 */
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;

	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	/* Callers may over-allocate for a larger containing struct
	 * (amdgpu_bo_user / amdgpu_bo_vm), but never less than the base.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	/* Non-kernel, non-discardable VRAM-only BOs may also fall back to GTT */
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (adev->gmc.mem_partitions)
		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
		bo->xcp_id = bp->xcp_id_plus1 - 1;
	else
		/* For GPUs without spatial partitioning */
		bo->xcp_id = 0;

	/* Strip USWC where the platform cannot honor it */
	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	/* GDS/GWS/OA BOs start out in the CPU domain; the real placement is
	 * applied later.
	 */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	/* Eviction priority: kernel BOs highest, discardable BOs lowest */
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 2;
	else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	/* NOTE(review): on failure ttm_bo_init_reserved() is expected to
	 * invoke bp->destroy and free bo, hence no kvfree here — confirm.
	 */
	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx,  NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	/* Account bytes moved during placement for CS throttling */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	/* Clear newly allocated VRAM when requested, fencing the clear */
	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	/* Keep the BO reserved only when the caller supplied its own resv */
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}
746 
747 /**
748  * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
749  * @adev: amdgpu device object
750  * @bp: parameters to be used for the buffer object
751  * @ubo_ptr: pointer to the buffer object pointer
752  *
 * Create a BO to be used by a user application.
754  *
755  * Returns:
756  * 0 for success or a negative error code on failure.
757  */
758 
759 int amdgpu_bo_create_user(struct amdgpu_device *adev,
760 			  struct amdgpu_bo_param *bp,
761 			  struct amdgpu_bo_user **ubo_ptr)
762 {
763 	struct amdgpu_bo *bo_ptr;
764 	int r;
765 
766 	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
767 	bp->destroy = &amdgpu_bo_user_destroy;
768 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
769 	if (r)
770 		return r;
771 
772 	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
773 	return r;
774 }
775 
776 /**
777  * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
778  * @adev: amdgpu device object
779  * @bp: parameters to be used for the buffer object
780  * @vmbo_ptr: pointer to the buffer object pointer
781  *
782  * Create a BO to be for GPUVM.
783  *
784  * Returns:
785  * 0 for success or a negative error code on failure.
786  */
787 
788 int amdgpu_bo_create_vm(struct amdgpu_device *adev,
789 			struct amdgpu_bo_param *bp,
790 			struct amdgpu_bo_vm **vmbo_ptr)
791 {
792 	struct amdgpu_bo *bo_ptr;
793 	int r;
794 
795 	/* bo_ptr_size will be determined by the caller and it depends on
796 	 * num of amdgpu_vm_pt entries.
797 	 */
798 	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
799 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
800 	if (r)
801 		return r;
802 
803 	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
804 	return r;
805 }
806 
807 /**
808  * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
809  * @bo: &amdgpu_bo buffer object to be mapped
810  * @ptr: kernel virtual address to be returned
811  *
812  * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
813  * amdgpu_bo_kptr() to get the kernel virtual address.
814  *
815  * Returns:
816  * 0 for success or a negative error code on failure.
817  */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	/* BOs created without CPU access must never be CPU mapped */
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Wait for pending kernel-usage fences before touching the memory */
	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	/* Reuse an existing kernel mapping if one is already in place */
	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	/* Map the whole BO */
	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
847 
848 /**
849  * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
850  * @bo: &amdgpu_bo buffer object
851  *
852  * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
853  *
854  * Returns:
855  * the virtual address of a buffer object area.
856  */
857 void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
858 {
859 	bool is_iomem;
860 
861 	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
862 }
863 
864 /**
865  * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
866  * @bo: &amdgpu_bo buffer object to be unmapped
867  *
868  * Unmaps a kernel map set up by amdgpu_bo_kmap().
869  */
870 void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
871 {
872 	if (bo->kmap.bo)
873 		ttm_bo_kunmap(&bo->kmap);
874 }
875 
876 /**
877  * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
878  * @bo: &amdgpu_bo buffer object
879  *
880  * References the contained &ttm_buffer_object.
881  *
882  * Returns:
883  * a refcounted pointer to the &amdgpu_bo buffer object.
884  */
885 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
886 {
887 	if (bo == NULL)
888 		return NULL;
889 
890 	drm_gem_object_get(&bo->tbo.base);
891 	return bo;
892 }
893 
894 /**
895  * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
896  * @bo: &amdgpu_bo buffer object
897  *
898  * Unreferences the contained &ttm_buffer_object and clear the pointer
899  */
900 void amdgpu_bo_unref(struct amdgpu_bo **bo)
901 {
902 	if ((*bo) == NULL)
903 		return;
904 
905 	drm_gem_object_put(&(*bo)->tbo.base);
906 	*bo = NULL;
907 }
908 
909 /**
910  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
911  * @bo: &amdgpu_bo buffer object to be pinned
912  * @domain: domain to be pinned to
913  *
914  * Pins the buffer object according to requested domain. If the memory is
915  * unbound gart memory, binds the pages into gart table. Adjusts pin_count and
916  * pin_size accordingly.
917  *
918  * Pinning means to lock pages in memory along with keeping them at a fixed
919  * offset. It is required when a buffer can not be moved, for example, when
920  * a display buffer is being scanned out.
921  *
922  * Returns:
923  * 0 for success or a negative error code on failure.
924  */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	/* BOs backed by a userptr mapping can never be pinned */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (drm_gem_is_imported(&bo->tbo.base)) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* Already pinned: just bump the pin count, after verifying the
	 * current placement satisfies the requested domain and flags.
	 */
	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);
		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (drm_gem_is_imported(&bo->tbo.base))
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	/* Contiguous VRAM placement is mandatory while pinned */
	for (i = 0; i < bo->placement.num_placement; i++) {
		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
		    bo->placements[i].mem_type == TTM_PL_VRAM)
			bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		/* NOTE(review): if validation fails after the dma_buf_pin()
		 * above, the attachment is not unpinned on this error path —
		 * confirm whether callers handle that.
		 */
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	/* Account the pinned size against the per-domain counters */
	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
999 
1000 /**
1001  * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
1002  * @bo: &amdgpu_bo buffer object to be unpinned
1003  *
1004  * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	/* Still pinned by other users: leave the accounting untouched */
	if (bo->tbo.pin_count)
		return;

	if (drm_gem_is_imported(&bo->tbo.base))
		dma_buf_unpin(bo->tbo.base.import_attach);

	/* Last unpin: subtract the size from the per-domain pin counters */
	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

}
1030 
/* Human-readable VRAM type names, indexed by adev->gmc.vram_type
 * (printed by amdgpu_bo_init()).
 */
static const char * const amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5",
	"HBM3E",
	"HBM4"
};
1048 
1049 /**
1050  * amdgpu_bo_init - initialize memory manager
1051  * @adev: amdgpu device object
1052  *
1053  * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
1054  *
1055  * Returns:
1056  * 0 for success or a negative error code on failure.
1057  */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
				adev->gmc.aper_size);
	}

	/* Log the detected memory configuration before handing off to TTM */
	drm_info(adev_to_drm(adev), "Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	drm_info(adev_to_drm(adev), "RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}
1083 
1084 /**
1085  * amdgpu_bo_fini - tear down memory manager
1086  * @adev: amdgpu device object
1087  *
1088  * Reverses amdgpu_bo_init() to tear down memory manager.
1089  */
1090 void amdgpu_bo_fini(struct amdgpu_device *adev)
1091 {
1092 	int idx;
1093 
1094 	amdgpu_ttm_fini(adev);
1095 
1096 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1097 		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
1098 			arch_phys_wc_del(adev->gmc.vram_mtrr);
1099 			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
1100 		}
1101 		drm_dev_exit(idx);
1102 	}
1103 }
1104 
1105 /**
1106  * amdgpu_bo_set_tiling_flags - set tiling flags
1107  * @bo: &amdgpu_bo buffer object
1108  * @tiling_flags: new flags
1109  *
1110  * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
1111  * kernel driver to set the tiling flags on a buffer.
1112  *
1113  * Returns:
1114  * 0 for success or a negative error code on failure.
1115  */
1116 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1117 {
1118 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1119 	struct amdgpu_bo_user *ubo;
1120 
1121 	/* MMIO_REMAP is BAR I/O space; tiling should never be used here. */
1122 	WARN_ON_ONCE(bo->tbo.resource &&
1123 		     bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP);
1124 
1125 	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1126 	if (adev->family <= AMDGPU_FAMILY_CZ &&
1127 	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1128 		return -EINVAL;
1129 
1130 	ubo = to_amdgpu_bo_user(bo);
1131 	ubo->tiling_flags = tiling_flags;
1132 	return 0;
1133 }
1134 
1135 /**
1136  * amdgpu_bo_get_tiling_flags - get tiling flags
1137  * @bo: &amdgpu_bo buffer object
1138  * @tiling_flags: returned flags
1139  *
1140  * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
1141  * set the tiling flags on a buffer.
1142  */
1143 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1144 {
1145 	struct amdgpu_bo_user *ubo;
1146 
1147 	/*
1148 	 * MMIO_REMAP BOs are not real VRAM/GTT memory but a fixed BAR I/O window.
1149 	 * They should never go through GEM tiling helpers.
1150 	 */
1151 	WARN_ON_ONCE(bo->tbo.resource &&
1152 		     bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP);
1153 
1154 	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1155 	dma_resv_assert_held(bo->tbo.base.resv);
1156 	ubo = to_amdgpu_bo_user(bo);
1157 
1158 	if (tiling_flags)
1159 		*tiling_flags = ubo->tiling_flags;
1160 }
1161 
1162 /**
1163  * amdgpu_bo_set_metadata - set metadata
1164  * @bo: &amdgpu_bo buffer object
1165  * @metadata: new metadata
1166  * @metadata_size: size of the new metadata
1167  * @flags: flags of the new metadata
1168  *
1169  * Sets buffer object's metadata, its size and flags.
1170  * Used via GEM ioctl.
1171  *
1172  * Returns:
1173  * 0 for success or a negative error code on failure.
1174  */
1175 int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
1176 			   u32 metadata_size, uint64_t flags)
1177 {
1178 	struct amdgpu_bo_user *ubo;
1179 	void *buffer;
1180 
1181 	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1182 	ubo = to_amdgpu_bo_user(bo);
1183 	if (!metadata_size) {
1184 		if (ubo->metadata_size) {
1185 			kfree(ubo->metadata);
1186 			ubo->metadata = NULL;
1187 			ubo->metadata_size = 0;
1188 		}
1189 		return 0;
1190 	}
1191 
1192 	if (metadata == NULL)
1193 		return -EINVAL;
1194 
1195 	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1196 	if (buffer == NULL)
1197 		return -ENOMEM;
1198 
1199 	kfree(ubo->metadata);
1200 	ubo->metadata_flags = flags;
1201 	ubo->metadata = buffer;
1202 	ubo->metadata_size = metadata_size;
1203 
1204 	return 0;
1205 }
1206 
1207 /**
1208  * amdgpu_bo_get_metadata - get metadata
1209  * @bo: &amdgpu_bo buffer object
1210  * @buffer: returned metadata
1211  * @buffer_size: size of the buffer
1212  * @metadata_size: size of the returned metadata
1213  * @flags: flags of the returned metadata
1214  *
1215  * Gets buffer object's metadata, its size and flags. buffer_size shall not be
1216  * less than metadata_size.
1217  * Used via GEM ioctl.
1218  *
1219  * Returns:
1220  * 0 for success or a negative error code on failure.
1221  */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	/* At least one output (data or size) must be requested. */
	if (!buffer && !metadata_size)
		return -EINVAL;

	/* Metadata only exists on user BOs. */
	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	/* Report the size first, so a caller with a too-small buffer still
	 * learns how much space is required even when we return -EINVAL below.
	 */
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}
1249 
1250 /**
1251  * amdgpu_bo_move_notify - notification about a memory move
1252  * @bo: pointer to a buffer object
1253  * @evict: if this move is evicting the buffer from the graphics address space
1254  * @new_mem: new resource for backing the BO
1255  *
1256  * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
1257  * bookkeeping.
1258  * TTM driver callback which is called when ttm moves a buffer.
1259  */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct ttm_resource *old_mem = bo->resource;
	struct amdgpu_bo *abo;

	/* TTM calls this for every BO on the device; skip foreign ones. */
	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	/* Invalidate VM mappings that still point at the old location. */
	amdgpu_vm_bo_move(abo, new_mem, evict);

	/* Any kernel CPU mapping refers to the old backing store; drop it. */
	amdgpu_bo_kunmap(abo);

	/* For exported (not imported) BOs leaving a non-system placement,
	 * tell importers to re-map since the backing pages are changing.
	 */
	if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
	    old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
		dma_buf_invalidate_mappings(abo->tbo.base.dma_buf);

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
			     old_mem ? old_mem->mem_type : -1);
}
1283 
1284 /**
1285  * amdgpu_bo_release_notify - notification about a BO being released
1286  * @bo: pointer to a buffer object
1287  *
1288  * Wipes VRAM buffers whose contents should not be leaked before the
1289  * memory is released.
1290  */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	/* TTM calls this for every BO on the device; skip foreign ones. */
	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	/* A BO being released must not be referenced by any VM anymore. */
	WARN_ON(abo->vm_bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/*
	 * We lock the private dma_resv object here and since the BO is about to
	 * be released nobody else should have a pointer to it.
	 * So when this locking here fails something is wrong with the reference
	 * counting.
	 */
	if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
		return;

	amdgpu_amdkfd_remove_all_eviction_fences(abo);

	/* Only wipe VRAM BOs that requested it, and only while the device is
	 * fully operational (no wipe during suspend or after unplug).
	 */
	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
		goto out;

	/* Make room for the clear fence before submitting the fill job. */
	r = dma_resv_reserve_fences(&bo->base._resv, 1);
	if (r)
		goto out;

	/* Submit a fill job that clears the whole buffer to zero. */
	r = amdgpu_fill_buffer(amdgpu_ttm_next_clear_entity(adev),
			       abo, 0, &bo->base._resv,
			       &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
	if (WARN_ON(r))
		goto out;

	/* Record the clear so the VRAM manager can hand out pre-cleared
	 * memory later, and keep the release ordered after the fill job.
	 */
	amdgpu_vram_mgr_set_cleared(bo->resource);
	dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
	dma_fence_put(fence);

out:
	dma_resv_unlock(&bo->base._resv);
}
1341 
1342 /**
1343  * amdgpu_bo_fault_reserve_notify - notification about a memory fault
1344  * @bo: pointer to a buffer object
1345  *
1346  * Notifies the driver we are taking a fault on this BO and have reserved it,
1347  * also performs bookkeeping.
1348  * TTM driver callback for dealing with vm faults.
1349  *
1350  * Returns:
 * 0 on success or a VM_FAULT_ code (VM_FAULT_NOPAGE, VM_FAULT_SIGBUS) on failure.
1352  */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	/* Already CPU-accessible where it is: nothing to do. */
	if (amdgpu_res_cpu_visible(adev, bo->resource))
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;

	/* Move the BO to a CPU-visible placement; NOPAGE lets the fault
	 * be retried when the validate was only temporarily blocked.
	 */
	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    !amdgpu_res_cpu_visible(adev, bo->resource))
		return VM_FAULT_SIGBUS;

	/* Freshly faulted BO counts as recently used for eviction purposes. */
	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}
1392 
1393 /**
1394  * amdgpu_bo_fence - add fence to buffer object
1395  *
1396  * @bo: buffer object in question
1397  * @fence: fence to add
1398  * @shared: true if fence should be added shared
1399  *
1400  */
1401 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1402 		     bool shared)
1403 {
1404 	struct dma_resv *resv = bo->tbo.base.resv;
1405 	int r;
1406 
1407 	r = dma_resv_reserve_fences(resv, 1);
1408 	if (r) {
1409 		/* As last resort on OOM we block for the fence */
1410 		dma_fence_wait(fence, false);
1411 		return;
1412 	}
1413 
1414 	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
1415 			   DMA_RESV_USAGE_WRITE);
1416 }
1417 
1418 /**
1419  * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
1420  *
1421  * @adev: amdgpu device pointer
1422  * @resv: reservation object to sync to
1423  * @sync_mode: synchronization mode
1424  * @owner: fence owner
1425  * @intr: Whether the wait is interruptible
1426  *
1427  * Extract the fences from the reservation object and waits for them to finish.
1428  *
1429  * Returns:
1430  * 0 on success, errno otherwise.
1431  */
1432 int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
1433 			     enum amdgpu_sync_mode sync_mode, void *owner,
1434 			     bool intr)
1435 {
1436 	struct amdgpu_sync sync;
1437 	int r;
1438 
1439 	amdgpu_sync_create(&sync);
1440 	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
1441 	r = amdgpu_sync_wait(&sync, intr);
1442 	amdgpu_sync_free(&sync);
1443 	return r;
1444 }
1445 
1446 /**
1447  * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
1448  * @bo: buffer object to wait for
1449  * @owner: fence owner
1450  * @intr: Whether the wait is interruptible
1451  *
1452  * Wrapper to wait for fences in a BO.
1453  * Returns:
1454  * 0 on success, errno otherwise.
1455  */
1456 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1457 {
1458 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1459 
1460 	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
1461 					AMDGPU_SYNC_NE_OWNER, owner, intr);
1462 }
1463 
1464 /**
1465  * amdgpu_bo_gpu_offset - return GPU offset of bo
1466  * @bo:	amdgpu object for which we query the offset
1467  *
1468  * Note: object should either be pinned or reserved when calling this
1469  * function, it might be useful to add check for this for debugging.
1470  *
1471  * Returns:
1472  * current GPU offset of the object.
1473  */
1474 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1475 {
1476 	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
1477 	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
1478 		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
1479 	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
1480 	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
1481 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1482 
1483 	return amdgpu_bo_gpu_offset_no_check(bo);
1484 }
1485 
1486 /**
1487  * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
1488  * @bo:	amdgpu VRAM buffer object for which we query the offset
1489  *
1490  * Returns:
1491  * current FB aperture GPU offset of the object.
1492  */
1493 u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
1494 {
1495 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1496 	uint64_t offset, fb_base;
1497 
1498 	WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
1499 
1500 	fb_base = adev->gmc.fb_start;
1501 	fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1502 	offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
1503 	return amdgpu_gmc_sign_extend(offset);
1504 }
1505 
1506 /**
1507  * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
1508  * @bo:	amdgpu object for which we query the offset
1509  *
1510  * Returns:
1511  * current GPU offset of the object without raising warnings.
1512  */
1513 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
1514 {
1515 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1516 	uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
1517 
1518 	if (bo->tbo.resource->mem_type == TTM_PL_TT)
1519 		offset = amdgpu_gmc_agp_addr(&bo->tbo);
1520 
1521 	if (offset == AMDGPU_BO_INVALID_OFFSET)
1522 		offset = (bo->tbo.resource->start << PAGE_SHIFT) +
1523 			amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
1524 
1525 	return amdgpu_gmc_sign_extend(offset);
1526 }
1527 
1528 /**
1529  * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
1530  * @bo:	the buffer object we should look at
1531  *
1532  * BO can have multiple preferred placements, to avoid double counting we want
1533  * to file it under a single placement for memory stats.
1534  * Luckily, if we take the highest set bit in preferred_domains the result is
1535  * quite sensible.
1536  *
1537  * Returns:
1538  * Which of the placements should the BO be accounted under.
1539  */
1540 uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
1541 {
1542 	u32 domain;
1543 
1544 	/*
1545 	 * MMIO_REMAP is internal now, so it no longer maps from a userspace
1546 	 * domain bit. Keep fdinfo/mem-stats visibility by checking the actual
1547 	 * TTM placement.
1548 	 */
1549 	if (bo->tbo.resource && bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP)
1550 		return AMDGPU_PL_MMIO_REMAP;
1551 
1552 	domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
1553 	if (!domain)
1554 		return TTM_PL_SYSTEM;
1555 
1556 	switch (rounddown_pow_of_two(domain)) {
1557 	case AMDGPU_GEM_DOMAIN_CPU:
1558 		return TTM_PL_SYSTEM;
1559 	case AMDGPU_GEM_DOMAIN_GTT:
1560 		return TTM_PL_TT;
1561 	case AMDGPU_GEM_DOMAIN_VRAM:
1562 		return TTM_PL_VRAM;
1563 	case AMDGPU_GEM_DOMAIN_GDS:
1564 		return AMDGPU_PL_GDS;
1565 	case AMDGPU_GEM_DOMAIN_GWS:
1566 		return AMDGPU_PL_GWS;
1567 	case AMDGPU_GEM_DOMAIN_OA:
1568 		return AMDGPU_PL_OA;
1569 	case AMDGPU_GEM_DOMAIN_DOORBELL:
1570 		return AMDGPU_PL_DOORBELL;
1571 	default:
1572 		return TTM_PL_SYSTEM;
1573 	}
1574 }
1575 
1576 /**
1577  * amdgpu_bo_get_preferred_domain - get preferred domain
1578  * @adev: amdgpu device object
1579  * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
1580  *
1581  * Returns:
1582  * Which of the allowed domains is preferred for allocating the BO.
1583  */
1584 uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
1585 					    uint32_t domain)
1586 {
1587 	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
1588 	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
1589 		domain = AMDGPU_GEM_DOMAIN_VRAM;
1590 		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1591 			domain = AMDGPU_GEM_DOMAIN_GTT;
1592 	}
1593 	return domain;
1594 }
1595 
1596 #if defined(CONFIG_DEBUG_FS)
/* Print " <flag>" on @m when @bo has the AMDGPU_GEM_CREATE_<flag> bit set. */
#define amdgpu_bo_print_flag(m, bo, flag)		        \
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)
1603 
1604 /**
1605  * amdgpu_bo_print_info - print BO info in debugfs file
1606  *
1607  * @id: Index or Id of the BO
1608  * @bo: Requested BO for printing info
1609  * @m: debugfs file
1610  *
1611  * Print BO information in debugfs file
1612  *
1613  * Returns:
1614  * Size of the BO in bytes.
1615  */
1616 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
1617 {
1618 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1619 	struct dma_buf_attachment *attachment;
1620 	struct dma_buf *dma_buf;
1621 	const char *placement;
1622 	unsigned int pin_count;
1623 	u64 size;
1624 
1625 	if (dma_resv_trylock(bo->tbo.base.resv)) {
1626 		if (!bo->tbo.resource) {
1627 			placement = "NONE";
1628 		} else {
1629 			switch (bo->tbo.resource->mem_type) {
1630 			case TTM_PL_VRAM:
1631 				if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
1632 					placement = "VRAM VISIBLE";
1633 				else
1634 					placement = "VRAM";
1635 				break;
1636 			case TTM_PL_TT:
1637 				placement = "GTT";
1638 				break;
1639 			case AMDGPU_PL_GDS:
1640 				placement = "GDS";
1641 				break;
1642 			case AMDGPU_PL_GWS:
1643 				placement = "GWS";
1644 				break;
1645 			case AMDGPU_PL_OA:
1646 				placement = "OA";
1647 				break;
1648 			case AMDGPU_PL_PREEMPT:
1649 				placement = "PREEMPTIBLE";
1650 				break;
1651 			case AMDGPU_PL_DOORBELL:
1652 				placement = "DOORBELL";
1653 				break;
1654 			case AMDGPU_PL_MMIO_REMAP:
1655 				placement = "MMIO REMAP";
1656 				break;
1657 			case TTM_PL_SYSTEM:
1658 			default:
1659 				placement = "CPU";
1660 				break;
1661 			}
1662 		}
1663 		dma_resv_unlock(bo->tbo.base.resv);
1664 	} else {
1665 		placement = "UNKNOWN";
1666 	}
1667 
1668 	size = amdgpu_bo_size(bo);
1669 	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
1670 			id, size, placement);
1671 
1672 	pin_count = READ_ONCE(bo->tbo.pin_count);
1673 	if (pin_count)
1674 		seq_printf(m, " pin count %d", pin_count);
1675 
1676 	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
1677 	attachment = READ_ONCE(bo->tbo.base.import_attach);
1678 
1679 	if (attachment)
1680 		seq_printf(m, " imported from ino:%llu", file_inode(dma_buf->file)->i_ino);
1681 	else if (dma_buf)
1682 		seq_printf(m, " exported as ino:%llu", file_inode(dma_buf->file)->i_ino);
1683 
1684 	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
1685 	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
1686 	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
1687 	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
1688 	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
1689 	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
1690 	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
1691 	/* Add the gem obj resv fence dump*/
1692 	if (dma_resv_trylock(bo->tbo.base.resv)) {
1693 		dma_resv_describe(bo->tbo.base.resv, m);
1694 		dma_resv_unlock(bo->tbo.base.resv);
1695 	}
1696 	seq_puts(m, "\n");
1697 
1698 	return size;
1699 }
1700 #endif
1701