xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c (revision 75372d75a4e23783583998ed99d5009d555850da)
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32 #include <linux/list.h>
33 #include <linux/slab.h>
34 #include <linux/dma-buf.h>
35 #include <linux/export.h>
36 
37 #include <drm/drm_drv.h>
38 #include <drm/amdgpu_drm.h>
39 #include <drm/drm_cache.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_amdkfd.h"
43 #include "amdgpu_vram_mgr.h"
44 #include "amdgpu_vm.h"
45 #include "amdgpu_dma_buf.h"
46 
47 /**
48  * DOC: amdgpu_object
49  *
50  * This defines the interfaces to operate on an &amdgpu_bo buffer object which
51  * represents memory used by the driver (VRAM, system memory, etc.). The driver
52  * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
53  * to create/destroy/set buffer objects, which are then managed by the kernel
54  * TTM memory manager.
55  * The interfaces are also used internally by kernel clients, including gfx,
56  * uvd, etc. for kernel managed allocations used by the GPU.
57  *
58  */
59 
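/*
 * Illustrative sketch only (not part of the driver): the typical lifecycle of
 * a kernel-owned BO using the helpers in this file. Error handling is trimmed
 * and "adev" is assumed to be a valid struct amdgpu_device pointer.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	memset(cpu_ptr, 0, PAGE_SIZE);	// BO is pinned and CPU mapped here
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
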
60 static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
61 {
62 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
63 
64 	amdgpu_bo_kunmap(bo);
65 
66 	if (drm_gem_is_imported(&bo->tbo.base))
67 		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
68 	drm_gem_object_release(&bo->tbo.base);
69 	amdgpu_bo_unref(&bo->parent);
70 	kvfree(bo);
71 }
72 
73 static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
74 {
75 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
76 	struct amdgpu_bo_user *ubo;
77 
78 	ubo = to_amdgpu_bo_user(bo);
79 	kfree(ubo->metadata);
80 	amdgpu_bo_destroy(tbo);
81 }
82 
83 /**
84  * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
85  * @bo: buffer object to be checked
86  *
87  * Uses the destroy function associated with the object to determine if this
88  * is an &amdgpu_bo.
89  *
90  * Returns:
91  * true if the object is an &amdgpu_bo, false otherwise.
92  */
93 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
94 {
95 	if (bo->destroy == &amdgpu_bo_destroy ||
96 	    bo->destroy == &amdgpu_bo_user_destroy)
97 		return true;
98 
99 	return false;
100 }
101 
102 /**
103  * amdgpu_bo_placement_from_domain - set buffer's placement
104  * @abo: &amdgpu_bo buffer object whose placement is to be set
105  * @domain: requested domain
106  *
107  * Sets buffer's placement according to requested domain and the buffer's
108  * flags.
109  */
110 void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
111 {
112 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
113 	struct ttm_placement *placement = &abo->placement;
114 	struct ttm_place *places = abo->placements;
115 	u64 flags = abo->flags;
116 	u32 c = 0;
117 
118 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
119 		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
120 		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
121 
122 		if (adev->gmc.mem_partitions && mem_id >= 0) {
123 			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
124 			/*
125 			 * memory partition range lpfn is inclusive start + size - 1
126 			 * TTM place lpfn is exclusive start + size
127 			 */
128 			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
129 		} else {
130 			places[c].fpfn = 0;
131 			places[c].lpfn = 0;
132 		}
133 		places[c].mem_type = TTM_PL_VRAM;
134 		places[c].flags = 0;
135 
136 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
137 			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
138 		else
139 			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
140 
141 		if (abo->tbo.type == ttm_bo_type_kernel &&
142 		    flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
143 			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
144 
145 		c++;
146 	}
147 
148 	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
149 		places[c].fpfn = 0;
150 		places[c].lpfn = 0;
151 		places[c].mem_type = AMDGPU_PL_DOORBELL;
152 		places[c].flags = 0;
153 		c++;
154 	}
155 
156 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
157 		places[c].fpfn = 0;
158 		places[c].lpfn = 0;
159 		places[c].mem_type =
160 			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
161 			AMDGPU_PL_PREEMPT : TTM_PL_TT;
162 		places[c].flags = 0;
163 		/*
164 		 * When GTT is just an alternative to VRAM make sure that we
165 		 * only use it as fallback and still try to fill up VRAM first.
166 		 */
167 		if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
168 		    domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
169 			places[c].flags |= TTM_PL_FLAG_FALLBACK;
170 		c++;
171 	}
172 
173 	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
174 		places[c].fpfn = 0;
175 		places[c].lpfn = 0;
176 		places[c].mem_type = TTM_PL_SYSTEM;
177 		places[c].flags = 0;
178 		c++;
179 	}
180 
181 	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
182 		places[c].fpfn = 0;
183 		places[c].lpfn = 0;
184 		places[c].mem_type = AMDGPU_PL_GDS;
185 		places[c].flags = 0;
186 		c++;
187 	}
188 
189 	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
190 		places[c].fpfn = 0;
191 		places[c].lpfn = 0;
192 		places[c].mem_type = AMDGPU_PL_GWS;
193 		places[c].flags = 0;
194 		c++;
195 	}
196 
197 	if (domain & AMDGPU_GEM_DOMAIN_OA) {
198 		places[c].fpfn = 0;
199 		places[c].lpfn = 0;
200 		places[c].mem_type = AMDGPU_PL_OA;
201 		places[c].flags = 0;
202 		c++;
203 	}
204 
205 	if (!c) {
206 		places[c].fpfn = 0;
207 		places[c].lpfn = 0;
208 		places[c].mem_type = TTM_PL_SYSTEM;
209 		places[c].flags = 0;
210 		c++;
211 	}
212 
213 	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
214 
215 	placement->num_placement = c;
216 	placement->placement = places;
217 }
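
/*
 * Illustrative sketch only: how a caller typically combines
 * amdgpu_bo_placement_from_domain() with ttm_bo_validate() when migrating an
 * already reserved BO ("abo" is assumed to be reserved by the caller).
 *
 *	struct ttm_operation_ctx ctx = { true, false };
 *	int r;
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					     AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 *
 * When both domains are requested and VRAM is among the preferred domains,
 * the GTT place gets TTM_PL_FLAG_FALLBACK on dGPUs so VRAM is still filled
 * first.
 */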
218 
219 /**
220  * amdgpu_bo_create_reserved - create reserved BO for kernel use
221  *
222  * @adev: amdgpu device object
223  * @size: size for the new BO
224  * @align: alignment for the new BO
225  * @domain: where to place it
226  * @bo_ptr: used to initialize BOs in structures
227  * @gpu_addr: GPU addr of the pinned BO
228  * @cpu_addr: optional CPU address mapping
229  *
230  * Allocates and pins a BO for kernel internal use, and returns it still
231  * reserved.
232  *
233  * Note: A new BO is only created if *bo_ptr is NULL; otherwise it is reused.
234  *
235  * Returns:
236  * 0 on success, negative error code otherwise.
237  */
238 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
239 			      unsigned long size, int align,
240 			      u32 domain, struct amdgpu_bo **bo_ptr,
241 			      u64 *gpu_addr, void **cpu_addr)
242 {
243 	struct amdgpu_bo_param bp;
244 	bool free = false;
245 	int r;
246 
247 	if (!size) {
248 		amdgpu_bo_unref(bo_ptr);
249 		return 0;
250 	}
251 
252 	memset(&bp, 0, sizeof(bp));
253 	bp.size = size;
254 	bp.byte_align = align;
255 	bp.domain = domain;
256 	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
257 		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
258 	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
259 	bp.type = ttm_bo_type_kernel;
260 	bp.resv = NULL;
261 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
262 
263 	if (!*bo_ptr) {
264 		r = amdgpu_bo_create(adev, &bp, bo_ptr);
265 		if (r) {
266 			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
267 				r);
268 			return r;
269 		}
270 		free = true;
271 	}
272 
273 	r = amdgpu_bo_reserve(*bo_ptr, false);
274 	if (r) {
275 		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
276 		goto error_free;
277 	}
278 
279 	r = amdgpu_bo_pin(*bo_ptr, domain);
280 	if (r) {
281 		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
282 		goto error_unreserve;
283 	}
284 
285 	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
286 	if (r) {
287 		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
288 		goto error_unpin;
289 	}
290 
291 	if (gpu_addr)
292 		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
293 
294 	if (cpu_addr) {
295 		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
296 		if (r) {
297 			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
298 			goto error_unpin;
299 		}
300 	}
301 
302 	return 0;
303 
304 error_unpin:
305 	amdgpu_bo_unpin(*bo_ptr);
306 error_unreserve:
307 	amdgpu_bo_unreserve(*bo_ptr);
308 
309 error_free:
310 	if (free)
311 		amdgpu_bo_unref(bo_ptr);
312 
313 	return r;
314 }
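
/*
 * Illustrative sketch only: amdgpu_bo_create_reserved() hands the BO back
 * pinned and still reserved, so the caller can finish setup before dropping
 * the reservation (amdgpu_bo_create_kernel() below does exactly that).
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				      &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	... setup that must run while the reservation is held ...
 *	amdgpu_bo_unreserve(bo);
 */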
315 
316 /**
317  * amdgpu_bo_create_kernel - create BO for kernel use
318  *
319  * @adev: amdgpu device object
320  * @size: size for the new BO
321  * @align: alignment for the new BO
322  * @domain: where to place it
323  * @bo_ptr:  used to initialize BOs in structures
324  * @gpu_addr: GPU addr of the pinned BO
325  * @cpu_addr: optional CPU address mapping
326  *
327  * Allocates and pins a BO for kernel internal use.
328  *
329  * This function is exported to allow the V4L2 ISP device, which is external
330  * to the DRM device, to create and access the kernel BO.
331  *
332  * Note: A new BO is only created if *bo_ptr is NULL; otherwise it is reused.
333  *
334  * Returns:
335  * 0 on success, negative error code otherwise.
336  */
337 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
338 			    unsigned long size, int align,
339 			    u32 domain, struct amdgpu_bo **bo_ptr,
340 			    u64 *gpu_addr, void **cpu_addr)
341 {
342 	int r;
343 
344 	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
345 				      gpu_addr, cpu_addr);
346 
347 	if (r)
348 		return r;
349 
350 	if (*bo_ptr)
351 		amdgpu_bo_unreserve(*bo_ptr);
352 
353 	return 0;
354 }
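
/*
 * Illustrative sketch only: passing a NULL cpu_addr makes the helpers request
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS, the usual pattern for GPU-only scratch
 * buffers ("sz" is a placeholder size).
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, sz, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, NULL);
 */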
355 
356 /**
357  * amdgpu_bo_create_isp_user - create user BO for isp
358  *
359  * @adev: amdgpu device object
360  * @dma_buf: DMABUF handle for isp buffer
361  * @domain: where to place it
362  * @bo:  used to initialize BOs in structures
363  * @gpu_addr: GPU addr of the pinned BO
364  *
365  * Imports the ISP DMA-BUF to allocate and pin a user BO for ISP internal use.
366  * It also allocates GART space to generate a gpu_addr for the BO, making it
367  * accessible through the GART aperture for the ISP HW.
368  *
369  * This function is exported to allow the V4L2 ISP device, which is external
370  * to the DRM device, to create and access the ISP user BO.
371  *
372  * Returns:
373  * 0 on success, negative error code otherwise.
374  */
375 int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
376 			   struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
377 			   u64 *gpu_addr)
378 
379 {
380 	struct drm_gem_object *gem_obj;
381 	int r;
382 
383 	gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
384 	*bo = gem_to_amdgpu_bo(gem_obj);
385 	if (!(*bo)) {
386 		dev_err(adev->dev, "failed to get valid isp user bo\n");
387 		return -EINVAL;
388 	}
389 
390 	r = amdgpu_bo_reserve(*bo, false);
391 	if (r) {
392 		dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
393 		return r;
394 	}
395 
396 	r = amdgpu_bo_pin(*bo, domain);
397 	if (r) {
398 		dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
399 		goto error_unreserve;
400 	}
401 
402 	r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
403 	if (r) {
404 		dev_err(adev->dev, "%p bind failed\n", *bo);
405 		goto error_unpin;
406 	}
407 
408 	if (!WARN_ON(!gpu_addr))
409 		*gpu_addr = amdgpu_bo_gpu_offset(*bo);
410 
411 	amdgpu_bo_unreserve(*bo);
412 
413 	return 0;
414 
415 error_unpin:
416 	amdgpu_bo_unpin(*bo);
417 error_unreserve:
418 	amdgpu_bo_unreserve(*bo);
419 	amdgpu_bo_unref(bo);
420 
421 	return r;
422 }
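
/*
 * Illustrative sketch only, for the exported ISP helpers: "isp_buf" is a
 * hypothetical struct dma_buf handed over by the V4L2 ISP driver.
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_isp_user(adev, isp_buf, AMDGPU_GEM_DOMAIN_GTT,
 *				      &bo, &gpu_addr);
 *	if (r)
 *		return r;
 *	... program the ISP HW with gpu_addr ...
 *	amdgpu_bo_free_isp_user(bo);
 */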
423 
424 /**
425  * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
426  *
427  * @adev: amdgpu device object
428  * @offset: offset of the BO
429  * @size: size of the BO
430  * @bo_ptr:  used to initialize BOs in structures
431  * @cpu_addr: optional CPU address mapping
432  *
433  * Creates a kernel BO at a specific offset in VRAM.
434  *
435  * Returns:
436  * 0 on success, negative error code otherwise.
437  */
438 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
439 			       uint64_t offset, uint64_t size,
440 			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
441 {
442 	struct ttm_operation_ctx ctx = { false, false };
443 	unsigned int i;
444 	int r;
445 
446 	offset &= PAGE_MASK;
447 	size = ALIGN(size, PAGE_SIZE);
448 
449 	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
450 				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
451 				      cpu_addr);
452 	if (r)
453 		return r;
454 
455 	if ((*bo_ptr) == NULL)
456 		return 0;
457 
458 	/*
459 	 * Remove the original mem node and create a new one at the request
460 	 * position.
461 	 */
462 	if (cpu_addr)
463 		amdgpu_bo_kunmap(*bo_ptr);
464 
465 	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
466 
467 	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
468 		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
469 		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
470 	}
471 	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
472 			     &(*bo_ptr)->tbo.resource, &ctx);
473 	if (r)
474 		goto error;
475 
476 	if (cpu_addr) {
477 		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
478 		if (r)
479 			goto error;
480 	}
481 
482 	amdgpu_bo_unreserve(*bo_ptr);
483 	return 0;
484 
485 error:
486 	amdgpu_bo_unreserve(*bo_ptr);
487 	amdgpu_bo_unref(bo_ptr);
488 	return r;
489 }
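
/*
 * Illustrative sketch only: carving out a fixed VRAM range, e.g. for a
 * firmware-reserved region ("reserved_offset"/"reserved_size" are
 * placeholders).
 *
 *	struct amdgpu_bo *bo = NULL;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel_at(adev, reserved_offset, reserved_size,
 *				       &bo, NULL);
 */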
490 
491 /**
492  * amdgpu_bo_free_kernel - free BO for kernel use
493  *
494  * @bo: amdgpu BO to free
495  * @gpu_addr: pointer to where the BO's GPU memory space address was stored
496  * @cpu_addr: pointer to where the BO's CPU memory space address was stored
497  *
498  * Unmaps and unpins a BO for kernel internal use.
499  *
500  * This function is exported to allow the V4L2 ISP device, which is external
501  * to the DRM device, to free the kernel BO.
502  */
503 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
504 			   void **cpu_addr)
505 {
506 	if (*bo == NULL)
507 		return;
508 
509 	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
510 
511 	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
512 		if (cpu_addr)
513 			amdgpu_bo_kunmap(*bo);
514 
515 		amdgpu_bo_unpin(*bo);
516 		amdgpu_bo_unreserve(*bo);
517 	}
518 	amdgpu_bo_unref(bo);
519 
520 	if (gpu_addr)
521 		*gpu_addr = 0;
522 
523 	if (cpu_addr)
524 		*cpu_addr = NULL;
525 }
526 
527 /**
528  * amdgpu_bo_free_isp_user - free BO for isp use
529  *
530  * @bo: amdgpu isp user BO to free
531  *
532  * Unpins and unrefs a BO for ISP internal use.
533  *
534  * This function is exported to allow the V4L2 ISP device, which is external
535  * to the DRM device, to free the ISP user BO.
536  */
537 void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
538 {
539 	if (bo == NULL)
540 		return;
541 
542 	if (amdgpu_bo_reserve(bo, true) == 0) {
543 		amdgpu_bo_unpin(bo);
544 		amdgpu_bo_unreserve(bo);
545 	}
546 	amdgpu_bo_unref(&bo);
547 }
548 
549 /* Validate that the BO size fits into the requested domain's total memory */
550 static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
551 					  unsigned long size, u32 domain)
552 {
553 	struct ttm_resource_manager *man = NULL;
554 
555 	/*
556 	 * If GTT is part of requested domains the check must succeed to
557 	 * allow fall back to GTT.
558 	 */
559 	if (domain & AMDGPU_GEM_DOMAIN_GTT)
560 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
561 	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
562 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
563 	else
564 		return true;
565 
566 	if (!man) {
567 		if (domain & AMDGPU_GEM_DOMAIN_GTT)
568 			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
569 		return false;
570 	}
571 
572 	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
573 	if (size < man->size)
574 		return true;
575 
576 	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
577 	return false;
578 }
579 
580 bool amdgpu_bo_support_uswc(u64 bo_flags)
581 {
582 
583 #ifdef CONFIG_X86_32
584 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
585 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
586 	 */
587 	return false;
588 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
589 	/* Don't try to enable write-combining when it can't work, or things
590 	 * may be slow
591 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
592 	 */
593 
594 #ifndef CONFIG_COMPILE_TEST
595 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
596 	 thanks to write-combining
597 #endif
598 
599 	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
600 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
601 			      "better performance thanks to write-combining\n");
602 	return false;
603 #else
604 	/* For architectures that don't support WC memory,
605 	 * mask out the WC flag from the BO
606 	 */
607 	if (!drm_arch_can_wc_memory())
608 		return false;
609 
610 	return true;
611 #endif
612 }
613 
614 /**
615  * amdgpu_bo_create - create an &amdgpu_bo buffer object
616  * @adev: amdgpu device object
617  * @bp: parameters to be used for the buffer object
618  * @bo_ptr: pointer to the buffer object pointer
619  *
620  * Creates an &amdgpu_bo buffer object.
621  *
622  * Returns:
623  * 0 for success or a negative error code on failure.
624  */
625 int amdgpu_bo_create(struct amdgpu_device *adev,
626 			       struct amdgpu_bo_param *bp,
627 			       struct amdgpu_bo **bo_ptr)
628 {
629 	struct ttm_operation_ctx ctx = {
630 		.interruptible = (bp->type != ttm_bo_type_kernel),
631 		.no_wait_gpu = bp->no_wait_gpu,
632 		/* We opt to avoid OOM on system pages allocations */
633 		.gfp_retry_mayfail = true,
634 		.allow_res_evict = bp->type != ttm_bo_type_kernel,
635 		.resv = bp->resv
636 	};
637 	struct amdgpu_bo *bo;
638 	unsigned long page_align, size = bp->size;
639 	int r;
640 
641 	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
642 	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
643 		/* GWS and OA don't need any alignment. */
644 		page_align = bp->byte_align;
645 		size <<= PAGE_SHIFT;
646 
647 	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
648 		/* Both size and alignment must be a multiple of 4. */
649 		page_align = ALIGN(bp->byte_align, 4);
650 		size = ALIGN(size, 4) << PAGE_SHIFT;
651 	} else {
652 		/* Memory should be aligned at least to a page size. */
653 		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
654 		size = ALIGN(size, PAGE_SIZE);
655 	}
656 
657 	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
658 		return -ENOMEM;
659 
660 	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
661 
662 	*bo_ptr = NULL;
663 	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
664 	if (bo == NULL)
665 		return -ENOMEM;
666 	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
667 	bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
668 	bo->vm_bo = NULL;
669 	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
670 		bp->domain;
671 	bo->allowed_domains = bo->preferred_domains;
672 	if (bp->type != ttm_bo_type_kernel &&
673 	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
674 	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
675 		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
676 
677 	bo->flags = bp->flags;
678 
679 	if (adev->gmc.mem_partitions)
680 		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
681 		bo->xcp_id = bp->xcp_id_plus1 - 1;
682 	else
683 		/* For GPUs without spatial partitioning */
684 		bo->xcp_id = 0;
685 
686 	if (!amdgpu_bo_support_uswc(bo->flags))
687 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
688 
689 	bo->tbo.bdev = &adev->mman.bdev;
690 	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
691 			  AMDGPU_GEM_DOMAIN_GDS))
692 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
693 	else
694 		amdgpu_bo_placement_from_domain(bo, bp->domain);
695 	if (bp->type == ttm_bo_type_kernel)
696 		bo->tbo.priority = 2;
697 	else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
698 		bo->tbo.priority = 1;
699 
700 	if (!bp->destroy)
701 		bp->destroy = &amdgpu_bo_destroy;
702 
703 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
704 				 &bo->placement, page_align, &ctx,  NULL,
705 				 bp->resv, bp->destroy);
706 	if (unlikely(r != 0))
707 		return r;
708 
709 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
710 	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
711 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
712 					     ctx.bytes_moved);
713 	else
714 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
715 
716 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
717 	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
718 		struct dma_fence *fence;
719 
720 		r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
721 		if (unlikely(r))
722 			goto fail_unreserve;
723 
724 		dma_resv_add_fence(bo->tbo.base.resv, fence,
725 				   DMA_RESV_USAGE_KERNEL);
726 		dma_fence_put(fence);
727 	}
728 	if (!bp->resv)
729 		amdgpu_bo_unreserve(bo);
730 	*bo_ptr = bo;
731 
732 	trace_amdgpu_bo_create(bo);
733 
734 	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
735 	if (bp->type == ttm_bo_type_device)
736 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
737 
738 	return 0;
739 
740 fail_unreserve:
741 	if (!bp->resv)
742 		dma_resv_unlock(bo->tbo.base.resv);
743 	amdgpu_bo_unref(&bo);
744 	return r;
745 }
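
/*
 * Illustrative sketch only: filling a struct amdgpu_bo_param by hand and
 * calling amdgpu_bo_create() directly, much like amdgpu_bo_create_reserved()
 * does above ("size" is a placeholder).
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */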
746 
747 /**
748  * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
749  * @adev: amdgpu device object
750  * @bp: parameters to be used for the buffer object
751  * @ubo_ptr: pointer to the buffer object pointer
752  *
753  * Create a BO to be used by a user application.
754  *
755  * Returns:
756  * 0 for success or a negative error code on failure.
757  */
758 
759 int amdgpu_bo_create_user(struct amdgpu_device *adev,
760 			  struct amdgpu_bo_param *bp,
761 			  struct amdgpu_bo_user **ubo_ptr)
762 {
763 	struct amdgpu_bo *bo_ptr;
764 	int r;
765 
766 	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
767 	bp->destroy = &amdgpu_bo_user_destroy;
768 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
769 	if (r)
770 		return r;
771 
772 	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
773 	return r;
774 }
775 
776 /**
777  * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
778  * @adev: amdgpu device object
779  * @bp: parameters to be used for the buffer object
780  * @vmbo_ptr: pointer to the buffer object pointer
781  *
782  * Create a BO to be used by GPUVM.
783  *
784  * Returns:
785  * 0 for success or a negative error code on failure.
786  */
787 
788 int amdgpu_bo_create_vm(struct amdgpu_device *adev,
789 			struct amdgpu_bo_param *bp,
790 			struct amdgpu_bo_vm **vmbo_ptr)
791 {
792 	struct amdgpu_bo *bo_ptr;
793 	int r;
794 
795 	/* bo_ptr_size will be determined by the caller and depends on the
796 	 * number of amdgpu_vm_pt entries.
797 	 */
798 	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
799 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
800 	if (r)
801 		return r;
802 
803 	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
804 	return r;
805 }
806 
807 /**
808  * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
809  * @bo: &amdgpu_bo buffer object to be mapped
810  * @ptr: kernel virtual address to be returned
811  *
812  * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
813  * amdgpu_bo_kptr() to get the kernel virtual address.
814  *
815  * Returns:
816  * 0 for success or a negative error code on failure.
817  */
818 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
819 {
820 	void *kptr;
821 	long r;
822 
823 	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
824 		return -EPERM;
825 
826 	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
827 				  false, MAX_SCHEDULE_TIMEOUT);
828 	if (r < 0)
829 		return r;
830 
831 	kptr = amdgpu_bo_kptr(bo);
832 	if (kptr) {
833 		if (ptr)
834 			*ptr = kptr;
835 		return 0;
836 	}
837 
838 	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
839 	if (r)
840 		return r;
841 
842 	if (ptr)
843 		*ptr = amdgpu_bo_kptr(bo);
844 
845 	return 0;
846 }
847 
848 /**
849  * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
850  * @bo: &amdgpu_bo buffer object
851  *
852  * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
853  *
854  * Returns:
855  * the virtual address of a buffer object area.
856  */
857 void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
858 {
859 	bool is_iomem;
860 
861 	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
862 }
863 
864 /**
865  * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
866  * @bo: &amdgpu_bo buffer object to be unmapped
867  *
868  * Unmaps a kernel map set up by amdgpu_bo_kmap().
869  */
870 void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
871 {
872 	if (bo->kmap.bo)
873 		ttm_bo_kunmap(&bo->kmap);
874 }
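
/*
 * Illustrative sketch only: CPU access to a kernel BO via the kmap helpers.
 * The BO must be reserved (or otherwise guaranteed not to move) while the
 * mapping is in use; "data"/"len" are placeholders.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, len);
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */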
875 
876 /**
877  * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
878  * @bo: &amdgpu_bo buffer object
879  *
880  * References the contained &ttm_buffer_object.
881  *
882  * Returns:
883  * a refcounted pointer to the &amdgpu_bo buffer object.
884  */
885 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
886 {
887 	if (bo == NULL)
888 		return NULL;
889 
890 	drm_gem_object_get(&bo->tbo.base);
891 	return bo;
892 }
893 
894 /**
895  * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
896  * @bo: &amdgpu_bo buffer object
897  *
898  * Unreferences the contained &ttm_buffer_object and clears the pointer.
899  */
900 void amdgpu_bo_unref(struct amdgpu_bo **bo)
901 {
902 	if ((*bo) == NULL)
903 		return;
904 
905 	drm_gem_object_put(&(*bo)->tbo.base);
906 	*bo = NULL;
907 }
908 
909 /**
910  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
911  * @bo: &amdgpu_bo buffer object to be pinned
912  * @domain: domain to be pinned to
913  *
914  * Pins the buffer object according to the requested domain. If the memory is
915  * unbound GART memory, binds the pages into the GART table. Adjusts pin_count
916  * and pin_size accordingly.
917  *
918  * Pinning means to lock pages in memory along with keeping them at a fixed
919  * offset. It is required when a buffer can not be moved, for example, when
920  * a display buffer is being scanned out.
921  *
922  * Returns:
923  * 0 for success or a negative error code on failure.
924  */
925 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
926 {
927 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
928 	struct ttm_operation_ctx ctx = { false, false };
929 	int r, i;
930 
931 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
932 		return -EPERM;
933 
934 	/* Check domain to be pinned to against preferred domains */
935 	if (bo->preferred_domains & domain)
936 		domain = bo->preferred_domains & domain;
937 
938 	/* A shared bo cannot be migrated to VRAM */
939 	if (drm_gem_is_imported(&bo->tbo.base)) {
940 		if (domain & AMDGPU_GEM_DOMAIN_GTT)
941 			domain = AMDGPU_GEM_DOMAIN_GTT;
942 		else
943 			return -EINVAL;
944 	}
945 
946 	if (bo->tbo.pin_count) {
947 		uint32_t mem_type = bo->tbo.resource->mem_type;
948 		uint32_t mem_flags = bo->tbo.resource->placement;
949 
950 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
951 			return -EINVAL;
952 
953 		if ((mem_type == TTM_PL_VRAM) &&
954 		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
955 		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
956 			return -EINVAL;
957 
958 		ttm_bo_pin(&bo->tbo);
959 		return 0;
960 	}
961 
962 	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
963 	 * See function amdgpu_display_supported_domains()
964 	 */
965 	domain = amdgpu_bo_get_preferred_domain(adev, domain);
966 
967 	if (drm_gem_is_imported(&bo->tbo.base))
968 		dma_buf_pin(bo->tbo.base.import_attach);
969 
970 	/* force to pin into visible video ram */
971 	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
972 		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
973 	amdgpu_bo_placement_from_domain(bo, domain);
974 	for (i = 0; i < bo->placement.num_placement; i++) {
975 		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
976 		    bo->placements[i].mem_type == TTM_PL_VRAM)
977 			bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
978 	}
979 
980 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
981 	if (unlikely(r)) {
982 		dev_err(adev->dev, "%p pin failed\n", bo);
983 		goto error;
984 	}
985 
986 	ttm_bo_pin(&bo->tbo);
987 
988 	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
989 		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
990 		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
991 			     &adev->visible_pin_size);
992 	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
993 		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
994 	}
995 
996 error:
997 	return r;
998 }
999 
1000 /**
1001  * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
1002  * @bo: &amdgpu_bo buffer object to be unpinned
1003  *
1004  * Decreases the pin_count, and clears the flags if pin_count reaches 0.
1005  * Changes placement and pin size accordingly.
1009  */
1010 void amdgpu_bo_unpin(struct amdgpu_bo *bo)
1011 {
1012 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1013 
1014 	ttm_bo_unpin(&bo->tbo);
1015 	if (bo->tbo.pin_count)
1016 		return;
1017 
1018 	if (drm_gem_is_imported(&bo->tbo.base))
1019 		dma_buf_unpin(bo->tbo.base.import_attach);
1020 
1021 	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
1022 		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
1023 		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
1024 			     &adev->visible_pin_size);
1025 	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
1026 		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
1027 	}
1028 
1029 }
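
/*
 * Illustrative sketch only: pinning a reserved BO (e.g. a scanout buffer)
 * into VRAM and querying its GPU offset, then dropping the pin later while
 * the BO is reserved again.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 *
 *	... later, with the BO reserved once more ...
 *	amdgpu_bo_unpin(bo);
 */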
1030 
1031 static const char * const amdgpu_vram_names[] = {
1032 	"UNKNOWN",
1033 	"GDDR1",
1034 	"DDR2",
1035 	"GDDR3",
1036 	"GDDR4",
1037 	"GDDR5",
1038 	"HBM",
1039 	"DDR3",
1040 	"DDR4",
1041 	"GDDR6",
1042 	"DDR5",
1043 	"LPDDR4",
1044 	"LPDDR5",
1045 	"HBM3E",
1046 	"HBM4"
1047 };
1048 
1049 /**
1050  * amdgpu_bo_init - initialize memory manager
1051  * @adev: amdgpu device object
1052  *
1053  * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
1054  *
1055  * Returns:
1056  * 0 for success or a negative error code on failure.
1057  */
1058 int amdgpu_bo_init(struct amdgpu_device *adev)
1059 {
1060 	/* On A+A platform, VRAM can be mapped as WB */
1061 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
1062 		/* reserve PAT memory space to WC for VRAM */
1063 		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
1064 				adev->gmc.aper_size);
1065 
1066 		if (r) {
1067 			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
1068 			return r;
1069 		}
1070 
1071 		/* Add an MTRR for the VRAM */
1072 		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
1073 				adev->gmc.aper_size);
1074 	}
1075 
1076 	drm_info(adev_to_drm(adev), "Detected VRAM RAM=%lluM, BAR=%lluM\n",
1077 		 adev->gmc.mc_vram_size >> 20,
1078 		 (unsigned long long)adev->gmc.aper_size >> 20);
1079 	drm_info(adev_to_drm(adev), "RAM width %dbits %s\n",
1080 		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
1081 	return amdgpu_ttm_init(adev);
1082 }
1083 
1084 /**
1085  * amdgpu_bo_fini - tear down memory manager
1086  * @adev: amdgpu device object
1087  *
1088  * Reverses amdgpu_bo_init() to tear down memory manager.
1089  */
1090 void amdgpu_bo_fini(struct amdgpu_device *adev)
1091 {
1092 	int idx;
1093 
1094 	amdgpu_ttm_fini(adev);
1095 
1096 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1097 		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
1098 			arch_phys_wc_del(adev->gmc.vram_mtrr);
1099 			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
1100 		}
1101 		drm_dev_exit(idx);
1102 	}
1103 }
1104 
1105 /**
1106  * amdgpu_bo_set_tiling_flags - set tiling flags
1107  * @bo: &amdgpu_bo buffer object
1108  * @tiling_flags: new flags
1109  *
1110  * Sets the buffer object's tiling flags to the new value. Used by the GEM
1111  * ioctl or kernel driver to set the tiling flags on a buffer.
1112  *
1113  * Returns:
1114  * 0 for success or a negative error code on failure.
1115  */
1116 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1117 {
1118 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1119 	struct amdgpu_bo_user *ubo;
1120 
1121 	/* MMIO_REMAP is BAR I/O space; tiling should never be used here. */
1122 	WARN_ON_ONCE(bo->tbo.resource &&
1123 		     bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP);
1124 
1125 	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1126 	if (adev->family <= AMDGPU_FAMILY_CZ &&
1127 	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1128 		return -EINVAL;
1129 
1130 	ubo = to_amdgpu_bo_user(bo);
1131 	ubo->tiling_flags = tiling_flags;
1132 	return 0;
1133 }
1134 
1135 /**
1136  * amdgpu_bo_get_tiling_flags - get tiling flags
1137  * @bo: &amdgpu_bo buffer object
1138  * @tiling_flags: returned flags
1139  *
1140  * Gets the buffer object's tiling flags. Used by the GEM ioctl or kernel
1141  * driver to get the tiling flags of a buffer.
1142  */
1143 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1144 {
1145 	struct amdgpu_bo_user *ubo;
1146 
1147 	/*
1148 	 * MMIO_REMAP BOs are not real VRAM/GTT memory but a fixed BAR I/O window.
1149 	 * They should never go through GEM tiling helpers.
1150 	 */
1151 	WARN_ON_ONCE(bo->tbo.resource &&
1152 		     bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP);
1153 
1154 	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1155 	dma_resv_assert_held(bo->tbo.base.resv);
1156 	ubo = to_amdgpu_bo_user(bo);
1157 
1158 	if (tiling_flags)
1159 		*tiling_flags = ubo->tiling_flags;
1160 }
1161 
1162 /**
1163  * amdgpu_bo_set_metadata - set metadata
1164  * @bo: &amdgpu_bo buffer object
1165  * @metadata: new metadata
1166  * @metadata_size: size of the new metadata
1167  * @flags: flags of the new metadata
1168  *
1169  * Sets buffer object's metadata, its size and flags.
1170  * Used via GEM ioctl.
1171  *
1172  * Returns:
1173  * 0 for success or a negative error code on failure.
1174  */
1175 int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
1176 			   u32 metadata_size, uint64_t flags)
1177 {
1178 	struct amdgpu_bo_user *ubo;
1179 	void *buffer;
1180 
1181 	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1182 	ubo = to_amdgpu_bo_user(bo);
1183 	if (!metadata_size) {
1184 		if (ubo->metadata_size) {
1185 			kfree(ubo->metadata);
1186 			ubo->metadata = NULL;
1187 			ubo->metadata_size = 0;
1188 		}
1189 		return 0;
1190 	}
1191 
1192 	if (metadata == NULL)
1193 		return -EINVAL;
1194 
1195 	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1196 	if (buffer == NULL)
1197 		return -ENOMEM;
1198 
1199 	kfree(ubo->metadata);
1200 	ubo->metadata_flags = flags;
1201 	ubo->metadata = buffer;
1202 	ubo->metadata_size = metadata_size;
1203 
1204 	return 0;
1205 }
1206 
1207 /**
1208  * amdgpu_bo_get_metadata - get metadata
1209  * @bo: &amdgpu_bo buffer object
1210  * @buffer: returned metadata
1211  * @buffer_size: size of the buffer
1212  * @metadata_size: size of the returned metadata
1213  * @flags: flags of the returned metadata
1214  *
1215  * Gets buffer object's metadata, its size and flags. buffer_size shall not be
1216  * less than metadata_size.
1217  * Used via GEM ioctl.
1218  *
1219  * Returns:
1220  * 0 for success or a negative error code on failure.
1221  */
1222 int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
1223 			   size_t buffer_size, uint32_t *metadata_size,
1224 			   uint64_t *flags)
1225 {
1226 	struct amdgpu_bo_user *ubo;
1227 
1228 	if (!buffer && !metadata_size)
1229 		return -EINVAL;
1230 
1231 	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1232 	ubo = to_amdgpu_bo_user(bo);
1233 	if (metadata_size)
1234 		*metadata_size = ubo->metadata_size;
1235 
1236 	if (buffer) {
1237 		if (buffer_size < ubo->metadata_size)
1238 			return -EINVAL;
1239 
1240 		if (ubo->metadata_size)
1241 			memcpy(buffer, ubo->metadata, ubo->metadata_size);
1242 	}
1243 
1244 	if (flags)
1245 		*flags = ubo->metadata_flags;
1246 
1247 	return 0;
1248 }
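
/*
 * Illustrative sketch only: a set/get round trip on the metadata blob of a BO
 * created through amdgpu_bo_create_user() ("blob", "blob_size" and "out_buf"
 * are placeholders).
 *
 *	uint32_t out_size;
 *	uint64_t out_flags;
 *	int r;
 *
 *	r = amdgpu_bo_set_metadata(bo, blob, blob_size, 0);
 *	...
 *	r = amdgpu_bo_get_metadata(bo, out_buf, sizeof(out_buf),
 *				   &out_size, &out_flags);
 */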
1249 
1250 /**
1251  * amdgpu_bo_move_notify - notification about a memory move
1252  * @bo: pointer to a buffer object
1253  * @evict: if this move is evicting the buffer from the graphics address space
1254  * @new_mem: new resource for backing the BO
1255  *
1256  * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
1257  * bookkeeping.
1258  * TTM driver callback which is called when ttm moves a buffer.
1259  */
1260 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
1261 			   bool evict,
1262 			   struct ttm_resource *new_mem)
1263 {
1264 	struct ttm_resource *old_mem = bo->resource;
1265 	struct amdgpu_bo *abo;
1266 
1267 	if (!amdgpu_bo_is_amdgpu_bo(bo))
1268 		return;
1269 
1270 	abo = ttm_to_amdgpu_bo(bo);
1271 	amdgpu_vm_bo_move(abo, new_mem, evict);
1272 
1273 	amdgpu_bo_kunmap(abo);
1274 
1275 	if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
1276 	    old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
1277 		dma_buf_move_notify(abo->tbo.base.dma_buf);
1278 
1279 	/* move_notify is called before move happens */
1280 	trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
1281 			     old_mem ? old_mem->mem_type : -1);
1282 }
1283 
1284 /**
1285  * amdgpu_bo_release_notify - notification about a BO being released
1286  * @bo: pointer to a buffer object
1287  *
1288  * Wipes VRAM buffers whose contents should not be leaked before the
1289  * memory is released.
1290  */
1291 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
1292 {
1293 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1294 	struct dma_fence *fence = NULL;
1295 	struct amdgpu_bo *abo;
1296 	int r;
1297 
1298 	if (!amdgpu_bo_is_amdgpu_bo(bo))
1299 		return;
1300 
1301 	abo = ttm_to_amdgpu_bo(bo);
1302 
1303 	WARN_ON(abo->vm_bo);
1304 
1305 	if (abo->kfd_bo)
1306 		amdgpu_amdkfd_release_notify(abo);
1307 
1308 	/*
1309 	 * We lock the private dma_resv object here and since the BO is about to
1310 	 * be released nobody else should have a pointer to it.
1311 	 * So when this locking here fails something is wrong with the reference
1312 	 * counting.
1313 	 */
1314 	if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
1315 		return;
1316 
1317 	amdgpu_amdkfd_remove_all_eviction_fences(abo);
1318 
1319 	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
1320 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
1321 	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
1322 		goto out;
1323 
1324 	r = dma_resv_reserve_fences(&bo->base._resv, 1);
1325 	if (r)
1326 		goto out;
1327 
1328 	r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv,
1329 			       &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
1330 	if (WARN_ON(r))
1331 		goto out;
1332 
1333 	amdgpu_vram_mgr_set_cleared(bo->resource);
1334 	dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
1335 	dma_fence_put(fence);
1336 
1337 out:
1338 	dma_resv_unlock(&bo->base._resv);
1339 }
1340 
1341 /**
1342  * amdgpu_bo_fault_reserve_notify - notification about a memory fault
1343  * @bo: pointer to a buffer object
1344  *
1345  * Notifies the driver we are taking a fault on this BO and have reserved it,
1346  * also performs bookkeeping.
1347  * TTM driver callback for dealing with vm faults.
1348  *
1349  * Returns:
1350  * 0 on success or a VM_FAULT_* code on failure.
1351  */
1352 vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1353 {
1354 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1355 	struct ttm_operation_ctx ctx = { false, false };
1356 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1357 	int r;
1358 
1359 	/* Remember that this BO was accessed by the CPU */
1360 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1361 
1362 	if (amdgpu_res_cpu_visible(adev, bo->resource))
1363 		return 0;
1364 
1365 	/* Can't move a pinned BO to visible VRAM */
1366 	if (abo->tbo.pin_count > 0)
1367 		return VM_FAULT_SIGBUS;
1368 
1369 	/* hurrah the memory is not visible ! */
1370 	atomic64_inc(&adev->num_vram_cpu_page_faults);
1371 	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
1372 					AMDGPU_GEM_DOMAIN_GTT);
1373 
1374 	/* Avoid costly evictions; only set GTT as a busy placement */
1375 	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
1376 
1377 	r = ttm_bo_validate(bo, &abo->placement, &ctx);
1378 	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
1379 		return VM_FAULT_NOPAGE;
1380 	else if (unlikely(r))
1381 		return VM_FAULT_SIGBUS;
1382 
1383 	/* this should never happen */
1384 	if (bo->resource->mem_type == TTM_PL_VRAM &&
1385 	    !amdgpu_res_cpu_visible(adev, bo->resource))
1386 		return VM_FAULT_SIGBUS;
1387 
1388 	ttm_bo_move_to_lru_tail_unlocked(bo);
1389 	return 0;
1390 }
1391 
1392 /**
1393  * amdgpu_bo_fence - add fence to buffer object
1394  *
1395  * @bo: buffer object in question
1396  * @fence: fence to add
1397  * @shared: true if fence should be added shared
1398  *
1399  */
1400 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1401 		     bool shared)
1402 {
1403 	struct dma_resv *resv = bo->tbo.base.resv;
1404 	int r;
1405 
1406 	r = dma_resv_reserve_fences(resv, 1);
1407 	if (r) {
1408 		/* As last resort on OOM we block for the fence */
1409 		dma_fence_wait(fence, false);
1410 		return;
1411 	}
1412 
1413 	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
1414 			   DMA_RESV_USAGE_WRITE);
1415 }
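
/*
 * Illustrative sketch only: attaching the fence of a just-submitted copy or
 * clear job to the BO so later users wait for it ("fence" is the job fence
 * returned by the submission path).
 *
 *	amdgpu_bo_fence(bo, fence, true);	// shared (read) usage
 *	dma_fence_put(fence);
 */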
1416 
1417 /**
1418  * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
1419  *
1420  * @adev: amdgpu device pointer
1421  * @resv: reservation object to sync to
1422  * @sync_mode: synchronization mode
1423  * @owner: fence owner
1424  * @intr: Whether the wait is interruptible
1425  *
1426  * Extract the fences from the reservation object and waits for them to finish.
1427  *
1428  * Returns:
1429  * 0 on success, errno otherwise.
1430  */
1431 int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
1432 			     enum amdgpu_sync_mode sync_mode, void *owner,
1433 			     bool intr)
1434 {
1435 	struct amdgpu_sync sync;
1436 	int r;
1437 
1438 	amdgpu_sync_create(&sync);
1439 	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
1440 	r = amdgpu_sync_wait(&sync, intr);
1441 	amdgpu_sync_free(&sync);
1442 	return r;
1443 }
1444 
1445 /**
1446  * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
1447  * @bo: buffer object to wait for
1448  * @owner: fence owner
1449  * @intr: Whether the wait is interruptible
1450  *
1451  * Wrapper to wait for fences in a BO.
1452  * Returns:
1453  * 0 on success, errno otherwise.
1454  */
1455 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1456 {
1457 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1458 
1459 	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
1460 					AMDGPU_SYNC_NE_OWNER, owner, intr);
1461 }
1462 
1463 /**
1464  * amdgpu_bo_gpu_offset - return GPU offset of bo
1465  * @bo:	amdgpu object for which we query the offset
1466  *
1467  * Note: the object should either be pinned or reserved when calling this
1468  * function; it might be useful to add a check for this for debugging.
1469  *
1470  * Returns:
1471  * current GPU offset of the object.
1472  */
1473 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1474 {
1475 	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
1476 	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
1477 		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
1478 	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
1479 	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
1480 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1481 
1482 	return amdgpu_bo_gpu_offset_no_check(bo);
1483 }
1484 
1485 /**
1486  * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
1487  * @bo:	amdgpu VRAM buffer object for which we query the offset
1488  *
1489  * Returns:
1490  * current FB aperture GPU offset of the object.
1491  */
1492 u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
1493 {
1494 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1495 	uint64_t offset, fb_base;
1496 
1497 	WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
1498 
1499 	fb_base = adev->gmc.fb_start;
1500 	fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1501 	offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
1502 	return amdgpu_gmc_sign_extend(offset);
1503 }
1504 
1505 /**
1506  * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
1507  * @bo:	amdgpu object for which we query the offset
1508  *
1509  * Returns:
1510  * current GPU offset of the object without raising warnings.
1511  */
1512 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
1513 {
1514 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1515 	uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
1516 
1517 	if (bo->tbo.resource->mem_type == TTM_PL_TT)
1518 		offset = amdgpu_gmc_agp_addr(&bo->tbo);
1519 
1520 	if (offset == AMDGPU_BO_INVALID_OFFSET)
1521 		offset = (bo->tbo.resource->start << PAGE_SHIFT) +
1522 			amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
1523 
1524 	return amdgpu_gmc_sign_extend(offset);
1525 }
1526 
1527 /**
1528  * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
1529  * @bo:	the buffer object we should look at
1530  *
1531  * BO can have multiple preferred placements, to avoid double counting we want
1532  * to file it under a single placement for memory stats.
1533  * Luckily, if we take the highest set bit in preferred_domains the result is
1534  * quite sensible.
1535  *
1536  * Returns:
1537  * Which of the placements should the BO be accounted under.
1538  */
1539 uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
1540 {
1541 	u32 domain;
1542 
1543 	/*
1544 	 * MMIO_REMAP is internal now, so it no longer maps from a userspace
1545 	 * domain bit. Keep fdinfo/mem-stats visibility by checking the actual
1546 	 * TTM placement.
1547 	 */
1548 	if (bo->tbo.resource && bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP)
1549 		return AMDGPU_PL_MMIO_REMAP;
1550 
1551 	domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
1552 	if (!domain)
1553 		return TTM_PL_SYSTEM;
1554 
1555 	switch (rounddown_pow_of_two(domain)) {
1556 	case AMDGPU_GEM_DOMAIN_CPU:
1557 		return TTM_PL_SYSTEM;
1558 	case AMDGPU_GEM_DOMAIN_GTT:
1559 		return TTM_PL_TT;
1560 	case AMDGPU_GEM_DOMAIN_VRAM:
1561 		return TTM_PL_VRAM;
1562 	case AMDGPU_GEM_DOMAIN_GDS:
1563 		return AMDGPU_PL_GDS;
1564 	case AMDGPU_GEM_DOMAIN_GWS:
1565 		return AMDGPU_PL_GWS;
1566 	case AMDGPU_GEM_DOMAIN_OA:
1567 		return AMDGPU_PL_OA;
1568 	case AMDGPU_GEM_DOMAIN_DOORBELL:
1569 		return AMDGPU_PL_DOORBELL;
1570 	default:
1571 		return TTM_PL_SYSTEM;
1572 	}
1573 }
1574 
1575 /**
1576  * amdgpu_bo_get_preferred_domain - get preferred domain
1577  * @adev: amdgpu device object
1578  * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
1579  *
1580  * Returns:
1581  * Which of the allowed domains is preferred for allocating the BO.
1582  */
1583 uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
1584 					    uint32_t domain)
1585 {
1586 	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
1587 	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
1588 		domain = AMDGPU_GEM_DOMAIN_VRAM;
1589 		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1590 			domain = AMDGPU_GEM_DOMAIN_GTT;
1591 	}
1592 	return domain;
1593 }
1594 
1595 #if defined(CONFIG_DEBUG_FS)
1596 #define amdgpu_bo_print_flag(m, bo, flag)		        \
1597 	do {							\
1598 		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
1599 			seq_printf((m), " " #flag);		\
1600 		}						\
1601 	} while (0)
1602 
1603 /**
1604  * amdgpu_bo_print_info - print BO info in debugfs file
1605  *
1606  * @id: Index or Id of the BO
1607  * @bo: Requested BO for printing info
1608  * @m: debugfs file
1609  *
1610  * Print BO information in debugfs file
1611  *
1612  * Returns:
1613  * Size of the BO in bytes.
1614  */
1615 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
1616 {
1617 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1618 	struct dma_buf_attachment *attachment;
1619 	struct dma_buf *dma_buf;
1620 	const char *placement;
1621 	unsigned int pin_count;
1622 	u64 size;
1623 
1624 	if (dma_resv_trylock(bo->tbo.base.resv)) {
1625 		if (!bo->tbo.resource) {
1626 			placement = "NONE";
1627 		} else {
1628 			switch (bo->tbo.resource->mem_type) {
1629 			case TTM_PL_VRAM:
1630 				if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
1631 					placement = "VRAM VISIBLE";
1632 				else
1633 					placement = "VRAM";
1634 				break;
1635 			case TTM_PL_TT:
1636 				placement = "GTT";
1637 				break;
1638 			case AMDGPU_PL_GDS:
1639 				placement = "GDS";
1640 				break;
1641 			case AMDGPU_PL_GWS:
1642 				placement = "GWS";
1643 				break;
1644 			case AMDGPU_PL_OA:
1645 				placement = "OA";
1646 				break;
1647 			case AMDGPU_PL_PREEMPT:
1648 				placement = "PREEMPTIBLE";
1649 				break;
1650 			case AMDGPU_PL_DOORBELL:
1651 				placement = "DOORBELL";
1652 				break;
1653 			case AMDGPU_PL_MMIO_REMAP:
1654 				placement = "MMIO REMAP";
1655 				break;
1656 			case TTM_PL_SYSTEM:
1657 			default:
1658 				placement = "CPU";
1659 				break;
1660 			}
1661 		}
1662 		dma_resv_unlock(bo->tbo.base.resv);
1663 	} else {
1664 		placement = "UNKNOWN";
1665 	}
1666 
1667 	size = amdgpu_bo_size(bo);
1668 	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
1669 			id, size, placement);
1670 
1671 	pin_count = READ_ONCE(bo->tbo.pin_count);
1672 	if (pin_count)
1673 		seq_printf(m, " pin count %d", pin_count);
1674 
1675 	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
1676 	attachment = READ_ONCE(bo->tbo.base.import_attach);
1677 
1678 	if (attachment)
1679 		seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
1680 	else if (dma_buf)
1681 		seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);
1682 
1683 	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
1684 	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
1685 	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
1686 	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
1687 	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
1688 	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
1689 	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
1690 	/* Add the gem obj resv fence dump*/
1691 	if (dma_resv_trylock(bo->tbo.base.resv)) {
1692 		dma_resv_describe(bo->tbo.base.resv, m);
1693 		dma_resv_unlock(bo->tbo.base.resv);
1694 	}
1695 	seq_puts(m, "\n");
1696 
1697 	return size;
1698 }
1699 #endif
1700