/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"

MODULE_IMPORT_NS("DMA_BUF");

#define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)

static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm);
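
/*
 * amdgpu_ttm_init_on_chip - Initialize a TTM range manager for an on-chip
 * memory domain (GDS, GWS or OA).
 *
 * Thin wrapper around ttm_range_man_init(); "use_tt" is false because these
 * domains are not backed by system pages.
 */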
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				    unsigned int type,
				    uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->num_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
		placement->num_placement = 0;
		return;
	}

	switch (bo->resource->mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
	case AMDGPU_PL_DOORBELL:
	case AMDGPU_PL_MMIO_REMAP:
		placement->num_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);

		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_res_cpu_visible(adev, bo->resource)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		}
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

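/*
 * amdgpu_ttm_job_submit - Pad and submit a prepared buffer-move job
 *
 * Pads the IB to the ring alignment, warns if the IB grew past the number of
 * dwords reserved for it and hands the job to the scheduler. The caller must
 * hold the entity lock.
 */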
static struct dma_fence *
amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_ttm_buffer_entity *entity,
		      struct amdgpu_job *job, u32 num_dw)
{
	struct amdgpu_ring *ring;

	ring = adev->mman.buffer_funcs_ring;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	lockdep_assert_held(&entity->lock);

	return amdgpu_job_submit(job);
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @entity: entity to run the window setup job
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_cur: range to map
 * @window: which GART window to use
 * @tmz: if we should setup a TMZ enabled mapping
 * @size: in number of bytes to map, out number of bytes mapped
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct amdgpu_ttm_buffer_entity *entity,
				 struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct amdgpu_res_cursor *mm_cur,
				 unsigned int window,
				 bool tmz, uint64_t *size, uint64_t *addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	unsigned int offset, num_pages, num_dw, num_bytes;
	uint64_t src_addr, dst_addr;
	struct amdgpu_job *job;
	void *cpu_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
		return -EINVAL;

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
			mm_cur->start;
		return 0;
	}

	/*
	 * If start begins at an offset inside the page, then adjust the size
	 * and addr accordingly
	 */
	offset = mm_cur->start & ~PAGE_MASK;

	num_pages = PFN_UP(*size + offset);
	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);

	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);

	*addr = amdgpu_compute_gart_address(&adev->gmc, entity, window);
	*addr += offset;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
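	/* Every GPU page mapped through the window needs an 8-byte PTE in the IB */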
	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	r = amdgpu_job_alloc_with_ib(adev, &entity->base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job,
				     AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += (entity->gart_window_offs[window] >> AMDGPU_GPU_PAGE_SHIFT) * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_addr;

		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
	} else {
		u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;

		amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
	}

	dma_fence_put(amdgpu_ttm_job_submit(adev, entity, job, num_dw));
	return 0;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @entity: entity to run the jobs
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 */
__attribute__((nonnull))
static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
				      struct amdgpu_ttm_buffer_entity *entity,
				      const struct amdgpu_copy_mem *src,
				      const struct amdgpu_copy_mem *dst,
				      uint64_t size, bool tmz,
				      struct dma_resv *resv,
				      struct dma_fence **f)
{
	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;
	uint32_t copy_flags = 0;
	struct amdgpu_bo *abo_src, *abo_dst;

	if (!adev->mman.buffer_funcs_enabled) {
		dev_err(adev->dev,
			"Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);

	mutex_lock(&entity->lock);
	while (src_mm.remaining) {
		uint64_t from, to, cur_size, tiling_flags;
		uint32_t num_type, data_format, max_com, write_compress_disable;
		struct dma_fence *next;

		/* Never copy more than 256MiB at once to avoid a timeout */
		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(entity, src->bo, src->mem, &src_mm,
					  0, tmz, &cur_size, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(entity, dst->bo, dst->mem, &dst_mm,
					  1, tmz, &cur_size, &to);
		if (r)
			goto error;

		abo_src = ttm_to_amdgpu_bo(src->bo);
		abo_dst = ttm_to_amdgpu_bo(dst->bo);
		if (tmz)
			copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
		if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
			copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
		if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (dst->mem->mem_type == TTM_PL_VRAM)) {
			copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
			amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
			write_compress_disable =
				AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
				       AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
							     write_compress_disable));
		}

		r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
				       &next, true, copy_flags);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}
error:
	mutex_unlock(&entity->lock);
	*f = fence;
	return r;
}

/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev,
				       &adev->mman.move_entity,
				       &src, &dst,
				       new_mem->size,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(&adev->mman.move_entity,
				       abo, 0, NULL, &wipe_fence,
				       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			amdgpu_vram_mgr_set_cleared(bo->resource);
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
 * @adev: amdgpu device
 * @res: the resource to check
 *
 * Returns: true if the full resource is CPU visible, false otherwise.
 */
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;

	if (!res)
		return false;

	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
	    res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
	    res->mem_type == AMDGPU_PL_MMIO_REMAP)
		return true;

	if (res->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
			return false;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return true;
}

/*
 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
				struct ttm_resource *mem)
{
	if (!amdgpu_res_cpu_visible(adev, mem))
		return false;

	/* ttm_resource_ioremap only supports contiguous memory */
	if (mem->mem_type == TTM_PL_VRAM &&
	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
		return false;

	return true;
}

/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;
	int r;

	if (new_mem->mem_type == TTM_PL_TT ||
	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	abo = ttm_to_amdgpu_bo(bo);
	adev = amdgpu_ttm_adev(bo->bdev);

	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
			 bo->ttm == NULL)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    (new_mem->mem_type == TTM_PL_TT ||
	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT ||
	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			return r;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
		/* Nothing to save here */
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	if (adev->mman.buffer_funcs_enabled &&
	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
	      new_mem->mem_type == TTM_PL_VRAM) ||
	     (old_mem->mem_type == TTM_PL_VRAM &&
	      new_mem->mem_type == TTM_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		return -EMULTIHOP;
	}

	amdgpu_bo_move_notify(bo, evict, new_mem);
	if (adev->mman.buffer_funcs_enabled)
		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	else
		r = -ENODEV;

	if (r) {
		/* Check that all memory is CPU accessible */
		if (!amdgpu_res_copyable(adev, old_mem) ||
		    !amdgpu_res_copyable(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	/* update statistics after the move */
	if (evict)
		atomic64_inc(&adev->num_evictions);
	atomic64_add(bo->base.size, &adev->num_bytes_moved);
	return 0;
}

/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
				     struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (adev->mman.aper_base_kaddr &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	case AMDGPU_PL_DOORBELL:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->doorbell.base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	case AMDGPU_PL_MMIO_REMAP:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->rmmio_remap.bus_addr;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

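/*
 * amdgpu_ttm_io_mem_pfn - Return the PFN backing a page of a BO
 *
 * Called by TTM during CPU faults on I/O memory; translates the page offset
 * inside the BO into a PFN in the VRAM, doorbell or MMIO-remap aperture.
 */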
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
			 &cursor);

	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
	else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
		return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;

	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
 */
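/*
 * Per-BO TTM backing store. @offset is the GART address the pages are bound
 * at, @userptr/@usertask/@userflags describe userptr BOs and @pool_id selects
 * which (possibly partition-specific) page pool to allocate from.
 */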
struct amdgpu_ttm_tt {
	struct ttm_tt	ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
	int32_t			pool_id;
};

#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking. It is the caller's responsibility to
 * ensure that the range stays valid memory and that it is freed as well.
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
				 struct amdgpu_hmm_range *range)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	bool readonly;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	if (unlikely(!vma)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}

	readonly = amdgpu_ttm_tt_is_readonly(ttm);
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, range);
out_unlock:
	mmap_read_unlock(mm);
	if (r)
		pr_debug("failed %d to get user pages 0x%lx\n", r, start);

	mmput(mm);

	return r;
}

#endif

/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}

/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	int r;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg_table;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg_table:
	sg_free_table(ttm->sg);
release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}

/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
					struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
}

/*
 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
 * MQDn+CtrlStackn, where n is the number of XCCs per partition.
 * pages_per_xcc is the size of one MQD+CtrlStack. The first page of each
 * chunk is the MQD and keeps the default memory type (UC); the remaining
 * pages are the control stack, whose memory type is changed to NC.
 */
static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
				struct ttm_tt *ttm, uint64_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t total_pages = ttm->num_pages;
	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
	uint64_t page_idx, pages_per_xcc;
	int i;

	pages_per_xcc = total_pages;
	do_div(pages_per_xcc, num_xcc);

	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
		amdgpu_gart_map_gfx9_mqd(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				pages_per_xcc, &gtt->ttm.dma_address[page_idx],
				flags);
	}
}

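/*
 * amdgpu_ttm_gart_bind - Write the GART entries for a buffer object
 *
 * Adds TMZ protection for encrypted BOs and uses the special GFX9 MQD
 * mapping path when required, otherwise does a regular GART bind.
 */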
static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				 struct ttm_buffer_object *tbo,
				 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
	} else {
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
	gtt->bound = true;
}

/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	uint64_t flags;
	int r;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			dev_err(adev->dev, "failed to pin userptr\n");
			return r;
		}
	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
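		/* BO imported via dma-buf: map the attachment on first bind */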
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
	}

	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type != TTM_PL_TT ||
	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			 gtt->ttm.dma_address, flags);
	gtt->bound = true;
	return 0;
}

/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	struct ttm_placement placement;
	struct ttm_place placements;
	struct ttm_resource *tmp;
	uint64_t addr, flags;
	int r;

	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* allocate GART space */
	placement.num_placement = 1;
	placement.placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.mem_type = TTM_PL_TT;
	placements.flags = bo->resource->placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
	amdgpu_ttm_gart_bind(adev, bo, flags);
	amdgpu_gart_invalidate_tlb(adev);
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, tmp);

	return 0;
}

/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;

	if (!tbo->ttm)
		return;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
	amdgpu_ttm_gart_bind(adev, tbo, flags);
}

/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr) {
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
	} else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
	}

	if (!gtt->bound)
		return;

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	gtt->bound = false;
}

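/*
 * amdgpu_ttm_backend_destroy - Free a ttm_tt and drop the usertask reference
 * taken by amdgpu_ttm_tt_set_userptr().
 */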
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

/**
 * amdgpu_ttm_mmio_remap_alloc_sgt - build an sg_table for MMIO_REMAP I/O aperture
 * @adev: amdgpu device providing the remap BAR base (adev->rmmio_remap.bus_addr)
 * @res:  TTM resource of the BO to export; expected to live in AMDGPU_PL_MMIO_REMAP
 * @dev:  importing device to map for (typically @attach->dev in dma-buf paths)
 * @dir:  DMA data direction for the importer (passed to dma_map_resource())
 * @sgt:  output; on success, set to a newly allocated sg_table describing the I/O span
 *
 * The HDP flush page (AMDGPU_PL_MMIO_REMAP) is a fixed hardware I/O window in a
 * PCI BAR; there are no struct pages to back it. Importers still need a DMA
 * address list, so we synthesize a minimal sg_table and populate it from
 * dma_map_resource(), not from pages. Using the common amdgpu_res_cursor walker
 * keeps the offset/size math consistent with other TTM/manager users.
 *
 * - @res is assumed to be a small, contiguous I/O region (typically a single 4 KiB
 *   page) in AMDGPU_PL_MMIO_REMAP. Callers should validate placement before calling.
 * - The sg entry is created with sg_set_page(sg, NULL, ...) to reflect I/O space.
 * - The mapping uses DMA_ATTR_SKIP_CPU_SYNC because this is MMIO, not cacheable RAM.
 * - Peer reachability / p2pdma policy checks must be done by the caller.
 *
 * Return:
 * * 0 on success, with *@sgt set to a valid table that must be freed via
 *   amdgpu_ttm_mmio_remap_free_sgt().
 * * -ENOMEM if allocation of the sg_table fails.
 * * -EIO if dma_map_resource() fails.
 */
int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
				    struct ttm_resource *res,
				    struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table **sgt)
{
	struct amdgpu_res_cursor cur;
	dma_addr_t dma;
	resource_size_t phys;
	struct scatterlist *sg;
	int r;

	/* Walk the resource once; MMIO_REMAP is expected to be contiguous+small. */
	amdgpu_res_first(res, 0, res->size, &cur);

	/* Translate byte offset in the remap window into a host physical BAR address. */
	phys = adev->rmmio_remap.bus_addr + cur.start;

	/* Build a single-entry sg_table mapped as I/O (no struct page backing). */
	*sgt = kzalloc_obj(**sgt);
	if (!*sgt)
		return -ENOMEM;
	r = sg_alloc_table(*sgt, 1, GFP_KERNEL);
	if (r) {
		kfree(*sgt);
		return r;
	}
	sg = (*sgt)->sgl;
	sg_set_page(sg, NULL, cur.size, 0); /* I/O space, so no struct page */

	dma = dma_map_resource(dev, phys, cur.size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma)) {
		sg_free_table(*sgt);
		kfree(*sgt);
		return -EIO;
	}
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = cur.size;
	return 0;
}

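/**
 * amdgpu_ttm_mmio_remap_free_sgt - unmap and free a table built by
 * amdgpu_ttm_mmio_remap_alloc_sgt()
 * @dev: importing device the resource was mapped for
 * @dir: DMA data direction used when mapping
 * @sgt: the sg_table to tear down
 */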
void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table *sgt)
{
	struct scatterlist *sg = sgt->sgl;

	dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc_obj(struct amdgpu_ttm_tt);
	if (!gtt)
		return NULL;

	gtt->gobj = &bo->base;
	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
	else
		gtt->pool_id = abo->xcp_id;

	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct ttm_pool *pool;
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		ttm->sg = kzalloc_obj(struct sg_table);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;
	ret = ttm_pool_alloc(pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;

	return 0;
}

/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	struct ttm_pool *pool;
	pgoff_t i;

	amdgpu_ttm_backend_unbind(bdev, ttm);

	if (gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = NULL;

	adev = amdgpu_ttm_adev(bdev);

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;

	return ttm_pool_free(pool, ttm);
}

/**
 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
 * task
 *
 * @tbo: The ttm_buffer_object that contains the userptr
 * @user_addr:  The returned value
 */
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr)
{
	struct amdgpu_ttm_tt *gtt;

	if (!tbo->ttm)
		return -EINVAL;

	gtt = (void *)tbo->ttm;
	*user_addr = gtt->userptr;
	return 0;
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr:  The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
 * initialize GPU VM for a KFD process.
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
 * address range for the current task.
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	if (userptr)
		*userptr = gtt->userptr;
	return true;
}

/*
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_DOORBELL ||
		    mem->mem_type == AMDGPU_PL_PREEMPT ||
		    mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm && ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (mem && mem->mem_type == TTM_PL_VRAM &&
			mem->bus.caching == ttm_cached)
		flags |= AMDGPU_PTE_SNOOPED;

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	struct dma_resv_iter resv_cursor;
	struct dma_fence *f;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return ttm_bo_eviction_valuable(bo, place);

	/* Swapout? */
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
		if (amdkfd_fence_check_mm(f, current->mm) &&
		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
			return false;
	}

	/* Preemptible BOs don't own system resources managed by the
	 * driver (pages, VRAM, GART space). They point to resources
	 * owned by someone else (e.g. pageable memory in user mode
	 * or a DMABuf). They are used in a preemptible context so we
	 * can guarantee no deadlocks and good QoS in case of MMU
	 * notifiers or DMABuf move notifiers from the resource owner.
	 */
	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
		return false;

	if (bo->resource->mem_type == TTM_PL_TT &&
	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

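/*
 * amdgpu_ttm_vram_mm_access - Access VRAM through the MM register aperture
 *
 * The aperture only supports aligned 32-bit accesses, so unaligned head and
 * tail bytes use a masked read-modify-write. For example, pos = 0x1001 with
 * size = 2 gives aligned_pos = 0x1000, shift = 8 and mask = 0x00ffff00, so
 * only the two middle bytes of the dword are touched.
 */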
static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
}

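/*
 * amdgpu_ttm_access_memory_sdma - Access one page of a BO using SDMA
 *
 * Copies a single page between the BO and the persistent sdma_access_bo
 * bounce buffer on the default entity and waits for the copy fence. Only
 * whole PAGE_SIZE accesses are supported.
 */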
static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
					unsigned long offset, void *buf,
					int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor src_mm;
	struct amdgpu_job *job;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	unsigned int num_dw;
	int r, idx;

	if (len != PAGE_SIZE)
		return -EINVAL;

	if (!adev->mman.sdma_access_ptr)
		return -EACCES;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	if (write)
		memcpy(adev->mman.sdma_access_ptr, buf, len);

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.default_entity.base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job,
				     AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
	if (r)
		goto out;

	mutex_lock(&adev->mman.default_entity.lock);
	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
		src_mm.start;
	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
	if (write)
		swap(src_addr, dst_addr);

	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
				PAGE_SIZE, 0);

	fence = amdgpu_ttm_job_submit(adev, &adev->mman.default_entity, job, num_dw);
	mutex_unlock(&adev->mman.default_entity.lock);

	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
		r = -ETIMEDOUT;
	dma_fence_put(fence);

	if (!(r || write))
		memcpy(buf, adev->mman.sdma_access_ptr, len);
out:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset, void *buf, int len,
				    int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;

	if (amdgpu_device_has_timeouts_enabled(adev) &&
			!amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
		size_t count, size = cursor.size;
		loff_t pos = cursor.start;

		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
		size -= count;
		if (size) {
			/* use MM to access the rest of VRAM and to handle
			 * unaligned addresses
			 */
			pos += count;
			buf += count;
			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

		ret += cursor.size;
		buf += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false, NULL);
}

static struct ttm_device_funcs amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
		NULL, &adev->mman.fw_vram_usage_va);
}

/*
 * Driver Reservation functions
 */
/**
 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free drv reserved vram if it has been reserved.
 */
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
						  NULL,
						  &adev->mman.drv_vram_usage_va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}

/**
 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from drv.
 */
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
	u64 vram_size = adev->gmc.visible_vram_size;

	adev->mman.drv_vram_usage_va = NULL;
	adev->mman.drv_vram_usage_reserved_bo = NULL;

	if (adev->mman.drv_vram_usage_size == 0 ||
	    adev->mman.drv_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.drv_vram_usage_start_offset,
					  adev->mman.drv_vram_usage_size,
					  &adev->mman.drv_vram_usage_reserved_bo,
					  &adev->mman.drv_vram_usage_va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

1775 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
1776 						uint32_t reserve_size)
1777 {
1778 	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1779 
1780 	memset(ctx, 0, sizeof(*ctx));
1781 
1782 	ctx->c2p_train_data_offset =
1783 		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1784 	ctx->p2c_train_data_offset =
1785 		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1786 	ctx->train_data_size =
1787 		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1788 
1789 	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1790 			ctx->train_data_size,
1791 			ctx->p2c_train_data_offset,
1792 			ctx->c2p_train_data_offset);
1793 }
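
/*
 * Worked example of the layout computed above, with illustrative numbers
 * only: assume mc_vram_size = 16 GiB (0x400000000), reserve_size = 4 MiB,
 * and GDDR6_MEM_TRAINING_OFFSET = 32 KiB (an assumed value here; the real
 * constant lives in the PSP headers).  Then:
 *
 *	c2p_train_data_offset = ALIGN(0x400000000 - 0x400000 - SZ_1M, SZ_1M)
 *			      = 0x3ffb00000
 *	p2c_train_data_offset = 0x400000000 - 0x8000
 *			      = 0x3ffff8000
 *
 * i.e. the p2c buffer sits just below the top of VRAM inside the reserved
 * region, while the c2p buffer lands at a 1 MiB aligned offset below the
 * firmware reservation.
 */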
1794 
1795 /*
1796  * reserve TMR memory at the top of VRAM which holds
1797  * IP Discovery data and is protected by PSP.
1798  */
1799 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1800 {
1801 	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1802 	bool mem_train_support = false;
1803 	uint32_t reserve_size = 0;
1804 	int ret;
1805 
1806 	if (adev->bios && !amdgpu_sriov_vf(adev)) {
1807 		if (amdgpu_atomfirmware_mem_training_supported(adev))
1808 			mem_train_support = true;
1809 		else
1810 			DRM_DEBUG("memory training is not supported!\n");
1811 	}
1812 
1813 	/*
1814 	 * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1815 	 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1816 	 *
1817 	 * Otherwise, fall back to the legacy approach of checking for and reserving a TMR block for IP
1818 	 * discovery data and G6 memory training data respectively.
1819 	 */
1820 	if (adev->bios)
1821 		reserve_size =
1822 			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1823 
1824 	if (!adev->bios &&
1825 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1826 	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1827 	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
1828 		reserve_size = max(reserve_size, (uint32_t)280 << 20);
1829 	else if (!adev->bios &&
1830 		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
1831 		if (hweight32(adev->aid_mask) == 1)
1832 			reserve_size = max(reserve_size, (uint32_t)128 << 20);
1833 		else
1834 			reserve_size = max(reserve_size, (uint32_t)144 << 20);
1835 	} else if (!reserve_size)
1836 		reserve_size = DISCOVERY_TMR_OFFSET;
1837 
1838 	if (mem_train_support) {
1839 		/* reserve vram for mem train according to TMR location */
1840 		amdgpu_ttm_training_data_block_init(adev, reserve_size);
1841 		ret = amdgpu_bo_create_kernel_at(adev,
1842 						 ctx->c2p_train_data_offset,
1843 						 ctx->train_data_size,
1844 						 &ctx->c2p_bo,
1845 						 NULL);
1846 		if (ret) {
1847 			dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
1848 			amdgpu_ttm_training_reserve_vram_fini(adev);
1849 			return ret;
1850 		}
1851 		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1852 	}
1853 
1854 	ret = amdgpu_bo_create_kernel_at(
1855 		adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
1856 		&adev->mman.fw_reserved_memory, NULL);
1857 	if (ret) {
1858 		dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
1859 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
1860 				      NULL);
1861 		return ret;
1862 	}
1863 
1864 	return 0;
1865 }
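
/*
 * Resulting top-of-VRAM layout (sketch, not to scale, assuming
 * mc_vram_size == real_vram_size):
 *
 *	+---------------------------------+ <- real_vram_size
 *	| fw_reserved_memory (TMR,        |
 *	| reserve_size bytes; also covers |
 *	| the p2c training buffer)        |
 *	+---------------------------------+ <- real_vram_size - reserve_size
 *	| c2p training buffer, allocated  |
 *	| only when mem_train_support     |
 *	+---------------------------------+ <- ctx->c2p_train_data_offset
 *	| rest of VRAM                    |
 */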
1866 
1867 static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
1868 {
1869 	int i;
1870 
1871 	if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
1872 		return 0;
1873 
1874 	adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools,
1875 					    adev->gmc.num_mem_partitions);
1876 	if (!adev->mman.ttm_pools)
1877 		return -ENOMEM;
1878 
1879 	for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
1880 		ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
1881 			      adev->gmc.mem_partitions[i].numa.node,
1882 			      TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
1883 	}
1884 	return 0;
1885 }
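
/*
 * For context: with one pool per memory partition, TT populate paths can
 * select the pool matching a BO's partition and fall back to the global
 * device pool otherwise.  A minimal sketch (pool_id is a stand-in for
 * however the tt tracks its partition):
 *
 *	struct ttm_pool *pool = &adev->mman.bdev.pool;
 *
 *	if (adev->mman.ttm_pools && pool_id >= 0)
 *		pool = &adev->mman.ttm_pools[pool_id];
 */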
1886 
1887 static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
1888 {
1889 	int i;
1890 
1891 	if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
1892 		return;
1893 
1894 	for (i = 0; i < adev->gmc.num_mem_partitions; i++)
1895 		ttm_pool_fini(&adev->mman.ttm_pools[i]);
1896 
1897 	kfree(adev->mman.ttm_pools);
1898 	adev->mman.ttm_pools = NULL;
1899 }
1900 
1901 /**
1902  * amdgpu_ttm_alloc_mmio_remap_bo - Allocate the singleton MMIO_REMAP BO
1903  * @adev: amdgpu device
1904  *
1905  * Allocates a global BO backed by AMDGPU_PL_MMIO_REMAP when the
1906  * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
1907  * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
1908  * GEM object (amdgpu_bo_create).
1909  *
1910  * Return:
1911  *  * 0 on success or intentional skip (feature not present/unsupported)
1912  *  * negative errno on allocation failure
1913  */
1914 static int amdgpu_ttm_alloc_mmio_remap_bo(struct amdgpu_device *adev)
1915 {
1916 	struct ttm_operation_ctx ctx = { false, false };
1917 	struct ttm_placement placement;
1918 	struct ttm_buffer_object *tbo;
1919 	struct ttm_place placements;
1920 	struct amdgpu_bo_param bp;
1921 	struct ttm_resource *tmp;
1922 	int r;
1923 
1924 	/* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
1925 	if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
1926 		return 0;
1927 
1928 	/*
1929 	 * Allocate a BO first and then move it to AMDGPU_PL_MMIO_REMAP.
1930 	 * The initial TTM resource assigned by amdgpu_bo_create() is
1931 	 * replaced below with a fixed MMIO_REMAP placement.
1932 	 */
1933 	memset(&bp, 0, sizeof(bp));
1934 	bp.type        = ttm_bo_type_device;
1935 	bp.size        = AMDGPU_GPU_PAGE_SIZE;
1936 	bp.byte_align  = AMDGPU_GPU_PAGE_SIZE;
1937 	bp.domain      = 0;
1938 	bp.flags       = 0;
1939 	bp.resv        = NULL;
1940 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
1941 	r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
1942 	if (r)
1943 		return r;
1944 
1945 	r = amdgpu_bo_reserve(adev->rmmio_remap.bo, true);
1946 	if (r)
1947 		goto err_unref;
1948 
1949 	tbo = &adev->rmmio_remap.bo->tbo;
1950 
1951 	/*
1952 	 * MMIO_REMAP is a fixed I/O placement (AMDGPU_PL_MMIO_REMAP).
1953 	 */
1954 	placement.num_placement = 1;
1955 	placement.placement = &placements;
1956 	placements.fpfn = 0;
1957 	placements.lpfn = 0;
1958 	placements.mem_type = AMDGPU_PL_MMIO_REMAP;
1959 	placements.flags = 0;
1960 	/* Force the BO into the fixed MMIO_REMAP placement */
1961 	r = ttm_bo_mem_space(tbo, &placement, &tmp, &ctx);
1962 	if (unlikely(r))
1963 		goto err_unlock;
1964 
1965 	ttm_resource_free(tbo, &tbo->resource);
1966 	ttm_bo_assign_mem(tbo, tmp);
1967 	ttm_bo_pin(tbo);
1968 
1969 	amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1970 	return 0;
1971 
1972 err_unlock:
1973 	amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1974 
1975 err_unref:
1976 	amdgpu_bo_unref(&adev->rmmio_remap.bo);
1977 	adev->rmmio_remap.bo = NULL;
1978 	return r;
1979 }
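
/*
 * Note that the ttm_bo_pin() above keeps the singleton BO permanently
 * resident in the single-page MMIO_REMAP manager, so TTM never tries to
 * evict or move it; the matching unpin happens only in
 * amdgpu_ttm_free_mmio_remap_bo() below.
 */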
1980 
1981 /**
1982  * amdgpu_ttm_free_mmio_remap_bo - Free the singleton MMIO_REMAP BO
1983  * @adev: amdgpu device
1984  *
1985  * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
1986  * amdgpu_ttm_alloc_mmio_remap_bo().
1987  */
1988 static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
1989 {
1990 	if (!adev->rmmio_remap.bo)
1991 		return;
1992 
1993 	if (!amdgpu_bo_reserve(adev->rmmio_remap.bo, true)) {
1994 		ttm_bo_unpin(&adev->rmmio_remap.bo->tbo);
1995 		amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1996 	}
1997 
1998 	/*
1999 	 * At this point we rely on normal DRM teardown ordering:
2000 	 * no new user ioctls can access the global MMIO_REMAP BO
2001 	 * once TTM teardown begins.
2002 	 */
2003 	amdgpu_bo_unref(&adev->rmmio_remap.bo);
2004 	adev->rmmio_remap.bo = NULL;
2005 }
2006 
2007 static int amdgpu_ttm_buffer_entity_init(struct amdgpu_gtt_mgr *mgr,
2008 					 struct amdgpu_ttm_buffer_entity *entity,
2009 					 enum drm_sched_priority prio,
2010 					 struct drm_gpu_scheduler **scheds,
2011 					 int num_schedulers,
2012 					 u32 num_gart_windows)
2013 {
2014 	int i, r, num_pages;
2015 
2016 	r = drm_sched_entity_init(&entity->base, prio, scheds, num_schedulers, NULL);
2017 	if (r)
2018 		return r;
2019 
2020 	mutex_init(&entity->lock);
2021 
2022 	if (ARRAY_SIZE(entity->gart_window_offs) < num_gart_windows)
2023 		return -EINVAL;
2024 	if (num_gart_windows == 0)
2025 		return 0;
2026 
2027 	num_pages = num_gart_windows * AMDGPU_GTT_MAX_TRANSFER_SIZE;
2028 	r = amdgpu_gtt_mgr_alloc_entries(mgr, &entity->gart_node, num_pages,
2029 					 DRM_MM_INSERT_BEST);
2030 	if (r) {
2031 		drm_sched_entity_destroy(&entity->base);
2032 		return r;
2033 	}
2034 
2035 	for (i = 0; i < num_gart_windows; i++) {
2036 		entity->gart_window_offs[i] =
2037 			amdgpu_gtt_node_to_byte_offset(&entity->gart_node) +
2038 				i * AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE;
2039 	}
2040 
2041 	return 0;
2042 }
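
/*
 * Example of the window layout computed above: with num_gart_windows
 * equal to 2 and the GART node starting at byte offset N, the entity
 * ends up with
 *
 *	gart_window_offs[0] = N
 *	gart_window_offs[1] = N + AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE
 *
 * i.e. back-to-back GART apertures that buffer moves can map temporary
 * source/destination pages into.
 */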
2043 
2044 static void amdgpu_ttm_buffer_entity_fini(struct amdgpu_gtt_mgr *mgr,
2045 					  struct amdgpu_ttm_buffer_entity *entity)
2046 {
2047 	amdgpu_gtt_mgr_free_entries(mgr, &entity->gart_node);
2048 	drm_sched_entity_destroy(&entity->base);
2049 }
2050 
2051 /*
2052  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
2053  * gtt/vram related fields.
2054  *
2055  * This initializes all of the memory space pools that the TTM layer
2056  * will need such as the GTT space (system memory mapped to the device),
2057  * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
2058  * can be mapped per VMID.
2059  */
2060 int amdgpu_ttm_init(struct amdgpu_device *adev)
2061 {
2062 	uint64_t gtt_size;
2063 	int r;
2064 
2065 	dma_set_max_seg_size(adev->dev, UINT_MAX);
2066 	/* No other users of the address space, so set it to 0 */
2067 	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
2068 			       adev_to_drm(adev)->anon_inode->i_mapping,
2069 			       adev_to_drm(adev)->vma_offset_manager,
2070 			       (adev->need_swiotlb ?
2071 				TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
2072 			       (dma_addressing_limited(adev->dev) ?
2073 				TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
2074 			       TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
2075 	if (r) {
2076 		dev_err(adev->dev,
2077 			"failed initializing buffer object driver(%d).\n", r);
2078 		return r;
2079 	}
2080 
2081 	r = amdgpu_ttm_pools_init(adev);
2082 	if (r) {
2083 		dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
2084 		return r;
2085 	}
2086 	adev->mman.initialized = true;
2087 
2088 	if (!adev->gmc.is_app_apu) {
2089 		/* Initialize VRAM pool with all of VRAM divided into pages */
2090 		r = amdgpu_vram_mgr_init(adev);
2091 		if (r) {
2092 			dev_err(adev->dev, "Failed initializing VRAM heap.\n");
2093 			return r;
2094 		}
2095 	}
2096 
2097 	/* Change the size here instead of the init above so only lpfn is affected */
2098 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
2099 #ifdef CONFIG_64BIT
2100 #ifdef CONFIG_X86
2101 	if (adev->gmc.xgmi.connected_to_cpu)
2102 		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
2103 				adev->gmc.visible_vram_size);
2104 
2105 	else if (adev->gmc.is_app_apu)
2106 		DRM_DEBUG_DRIVER(
2107 			"No need to ioremap when real vram size is 0\n");
2108 	else
2109 #endif
2110 		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
2111 				adev->gmc.visible_vram_size);
2112 #endif
2113 
2114 	/*
2115 	 * The reserved VRAM for firmware must be pinned to the specified
2116 	 * place in VRAM, so reserve it early.
2117 	 */
2118 	r = amdgpu_ttm_fw_reserve_vram_init(adev);
2119 	if (r)
2120 		return r;
2121 
2122 	/*
2123 	 * The reserved VRAM for the driver must be pinned to a specific
2124 	 * location in VRAM, so reserve it early.
2125 	 */
2126 	r = amdgpu_ttm_drv_reserve_vram_init(adev);
2127 	if (r)
2128 		return r;
2129 
2130 	/*
2131 	 * Only NAVI10 and later ASICs support IP discovery.
2132 	 * If IP discovery is enabled, a block of memory should be
2133 	 * reserved for it.
2134 	 */
2135 	if (adev->discovery.reserve_tmr) {
2136 		r = amdgpu_ttm_reserve_tmr(adev);
2137 		if (r)
2138 			return r;
2139 	}
2140 
2141 	/* Allocate the memory required for VGA.
2142 	 * This is used for VGA emulation and pre-OS scanout buffers to
2143 	 * avoid display artifacts while transitioning between the pre-OS
2144 	 * environment and the driver.
2145 	 */
2146 	if (!adev->gmc.is_app_apu) {
2147 		r = amdgpu_bo_create_kernel_at(adev, 0,
2148 					       adev->mman.stolen_vga_size,
2149 					       &adev->mman.stolen_vga_memory,
2150 					       NULL);
2151 		if (r)
2152 			return r;
2153 
2154 		r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
2155 					       adev->mman.stolen_extended_size,
2156 					       &adev->mman.stolen_extended_memory,
2157 					       NULL);
2158 
2159 		if (r)
2160 			return r;
2161 
2162 		r = amdgpu_bo_create_kernel_at(adev,
2163 					       adev->mman.stolen_reserved_offset,
2164 					       adev->mman.stolen_reserved_size,
2165 					       &adev->mman.stolen_reserved_memory,
2166 					       NULL);
2167 		if (r)
2168 			return r;
2169 	} else {
2170 		DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
2171 	}
2172 
2173 	dev_info(adev->dev, " %uM of VRAM memory ready\n",
2174 		 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
2175 
2176 	/* Compute GTT size, either based on the TTM pages limit
2177 	 * or whatever the user passed at module init.
2178 	 */
2179 	gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
2180 	if (amdgpu_gtt_size != -1) {
2181 		uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
2182 
2183 		drm_warn(&adev->ddev,
2184 			"Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
2185 		if (gtt_size != configured_size)
2186 			drm_warn(&adev->ddev,
2187 				"GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
2188 				configured_size, gtt_size);
2189 
2190 		gtt_size = configured_size;
2191 	}
2192 
2193 	/* Initialize GTT memory pool */
2194 	r = amdgpu_gtt_mgr_init(adev, gtt_size);
2195 	if (r) {
2196 		dev_err(adev->dev, "Failed initializing GTT heap.\n");
2197 		return r;
2198 	}
2199 	dev_info(adev->dev, " %uM of GTT memory ready.\n",
2200 		 (unsigned int)(gtt_size / (1024 * 1024)));
2201 
2202 	if (adev->flags & AMD_IS_APU) {
2203 		if (adev->gmc.real_vram_size < gtt_size)
2204 			adev->apu_prefer_gtt = true;
2205 	}
2206 
2207 	/* Initialize doorbell pool on PCI BAR */
2208 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
2209 	if (r) {
2210 		dev_err(adev->dev, "Failed initializing doorbell heap.\n");
2211 		return r;
2212 	}
2213 
2214 	/* Create a doorbell page for kernel use */
2215 	r = amdgpu_doorbell_create_kernel_doorbells(adev);
2216 	if (r) {
2217 		dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
2218 		return r;
2219 	}
2220 
2221 	/* Initialize the MMIO-remap pool (a single 4K page) */
2222 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
2223 	if (r) {
2224 		dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
2225 		return r;
2226 	}
2227 
2228 	/* Allocate the singleton MMIO_REMAP BO if supported */
2229 	r = amdgpu_ttm_alloc_mmio_remap_bo(adev);
2230 	if (r)
2231 		return r;
2232 
2233 	/* Initialize preemptible memory pool */
2234 	r = amdgpu_preempt_mgr_init(adev);
2235 	if (r) {
2236 		dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
2237 		return r;
2238 	}
2239 
2240 	/* Initialize various on-chip memory pools */
2241 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
2242 	if (r) {
2243 		dev_err(adev->dev, "Failed initializing GDS heap.\n");
2244 		return r;
2245 	}
2246 
2247 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
2248 	if (r) {
2249 		dev_err(adev->dev, "Failed initializing gws heap.\n");
2250 		return r;
2251 	}
2252 
2253 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
2254 	if (r) {
2255 		dev_err(adev->dev, "Failed initializing oa heap.\n");
2256 		return r;
2257 	}
2258 	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
2259 				AMDGPU_GEM_DOMAIN_GTT,
2260 				&adev->mman.sdma_access_bo, NULL,
2261 				&adev->mman.sdma_access_ptr))
2262 		drm_warn(adev_to_drm(adev),
2263 				"Debug VRAM access will use slowpath MM access\n");
2264 
2265 	return 0;
2266 }
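
/*
 * For reference, the resource managers brought up above, in order: VRAM
 * (skipped on app APUs), the firmware/driver/TMR/stolen VRAM reservations,
 * GTT, doorbell, MMIO-remap, the preemptible manager and the on-chip
 * GDS/GWS/OA heaps, plus a small GTT BO used as a bounce buffer for
 * SDMA-based debug VRAM access.
 */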
2267 
2268 /*
2269  * amdgpu_ttm_fini - De-initialize the TTM memory pools
2270  */
2271 void amdgpu_ttm_fini(struct amdgpu_device *adev)
2272 {
2273 	int idx;
2274 
2275 	if (!adev->mman.initialized)
2276 		return;
2277 
2278 	amdgpu_ttm_pools_fini(adev);
2279 
2280 	amdgpu_ttm_training_reserve_vram_fini(adev);
2281 	/* return the stolen vga memory back to VRAM */
2282 	if (!adev->gmc.is_app_apu) {
2283 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
2284 		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
2285 		/* return the FW reserved memory back to VRAM */
2286 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
2287 				      NULL);
2288 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
2289 				      NULL);
2290 		if (adev->mman.stolen_reserved_size)
2291 			amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
2292 					      NULL, NULL);
2293 	}
2294 	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
2295 					&adev->mman.sdma_access_ptr);
2296 
2297 	amdgpu_ttm_free_mmio_remap_bo(adev);
2298 	amdgpu_ttm_fw_reserve_vram_fini(adev);
2299 	amdgpu_ttm_drv_reserve_vram_fini(adev);
2300 
2301 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
2302 
2303 		if (adev->mman.aper_base_kaddr)
2304 			iounmap(adev->mman.aper_base_kaddr);
2305 		adev->mman.aper_base_kaddr = NULL;
2306 
2307 		drm_dev_exit(idx);
2308 	}
2309 
2310 	if (!adev->gmc.is_app_apu)
2311 		amdgpu_vram_mgr_fini(adev);
2312 	amdgpu_gtt_mgr_fini(adev);
2313 	amdgpu_preempt_mgr_fini(adev);
2314 	amdgpu_doorbell_fini(adev);
2315 
2316 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2317 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2318 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2319 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
2320 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
2321 	ttm_device_fini(&adev->mman.bdev);
2322 	adev->mman.initialized = false;
2323 	dev_info(adev->dev, " ttm finalized\n");
2324 }
2325 
2326 /**
2327  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2328  *
2329  * @adev: amdgpu_device pointer
2330  * @enable: true when we can use buffer functions.
2331  *
2332  * Enable/disable use of buffer functions during suspend/resume. This should
2333  * only be called at bootup or when userspace isn't running.
2334  */
2335 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2336 {
2337 	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2338 	uint64_t size;
2339 	int r;
2340 
2341 	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2342 	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
2343 		return;
2344 
2345 	if (enable) {
2346 		struct amdgpu_ring *ring;
2347 		struct drm_gpu_scheduler *sched;
2348 
2349 		if (!adev->mman.buffer_funcs_ring || !adev->mman.buffer_funcs_ring->sched.ready) {
2350 			dev_warn(adev->dev, "Not enabling DMA transfers for in-kernel use\n");
2351 			return;
2352 		}
2353 
2354 		ring = adev->mman.buffer_funcs_ring;
2355 		sched = &ring->sched;
2356 		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
2357 						  &adev->mman.default_entity,
2358 						  DRM_SCHED_PRIORITY_KERNEL,
2359 						  &sched, 1, 0);
2360 		if (r < 0) {
2361 			dev_err(adev->dev,
2362 				"Failed setting up TTM entity (%d)\n", r);
2363 			return;
2364 		}
2365 
2366 		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
2367 						  &adev->mman.clear_entity,
2368 						  DRM_SCHED_PRIORITY_NORMAL,
2369 						  &sched, 1, 1);
2370 		if (r < 0) {
2371 			dev_err(adev->dev,
2372 				"Failed setting up TTM BO clear entity (%d)\n", r);
2373 			goto error_free_default_entity;
2374 		}
2375 
2376 		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
2377 						  &adev->mman.move_entity,
2378 						  DRM_SCHED_PRIORITY_NORMAL,
2379 						  &sched, 1, 2);
2380 		if (r < 0) {
2381 			dev_err(adev->dev,
2382 				"Failed setting up TTM BO move entity (%d)\n", r);
2383 			goto error_free_clear_entity;
2384 		}
2385 	} else {
2386 		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2387 					      &adev->mman.default_entity);
2388 		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2389 					      &adev->mman.clear_entity);
2390 		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2391 					      &adev->mman.move_entity);
2392 		/* Drop all the old fences since re-creating the scheduler entities
2393 		 * will allocate new contexts.
2394 		 */
2395 		ttm_resource_manager_cleanup(man);
2396 	}
2397 
2398 	/* This just adjusts TTM's idea of the size, which sets lpfn to the correct value */
2399 	if (enable)
2400 		size = adev->gmc.real_vram_size;
2401 	else
2402 		size = adev->gmc.visible_vram_size;
2403 	man->size = size;
2404 	adev->mman.buffer_funcs_enabled = enable;
2405 
2406 	return;
2407 
2408 error_free_clear_entity:
2409 	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2410 				      &adev->mman.clear_entity);
2411 error_free_default_entity:
2412 	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
2413 				      &adev->mman.default_entity);
2414 }
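
/*
 * Usage sketch: the DMA engine IP blocks are expected to flip this switch
 * once their rings are up and again before they go down, along the lines
 * of:
 *
 *	// after the SDMA rings become ready:
 *	amdgpu_ttm_set_buffer_funcs_status(adev, true);
 *	...
 *	// before the rings go down:
 *	amdgpu_ttm_set_buffer_funcs_status(adev, false);
 */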
2415 
2416 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
2417 				  struct amdgpu_ttm_buffer_entity *entity,
2418 				  unsigned int num_dw,
2419 				  struct dma_resv *resv,
2420 				  bool vm_needs_flush,
2421 				  struct amdgpu_job **job,
2422 				  u64 k_job_id)
2423 {
2424 	enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
2425 	int r;
2426 	r = amdgpu_job_alloc_with_ib(adev, &entity->base,
2427 				     AMDGPU_FENCE_OWNER_UNDEFINED,
2428 				     num_dw * 4, pool, job, k_job_id);
2429 	if (r)
2430 		return r;
2431 
2432 	if (vm_needs_flush) {
2433 		(*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2434 							adev->gmc.pdb0_bo :
2435 							adev->gart.bo);
2436 		(*job)->vm_needs_flush = true;
2437 	}
2438 	if (!resv)
2439 		return 0;
2440 
2441 	return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2442 						   DMA_RESV_USAGE_BOOKKEEP);
2443 }
2444 
2445 int amdgpu_copy_buffer(struct amdgpu_device *adev,
2446 		       struct amdgpu_ttm_buffer_entity *entity,
2447 		       uint64_t src_offset,
2448 		       uint64_t dst_offset, uint32_t byte_count,
2449 		       struct dma_resv *resv,
2450 		       struct dma_fence **fence,
2451 		       bool vm_needs_flush, uint32_t copy_flags)
2452 {
2453 	unsigned int num_loops, num_dw;
2454 	struct amdgpu_ring *ring;
2455 	struct amdgpu_job *job;
2456 	uint32_t max_bytes;
2457 	unsigned int i;
2458 	int r;
2459 
2460 	ring = adev->mman.buffer_funcs_ring;
2461 
2462 	if (!ring->sched.ready) {
2463 		dev_err(adev->dev,
2464 			"Trying to move memory with ring turned off.\n");
2465 		return -EINVAL;
2466 	}
2467 
2468 	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2469 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2470 	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2471 	r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
2472 				   resv, vm_needs_flush, &job,
2473 				   AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
2474 	if (r)
2475 		goto error_free;
2476 
2477 	for (i = 0; i < num_loops; i++) {
2478 		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2479 
2480 		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2481 					dst_offset, cur_size_in_bytes, copy_flags);
2482 		src_offset += cur_size_in_bytes;
2483 		dst_offset += cur_size_in_bytes;
2484 		byte_count -= cur_size_in_bytes;
2485 	}
2486 
2487 	*fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);
2488 
2489 	return 0;
2490 
2491 error_free:
2492 	amdgpu_job_free(job);
2493 	dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
2494 	return r;
2495 }
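
/*
 * Usage sketch (hypothetical addresses and BO): copy 1 MiB between two GPU
 * addresses, ordered after the BO's reservation object, then wait:
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev, &adev->mman.default_entity,
 *			       src_gpu_addr, dst_gpu_addr, SZ_1M,
 *			       bo->tbo.base.resv, &fence, false, 0);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */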
2496 
2497 static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
2498 			       struct amdgpu_ttm_buffer_entity *entity,
2499 			       uint32_t src_data,
2500 			       uint64_t dst_addr, uint32_t byte_count,
2501 			       struct dma_resv *resv,
2502 			       struct dma_fence **fence,
2503 			       bool vm_needs_flush,
2504 			       u64 k_job_id)
2505 {
2506 	unsigned int num_loops, num_dw;
2507 	struct amdgpu_job *job;
2508 	uint32_t max_bytes;
2509 	unsigned int i;
2510 	int r;
2511 
2512 	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2513 	num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2514 	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2515 	r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
2516 				   vm_needs_flush, &job, k_job_id);
2517 	if (r)
2518 		return r;
2519 
2520 	for (i = 0; i < num_loops; i++) {
2521 		uint32_t cur_size = min(byte_count, max_bytes);
2522 
2523 		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2524 					cur_size);
2525 
2526 		dst_addr += cur_size;
2527 		byte_count -= cur_size;
2528 	}
2529 
2530 	*fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);
2531 	return 0;
2532 }
2533 
2534 /**
2535  * amdgpu_ttm_clear_buffer - clear memory buffers
2536  * @bo: amdgpu buffer object
2537  * @resv: reservation object
2538  * @fence: dma_fence associated with the operation
2539  *
2540  * Clear the memory buffer resource.
2541  *
2542  * Returns:
2543  * 0 for success or a negative error code on failure.
2544  */
2545 int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
2546 			    struct dma_resv *resv,
2547 			    struct dma_fence **fence)
2548 {
2549 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2550 	struct amdgpu_ttm_buffer_entity *entity;
2551 	struct amdgpu_res_cursor cursor;
2552 	u64 addr;
2553 	int r = 0;
2554 
2555 	if (!adev->mman.buffer_funcs_enabled)
2556 		return -EINVAL;
2557 
2558 	if (!fence)
2559 		return -EINVAL;
2560 
2561 	entity = &adev->mman.clear_entity;
2562 	*fence = dma_fence_get_stub();
2563 
2564 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
2565 
2566 	mutex_lock(&entity->lock);
2567 	while (cursor.remaining) {
2568 		struct dma_fence *next = NULL;
2569 		u64 size;
2570 
2571 		if (amdgpu_res_cleared(&cursor)) {
2572 			amdgpu_res_next(&cursor, cursor.size);
2573 			continue;
2574 		}
2575 
2576 		/* Never clear more than 256MiB at once to avoid timeouts */
2577 		size = min(cursor.size, 256ULL << 20);
2578 
2579 		r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &cursor,
2580 					  0, false, &size, &addr);
2581 		if (r)
2582 			goto err;
2583 
2584 		r = amdgpu_ttm_fill_mem(adev, entity, 0, addr, size, resv,
2585 					&next, true,
2586 					AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
2587 		if (r)
2588 			goto err;
2589 
2590 		dma_fence_put(*fence);
2591 		*fence = next;
2592 
2593 		amdgpu_res_next(&cursor, size);
2594 	}
2595 err:
2596 	mutex_unlock(&entity->lock);
2597 
2598 	return r;
2599 }
2600 
2601 int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
2602 		       struct amdgpu_bo *bo,
2603 		       uint32_t src_data,
2604 		       struct dma_resv *resv,
2605 		       struct dma_fence **f,
2606 		       u64 k_job_id)
2607 {
2608 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2609 	struct dma_fence *fence = NULL;
2610 	struct amdgpu_res_cursor dst;
2611 	int r;
2612 
2613 	if (!adev->mman.buffer_funcs_enabled) {
2614 		dev_err(adev->dev,
2615 			"Trying to clear memory with ring turned off.\n");
2616 		return -EINVAL;
2617 	}
2618 
2619 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2620 
2621 	mutex_lock(&entity->lock);
2622 	while (dst.remaining) {
2623 		struct dma_fence *next;
2624 		uint64_t cur_size, to;
2625 
2626 		/* Never fill more than 256MiB at once to avoid timeouts */
2627 		cur_size = min(dst.size, 256ULL << 20);
2628 
2629 		r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
2630 					  0, false, &cur_size, &to);
2631 		if (r)
2632 			goto error;
2633 
2634 		r = amdgpu_ttm_fill_mem(adev, entity,
2635 					src_data, to, cur_size, resv,
2636 					&next, true, k_job_id);
2637 		if (r)
2638 			goto error;
2639 
2640 		dma_fence_put(fence);
2641 		fence = next;
2642 
2643 		amdgpu_res_next(&dst, cur_size);
2644 	}
2645 error:
2646 	mutex_unlock(&entity->lock);
2647 	if (f)
2648 		*f = dma_fence_get(fence);
2649 	dma_fence_put(fence);
2650 	return r;
2651 }
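
/*
 * Usage sketch: clear a whole BO to zero through the kernel clear entity
 * and wait for completion (error handling elided):
 *
 *	struct dma_fence *f = NULL;
 *
 *	if (!amdgpu_fill_buffer(&adev->mman.clear_entity, bo, 0, NULL,
 *				&f, AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER)) {
 *		dma_fence_wait(f, false);
 *		dma_fence_put(f);
 *	}
 */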
2652 
2653 /**
2654  * amdgpu_ttm_evict_resources - evict memory buffers
2655  * @adev: amdgpu device object
2656  * @mem_type: evicted BO's memory type
2657  *
2658  * Evicts all @mem_type buffers on the lru list of the memory type.
2659  *
2660  * Returns:
2661  * 0 for success or a negative error code on failure.
2662  */
2663 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2664 {
2665 	struct ttm_resource_manager *man;
2666 
2667 	switch (mem_type) {
2668 	case TTM_PL_VRAM:
2669 	case TTM_PL_TT:
2670 	case AMDGPU_PL_GWS:
2671 	case AMDGPU_PL_GDS:
2672 	case AMDGPU_PL_OA:
2673 		man = ttm_manager_type(&adev->mman.bdev, mem_type);
2674 		break;
2675 	default:
2676 		dev_err(adev->dev, "Trying to evict invalid memory type\n");
2677 		return -EINVAL;
2678 	}
2679 
2680 	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2681 }
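
/*
 * Example: a suspend-style path can force all VRAM buffers out to system
 * memory with
 *
 *	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
 */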
2682 
2683 #if defined(CONFIG_DEBUG_FS)
2684 
2685 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2686 {
2687 	struct amdgpu_device *adev = m->private;
2688 
2689 	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2690 }
2691 
2692 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2693 
2694 /*
2695  * amdgpu_ttm_vram_read - Linear read access to VRAM
2696  *
2697  * Accesses VRAM via MMIO for debugging purposes.
2698  */
2699 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2700 				    size_t size, loff_t *pos)
2701 {
2702 	struct amdgpu_device *adev = file_inode(f)->i_private;
2703 	ssize_t result = 0;
2704 
2705 	if (size & 0x3 || *pos & 0x3)
2706 		return -EINVAL;
2707 
2708 	if (*pos >= adev->gmc.mc_vram_size)
2709 		return -ENXIO;
2710 
2711 	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2712 	while (size) {
2713 		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2714 		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2715 
2716 		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2717 		if (copy_to_user(buf, value, bytes))
2718 			return -EFAULT;
2719 
2720 		result += bytes;
2721 		buf += bytes;
2722 		*pos += bytes;
2723 		size -= bytes;
2724 	}
2725 
2726 	return result;
2727 }
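
/*
 * Userspace sketch (the DRI minor number below is an example):
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);
 *	uint32_t dw;
 *
 *	pread(fd, &dw, sizeof(dw), 0x1000);
 *
 * Accesses must be 4-byte sized and 4-byte aligned, matching the checks at
 * the top of this function.
 */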
2728 
2729 /*
2730  * amdgpu_ttm_vram_write - Linear write access to VRAM
2731  *
2732  * Accesses VRAM via MMIO for debugging purposes.
2733  */
2734 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2735 				    size_t size, loff_t *pos)
2736 {
2737 	struct amdgpu_device *adev = file_inode(f)->i_private;
2738 	ssize_t result = 0;
2739 	int r;
2740 
2741 	if (size & 0x3 || *pos & 0x3)
2742 		return -EINVAL;
2743 
2744 	if (*pos >= adev->gmc.mc_vram_size)
2745 		return -ENXIO;
2746 
2747 	while (size) {
2748 		uint32_t value;
2749 
2750 		if (*pos >= adev->gmc.mc_vram_size)
2751 			return result;
2752 
2753 		r = get_user(value, (uint32_t *)buf);
2754 		if (r)
2755 			return r;
2756 
2757 		amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2758 
2759 		result += 4;
2760 		buf += 4;
2761 		*pos += 4;
2762 		size -= 4;
2763 	}
2764 
2765 	return result;
2766 }
2767 
2768 static const struct file_operations amdgpu_ttm_vram_fops = {
2769 	.owner = THIS_MODULE,
2770 	.read = amdgpu_ttm_vram_read,
2771 	.write = amdgpu_ttm_vram_write,
2772 	.llseek = default_llseek,
2773 };
2774 
2775 /*
2776  * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2777  *
2778  * This function is used to read memory that has been mapped to the
2779  * GPU and the known addresses are not physical addresses but instead
2780  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2781  */
2782 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2783 				 size_t size, loff_t *pos)
2784 {
2785 	struct amdgpu_device *adev = file_inode(f)->i_private;
2786 	struct iommu_domain *dom;
2787 	ssize_t result = 0;
2788 	int r;
2789 
2790 	/* retrieve the IOMMU domain if any for this device */
2791 	dom = iommu_get_domain_for_dev(adev->dev);
2792 
2793 	while (size) {
2794 		phys_addr_t addr = *pos & PAGE_MASK;
2795 		loff_t off = *pos & ~PAGE_MASK;
2796 		size_t bytes = PAGE_SIZE - off;
2797 		unsigned long pfn;
2798 		struct page *p;
2799 		void *ptr;
2800 
2801 		bytes = min(bytes, size);
2802 
2803 		/* Translate the bus address to a physical address.  If
2804 		 * the domain is NULL it means there is no IOMMU active
2805 		 * and the address translation is the identity
2806 		 */
2807 		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2808 
2809 		pfn = addr >> PAGE_SHIFT;
2810 		if (!pfn_valid(pfn))
2811 			return -EPERM;
2812 
2813 		p = pfn_to_page(pfn);
2814 		if (p->mapping != adev->mman.bdev.dev_mapping)
2815 			return -EPERM;
2816 
2817 		ptr = kmap_local_page(p);
2818 		r = copy_to_user(buf, ptr + off, bytes);
2819 		kunmap_local(ptr);
2820 		if (r)
2821 			return -EFAULT;
2822 
2823 		size -= bytes;
2824 		*pos += bytes;
2825 		result += bytes;
2826 	}
2827 
2828 	return result;
2829 }
2830 
2831 /*
2832  * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2833  *
2834  * This function is used to write memory that has been mapped to the
2835  * GPU and the known addresses are not physical addresses but instead
2836  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2837  */
2838 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2839 				 size_t size, loff_t *pos)
2840 {
2841 	struct amdgpu_device *adev = file_inode(f)->i_private;
2842 	struct iommu_domain *dom;
2843 	ssize_t result = 0;
2844 	int r;
2845 
2846 	dom = iommu_get_domain_for_dev(adev->dev);
2847 
2848 	while (size) {
2849 		phys_addr_t addr = *pos & PAGE_MASK;
2850 		loff_t off = *pos & ~PAGE_MASK;
2851 		size_t bytes = PAGE_SIZE - off;
2852 		unsigned long pfn;
2853 		struct page *p;
2854 		void *ptr;
2855 
2856 		bytes = min(bytes, size);
2857 
2858 		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2859 
2860 		pfn = addr >> PAGE_SHIFT;
2861 		if (!pfn_valid(pfn))
2862 			return -EPERM;
2863 
2864 		p = pfn_to_page(pfn);
2865 		if (p->mapping != adev->mman.bdev.dev_mapping)
2866 			return -EPERM;
2867 
2868 		ptr = kmap_local_page(p);
2869 		r = copy_from_user(ptr + off, buf, bytes);
2870 		kunmap_local(ptr);
2871 		if (r)
2872 			return -EFAULT;
2873 
2874 		size -= bytes;
2875 		*pos += bytes;
2876 		result += bytes;
2877 	}
2878 
2879 	return result;
2880 }
2881 
2882 static const struct file_operations amdgpu_ttm_iomem_fops = {
2883 	.owner = THIS_MODULE,
2884 	.read = amdgpu_iomem_read,
2885 	.write = amdgpu_iomem_write,
2886 	.llseek = default_llseek
2887 };
2888 
2889 #endif
2890 
2891 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2892 {
2893 #if defined(CONFIG_DEBUG_FS)
2894 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2895 	struct dentry *root = minor->debugfs_root;
2896 
2897 	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2898 				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2899 	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2900 			    &amdgpu_ttm_iomem_fops);
2901 	debugfs_create_file("ttm_page_pool", 0444, root, adev,
2902 			    &amdgpu_ttm_page_pool_fops);
2903 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2904 							     TTM_PL_VRAM),
2905 					    root, "amdgpu_vram_mm");
2906 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2907 							     TTM_PL_TT),
2908 					    root, "amdgpu_gtt_mm");
2909 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2910 							     AMDGPU_PL_GDS),
2911 					    root, "amdgpu_gds_mm");
2912 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2913 							     AMDGPU_PL_GWS),
2914 					    root, "amdgpu_gws_mm");
2915 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2916 							     AMDGPU_PL_OA),
2917 					    root, "amdgpu_oa_mm");
2918 
2919 #endif
2920 }
2921