xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c (revision c7062be3380cb20c8b1c4a935a13f1848ead0719)
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32 
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched/task.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/dma-buf.h>
42 #include <linux/sizes.h>
43 #include <linux/module.h>
44 
45 #include <drm/drm_drv.h>
46 #include <drm/ttm/ttm_bo.h>
47 #include <drm/ttm/ttm_placement.h>
48 #include <drm/ttm/ttm_range_manager.h>
49 #include <drm/ttm/ttm_tt.h>
50 
51 #include <drm/amdgpu_drm.h>
52 
53 #include "amdgpu.h"
54 #include "amdgpu_object.h"
55 #include "amdgpu_trace.h"
56 #include "amdgpu_amdkfd.h"
57 #include "amdgpu_sdma.h"
58 #include "amdgpu_ras.h"
59 #include "amdgpu_hmm.h"
60 #include "amdgpu_atomfirmware.h"
61 #include "amdgpu_res_cursor.h"
62 #include "bif/bif_4_1_d.h"
63 
64 MODULE_IMPORT_NS("DMA_BUF");
65 
66 #define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)
67 
68 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
69 				   struct ttm_tt *ttm,
70 				   struct ttm_resource *bo_mem);
71 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
72 				      struct ttm_tt *ttm);
73 
74 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
75 				    unsigned int type,
76 				    uint64_t size_in_page)
77 {
78 	return ttm_range_man_init(&adev->mman.bdev, type,
79 				  false, size_in_page);
80 }
81 
82 /**
83  * amdgpu_evict_flags - Compute placement flags
84  *
85  * @bo: The buffer object to evict
86  * @placement: Possible destination(s) for evicted BO
87  *
88  * Fill in placement data when ttm_bo_evict() is called
89  */
90 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
91 				struct ttm_placement *placement)
92 {
93 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
94 	struct amdgpu_bo *abo;
95 	static const struct ttm_place placements = {
96 		.fpfn = 0,
97 		.lpfn = 0,
98 		.mem_type = TTM_PL_SYSTEM,
99 		.flags = 0
100 	};
101 
102 	/* Don't handle scatter gather BOs */
103 	if (bo->type == ttm_bo_type_sg) {
104 		placement->num_placement = 0;
105 		return;
106 	}
107 
108 	/* Object isn't an AMDGPU object so ignore */
109 	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
110 		placement->placement = &placements;
111 		placement->num_placement = 1;
112 		return;
113 	}
114 
115 	abo = ttm_to_amdgpu_bo(bo);
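	/* Discardable BOs don't need their contents preserved, so report no
	 * placement instead of copying the data anywhere on eviction.
	 */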
116 	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
117 		placement->num_placement = 0;
118 		return;
119 	}
120 
121 	switch (bo->resource->mem_type) {
122 	case AMDGPU_PL_GDS:
123 	case AMDGPU_PL_GWS:
124 	case AMDGPU_PL_OA:
125 	case AMDGPU_PL_DOORBELL:
126 	case AMDGPU_PL_MMIO_REMAP:
127 		placement->num_placement = 0;
128 		return;
129 
130 	case TTM_PL_VRAM:
131 		if (!adev->mman.buffer_funcs_enabled) {
132 			/* Move to system memory */
133 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
134 
135 		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
136 			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
137 			   amdgpu_res_cpu_visible(adev, bo->resource)) {
138 
139 			/* Try evicting to the CPU inaccessible part of VRAM
140 			 * first, but only set GTT as busy placement, so this
141 			 * BO will be evicted to GTT rather than causing other
142 			 * BOs to be evicted from VRAM
143 			 */
144 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
145 							AMDGPU_GEM_DOMAIN_GTT |
146 							AMDGPU_GEM_DOMAIN_CPU);
147 			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
148 			abo->placements[0].lpfn = 0;
149 			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
150 		} else {
151 			/* Move to GTT memory */
152 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
153 							AMDGPU_GEM_DOMAIN_CPU);
154 		}
155 		break;
156 	case TTM_PL_TT:
157 	case AMDGPU_PL_PREEMPT:
158 	default:
159 		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
160 		break;
161 	}
162 	*placement = abo->placement;
163 }
164 
165 /**
166  * amdgpu_ttm_map_buffer - Map memory into the GART windows
167  * @bo: buffer object to map
168  * @mem: memory object to map
169  * @mm_cur: range to map
170  * @window: which GART window to use
171  * @ring: DMA ring to use for the copy
172  * @tmz: if we should setup a TMZ enabled mapping
173  * @size: in number of bytes to map, out number of bytes mapped
174  * @addr: resulting address inside the MC address space
175  *
176  * Setup one of the GART windows to access a specific piece of memory or return
177  * the physical address for local memory.
178  */
179 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
180 				 struct ttm_resource *mem,
181 				 struct amdgpu_res_cursor *mm_cur,
182 				 unsigned int window, struct amdgpu_ring *ring,
183 				 bool tmz, uint64_t *size, uint64_t *addr)
184 {
185 	struct amdgpu_device *adev = ring->adev;
186 	unsigned int offset, num_pages, num_dw, num_bytes;
187 	uint64_t src_addr, dst_addr;
188 	struct amdgpu_job *job;
189 	void *cpu_addr;
190 	uint64_t flags;
191 	int r;
192 
193 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
194 	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
195 
196 	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
197 		return -EINVAL;
198 
199 	/* Map only what can't be accessed directly */
200 	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
201 		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
202 			mm_cur->start;
203 		return 0;
204 	}
205 
206 
207 	/*
208 	 * If start begins at an offset inside the page, then adjust the size
209 	 * and addr accordingly
210 	 */
211 	offset = mm_cur->start & ~PAGE_MASK;
212 
213 	num_pages = PFN_UP(*size + offset);
214 	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
215 
216 	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
217 
218 	*addr = adev->gmc.gart_start;
219 	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
220 		AMDGPU_GPU_PAGE_SIZE;
221 	*addr += offset;
222 
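	/* The IB holds the copy command (num_dw dwords) followed by the GART PTEs
	 * (8 bytes per GPU page) for the pages being mapped; the SDMA copy below
	 * writes those PTEs into the GART table entries of this window.
	 */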
223 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
224 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
225 
226 	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
227 				     AMDGPU_FENCE_OWNER_UNDEFINED,
228 				     num_dw * 4 + num_bytes,
229 				     AMDGPU_IB_POOL_DELAYED, &job,
230 				     AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
231 	if (r)
232 		return r;
233 
234 	src_addr = num_dw * 4;
235 	src_addr += job->ibs[0].gpu_addr;
236 
237 	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
238 	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
239 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
240 				dst_addr, num_bytes, 0);
241 
242 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
243 	WARN_ON(job->ibs[0].length_dw > num_dw);
244 
245 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
246 	if (tmz)
247 		flags |= AMDGPU_PTE_TMZ;
248 
249 	cpu_addr = &job->ibs[0].ptr[num_dw];
250 
251 	if (mem->mem_type == TTM_PL_TT) {
252 		dma_addr_t *dma_addr;
253 
254 		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
255 		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
256 	} else {
257 		u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;
258 
259 		amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
260 	}
261 
262 	dma_fence_put(amdgpu_job_submit(job));
263 	return 0;
264 }
265 
266 /**
267  * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
268  * @adev: amdgpu device
269  * @src: buffer/address where to read from
270  * @dst: buffer/address where to write to
271  * @size: number of bytes to copy
272  * @tmz: if a secure copy should be used
273  * @resv: resv object to sync to
274  * @f: Returns the last fence if multiple jobs are submitted.
275  *
276  * The function copies @size bytes from {src->mem + src->offset} to
277  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
278  * move and different for a BO to BO copy.
279  *
280  */
281 __attribute__((nonnull))
282 static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
283 				      const struct amdgpu_copy_mem *src,
284 				      const struct amdgpu_copy_mem *dst,
285 				      uint64_t size, bool tmz,
286 				      struct dma_resv *resv,
287 				      struct dma_fence **f)
288 {
289 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
290 	struct amdgpu_res_cursor src_mm, dst_mm;
291 	struct dma_fence *fence = NULL;
292 	int r = 0;
293 	uint32_t copy_flags = 0;
294 	struct amdgpu_bo *abo_src, *abo_dst;
295 
296 	if (!adev->mman.buffer_funcs_enabled) {
297 		dev_err(adev->dev,
298 			"Trying to move memory with ring turned off.\n");
299 		return -EINVAL;
300 	}
301 
302 	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
303 	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
304 
305 	mutex_lock(&adev->mman.gtt_window_lock);
306 	while (src_mm.remaining) {
307 		uint64_t from, to, cur_size, tiling_flags;
308 		uint32_t num_type, data_format, max_com, write_compress_disable;
309 		struct dma_fence *next;
310 
311 		/* Never copy more than 256MiB at once to avoid a timeout */
312 		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
313 
314 		/* Map src to window 0 and dst to window 1. */
315 		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
316 					  0, ring, tmz, &cur_size, &from);
317 		if (r)
318 			goto error;
319 
320 		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
321 					  1, ring, tmz, &cur_size, &to);
322 		if (r)
323 			goto error;
324 
325 		abo_src = ttm_to_amdgpu_bo(src->bo);
326 		abo_dst = ttm_to_amdgpu_bo(dst->bo);
327 		if (tmz)
328 			copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
329 		if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
330 		    (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
331 			copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
332 		if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
333 		    (dst->mem->mem_type == TTM_PL_VRAM)) {
334 			copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
335 			amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
336 			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
337 			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
338 			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
339 			write_compress_disable =
340 				AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
341 			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
342 				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
343 				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
344 				       AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
345 							     write_compress_disable));
346 		}
347 
348 		r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
349 				       &next, false, true, copy_flags);
350 		if (r)
351 			goto error;
352 
353 		dma_fence_put(fence);
354 		fence = next;
355 
356 		amdgpu_res_next(&src_mm, cur_size);
357 		amdgpu_res_next(&dst_mm, cur_size);
358 	}
359 error:
360 	mutex_unlock(&adev->mman.gtt_window_lock);
361 	*f = fence;
362 	return r;
363 }
364 
365 /*
366  * amdgpu_move_blit - Copy an entire buffer to another buffer
367  *
368  * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
369  * help move buffers to and from VRAM.
370  */
371 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
372 			    bool evict,
373 			    struct ttm_resource *new_mem,
374 			    struct ttm_resource *old_mem)
375 {
376 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
377 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
378 	struct amdgpu_copy_mem src, dst;
379 	struct dma_fence *fence = NULL;
380 	int r;
381 
382 	src.bo = bo;
383 	dst.bo = bo;
384 	src.mem = old_mem;
385 	dst.mem = new_mem;
386 	src.offset = 0;
387 	dst.offset = 0;
388 
389 	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
390 				       new_mem->size,
391 				       amdgpu_bo_encrypted(abo),
392 				       bo->base.resv, &fence);
393 	if (r)
394 		goto error;
395 
396 	/* clear the space being freed */
397 	if (old_mem->mem_type == TTM_PL_VRAM &&
398 	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
399 		struct dma_fence *wipe_fence = NULL;
400 
401 		r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
402 				       false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
403 		if (r) {
404 			goto error;
405 		} else if (wipe_fence) {
406 			amdgpu_vram_mgr_set_cleared(bo->resource);
407 			dma_fence_put(fence);
408 			fence = wipe_fence;
409 		}
410 	}
411 
412 	/* Always block for VM page tables before committing the new location */
413 	if (bo->type == ttm_bo_type_kernel)
414 		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
415 	else
416 		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
417 	dma_fence_put(fence);
418 	return r;
419 
420 error:
421 	if (fence)
422 		dma_fence_wait(fence, false);
423 	dma_fence_put(fence);
424 	return r;
425 }
426 
427 /**
428  * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
429  * @adev: amdgpu device
430  * @res: the resource to check
431  *
432  * Returns: true if the full resource is CPU visible, false otherwise.
433  */
434 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
435 			    struct ttm_resource *res)
436 {
437 	struct amdgpu_res_cursor cursor;
438 
439 	if (!res)
440 		return false;
441 
442 	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
443 	    res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
444 	    res->mem_type == AMDGPU_PL_MMIO_REMAP)
445 		return true;
446 
447 	if (res->mem_type != TTM_PL_VRAM)
448 		return false;
449 
450 	amdgpu_res_first(res, 0, res->size, &cursor);
451 	while (cursor.remaining) {
452 		if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
453 			return false;
454 		amdgpu_res_next(&cursor, cursor.size);
455 	}
456 
457 	return true;
458 }
459 
460 /*
461  * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
462  *
463  * Called by amdgpu_bo_move()
464  */
465 static bool amdgpu_res_copyable(struct amdgpu_device *adev,
466 				struct ttm_resource *mem)
467 {
468 	if (!amdgpu_res_cpu_visible(adev, mem))
469 		return false;
470 
471 	/* ttm_resource_ioremap only supports contiguous memory */
472 	if (mem->mem_type == TTM_PL_VRAM &&
473 	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
474 		return false;
475 
476 	return true;
477 }
478 
479 /*
480  * amdgpu_bo_move - Move a buffer object to a new memory location
481  *
482  * Called by ttm_bo_handle_move_mem()
483  */
484 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
485 			  struct ttm_operation_ctx *ctx,
486 			  struct ttm_resource *new_mem,
487 			  struct ttm_place *hop)
488 {
489 	struct amdgpu_device *adev;
490 	struct amdgpu_bo *abo;
491 	struct ttm_resource *old_mem = bo->resource;
492 	int r;
493 
494 	if (new_mem->mem_type == TTM_PL_TT ||
495 	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
496 		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
497 		if (r)
498 			return r;
499 	}
500 
501 	abo = ttm_to_amdgpu_bo(bo);
502 	adev = amdgpu_ttm_adev(bo->bdev);
503 
504 	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
505 			 bo->ttm == NULL)) {
506 		amdgpu_bo_move_notify(bo, evict, new_mem);
507 		ttm_bo_move_null(bo, new_mem);
508 		return 0;
509 	}
510 	if (old_mem->mem_type == TTM_PL_SYSTEM &&
511 	    (new_mem->mem_type == TTM_PL_TT ||
512 	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
513 		amdgpu_bo_move_notify(bo, evict, new_mem);
514 		ttm_bo_move_null(bo, new_mem);
515 		return 0;
516 	}
517 	if ((old_mem->mem_type == TTM_PL_TT ||
518 	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
519 	    new_mem->mem_type == TTM_PL_SYSTEM) {
520 		r = ttm_bo_wait_ctx(bo, ctx);
521 		if (r)
522 			return r;
523 
524 		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
525 		amdgpu_bo_move_notify(bo, evict, new_mem);
526 		ttm_resource_free(bo, &bo->resource);
527 		ttm_bo_assign_mem(bo, new_mem);
528 		return 0;
529 	}
530 
531 	if (old_mem->mem_type == AMDGPU_PL_GDS ||
532 	    old_mem->mem_type == AMDGPU_PL_GWS ||
533 	    old_mem->mem_type == AMDGPU_PL_OA ||
534 	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
535 	    old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
536 	    new_mem->mem_type == AMDGPU_PL_GDS ||
537 	    new_mem->mem_type == AMDGPU_PL_GWS ||
538 	    new_mem->mem_type == AMDGPU_PL_OA ||
539 	    new_mem->mem_type == AMDGPU_PL_DOORBELL ||
540 	    new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
541 		/* Nothing to save here */
542 		amdgpu_bo_move_notify(bo, evict, new_mem);
543 		ttm_bo_move_null(bo, new_mem);
544 		return 0;
545 	}
546 
547 	if (bo->type == ttm_bo_type_device &&
548 	    new_mem->mem_type == TTM_PL_VRAM &&
549 	    old_mem->mem_type != TTM_PL_VRAM) {
550 		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
551 		 * accesses the BO after it's moved.
552 		 */
553 		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
554 	}
555 
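	/* SDMA can't blit directly between unmapped system memory and VRAM, so ask
	 * TTM to bounce the BO through a temporary GTT placement instead.
	 */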
556 	if (adev->mman.buffer_funcs_enabled &&
557 	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
558 	      new_mem->mem_type == TTM_PL_VRAM) ||
559 	     (old_mem->mem_type == TTM_PL_VRAM &&
560 	      new_mem->mem_type == TTM_PL_SYSTEM))) {
561 		hop->fpfn = 0;
562 		hop->lpfn = 0;
563 		hop->mem_type = TTM_PL_TT;
564 		hop->flags = TTM_PL_FLAG_TEMPORARY;
565 		return -EMULTIHOP;
566 	}
567 
568 	amdgpu_bo_move_notify(bo, evict, new_mem);
569 	if (adev->mman.buffer_funcs_enabled)
570 		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
571 	else
572 		r = -ENODEV;
573 
574 	if (r) {
575 		/* Check that all memory is CPU accessible */
576 		if (!amdgpu_res_copyable(adev, old_mem) ||
577 		    !amdgpu_res_copyable(adev, new_mem)) {
578 			pr_err("Move buffer fallback to memcpy unavailable\n");
579 			return r;
580 		}
581 
582 		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
583 		if (r)
584 			return r;
585 	}
586 
587 	/* update statistics after the move */
588 	if (evict)
589 		atomic64_inc(&adev->num_evictions);
590 	atomic64_add(bo->base.size, &adev->num_bytes_moved);
591 	return 0;
592 }
593 
594 /*
595  * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
596  *
597  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
598  */
599 static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
600 				     struct ttm_resource *mem)
601 {
602 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
603 
604 	switch (mem->mem_type) {
605 	case TTM_PL_SYSTEM:
606 		/* system memory */
607 		return 0;
608 	case TTM_PL_TT:
609 	case AMDGPU_PL_PREEMPT:
610 		break;
611 	case TTM_PL_VRAM:
612 		mem->bus.offset = mem->start << PAGE_SHIFT;
613 
614 		if (adev->mman.aper_base_kaddr &&
615 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
616 			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
617 					mem->bus.offset;
618 
619 		mem->bus.offset += adev->gmc.aper_base;
620 		mem->bus.is_iomem = true;
621 		break;
622 	case AMDGPU_PL_DOORBELL:
623 		mem->bus.offset = mem->start << PAGE_SHIFT;
624 		mem->bus.offset += adev->doorbell.base;
625 		mem->bus.is_iomem = true;
626 		mem->bus.caching = ttm_uncached;
627 		break;
628 	case AMDGPU_PL_MMIO_REMAP:
629 		mem->bus.offset = mem->start << PAGE_SHIFT;
630 		mem->bus.offset += adev->rmmio_remap.bus_addr;
631 		mem->bus.is_iomem = true;
632 		mem->bus.caching = ttm_uncached;
633 		break;
634 	default:
635 		return -EINVAL;
636 	}
637 	return 0;
638 }
639 
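/*
 * amdgpu_ttm_io_mem_pfn - Return the PFN that backs @page_offset of the BO
 *
 * Used by the TTM fault handler to map the VRAM, doorbell and MMIO-remap
 * apertures into CPU page tables.
 */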
640 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
641 					   unsigned long page_offset)
642 {
643 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
644 	struct amdgpu_res_cursor cursor;
645 
646 	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
647 			 &cursor);
648 
649 	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
650 		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
651 	else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
652 		return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
653 
654 	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
655 }
656 
657 /**
658  * amdgpu_ttm_domain_start - Returns GPU start address
659  * @adev: amdgpu device object
660  * @type: type of the memory
661  *
662  * Returns:
663  * GPU start address of a memory domain
664  */
665 
666 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
667 {
668 	switch (type) {
669 	case TTM_PL_TT:
670 		return adev->gmc.gart_start;
671 	case TTM_PL_VRAM:
672 		return adev->gmc.vram_start;
673 	}
674 
675 	return 0;
676 }
677 
678 /*
679  * TTM backend functions.
680  */
681 struct amdgpu_ttm_tt {
682 	struct ttm_tt	ttm;
683 	struct drm_gem_object	*gobj;
684 	u64			offset;
685 	uint64_t		userptr;
686 	struct task_struct	*usertask;
687 	uint32_t		userflags;
688 	bool			bound;
689 	int32_t			pool_id;
690 };
691 
692 #define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)
693 
694 #ifdef CONFIG_DRM_AMDGPU_USERPTR
695 /*
696  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
697  * memory and start HMM tracking CPU page table update
698  *
699  * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
700  * once afterwards to stop HMM tracking. Its the caller responsibility to ensure
701  * that range is a valid memory and it is freed too.
702  */
703 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
704 				 struct amdgpu_hmm_range *range)
705 {
706 	struct ttm_tt *ttm = bo->tbo.ttm;
707 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
708 	unsigned long start = gtt->userptr;
709 	struct vm_area_struct *vma;
710 	struct mm_struct *mm;
711 	bool readonly;
712 	int r = 0;
713 
714 	mm = bo->notifier.mm;
715 	if (unlikely(!mm)) {
716 		DRM_DEBUG_DRIVER("BO is not registered?\n");
717 		return -EFAULT;
718 	}
719 
720 	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
721 		return -ESRCH;
722 
723 	mmap_read_lock(mm);
724 	vma = vma_lookup(mm, start);
725 	if (unlikely(!vma)) {
726 		r = -EFAULT;
727 		goto out_unlock;
728 	}
729 	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
730 		vma->vm_file)) {
731 		r = -EPERM;
732 		goto out_unlock;
733 	}
734 
735 	readonly = amdgpu_ttm_tt_is_readonly(ttm);
736 	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
737 				       readonly, NULL, range);
738 out_unlock:
739 	mmap_read_unlock(mm);
740 	if (r)
741 		pr_debug("failed %d to get user pages 0x%lx\n", r, start);
742 
743 	mmput(mm);
744 
745 	return r;
746 }
747 
748 #endif
749 
750 /*
751  * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
752  *
753  * Called by amdgpu_cs_list_validate(). This creates the page list
754  * that backs user memory and will ultimately be mapped into the device
755  * address space.
756  */
757 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
758 {
759 	unsigned long i;
760 
761 	for (i = 0; i < ttm->num_pages; ++i)
762 		ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
763 }
764 
765 /*
766  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
767  *
768  * Called by amdgpu_ttm_backend_bind()
769  */
770 static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
771 				     struct ttm_tt *ttm)
772 {
773 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
774 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
775 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
776 	enum dma_data_direction direction = write ?
777 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
778 	int r;
779 
780 	/* Allocate an SG array and squash pages into it */
781 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
782 				      (u64)ttm->num_pages << PAGE_SHIFT,
783 				      GFP_KERNEL);
784 	if (r)
785 		goto release_sg;
786 
787 	/* Map SG to device */
788 	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
789 	if (r)
790 		goto release_sg_table;
791 
792 	/* convert SG to linear array of pages and dma addresses */
793 	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
794 				       ttm->num_pages);
795 
796 	return 0;
797 
798 release_sg_table:
799 	sg_free_table(ttm->sg);
800 release_sg:
801 	kfree(ttm->sg);
802 	ttm->sg = NULL;
803 	return r;
804 }
805 
806 /*
807  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
808  */
809 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
810 					struct ttm_tt *ttm)
811 {
812 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
813 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
814 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
815 	enum dma_data_direction direction = write ?
816 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
817 
818 	/* double check that we don't free the table twice */
819 	if (!ttm->sg || !ttm->sg->sgl)
820 		return;
821 
822 	/* unmap the pages mapped to the device */
823 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
824 	sg_free_table(ttm->sg);
825 }
826 
827 /*
828  * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
829  * MQDn+CtrlStackn where n is the number of XCCs per partition.
830  * pages_per_xcc is the size of one MQD+CtrlStack. The first page is the MQD
831  * and uses the default memory type (UC). The remaining pages_per_xcc - 1 pages
832  * are the control stack and have their memory type changed to NC.
833  */
834 static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
835 				struct ttm_tt *ttm, uint64_t flags)
836 {
837 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
838 	uint64_t total_pages = ttm->num_pages;
839 	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
840 	uint64_t page_idx, pages_per_xcc;
841 	int i;
842 	uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
843 
844 	pages_per_xcc = total_pages;
845 	do_div(pages_per_xcc, num_xcc);
846 
847 	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
848 		/* MQD page: use default flags */
849 		amdgpu_gart_bind(adev,
850 				gtt->offset + (page_idx << PAGE_SHIFT),
851 				1, &gtt->ttm.dma_address[page_idx], flags);
852 		/*
853 		 * Ctrl pages - modify the memory type to NC (ctrl_flags) from
854 		 * the second page of the BO onward.
855 		 */
856 		amdgpu_gart_bind(adev,
857 				gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
858 				pages_per_xcc - 1,
859 				&gtt->ttm.dma_address[page_idx + 1],
860 				ctrl_flags);
861 	}
862 }
863 
864 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
865 				 struct ttm_buffer_object *tbo,
866 				 uint64_t flags)
867 {
868 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
869 	struct ttm_tt *ttm = tbo->ttm;
870 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
871 
872 	if (amdgpu_bo_encrypted(abo))
873 		flags |= AMDGPU_PTE_TMZ;
874 
875 	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
876 		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
877 	} else {
878 		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
879 				 gtt->ttm.dma_address, flags);
880 	}
881 	gtt->bound = true;
882 }
883 
884 /*
885  * amdgpu_ttm_backend_bind - Bind GTT memory
886  *
887  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
888  * This handles binding GTT memory to the device address space.
889  */
890 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
891 				   struct ttm_tt *ttm,
892 				   struct ttm_resource *bo_mem)
893 {
894 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
895 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
896 	uint64_t flags;
897 	int r;
898 
899 	if (!bo_mem)
900 		return -EINVAL;
901 
902 	if (gtt->bound)
903 		return 0;
904 
905 	if (gtt->userptr) {
906 		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
907 		if (r) {
908 			dev_err(adev->dev, "failed to pin userptr\n");
909 			return r;
910 		}
911 	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
912 		if (!ttm->sg) {
913 			struct dma_buf_attachment *attach;
914 			struct sg_table *sgt;
915 
916 			attach = gtt->gobj->import_attach;
917 			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
918 			if (IS_ERR(sgt))
919 				return PTR_ERR(sgt);
920 
921 			ttm->sg = sgt;
922 		}
923 
924 		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
925 					       ttm->num_pages);
926 	}
927 
928 	if (!ttm->num_pages) {
929 		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
930 		     ttm->num_pages, bo_mem, ttm);
931 	}
932 
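	/* Resources that don't have a GART address yet (or aren't in TTM_PL_TT at
	 * all) are bound later, e.g. by amdgpu_ttm_alloc_gart() once GART space is
	 * allocated.
	 */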
933 	if (bo_mem->mem_type != TTM_PL_TT ||
934 	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
935 		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
936 		return 0;
937 	}
938 
939 	/* compute PTE flags relevant to this BO memory */
940 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
941 
942 	/* bind pages into GART page tables */
943 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
944 	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
945 			 gtt->ttm.dma_address, flags);
946 	gtt->bound = true;
947 	return 0;
948 }
949 
950 /*
951  * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
952  * through AGP or GART aperture.
953  *
954  * If bo is accessible through AGP aperture, then use AGP aperture
955  * to access bo; otherwise allocate logical space in GART aperture
956  * and map bo to GART aperture.
957  */
958 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
959 {
960 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
961 	struct ttm_operation_ctx ctx = { false, false };
962 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
963 	struct ttm_placement placement;
964 	struct ttm_place placements;
965 	struct ttm_resource *tmp;
966 	uint64_t addr, flags;
967 	int r;
968 
969 	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
970 		return 0;
971 
972 	addr = amdgpu_gmc_agp_addr(bo);
973 	if (addr != AMDGPU_BO_INVALID_OFFSET)
974 		return 0;
975 
976 	/* allocate GART space */
977 	placement.num_placement = 1;
978 	placement.placement = &placements;
979 	placements.fpfn = 0;
980 	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
981 	placements.mem_type = TTM_PL_TT;
982 	placements.flags = bo->resource->placement;
983 
984 	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
985 	if (unlikely(r))
986 		return r;
987 
988 	/* compute PTE flags for this buffer object */
989 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
990 
991 	/* Bind pages */
992 	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
993 	amdgpu_ttm_gart_bind(adev, bo, flags);
994 	amdgpu_gart_invalidate_tlb(adev);
995 	ttm_resource_free(bo, &bo->resource);
996 	ttm_bo_assign_mem(bo, tmp);
997 
998 	return 0;
999 }
1000 
1001 /*
1002  * amdgpu_ttm_recover_gart - Rebind GTT pages
1003  *
1004  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1005  * rebind GTT pages during a GPU reset.
1006  */
1007 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1008 {
1009 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1010 	uint64_t flags;
1011 
1012 	if (!tbo->ttm)
1013 		return;
1014 
1015 	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1016 	amdgpu_ttm_gart_bind(adev, tbo, flags);
1017 }
1018 
1019 /*
1020  * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1021  *
1022  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1023  * ttm_tt_destroy().
1024  */
1025 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1026 				      struct ttm_tt *ttm)
1027 {
1028 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1029 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1030 
1031 	/* if the pages have userptr pinning then clear that first */
1032 	if (gtt->userptr) {
1033 		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1034 	} else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
1035 		struct dma_buf_attachment *attach;
1036 
1037 		attach = gtt->gobj->import_attach;
1038 		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1039 		ttm->sg = NULL;
1040 	}
1041 
1042 	if (!gtt->bound)
1043 		return;
1044 
1045 	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1046 		return;
1047 
1048 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1049 	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1050 	gtt->bound = false;
1051 }
1052 
1053 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1054 				       struct ttm_tt *ttm)
1055 {
1056 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1057 
1058 	if (gtt->usertask)
1059 		put_task_struct(gtt->usertask);
1060 
1061 	ttm_tt_fini(&gtt->ttm);
1062 	kfree(gtt);
1063 }
1064 
1065 /**
1066  * amdgpu_ttm_mmio_remap_alloc_sgt - build an sg_table for MMIO_REMAP I/O aperture
1067  * @adev: amdgpu device providing the remap BAR base (adev->rmmio_remap.bus_addr)
1068  * @res:  TTM resource of the BO to export; expected to live in AMDGPU_PL_MMIO_REMAP
1069  * @dev:  importing device to map for (typically @attach->dev in dma-buf paths)
1070  * @dir:  DMA data direction for the importer (passed to dma_map_resource())
1071  * @sgt:  output; on success, set to a newly allocated sg_table describing the I/O span
1072  *
1073  * The HDP flush page (AMDGPU_PL_MMIO_REMAP) is a fixed hardware I/O window in a PCI
1074  * BAR—there are no struct pages to back it. Importers still need a DMA address list,
1075  * so we synthesize a minimal sg_table and populate it from dma_map_resource(), not
1076  * from pages. Using the common amdgpu_res_cursor walker keeps the offset/size math
1077  * consistent with other TTM/manager users.
1078  *
1079  * - @res is assumed to be a small, contiguous I/O region (typically a single 4 KiB
1080  *   page) in AMDGPU_PL_MMIO_REMAP. Callers should validate placement before calling.
1081  * - The sg entry is created with sg_set_page(sg, NULL, …) to reflect I/O space.
1082  * - The mapping uses DMA_ATTR_SKIP_CPU_SYNC because this is MMIO, not cacheable RAM.
1083  * - Peer reachability / p2pdma policy checks must be done by the caller.
1084  *
1085  * Return:
1086  * * 0 on success, with *@sgt set to a valid table that must be freed via
1087  *   amdgpu_ttm_mmio_remap_free_sgt().
1088  * * -ENOMEM if allocation of the sg_table fails.
1089  * * -EIO if dma_map_resource() fails.
1090  *
1091  */
1092 int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
1093 				    struct ttm_resource *res,
1094 				    struct device *dev,
1095 				    enum dma_data_direction dir,
1096 				    struct sg_table **sgt)
1097 {
1098 	struct amdgpu_res_cursor cur;
1099 	dma_addr_t dma;
1100 	resource_size_t phys;
1101 	struct scatterlist *sg;
1102 	int r;
1103 
1104 	/* Walk the resource once; MMIO_REMAP is expected to be contiguous+small. */
1105 	amdgpu_res_first(res, 0, res->size, &cur);
1106 
1107 	/* Translate byte offset in the remap window into a host physical BAR address. */
1108 	phys = adev->rmmio_remap.bus_addr + cur.start;
1109 
1110 	/* Build a single-entry sg_table mapped as I/O (no struct page backing). */
1111 	*sgt = kzalloc(sizeof(**sgt), GFP_KERNEL);
1112 	if (!*sgt)
1113 		return -ENOMEM;
1114 	r = sg_alloc_table(*sgt, 1, GFP_KERNEL);
1115 	if (r) {
1116 		kfree(*sgt);
1117 		return r;
1118 	}
1119 	sg = (*sgt)->sgl;
1120 	sg_set_page(sg, NULL, cur.size, 0);  /* I/O space: no struct page backing */
1121 
1122 	dma = dma_map_resource(dev, phys, cur.size, dir, DMA_ATTR_SKIP_CPU_SYNC);
1123 	if (dma_mapping_error(dev, dma)) {
1124 		sg_free_table(*sgt);
1125 		kfree(*sgt);
1126 		return -EIO;
1127 	}
1128 	sg_dma_address(sg) = dma;
1129 	sg_dma_len(sg) = cur.size;
1130 	return 0;
1131 }
1132 
1133 void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
1134 				    enum dma_data_direction dir,
1135 				    struct sg_table *sgt)
1136 {
1137 	struct scatterlist *sg = sgt->sgl;
1138 
1139 	dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
1140 			   dir, DMA_ATTR_SKIP_CPU_SYNC);
1141 	sg_free_table(sgt);
1142 	kfree(sgt);
1143 }
1144 
1145 /**
1146  * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1147  *
1148  * @bo: The buffer object to create a GTT ttm_tt object around
1149  * @page_flags: Page flags to be added to the ttm_tt object
1150  *
1151  * Called by ttm_tt_create().
1152  */
1153 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1154 					   uint32_t page_flags)
1155 {
1156 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1157 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1158 	struct amdgpu_ttm_tt *gtt;
1159 	enum ttm_caching caching;
1160 
1161 	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1162 	if (!gtt)
1163 		return NULL;
1164 
1165 	gtt->gobj = &bo->base;
1166 	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
1167 		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
1168 	else
1169 		gtt->pool_id = abo->xcp_id;
1170 
1171 	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1172 		caching = ttm_write_combined;
1173 	else
1174 		caching = ttm_cached;
1175 
1176 	/* allocate space for the uninitialized page entries */
1177 	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1178 		kfree(gtt);
1179 		return NULL;
1180 	}
1181 	return &gtt->ttm;
1182 }
1183 
1184 /*
1185  * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1186  *
1187  * Map the pages of a ttm_tt object to an address space visible
1188  * to the underlying device.
1189  */
1190 static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1191 				  struct ttm_tt *ttm,
1192 				  struct ttm_operation_ctx *ctx)
1193 {
1194 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1195 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1196 	struct ttm_pool *pool;
1197 	pgoff_t i;
1198 	int ret;
1199 
1200 	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1201 	if (gtt->userptr) {
1202 		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1203 		if (!ttm->sg)
1204 			return -ENOMEM;
1205 		return 0;
1206 	}
1207 
1208 	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1209 		return 0;
1210 
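	/* Prefer the pool of the BO's memory partition when per-partition pools
	 * exist; otherwise fall back to the global device pool.
	 */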
1211 	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1212 		pool = &adev->mman.ttm_pools[gtt->pool_id];
1213 	else
1214 		pool = &adev->mman.bdev.pool;
1215 	ret = ttm_pool_alloc(pool, ttm, ctx);
1216 	if (ret)
1217 		return ret;
1218 
1219 	for (i = 0; i < ttm->num_pages; ++i)
1220 		ttm->pages[i]->mapping = bdev->dev_mapping;
1221 
1222 	return 0;
1223 }
1224 
1225 /*
1226  * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1227  *
1228  * Unmaps pages of a ttm_tt object from the device address space and
1229  * unpopulates the page array backing it.
1230  */
1231 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1232 				     struct ttm_tt *ttm)
1233 {
1234 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1235 	struct amdgpu_device *adev;
1236 	struct ttm_pool *pool;
1237 	pgoff_t i;
1238 
1239 	amdgpu_ttm_backend_unbind(bdev, ttm);
1240 
1241 	if (gtt->userptr) {
1242 		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1243 		kfree(ttm->sg);
1244 		ttm->sg = NULL;
1245 		return;
1246 	}
1247 
1248 	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1249 		return;
1250 
1251 	for (i = 0; i < ttm->num_pages; ++i)
1252 		ttm->pages[i]->mapping = NULL;
1253 
1254 	adev = amdgpu_ttm_adev(bdev);
1255 
1256 	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1257 		pool = &adev->mman.ttm_pools[gtt->pool_id];
1258 	else
1259 		pool = &adev->mman.bdev.pool;
1260 
1261 	return ttm_pool_free(pool, ttm);
1262 }
1263 
1264 /**
1265  * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1266  * task
1267  *
1268  * @tbo: The ttm_buffer_object that contains the userptr
1269  * @user_addr:  The returned value
1270  */
1271 int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1272 			      uint64_t *user_addr)
1273 {
1274 	struct amdgpu_ttm_tt *gtt;
1275 
1276 	if (!tbo->ttm)
1277 		return -EINVAL;
1278 
1279 	gtt = (void *)tbo->ttm;
1280 	*user_addr = gtt->userptr;
1281 	return 0;
1282 }
1283 
1284 /**
1285  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1286  * task
1287  *
1288  * @bo: The ttm_buffer_object to bind this userptr to
1289  * @addr:  The address in the current tasks VM space to use
1290  * @flags: Requirements of userptr object.
1291  *
1292  * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
1293  * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
1294  * initialize GPU VM for a KFD process.
1295  */
1296 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1297 			      uint64_t addr, uint32_t flags)
1298 {
1299 	struct amdgpu_ttm_tt *gtt;
1300 
1301 	if (!bo->ttm) {
1302 		/* TODO: We want a separate TTM object type for userptrs */
1303 		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1304 		if (bo->ttm == NULL)
1305 			return -ENOMEM;
1306 	}
1307 
1308 	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1309 	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1310 
1311 	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1312 	gtt->userptr = addr;
1313 	gtt->userflags = flags;
1314 
1315 	if (gtt->usertask)
1316 		put_task_struct(gtt->usertask);
1317 	gtt->usertask = current->group_leader;
1318 	get_task_struct(gtt->usertask);
1319 
1320 	return 0;
1321 }
1322 
1323 /*
1324  * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1325  */
1326 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1327 {
1328 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1329 
1330 	if (gtt == NULL)
1331 		return NULL;
1332 
1333 	if (gtt->usertask == NULL)
1334 		return NULL;
1335 
1336 	return gtt->usertask->mm;
1337 }
1338 
1339 /*
1340  * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1341  * address range for the current task.
1342  *
1343  */
1344 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1345 				  unsigned long end, unsigned long *userptr)
1346 {
1347 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1348 	unsigned long size;
1349 
1350 	if (gtt == NULL || !gtt->userptr)
1351 		return false;
1352 
1353 	/* Return false if no part of the ttm_tt object lies within
1354 	 * the range
1355 	 */
1356 	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1357 	if (gtt->userptr > end || gtt->userptr + size <= start)
1358 		return false;
1359 
1360 	if (userptr)
1361 		*userptr = gtt->userptr;
1362 	return true;
1363 }
1364 
1365 /*
1366  * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1367  */
1368 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1369 {
1370 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1371 
1372 	if (gtt == NULL || !gtt->userptr)
1373 		return false;
1374 
1375 	return true;
1376 }
1377 
1378 /*
1379  * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1380  */
1381 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1382 {
1383 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1384 
1385 	if (gtt == NULL)
1386 		return false;
1387 
1388 	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1389 }
1390 
1391 /**
1392  * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1393  *
1394  * @ttm: The ttm_tt object to compute the flags for
1395  * @mem: The memory registry backing this ttm_tt object
1396  *
1397  * Figure out the flags to use for a VM PDE (Page Directory Entry).
1398  */
1399 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1400 {
1401 	uint64_t flags = 0;
1402 
1403 	if (mem && mem->mem_type != TTM_PL_SYSTEM)
1404 		flags |= AMDGPU_PTE_VALID;
1405 
1406 	if (mem && (mem->mem_type == TTM_PL_TT ||
1407 		    mem->mem_type == AMDGPU_PL_DOORBELL ||
1408 		    mem->mem_type == AMDGPU_PL_PREEMPT ||
1409 		    mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
1410 		flags |= AMDGPU_PTE_SYSTEM;
1411 
1412 		if (ttm && ttm->caching == ttm_cached)
1413 			flags |= AMDGPU_PTE_SNOOPED;
1414 	}
1415 
1416 	if (mem && mem->mem_type == TTM_PL_VRAM &&
1417 			mem->bus.caching == ttm_cached)
1418 		flags |= AMDGPU_PTE_SNOOPED;
1419 
1420 	return flags;
1421 }
1422 
1423 /**
1424  * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1425  *
1426  * @adev: amdgpu_device pointer
1427  * @ttm: The ttm_tt object to compute the flags for
1428  * @mem: The memory registry backing this ttm_tt object
1429  *
1430  * Figure out the flags to use for a VM PTE (Page Table Entry).
1431  */
1432 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1433 				 struct ttm_resource *mem)
1434 {
1435 	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1436 
1437 	flags |= adev->gart.gart_pte_flags;
1438 	flags |= AMDGPU_PTE_READABLE;
1439 
1440 	if (!amdgpu_ttm_tt_is_readonly(ttm))
1441 		flags |= AMDGPU_PTE_WRITEABLE;
1442 
1443 	return flags;
1444 }
1445 
1446 /*
1447  * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1448  * object.
1449  *
1450  * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1451  * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1452  * it can find space for a new object and by ttm_bo_force_list_clean() which is
1453  * used to clean out a memory space.
1454  */
1455 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1456 					    const struct ttm_place *place)
1457 {
1458 	struct dma_resv_iter resv_cursor;
1459 	struct dma_fence *f;
1460 
1461 	if (!amdgpu_bo_is_amdgpu_bo(bo))
1462 		return ttm_bo_eviction_valuable(bo, place);
1463 
1464 	/* Swapout? */
1465 	if (bo->resource->mem_type == TTM_PL_SYSTEM)
1466 		return true;
1467 
1468 	if (bo->type == ttm_bo_type_kernel &&
1469 	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1470 		return false;
1471 
1472 	/* If bo is a KFD BO, check if the bo belongs to the current process.
1473 	 * If true, then return false as any KFD process needs all its BOs to
1474 	 * be resident to run successfully
1475 	 */
1476 	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1477 				DMA_RESV_USAGE_BOOKKEEP, f) {
1478 		if (amdkfd_fence_check_mm(f, current->mm) &&
1479 		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
1480 			return false;
1481 	}
1482 
1483 	/* Preemptible BOs don't own system resources managed by the
1484 	 * driver (pages, VRAM, GART space). They point to resources
1485 	 * owned by someone else (e.g. pageable memory in user mode
1486 	 * or a DMABuf). They are used in a preemptible context so we
1487 	 * can guarantee no deadlocks and good QoS in case of MMU
1488 	 * notifiers or DMABuf move notifiers from the resource owner.
1489 	 */
1490 	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
1491 		return false;
1492 
1493 	if (bo->resource->mem_type == TTM_PL_TT &&
1494 	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1495 		return false;
1496 
1497 	return ttm_bo_eviction_valuable(bo, place);
1498 }
1499 
1500 static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1501 				      void *buf, size_t size, bool write)
1502 {
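	/* MM register access works on aligned 32-bit words, so partial head/tail
	 * bytes are handled by masking within the containing dword (with a
	 * read-modify-write for writes).
	 */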
1503 	while (size) {
1504 		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1505 		uint64_t bytes = 4 - (pos & 0x3);
1506 		uint32_t shift = (pos & 0x3) * 8;
1507 		uint32_t mask = 0xffffffff << shift;
1508 		uint32_t value = 0;
1509 
1510 		if (size < bytes) {
1511 			mask &= 0xffffffff >> (bytes - size) * 8;
1512 			bytes = size;
1513 		}
1514 
1515 		if (mask != 0xffffffff) {
1516 			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1517 			if (write) {
1518 				value &= ~mask;
1519 				value |= (*(uint32_t *)buf << shift) & mask;
1520 				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1521 			} else {
1522 				value = (value & mask) >> shift;
1523 				memcpy(buf, &value, bytes);
1524 			}
1525 		} else {
1526 			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1527 		}
1528 
1529 		pos += bytes;
1530 		buf += bytes;
1531 		size -= bytes;
1532 	}
1533 }
1534 
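/*
 * amdgpu_ttm_access_memory_sdma - access one page of VRAM via an SDMA copy
 *
 * Copies between the BO and the pre-allocated GTT bounce page
 * (mman.sdma_access_bo) instead of using MMIO accesses.
 */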
1535 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1536 					unsigned long offset, void *buf,
1537 					int len, int write)
1538 {
1539 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1540 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1541 	struct amdgpu_res_cursor src_mm;
1542 	struct amdgpu_job *job;
1543 	struct dma_fence *fence;
1544 	uint64_t src_addr, dst_addr;
1545 	unsigned int num_dw;
1546 	int r, idx;
1547 
1548 	if (len != PAGE_SIZE)
1549 		return -EINVAL;
1550 
1551 	if (!adev->mman.sdma_access_ptr)
1552 		return -EACCES;
1553 
1554 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1555 		return -ENODEV;
1556 
1557 	if (write)
1558 		memcpy(adev->mman.sdma_access_ptr, buf, len);
1559 
1560 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1561 	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
1562 				     AMDGPU_FENCE_OWNER_UNDEFINED,
1563 				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1564 				     &job,
1565 				     AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
1566 	if (r)
1567 		goto out;
1568 
1569 	mutex_lock(&adev->mman.gtt_window_lock);
1570 	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1571 	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
1572 		src_mm.start;
1573 	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1574 	if (write)
1575 		swap(src_addr, dst_addr);
1576 
1577 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
1578 				PAGE_SIZE, 0);
1579 
1580 	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
1581 	WARN_ON(job->ibs[0].length_dw > num_dw);
1582 
1583 	fence = amdgpu_job_submit(job);
1584 	mutex_unlock(&adev->mman.gtt_window_lock);
1585 
1586 	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1587 		r = -ETIMEDOUT;
1588 	dma_fence_put(fence);
1589 
1590 	if (!(r || write))
1591 		memcpy(buf, adev->mman.sdma_access_ptr, len);
1592 out:
1593 	drm_dev_exit(idx);
1594 	return r;
1595 }
1596 
1597 /**
1598  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1599  *
1600  * @bo:  The buffer object to read/write
1601  * @offset:  Offset into buffer object
1602  * @buf:  Secondary buffer to write/read from
1603  * @len: Length in bytes of access
1604  * @write:  true if writing
1605  *
1606  * This is used to access VRAM that backs a buffer object via MMIO
1607  * access for debugging purposes.
1608  */
1609 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1610 				    unsigned long offset, void *buf, int len,
1611 				    int write)
1612 {
1613 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1614 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1615 	struct amdgpu_res_cursor cursor;
1616 	int ret = 0;
1617 
1618 	if (bo->resource->mem_type != TTM_PL_VRAM)
1619 		return -EIO;
1620 
1621 	if (amdgpu_device_has_timeouts_enabled(adev) &&
1622 			!amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1623 		return len;
1624 
1625 	amdgpu_res_first(bo->resource, offset, len, &cursor);
1626 	while (cursor.remaining) {
1627 		size_t count, size = cursor.size;
1628 		loff_t pos = cursor.start;
1629 
1630 		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1631 		size -= count;
1632 		if (size) {
1633 			/* use MM access for the rest of VRAM and to handle unaligned addresses */
1634 			pos += count;
1635 			buf += count;
1636 			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1637 		}
1638 
1639 		ret += cursor.size;
1640 		buf += cursor.size;
1641 		amdgpu_res_next(&cursor, cursor.size);
1642 	}
1643 
1644 	return ret;
1645 }
1646 
1647 static void
1648 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1649 {
1650 	amdgpu_bo_move_notify(bo, false, NULL);
1651 }
1652 
1653 static struct ttm_device_funcs amdgpu_bo_driver = {
1654 	.ttm_tt_create = &amdgpu_ttm_tt_create,
1655 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
1656 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1657 	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1658 	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1659 	.evict_flags = &amdgpu_evict_flags,
1660 	.move = &amdgpu_bo_move,
1661 	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1662 	.release_notify = &amdgpu_bo_release_notify,
1663 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1664 	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1665 	.access_memory = &amdgpu_ttm_access_memory,
1666 };
1667 
1668 /*
1669  * Firmware Reservation functions
1670  */
1671 /**
1672  * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1673  *
1674  * @adev: amdgpu_device pointer
1675  *
1676  * free fw reserved vram if it has been reserved.
1677  */
1678 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1679 {
1680 	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1681 		NULL, &adev->mman.fw_vram_usage_va);
1682 }
1683 
1684 /*
1685  * Driver Reservation functions
1686  */
1687 /**
1688  * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
1689  *
1690  * @adev: amdgpu_device pointer
1691  *
1692  * free drv reserved vram if it has been reserved.
1693  */
1694 static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
1695 {
1696 	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
1697 						  NULL,
1698 						  &adev->mman.drv_vram_usage_va);
1699 }
1700 
1701 /**
1702  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1703  *
1704  * @adev: amdgpu_device pointer
1705  *
1706  * create bo vram reservation from fw.
1707  */
1708 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1709 {
1710 	uint64_t vram_size = adev->gmc.visible_vram_size;
1711 
1712 	adev->mman.fw_vram_usage_va = NULL;
1713 	adev->mman.fw_vram_usage_reserved_bo = NULL;
1714 
1715 	if (adev->mman.fw_vram_usage_size == 0 ||
1716 	    adev->mman.fw_vram_usage_size > vram_size)
1717 		return 0;
1718 
1719 	return amdgpu_bo_create_kernel_at(adev,
1720 					  adev->mman.fw_vram_usage_start_offset,
1721 					  adev->mman.fw_vram_usage_size,
1722 					  &adev->mman.fw_vram_usage_reserved_bo,
1723 					  &adev->mman.fw_vram_usage_va);
1724 }
1725 
1726 /**
1727  * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
1728  *
1729  * @adev: amdgpu_device pointer
1730  *
1731  * create bo vram reservation from drv.
1732  */
1733 static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
1734 {
1735 	u64 vram_size = adev->gmc.visible_vram_size;
1736 
1737 	adev->mman.drv_vram_usage_va = NULL;
1738 	adev->mman.drv_vram_usage_reserved_bo = NULL;
1739 
1740 	if (adev->mman.drv_vram_usage_size == 0 ||
1741 	    adev->mman.drv_vram_usage_size > vram_size)
1742 		return 0;
1743 
1744 	return amdgpu_bo_create_kernel_at(adev,
1745 					  adev->mman.drv_vram_usage_start_offset,
1746 					  adev->mman.drv_vram_usage_size,
1747 					  &adev->mman.drv_vram_usage_reserved_bo,
1748 					  &adev->mman.drv_vram_usage_va);
1749 }
1750 
1751 /*
1752  * Memory training reservation functions
1753  */
1754 
1755 /**
1756  * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1757  *
1758  * @adev: amdgpu_device pointer
1759  *
1760  * free memory training reserved vram if it has been reserved.
1761  */
1762 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1763 {
1764 	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1765 
1766 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1767 	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1768 	ctx->c2p_bo = NULL;
1769 
1770 	return 0;
1771 }
1772 
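/**
 * amdgpu_ttm_training_data_block_init - compute the memory training layout
 *
 * @adev: amdgpu_device pointer
 * @reserve_size: size of the TMR region reserved at the top of VRAM
 *
 * Place the c2p training data buffer 1MB-aligned just below the TMR
 * reservation at the top of VRAM, and the p2c buffer at the fixed
 * GDDR6_MEM_TRAINING_OFFSET from the end of VRAM.
 */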
1773 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
1774 						uint32_t reserve_size)
1775 {
1776 	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1777 
1778 	memset(ctx, 0, sizeof(*ctx));
1779 
1780 	ctx->c2p_train_data_offset =
1781 		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1782 	ctx->p2c_train_data_offset =
1783 		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1784 	ctx->train_data_size =
1785 		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1786 
	DRM_DEBUG("train_data_size:%llx, p2c_train_data_offset:%llx, c2p_train_data_offset:%llx.\n",
1788 			ctx->train_data_size,
1789 			ctx->p2c_train_data_offset,
1790 			ctx->c2p_train_data_offset);
1791 }
1792 
1793 /*
1794  * reserve TMR memory at the top of VRAM which holds
1795  * IP Discovery data and is protected by PSP.
1796  */
1797 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1798 {
1799 	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1800 	bool mem_train_support = false;
1801 	uint32_t reserve_size = 0;
1802 	int ret;
1803 
1804 	if (adev->bios && !amdgpu_sriov_vf(adev)) {
1805 		if (amdgpu_atomfirmware_mem_training_supported(adev))
1806 			mem_train_support = true;
1807 		else
			DRM_DEBUG("memory training is not supported!\n");
1809 	}
1810 
	/*
	 * Query the reserved TMR size through atom firmwareinfo for
	 * Sienna_Cichlid and onwards for all the use cases (IP discovery/G6
	 * memory training/profiling/diagnostic data, etc.)
	 *
	 * Otherwise, fall back to the legacy approach to check and reserve
	 * TMR blocks for IP discovery data and G6 memory training data
	 * respectively.
	 */
1818 	if (adev->bios)
1819 		reserve_size =
1820 			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1821 
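	/*
	 * Without a BIOS, GC 9.4.3/9.4.4/9.5.0 still need at least 280MB
	 * reserved at the top of VRAM for firmware use.
	 */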
1822 	if (!adev->bios &&
1823 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1824 	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1825 	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
1826 		reserve_size = max(reserve_size, (uint32_t)280 << 20);
1827 	else if (!reserve_size)
1828 		reserve_size = DISCOVERY_TMR_OFFSET;
1829 
1830 	if (mem_train_support) {
1831 		/* reserve vram for mem train according to TMR location */
1832 		amdgpu_ttm_training_data_block_init(adev, reserve_size);
1833 		ret = amdgpu_bo_create_kernel_at(adev,
1834 						 ctx->c2p_train_data_offset,
1835 						 ctx->train_data_size,
1836 						 &ctx->c2p_bo,
1837 						 NULL);
1838 		if (ret) {
1839 			dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
1840 			amdgpu_ttm_training_reserve_vram_fini(adev);
1841 			return ret;
1842 		}
1843 		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1844 	}
1845 
1846 	ret = amdgpu_bo_create_kernel_at(
1847 		adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
1848 		&adev->mman.fw_reserved_memory, NULL);
1849 	if (ret) {
1850 		dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
1851 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
1852 				      NULL);
1853 		return ret;
1854 	}
1855 
1856 	return 0;
1857 }
1858 
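/*
 * On app APUs with memory partitions, create one TTM pool per partition so
 * that page allocations come from the partition's NUMA node.
 */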
1859 static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
1860 {
1861 	int i;
1862 
1863 	if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
1864 		return 0;
1865 
1866 	adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
1867 				       sizeof(*adev->mman.ttm_pools),
1868 				       GFP_KERNEL);
1869 	if (!adev->mman.ttm_pools)
1870 		return -ENOMEM;
1871 
1872 	for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
1873 		ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
1874 			      adev->gmc.mem_partitions[i].numa.node,
1875 			      TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
1876 	}
1877 	return 0;
1878 }
1879 
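/* Tear down the per-partition pools created by amdgpu_ttm_pools_init() */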
1880 static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
1881 {
1882 	int i;
1883 
1884 	if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
1885 		return;
1886 
1887 	for (i = 0; i < adev->gmc.num_mem_partitions; i++)
1888 		ttm_pool_fini(&adev->mman.ttm_pools[i]);
1889 
1890 	kfree(adev->mman.ttm_pools);
1891 	adev->mman.ttm_pools = NULL;
1892 }
1893 
1894 /**
1895  * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
1896  * @adev: amdgpu device
1897  *
 * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
 * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
 * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K).
 *
 * The BO is created as a regular GEM object via amdgpu_bo_create(), then
 * reserved and pinned at the TTM level (ttm_bo_pin()) so it can never be
 * migrated or evicted. No CPU mapping is established here.
1906  *
1907  * Return:
1908  *  * 0 on success or intentional skip (feature not present/unsupported)
1909  *  * negative errno on allocation failure
1910  */
1911 static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
1912 {
1913 	struct amdgpu_bo_param bp;
1914 	int r;
1915 
1916 	/* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
1917 	if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
1918 		return 0;
1919 
1920 	memset(&bp, 0, sizeof(bp));
1921 
1922 	/* Create exactly one GEM BO in the MMIO_REMAP domain. */
1923 	bp.type        = ttm_bo_type_device;          /* userspace-mappable GEM */
1924 	bp.size        = AMDGPU_GPU_PAGE_SIZE;        /* 4K */
1925 	bp.byte_align  = AMDGPU_GPU_PAGE_SIZE;
1926 	bp.domain      = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
1927 	bp.flags       = 0;
1928 	bp.resv        = NULL;
1929 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
1930 
1931 	r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
1932 	if (r)
1933 		return r;
1934 
1935 	r = amdgpu_bo_reserve(adev->rmmio_remap.bo, true);
1936 	if (r)
1937 		goto err_unref;
1938 
	/*
	 * MMIO_REMAP is a fixed I/O placement (AMDGPU_PL_MMIO_REMAP).
	 * Use a TTM-level pin so the BO cannot be evicted or migrated,
	 * independent of GEM domains. This enforces the "fixed I/O window"
	 * semantics.
	 */
1945 	ttm_bo_pin(&adev->rmmio_remap.bo->tbo);
1946 
1947 	amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1948 	return 0;
1949 
1950 err_unref:
	amdgpu_bo_unref(&adev->rmmio_remap.bo);
1954 	return r;
1955 }
1956 
1957 /**
1958  * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
1959  * @adev: amdgpu device
1960  *
1961  * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
1962  * amdgpu_ttm_mmio_remap_bo_init().
1963  */
1964 static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
1965 {
1966 	struct amdgpu_bo *bo = adev->rmmio_remap.bo;
1967 
1968 	if (!bo)
		return;
1970 
	if (!amdgpu_bo_reserve(bo, true)) {
		ttm_bo_unpin(&bo->tbo);
		amdgpu_bo_unreserve(bo);
	}
1975 	amdgpu_bo_unref(&adev->rmmio_remap.bo);
1976 	adev->rmmio_remap.bo = NULL;
1977 }
1978 
1979 /*
1980  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1981  * gtt/vram related fields.
1982  *
1983  * This initializes all of the memory space pools that the TTM layer
1984  * will need such as the GTT space (system memory mapped to the device),
1985  * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1986  * can be mapped per VMID.
1987  */
1988 int amdgpu_ttm_init(struct amdgpu_device *adev)
1989 {
1990 	uint64_t gtt_size;
1991 	int r;
1992 
1993 	mutex_init(&adev->mman.gtt_window_lock);
1994 
1995 	dma_set_max_seg_size(adev->dev, UINT_MAX);
	/* No other users of the address space, so set it to 0 */
1997 	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1998 			       adev_to_drm(adev)->anon_inode->i_mapping,
1999 			       adev_to_drm(adev)->vma_offset_manager,
2000 			       (adev->need_swiotlb ?
2001 				TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
2002 			       (dma_addressing_limited(adev->dev) ?
2003 				TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
2004 			       TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
2005 	if (r) {
2006 		dev_err(adev->dev,
2007 			"failed initializing buffer object driver(%d).\n", r);
2008 		return r;
2009 	}
2010 
2011 	r = amdgpu_ttm_pools_init(adev);
2012 	if (r) {
2013 		dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
2014 		return r;
2015 	}
2016 	adev->mman.initialized = true;
2017 
2018 	if (!adev->gmc.is_app_apu) {
2019 		/* Initialize VRAM pool with all of VRAM divided into pages */
2020 		r = amdgpu_vram_mgr_init(adev);
2021 		if (r) {
2022 			dev_err(adev->dev, "Failed initializing VRAM heap.\n");
2023 			return r;
2024 		}
2025 	}
2026 
2027 	/* Change the size here instead of the init above so only lpfn is affected */
2028 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
2029 #ifdef CONFIG_64BIT
2030 #ifdef CONFIG_X86
2031 	if (adev->gmc.xgmi.connected_to_cpu)
2032 		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
2033 				adev->gmc.visible_vram_size);
2034 
2035 	else if (adev->gmc.is_app_apu)
2036 		DRM_DEBUG_DRIVER(
2037 			"No need to ioremap when real vram size is 0\n");
2038 	else
2039 #endif
2040 		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
2041 				adev->gmc.visible_vram_size);
2042 #endif
2043 
	/*
	 * The reserved VRAM for firmware must be pinned to the specified
	 * place in VRAM, so reserve it early.
	 */
2048 	r = amdgpu_ttm_fw_reserve_vram_init(adev);
2049 	if (r)
2050 		return r;
2051 
2052 	/*
2053 	 * The reserved VRAM for the driver must be pinned to a specific
2054 	 * location in VRAM, so reserve it early.
2055 	 */
2056 	r = amdgpu_ttm_drv_reserve_vram_init(adev);
2057 	if (r)
2058 		return r;
2059 
2060 	/*
2061 	 * only NAVI10 and later ASICs support IP discovery.
2062 	 * If IP discovery is enabled, a block of memory should be
2063 	 * reserved for it.
2064 	 */
2065 	if (adev->discovery.reserve_tmr) {
2066 		r = amdgpu_ttm_reserve_tmr(adev);
2067 		if (r)
2068 			return r;
2069 	}
2070 
	/* Allocate memory as required for VGA.
2072 	 * This is used for VGA emulation and pre-OS scanout buffers to
2073 	 * avoid display artifacts while transitioning between pre-OS
2074 	 * and driver.
2075 	 */
2076 	if (!adev->gmc.is_app_apu) {
2077 		r = amdgpu_bo_create_kernel_at(adev, 0,
2078 					       adev->mman.stolen_vga_size,
2079 					       &adev->mman.stolen_vga_memory,
2080 					       NULL);
2081 		if (r)
2082 			return r;
2083 
2084 		r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
2085 					       adev->mman.stolen_extended_size,
2086 					       &adev->mman.stolen_extended_memory,
2087 					       NULL);
2088 
2089 		if (r)
2090 			return r;
2091 
2092 		r = amdgpu_bo_create_kernel_at(adev,
2093 					       adev->mman.stolen_reserved_offset,
2094 					       adev->mman.stolen_reserved_size,
2095 					       &adev->mman.stolen_reserved_memory,
2096 					       NULL);
2097 		if (r)
2098 			return r;
2099 	} else {
2100 		DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
2101 	}
2102 
2103 	dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
2104 		 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
2105 
2106 	/* Compute GTT size, either based on TTM limit
2107 	 * or whatever the user passed on module init.
2108 	 */
2109 	gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
2110 	if (amdgpu_gtt_size != -1) {
2111 		uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
2112 
2113 		drm_warn(&adev->ddev,
2114 			"Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
2115 		if (gtt_size != configured_size)
2116 			drm_warn(&adev->ddev,
2117 				"GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
2118 				configured_size, gtt_size);
2119 
2120 		gtt_size = configured_size;
2121 	}
2122 
2123 	/* Initialize GTT memory pool */
2124 	r = amdgpu_gtt_mgr_init(adev, gtt_size);
2125 	if (r) {
2126 		dev_err(adev->dev, "Failed initializing GTT heap.\n");
2127 		return r;
2128 	}
2129 	dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
2130 		 (unsigned int)(gtt_size / (1024 * 1024)));
2131 
2132 	if (adev->flags & AMD_IS_APU) {
2133 		if (adev->gmc.real_vram_size < gtt_size)
2134 			adev->apu_prefer_gtt = true;
2135 	}
2136 
2137 	/* Initialize doorbell pool on PCI BAR */
2138 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
2139 	if (r) {
2140 		dev_err(adev->dev, "Failed initializing doorbell heap.\n");
2141 		return r;
2142 	}
2143 
	/* Create a doorbell page for kernel usage */
2145 	r = amdgpu_doorbell_create_kernel_doorbells(adev);
2146 	if (r) {
2147 		dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
2148 		return r;
2149 	}
2150 
2151 	/* Initialize MMIO-remap pool (single page 4K) */
2152 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
2153 	if (r) {
2154 		dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
2155 		return r;
2156 	}
2157 
2158 	/* Allocate the singleton MMIO_REMAP BO (4K) if supported */
2159 	r = amdgpu_ttm_mmio_remap_bo_init(adev);
2160 	if (r)
2161 		return r;
2162 
2163 	/* Initialize preemptible memory pool */
2164 	r = amdgpu_preempt_mgr_init(adev);
2165 	if (r) {
2166 		dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
2167 		return r;
2168 	}
2169 
2170 	/* Initialize various on-chip memory pools */
2171 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
2172 	if (r) {
2173 		dev_err(adev->dev, "Failed initializing GDS heap.\n");
2174 		return r;
2175 	}
2176 
2177 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
2178 	if (r) {
2179 		dev_err(adev->dev, "Failed initializing gws heap.\n");
2180 		return r;
2181 	}
2182 
2183 	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
2184 	if (r) {
2185 		dev_err(adev->dev, "Failed initializing oa heap.\n");
2186 		return r;
2187 	}
2188 	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
2189 				AMDGPU_GEM_DOMAIN_GTT,
2190 				&adev->mman.sdma_access_bo, NULL,
2191 				&adev->mman.sdma_access_ptr))
2192 		DRM_WARN("Debug VRAM access will use slowpath MM access\n");
2193 
2194 	return 0;
2195 }
2196 
2197 /*
2198  * amdgpu_ttm_fini - De-initialize the TTM memory pools
2199  */
2200 void amdgpu_ttm_fini(struct amdgpu_device *adev)
2201 {
2202 	int idx;
2203 
2204 	if (!adev->mman.initialized)
2205 		return;
2206 
2207 	amdgpu_ttm_pools_fini(adev);
2208 
2209 	amdgpu_ttm_training_reserve_vram_fini(adev);
2210 	/* return the stolen vga memory back to VRAM */
2211 	if (!adev->gmc.is_app_apu) {
2212 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
2213 		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
2214 		/* return the FW reserved memory back to VRAM */
2215 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
2216 				      NULL);
2217 		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
2218 				      NULL);
2219 		if (adev->mman.stolen_reserved_size)
2220 			amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
2221 					      NULL, NULL);
2222 	}
2223 	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
2224 					&adev->mman.sdma_access_ptr);
2225 
2226 	amdgpu_ttm_mmio_remap_bo_fini(adev);
2227 	amdgpu_ttm_fw_reserve_vram_fini(adev);
2228 	amdgpu_ttm_drv_reserve_vram_fini(adev);
2229 
2230 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
2231 
2232 		if (adev->mman.aper_base_kaddr)
2233 			iounmap(adev->mman.aper_base_kaddr);
2234 		adev->mman.aper_base_kaddr = NULL;
2235 
2236 		drm_dev_exit(idx);
2237 	}
2238 
2239 	if (!adev->gmc.is_app_apu)
2240 		amdgpu_vram_mgr_fini(adev);
2241 	amdgpu_gtt_mgr_fini(adev);
2242 	amdgpu_preempt_mgr_fini(adev);
2243 	amdgpu_doorbell_fini(adev);
2244 
2245 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2246 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2247 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2248 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
2249 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
2250 	ttm_device_fini(&adev->mman.bdev);
2251 	adev->mman.initialized = false;
2252 	dev_info(adev->dev, "amdgpu: ttm finalized\n");
2253 }
2254 
2255 /**
2256  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2257  *
2258  * @adev: amdgpu_device pointer
2259  * @enable: true when we can use buffer functions.
2260  *
2261  * Enable/disable use of buffer functions during suspend/resume. This should
2262  * only be called at bootup or when userspace isn't running.
2263  */
2264 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2265 {
2266 	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2267 	uint64_t size;
2268 	int r;
2269 
2270 	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2271 	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
2272 		return;
2273 
2274 	if (enable) {
2275 		struct amdgpu_ring *ring;
2276 		struct drm_gpu_scheduler *sched;
2277 
2278 		ring = adev->mman.buffer_funcs_ring;
2279 		sched = &ring->sched;
2280 		r = drm_sched_entity_init(&adev->mman.high_pr,
2281 					  DRM_SCHED_PRIORITY_KERNEL, &sched,
2282 					  1, NULL);
2283 		if (r) {
2284 			dev_err(adev->dev,
2285 				"Failed setting up TTM BO move entity (%d)\n",
2286 				r);
2287 			return;
2288 		}
2289 
2290 		r = drm_sched_entity_init(&adev->mman.low_pr,
2291 					  DRM_SCHED_PRIORITY_NORMAL, &sched,
2292 					  1, NULL);
2293 		if (r) {
2294 			dev_err(adev->dev,
2295 				"Failed setting up TTM BO move entity (%d)\n",
2296 				r);
2297 			goto error_free_entity;
2298 		}
2299 	} else {
2300 		drm_sched_entity_destroy(&adev->mman.high_pr);
2301 		drm_sched_entity_destroy(&adev->mman.low_pr);
2302 		/* Drop all the old fences since re-creating the scheduler entities
2303 		 * will allocate new contexts.
2304 		 */
2305 		ttm_resource_manager_cleanup(man);
2306 	}
2307 
	/* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
2309 	if (enable)
2310 		size = adev->gmc.real_vram_size;
2311 	else
2312 		size = adev->gmc.visible_vram_size;
2313 	man->size = size;
2314 	adev->mman.buffer_funcs_enabled = enable;
2315 
2316 	return;
2317 
2318 error_free_entity:
2319 	drm_sched_entity_destroy(&adev->mman.high_pr);
2320 }
2321 
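/*
 * Common helper for the copy/fill paths: allocate a job with an IB large
 * enough for @num_dw dwords, pick the scheduler entity (low priority when
 * @delayed, high priority otherwise) and add @resv's fences as dependencies.
 */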
2322 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
2323 				  bool direct_submit,
2324 				  unsigned int num_dw,
2325 				  struct dma_resv *resv,
2326 				  bool vm_needs_flush,
2327 				  struct amdgpu_job **job,
2328 				  bool delayed, u64 k_job_id)
2329 {
2330 	enum amdgpu_ib_pool_type pool = direct_submit ?
2331 		AMDGPU_IB_POOL_DIRECT :
2332 		AMDGPU_IB_POOL_DELAYED;
2333 	int r;
2334 	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
2335 						    &adev->mman.high_pr;
2336 	r = amdgpu_job_alloc_with_ib(adev, entity,
2337 				     AMDGPU_FENCE_OWNER_UNDEFINED,
2338 				     num_dw * 4, pool, job, k_job_id);
2339 	if (r)
2340 		return r;
2341 
2342 	if (vm_needs_flush) {
2343 		(*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2344 							adev->gmc.pdb0_bo :
2345 							adev->gart.bo);
2346 		(*job)->vm_needs_flush = true;
2347 	}
2348 	if (!resv)
2349 		return 0;
2350 
2351 	return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2352 						   DMA_RESV_USAGE_BOOKKEEP);
2353 }
2354 
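/**
 * amdgpu_copy_buffer - schedule a GPU copy between two locations
 *
 * @ring: ring to submit the copy on
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: reservation object to sync to, may be NULL
 * @fence: returned fence signaling completion of the copy
 * @direct_submit: submit directly to the ring, bypassing the scheduler
 * @vm_needs_flush: true when a VM flush is needed before the copy
 * @copy_flags: flags modifying the copy operation
 *
 * The copy is split into chunks of at most copy_max_bytes, with one copy
 * command emitted per chunk.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */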
2355 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2356 		       uint64_t dst_offset, uint32_t byte_count,
2357 		       struct dma_resv *resv,
2358 		       struct dma_fence **fence, bool direct_submit,
2359 		       bool vm_needs_flush, uint32_t copy_flags)
2360 {
2361 	struct amdgpu_device *adev = ring->adev;
2362 	unsigned int num_loops, num_dw;
2363 	struct amdgpu_job *job;
2364 	uint32_t max_bytes;
2365 	unsigned int i;
2366 	int r;
2367 
2368 	if (!direct_submit && !ring->sched.ready) {
2369 		dev_err(adev->dev,
2370 			"Trying to move memory with ring turned off.\n");
2371 		return -EINVAL;
2372 	}
2373 
2374 	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2375 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2376 	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2377 	r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
2378 				   resv, vm_needs_flush, &job, false,
2379 				   AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
2380 	if (r)
2381 		return r;
2382 
2383 	for (i = 0; i < num_loops; i++) {
2384 		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2385 
2386 		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2387 					dst_offset, cur_size_in_bytes, copy_flags);
2388 		src_offset += cur_size_in_bytes;
2389 		dst_offset += cur_size_in_bytes;
2390 		byte_count -= cur_size_in_bytes;
2391 	}
2392 
2393 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2394 	WARN_ON(job->ibs[0].length_dw > num_dw);
2395 	if (direct_submit)
2396 		r = amdgpu_job_submit_direct(job, ring, fence);
2397 	else
2398 		*fence = amdgpu_job_submit(job);
2399 	if (r)
2400 		goto error_free;
2401 
2402 	return r;
2403 
2404 error_free:
2405 	amdgpu_job_free(job);
2406 	dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
2407 	return r;
2408 }
2409 
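/*
 * Emit fill commands writing @src_data across @byte_count bytes at @dst_addr,
 * split into chunks of at most fill_max_bytes, and return the submission
 * fence in @fence.
 */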
2410 static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
2411 			       uint64_t dst_addr, uint32_t byte_count,
2412 			       struct dma_resv *resv,
2413 			       struct dma_fence **fence,
2414 			       bool vm_needs_flush, bool delayed,
2415 			       u64 k_job_id)
2416 {
2417 	struct amdgpu_device *adev = ring->adev;
2418 	unsigned int num_loops, num_dw;
2419 	struct amdgpu_job *job;
2420 	uint32_t max_bytes;
2421 	unsigned int i;
2422 	int r;
2423 
2424 	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2425 	num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2426 	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2427 	r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2428 				   &job, delayed, k_job_id);
2429 	if (r)
2430 		return r;
2431 
2432 	for (i = 0; i < num_loops; i++) {
2433 		uint32_t cur_size = min(byte_count, max_bytes);
2434 
2435 		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2436 					cur_size);
2437 
2438 		dst_addr += cur_size;
2439 		byte_count -= cur_size;
2440 	}
2441 
2442 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2443 	WARN_ON(job->ibs[0].length_dw > num_dw);
2444 	*fence = amdgpu_job_submit(job);
2445 	return 0;
2446 }
2447 
2448 /**
2449  * amdgpu_ttm_clear_buffer - clear memory buffers
2450  * @bo: amdgpu buffer object
2451  * @resv: reservation object
2452  * @fence: dma_fence associated with the operation
2453  *
2454  * Clear the memory buffer resource.
2455  *
2456  * Returns:
2457  * 0 for success or a negative error code on failure.
2458  */
2459 int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
2460 			    struct dma_resv *resv,
2461 			    struct dma_fence **fence)
2462 {
2463 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2464 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2465 	struct amdgpu_res_cursor cursor;
2466 	u64 addr;
2467 	int r = 0;
2468 
2469 	if (!adev->mman.buffer_funcs_enabled)
2470 		return -EINVAL;
2471 
2472 	if (!fence)
2473 		return -EINVAL;
2474 
2475 	*fence = dma_fence_get_stub();
2476 
2477 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
2478 
2479 	mutex_lock(&adev->mman.gtt_window_lock);
2480 	while (cursor.remaining) {
2481 		struct dma_fence *next = NULL;
2482 		u64 size;
2483 
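		/* Skip ranges the resource manager already marked as cleared */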
2484 		if (amdgpu_res_cleared(&cursor)) {
2485 			amdgpu_res_next(&cursor, cursor.size);
2486 			continue;
2487 		}
2488 
2489 		/* Never clear more than 256MiB at once to avoid timeouts */
2490 		size = min(cursor.size, 256ULL << 20);
2491 
2492 		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
2493 					  1, ring, false, &size, &addr);
2494 		if (r)
2495 			goto err;
2496 
2497 		r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
2498 					&next, true, true,
2499 					AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
2500 		if (r)
2501 			goto err;
2502 
2503 		dma_fence_put(*fence);
2504 		*fence = next;
2505 
2506 		amdgpu_res_next(&cursor, size);
2507 	}
2508 err:
2509 	mutex_unlock(&adev->mman.gtt_window_lock);
2510 
2511 	return r;
2512 }
2513 
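/**
 * amdgpu_fill_buffer - fill a buffer object with a 32bit pattern
 *
 * @bo: amdgpu buffer object to fill
 * @src_data: 32bit value written to every dword of the buffer
 * @resv: reservation object to sync to, may be NULL
 * @f: returned fence signaling completion, may be NULL
 * @delayed: use the low priority scheduler entity
 * @k_job_id: kernel job identifier (e.g. AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER)
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */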
2514 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2515 			uint32_t src_data,
2516 			struct dma_resv *resv,
2517 			struct dma_fence **f,
2518 			bool delayed,
2519 			u64 k_job_id)
2520 {
2521 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2522 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2523 	struct dma_fence *fence = NULL;
2524 	struct amdgpu_res_cursor dst;
2525 	int r;
2526 
2527 	if (!adev->mman.buffer_funcs_enabled) {
2528 		dev_err(adev->dev,
2529 			"Trying to clear memory with ring turned off.\n");
2530 		return -EINVAL;
2531 	}
2532 
2533 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2534 
2535 	mutex_lock(&adev->mman.gtt_window_lock);
2536 	while (dst.remaining) {
2537 		struct dma_fence *next;
2538 		uint64_t cur_size, to;
2539 
2540 		/* Never fill more than 256MiB at once to avoid timeouts */
2541 		cur_size = min(dst.size, 256ULL << 20);
2542 
2543 		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2544 					  1, ring, false, &cur_size, &to);
2545 		if (r)
2546 			goto error;
2547 
2548 		r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
2549 					&next, true, delayed, k_job_id);
2550 		if (r)
2551 			goto error;
2552 
2553 		dma_fence_put(fence);
2554 		fence = next;
2555 
2556 		amdgpu_res_next(&dst, cur_size);
2557 	}
2558 error:
2559 	mutex_unlock(&adev->mman.gtt_window_lock);
2560 	if (f)
2561 		*f = dma_fence_get(fence);
2562 	dma_fence_put(fence);
2563 	return r;
2564 }
2565 
2566 /**
2567  * amdgpu_ttm_evict_resources - evict memory buffers
2568  * @adev: amdgpu device object
2569  * @mem_type: evicted BO's memory type
2570  *
2571  * Evicts all @mem_type buffers on the lru list of the memory type.
2572  *
2573  * Returns:
2574  * 0 for success or a negative error code on failure.
2575  */
2576 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2577 {
2578 	struct ttm_resource_manager *man;
2579 
2580 	switch (mem_type) {
2581 	case TTM_PL_VRAM:
2582 	case TTM_PL_TT:
2583 	case AMDGPU_PL_GWS:
2584 	case AMDGPU_PL_GDS:
2585 	case AMDGPU_PL_OA:
2586 		man = ttm_manager_type(&adev->mman.bdev, mem_type);
2587 		break;
2588 	default:
2589 		dev_err(adev->dev, "Trying to evict invalid memory type\n");
2590 		return -EINVAL;
2591 	}
2592 
2593 	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2594 }
2595 
2596 #if defined(CONFIG_DEBUG_FS)
2597 
2598 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2599 {
2600 	struct amdgpu_device *adev = m->private;
2601 
2602 	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2603 }
2604 
2605 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2606 
2607 /*
2608  * amdgpu_ttm_vram_read - Linear read access to VRAM
2609  *
2610  * Accesses VRAM via MMIO for debugging purposes.
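 *
 * Exposed through debugfs as "amdgpu_vram" (see amdgpu_ttm_debugfs_init()).
 * Reads must be 4-byte aligned in both size and offset; e.g., assuming
 * debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0:
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_vram bs=4 count=16 | xxd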
2611  */
2612 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2613 				    size_t size, loff_t *pos)
2614 {
2615 	struct amdgpu_device *adev = file_inode(f)->i_private;
2616 	ssize_t result = 0;
2617 
2618 	if (size & 0x3 || *pos & 0x3)
2619 		return -EINVAL;
2620 
2621 	if (*pos >= adev->gmc.mc_vram_size)
2622 		return -ENXIO;
2623 
2624 	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2625 	while (size) {
2626 		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2627 		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2628 
2629 		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2630 		if (copy_to_user(buf, value, bytes))
2631 			return -EFAULT;
2632 
2633 		result += bytes;
2634 		buf += bytes;
2635 		*pos += bytes;
2636 		size -= bytes;
2637 	}
2638 
2639 	return result;
2640 }
2641 
2642 /*
2643  * amdgpu_ttm_vram_write - Linear write access to VRAM
2644  *
2645  * Accesses VRAM via MMIO for debugging purposes.
2646  */
2647 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2648 				    size_t size, loff_t *pos)
2649 {
2650 	struct amdgpu_device *adev = file_inode(f)->i_private;
2651 	ssize_t result = 0;
2652 	int r;
2653 
2654 	if (size & 0x3 || *pos & 0x3)
2655 		return -EINVAL;
2656 
2657 	if (*pos >= adev->gmc.mc_vram_size)
2658 		return -ENXIO;
2659 
2660 	while (size) {
2661 		uint32_t value;
2662 
2663 		if (*pos >= adev->gmc.mc_vram_size)
2664 			return result;
2665 
2666 		r = get_user(value, (uint32_t *)buf);
2667 		if (r)
2668 			return r;
2669 
2670 		amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2671 
2672 		result += 4;
2673 		buf += 4;
2674 		*pos += 4;
2675 		size -= 4;
2676 	}
2677 
2678 	return result;
2679 }
2680 
2681 static const struct file_operations amdgpu_ttm_vram_fops = {
2682 	.owner = THIS_MODULE,
2683 	.read = amdgpu_ttm_vram_read,
2684 	.write = amdgpu_ttm_vram_write,
2685 	.llseek = default_llseek,
2686 };
2687 
2688 /*
2689  * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2690  *
2691  * This function is used to read memory that has been mapped to the
2692  * GPU and the known addresses are not physical addresses but instead
2693  * bus addresses (e.g., what you'd put in an IB or ring buffer).
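 *
 * Exposed through debugfs as "amdgpu_iomem" (see amdgpu_ttm_debugfs_init()).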
2694  */
2695 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2696 				 size_t size, loff_t *pos)
2697 {
2698 	struct amdgpu_device *adev = file_inode(f)->i_private;
2699 	struct iommu_domain *dom;
2700 	ssize_t result = 0;
2701 	int r;
2702 
2703 	/* retrieve the IOMMU domain if any for this device */
2704 	dom = iommu_get_domain_for_dev(adev->dev);
2705 
2706 	while (size) {
2707 		phys_addr_t addr = *pos & PAGE_MASK;
2708 		loff_t off = *pos & ~PAGE_MASK;
2709 		size_t bytes = PAGE_SIZE - off;
2710 		unsigned long pfn;
2711 		struct page *p;
2712 		void *ptr;
2713 
2714 		bytes = min(bytes, size);
2715 
2716 		/* Translate the bus address to a physical address.  If
2717 		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity.
2719 		 */
2720 		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2721 
2722 		pfn = addr >> PAGE_SHIFT;
2723 		if (!pfn_valid(pfn))
2724 			return -EPERM;
2725 
2726 		p = pfn_to_page(pfn);
2727 		if (p->mapping != adev->mman.bdev.dev_mapping)
2728 			return -EPERM;
2729 
2730 		ptr = kmap_local_page(p);
2731 		r = copy_to_user(buf, ptr + off, bytes);
2732 		kunmap_local(ptr);
2733 		if (r)
2734 			return -EFAULT;
2735 
2736 		size -= bytes;
2737 		*pos += bytes;
2738 		result += bytes;
2739 	}
2740 
2741 	return result;
2742 }
2743 
2744 /*
2745  * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2746  *
2747  * This function is used to write memory that has been mapped to the
2748  * GPU and the known addresses are not physical addresses but instead
2749  * bus addresses (e.g., what you'd put in an IB or ring buffer).
2750  */
2751 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2752 				 size_t size, loff_t *pos)
2753 {
2754 	struct amdgpu_device *adev = file_inode(f)->i_private;
2755 	struct iommu_domain *dom;
2756 	ssize_t result = 0;
2757 	int r;
2758 
2759 	dom = iommu_get_domain_for_dev(adev->dev);
2760 
2761 	while (size) {
2762 		phys_addr_t addr = *pos & PAGE_MASK;
2763 		loff_t off = *pos & ~PAGE_MASK;
2764 		size_t bytes = PAGE_SIZE - off;
2765 		unsigned long pfn;
2766 		struct page *p;
2767 		void *ptr;
2768 
2769 		bytes = min(bytes, size);
2770 
2771 		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2772 
2773 		pfn = addr >> PAGE_SHIFT;
2774 		if (!pfn_valid(pfn))
2775 			return -EPERM;
2776 
2777 		p = pfn_to_page(pfn);
2778 		if (p->mapping != adev->mman.bdev.dev_mapping)
2779 			return -EPERM;
2780 
2781 		ptr = kmap_local_page(p);
2782 		r = copy_from_user(ptr + off, buf, bytes);
2783 		kunmap_local(ptr);
2784 		if (r)
2785 			return -EFAULT;
2786 
2787 		size -= bytes;
2788 		*pos += bytes;
2789 		result += bytes;
2790 	}
2791 
2792 	return result;
2793 }
2794 
2795 static const struct file_operations amdgpu_ttm_iomem_fops = {
2796 	.owner = THIS_MODULE,
2797 	.read = amdgpu_iomem_read,
2798 	.write = amdgpu_iomem_write,
2799 	.llseek = default_llseek
2800 };
2801 
2802 #endif
2803 
2804 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2805 {
2806 #if defined(CONFIG_DEBUG_FS)
2807 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2808 	struct dentry *root = minor->debugfs_root;
2809 
2810 	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2811 				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2812 	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2813 			    &amdgpu_ttm_iomem_fops);
2814 	debugfs_create_file("ttm_page_pool", 0444, root, adev,
2815 			    &amdgpu_ttm_page_pool_fops);
2816 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2817 							     TTM_PL_VRAM),
2818 					    root, "amdgpu_vram_mm");
2819 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2820 							     TTM_PL_TT),
2821 					    root, "amdgpu_gtt_mm");
2822 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2823 							     AMDGPU_PL_GDS),
2824 					    root, "amdgpu_gds_mm");
2825 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2826 							     AMDGPU_PL_GWS),
2827 					    root, "amdgpu_gws_mm");
2828 	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2829 							     AMDGPU_PL_OA),
2830 					    root, "amdgpu_oa_mm");
2831 
2832 #endif
2833 }
2834