xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47 
48 /**
49  * DOC: GPUVM
50  *
51  * GPUVM is the MMU functionality provided on the GPU.
52  * GPUVM is similar to the legacy GART on older asics; however,
53  * rather than there being a single global GART table
54  * for the entire GPU, there can be multiple GPUVM page tables active
55  * at any given time.  The GPUVM page tables can contain a mix of
56  * VRAM pages and system pages (both memory and MMIO) and system pages
57  * can be mapped as snooped (cached system pages) or unsnooped
58  * (uncached system pages).
59  *
60  * Each active GPUVM has an ID associated with it and there is a page table
61  * linked with each VMID.  When executing a command buffer,
62  * the kernel tells the engine what VMID to use for that command
63  * buffer.  VMIDs are allocated dynamically as commands are submitted.
64  * The userspace drivers maintain their own address space and the kernel
65  * sets up their page tables accordingly when they submit their
66  * command buffers and a VMID is assigned.
67  * The hardware supports up to 16 active GPUVMs at any given time.
68  *
69  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
71  * as other features such as encryption and caching attributes.
72  *
73  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
74  * addition to an aperture managed by a page table, VMID 0 also has
75  * several other apertures.  There is an aperture for direct access to VRAM
76  * and there is a legacy AGP aperture which just forwards accesses directly
77  * to the matching system physical addresses (or IOVAs when an IOMMU is
78  * present).  These apertures provide direct access to these memories without
79  * incurring the overhead of a page table.  VMID 0 is used by the kernel
80  * driver for tasks like memory management.
81  *
82  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83  * For user applications, each application can have their own unique GPUVM
84  * address space.  The application manages the address space and the kernel
85  * driver manages the GPUVM page tables for each process.  If a GPU client
86  * accesses an invalid page, it will generate a GPU page fault, similar to
87  * accessing an invalid page on a CPU.
88  */
89 
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92 
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 		     START, LAST, static, amdgpu_vm_it)
95 
96 #undef START
97 #undef LAST
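
/*
 * INTERVAL_TREE_DEFINE() above generates the amdgpu_vm_it_insert(),
 * amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next() helpers used below to track mappings by GPU
 * address range.  A minimal sketch (with a hypothetical handle_mapping()
 * callback) of walking all mappings overlapping the inclusive GPU page
 * range [saddr, eaddr]:
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
 *	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr))
 *		handle_mapping(m);
 *
 * Note that mapping->start and mapping->last are GPU page numbers, not bytes.
 */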
98 
99 /**
100  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
101  */
102 struct amdgpu_prt_cb {
103 
104 	/**
105 	 * @adev: amdgpu device
106 	 */
107 	struct amdgpu_device *adev;
108 
109 	/**
110 	 * @cb: callback
111 	 */
112 	struct dma_fence_cb cb;
113 };
114 
115 /**
116  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117  */
118 struct amdgpu_vm_tlb_seq_struct {
119 	/**
120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 	 */
122 	struct amdgpu_vm *vm;
123 
124 	/**
125 	 * @cb: callback
126 	 */
127 	struct dma_fence_cb cb;
128 };
129 
130 /**
131  * amdgpu_vm_assert_locked - check if VM is correctly locked
132  * @vm: the VM which should be tested
133  *
134  * Asserts that the VM root PD is locked.
135  */
136 static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
137 {
138 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
139 }
140 
141 /**
142  * amdgpu_vm_bo_evicted - vm_bo is evicted
143  *
144  * @vm_bo: vm_bo which is evicted
145  *
146  * State for PDs/PTs and per VM BOs which are not at the location they should
147  * be.
148  */
149 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
150 {
151 	struct amdgpu_vm *vm = vm_bo->vm;
152 	struct amdgpu_bo *bo = vm_bo->bo;
153 
154 	vm_bo->moved = true;
155 	amdgpu_vm_assert_locked(vm);
156 	spin_lock(&vm_bo->vm->status_lock);
157 	if (bo->tbo.type == ttm_bo_type_kernel)
158 		list_move(&vm_bo->vm_status, &vm->evicted);
159 	else
160 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
161 	spin_unlock(&vm_bo->vm->status_lock);
162 }
163 /**
164  * amdgpu_vm_bo_moved - vm_bo is moved
165  *
166  * @vm_bo: vm_bo which is moved
167  *
168  * State for per VM BOs which are moved, but that change is not yet reflected
169  * in the page tables.
170  */
171 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
172 {
173 	amdgpu_vm_assert_locked(vm_bo->vm);
174 	spin_lock(&vm_bo->vm->status_lock);
175 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
176 	spin_unlock(&vm_bo->vm->status_lock);
177 }
178 
179 /**
180  * amdgpu_vm_bo_idle - vm_bo is idle
181  *
182  * @vm_bo: vm_bo which is now idle
183  *
184  * State for PDs/PTs and per VM BOs which have gone through the state machine
185  * and are now idle.
186  */
187 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
188 {
189 	amdgpu_vm_assert_locked(vm_bo->vm);
190 	spin_lock(&vm_bo->vm->status_lock);
191 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
192 	spin_unlock(&vm_bo->vm->status_lock);
193 	vm_bo->moved = false;
194 }
195 
196 /**
197  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
198  *
199  * @vm_bo: vm_bo which is now invalidated
200  *
201  * State for normal BOs which are invalidated and that change is not yet
202  * reflected in the PTs.
203  */
204 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
205 {
206 	spin_lock(&vm_bo->vm->status_lock);
207 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
208 	spin_unlock(&vm_bo->vm->status_lock);
209 }
210 
211 /**
212  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
213  *
214  * @vm_bo: vm_bo which is evicted
215  *
216  * State for BOs used by user mode queues which are not at the location they
217  * should be.
218  */
219 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
220 {
221 	vm_bo->moved = true;
222 	spin_lock(&vm_bo->vm->status_lock);
223 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
224 	spin_unlock(&vm_bo->vm->status_lock);
225 }
226 
227 /**
228  * amdgpu_vm_bo_relocated - vm_bo is relocated
229  *
230  * @vm_bo: vm_bo which is relocated
231  *
232  * State for PDs/PTs which need to update their parent PD.
233  * For the root PD, just move to idle state.
234  */
235 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
236 {
237 	amdgpu_vm_assert_locked(vm_bo->vm);
238 	if (vm_bo->bo->parent) {
239 		spin_lock(&vm_bo->vm->status_lock);
240 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
241 		spin_unlock(&vm_bo->vm->status_lock);
242 	} else {
243 		amdgpu_vm_bo_idle(vm_bo);
244 	}
245 }
246 
247 /**
248  * amdgpu_vm_bo_done - vm_bo is done
249  *
250  * @vm_bo: vm_bo which is now done
251  *
252  * State for normal BOs which are invalidated and that change has been updated
253  * in the PTs.
254  */
255 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
256 {
257 	amdgpu_vm_assert_locked(vm_bo->vm);
258 	spin_lock(&vm_bo->vm->status_lock);
259 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
260 	spin_unlock(&vm_bo->vm->status_lock);
261 }
262 
263 /**
264  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
265  * @vm: the VM which state machine to reset
266  *
267  * Move all vm_bo objects in the VM into a state where they will be updated
268  * again during validation.
269  */
270 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
271 {
272 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
273 
274 	amdgpu_vm_assert_locked(vm);
275 
276 	spin_lock(&vm->status_lock);
277 	list_splice_init(&vm->done, &vm->invalidated);
278 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
279 		vm_bo->moved = true;
280 
281 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
282 		struct amdgpu_bo *bo = vm_bo->bo;
283 
284 		vm_bo->moved = true;
285 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
286 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
287 		else if (bo->parent)
288 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
289 	}
290 	spin_unlock(&vm->status_lock);
291 }
292 
293 /**
294  * amdgpu_vm_update_shared - helper to update shared memory stat
295  * @base: base structure for tracking BO usage in a VM
296  *
297  * Takes the vm status_lock and updates the shared memory stat. If the basic
298  * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called
299  * as well.
300  */
301 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
302 {
303 	struct amdgpu_vm *vm = base->vm;
304 	struct amdgpu_bo *bo = base->bo;
305 	uint64_t size = amdgpu_bo_size(bo);
306 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
307 	bool shared;
308 
309 	dma_resv_assert_held(bo->tbo.base.resv);
310 	spin_lock(&vm->status_lock);
311 	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
312 	if (base->shared != shared) {
313 		base->shared = shared;
314 		if (shared) {
315 			vm->stats[bo_memtype].drm.shared += size;
316 			vm->stats[bo_memtype].drm.private -= size;
317 		} else {
318 			vm->stats[bo_memtype].drm.shared -= size;
319 			vm->stats[bo_memtype].drm.private += size;
320 		}
321 	}
322 	spin_unlock(&vm->status_lock);
323 }
324 
325 /**
326  * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
327  * @bo: amdgpu buffer object
328  *
329  * Update the per-VM stats for all VMs the BO belongs to, moving the size
330  * from private to shared or vice versa if needed.
331  */
332 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
333 {
334 	struct amdgpu_vm_bo_base *base;
335 
336 	for (base = bo->vm_bo; base; base = base->next)
337 		amdgpu_vm_update_shared(base);
338 }
339 
340 /**
341  * amdgpu_vm_update_stats_locked - helper to update normal memory stat
342  * @base: base structure for tracking BO usage in a VM
343  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
344  *        be bo->tbo.resource
345  * @sign: if we should add (+1) or subtract (-1) from the stat
346  *
347  * Caller needs to hold the vm status_lock. Useful for when multiple updates
348  * need to happen at the same time.
349  */
350 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
351 			    struct ttm_resource *res, int sign)
352 {
353 	struct amdgpu_vm *vm = base->vm;
354 	struct amdgpu_bo *bo = base->bo;
355 	int64_t size = sign * amdgpu_bo_size(bo);
356 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
357 
358 	/* For drm-total- and drm-shared-, BOs are accounted by their preferred
359 	 * placement, see also amdgpu_bo_mem_stats_placement.
360 	 */
361 	if (base->shared)
362 		vm->stats[bo_memtype].drm.shared += size;
363 	else
364 		vm->stats[bo_memtype].drm.private += size;
365 
366 	if (res && res->mem_type < __AMDGPU_PL_NUM) {
367 		uint32_t res_memtype = res->mem_type;
368 
369 		vm->stats[res_memtype].drm.resident += size;
370 		/* A BO only counts as purgeable if it is resident,
371 		 * since otherwise there's nothing to purge.
372 		 */
373 		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
374 			vm->stats[res_memtype].drm.purgeable += size;
375 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
376 			vm->stats[bo_memtype].evicted += size;
377 	}
378 }
379 
380 /**
381  * amdgpu_vm_update_stats - helper to update normal memory stat
382  * @base: base structure for tracking BO usage in a VM
383  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
384  *        be bo->tbo.resource
385  * @sign: if we should add (+1) or subtract (-1) from the stat
386  *
387  * Updates the basic memory stat when bo is added/deleted/moved.
388  */
389 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
390 			    struct ttm_resource *res, int sign)
391 {
392 	struct amdgpu_vm *vm = base->vm;
393 
394 	spin_lock(&vm->status_lock);
395 	amdgpu_vm_update_stats_locked(base, res, sign);
396 	spin_unlock(&vm->status_lock);
397 }
398 
399 /**
400  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
401  *
402  * @base: base structure for tracking BO usage in a VM
403  * @vm: vm to which bo is to be added
404  * @bo: amdgpu buffer object
405  *
406  * Initialize a bo_va_base structure and add it to the appropriate lists
407  *
408  */
409 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
410 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
411 {
412 	base->vm = vm;
413 	base->bo = bo;
414 	base->next = NULL;
415 	INIT_LIST_HEAD(&base->vm_status);
416 
417 	if (!bo)
418 		return;
419 	base->next = bo->vm_bo;
420 	bo->vm_bo = base;
421 
422 	spin_lock(&vm->status_lock);
423 	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
424 	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
425 	spin_unlock(&vm->status_lock);
426 
427 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
428 		return;
429 
430 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
431 
432 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
433 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
434 		amdgpu_vm_bo_relocated(base);
435 	else
436 		amdgpu_vm_bo_idle(base);
437 
438 	if (bo->preferred_domains &
439 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
440 		return;
441 
442 	/*
443 	 * We checked all the prerequisites, but it looks like this per VM BO
444 	 * is currently evicted. Add the BO to the evicted list to make sure it
445 	 * is validated on next VM use to avoid faults.
446 	 */
447 	amdgpu_vm_bo_evicted(base);
448 }
449 
450 /**
451  * amdgpu_vm_lock_pd - lock PD in drm_exec
452  *
453  * @vm: vm providing the BOs
454  * @exec: drm execution context
455  * @num_fences: number of extra fences to reserve
456  *
457  * Lock the VM root PD in the DRM execution context.
458  */
459 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
460 		      unsigned int num_fences)
461 {
462 	/* We need at least two fences for the VM PD/PT updates */
463 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
464 				    2 + num_fences);
465 }
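
/*
 * A minimal usage sketch for the function above (assuming the usual drm_exec
 * retry loop; the 'exec' variable and the 'out' label belong to the caller
 * and are not taken from this file):
 *
 *	struct drm_exec exec;
 *	int r;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto out;
 *	}
 *	... update the page tables ...
 * out:
 *	drm_exec_fini(&exec);
 */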
466 
467 /**
468  * amdgpu_vm_lock_done_list - lock all BOs on the done list
469  * @vm: vm providing the BOs
470  * @exec: drm execution context
471  * @num_fences: number of extra fences to reserve
472  *
473  * Lock the BOs on the done list in the DRM execution context.
474  */
475 int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
476 			     unsigned int num_fences)
477 {
478 	struct list_head *prev = &vm->done;
479 	struct amdgpu_bo_va *bo_va;
480 	struct amdgpu_bo *bo;
481 	int ret;
482 
483 	/* We can only trust prev->next while holding the lock */
484 	spin_lock(&vm->status_lock);
485 	while (!list_is_head(prev->next, &vm->done)) {
486 		bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
487 
488 		bo = bo_va->base.bo;
489 		if (bo) {
490 			amdgpu_bo_ref(bo);
491 			spin_unlock(&vm->status_lock);
492 
493 			ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
494 			amdgpu_bo_unref(&bo);
495 			if (unlikely(ret))
496 				return ret;
497 
498 			spin_lock(&vm->status_lock);
499 		}
500 		prev = prev->next;
501 	}
502 	spin_unlock(&vm->status_lock);
503 
504 	return 0;
505 }
506 
507 /**
508  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
509  *
510  * @adev: amdgpu device pointer
511  * @vm: vm providing the BOs
512  *
513  * Move all BOs to the end of LRU and remember their positions to put them
514  * together.
515  */
516 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
517 				struct amdgpu_vm *vm)
518 {
519 	spin_lock(&adev->mman.bdev.lru_lock);
520 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
521 	spin_unlock(&adev->mman.bdev.lru_lock);
522 }
523 
524 /* Create scheduler entities for page table updates */
525 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
526 				   struct amdgpu_vm *vm)
527 {
528 	int r;
529 
530 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
531 				  adev->vm_manager.vm_pte_scheds,
532 				  adev->vm_manager.vm_pte_num_scheds, NULL);
533 	if (r)
534 		goto error;
535 
536 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
537 				     adev->vm_manager.vm_pte_scheds,
538 				     adev->vm_manager.vm_pte_num_scheds, NULL);
539 
540 error:
541 	drm_sched_entity_destroy(&vm->immediate);
542 	return r;
543 }
544 
545 /* Destroy the entities for page table updates again */
546 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
547 {
548 	drm_sched_entity_destroy(&vm->immediate);
549 	drm_sched_entity_destroy(&vm->delayed);
550 }
551 
552 /**
553  * amdgpu_vm_generation - return the page table re-generation counter
554  * @adev: the amdgpu_device
555  * @vm: optional VM to check, might be NULL
556  *
557  * Returns a page table re-generation token to allow checking if submissions
558  * are still valid to use this VM. The VM parameter might be NULL in which case
559  * just the VRAM lost counter will be used.
560  */
561 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
562 {
563 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
564 
565 	if (!vm)
566 		return result;
567 
568 	result += lower_32_bits(vm->generation);
569 	/* Add one if the page tables will be re-generated on next CS */
570 	if (drm_sched_entity_error(&vm->delayed))
571 		++result;
572 
573 	return result;
574 }
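
/*
 * Illustration of the token layout (hypothetical values): with
 * vram_lost_counter == 2 and vm->generation == 5 the function returns
 * (2ULL << 32) | 5 == 0x0000000200000005.  If the delayed entity is in an
 * error state the result is incremented by one, so a comparison against a
 * previously stored token fails and the submission is rejected until the
 * page tables are re-generated.
 */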
575 
576 /**
577  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
578  *
579  * @adev: amdgpu device pointer
580  * @vm: vm providing the BOs
581  * @ticket: optional reservation ticket used to reserve the VM
582  * @validate: callback to do the validation
583  * @param: parameter for the validation callback
584  *
585  * Validate the page table BOs and per-VM BOs on command submission if
586  * necessary. If a ticket is given, also try to validate evicted user queue
587  * BOs. They must already be reserved with the given ticket.
588  *
589  * Returns:
590  * Validation result.
591  */
592 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
593 		       struct ww_acquire_ctx *ticket,
594 		       int (*validate)(void *p, struct amdgpu_bo *bo),
595 		       void *param)
596 {
597 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
598 	struct amdgpu_vm_bo_base *bo_base;
599 	struct amdgpu_bo *bo;
600 	int r;
601 
602 	if (vm->generation != new_vm_generation) {
603 		vm->generation = new_vm_generation;
604 		amdgpu_vm_bo_reset_state_machine(vm);
605 		amdgpu_vm_fini_entities(vm);
606 		r = amdgpu_vm_init_entities(adev, vm);
607 		if (r)
608 			return r;
609 	}
610 
611 	spin_lock(&vm->status_lock);
612 	while (!list_empty(&vm->evicted)) {
613 		bo_base = list_first_entry(&vm->evicted,
614 					   struct amdgpu_vm_bo_base,
615 					   vm_status);
616 		spin_unlock(&vm->status_lock);
617 
618 		bo = bo_base->bo;
619 
620 		r = validate(param, bo);
621 		if (r)
622 			return r;
623 
624 		if (bo->tbo.type != ttm_bo_type_kernel) {
625 			amdgpu_vm_bo_moved(bo_base);
626 		} else {
627 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
628 			amdgpu_vm_bo_relocated(bo_base);
629 		}
630 		spin_lock(&vm->status_lock);
631 	}
632 	while (ticket && !list_empty(&vm->evicted_user)) {
633 		bo_base = list_first_entry(&vm->evicted_user,
634 					   struct amdgpu_vm_bo_base,
635 					   vm_status);
636 		spin_unlock(&vm->status_lock);
637 
638 		bo = bo_base->bo;
639 		dma_resv_assert_held(bo->tbo.base.resv);
640 
641 		r = validate(param, bo);
642 		if (r)
643 			return r;
644 
645 		amdgpu_vm_bo_invalidated(bo_base);
646 
647 		spin_lock(&vm->status_lock);
648 	}
649 	spin_unlock(&vm->status_lock);
650 
651 	amdgpu_vm_eviction_lock(vm);
652 	vm->evicting = false;
653 	amdgpu_vm_eviction_unlock(vm);
654 
655 	return 0;
656 }
657 
658 /**
659  * amdgpu_vm_ready - check VM is ready for updates
660  *
661  * @vm: VM to check
662  *
663  * Check if all VM PDs/PTs are ready for updates
664  *
665  * Returns:
666  * True if VM is not evicting and all VM entities are not stopped
667  */
668 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
669 {
670 	bool ret;
671 
672 	amdgpu_vm_assert_locked(vm);
673 
674 	amdgpu_vm_eviction_lock(vm);
675 	ret = !vm->evicting;
676 	amdgpu_vm_eviction_unlock(vm);
677 
678 	spin_lock(&vm->status_lock);
679 	ret &= list_empty(&vm->evicted);
680 	spin_unlock(&vm->status_lock);
681 
682 	spin_lock(&vm->immediate.lock);
683 	ret &= !vm->immediate.stopped;
684 	spin_unlock(&vm->immediate.lock);
685 
686 	spin_lock(&vm->delayed.lock);
687 	ret &= !vm->delayed.stopped;
688 	spin_unlock(&vm->delayed.lock);
689 
690 	return ret;
691 }
692 
693 /**
694  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
695  *
696  * @adev: amdgpu_device pointer
697  */
698 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
699 {
700 	const struct amdgpu_ip_block *ip_block;
701 	bool has_compute_vm_bug;
702 	struct amdgpu_ring *ring;
703 	int i;
704 
705 	has_compute_vm_bug = false;
706 
707 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
708 	if (ip_block) {
709 		/* Compute has a VM bug for GFX version < 7.
710 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
711 		if (ip_block->version->major <= 7)
712 			has_compute_vm_bug = true;
713 		else if (ip_block->version->major == 8)
714 			if (adev->gfx.mec_fw_version < 673)
715 				has_compute_vm_bug = true;
716 	}
717 
718 	for (i = 0; i < adev->num_rings; i++) {
719 		ring = adev->rings[i];
720 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
721 			/* only compute rings */
722 			ring->has_compute_vm_bug = has_compute_vm_bug;
723 		else
724 			ring->has_compute_vm_bug = false;
725 	}
726 }
727 
728 /**
729  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
730  *
731  * @ring: ring on which the job will be submitted
732  * @job: job to submit
733  *
734  * Returns:
735  * True if sync is needed.
736  */
737 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
738 				  struct amdgpu_job *job)
739 {
740 	struct amdgpu_device *adev = ring->adev;
741 	unsigned vmhub = ring->vm_hub;
742 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
743 
744 	if (job->vmid == 0)
745 		return false;
746 
747 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
748 		return true;
749 
750 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
751 		return true;
752 
753 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
754 		return true;
755 
756 	return false;
757 }
758 
759 /**
760  * amdgpu_vm_flush - hardware flush the vm
761  *
762  * @ring: ring to use for flush
763  * @job:  related job
764  * @need_pipe_sync: is pipe sync needed
765  *
766  * Emit a VM flush when it is necessary.
767  */
768 void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
769 		     bool need_pipe_sync)
770 {
771 	struct amdgpu_device *adev = ring->adev;
772 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
773 	unsigned vmhub = ring->vm_hub;
774 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
775 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
776 	bool spm_update_needed = job->spm_update_needed;
777 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
778 		job->gds_switch_needed;
779 	bool vm_flush_needed = job->vm_needs_flush;
780 	bool cleaner_shader_needed = false;
781 	bool pasid_mapping_needed = false;
782 	struct dma_fence *fence = NULL;
783 	unsigned int patch = 0;
784 
785 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
786 		gds_switch_needed = true;
787 		vm_flush_needed = true;
788 		pasid_mapping_needed = true;
789 		spm_update_needed = true;
790 	}
791 
792 	mutex_lock(&id_mgr->lock);
793 	if (id->pasid != job->pasid || !id->pasid_mapping ||
794 	    !dma_fence_is_signaled(id->pasid_mapping))
795 		pasid_mapping_needed = true;
796 	mutex_unlock(&id_mgr->lock);
797 
798 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
799 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
800 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
801 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
802 		ring->funcs->emit_wreg;
803 
804 	cleaner_shader_needed = job->run_cleaner_shader &&
805 		adev->gfx.enable_cleaner_shader &&
806 		ring->funcs->emit_cleaner_shader && job->base.s_fence &&
807 		&job->base.s_fence->scheduled == isolation->spearhead;
808 
809 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
810 	    !cleaner_shader_needed)
811 		return;
812 
813 	amdgpu_ring_ib_begin(ring);
814 
815 	/* There is no matching insert_end for this on purpose for the vm flush.
816 	 * The IB portion of the submission has both.  Having multiple
817 	 * insert_start sequences is ok, but you can only have one insert_end
818 	 * per submission based on the way VCN FW works.  For JPEG
819 	 * you can as many insert_start and insert_end sequences as you like as
820 	 * you can have as many insert_start and insert_end sequences as you like as
821 	 */
822 	if (ring->funcs->insert_start)
823 		ring->funcs->insert_start(ring);
824 
825 	if (ring->funcs->init_cond_exec)
826 		patch = amdgpu_ring_init_cond_exec(ring,
827 						   ring->cond_exe_gpu_addr);
828 
829 	if (need_pipe_sync)
830 		amdgpu_ring_emit_pipeline_sync(ring);
831 
832 	if (cleaner_shader_needed)
833 		ring->funcs->emit_cleaner_shader(ring);
834 
835 	if (vm_flush_needed) {
836 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
837 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
838 	}
839 
840 	if (pasid_mapping_needed)
841 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
842 
843 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
844 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring->xcc_id, ring, job->vmid);
845 
846 	if (ring->funcs->emit_gds_switch &&
847 	    gds_switch_needed) {
848 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
849 					    job->gds_size, job->gws_base,
850 					    job->gws_size, job->oa_base,
851 					    job->oa_size);
852 	}
853 
854 	if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
855 		amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
856 		fence = &job->hw_vm_fence->base;
857 		/* get a ref for the job */
858 		dma_fence_get(fence);
859 	}
860 
861 	if (vm_flush_needed) {
862 		mutex_lock(&id_mgr->lock);
863 		dma_fence_put(id->last_flush);
864 		id->last_flush = dma_fence_get(fence);
865 		id->current_gpu_reset_count =
866 			atomic_read(&adev->gpu_reset_counter);
867 		mutex_unlock(&id_mgr->lock);
868 	}
869 
870 	if (pasid_mapping_needed) {
871 		mutex_lock(&id_mgr->lock);
872 		id->pasid = job->pasid;
873 		dma_fence_put(id->pasid_mapping);
874 		id->pasid_mapping = dma_fence_get(fence);
875 		mutex_unlock(&id_mgr->lock);
876 	}
877 
878 	/*
879 	 * Make sure that all other submissions wait for the cleaner shader to
880 	 * finish before we push them to the HW.
881 	 */
882 	if (cleaner_shader_needed) {
883 		trace_amdgpu_cleaner_shader(ring, fence);
884 		mutex_lock(&adev->enforce_isolation_mutex);
885 		dma_fence_put(isolation->spearhead);
886 		isolation->spearhead = dma_fence_get(fence);
887 		mutex_unlock(&adev->enforce_isolation_mutex);
888 	}
889 	dma_fence_put(fence);
890 
891 	amdgpu_ring_patch_cond_exec(ring, patch);
892 
893 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
894 	if (ring->funcs->emit_switch_buffer) {
895 		amdgpu_ring_emit_switch_buffer(ring);
896 		amdgpu_ring_emit_switch_buffer(ring);
897 	}
898 
899 	amdgpu_ring_ib_end(ring);
900 }
901 
902 /**
903  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
904  *
905  * @vm: requested vm
906  * @bo: requested buffer object
907  *
908  * Find @bo inside the requested vm.
909  * Search inside the @bo's vm list for the requested vm.
910  * Returns the found bo_va or NULL if none is found
911  *
912  * Object has to be reserved!
913  *
914  * Returns:
915  * Found bo_va or NULL.
916  */
917 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
918 				       struct amdgpu_bo *bo)
919 {
920 	struct amdgpu_vm_bo_base *base;
921 
922 	for (base = bo->vm_bo; base; base = base->next) {
923 		if (base->vm != vm)
924 			continue;
925 
926 		return container_of(base, struct amdgpu_bo_va, base);
927 	}
928 	return NULL;
929 }
930 
931 /**
932  * amdgpu_vm_map_gart - Resolve gart mapping of addr
933  *
934  * @pages_addr: optional DMA address to use for lookup
935  * @addr: the unmapped addr
936  *
937  * Look up the physical address of the page that the pte resolves
938  * to.
939  *
940  * Returns:
941  * The pointer for the page table entry.
942  */
943 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
944 {
945 	uint64_t result;
946 
947 	/* page table offset */
948 	result = pages_addr[addr >> PAGE_SHIFT];
949 
950 	/* in case cpu page size != gpu page size */
951 	result |= addr & (~PAGE_MASK);
952 
953 	result &= 0xFFFFFFFFFFFFF000ULL;
954 
955 	return result;
956 }
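
/*
 * Worked example (hypothetical addresses, assuming 64KiB CPU pages and 4KiB
 * GPU pages): for addr == 0x13000 the lookup uses
 * pages_addr[0x13000 >> PAGE_SHIFT] == pages_addr[1]; if that DMA address is
 * 0xabcd0000, the offset inside the CPU page (0x3000) is OR'ed back in and
 * the result is masked to 4KiB alignment, yielding 0xabcd3000.
 */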
957 
958 /**
959  * amdgpu_vm_update_pdes - make sure that all directories are valid
960  *
961  * @adev: amdgpu_device pointer
962  * @vm: requested vm
963  * @immediate: submit immediately to the paging queue
964  *
965  * Makes sure all directories are up to date.
966  *
967  * Returns:
968  * 0 for success, error for failure.
969  */
970 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
971 			  struct amdgpu_vm *vm, bool immediate)
972 {
973 	struct amdgpu_vm_update_params params;
974 	struct amdgpu_vm_bo_base *entry;
975 	bool flush_tlb_needed = false;
976 	LIST_HEAD(relocated);
977 	int r, idx;
978 
979 	amdgpu_vm_assert_locked(vm);
980 
981 	spin_lock(&vm->status_lock);
982 	list_splice_init(&vm->relocated, &relocated);
983 	spin_unlock(&vm->status_lock);
984 
985 	if (list_empty(&relocated))
986 		return 0;
987 
988 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
989 		return -ENODEV;
990 
991 	memset(&params, 0, sizeof(params));
992 	params.adev = adev;
993 	params.vm = vm;
994 	params.immediate = immediate;
995 
996 	r = vm->update_funcs->prepare(&params, NULL,
997 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
998 	if (r)
999 		goto error;
1000 
1001 	list_for_each_entry(entry, &relocated, vm_status) {
1002 		/* vm_flush_needed after updating moved PDEs */
1003 		flush_tlb_needed |= entry->moved;
1004 
1005 		r = amdgpu_vm_pde_update(&params, entry);
1006 		if (r)
1007 			goto error;
1008 	}
1009 
1010 	r = vm->update_funcs->commit(&params, &vm->last_update);
1011 	if (r)
1012 		goto error;
1013 
1014 	if (flush_tlb_needed)
1015 		atomic64_inc(&vm->tlb_seq);
1016 
1017 	while (!list_empty(&relocated)) {
1018 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
1019 					 vm_status);
1020 		amdgpu_vm_bo_idle(entry);
1021 	}
1022 
1023 error:
1024 	drm_dev_exit(idx);
1025 	return r;
1026 }
1027 
1028 /**
1029  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1030  * @fence: unused
1031  * @cb: the callback structure
1032  *
1033  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1034  */
1035 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1036 				 struct dma_fence_cb *cb)
1037 {
1038 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1039 
1040 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1041 	atomic64_inc(&tlb_cb->vm->tlb_seq);
1042 	kfree(tlb_cb);
1043 }
1044 
1045 /**
1046  * amdgpu_vm_tlb_flush - prepare TLB flush
1047  *
1048  * @params: parameters for update
1049  * @fence: input fence to sync TLB flush with
1050  * @tlb_cb: the callback structure
1051  *
1052  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1053  */
1054 static void
1055 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1056 		    struct dma_fence **fence,
1057 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1058 {
1059 	struct amdgpu_vm *vm = params->vm;
1060 
1061 	tlb_cb->vm = vm;
1062 	if (!fence || !*fence) {
1063 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1064 		return;
1065 	}
1066 
1067 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1068 				    amdgpu_vm_tlb_seq_cb)) {
1069 		dma_fence_put(vm->last_tlb_flush);
1070 		vm->last_tlb_flush = dma_fence_get(*fence);
1071 	} else {
1072 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1073 	}
1074 
1075 	/* Prepare a TLB flush fence to be attached to PTs */
1076 	if (!params->unlocked) {
1077 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1078 
1079 		/* Makes sure no PD/PT is freed before the flush */
1080 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1081 				   DMA_RESV_USAGE_BOOKKEEP);
1082 	}
1083 }
1084 
1085 /**
1086  * amdgpu_vm_update_range - update a range in the vm page table
1087  *
1088  * @adev: amdgpu_device pointer to use for commands
1089  * @vm: the VM to update the range
1090  * @immediate: immediate submission in a page fault
1091  * @unlocked: unlocked invalidation during MM callback
1092  * @flush_tlb: trigger tlb invalidation after update completed
1093  * @allow_override: change MTYPE for local NUMA nodes
1094  * @sync: fences we need to sync to
1095  * @start: start of mapped range
1096  * @last: last mapped entry
1097  * @flags: flags for the entries
1098  * @offset: offset into nodes and pages_addr
1099  * @vram_base: base for vram mappings
1100  * @res: ttm_resource to map
1101  * @pages_addr: DMA addresses to use for mapping
1102  * @fence: optional resulting fence
1103  *
1104  * Fill in the page table entries between @start and @last.
1105  *
1106  * Returns:
1107  * 0 for success, negative error code for failure.
1108  */
1109 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1110 			   bool immediate, bool unlocked, bool flush_tlb,
1111 			   bool allow_override, struct amdgpu_sync *sync,
1112 			   uint64_t start, uint64_t last, uint64_t flags,
1113 			   uint64_t offset, uint64_t vram_base,
1114 			   struct ttm_resource *res, dma_addr_t *pages_addr,
1115 			   struct dma_fence **fence)
1116 {
1117 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1118 	struct amdgpu_vm_update_params params;
1119 	struct amdgpu_res_cursor cursor;
1120 	int r, idx;
1121 
1122 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1123 		return -ENODEV;
1124 
1125 	tlb_cb = kmalloc_obj(*tlb_cb);
1126 	if (!tlb_cb) {
1127 		drm_dev_exit(idx);
1128 		return -ENOMEM;
1129 	}
1130 
1131 	/* On Vega20+XGMI, PTEs get inadvertently cached in the L2 texture cache,
1132 	 * so do a heavy-weight TLB flush unconditionally.
1133 	 */
1134 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1135 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1136 
1137 	/*
1138 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1139 	 */
1140 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1141 
1142 	memset(&params, 0, sizeof(params));
1143 	params.adev = adev;
1144 	params.vm = vm;
1145 	params.immediate = immediate;
1146 	params.pages_addr = pages_addr;
1147 	params.unlocked = unlocked;
1148 	params.needs_flush = flush_tlb;
1149 	params.allow_override = allow_override;
1150 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1151 
1152 	amdgpu_vm_eviction_lock(vm);
1153 	if (vm->evicting) {
1154 		r = -EBUSY;
1155 		goto error_free;
1156 	}
1157 
1158 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1159 		struct dma_fence *tmp = dma_fence_get_stub();
1160 
1161 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1162 		swap(vm->last_unlocked, tmp);
1163 		dma_fence_put(tmp);
1164 	}
1165 
1166 	r = vm->update_funcs->prepare(&params, sync,
1167 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
1168 	if (r)
1169 		goto error_free;
1170 
1171 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1172 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1173 	while (cursor.remaining) {
1174 		uint64_t tmp, num_entries, addr;
1175 
1176 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1177 		if (pages_addr) {
1178 			bool contiguous = true;
1179 
1180 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1181 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1182 				uint64_t count;
1183 
1184 				contiguous = pages_addr[pfn + 1] ==
1185 					pages_addr[pfn] + PAGE_SIZE;
1186 
1187 				tmp = num_entries /
1188 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1189 				for (count = 2; count < tmp; ++count) {
1190 					uint64_t idx = pfn + count;
1191 
1192 					if (contiguous != (pages_addr[idx] ==
1193 					    pages_addr[idx - 1] + PAGE_SIZE))
1194 						break;
1195 				}
1196 				if (!contiguous)
1197 					count--;
1198 				num_entries = count *
1199 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1200 			}
1201 
1202 			if (!contiguous) {
1203 				addr = cursor.start;
1204 				params.pages_addr = pages_addr;
1205 			} else {
1206 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1207 				params.pages_addr = NULL;
1208 			}
1209 
1210 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1211 			addr = vram_base + cursor.start;
1212 		} else {
1213 			addr = 0;
1214 		}
1215 
1216 		tmp = start + num_entries;
1217 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1218 		if (r)
1219 			goto error_free;
1220 
1221 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1222 		start = tmp;
1223 	}
1224 
1225 	r = vm->update_funcs->commit(&params, fence);
1226 	if (r)
1227 		goto error_free;
1228 
1229 	if (params.needs_flush) {
1230 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1231 		tlb_cb = NULL;
1232 	}
1233 
1234 	amdgpu_vm_pt_free_list(adev, &params);
1235 
1236 error_free:
1237 	kfree(tlb_cb);
1238 	amdgpu_vm_eviction_unlock(vm);
1239 	drm_dev_exit(idx);
1240 	return r;
1241 }
1242 
1243 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1244 			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1245 {
1246 	spin_lock(&vm->status_lock);
1247 	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1248 	spin_unlock(&vm->status_lock);
1249 }
1250 
1251 /**
1252  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1253  *
1254  * @adev: amdgpu_device pointer
1255  * @bo_va: requested BO and VM object
1256  * @clear: if true clear the entries
1257  *
1258  * Fill in the page table entries for @bo_va.
1259  *
1260  * Returns:
1261  * 0 for success, -EINVAL for failure.
1262  */
1263 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1264 			bool clear)
1265 {
1266 	struct amdgpu_bo *bo = bo_va->base.bo;
1267 	struct amdgpu_vm *vm = bo_va->base.vm;
1268 	struct amdgpu_bo_va_mapping *mapping;
1269 	struct dma_fence **last_update;
1270 	dma_addr_t *pages_addr = NULL;
1271 	struct ttm_resource *mem;
1272 	struct amdgpu_sync sync;
1273 	bool flush_tlb = clear;
1274 	uint64_t vram_base;
1275 	uint64_t flags;
1276 	bool uncached;
1277 	int r;
1278 
1279 	amdgpu_sync_create(&sync);
1280 	if (clear) {
1281 		mem = NULL;
1282 
1283 		/* Implicitly sync to command submissions in the same VM before
1284 		 * unmapping.
1285 		 */
1286 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1287 				     AMDGPU_SYNC_EQ_OWNER, vm);
1288 		if (r)
1289 			goto error_free;
1290 		if (bo) {
1291 			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1292 			if (r)
1293 				goto error_free;
1294 		}
1295 	} else if (!bo) {
1296 		mem = NULL;
1297 
1298 		/* PRT map operations don't need to sync to anything. */
1299 
1300 	} else {
1301 		struct drm_gem_object *obj = &bo->tbo.base;
1302 
1303 		if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
1304 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1305 			struct drm_gem_object *gobj = dma_buf->priv;
1306 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1307 
1308 			if (abo->tbo.resource &&
1309 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1310 				bo = gem_to_amdgpu_bo(gobj);
1311 		}
1312 		mem = bo->tbo.resource;
1313 		if (mem && (mem->mem_type == TTM_PL_TT ||
1314 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1315 			pages_addr = bo->tbo.ttm->dma_address;
1316 
1317 		/* Implicitly sync to moving fences before mapping anything */
1318 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1319 				     AMDGPU_SYNC_EXPLICIT, vm);
1320 		if (r)
1321 			goto error_free;
1322 	}
1323 
1324 	if (bo) {
1325 		struct amdgpu_device *bo_adev;
1326 
1327 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1328 
1329 		if (amdgpu_bo_encrypted(bo))
1330 			flags |= AMDGPU_PTE_TMZ;
1331 
1332 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1333 		vram_base = bo_adev->vm_manager.vram_base_offset;
1334 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1335 	} else {
1336 		flags = 0x0;
1337 		vram_base = 0;
1338 		uncached = false;
1339 	}
1340 
1341 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1342 		last_update = &vm->last_update;
1343 	else
1344 		last_update = &bo_va->last_pt_update;
1345 
1346 	if (!clear && bo_va->base.moved) {
1347 		flush_tlb = true;
1348 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1349 
1350 	} else if (bo_va->cleared != clear) {
1351 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1352 	}
1353 
1354 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1355 		uint64_t update_flags = flags;
1356 
1357 		/* Normally, bo_va->flags only contains the READABLE and WRITEABLE
1358 		 * bits, but filter the flags here just in case.
1359 		 */
1360 		if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
1361 			update_flags &= ~AMDGPU_PTE_READABLE;
1362 		if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
1363 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1364 
1365 		/* Apply ASIC specific mapping flags */
1366 		amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
1367 				      &update_flags);
1368 
1369 		trace_amdgpu_vm_bo_update(mapping);
1370 
1371 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1372 					   !uncached, &sync, mapping->start,
1373 					   mapping->last, update_flags,
1374 					   mapping->offset, vram_base, mem,
1375 					   pages_addr, last_update);
1376 		if (r)
1377 			goto error_free;
1378 	}
1379 
1380 	/* If the BO is not in its preferred location add it back to
1381 	 * the evicted list so that it gets validated again on the
1382 	 * next command submission.
1383 	 */
1384 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1385 		if (bo->tbo.resource &&
1386 		    !(bo->preferred_domains &
1387 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1388 			amdgpu_vm_bo_evicted(&bo_va->base);
1389 		else
1390 			amdgpu_vm_bo_idle(&bo_va->base);
1391 	} else {
1392 		amdgpu_vm_bo_done(&bo_va->base);
1393 	}
1394 
1395 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1396 	bo_va->cleared = clear;
1397 	bo_va->base.moved = false;
1398 
1399 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1400 		list_for_each_entry(mapping, &bo_va->valids, list)
1401 			trace_amdgpu_vm_bo_mapping(mapping);
1402 	}
1403 
1404 error_free:
1405 	amdgpu_sync_free(&sync);
1406 	return r;
1407 }
1408 
1409 /**
1410  * amdgpu_vm_update_prt_state - update the global PRT state
1411  *
1412  * @adev: amdgpu_device pointer
1413  */
1414 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1415 {
1416 	unsigned long flags;
1417 	bool enable;
1418 
1419 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1420 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1421 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1422 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1423 }
1424 
1425 /**
1426  * amdgpu_vm_prt_get - add a PRT user
1427  *
1428  * @adev: amdgpu_device pointer
1429  */
1430 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1431 {
1432 	if (!adev->gmc.gmc_funcs->set_prt)
1433 		return;
1434 
1435 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1436 		amdgpu_vm_update_prt_state(adev);
1437 }
1438 
1439 /**
1440  * amdgpu_vm_prt_put - drop a PRT user
1441  *
1442  * @adev: amdgpu_device pointer
1443  */
1444 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1445 {
1446 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1447 		amdgpu_vm_update_prt_state(adev);
1448 }
1449 
1450 /**
1451  * amdgpu_vm_prt_cb - callback for updating the PRT status
1452  *
1453  * @fence: fence for the callback
1454  * @_cb: the callback function
1455  */
1456 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1457 {
1458 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1459 
1460 	amdgpu_vm_prt_put(cb->adev);
1461 	kfree(cb);
1462 }
1463 
1464 /**
1465  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1466  *
1467  * @adev: amdgpu_device pointer
1468  * @fence: fence for the callback
1469  */
1470 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1471 				 struct dma_fence *fence)
1472 {
1473 	struct amdgpu_prt_cb *cb;
1474 
1475 	if (!adev->gmc.gmc_funcs->set_prt)
1476 		return;
1477 
1478 	cb = kmalloc_obj(struct amdgpu_prt_cb);
1479 	if (!cb) {
1480 		/* Last resort when we are OOM */
1481 		if (fence)
1482 			dma_fence_wait(fence, false);
1483 
1484 		amdgpu_vm_prt_put(adev);
1485 	} else {
1486 		cb->adev = adev;
1487 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1488 						     amdgpu_vm_prt_cb))
1489 			amdgpu_vm_prt_cb(fence, &cb->cb);
1490 	}
1491 }
1492 
1493 /**
1494  * amdgpu_vm_free_mapping - free a mapping
1495  *
1496  * @adev: amdgpu_device pointer
1497  * @vm: requested vm
1498  * @mapping: mapping to be freed
1499  * @fence: fence of the unmap operation
1500  *
1501  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1502  */
1503 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1504 				   struct amdgpu_vm *vm,
1505 				   struct amdgpu_bo_va_mapping *mapping,
1506 				   struct dma_fence *fence)
1507 {
1508 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1509 		amdgpu_vm_add_prt_cb(adev, fence);
1510 	kfree(mapping);
1511 }
1512 
1513 /**
1514  * amdgpu_vm_prt_fini - finish all prt mappings
1515  *
1516  * @adev: amdgpu_device pointer
1517  * @vm: requested vm
1518  *
1519  * Register a cleanup callback to disable PRT support after VM dies.
1520  */
1521 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1522 {
1523 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1524 	struct dma_resv_iter cursor;
1525 	struct dma_fence *fence;
1526 
1527 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1528 		/* Add a callback for each fence in the reservation object */
1529 		amdgpu_vm_prt_get(adev);
1530 		amdgpu_vm_add_prt_cb(adev, fence);
1531 	}
1532 }
1533 
1534 /**
1535  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1536  *
1537  * @adev: amdgpu_device pointer
1538  * @vm: requested vm
1539  * @fence: optional resulting fence (unchanged if no work needed to be done
1540  * or if an error occurred)
1541  *
1542  * Make sure all freed BOs are cleared in the PT.
1543  * PTs have to be reserved and mutex must be locked!
1544  *
1545  * Returns:
1546  * 0 for success.
1547  *
1548  */
1549 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1550 			  struct amdgpu_vm *vm,
1551 			  struct dma_fence **fence)
1552 {
1553 	struct amdgpu_bo_va_mapping *mapping;
1554 	struct dma_fence *f = NULL;
1555 	struct amdgpu_sync sync;
1556 	int r;
1557 
1558 
1559 	/*
1560 	 * Implicitly sync to command submissions in the same VM before
1561 	 * unmapping.
1562 	 */
1563 	amdgpu_sync_create(&sync);
1564 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1565 			     AMDGPU_SYNC_EQ_OWNER, vm);
1566 	if (r)
1567 		goto error_free;
1568 
1569 	while (!list_empty(&vm->freed)) {
1570 		mapping = list_first_entry(&vm->freed,
1571 			struct amdgpu_bo_va_mapping, list);
1572 		list_del(&mapping->list);
1573 
1574 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1575 					   &sync, mapping->start, mapping->last,
1576 					   0, 0, 0, NULL, NULL, &f);
1577 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1578 		if (r) {
1579 			dma_fence_put(f);
1580 			goto error_free;
1581 		}
1582 	}
1583 
1584 	if (fence && f) {
1585 		dma_fence_put(*fence);
1586 		*fence = f;
1587 	} else {
1588 		dma_fence_put(f);
1589 	}
1590 
1591 error_free:
1592 	amdgpu_sync_free(&sync);
1593 	return r;
1594 
1595 }
1596 
1597 /**
1598  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1599  *
1600  * @adev: amdgpu_device pointer
1601  * @vm: requested vm
1602  * @ticket: optional reservation ticket used to reserve the VM
1603  *
1604  * Make sure all BOs which are moved are updated in the PTs.
1605  *
1606  * Returns:
1607  * 0 for success.
1608  *
1609  * PTs have to be reserved!
1610  */
1611 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1612 			   struct amdgpu_vm *vm,
1613 			   struct ww_acquire_ctx *ticket)
1614 {
1615 	struct amdgpu_bo_va *bo_va;
1616 	struct dma_resv *resv;
1617 	bool clear, unlock;
1618 	int r;
1619 
1620 	spin_lock(&vm->status_lock);
1621 	while (!list_empty(&vm->moved)) {
1622 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1623 					 base.vm_status);
1624 		spin_unlock(&vm->status_lock);
1625 
1626 		/* Per VM BOs never need to be cleared in the page tables */
1627 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1628 		if (r)
1629 			return r;
1630 		spin_lock(&vm->status_lock);
1631 	}
1632 
1633 	while (!list_empty(&vm->invalidated)) {
1634 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1635 					 base.vm_status);
1636 		resv = bo_va->base.bo->tbo.base.resv;
1637 		spin_unlock(&vm->status_lock);
1638 
1639 		/* Try to reserve the BO to avoid clearing its ptes */
1640 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1641 			clear = false;
1642 			unlock = true;
1643 		/* The caller is already holding the reservation lock */
1644 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1645 			clear = false;
1646 			unlock = false;
1647 		/* Somebody else is using the BO right now */
1648 		} else {
1649 			clear = true;
1650 			unlock = false;
1651 		}
1652 
1653 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1654 
1655 		if (unlock)
1656 			dma_resv_unlock(resv);
1657 		if (r)
1658 			return r;
1659 
1660 		/* Remember evicted DMABuf imports in compute VMs for later
1661 		 * validation
1662 		 */
1663 		if (vm->is_compute_context &&
1664 		    drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
1665 		    (!bo_va->base.bo->tbo.resource ||
1666 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1667 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1668 
1669 		spin_lock(&vm->status_lock);
1670 	}
1671 	spin_unlock(&vm->status_lock);
1672 
1673 	return 0;
1674 }
1675 
1676 /**
1677  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1678  *
1679  * @adev: amdgpu_device pointer
1680  * @vm: requested vm
1681  * @flush_type: flush type
1682  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1683  *
1684  * Flush TLB if needed for a compute VM.
1685  *
1686  * Returns:
1687  * 0 for success.
1688  */
1689 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1690 				struct amdgpu_vm *vm,
1691 				uint32_t flush_type,
1692 				uint32_t xcc_mask)
1693 {
1694 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1695 	bool all_hub = false;
1696 	int xcc = 0, r = 0;
1697 
1698 	WARN_ON_ONCE(!vm->is_compute_context);
1699 
1700 	/*
1701 	 * It can be that we race and lose here, but that is extremely unlikely
1702 	 * and the worst thing which could happen is that we flush the changes
1703 	 * into the TLB once more which is harmless.
1704 	 */
1705 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1706 		return 0;
1707 
1708 	if (adev->family == AMDGPU_FAMILY_AI ||
1709 	    adev->family == AMDGPU_FAMILY_RV)
1710 		all_hub = true;
1711 
1712 	for_each_inst(xcc, xcc_mask) {
1713 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1714 						   all_hub, xcc);
1715 		if (r)
1716 			break;
1717 	}
1718 	return r;
1719 }
1720 
1721 /**
1722  * amdgpu_vm_bo_add - add a bo to a specific vm
1723  *
1724  * @adev: amdgpu_device pointer
1725  * @vm: requested vm
1726  * @bo: amdgpu buffer object
1727  *
1728  * Add @bo into the requested vm.
1729  * Add @bo to the list of bos associated with the vm
1730  *
1731  * Returns:
1732  * Newly added bo_va or NULL for failure
1733  *
1734  * Object has to be reserved!
1735  */
1736 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1737 				      struct amdgpu_vm *vm,
1738 				      struct amdgpu_bo *bo)
1739 {
1740 	struct amdgpu_bo_va *bo_va;
1741 
1742 	amdgpu_vm_assert_locked(vm);
1743 
1744 	bo_va = kzalloc_obj(struct amdgpu_bo_va);
1745 	if (bo_va == NULL) {
1746 		return NULL;
1747 	}
1748 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1749 
1750 	bo_va->ref_count = 1;
1751 	bo_va->last_pt_update = dma_fence_get_stub();
1752 	INIT_LIST_HEAD(&bo_va->valids);
1753 	INIT_LIST_HEAD(&bo_va->invalids);
1754 
1755 	if (!bo)
1756 		return bo_va;
1757 
1758 	dma_resv_assert_held(bo->tbo.base.resv);
1759 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1760 		bo_va->is_xgmi = true;
1761 		/* Power up XGMI if it can be potentially used */
1762 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1763 	}
1764 
1765 	return bo_va;
1766 }
1767 
1768 
1769 /**
1770  * amdgpu_vm_bo_insert_map - insert a new mapping
1771  *
1772  * @adev: amdgpu_device pointer
1773  * @bo_va: bo_va to store the address
1774  * @mapping: the mapping to insert
1775  *
1776  * Insert a new mapping into all structures.
1777  */
1778 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1779 				    struct amdgpu_bo_va *bo_va,
1780 				    struct amdgpu_bo_va_mapping *mapping)
1781 {
1782 	struct amdgpu_vm *vm = bo_va->base.vm;
1783 	struct amdgpu_bo *bo = bo_va->base.bo;
1784 
1785 	mapping->bo_va = bo_va;
1786 	list_add(&mapping->list, &bo_va->invalids);
1787 	amdgpu_vm_it_insert(mapping, &vm->va);
1788 
1789 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1790 		amdgpu_vm_prt_get(adev);
1791 
1792 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1793 		amdgpu_vm_bo_moved(&bo_va->base);
1794 
1795 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1796 }
1797 
1798 /* Validate operation parameters to prevent potential abuse */
1799 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1800 					  struct amdgpu_bo *bo,
1801 					  uint64_t saddr,
1802 					  uint64_t offset,
1803 					  uint64_t size)
1804 {
1805 	uint64_t tmp, lpfn;
1806 
1807 	if (saddr & AMDGPU_GPU_PAGE_MASK
1808 	    || offset & AMDGPU_GPU_PAGE_MASK
1809 	    || size & AMDGPU_GPU_PAGE_MASK)
1810 		return -EINVAL;
1811 
1812 	if (check_add_overflow(saddr, size, &tmp)
1813 	    || check_add_overflow(offset, size, &tmp)
1814 	    || size == 0 /* which also leads to end < begin */)
1815 		return -EINVAL;
1816 
1817 	/* make sure the object fits at this offset */
1818 	if (bo && offset + size > amdgpu_bo_size(bo))
1819 		return -EINVAL;
1820 
1821 	/* Ensure the last pfn does not exceed max_pfn */
1822 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1823 	if (lpfn >= adev->vm_manager.max_pfn)
1824 		return -EINVAL;
1825 
1826 	return 0;
1827 }
1828 
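/*
 * Worked example for the checks above (illustrative): with 4 KiB GPU pages
 * (AMDGPU_GPU_PAGE_SIZE == 4096, so AMDGPU_GPU_PAGE_MASK == 0xfff), a
 * request with saddr = 0x101000, offset = 0x2000 and size = 0x4000 passes
 * the alignment test, while saddr = 0x101800 is rejected with -EINVAL.
 * The overflow checks catch ranges that would wrap around the 64-bit
 * address space, and the final test keeps the last pfn
 * ((saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT) below vm_manager.max_pfn.
 */
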
1829 /**
1830  * amdgpu_vm_bo_map - map bo inside a vm
1831  *
1832  * @adev: amdgpu_device pointer
1833  * @bo_va: bo_va to store the address
1834  * @saddr: where to map the BO
1835  * @offset: requested offset in the BO
1836  * @size: size of the mapping in bytes
1837  * @flags: attributes of pages (read/write/valid/etc.)
1838  *
1839  * Add a mapping of the BO at the specified address into the VM.
1840  *
1841  * Returns:
1842  * 0 for success, error for failure.
1843  *
1844  * Object has to be reserved and unreserved outside!
1845  */
1846 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1847 		     struct amdgpu_bo_va *bo_va,
1848 		     uint64_t saddr, uint64_t offset,
1849 		     uint64_t size, uint32_t flags)
1850 {
1851 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1852 	struct amdgpu_bo *bo = bo_va->base.bo;
1853 	struct amdgpu_vm *vm = bo_va->base.vm;
1854 	uint64_t eaddr;
1855 	int r;
1856 
1857 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1858 	if (r)
1859 		return r;
1860 
1861 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1862 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1863 
1864 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1865 	if (tmp) {
1866 		/* bo and tmp overlap, invalid addr */
1867 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1868 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1869 			tmp->start, tmp->last + 1);
1870 		return -EINVAL;
1871 	}
1872 
1873 	mapping = kmalloc_obj(*mapping);
1874 	if (!mapping)
1875 		return -ENOMEM;
1876 
1877 	mapping->start = saddr;
1878 	mapping->last = eaddr;
1879 	mapping->offset = offset;
1880 	mapping->flags = flags;
1881 
1882 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1883 
1884 	return 0;
1885 }
1886 
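/*
 * Illustrative sketch (not part of the driver, names are hypothetical):
 * mapping a whole BO read/write at GPU virtual address va, with the BO
 * reserved by the caller as required.  The flags are assumed to be the
 * AMDGPU_VM_PAGE_* page attributes (see the AMDGPU_VM_PAGE_PRT check in
 * amdgpu_vm_bo_insert_map() above).
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_VM_PAGE_READABLE |
 *			     AMDGPU_VM_PAGE_WRITEABLE);
 */
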
1887 /**
1888  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1889  *
1890  * @adev: amdgpu_device pointer
1891  * @bo_va: bo_va to store the address
1892  * @saddr: where to map the BO
1893  * @offset: requested offset in the BO
1894  * @size: size of the mapping in bytes
1895  * @flags: attributes of pages (read/write/valid/etc.)
1896  *
1897  * Add a mapping of the BO at the specified address into the VM. Replace existing
1898  * mappings as we do so.
1899  *
1900  * Returns:
1901  * 0 for success, error for failure.
1902  *
1903  * Object has to be reserved and unreserved outside!
1904  */
1905 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1906 			     struct amdgpu_bo_va *bo_va,
1907 			     uint64_t saddr, uint64_t offset,
1908 			     uint64_t size, uint32_t flags)
1909 {
1910 	struct amdgpu_bo_va_mapping *mapping;
1911 	struct amdgpu_bo *bo = bo_va->base.bo;
1912 	uint64_t eaddr;
1913 	int r;
1914 
1915 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1916 	if (r)
1917 		return r;
1918 
1919 	/* Allocate all the needed memory */
1920 	mapping = kmalloc_obj(*mapping);
1921 	if (!mapping)
1922 		return -ENOMEM;
1923 
1924 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1925 	if (r) {
1926 		kfree(mapping);
1927 		return r;
1928 	}
1929 
1930 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1931 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1932 
1933 	mapping->start = saddr;
1934 	mapping->last = eaddr;
1935 	mapping->offset = offset;
1936 	mapping->flags = flags;
1937 
1938 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1939 
1940 	return 0;
1941 }
1942 
1943 /**
1944  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1945  *
1946  * @adev: amdgpu_device pointer
1947  * @bo_va: bo_va to remove the address from
1948  * @saddr: where the BO is mapped
1949  *
1950  * Remove a mapping of the BO at the specified address from the VM.
1951  *
1952  * Returns:
1953  * 0 for success, error for failure.
1954  *
1955  * Object has to be reserved and unreserved outside!
1956  */
1957 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1958 		       struct amdgpu_bo_va *bo_va,
1959 		       uint64_t saddr)
1960 {
1961 	struct amdgpu_bo_va_mapping *mapping;
1962 	struct amdgpu_vm *vm = bo_va->base.vm;
1963 	bool valid = true;
1964 	int r;
1965 
1966 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1967 
1968 	list_for_each_entry(mapping, &bo_va->valids, list) {
1969 		if (mapping->start == saddr)
1970 			break;
1971 	}
1972 
1973 	if (&mapping->list == &bo_va->valids) {
1974 		valid = false;
1975 
1976 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1977 			if (mapping->start == saddr)
1978 				break;
1979 		}
1980 
1981 		if (&mapping->list == &bo_va->invalids)
1982 			return -ENOENT;
1983 	}
1984 
1985 	/* It is unlikely that the userq mapping hasn't been idled by the time
1986 	 * user space requests the GEM unmap IOCTL, except when user space
1987 	 * forces the unmap.
1988 	 */
1989 	if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
1990 		r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
1991 		if (unlikely(r == -EBUSY))
1992 			dev_warn_once(adev->dev,
1993 				      "Attempt to unmap an active userq buffer\n");
1994 	}
1995 
1996 	list_del(&mapping->list);
1997 	amdgpu_vm_it_remove(mapping, &vm->va);
1998 	mapping->bo_va = NULL;
1999 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2000 
2001 	if (valid)
2002 		list_add(&mapping->list, &vm->freed);
2003 	else
2004 		amdgpu_vm_free_mapping(adev, vm, mapping,
2005 				       bo_va->last_pt_update);
2006 
2007 	return 0;
2008 }
2009 
2010 /**
2011  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2012  *
2013  * @adev: amdgpu_device pointer
2014  * @vm: VM structure to use
2015  * @saddr: start of the range
2016  * @size: size of the range
2017  *
2018  * Remove all mappings in a range, split them as appropriate.
2019  *
2020  * Returns:
2021  * 0 for success, error for failure.
2022  */
2023 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2024 				struct amdgpu_vm *vm,
2025 				uint64_t saddr, uint64_t size)
2026 {
2027 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2028 	LIST_HEAD(removed);
2029 	uint64_t eaddr;
2030 	int r;
2031 
2032 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
2033 	if (r)
2034 		return r;
2035 
2036 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2037 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
2038 
2039 	/* Allocate all the needed memory */
2040 	before = kzalloc_obj(*before);
2041 	if (!before)
2042 		return -ENOMEM;
2043 	INIT_LIST_HEAD(&before->list);
2044 
2045 	after = kzalloc_obj(*after);
2046 	if (!after) {
2047 		kfree(before);
2048 		return -ENOMEM;
2049 	}
2050 	INIT_LIST_HEAD(&after->list);
2051 
2052 	/* Now gather all removed mappings */
2053 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2054 	while (tmp) {
2055 		/* Remember mapping split at the start */
2056 		if (tmp->start < saddr) {
2057 			before->start = tmp->start;
2058 			before->last = saddr - 1;
2059 			before->offset = tmp->offset;
2060 			before->flags = tmp->flags;
2061 			before->bo_va = tmp->bo_va;
2062 			list_add(&before->list, &tmp->bo_va->invalids);
2063 		}
2064 
2065 		/* Remember mapping split at the end */
2066 		if (tmp->last > eaddr) {
2067 			after->start = eaddr + 1;
2068 			after->last = tmp->last;
2069 			after->offset = tmp->offset;
2070 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2071 			after->flags = tmp->flags;
2072 			after->bo_va = tmp->bo_va;
2073 			list_add(&after->list, &tmp->bo_va->invalids);
2074 		}
2075 
2076 		list_del(&tmp->list);
2077 		list_add(&tmp->list, &removed);
2078 
2079 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2080 	}
2081 
2082 	/* And free them up */
2083 	list_for_each_entry_safe(tmp, next, &removed, list) {
2084 		amdgpu_vm_it_remove(tmp, &vm->va);
2085 		list_del(&tmp->list);
2086 
2087 		if (tmp->start < saddr)
2088 			tmp->start = saddr;
2089 		if (tmp->last > eaddr)
2090 			tmp->last = eaddr;
2091 
2092 		tmp->bo_va = NULL;
2093 		list_add(&tmp->list, &vm->freed);
2094 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2095 	}
2096 
2097 	/* Insert partial mapping before the range */
2098 	if (!list_empty(&before->list)) {
2099 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2100 
2101 		amdgpu_vm_it_insert(before, &vm->va);
2102 		if (before->flags & AMDGPU_VM_PAGE_PRT)
2103 			amdgpu_vm_prt_get(adev);
2104 
2105 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2106 		    !before->bo_va->base.moved)
2107 			amdgpu_vm_bo_moved(&before->bo_va->base);
2108 	} else {
2109 		kfree(before);
2110 	}
2111 
2112 	/* Insert partial mapping after the range */
2113 	if (!list_empty(&after->list)) {
2114 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2115 
2116 		amdgpu_vm_it_insert(after, &vm->va);
2117 		if (after->flags & AMDGPU_VM_PAGE_PRT)
2118 			amdgpu_vm_prt_get(adev);
2119 
2120 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2121 		    !after->bo_va->base.moved)
2122 			amdgpu_vm_bo_moved(&after->bo_va->base);
2123 	} else {
2124 		kfree(after);
2125 	}
2126 
2127 	return 0;
2128 }
2129 
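/*
 * Worked example for the splitting above (GPU page numbers, illustrative):
 * if a mapping covers [0x1000, 0x4fff] and the cleared range is
 * [0x2000, 0x2fff], the original mapping ends up on the freed list clamped
 * to [0x2000, 0x2fff], "before" keeps [0x1000, 0x1fff] with the original
 * offset, and "after" keeps [0x3000, 0x4fff] with the offset advanced by
 * the 0x2000 skipped pages.
 */
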
2130 /**
2131  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2132  *
2133  * @vm: the requested VM
2134  * @addr: the address
2135  *
2136  * Find a mapping by its address.
2137  *
2138  * Returns:
2139  * The amdgpu_bo_va_mapping matching @addr, or NULL
2140  *
2141  */
2142 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2143 							 uint64_t addr)
2144 {
2145 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2146 }
2147 
2148 /**
2149  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2150  *
2151  * @vm: the requested vm
2152  * @ticket: CS ticket
2153  *
2154  * Trace all mappings of BOs reserved during a command submission.
2155  */
2156 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2157 {
2158 	struct amdgpu_bo_va_mapping *mapping;
2159 
2160 	if (!trace_amdgpu_vm_bo_cs_enabled())
2161 		return;
2162 
2163 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2164 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2165 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2166 			struct amdgpu_bo *bo;
2167 
2168 			bo = mapping->bo_va->base.bo;
2169 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2170 			    ticket)
2171 				continue;
2172 		}
2173 
2174 		trace_amdgpu_vm_bo_cs(mapping);
2175 	}
2176 }
2177 
2178 /**
2179  * amdgpu_vm_bo_del - remove a bo from a specific vm
2180  *
2181  * @adev: amdgpu_device pointer
2182  * @bo_va: requested bo_va
2183  *
2184  * Remove @bo_va->bo from the requested vm.
2185  *
2186  * Object has to be reserved!
2187  */
2188 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2189 		      struct amdgpu_bo_va *bo_va)
2190 {
2191 	struct amdgpu_bo_va_mapping *mapping, *next;
2192 	struct amdgpu_bo *bo = bo_va->base.bo;
2193 	struct amdgpu_vm *vm = bo_va->base.vm;
2194 	struct amdgpu_vm_bo_base **base;
2195 
2196 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2197 
2198 	if (bo) {
2199 		dma_resv_assert_held(bo->tbo.base.resv);
2200 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2201 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2202 
2203 		for (base = &bo_va->base.bo->vm_bo; *base;
2204 		     base = &(*base)->next) {
2205 			if (*base != &bo_va->base)
2206 				continue;
2207 
2208 			amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2209 			*base = bo_va->base.next;
2210 			break;
2211 		}
2212 	}
2213 
2214 	spin_lock(&vm->status_lock);
2215 	list_del(&bo_va->base.vm_status);
2216 	spin_unlock(&vm->status_lock);
2217 
2218 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2219 		list_del(&mapping->list);
2220 		amdgpu_vm_it_remove(mapping, &vm->va);
2221 		mapping->bo_va = NULL;
2222 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2223 		list_add(&mapping->list, &vm->freed);
2224 	}
2225 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2226 		list_del(&mapping->list);
2227 		amdgpu_vm_it_remove(mapping, &vm->va);
2228 		amdgpu_vm_free_mapping(adev, vm, mapping,
2229 				       bo_va->last_pt_update);
2230 	}
2231 
2232 	dma_fence_put(bo_va->last_pt_update);
2233 
2234 	if (bo && bo_va->is_xgmi)
2235 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2236 
2237 	kfree(bo_va);
2238 }
2239 
2240 /**
2241  * amdgpu_vm_evictable - check if we can evict a VM
2242  *
2243  * @bo: A page table of the VM.
2244  *
2245  * Check if it is possible to evict a VM.
2246  */
2247 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2248 {
2249 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2250 
2251 	/* Page tables of a destroyed VM can go away immediately */
2252 	if (!bo_base || !bo_base->vm)
2253 		return true;
2254 
2255 	/* Don't evict VM page tables while they are busy */
2256 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2257 		return false;
2258 
2259 	/* Try to block ongoing updates */
2260 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2261 		return false;
2262 
2263 	/* Don't evict VM page tables while they are updated */
2264 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2265 		amdgpu_vm_eviction_unlock(bo_base->vm);
2266 		return false;
2267 	}
2268 
2269 	bo_base->vm->evicting = true;
2270 	amdgpu_vm_eviction_unlock(bo_base->vm);
2271 	return true;
2272 }
2273 
2274 /**
2275  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2276  *
2277  * @bo: amdgpu buffer object
2278  * @evicted: is the BO evicted
2279  *
2280  * Mark @bo as invalid.
2281  */
2282 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2283 {
2284 	struct amdgpu_vm_bo_base *bo_base;
2285 
2286 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2287 		struct amdgpu_vm *vm = bo_base->vm;
2288 
2289 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2290 			amdgpu_vm_bo_evicted(bo_base);
2291 			continue;
2292 		}
2293 
2294 		if (bo_base->moved)
2295 			continue;
2296 		bo_base->moved = true;
2297 
2298 		if (bo->tbo.type == ttm_bo_type_kernel)
2299 			amdgpu_vm_bo_relocated(bo_base);
2300 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2301 			amdgpu_vm_bo_moved(bo_base);
2302 		else
2303 			amdgpu_vm_bo_invalidated(bo_base);
2304 	}
2305 }
2306 
2307 /**
2308  * amdgpu_vm_bo_move - handle BO move
2309  *
2310  * @bo: amdgpu buffer object
2311  * @new_mem: the new placement of the BO move
2312  * @evicted: is the BO evicted
2313  *
2314  * Update the memory stats for the new placement and mark @bo as invalid.
2315  */
2316 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2317 		       bool evicted)
2318 {
2319 	struct amdgpu_vm_bo_base *bo_base;
2320 
2321 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2322 		struct amdgpu_vm *vm = bo_base->vm;
2323 
2324 		spin_lock(&vm->status_lock);
2325 		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2326 		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2327 		spin_unlock(&vm->status_lock);
2328 	}
2329 
2330 	amdgpu_vm_bo_invalidate(bo, evicted);
2331 }
2332 
2333 /**
2334  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2335  *
2336  * @vm_size: VM size
2337  *
2338  * Returns:
2339  * VM page table size as power of two
2340  */
2341 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2342 {
2343 	/* Total bits covered by PD + PTs */
2344 	unsigned bits = ilog2(vm_size) + 18;
2345 
2346 	/* Make sure the PD is 4K in size up to 8GB address space.
2347 	 * Above that, split equally between PD and PTs. */
2348 	if (vm_size <= 8)
2349 		return (bits - 9);
2350 	else
2351 		return ((bits + 3) / 2);
2352 }
2353 
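/*
 * Worked example (illustrative): for vm_size = 8 GB, bits = ilog2(8) + 18 =
 * 21 and the result is 21 - 9 = 12, leaving 9 bits (512 entries, i.e. a 4K
 * PD with 8-byte entries).  For vm_size = 256 GB, bits = 26 and the result
 * is (26 + 3) / 2 = 14, splitting the remaining bits roughly equally
 * between the PD (12) and the PTs (14).
 */
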
2354 /**
2355  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2356  *
2357  * @adev: amdgpu_device pointer
2358  * @min_vm_size: the minimum vm size in GB if it's set auto
2359  * @fragment_size_default: Default PTE fragment size
2360  * @max_level: max VMPT level
2361  * @max_bits: max address space size in bits
2362  *
2363  */
2364 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2365 			   uint32_t fragment_size_default, unsigned max_level,
2366 			   unsigned max_bits)
2367 {
2368 	unsigned int max_size = 1 << (max_bits - 30);
2369 	unsigned int vm_size;
2370 	uint64_t tmp;
2371 
2372 	/* adjust vm size first */
2373 	if (amdgpu_vm_size != -1) {
2374 		vm_size = amdgpu_vm_size;
2375 		if (vm_size > max_size) {
2376 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2377 				 amdgpu_vm_size, max_size);
2378 			vm_size = max_size;
2379 		}
2380 	} else {
2381 		struct sysinfo si;
2382 		unsigned int phys_ram_gb;
2383 
2384 		/* Optimal VM size depends on the amount of physical
2385 		 * RAM available. Underlying requirements and
2386 		 * assumptions:
2387 		 *
2388 		 *  - Need to map system memory and VRAM from all GPUs
2389 		 *     - VRAM from other GPUs not known here
2390 		 *     - Assume VRAM <= system memory
2391 		 *  - On GFX8 and older, VM space can be segmented for
2392 		 *    different MTYPEs
2393 		 *  - Need to allow room for fragmentation, guard pages etc.
2394 		 *
2395 		 * This adds up to a rough guess of system memory x3.
2396 		 * Round up to power of two to maximize the available
2397 		 * VM size with the given page table size.
2398 		 */
2399 		si_meminfo(&si);
2400 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2401 			       (1 << 30) - 1) >> 30;
2402 		vm_size = roundup_pow_of_two(
2403 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2404 	}
2405 
2406 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2407 	adev->vm_manager.max_level = max_level;
2408 
2409 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2410 	if (amdgpu_vm_block_size != -1)
2411 		tmp >>= amdgpu_vm_block_size - 9;
2412 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2413 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2414 	switch (adev->vm_manager.num_level) {
2415 	case 4:
2416 		adev->vm_manager.root_level = AMDGPU_VM_PDB3;
2417 		break;
2418 	case 3:
2419 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2420 		break;
2421 	case 2:
2422 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2423 		break;
2424 	case 1:
2425 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2426 		break;
2427 	default:
2428 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2429 	}
2430 	/* block size depends on vm size and hw setup */
2431 	if (amdgpu_vm_block_size != -1)
2432 		adev->vm_manager.block_size =
2433 			min((unsigned)amdgpu_vm_block_size, max_bits
2434 			    - AMDGPU_GPU_PAGE_SHIFT
2435 			    - 9 * adev->vm_manager.num_level);
2436 	else if (adev->vm_manager.num_level > 1)
2437 		adev->vm_manager.block_size = 9;
2438 	else
2439 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2440 
2441 	if (amdgpu_vm_fragment_size == -1)
2442 		adev->vm_manager.fragment_size = fragment_size_default;
2443 	else
2444 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2445 
2446 	dev_info(
2447 		adev->dev,
2448 		"vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2449 		vm_size, adev->vm_manager.num_level + 1,
2450 		adev->vm_manager.block_size, adev->vm_manager.fragment_size);
2451 }
2452 
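/*
 * Worked example (illustrative): with vm_size = 256 GB the code above sets
 * max_pfn = 256 << 18, i.e. 2^26 pages of 4 KiB, which is exactly 256 GB of
 * GPU address space.  With the default block_size of 9 on multi-level
 * configurations, each PT then covers 512 * 4 KiB = 2 MiB.
 */
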
2453 /**
2454  * amdgpu_vm_wait_idle - wait for the VM to become idle
2455  *
2456  * @vm: VM object to wait for
2457  * @timeout: timeout to wait for VM to become idle
2458  */
2459 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2460 {
2461 	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
2462 	if (timeout <= 0)
2463 		return timeout;
2464 
2465 	return drm_sched_entity_flush(&vm->delayed, timeout);
2466 }
2467 
2468 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2469 {
2470 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2471 
2472 	kfree(ti);
2473 }
2474 
2475 static inline struct amdgpu_vm *
2476 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2477 {
2478 	struct amdgpu_vm *vm;
2479 	unsigned long flags;
2480 
2481 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2482 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2483 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2484 
2485 	return vm;
2486 }
2487 
2488 /**
2489  * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2490  *
2491  * @task_info: task_info struct under discussion.
2492  *
2493  * frees the vm task_info ptr at the last put
2494  */
2495 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2496 {
2497 	if (task_info)
2498 		kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2499 }
2500 
2501 /**
2502  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2503  *
2504  * @vm: VM to get info from
2505  *
2506  * Returns the reference counted task_info structure, which must be
2507  * referenced down with amdgpu_vm_put_task_info.
2508  */
2509 struct amdgpu_task_info *
2510 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2511 {
2512 	struct amdgpu_task_info *ti = NULL;
2513 
2514 	if (vm) {
2515 		ti = vm->task_info;
2516 		kref_get(&vm->task_info->refcount);
2517 	}
2518 
2519 	return ti;
2520 }
2521 
2522 /**
2523  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2524  *
2525  * @adev: drm device pointer
2526  * @pasid: PASID identifier for VM
2527  *
2528  * Returns the reference counted task_info structure, which must be
2529  * referenced down with amdgpu_vm_put_task_info.
2530  */
2531 struct amdgpu_task_info *
2532 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2533 {
2534 	return amdgpu_vm_get_task_info_vm(
2535 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2536 }
2537 
2538 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2539 {
2540 	vm->task_info = kzalloc_obj(struct amdgpu_task_info);
2541 	if (!vm->task_info)
2542 		return -ENOMEM;
2543 
2544 	kref_init(&vm->task_info->refcount);
2545 	return 0;
2546 }
2547 
2548 /**
2549  * amdgpu_vm_set_task_info - Sets VMs task info.
2550  *
2551  * @vm: vm for which to set the info
2552  */
2553 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2554 {
2555 	if (!vm->task_info)
2556 		return;
2557 
2558 	if (vm->task_info->task.pid == current->pid)
2559 		return;
2560 
2561 	vm->task_info->task.pid = current->pid;
2562 	get_task_comm(vm->task_info->task.comm, current);
2563 
2564 	vm->task_info->tgid = current->tgid;
2565 	get_task_comm(vm->task_info->process_name, current->group_leader);
2566 }
2567 
2568 /**
2569  * amdgpu_vm_init - initialize a vm instance
2570  *
2571  * @adev: amdgpu_device pointer
2572  * @vm: requested vm
2573  * @xcp_id: GPU partition selection id
2574  * @pasid: the pasid the VM is using on this GPU
2575  *
2576  * Init @vm fields.
2577  *
2578  * Returns:
2579  * 0 for success, error for failure.
2580  */
2581 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2582 		   int32_t xcp_id, uint32_t pasid)
2583 {
2584 	struct amdgpu_bo *root_bo;
2585 	struct amdgpu_bo_vm *root;
2586 	int r, i;
2587 
2588 	vm->va = RB_ROOT_CACHED;
2589 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2590 		vm->reserved_vmid[i] = NULL;
2591 	INIT_LIST_HEAD(&vm->evicted);
2592 	INIT_LIST_HEAD(&vm->evicted_user);
2593 	INIT_LIST_HEAD(&vm->relocated);
2594 	INIT_LIST_HEAD(&vm->moved);
2595 	INIT_LIST_HEAD(&vm->idle);
2596 	INIT_LIST_HEAD(&vm->invalidated);
2597 	spin_lock_init(&vm->status_lock);
2598 	INIT_LIST_HEAD(&vm->freed);
2599 	INIT_LIST_HEAD(&vm->done);
2600 	INIT_KFIFO(vm->faults);
2601 
2602 	r = amdgpu_vm_init_entities(adev, vm);
2603 	if (r)
2604 		return r;
2605 
2606 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2607 
2608 	vm->is_compute_context = false;
2609 
2610 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2611 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2612 
2613 	dev_dbg(adev->dev, "VM update mode is %s\n",
2614 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2615 	WARN_ONCE((vm->use_cpu_for_update &&
2616 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2617 		  "CPU update of VM recommended only for large BAR system\n");
2618 
2619 	if (vm->use_cpu_for_update)
2620 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2621 	else
2622 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2623 
2624 	vm->last_update = dma_fence_get_stub();
2625 	vm->last_unlocked = dma_fence_get_stub();
2626 	vm->last_tlb_flush = dma_fence_get_stub();
2627 	vm->generation = amdgpu_vm_generation(adev, NULL);
2628 
2629 	mutex_init(&vm->eviction_lock);
2630 	vm->evicting = false;
2631 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2632 
2633 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2634 				false, &root, xcp_id);
2635 	if (r)
2636 		goto error_free_delayed;
2637 
2638 	root_bo = amdgpu_bo_ref(&root->bo);
2639 	r = amdgpu_bo_reserve(root_bo, true);
2640 	if (r) {
2641 		amdgpu_bo_unref(&root_bo);
2642 		goto error_free_delayed;
2643 	}
2644 
2645 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2646 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2647 	if (r)
2648 		goto error_free_root;
2649 
2650 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2651 	if (r)
2652 		goto error_free_root;
2653 
2654 	r = amdgpu_vm_create_task_info(vm);
2655 	if (r)
2656 		dev_dbg(adev->dev, "Failed to create task info for VM\n");
2657 
2658 	/* Store new PASID in XArray (if non-zero) */
2659 	if (pasid != 0) {
2660 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
2661 		if (r < 0)
2662 			goto error_free_root;
2663 
2664 		vm->pasid = pasid;
2665 	}
2666 
2667 	amdgpu_bo_unreserve(vm->root.bo);
2668 	amdgpu_bo_unref(&root_bo);
2669 
2670 	return 0;
2671 
2672 error_free_root:
2673 	/* If PASID was partially set, erase it from XArray before failing */
2674 	if (vm->pasid != 0) {
2675 		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2676 		vm->pasid = 0;
2677 	}
2678 	amdgpu_vm_pt_free_root(adev, vm);
2679 	amdgpu_bo_unreserve(vm->root.bo);
2680 	amdgpu_bo_unref(&root_bo);
2681 
2682 error_free_delayed:
2683 	dma_fence_put(vm->last_tlb_flush);
2684 	dma_fence_put(vm->last_unlocked);
2685 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2686 	amdgpu_vm_fini_entities(vm);
2687 
2688 	return r;
2689 }
2690 
2691 /**
2692  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2693  *
2694  * @adev: amdgpu_device pointer
2695  * @vm: requested vm
2696  *
2697  * This only works on GFX VMs that don't have any BOs added and no
2698  * page tables allocated yet.
2699  *
2700  * Changes the following VM parameters:
2701  * - use_cpu_for_update
2706  *
2707  * Returns:
2708  * 0 for success, -errno for errors.
2709  */
2710 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2711 {
2712 	int r;
2713 
2714 	r = amdgpu_bo_reserve(vm->root.bo, true);
2715 	if (r)
2716 		return r;
2717 
2718 	/* Update VM state */
2719 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2720 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2721 	dev_dbg(adev->dev, "VM update mode is %s\n",
2722 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2723 	WARN_ONCE((vm->use_cpu_for_update &&
2724 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2725 		  "CPU update of VM recommended only for large BAR system\n");
2726 
2727 	if (vm->use_cpu_for_update) {
2728 		/* Sync with last SDMA update/clear before switching to CPU */
2729 		r = amdgpu_bo_sync_wait(vm->root.bo,
2730 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2731 		if (r)
2732 			goto unreserve_bo;
2733 
2734 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2735 		r = amdgpu_vm_pt_map_tables(adev, vm);
2736 		if (r)
2737 			goto unreserve_bo;
2738 
2739 	} else {
2740 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2741 	}
2742 
2743 	dma_fence_put(vm->last_update);
2744 	vm->last_update = dma_fence_get_stub();
2745 	vm->is_compute_context = true;
2746 
2747 unreserve_bo:
2748 	amdgpu_bo_unreserve(vm->root.bo);
2749 	return r;
2750 }
2751 
2752 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2753 {
2754 	for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2755 		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2756 		      vm->stats[i].evicted == 0))
2757 			return false;
2758 	}
2759 	return true;
2760 }
2761 
2762 /**
2763  * amdgpu_vm_fini - tear down a vm instance
2764  *
2765  * @adev: amdgpu_device pointer
2766  * @vm: requested vm
2767  *
2768  * Tear down @vm.
2769  * Unbind the VM and remove all bos from the vm bo list
2770  */
2771 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2772 {
2773 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2774 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2775 	struct amdgpu_bo *root;
2776 	unsigned long flags;
2777 	int i;
2778 
2779 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2780 
2781 	root = amdgpu_bo_ref(vm->root.bo);
2782 	amdgpu_bo_reserve(root, true);
2783 	/* Remove PASID mapping before destroying VM */
2784 	if (vm->pasid != 0) {
2785 		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2786 		vm->pasid = 0;
2787 	}
2788 	dma_fence_wait(vm->last_unlocked, false);
2789 	dma_fence_put(vm->last_unlocked);
2790 	dma_fence_wait(vm->last_tlb_flush, false);
2791 	/* Make sure that all fence callbacks have completed */
2792 	dma_fence_lock_irqsave(vm->last_tlb_flush, flags);
2793 	dma_fence_unlock_irqrestore(vm->last_tlb_flush, flags);
2794 	dma_fence_put(vm->last_tlb_flush);
2795 
2796 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2797 		if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
2798 			amdgpu_vm_prt_fini(adev, vm);
2799 			prt_fini_needed = false;
2800 		}
2801 
2802 		list_del(&mapping->list);
2803 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2804 	}
2805 
2806 	amdgpu_vm_pt_free_root(adev, vm);
2807 	amdgpu_bo_unreserve(root);
2808 	amdgpu_bo_unref(&root);
2809 	WARN_ON(vm->root.bo);
2810 
2811 	amdgpu_vm_fini_entities(vm);
2812 
2813 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2814 		dev_err(adev->dev, "still active bo inside vm\n");
2816 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2817 					     &vm->va.rb_root, rb) {
2818 		/* Don't remove the mapping here, we don't want to trigger a
2819 		 * rebalance and the tree is about to be destroyed anyway.
2820 		 */
2821 		list_del(&mapping->list);
2822 		kfree(mapping);
2823 	}
2824 
2825 	dma_fence_put(vm->last_update);
2826 
2827 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2828 		amdgpu_vmid_free_reserved(adev, vm, i);
2830 
2831 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2832 
2833 	if (!amdgpu_vm_stats_is_zero(vm)) {
2834 		struct amdgpu_task_info *ti = vm->task_info;
2835 
2836 		dev_warn(adev->dev,
2837 			 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2838 			 ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
2839 	}
2840 
2841 	amdgpu_vm_put_task_info(vm->task_info);
2842 }
2843 
2844 /**
2845  * amdgpu_vm_manager_init - init the VM manager
2846  *
2847  * @adev: amdgpu_device pointer
2848  *
2849  * Initialize the VM manager structures
2850  */
2851 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2852 {
2853 	/* Concurrent flushes are only possible starting with Vega10 and
2854 	 * are broken on Navi10 and Navi14.
2855 	 */
2856 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2857 					      adev->asic_type == CHIP_NAVI10 ||
2858 					      adev->asic_type == CHIP_NAVI14);
2859 	amdgpu_vmid_mgr_init(adev);
2860 
2861 	spin_lock_init(&adev->vm_manager.prt_lock);
2862 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2863 
2864 	/* If not overridden by the user, compute VM tables are only updated
2865 	 * by the CPU on large BAR systems by default.
2866 	 */
2867 #ifdef CONFIG_X86_64
2868 	if (amdgpu_vm_update_mode == -1) {
2869 		/* For asic with VF MMIO access protection
2870 		 * avoid using CPU for VM table updates
2871 		 */
2872 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2873 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2874 			adev->vm_manager.vm_update_mode =
2875 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2876 		else
2877 			adev->vm_manager.vm_update_mode = 0;
2878 	} else
2879 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2880 #else
2881 	adev->vm_manager.vm_update_mode = 0;
2882 #endif
2883 
2884 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2885 }
2886 
2887 /**
2888  * amdgpu_vm_manager_fini - cleanup VM manager
2889  *
2890  * @adev: amdgpu_device pointer
2891  *
2892  * Cleanup the VM manager and free resources.
2893  */
2894 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2895 {
2896 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2897 	xa_destroy(&adev->vm_manager.pasids);
2898 
2899 	amdgpu_vmid_mgr_fini(adev);
2900 }
2901 
2902 /**
2903  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2904  *
2905  * @dev: drm device pointer
2906  * @data: drm_amdgpu_vm
2907  * @filp: drm file pointer
2908  *
2909  * Returns:
2910  * 0 for success, -errno for errors.
2911  */
2912 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2913 {
2914 	union drm_amdgpu_vm *args = data;
2915 	struct amdgpu_device *adev = drm_to_adev(dev);
2916 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2917 	struct amdgpu_vm *vm = &fpriv->vm;
2918 
2919 	/* No valid flags defined yet */
2920 	if (args->in.flags)
2921 		return -EINVAL;
2922 
2923 	switch (args->in.op) {
2924 	case AMDGPU_VM_OP_RESERVE_VMID:
2925 		/* We only need to reserve a vmid from the gfxhub */
2926 		return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
2927 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2928 		amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
2929 		break;
2930 	default:
2931 		return -EINVAL;
2932 	}
2933 
2934 	return 0;
2935 }
2936 
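/*
 * Illustrative userspace sketch (not part of the driver): reserving a VMID
 * through this ioctl, assuming fd is an open render node and using the
 * libdrm helper.
 *
 *	union drm_amdgpu_vm args = {
 *		.in.op = AMDGPU_VM_OP_RESERVE_VMID,
 *		.in.flags = 0,
 *	};
 *
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */
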
2937 /**
2938  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2939  * @adev: amdgpu device pointer
2940  * @pasid: PASID of the VM
2941  * @ts: Timestamp of the fault
2942  * @vmid: VMID, only used for GFX 9.4.3.
2943  * @node_id: Node_id received in IH cookie. Only applicable for
2944  *           GFX 9.4.3.
2945  * @addr: Address of the fault
2946  * @write_fault: true for a write fault, false for a read fault
2947  *
2948  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2949  * shouldn't be reported any more.
2950  */
2951 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2952 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2953 			    bool write_fault)
2954 {
2955 	bool is_compute_context = false;
2956 	struct amdgpu_bo *root;
2957 	unsigned long irqflags;
2958 	uint64_t value, flags;
2959 	struct amdgpu_vm *vm;
2960 	int r;
2961 
2962 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2963 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2964 	if (vm) {
2965 		root = amdgpu_bo_ref(vm->root.bo);
2966 		is_compute_context = vm->is_compute_context;
2967 	} else {
2968 		root = NULL;
2969 	}
2970 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2971 
2972 	if (!root)
2973 		return false;
2974 
2975 	addr /= AMDGPU_GPU_PAGE_SIZE;
2976 
2977 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2978 	    node_id, addr, ts, write_fault)) {
2979 		amdgpu_bo_unref(&root);
2980 		return true;
2981 	}
2982 
2983 	r = amdgpu_bo_reserve(root, true);
2984 	if (r)
2985 		goto error_unref;
2986 
2987 	/* Double check that the VM still exists */
2988 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2989 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2990 	if (vm && vm->root.bo != root)
2991 		vm = NULL;
2992 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2993 	if (!vm)
2994 		goto error_unlock;
2995 
2996 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2997 		AMDGPU_PTE_SYSTEM;
2998 
2999 	if (is_compute_context) {
3000 		/* Intentionally setting invalid PTE flag
3001 		 * combination to force a no-retry-fault
3002 		 */
3003 		flags = AMDGPU_VM_NORETRY_FLAGS;
3004 		value = 0;
3005 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3006 		/* Redirect the access to the dummy page */
3007 		value = adev->dummy_page_addr;
3008 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3009 			AMDGPU_PTE_WRITEABLE;
3010 
3011 	} else {
3012 		/* Let the hw retry silently on the PTE */
3013 		value = 0;
3014 	}
3015 
3016 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
3017 	if (r) {
3018 		pr_debug("failed %d to reserve fence slot\n", r);
3019 		goto error_unlock;
3020 	}
3021 
3022 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
3023 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
3024 	if (r)
3025 		goto error_unlock;
3026 
3027 	r = amdgpu_vm_update_pdes(adev, vm, true);
3028 
3029 error_unlock:
3030 	amdgpu_bo_unreserve(root);
3031 	if (r < 0)
3032 		dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
3033 
3034 error_unref:
3035 	amdgpu_bo_unref(&root);
3036 
3037 	return false;
3038 }
3039 
3040 #if defined(CONFIG_DEBUG_FS)
3041 /**
3042  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
3043  *
3044  * @vm: Requested VM for printing BO info
3045  * @m: debugfs file
3046  *
3047  * Print BO information in debugfs file for the VM
3048  */
3049 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3050 {
3051 	struct amdgpu_bo_va *bo_va, *tmp;
3052 	u64 total_idle = 0;
3053 	u64 total_evicted = 0;
3054 	u64 total_relocated = 0;
3055 	u64 total_moved = 0;
3056 	u64 total_invalidated = 0;
3057 	u64 total_done = 0;
3058 	unsigned int total_idle_objs = 0;
3059 	unsigned int total_evicted_objs = 0;
3060 	unsigned int total_relocated_objs = 0;
3061 	unsigned int total_moved_objs = 0;
3062 	unsigned int total_invalidated_objs = 0;
3063 	unsigned int total_done_objs = 0;
3064 	unsigned int id = 0;
3065 
3066 	amdgpu_vm_assert_locked(vm);
3067 
3068 	spin_lock(&vm->status_lock);
3069 	seq_puts(m, "\tIdle BOs:\n");
3070 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3071 		if (!bo_va->base.bo)
3072 			continue;
3073 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3074 	}
3075 	total_idle_objs = id;
3076 	id = 0;
3077 
3078 	seq_puts(m, "\tEvicted BOs:\n");
3079 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3080 		if (!bo_va->base.bo)
3081 			continue;
3082 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3083 	}
3084 	total_evicted_objs = id;
3085 	id = 0;
3086 
3087 	seq_puts(m, "\tRelocated BOs:\n");
3088 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3089 		if (!bo_va->base.bo)
3090 			continue;
3091 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3092 	}
3093 	total_relocated_objs = id;
3094 	id = 0;
3095 
3096 	seq_puts(m, "\tMoved BOs:\n");
3097 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3098 		if (!bo_va->base.bo)
3099 			continue;
3100 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3101 	}
3102 	total_moved_objs = id;
3103 	id = 0;
3104 
3105 	seq_puts(m, "\tInvalidated BOs:\n");
3106 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3107 		if (!bo_va->base.bo)
3108 			continue;
3109 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3110 	}
3111 	total_invalidated_objs = id;
3112 	id = 0;
3113 
3114 	seq_puts(m, "\tDone BOs:\n");
3115 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3116 		if (!bo_va->base.bo)
3117 			continue;
3118 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3119 	}
3120 	spin_unlock(&vm->status_lock);
3121 	total_done_objs = id;
3122 
3123 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3124 		   total_idle_objs);
3125 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3126 		   total_evicted_objs);
3127 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3128 		   total_relocated_objs);
3129 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3130 		   total_moved_objs);
3131 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3132 		   total_invalidated_objs);
3133 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3134 		   total_done_objs);
3135 }
3136 #endif
3137 
3138 /**
3139  * amdgpu_vm_update_fault_cache - update cached fault info.
3140  * @adev: amdgpu device pointer
3141  * @pasid: PASID of the VM
3142  * @addr: Address of the fault
3143  * @status: GPUVM fault status register
3144  * @vmhub: which vmhub got the fault
3145  *
3146  * Cache the fault info for later use by userspace in debugging.
3147  */
3148 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3149 				  unsigned int pasid,
3150 				  uint64_t addr,
3151 				  uint32_t status,
3152 				  unsigned int vmhub)
3153 {
3154 	struct amdgpu_vm *vm;
3155 	unsigned long flags;
3156 
3157 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3158 
3159 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3160 	/* Don't update the fault cache if status is 0.  In the multiple
3161 	 * fault case, subsequent faults will return a 0 status which is
3162 	 * useless for userspace and replaces the useful fault status, so
3163 	 * only update if status is non-0.
3164 	 */
3165 	if (vm && status) {
3166 		vm->fault_info.addr = addr;
3167 		vm->fault_info.status = status;
3168 		/*
3169 		 * Update the fault information globally for later usage
3170 		 * when vm could be stale or freed.
3171 		 */
3172 		adev->vm_manager.fault_info.addr = addr;
3173 		adev->vm_manager.fault_info.vmhub = vmhub;
3174 		adev->vm_manager.fault_info.status = status;
3175 
3176 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3177 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3178 			vm->fault_info.vmhub |=
3179 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3180 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3181 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3182 			vm->fault_info.vmhub |=
3183 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3184 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3185 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3186 			vm->fault_info.vmhub |=
3187 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3188 		} else {
3189 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3190 		}
3191 	}
3192 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3193 }
3194 
3195 /**
3196  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3197  *
3198  * @vm: VM to test against.
3199  * @bo: BO to be tested.
3200  *
3201  * Returns true if the BO shares the dma_resv object with the root PD and is
3202  * always guaranteed to be valid inside the VM.
3203  */
3204 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3205 {
3206 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3207 }
3208 
3209 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
3210 			       struct amdgpu_task_info *task_info)
3211 {
3212 	dev_err(adev->dev,
3213 		" Process %s pid %d thread %s pid %d\n",
3214 		task_info->process_name, task_info->tgid,
3215 		task_info->task.comm, task_info->task.pid);
3216 }
3217 
3218 void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
3219 				   const struct amdgpu_vm_pte_funcs *vm_pte_funcs)
3220 {
3221 	struct drm_gpu_scheduler *sched;
3222 	int i;
3223 
3224 	for (i = 0; i < adev->sdma.num_instances; i++) {
3225 		if (adev->sdma.has_page_queue)
3226 			sched = &adev->sdma.instance[i].page.sched;
3227 		else
3228 			sched = &adev->sdma.instance[i].ring.sched;
3229 		adev->vm_manager.vm_pte_scheds[i] = sched;
3230 	}
3231 	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
3232 	adev->vm_manager.vm_pte_funcs = vm_pte_funcs;
3233 }
3234