xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 36265d2bcc9eef005e1b175c849f715b4dcd48df)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47 
48 /**
49  * DOC: GPUVM
50  *
51  * GPUVM is the MMU functionality provided on the GPU.
52  * GPUVM is similar to the legacy GART on older asics, however
53  * rather than there being a single global GART table
54  * for the entire GPU, there can be multiple GPUVM page tables active
55  * at any given time.  The GPUVM page tables can contain a mix of
56  * VRAM pages and system pages (both memory and MMIO) and system pages
57  * can be mapped as snooped (cached system pages) or unsnooped
58  * (uncached system pages).
59  *
60  * Each active GPUVM has an ID associated with it and there is a page table
61  * linked with each VMID.  When executing a command buffer,
62  * the kernel tells the engine what VMID to use for that command
63  * buffer.  VMIDs are allocated dynamically as commands are submitted.
64  * The userspace drivers maintain their own address space and the kernel
65  * sets up their page tables accordingly when they submit their
66  * command buffers and a VMID is assigned.
67  * The hardware supports up to 16 active GPUVMs at any given time.
68  *
69  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
71  * as other features such as encryption and caching attributes.
72  *
73  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
74  * addition to an aperture managed by a page table, VMID 0 also has
75  * several other apertures.  There is an aperture for direct access to VRAM
76  * and there is a legacy AGP aperture which just forwards accesses directly
77  * to the matching system physical addresses (or IOVAs when an IOMMU is
78  * present).  These apertures provide direct access to these memories without
79  * incurring the overhead of a page table.  VMID 0 is used by the kernel
80  * driver for tasks like memory management.
81  *
82  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83  * For user applications, each application can have its own unique GPUVM
84  * address space.  The application manages the address space and the kernel
85  * driver manages the GPUVM page tables for each process.  If a GPU client
86  * accesses an invalid page, it will generate a GPU page fault, similar to
87  * accessing an invalid page on a CPU.
88  */
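/*
 * Illustrative sketch (not a function of this driver): a typical flow using
 * the helpers defined below is to lock the root PD, validate the evicted BOs
 * and then bring the page directories up to date.  The validate callback and
 * its parameter are hypothetical placeholders supplied by the caller, and the
 * drm_exec retry loop normally wrapped around the locking is omitted here.
 *
 *	static int example_prepare_vm(struct amdgpu_device *adev,
 *				      struct amdgpu_vm *vm, struct drm_exec *exec,
 *				      int (*validate)(void *p, struct amdgpu_bo *bo),
 *				      void *param)
 *	{
 *		int r;
 *
 *		r = amdgpu_vm_lock_pd(vm, exec, 0);
 *		if (r)
 *			return r;
 *
 *		r = amdgpu_vm_validate(adev, vm, NULL, validate, param);
 *		if (r)
 *			return r;
 *
 *		return amdgpu_vm_update_pdes(adev, vm, false);
 *	}
 */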
89 
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92 
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 		     START, LAST, static, amdgpu_vm_it)
95 
96 #undef START
97 #undef LAST
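/*
 * Illustrative sketch of the helpers generated above: amdgpu_vm_it_insert(),
 * amdgpu_vm_it_iter_first() and amdgpu_vm_it_iter_next() operate on the per
 * VM interval tree of mappings (see amdgpu_vm_bo_map() below, which uses
 * iter_first to detect conflicting mappings).  Addresses are in GPU pages;
 * vm, saddr and eaddr are hypothetical locals.
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
 *	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr))
 *		pr_debug("overlaps 0x%llx-0x%llx\n", m->start, m->last);
 */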
98 
99 /**
100  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
101  */
102 struct amdgpu_prt_cb {
103 
104 	/**
105 	 * @adev: amdgpu device
106 	 */
107 	struct amdgpu_device *adev;
108 
109 	/**
110 	 * @cb: callback
111 	 */
112 	struct dma_fence_cb cb;
113 };
114 
115 /**
116  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117  */
118 struct amdgpu_vm_tlb_seq_struct {
119 	/**
120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 	 */
122 	struct amdgpu_vm *vm;
123 
124 	/**
125 	 * @cb: callback
126 	 */
127 	struct dma_fence_cb cb;
128 };
129 
130 /**
131  * amdgpu_vm_assert_locked - check if VM is correctly locked
132  * @vm: the VM which should be tested
133  *
134  * Asserts that the VM root PD is locked.
135  */
136 static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
137 {
138 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
139 }
140 
141 /**
142  * amdgpu_vm_bo_evicted - vm_bo is evicted
143  *
144  * @vm_bo: vm_bo which is evicted
145  *
146  * State for PDs/PTs and per VM BOs which are not at the location they should
147  * be.
148  */
149 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
150 {
151 	struct amdgpu_vm *vm = vm_bo->vm;
152 	struct amdgpu_bo *bo = vm_bo->bo;
153 
154 	vm_bo->moved = true;
155 	amdgpu_vm_assert_locked(vm);
156 	spin_lock(&vm_bo->vm->status_lock);
157 	if (bo->tbo.type == ttm_bo_type_kernel)
158 		list_move(&vm_bo->vm_status, &vm->evicted);
159 	else
160 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
161 	spin_unlock(&vm_bo->vm->status_lock);
162 }
163 /**
164  * amdgpu_vm_bo_moved - vm_bo is moved
165  *
166  * @vm_bo: vm_bo which is moved
167  *
168  * State for per VM BOs which are moved, but that change is not yet reflected
169  * in the page tables.
170  */
171 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
172 {
173 	amdgpu_vm_assert_locked(vm_bo->vm);
174 	spin_lock(&vm_bo->vm->status_lock);
175 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
176 	spin_unlock(&vm_bo->vm->status_lock);
177 }
178 
179 /**
180  * amdgpu_vm_bo_idle - vm_bo is idle
181  *
182  * @vm_bo: vm_bo which is now idle
183  *
184  * State for PDs/PTs and per VM BOs which have gone through the state machine
185  * and are now idle.
186  */
187 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
188 {
189 	amdgpu_vm_assert_locked(vm_bo->vm);
190 	spin_lock(&vm_bo->vm->status_lock);
191 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
192 	spin_unlock(&vm_bo->vm->status_lock);
193 	vm_bo->moved = false;
194 }
195 
196 /**
197  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
198  *
199  * @vm_bo: vm_bo which is now invalidated
200  *
201  * State for normal BOs which are invalidated and that change is not yet reflected
202  * in the PTs.
203  */
204 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
205 {
206 	spin_lock(&vm_bo->vm->status_lock);
207 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
208 	spin_unlock(&vm_bo->vm->status_lock);
209 }
210 
211 /**
212  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
213  *
214  * @vm_bo: vm_bo which is evicted
215  *
216  * State for BOs used by user mode queues which are not at the location they
217  * should be.
218  */
219 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
220 {
221 	vm_bo->moved = true;
222 	spin_lock(&vm_bo->vm->status_lock);
223 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
224 	spin_unlock(&vm_bo->vm->status_lock);
225 }
226 
227 /**
228  * amdgpu_vm_bo_relocated - vm_bo is relocated
229  *
230  * @vm_bo: vm_bo which is relocated
231  *
232  * State for PDs/PTs which need to update their parent PD.
233  * For the root PD, just move to idle state.
234  */
235 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
236 {
237 	amdgpu_vm_assert_locked(vm_bo->vm);
238 	if (vm_bo->bo->parent) {
239 		spin_lock(&vm_bo->vm->status_lock);
240 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
241 		spin_unlock(&vm_bo->vm->status_lock);
242 	} else {
243 		amdgpu_vm_bo_idle(vm_bo);
244 	}
245 }
246 
247 /**
248  * amdgpu_vm_bo_done - vm_bo is done
249  *
250  * @vm_bo: vm_bo which is now done
251  *
252  * State for normal BOs which are invalidated and that change has been updated
253  * in the PTs.
254  */
255 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
256 {
257 	amdgpu_vm_assert_locked(vm_bo->vm);
258 	spin_lock(&vm_bo->vm->status_lock);
259 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
260 	spin_unlock(&vm_bo->vm->status_lock);
261 }
262 
263 /**
264  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
265  * @vm: the VM which state machine to reset
266  *
267  * Move all vm_bo objects in the VM into a state where they will be updated
268  * again during validation.
269  */
270 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
271 {
272 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
273 
274 	amdgpu_vm_assert_locked(vm);
275 
276 	spin_lock(&vm->status_lock);
277 	list_splice_init(&vm->done, &vm->invalidated);
278 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
279 		vm_bo->moved = true;
280 
281 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
282 		struct amdgpu_bo *bo = vm_bo->bo;
283 
284 		vm_bo->moved = true;
285 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
286 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
287 		else if (bo->parent)
288 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
289 	}
290 	spin_unlock(&vm->status_lock);
291 }
292 
293 /**
294  * amdgpu_vm_update_shared - helper to update shared memory stat
295  * @base: base structure for tracking BO usage in a VM
296  *
297  * Takes the vm status_lock and updates the shared memory stat. If the basic
298  * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called
299  * as well.
300  */
301 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
302 {
303 	struct amdgpu_vm *vm = base->vm;
304 	struct amdgpu_bo *bo = base->bo;
305 	uint64_t size = amdgpu_bo_size(bo);
306 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
307 	bool shared;
308 
309 	dma_resv_assert_held(bo->tbo.base.resv);
310 	spin_lock(&vm->status_lock);
311 	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
312 	if (base->shared != shared) {
313 		base->shared = shared;
314 		if (shared) {
315 			vm->stats[bo_memtype].drm.shared += size;
316 			vm->stats[bo_memtype].drm.private -= size;
317 		} else {
318 			vm->stats[bo_memtype].drm.shared -= size;
319 			vm->stats[bo_memtype].drm.private += size;
320 		}
321 	}
322 	spin_unlock(&vm->status_lock);
323 }
324 
325 /**
326  * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
327  * @bo: amdgpu buffer object
328  *
329  * Update the per VM stats for all the VMs the BO belongs to when it changes
330  * from private to shared or vice versa.
331  */
332 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
333 {
334 	struct amdgpu_vm_bo_base *base;
335 
336 	for (base = bo->vm_bo; base; base = base->next)
337 		amdgpu_vm_update_shared(base);
338 }
339 
340 /**
341  * amdgpu_vm_update_stats_locked - helper to update normal memory stat
342  * @base: base structure for tracking BO usage in a VM
343  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
344  *        be bo->tbo.resource
345  * @sign: if we should add (+1) or subtract (-1) from the stat
346  *
347  * Caller needs to have the vm status_lock held. Useful when multiple updates
348  * need to happen at the same time.
349  */
350 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
351 			    struct ttm_resource *res, int sign)
352 {
353 	struct amdgpu_vm *vm = base->vm;
354 	struct amdgpu_bo *bo = base->bo;
355 	int64_t size = sign * amdgpu_bo_size(bo);
356 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
357 
358 	/* For drm-total- and drm-shared-, BOs are accounted by their preferred
359 	 * placement, see also amdgpu_bo_mem_stats_placement.
360 	 */
361 	if (base->shared)
362 		vm->stats[bo_memtype].drm.shared += size;
363 	else
364 		vm->stats[bo_memtype].drm.private += size;
365 
366 	if (res && res->mem_type < __AMDGPU_PL_NUM) {
367 		uint32_t res_memtype = res->mem_type;
368 
369 		vm->stats[res_memtype].drm.resident += size;
370 		/* A BO only counts as purgeable if it is resident,
371 		 * since otherwise there's nothing to purge.
372 		 */
373 		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
374 			vm->stats[res_memtype].drm.purgeable += size;
375 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
376 			vm->stats[bo_memtype].evicted += size;
377 	}
378 }
379 
380 /**
381  * amdgpu_vm_update_stats - helper to update normal memory stat
382  * @base: base structure for tracking BO usage in a VM
383  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
384  *        be bo->tbo.resource
385  * @sign: if we should add (+1) or subtract (-1) from the stat
386  *
387  * Updates the basic memory stat when bo is added/deleted/moved.
388  */
389 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
390 			    struct ttm_resource *res, int sign)
391 {
392 	struct amdgpu_vm *vm = base->vm;
393 
394 	spin_lock(&vm->status_lock);
395 	amdgpu_vm_update_stats_locked(base, res, sign);
396 	spin_unlock(&vm->status_lock);
397 }
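/*
 * Sketch of the sign convention (illustrative, not lifted from the driver):
 * when a buffer migrates between placements, the stats for the old resource
 * are subtracted and those for the new one added.  The BO move handling in
 * this file does the equivalent under a single status_lock using the _locked
 * variant; old_res and new_res are hypothetical locals here.
 *
 *	amdgpu_vm_update_stats(&bo_va->base, old_res, -1);
 *	amdgpu_vm_update_stats(&bo_va->base, new_res, +1);
 */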
398 
399 /**
400  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
401  *
402  * @base: base structure for tracking BO usage in a VM
403  * @vm: vm to which bo is to be added
404  * @bo: amdgpu buffer object
405  *
406  * Initialize a bo_va_base structure and add it to the appropriate lists
407  *
408  */
409 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
410 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
411 {
412 	base->vm = vm;
413 	base->bo = bo;
414 	base->next = NULL;
415 	INIT_LIST_HEAD(&base->vm_status);
416 
417 	if (!bo)
418 		return;
419 	base->next = bo->vm_bo;
420 	bo->vm_bo = base;
421 
422 	spin_lock(&vm->status_lock);
423 	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
424 	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
425 	spin_unlock(&vm->status_lock);
426 
427 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
428 		return;
429 
430 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
431 
432 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
433 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
434 		amdgpu_vm_bo_relocated(base);
435 	else
436 		amdgpu_vm_bo_idle(base);
437 
438 	if (bo->preferred_domains &
439 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
440 		return;
441 
442 	/*
443 	 * We checked all the prerequisites, but it looks like this per VM BO
444 	 * is currently evicted. Add the BO to the evicted list to make sure it
445 	 * is validated on the next VM use to avoid faults.
446 	 */
447 	amdgpu_vm_bo_evicted(base);
448 }
449 
450 /**
451  * amdgpu_vm_lock_pd - lock PD in drm_exec
452  *
453  * @vm: vm providing the BOs
454  * @exec: drm execution context
455  * @num_fences: number of extra fences to reserve
456  *
457  * Lock the VM root PD in the DRM execution context.
458  */
459 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
460 		      unsigned int num_fences)
461 {
462 	/* We need at least two fences for the VM PD/PT updates */
463 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
464 				    2 + num_fences);
465 }
466 
467 /**
468  * amdgpu_vm_lock_done_list - lock all BOs on the done list
469  * @vm: vm providing the BOs
470  * @exec: drm execution context
471  * @num_fences: number of extra fences to reserve
472  *
473  * Lock the BOs on the done list in the DRM execution context.
474  */
475 int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
476 			     unsigned int num_fences)
477 {
478 	struct list_head *prev = &vm->done;
479 	struct amdgpu_bo_va *bo_va;
480 	struct amdgpu_bo *bo;
481 	int ret;
482 
483 	/* We can only trust prev->next while holding the lock */
484 	spin_lock(&vm->status_lock);
485 	while (!list_is_head(prev->next, &vm->done)) {
486 		bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
487 
488 		bo = bo_va->base.bo;
489 		if (bo) {
490 			amdgpu_bo_ref(bo);
491 			spin_unlock(&vm->status_lock);
492 
493 			ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
494 			amdgpu_bo_unref(&bo);
495 			if (unlikely(ret))
496 				return ret;
497 
498 			spin_lock(&vm->status_lock);
499 		}
500 		prev = prev->next;
501 	}
502 	spin_unlock(&vm->status_lock);
503 
504 	return 0;
505 }
506 
507 /**
508  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
509  *
510  * @adev: amdgpu device pointer
511  * @vm: vm providing the BOs
512  *
513  * Move all BOs to the end of LRU and remember their positions to put them
514  * together.
515  */
516 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
517 				struct amdgpu_vm *vm)
518 {
519 	spin_lock(&adev->mman.bdev.lru_lock);
520 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
521 	spin_unlock(&adev->mman.bdev.lru_lock);
522 }
523 
524 /* Create scheduler entities for page table updates */
525 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
526 				   struct amdgpu_vm *vm)
527 {
528 	int r;
529 
530 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
531 				  adev->vm_manager.vm_pte_scheds,
532 				  adev->vm_manager.vm_pte_num_scheds, NULL);
533 	if (r)
534 		goto error;
535 
536 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
537 				     adev->vm_manager.vm_pte_scheds,
538 				     adev->vm_manager.vm_pte_num_scheds, NULL);
539 
540 error:
541 	drm_sched_entity_destroy(&vm->immediate);
542 	return r;
543 }
544 
545 /* Destroy the entities for page table updates again */
546 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
547 {
548 	drm_sched_entity_destroy(&vm->immediate);
549 	drm_sched_entity_destroy(&vm->delayed);
550 }
551 
552 /**
553  * amdgpu_vm_generation - return the page table re-generation counter
554  * @adev: the amdgpu_device
555  * @vm: optional VM to check, might be NULL
556  *
557  * Returns a page table re-generation token to allow checking if submissions
558  * are still valid to use this VM. The VM parameter might be NULL in which case
559  * just the VRAM lost counter will be used.
560  */
561 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
562 {
563 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
564 
565 	if (!vm)
566 		return result;
567 
568 	result += lower_32_bits(vm->generation);
569 	/* Add one if the page tables will be re-generated on next CS */
570 	if (drm_sched_entity_error(&vm->delayed))
571 		++result;
572 
573 	return result;
574 }
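/*
 * Illustrative use of the token (a sketch, not code from the driver): sample
 * it when work is prepared and compare again before the work reaches the
 * hardware; any difference means VRAM was lost or the page tables will be
 * re-generated, so the prepared submission must not be used.  The error code
 * chosen here is a hypothetical example.
 *
 *	uint64_t token = amdgpu_vm_generation(adev, vm);
 *
 *	... prepare the submission ...
 *
 *	if (amdgpu_vm_generation(adev, vm) != token)
 *		return -ECANCELED;
 */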
575 
576 /**
577  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
578  *
579  * @adev: amdgpu device pointer
580  * @vm: vm providing the BOs
581  * @ticket: optional reservation ticket used to reserve the VM
582  * @validate: callback to do the validation
583  * @param: parameter for the validation callback
584  *
585  * Validate the page table BOs and per-VM BOs on command submission if
586  * necessary. If a ticket is given, also try to validate evicted user queue
587  * BOs. They must already be reserved with the given ticket.
588  *
589  * Returns:
590  * Validation result.
591  */
592 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
593 		       struct ww_acquire_ctx *ticket,
594 		       int (*validate)(void *p, struct amdgpu_bo *bo),
595 		       void *param)
596 {
597 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
598 	struct amdgpu_vm_bo_base *bo_base;
599 	struct amdgpu_bo *bo;
600 	int r;
601 
602 	if (vm->generation != new_vm_generation) {
603 		vm->generation = new_vm_generation;
604 		amdgpu_vm_bo_reset_state_machine(vm);
605 		amdgpu_vm_fini_entities(vm);
606 		r = amdgpu_vm_init_entities(adev, vm);
607 		if (r)
608 			return r;
609 	}
610 
611 	spin_lock(&vm->status_lock);
612 	while (!list_empty(&vm->evicted)) {
613 		bo_base = list_first_entry(&vm->evicted,
614 					   struct amdgpu_vm_bo_base,
615 					   vm_status);
616 		spin_unlock(&vm->status_lock);
617 
618 		bo = bo_base->bo;
619 
620 		r = validate(param, bo);
621 		if (r)
622 			return r;
623 
624 		if (bo->tbo.type != ttm_bo_type_kernel) {
625 			amdgpu_vm_bo_moved(bo_base);
626 		} else {
627 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
628 			amdgpu_vm_bo_relocated(bo_base);
629 		}
630 		spin_lock(&vm->status_lock);
631 	}
632 	while (ticket && !list_empty(&vm->evicted_user)) {
633 		bo_base = list_first_entry(&vm->evicted_user,
634 					   struct amdgpu_vm_bo_base,
635 					   vm_status);
636 		spin_unlock(&vm->status_lock);
637 
638 		bo = bo_base->bo;
639 		dma_resv_assert_held(bo->tbo.base.resv);
640 
641 		r = validate(param, bo);
642 		if (r)
643 			return r;
644 
645 		amdgpu_vm_bo_invalidated(bo_base);
646 
647 		spin_lock(&vm->status_lock);
648 	}
649 	spin_unlock(&vm->status_lock);
650 
651 	amdgpu_vm_eviction_lock(vm);
652 	vm->evicting = false;
653 	amdgpu_vm_eviction_unlock(vm);
654 
655 	return 0;
656 }
657 
658 /**
659  * amdgpu_vm_ready - check VM is ready for updates
660  *
661  * @vm: VM to check
662  *
663  * Check if all VM PDs/PTs are ready for updates
664  *
665  * Returns:
666  * True if VM is not evicting and all VM entities are not stopped
667  */
668 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
669 {
670 	bool ret;
671 
672 	amdgpu_vm_assert_locked(vm);
673 
674 	amdgpu_vm_eviction_lock(vm);
675 	ret = !vm->evicting;
676 	amdgpu_vm_eviction_unlock(vm);
677 
678 	spin_lock(&vm->status_lock);
679 	ret &= list_empty(&vm->evicted);
680 	spin_unlock(&vm->status_lock);
681 
682 	spin_lock(&vm->immediate.lock);
683 	ret &= !vm->immediate.stopped;
684 	spin_unlock(&vm->immediate.lock);
685 
686 	spin_lock(&vm->delayed.lock);
687 	ret &= !vm->delayed.stopped;
688 	spin_unlock(&vm->delayed.lock);
689 
690 	return ret;
691 }
692 
693 /**
694  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
695  *
696  * @adev: amdgpu_device pointer
697  */
698 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
699 {
700 	const struct amdgpu_ip_block *ip_block;
701 	bool has_compute_vm_bug;
702 	struct amdgpu_ring *ring;
703 	int i;
704 
705 	has_compute_vm_bug = false;
706 
707 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
708 	if (ip_block) {
709 		/* Compute has a VM bug for GFX version < 7.
710 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
711 		if (ip_block->version->major <= 7)
712 			has_compute_vm_bug = true;
713 		else if (ip_block->version->major == 8)
714 			if (adev->gfx.mec_fw_version < 673)
715 				has_compute_vm_bug = true;
716 	}
717 
718 	for (i = 0; i < adev->num_rings; i++) {
719 		ring = adev->rings[i];
720 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
721 			/* only compute rings */
722 			ring->has_compute_vm_bug = has_compute_vm_bug;
723 		else
724 			ring->has_compute_vm_bug = false;
725 	}
726 }
727 
728 /**
729  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
730  *
731  * @ring: ring on which the job will be submitted
732  * @job: job to submit
733  *
734  * Returns:
735  * True if sync is needed.
736  */
737 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
738 				  struct amdgpu_job *job)
739 {
740 	struct amdgpu_device *adev = ring->adev;
741 	unsigned vmhub = ring->vm_hub;
742 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
743 
744 	if (job->vmid == 0)
745 		return false;
746 
747 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
748 		return true;
749 
750 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
751 		return true;
752 
753 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
754 		return true;
755 
756 	return false;
757 }
758 
759 /**
760  * amdgpu_vm_flush - hardware flush the vm
761  *
762  * @ring: ring to use for flush
763  * @job:  related job
764  * @need_pipe_sync: is pipe sync needed
765  *
766  * Emit a VM flush when it is necessary.
767  *
768  * Returns:
769  * 0 on success, errno otherwise.
770  */
771 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
772 		    bool need_pipe_sync)
773 {
774 	struct amdgpu_device *adev = ring->adev;
775 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
776 	unsigned vmhub = ring->vm_hub;
777 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
778 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
779 	bool spm_update_needed = job->spm_update_needed;
780 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
781 		job->gds_switch_needed;
782 	bool vm_flush_needed = job->vm_needs_flush;
783 	bool cleaner_shader_needed = false;
784 	bool pasid_mapping_needed = false;
785 	struct dma_fence *fence = NULL;
786 	unsigned int patch;
787 	int r;
788 
789 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
790 		gds_switch_needed = true;
791 		vm_flush_needed = true;
792 		pasid_mapping_needed = true;
793 		spm_update_needed = true;
794 	}
795 
796 	mutex_lock(&id_mgr->lock);
797 	if (id->pasid != job->pasid || !id->pasid_mapping ||
798 	    !dma_fence_is_signaled(id->pasid_mapping))
799 		pasid_mapping_needed = true;
800 	mutex_unlock(&id_mgr->lock);
801 
802 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
803 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
804 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
805 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
806 		ring->funcs->emit_wreg;
807 
808 	cleaner_shader_needed = job->run_cleaner_shader &&
809 		adev->gfx.enable_cleaner_shader &&
810 		ring->funcs->emit_cleaner_shader && job->base.s_fence &&
811 		&job->base.s_fence->scheduled == isolation->spearhead;
812 
813 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
814 	    !cleaner_shader_needed)
815 		return 0;
816 
817 	amdgpu_ring_ib_begin(ring);
818 	if (ring->funcs->init_cond_exec)
819 		patch = amdgpu_ring_init_cond_exec(ring,
820 						   ring->cond_exe_gpu_addr);
821 
822 	if (need_pipe_sync)
823 		amdgpu_ring_emit_pipeline_sync(ring);
824 
825 	if (cleaner_shader_needed)
826 		ring->funcs->emit_cleaner_shader(ring);
827 
828 	if (vm_flush_needed) {
829 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
830 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
831 	}
832 
833 	if (pasid_mapping_needed)
834 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
835 
836 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
837 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
838 
839 	if (ring->funcs->emit_gds_switch &&
840 	    gds_switch_needed) {
841 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
842 					    job->gds_size, job->gws_base,
843 					    job->gws_size, job->oa_base,
844 					    job->oa_size);
845 	}
846 
847 	if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
848 		r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
849 		if (r)
850 			return r;
851 		fence = &job->hw_vm_fence->base;
852 	}
853 
854 	if (vm_flush_needed) {
855 		mutex_lock(&id_mgr->lock);
856 		dma_fence_put(id->last_flush);
857 		id->last_flush = dma_fence_get(fence);
858 		id->current_gpu_reset_count =
859 			atomic_read(&adev->gpu_reset_counter);
860 		mutex_unlock(&id_mgr->lock);
861 	}
862 
863 	if (pasid_mapping_needed) {
864 		mutex_lock(&id_mgr->lock);
865 		id->pasid = job->pasid;
866 		dma_fence_put(id->pasid_mapping);
867 		id->pasid_mapping = dma_fence_get(fence);
868 		mutex_unlock(&id_mgr->lock);
869 	}
870 
871 	/*
872 	 * Make sure that all other submissions wait for the cleaner shader to
873 	 * finish before we push them to the HW.
874 	 */
875 	if (cleaner_shader_needed) {
876 		trace_amdgpu_cleaner_shader(ring, fence);
877 		mutex_lock(&adev->enforce_isolation_mutex);
878 		dma_fence_put(isolation->spearhead);
879 		isolation->spearhead = dma_fence_get(fence);
880 		mutex_unlock(&adev->enforce_isolation_mutex);
881 	}
882 	dma_fence_put(fence);
883 
884 	amdgpu_ring_patch_cond_exec(ring, patch);
885 
886 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
887 	if (ring->funcs->emit_switch_buffer) {
888 		amdgpu_ring_emit_switch_buffer(ring);
889 		amdgpu_ring_emit_switch_buffer(ring);
890 	}
891 
892 	amdgpu_ring_ib_end(ring);
893 	return 0;
894 }
895 
896 /**
897  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
898  *
899  * @vm: requested vm
900  * @bo: requested buffer object
901  *
902  * Find @bo inside the requested vm.
903  * Search inside the @bo's vm list for the requested vm.
904  * Returns the found bo_va or NULL if none is found
905  *
906  * Object has to be reserved!
907  *
908  * Returns:
909  * Found bo_va or NULL.
910  */
911 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
912 				       struct amdgpu_bo *bo)
913 {
914 	struct amdgpu_vm_bo_base *base;
915 
916 	for (base = bo->vm_bo; base; base = base->next) {
917 		if (base->vm != vm)
918 			continue;
919 
920 		return container_of(base, struct amdgpu_bo_va, base);
921 	}
922 	return NULL;
923 }
924 
925 /**
926  * amdgpu_vm_map_gart - Resolve gart mapping of addr
927  *
928  * @pages_addr: optional DMA address to use for lookup
929  * @addr: the unmapped addr
930  *
931  * Look up the physical address of the page that the pte resolves
932  * to.
933  *
934  * Returns:
935  * The pointer for the page table entry.
936  */
937 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
938 {
939 	uint64_t result;
940 
941 	/* page table offset */
942 	result = pages_addr[addr >> PAGE_SHIFT];
943 
944 	/* in case cpu page size != gpu page size */
945 	result |= addr & (~PAGE_MASK);
946 
947 	result &= 0xFFFFFFFFFFFFF000ULL;
948 
949 	return result;
950 }
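/*
 * Worked example, assuming 64KiB CPU pages and 4KiB GPU pages: for
 * addr = 0x13000 and pages_addr[1] = 0xabc0000, the CPU page index is
 * addr >> PAGE_SHIFT = 1, the offset inside that CPU page is
 * addr & ~PAGE_MASK = 0x3000, and the returned, 4KiB aligned address is
 * (0xabc0000 | 0x3000) & 0xFFFFFFFFFFFFF000ULL = 0xabc3000.
 */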
951 
952 /**
953  * amdgpu_vm_update_pdes - make sure that all directories are valid
954  *
955  * @adev: amdgpu_device pointer
956  * @vm: requested vm
957  * @immediate: submit immediately to the paging queue
958  *
959  * Makes sure all directories are up to date.
960  *
961  * Returns:
962  * 0 for success, error for failure.
963  */
964 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
965 			  struct amdgpu_vm *vm, bool immediate)
966 {
967 	struct amdgpu_vm_update_params params;
968 	struct amdgpu_vm_bo_base *entry;
969 	bool flush_tlb_needed = false;
970 	LIST_HEAD(relocated);
971 	int r, idx;
972 
973 	amdgpu_vm_assert_locked(vm);
974 
975 	spin_lock(&vm->status_lock);
976 	list_splice_init(&vm->relocated, &relocated);
977 	spin_unlock(&vm->status_lock);
978 
979 	if (list_empty(&relocated))
980 		return 0;
981 
982 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
983 		return -ENODEV;
984 
985 	memset(&params, 0, sizeof(params));
986 	params.adev = adev;
987 	params.vm = vm;
988 	params.immediate = immediate;
989 
990 	r = vm->update_funcs->prepare(&params, NULL,
991 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
992 	if (r)
993 		goto error;
994 
995 	list_for_each_entry(entry, &relocated, vm_status) {
996 		/* vm_flush_needed after updating moved PDEs */
997 		flush_tlb_needed |= entry->moved;
998 
999 		r = amdgpu_vm_pde_update(&params, entry);
1000 		if (r)
1001 			goto error;
1002 	}
1003 
1004 	r = vm->update_funcs->commit(&params, &vm->last_update);
1005 	if (r)
1006 		goto error;
1007 
1008 	if (flush_tlb_needed)
1009 		atomic64_inc(&vm->tlb_seq);
1010 
1011 	while (!list_empty(&relocated)) {
1012 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
1013 					 vm_status);
1014 		amdgpu_vm_bo_idle(entry);
1015 	}
1016 
1017 error:
1018 	drm_dev_exit(idx);
1019 	return r;
1020 }
1021 
1022 /**
1023  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1024  * @fence: unused
1025  * @cb: the callback structure
1026  *
1027  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1028  */
1029 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1030 				 struct dma_fence_cb *cb)
1031 {
1032 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1033 
1034 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1035 	atomic64_inc(&tlb_cb->vm->tlb_seq);
1036 	kfree(tlb_cb);
1037 }
1038 
1039 /**
1040  * amdgpu_vm_tlb_flush - prepare TLB flush
1041  *
1042  * @params: parameters for update
1043  * @fence: input fence to sync TLB flush with
1044  * @tlb_cb: the callback structure
1045  *
1046  * Increments the tlb sequence once @fence signals so that future CS execute a VM flush.
1047  */
1048 static void
1049 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1050 		    struct dma_fence **fence,
1051 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1052 {
1053 	struct amdgpu_vm *vm = params->vm;
1054 
1055 	tlb_cb->vm = vm;
1056 	if (!fence || !*fence) {
1057 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1058 		return;
1059 	}
1060 
1061 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1062 				    amdgpu_vm_tlb_seq_cb)) {
1063 		dma_fence_put(vm->last_tlb_flush);
1064 		vm->last_tlb_flush = dma_fence_get(*fence);
1065 	} else {
1066 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1067 	}
1068 
1069 	/* Prepare a TLB flush fence to be attached to PTs */
1070 	if (!params->unlocked && vm->is_compute_context) {
1071 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1072 
1073 		/* Makes sure no PD/PT is freed before the flush */
1074 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1075 				   DMA_RESV_USAGE_BOOKKEEP);
1076 	}
1077 }
1078 
1079 /**
1080  * amdgpu_vm_update_range - update a range in the vm page table
1081  *
1082  * @adev: amdgpu_device pointer to use for commands
1083  * @vm: the VM to update the range
1084  * @immediate: immediate submission in a page fault
1085  * @unlocked: unlocked invalidation during MM callback
1086  * @flush_tlb: trigger tlb invalidation after update completed
1087  * @allow_override: change MTYPE for local NUMA nodes
1088  * @sync: fences we need to sync to
1089  * @start: start of mapped range
1090  * @last: last mapped entry
1091  * @flags: flags for the entries
1092  * @offset: offset into nodes and pages_addr
1093  * @vram_base: base for vram mappings
1094  * @res: ttm_resource to map
1095  * @pages_addr: DMA addresses to use for mapping
1096  * @fence: optional resulting fence
1097  *
1098  * Fill in the page table entries between @start and @last.
1099  *
1100  * Returns:
1101  * 0 for success, negative error code for failure.
1102  */
1103 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1104 			   bool immediate, bool unlocked, bool flush_tlb,
1105 			   bool allow_override, struct amdgpu_sync *sync,
1106 			   uint64_t start, uint64_t last, uint64_t flags,
1107 			   uint64_t offset, uint64_t vram_base,
1108 			   struct ttm_resource *res, dma_addr_t *pages_addr,
1109 			   struct dma_fence **fence)
1110 {
1111 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1112 	struct amdgpu_vm_update_params params;
1113 	struct amdgpu_res_cursor cursor;
1114 	int r, idx;
1115 
1116 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1117 		return -ENODEV;
1118 
1119 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
1120 	if (!tlb_cb) {
1121 		drm_dev_exit(idx);
1122 		return -ENOMEM;
1123 	}
1124 
1125 	/* On Vega20+XGMI PTEs get inadvertently cached in the L2 texture cache,
1126 	 * so do a heavy-weight TLB flush unconditionally.
1127 	 */
1128 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1129 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1130 
1131 	/*
1132 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1133 	 */
1134 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1135 
1136 	memset(&params, 0, sizeof(params));
1137 	params.adev = adev;
1138 	params.vm = vm;
1139 	params.immediate = immediate;
1140 	params.pages_addr = pages_addr;
1141 	params.unlocked = unlocked;
1142 	params.needs_flush = flush_tlb;
1143 	params.allow_override = allow_override;
1144 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1145 
1146 	amdgpu_vm_eviction_lock(vm);
1147 	if (vm->evicting) {
1148 		r = -EBUSY;
1149 		goto error_free;
1150 	}
1151 
1152 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1153 		struct dma_fence *tmp = dma_fence_get_stub();
1154 
1155 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1156 		swap(vm->last_unlocked, tmp);
1157 		dma_fence_put(tmp);
1158 	}
1159 
1160 	r = vm->update_funcs->prepare(&params, sync,
1161 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
1162 	if (r)
1163 		goto error_free;
1164 
1165 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1166 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1167 	while (cursor.remaining) {
1168 		uint64_t tmp, num_entries, addr;
1169 
1170 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1171 		if (pages_addr) {
1172 			bool contiguous = true;
1173 
1174 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1175 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1176 				uint64_t count;
1177 
1178 				contiguous = pages_addr[pfn + 1] ==
1179 					pages_addr[pfn] + PAGE_SIZE;
1180 
1181 				tmp = num_entries /
1182 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1183 				for (count = 2; count < tmp; ++count) {
1184 					uint64_t idx = pfn + count;
1185 
1186 					if (contiguous != (pages_addr[idx] ==
1187 					    pages_addr[idx - 1] + PAGE_SIZE))
1188 						break;
1189 				}
1190 				if (!contiguous)
1191 					count--;
1192 				num_entries = count *
1193 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1194 			}
1195 
1196 			if (!contiguous) {
1197 				addr = cursor.start;
1198 				params.pages_addr = pages_addr;
1199 			} else {
1200 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1201 				params.pages_addr = NULL;
1202 			}
1203 
1204 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1205 			addr = vram_base + cursor.start;
1206 		} else {
1207 			addr = 0;
1208 		}
1209 
1210 		tmp = start + num_entries;
1211 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1212 		if (r)
1213 			goto error_free;
1214 
1215 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1216 		start = tmp;
1217 	}
1218 
1219 	r = vm->update_funcs->commit(&params, fence);
1220 	if (r)
1221 		goto error_free;
1222 
1223 	if (params.needs_flush) {
1224 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1225 		tlb_cb = NULL;
1226 	}
1227 
1228 	amdgpu_vm_pt_free_list(adev, &params);
1229 
1230 error_free:
1231 	kfree(tlb_cb);
1232 	amdgpu_vm_eviction_unlock(vm);
1233 	drm_dev_exit(idx);
1234 	return r;
1235 }
1236 
1237 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1238 			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1239 {
1240 	spin_lock(&vm->status_lock);
1241 	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1242 	spin_unlock(&vm->status_lock);
1243 }
1244 
1245 /**
1246  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1247  *
1248  * @adev: amdgpu_device pointer
1249  * @bo_va: requested BO and VM object
1250  * @clear: if true clear the entries
1251  *
1252  * Fill in the page table entries for @bo_va.
1253  *
1254  * Returns:
1255  * 0 for success, -EINVAL for failure.
1256  */
1257 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1258 			bool clear)
1259 {
1260 	struct amdgpu_bo *bo = bo_va->base.bo;
1261 	struct amdgpu_vm *vm = bo_va->base.vm;
1262 	struct amdgpu_bo_va_mapping *mapping;
1263 	struct dma_fence **last_update;
1264 	dma_addr_t *pages_addr = NULL;
1265 	struct ttm_resource *mem;
1266 	struct amdgpu_sync sync;
1267 	bool flush_tlb = clear;
1268 	uint64_t vram_base;
1269 	uint64_t flags;
1270 	bool uncached;
1271 	int r;
1272 
1273 	amdgpu_sync_create(&sync);
1274 	if (clear) {
1275 		mem = NULL;
1276 
1277 		/* Implicitly sync to command submissions in the same VM before
1278 		 * unmapping.
1279 		 */
1280 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1281 				     AMDGPU_SYNC_EQ_OWNER, vm);
1282 		if (r)
1283 			goto error_free;
1284 		if (bo) {
1285 			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1286 			if (r)
1287 				goto error_free;
1288 		}
1289 	} else if (!bo) {
1290 		mem = NULL;
1291 
1292 		/* PRT map operations don't need to sync to anything. */
1293 
1294 	} else {
1295 		struct drm_gem_object *obj = &bo->tbo.base;
1296 
1297 		if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
1298 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1299 			struct drm_gem_object *gobj = dma_buf->priv;
1300 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1301 
1302 			if (abo->tbo.resource &&
1303 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1304 				bo = gem_to_amdgpu_bo(gobj);
1305 		}
1306 		mem = bo->tbo.resource;
1307 		if (mem && (mem->mem_type == TTM_PL_TT ||
1308 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1309 			pages_addr = bo->tbo.ttm->dma_address;
1310 
1311 		/* Implicitly sync to moving fences before mapping anything */
1312 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1313 				     AMDGPU_SYNC_EXPLICIT, vm);
1314 		if (r)
1315 			goto error_free;
1316 	}
1317 
1318 	if (bo) {
1319 		struct amdgpu_device *bo_adev;
1320 
1321 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1322 
1323 		if (amdgpu_bo_encrypted(bo))
1324 			flags |= AMDGPU_PTE_TMZ;
1325 
1326 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1327 		vram_base = bo_adev->vm_manager.vram_base_offset;
1328 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1329 	} else {
1330 		flags = 0x0;
1331 		vram_base = 0;
1332 		uncached = false;
1333 	}
1334 
1335 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1336 		last_update = &vm->last_update;
1337 	else
1338 		last_update = &bo_va->last_pt_update;
1339 
1340 	if (!clear && bo_va->base.moved) {
1341 		flush_tlb = true;
1342 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1343 
1344 	} else if (bo_va->cleared != clear) {
1345 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1346 	}
1347 
1348 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1349 		uint64_t update_flags = flags;
1350 
1351 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1352 		 * bits here, but just in case we filter the flags in the first place
1353 		 */
1354 		if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
1355 			update_flags &= ~AMDGPU_PTE_READABLE;
1356 		if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
1357 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1358 
1359 		/* Apply ASIC specific mapping flags */
1360 		amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
1361 				      &update_flags);
1362 
1363 		trace_amdgpu_vm_bo_update(mapping);
1364 
1365 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1366 					   !uncached, &sync, mapping->start,
1367 					   mapping->last, update_flags,
1368 					   mapping->offset, vram_base, mem,
1369 					   pages_addr, last_update);
1370 		if (r)
1371 			goto error_free;
1372 	}
1373 
1374 	/* If the BO is not in its preferred location add it back to
1375 	 * the evicted list so that it gets validated again on the
1376 	 * next command submission.
1377 	 */
1378 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1379 		if (bo->tbo.resource &&
1380 		    !(bo->preferred_domains &
1381 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1382 			amdgpu_vm_bo_evicted(&bo_va->base);
1383 		else
1384 			amdgpu_vm_bo_idle(&bo_va->base);
1385 	} else {
1386 		amdgpu_vm_bo_done(&bo_va->base);
1387 	}
1388 
1389 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1390 	bo_va->cleared = clear;
1391 	bo_va->base.moved = false;
1392 
1393 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1394 		list_for_each_entry(mapping, &bo_va->valids, list)
1395 			trace_amdgpu_vm_bo_mapping(mapping);
1396 	}
1397 
1398 error_free:
1399 	amdgpu_sync_free(&sync);
1400 	return r;
1401 }
1402 
1403 /**
1404  * amdgpu_vm_update_prt_state - update the global PRT state
1405  *
1406  * @adev: amdgpu_device pointer
1407  */
1408 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1409 {
1410 	unsigned long flags;
1411 	bool enable;
1412 
1413 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1414 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1415 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1416 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1417 }
1418 
1419 /**
1420  * amdgpu_vm_prt_get - add a PRT user
1421  *
1422  * @adev: amdgpu_device pointer
1423  */
1424 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1425 {
1426 	if (!adev->gmc.gmc_funcs->set_prt)
1427 		return;
1428 
1429 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1430 		amdgpu_vm_update_prt_state(adev);
1431 }
1432 
1433 /**
1434  * amdgpu_vm_prt_put - drop a PRT user
1435  *
1436  * @adev: amdgpu_device pointer
1437  */
1438 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1439 {
1440 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1441 		amdgpu_vm_update_prt_state(adev);
1442 }
1443 
1444 /**
1445  * amdgpu_vm_prt_cb - callback for updating the PRT status
1446  *
1447  * @fence: fence for the callback
1448  * @_cb: the callback function
1449  */
1450 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1451 {
1452 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1453 
1454 	amdgpu_vm_prt_put(cb->adev);
1455 	kfree(cb);
1456 }
1457 
1458 /**
1459  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1460  *
1461  * @adev: amdgpu_device pointer
1462  * @fence: fence for the callback
1463  */
1464 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1465 				 struct dma_fence *fence)
1466 {
1467 	struct amdgpu_prt_cb *cb;
1468 
1469 	if (!adev->gmc.gmc_funcs->set_prt)
1470 		return;
1471 
1472 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1473 	if (!cb) {
1474 		/* Last resort when we are OOM */
1475 		if (fence)
1476 			dma_fence_wait(fence, false);
1477 
1478 		amdgpu_vm_prt_put(adev);
1479 	} else {
1480 		cb->adev = adev;
1481 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1482 						     amdgpu_vm_prt_cb))
1483 			amdgpu_vm_prt_cb(fence, &cb->cb);
1484 	}
1485 }
1486 
1487 /**
1488  * amdgpu_vm_free_mapping - free a mapping
1489  *
1490  * @adev: amdgpu_device pointer
1491  * @vm: requested vm
1492  * @mapping: mapping to be freed
1493  * @fence: fence of the unmap operation
1494  *
1495  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1496  */
1497 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1498 				   struct amdgpu_vm *vm,
1499 				   struct amdgpu_bo_va_mapping *mapping,
1500 				   struct dma_fence *fence)
1501 {
1502 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1503 		amdgpu_vm_add_prt_cb(adev, fence);
1504 	kfree(mapping);
1505 }
1506 
1507 /**
1508  * amdgpu_vm_prt_fini - finish all prt mappings
1509  *
1510  * @adev: amdgpu_device pointer
1511  * @vm: requested vm
1512  *
1513  * Register a cleanup callback to disable PRT support after VM dies.
1514  */
1515 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1516 {
1517 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1518 	struct dma_resv_iter cursor;
1519 	struct dma_fence *fence;
1520 
1521 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1522 		/* Add a callback for each fence in the reservation object */
1523 		amdgpu_vm_prt_get(adev);
1524 		amdgpu_vm_add_prt_cb(adev, fence);
1525 	}
1526 }
1527 
1528 /**
1529  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1530  *
1531  * @adev: amdgpu_device pointer
1532  * @vm: requested vm
1533  * @fence: optional resulting fence (unchanged if no work needed to be done
1534  * or if an error occurred)
1535  *
1536  * Make sure all freed BOs are cleared in the PT.
1537  * PTs have to be reserved and mutex must be locked!
1538  *
1539  * Returns:
1540  * 0 for success.
1541  *
1542  */
1543 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1544 			  struct amdgpu_vm *vm,
1545 			  struct dma_fence **fence)
1546 {
1547 	struct amdgpu_bo_va_mapping *mapping;
1548 	struct dma_fence *f = NULL;
1549 	struct amdgpu_sync sync;
1550 	int r;
1551 
1552 
1553 	/*
1554 	 * Implicitly sync to command submissions in the same VM before
1555 	 * unmapping.
1556 	 */
1557 	amdgpu_sync_create(&sync);
1558 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1559 			     AMDGPU_SYNC_EQ_OWNER, vm);
1560 	if (r)
1561 		goto error_free;
1562 
1563 	while (!list_empty(&vm->freed)) {
1564 		mapping = list_first_entry(&vm->freed,
1565 			struct amdgpu_bo_va_mapping, list);
1566 		list_del(&mapping->list);
1567 
1568 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1569 					   &sync, mapping->start, mapping->last,
1570 					   0, 0, 0, NULL, NULL, &f);
1571 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1572 		if (r) {
1573 			dma_fence_put(f);
1574 			goto error_free;
1575 		}
1576 	}
1577 
1578 	if (fence && f) {
1579 		dma_fence_put(*fence);
1580 		*fence = f;
1581 	} else {
1582 		dma_fence_put(f);
1583 	}
1584 
1585 error_free:
1586 	amdgpu_sync_free(&sync);
1587 	return r;
1588 
1589 }
1590 
1591 /**
1592  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1593  *
1594  * @adev: amdgpu_device pointer
1595  * @vm: requested vm
1596  * @ticket: optional reservation ticket used to reserve the VM
1597  *
1598  * Make sure all BOs which are moved are updated in the PTs.
1599  *
1600  * Returns:
1601  * 0 for success.
1602  *
1603  * PTs have to be reserved!
1604  */
1605 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1606 			   struct amdgpu_vm *vm,
1607 			   struct ww_acquire_ctx *ticket)
1608 {
1609 	struct amdgpu_bo_va *bo_va;
1610 	struct dma_resv *resv;
1611 	bool clear, unlock;
1612 	int r;
1613 
1614 	spin_lock(&vm->status_lock);
1615 	while (!list_empty(&vm->moved)) {
1616 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1617 					 base.vm_status);
1618 		spin_unlock(&vm->status_lock);
1619 
1620 		/* Per VM BOs never need to be cleared in the page tables */
1621 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1622 		if (r)
1623 			return r;
1624 		spin_lock(&vm->status_lock);
1625 	}
1626 
1627 	while (!list_empty(&vm->invalidated)) {
1628 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1629 					 base.vm_status);
1630 		resv = bo_va->base.bo->tbo.base.resv;
1631 		spin_unlock(&vm->status_lock);
1632 
1633 		/* Try to reserve the BO to avoid clearing its ptes */
1634 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1635 			clear = false;
1636 			unlock = true;
1637 		/* The caller is already holding the reservation lock */
1638 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1639 			clear = false;
1640 			unlock = false;
1641 		/* Somebody else is using the BO right now */
1642 		} else {
1643 			clear = true;
1644 			unlock = false;
1645 		}
1646 
1647 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1648 
1649 		if (unlock)
1650 			dma_resv_unlock(resv);
1651 		if (r)
1652 			return r;
1653 
1654 		/* Remember evicted DMABuf imports in compute VMs for later
1655 		 * validation
1656 		 */
1657 		if (vm->is_compute_context &&
1658 		    drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
1659 		    (!bo_va->base.bo->tbo.resource ||
1660 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1661 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1662 
1663 		spin_lock(&vm->status_lock);
1664 	}
1665 	spin_unlock(&vm->status_lock);
1666 
1667 	return 0;
1668 }
1669 
1670 /**
1671  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1672  *
1673  * @adev: amdgpu_device pointer
1674  * @vm: requested vm
1675  * @flush_type: flush type
1676  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1677  *
1678  * Flush TLB if needed for a compute VM.
1679  *
1680  * Returns:
1681  * 0 for success.
1682  */
1683 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1684 				struct amdgpu_vm *vm,
1685 				uint32_t flush_type,
1686 				uint32_t xcc_mask)
1687 {
1688 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1689 	bool all_hub = false;
1690 	int xcc = 0, r = 0;
1691 
1692 	WARN_ON_ONCE(!vm->is_compute_context);
1693 
1694 	/*
1695 	 * It can be that we race and lose here, but that is extremely unlikely
1696 	 * and the worst thing which could happen is that we flush the changes
1697 	 * into the TLB once more which is harmless.
1698 	 */
1699 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1700 		return 0;
1701 
1702 	if (adev->family == AMDGPU_FAMILY_AI ||
1703 	    adev->family == AMDGPU_FAMILY_RV)
1704 		all_hub = true;
1705 
1706 	for_each_inst(xcc, xcc_mask) {
1707 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1708 						   all_hub, xcc);
1709 		if (r)
1710 			break;
1711 	}
1712 	return r;
1713 }
1714 
1715 /**
1716  * amdgpu_vm_bo_add - add a bo to a specific vm
1717  *
1718  * @adev: amdgpu_device pointer
1719  * @vm: requested vm
1720  * @bo: amdgpu buffer object
1721  *
1722  * Add @bo into the requested vm.
1723  * Add @bo to the list of bos associated with the vm
1724  *
1725  * Returns:
1726  * Newly added bo_va or NULL for failure
1727  *
1728  * Object has to be reserved!
1729  */
1730 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1731 				      struct amdgpu_vm *vm,
1732 				      struct amdgpu_bo *bo)
1733 {
1734 	struct amdgpu_bo_va *bo_va;
1735 
1736 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1737 	if (!bo_va)
1738 		return NULL;
1739 
1740 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1741 
1742 	bo_va->ref_count = 1;
1743 	bo_va->last_pt_update = dma_fence_get_stub();
1744 	INIT_LIST_HEAD(&bo_va->valids);
1745 	INIT_LIST_HEAD(&bo_va->invalids);
1746 
1747 	if (!bo)
1748 		return bo_va;
1749 
1750 	dma_resv_assert_held(bo->tbo.base.resv);
1751 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1752 		bo_va->is_xgmi = true;
1753 		/* Power up XGMI if it can be potentially used */
1754 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1755 	}
1756 
1757 	return bo_va;
1758 }
1759 
1760 
1761 /**
1762  * amdgpu_vm_bo_insert_map - insert a new mapping
1763  *
1764  * @adev: amdgpu_device pointer
1765  * @bo_va: bo_va to store the address
1766  * @mapping: the mapping to insert
1767  *
1768  * Insert a new mapping into all structures.
1769  */
1770 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1771 				    struct amdgpu_bo_va *bo_va,
1772 				    struct amdgpu_bo_va_mapping *mapping)
1773 {
1774 	struct amdgpu_vm *vm = bo_va->base.vm;
1775 	struct amdgpu_bo *bo = bo_va->base.bo;
1776 
1777 	mapping->bo_va = bo_va;
1778 	list_add(&mapping->list, &bo_va->invalids);
1779 	amdgpu_vm_it_insert(mapping, &vm->va);
1780 
1781 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1782 		amdgpu_vm_prt_get(adev);
1783 
1784 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1785 		amdgpu_vm_bo_moved(&bo_va->base);
1786 
1787 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1788 }
1789 
1790 /* Validate operation parameters to prevent potential abuse */
1791 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1792 					  struct amdgpu_bo *bo,
1793 					  uint64_t saddr,
1794 					  uint64_t offset,
1795 					  uint64_t size)
1796 {
1797 	uint64_t tmp, lpfn;
1798 
1799 	if (saddr & AMDGPU_GPU_PAGE_MASK
1800 	    || offset & AMDGPU_GPU_PAGE_MASK
1801 	    || size & AMDGPU_GPU_PAGE_MASK)
1802 		return -EINVAL;
1803 
1804 	if (check_add_overflow(saddr, size, &tmp)
1805 	    || check_add_overflow(offset, size, &tmp)
1806 	    || size == 0 /* which also leads to end < begin */)
1807 		return -EINVAL;
1808 
1809 	/* make sure object fit at this offset */
1810 	if (bo && offset + size > amdgpu_bo_size(bo))
1811 		return -EINVAL;
1812 
1813 	/* Ensure last pfn not exceed max_pfn */
1814 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1815 	if (lpfn >= adev->vm_manager.max_pfn)
1816 		return -EINVAL;
1817 
1818 	return 0;
1819 }
1820 
1821 /**
1822  * amdgpu_vm_bo_map - map bo inside a vm
1823  *
1824  * @adev: amdgpu_device pointer
1825  * @bo_va: bo_va to store the address
1826  * @saddr: where to map the BO
1827  * @offset: requested offset in the BO
1828  * @size: BO size in bytes
1829  * @flags: attributes of pages (read/write/valid/etc.)
1830  *
1831  * Add a mapping of the BO at the specified addr into the VM.
1832  *
1833  * Returns:
1834  * 0 for success, error for failure.
1835  *
1836  * Object has to be reserved and unreserved outside!
1837  */
1838 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1839 		     struct amdgpu_bo_va *bo_va,
1840 		     uint64_t saddr, uint64_t offset,
1841 		     uint64_t size, uint32_t flags)
1842 {
1843 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1844 	struct amdgpu_bo *bo = bo_va->base.bo;
1845 	struct amdgpu_vm *vm = bo_va->base.vm;
1846 	uint64_t eaddr;
1847 	int r;
1848 
1849 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1850 	if (r)
1851 		return r;
1852 
1853 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1854 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1855 
1856 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1857 	if (tmp) {
1858 		/* bo and tmp overlap, invalid addr */
1859 		dev_err(adev->dev,
1860 			"bo %p va 0x%010Lx-0x%010Lx conflict with 0x%010Lx-0x%010Lx\n",
1861 			bo, saddr, eaddr, tmp->start, tmp->last + 1);
1862 		return -EINVAL;
1863 	}
1864 
1865 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1866 	if (!mapping)
1867 		return -ENOMEM;
1868 
1869 	mapping->start = saddr;
1870 	mapping->last = eaddr;
1871 	mapping->offset = offset;
1872 	mapping->flags = flags;
1873 
1874 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1875 
1876 	return 0;
1877 }
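/*
 * Illustrative sketch (an assumption, not taken from this file): mapping a
 * whole BO at a page-aligned VA. @saddr, @offset and @size must all be
 * multiples of AMDGPU_GPU_PAGE_SIZE or amdgpu_vm_verify_parameters()
 * rejects the call with -EINVAL.
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0,
 *			     amdgpu_bo_size(bo), flags);
 *	if (r)
 *		goto error;
 */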
1878 
1879 /**
1880  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1881  *
1882  * @adev: amdgpu_device pointer
1883  * @bo_va: bo_va to store the address
1884  * @saddr: where to map the BO
1885  * @offset: requested offset in the BO
1886  * @size: BO size in bytes
1887  * @flags: attributes of pages (read/write/valid/etc.)
1888  *
1889  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1890  * mappings as we do so.
1891  *
1892  * Returns:
1893  * 0 for success, error for failure.
1894  *
1895  * Object has to be reserved and unreserved outside!
1896  */
1897 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1898 			     struct amdgpu_bo_va *bo_va,
1899 			     uint64_t saddr, uint64_t offset,
1900 			     uint64_t size, uint32_t flags)
1901 {
1902 	struct amdgpu_bo_va_mapping *mapping;
1903 	struct amdgpu_bo *bo = bo_va->base.bo;
1904 	uint64_t eaddr;
1905 	int r;
1906 
1907 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1908 	if (r)
1909 		return r;
1910 
1911 	/* Allocate all the needed memory */
1912 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1913 	if (!mapping)
1914 		return -ENOMEM;
1915 
1916 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1917 	if (r) {
1918 		kfree(mapping);
1919 		return r;
1920 	}
1921 
1922 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1923 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1924 
1925 	mapping->start = saddr;
1926 	mapping->last = eaddr;
1927 	mapping->offset = offset;
1928 	mapping->flags = flags;
1929 
1930 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1931 
1932 	return 0;
1933 }
1934 
1935 /**
1936  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1937  *
1938  * @adev: amdgpu_device pointer
1939  * @bo_va: bo_va to remove the address from
1940  * @saddr: where the BO is mapped
1941  *
1942  * Remove a mapping of the BO at the specified addr from the VM.
1943  *
1944  * Returns:
1945  * 0 for success, error for failure.
1946  *
1947  * Object has to be reserved and unreserved outside!
1948  */
1949 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1950 		       struct amdgpu_bo_va *bo_va,
1951 		       uint64_t saddr)
1952 {
1953 	struct amdgpu_bo_va_mapping *mapping;
1954 	struct amdgpu_vm *vm = bo_va->base.vm;
1955 	bool valid = true;
1956 	int r;
1957 
1958 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1959 
1960 	list_for_each_entry(mapping, &bo_va->valids, list) {
1961 		if (mapping->start == saddr)
1962 			break;
1963 	}
1964 
1965 	if (&mapping->list == &bo_va->valids) {
1966 		valid = false;
1967 
1968 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1969 			if (mapping->start == saddr)
1970 				break;
1971 		}
1972 
1973 		if (&mapping->list == &bo_va->invalids)
1974 			return -ENOENT;
1975 	}
1976 
1977 	/* It is unlikely that the mapped userq has not been idled by the
1978 	 * time user space issues the GEM unmap IOCTL, unless user space is
1979 	 * forcing the unmap.
1980 	 */
1981 	if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
1982 		r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
1983 		if (unlikely(r == -EBUSY))
1984 			dev_warn_once(adev->dev,
1985 				      "Attempt to unmap an active userq buffer\n");
1986 	}
1987 
1988 	list_del(&mapping->list);
1989 	amdgpu_vm_it_remove(mapping, &vm->va);
1990 	mapping->bo_va = NULL;
1991 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1992 
1993 	if (valid)
1994 		list_add(&mapping->list, &vm->freed);
1995 	else
1996 		amdgpu_vm_free_mapping(adev, vm, mapping,
1997 				       bo_va->last_pt_update);
1998 
1999 	return 0;
2000 }
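/*
 * Illustrative note (derived from the list walk above): @saddr must be the
 * exact start address previously passed to amdgpu_vm_bo_map() or
 * amdgpu_vm_bo_replace_map(); any other address fails with -ENOENT, e.g.
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, va_addr);
 */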
2001 
2002 /**
2003  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2004  *
2005  * @adev: amdgpu_device pointer
2006  * @vm: VM structure to use
2007  * @saddr: start of the range
2008  * @size: size of the range
2009  *
2010  * Remove all mappings in a range, split them as appropriate.
2011  *
2012  * Returns:
2013  * 0 for success, error for failure.
2014  */
2015 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2016 				struct amdgpu_vm *vm,
2017 				uint64_t saddr, uint64_t size)
2018 {
2019 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2020 	LIST_HEAD(removed);
2021 	uint64_t eaddr;
2022 	int r;
2023 
2024 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
2025 	if (r)
2026 		return r;
2027 
2028 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2029 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
2030 
2031 	/* Allocate all the needed memory */
2032 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2033 	if (!before)
2034 		return -ENOMEM;
2035 	INIT_LIST_HEAD(&before->list);
2036 
2037 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2038 	if (!after) {
2039 		kfree(before);
2040 		return -ENOMEM;
2041 	}
2042 	INIT_LIST_HEAD(&after->list);
2043 
2044 	/* Now gather all removed mappings */
2045 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2046 	while (tmp) {
2047 		/* Remember mapping split at the start */
2048 		if (tmp->start < saddr) {
2049 			before->start = tmp->start;
2050 			before->last = saddr - 1;
2051 			before->offset = tmp->offset;
2052 			before->flags = tmp->flags;
2053 			before->bo_va = tmp->bo_va;
2054 			list_add(&before->list, &tmp->bo_va->invalids);
2055 		}
2056 
2057 		/* Remember mapping split at the end */
2058 		if (tmp->last > eaddr) {
2059 			after->start = eaddr + 1;
2060 			after->last = tmp->last;
2061 			after->offset = tmp->offset;
2062 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2063 			after->flags = tmp->flags;
2064 			after->bo_va = tmp->bo_va;
2065 			list_add(&after->list, &tmp->bo_va->invalids);
2066 		}
2067 
2068 		list_del(&tmp->list);
2069 		list_add(&tmp->list, &removed);
2070 
2071 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2072 	}
2073 
2074 	/* And free them up */
2075 	list_for_each_entry_safe(tmp, next, &removed, list) {
2076 		amdgpu_vm_it_remove(tmp, &vm->va);
2077 		list_del(&tmp->list);
2078 
2079 		if (tmp->start < saddr)
2080 			tmp->start = saddr;
2081 		if (tmp->last > eaddr)
2082 			tmp->last = eaddr;
2083 
2084 		tmp->bo_va = NULL;
2085 		list_add(&tmp->list, &vm->freed);
2086 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2087 	}
2088 
2089 	/* Insert partial mapping before the range */
2090 	if (!list_empty(&before->list)) {
2091 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2092 
2093 		amdgpu_vm_it_insert(before, &vm->va);
2094 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2095 			amdgpu_vm_prt_get(adev);
2096 
2097 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2098 		    !before->bo_va->base.moved)
2099 			amdgpu_vm_bo_moved(&before->bo_va->base);
2100 	} else {
2101 		kfree(before);
2102 	}
2103 
2104 	/* Insert partial mapping after the range */
2105 	if (!list_empty(&after->list)) {
2106 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2107 
2108 		amdgpu_vm_it_insert(after, &vm->va);
2109 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2110 			amdgpu_vm_prt_get(adev);
2111 
2112 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2113 		    !after->bo_va->base.moved)
2114 			amdgpu_vm_bo_moved(&after->bo_va->base);
2115 	} else {
2116 		kfree(after);
2117 	}
2118 
2119 	return 0;
2120 }
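/*
 * Worked example (illustrative, addresses are made up): with an existing
 * mapping covering VA 0x100000-0x1fffff, clearing 0x140000-0x17ffff keeps
 * a "before" node for 0x100000-0x13ffff and an "after" node for
 * 0x180000-0x1fffff with its BO offset advanced by the clipped size, while
 * the overlapped middle part is moved to vm->freed for later PT cleanup.
 */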
2121 
2122 /**
2123  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2124  *
2125  * @vm: the requested VM
2126  * @addr: the address
2127  *
2128  * Find a mapping by its address.
2129  *
2130  * Returns:
2131  * The amdgpu_bo_va_mapping matching @addr, or NULL if none is found.
2132  *
2133  */
2134 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2135 							 uint64_t addr)
2136 {
2137 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2138 }
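/*
 * Illustrative note (derived from amdgpu_vm_bo_insert_map() above): the
 * interval tree is keyed in GPU page units, so callers convert byte
 * addresses first, e.g.
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 */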
2139 
2140 /**
2141  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2142  *
2143  * @vm: the requested vm
2144  * @ticket: CS ticket
2145  *
2146  * Trace all mappings of BOs reserved during a command submission.
2147  */
2148 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2149 {
2150 	struct amdgpu_bo_va_mapping *mapping;
2151 
2152 	if (!trace_amdgpu_vm_bo_cs_enabled())
2153 		return;
2154 
2155 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2156 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2157 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2158 			struct amdgpu_bo *bo;
2159 
2160 			bo = mapping->bo_va->base.bo;
2161 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2162 			    ticket)
2163 				continue;
2164 		}
2165 
2166 		trace_amdgpu_vm_bo_cs(mapping);
2167 	}
2168 }
2169 
2170 /**
2171  * amdgpu_vm_bo_del - remove a bo from a specific vm
2172  *
2173  * @adev: amdgpu_device pointer
2174  * @bo_va: requested bo_va
2175  *
2176  * Remove @bo_va->bo from the requested vm.
2177  *
2178  * Object have to be reserved!
2179  */
2180 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2181 		      struct amdgpu_bo_va *bo_va)
2182 {
2183 	struct amdgpu_bo_va_mapping *mapping, *next;
2184 	struct amdgpu_bo *bo = bo_va->base.bo;
2185 	struct amdgpu_vm *vm = bo_va->base.vm;
2186 	struct amdgpu_vm_bo_base **base;
2187 
2188 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2189 
2190 	if (bo) {
2191 		dma_resv_assert_held(bo->tbo.base.resv);
2192 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2193 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2194 
2195 		for (base = &bo_va->base.bo->vm_bo; *base;
2196 		     base = &(*base)->next) {
2197 			if (*base != &bo_va->base)
2198 				continue;
2199 
2200 			amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2201 			*base = bo_va->base.next;
2202 			break;
2203 		}
2204 	}
2205 
2206 	spin_lock(&vm->status_lock);
2207 	list_del(&bo_va->base.vm_status);
2208 	spin_unlock(&vm->status_lock);
2209 
2210 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2211 		list_del(&mapping->list);
2212 		amdgpu_vm_it_remove(mapping, &vm->va);
2213 		mapping->bo_va = NULL;
2214 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2215 		list_add(&mapping->list, &vm->freed);
2216 	}
2217 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2218 		list_del(&mapping->list);
2219 		amdgpu_vm_it_remove(mapping, &vm->va);
2220 		amdgpu_vm_free_mapping(adev, vm, mapping,
2221 				       bo_va->last_pt_update);
2222 	}
2223 
2224 	dma_fence_put(bo_va->last_pt_update);
2225 
2226 	if (bo && bo_va->is_xgmi)
2227 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2228 
2229 	kfree(bo_va);
2230 }
2231 
2232 /**
2233  * amdgpu_vm_evictable - check if we can evict a VM
2234  *
2235  * @bo: A page table of the VM.
2236  *
2237  * Check if it is possible to evict a VM.
2238  */
2239 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2240 {
2241 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2242 
2243 	/* Page tables of a destroyed VM can go away immediately */
2244 	if (!bo_base || !bo_base->vm)
2245 		return true;
2246 
2247 	/* Don't evict VM page tables while they are busy */
2248 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2249 		return false;
2250 
2251 	/* Try to block ongoing updates */
2252 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2253 		return false;
2254 
2255 	/* Don't evict VM page tables while they are updated */
2256 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2257 		amdgpu_vm_eviction_unlock(bo_base->vm);
2258 		return false;
2259 	}
2260 
2261 	bo_base->vm->evicting = true;
2262 	amdgpu_vm_eviction_unlock(bo_base->vm);
2263 	return true;
2264 }
2265 
2266 /**
2267  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2268  *
2269  * @bo: amdgpu buffer object
2270  * @evicted: is the BO evicted
2271  *
2272  * Mark @bo as invalid.
2273  */
2274 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2275 {
2276 	struct amdgpu_vm_bo_base *bo_base;
2277 
2278 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2279 		struct amdgpu_vm *vm = bo_base->vm;
2280 
2281 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2282 			amdgpu_vm_bo_evicted(bo_base);
2283 			continue;
2284 		}
2285 
2286 		if (bo_base->moved)
2287 			continue;
2288 		bo_base->moved = true;
2289 
2290 		if (bo->tbo.type == ttm_bo_type_kernel)
2291 			amdgpu_vm_bo_relocated(bo_base);
2292 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2293 			amdgpu_vm_bo_moved(bo_base);
2294 		else
2295 			amdgpu_vm_bo_invalidated(bo_base);
2296 	}
2297 }
2298 
2299 /**
2300  * amdgpu_vm_bo_move - handle BO move
2301  *
2302  * @bo: amdgpu buffer object
2303  * @new_mem: the new placement of the BO move
2304  * @evicted: is the BO evicted
2305  *
2306  * Update the memory stats for the new placement and mark @bo as invalid.
2307  */
2308 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2309 		       bool evicted)
2310 {
2311 	struct amdgpu_vm_bo_base *bo_base;
2312 
2313 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2314 		struct amdgpu_vm *vm = bo_base->vm;
2315 
2316 		spin_lock(&vm->status_lock);
2317 		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2318 		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2319 		spin_unlock(&vm->status_lock);
2320 	}
2321 
2322 	amdgpu_vm_bo_invalidate(bo, evicted);
2323 }
2324 
2325 /**
2326  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2327  *
2328  * @vm_size: VM size
2329  *
2330  * Returns:
2331  * VM page table as power of two
2332  */
2333 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2334 {
2335 	/* Total bits covered by PD + PTs */
2336 	unsigned bits = ilog2(vm_size) + 18;
2337 
2338 	/* Make sure the PD is 4K in size up to 8GB address space.
2339 	 * Above that split equally between PD and PTs. */
2340 	if (vm_size <= 8)
2341 		return (bits - 9);
2342 	else
2343 		return ((bits + 3) / 2);
2344 }
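/*
 * Worked example (illustrative): for vm_size = 256 GB, bits = ilog2(256) +
 * 18 = 26 and the result is (26 + 3) / 2 = 14, splitting the covered bits
 * roughly evenly between PD and PTs; for vm_size = 8 GB, bits = 21 and the
 * result is 21 - 9 = 12, i.e. a 4K PD.
 */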
2345 
2346 /**
2347  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2348  *
2349  * @adev: amdgpu_device pointer
2350  * @min_vm_size: the minimum vm size in GB when the size is determined automatically
2351  * @fragment_size_default: Default PTE fragment size
2352  * @max_level: max VMPT level
2353  * @max_bits: max address space size in bits
2354  *
2355  */
2356 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2357 			   uint32_t fragment_size_default, unsigned max_level,
2358 			   unsigned max_bits)
2359 {
2360 	unsigned int max_size = 1 << (max_bits - 30);
2361 	unsigned int vm_size;
2362 	uint64_t tmp;
2363 
2364 	/* adjust vm size first */
2365 	if (amdgpu_vm_size != -1) {
2366 		vm_size = amdgpu_vm_size;
2367 		if (vm_size > max_size) {
2368 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2369 				 amdgpu_vm_size, max_size);
2370 			vm_size = max_size;
2371 		}
2372 	} else {
2373 		struct sysinfo si;
2374 		unsigned int phys_ram_gb;
2375 
2376 		/* Optimal VM size depends on the amount of physical
2377 		 * RAM available. Underlying requirements and
2378 		 * assumptions:
2379 		 *
2380 		 *  - Need to map system memory and VRAM from all GPUs
2381 		 *     - VRAM from other GPUs not known here
2382 		 *     - Assume VRAM <= system memory
2383 		 *  - On GFX8 and older, VM space can be segmented for
2384 		 *    different MTYPEs
2385 		 *  - Need to allow room for fragmentation, guard pages etc.
2386 		 *
2387 		 * This adds up to a rough guess of system memory x3.
2388 		 * Round up to power of two to maximize the available
2389 		 * VM size with the given page table size.
2390 		 */
2391 		si_meminfo(&si);
2392 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2393 			       (1 << 30) - 1) >> 30;
2394 		vm_size = roundup_pow_of_two(
2395 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2396 	}
2397 
2398 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2399 
2400 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2401 	if (amdgpu_vm_block_size != -1)
2402 		tmp >>= amdgpu_vm_block_size - 9;
2403 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2404 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2405 	switch (adev->vm_manager.num_level) {
2406 	case 3:
2407 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2408 		break;
2409 	case 2:
2410 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2411 		break;
2412 	case 1:
2413 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2414 		break;
2415 	default:
2416 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2417 	}
2418 	/* block size depends on vm size and hw setup */
2419 	if (amdgpu_vm_block_size != -1)
2420 		adev->vm_manager.block_size =
2421 			min((unsigned)amdgpu_vm_block_size, max_bits
2422 			    - AMDGPU_GPU_PAGE_SHIFT
2423 			    - 9 * adev->vm_manager.num_level);
2424 	else if (adev->vm_manager.num_level > 1)
2425 		adev->vm_manager.block_size = 9;
2426 	else
2427 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2428 
2429 	if (amdgpu_vm_fragment_size == -1)
2430 		adev->vm_manager.fragment_size = fragment_size_default;
2431 	else
2432 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2433 
2434 	dev_info(
2435 		adev->dev,
2436 		"vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2437 		vm_size, adev->vm_manager.num_level + 1,
2438 		adev->vm_manager.block_size, adev->vm_manager.fragment_size);
2439 }
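/*
 * Worked example (illustrative): max_pfn = vm_size << 18 because one GB
 * holds (1 << 30) / AMDGPU_GPU_PAGE_SIZE (4096) = 1 << 18 GPU pages, so a
 * 256 GB VM size yields max_pfn = 1 << 26 pages.
 */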
2440 
2441 /**
2442  * amdgpu_vm_wait_idle - wait for the VM to become idle
2443  *
2444  * @vm: VM object to wait for
2445  * @timeout: timeout to wait for VM to become idle
2446  */
2447 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2448 {
2449 	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
2450 	if (timeout <= 0)
2451 		return timeout;
2452 
2453 	return drm_sched_entity_flush(&vm->delayed, timeout);
2454 }
2455 
2456 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2457 {
2458 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2459 
2460 	kfree(ti);
2461 }
2462 
2463 static inline struct amdgpu_vm *
2464 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2465 {
2466 	struct amdgpu_vm *vm;
2467 	unsigned long flags;
2468 
2469 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2470 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2471 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2472 
2473 	return vm;
2474 }
2475 
2476 /**
2477  * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2478  *
2479  * @task_info: task_info struct under discussion.
2480  *
2481  * frees the vm task_info ptr at the last put
2482  */
2483 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2484 {
2485 	if (task_info)
2486 		kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2487 }
2488 
2489 /**
2490  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2491  *
2492  * @vm: VM to get info from
2493  *
2494  * Returns the reference counted task_info structure, which must be
2495  * referenced down with amdgpu_vm_put_task_info.
2496  */
2497 struct amdgpu_task_info *
2498 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2499 {
2500 	struct amdgpu_task_info *ti = NULL;
2501 
2502 	if (vm) {
2503 		ti = vm->task_info;
2504 		kref_get(&vm->task_info->refcount);
2505 	}
2506 
2507 	return ti;
2508 }
2509 
2510 /**
2511  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2512  *
2513  * @adev: drm device pointer
2514  * @pasid: PASID identifier for VM
2515  *
2516  * Returns the reference counted task_info structure, which must be
2517  * referenced down with amdgpu_vm_put_task_info.
2518  */
2519 struct amdgpu_task_info *
2520 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2521 {
2522 	return amdgpu_vm_get_task_info_vm(
2523 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2524 }
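/*
 * Illustrative usage sketch (an assumption, not taken from this file): the
 * getters return a referenced pointer that must be released with
 * amdgpu_vm_put_task_info() once the caller is done, e.g.
 *
 *	ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *	if (ti) {
 *		dev_err(adev->dev, "fault in process %s\n", ti->process_name);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 */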
2525 
2526 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2527 {
2528 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2529 	if (!vm->task_info)
2530 		return -ENOMEM;
2531 
2532 	kref_init(&vm->task_info->refcount);
2533 	return 0;
2534 }
2535 
2536 /**
2537  * amdgpu_vm_set_task_info - Sets the VM's task info.
2538  *
2539  * @vm: vm for which to set the info
2540  */
2541 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2542 {
2543 	if (!vm->task_info)
2544 		return;
2545 
2546 	if (vm->task_info->task.pid == current->pid)
2547 		return;
2548 
2549 	vm->task_info->task.pid = current->pid;
2550 	get_task_comm(vm->task_info->task.comm, current);
2551 
2552 	if (current->group_leader->mm != current->mm)
2553 		return;
2554 
2555 	vm->task_info->tgid = current->group_leader->pid;
2556 	get_task_comm(vm->task_info->process_name, current->group_leader);
2557 }
2558 
2559 /**
2560  * amdgpu_vm_init - initialize a vm instance
2561  *
2562  * @adev: amdgpu_device pointer
2563  * @vm: requested vm
2564  * @xcp_id: GPU partition selection id
2565  * @pasid: the pasid the VM is using on this GPU
2566  *
2567  * Init @vm fields.
2568  *
2569  * Returns:
2570  * 0 for success, error for failure.
2571  */
2572 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2573 		   int32_t xcp_id, uint32_t pasid)
2574 {
2575 	struct amdgpu_bo *root_bo;
2576 	struct amdgpu_bo_vm *root;
2577 	int r, i;
2578 
2579 	vm->va = RB_ROOT_CACHED;
2580 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2581 		vm->reserved_vmid[i] = NULL;
2582 	INIT_LIST_HEAD(&vm->evicted);
2583 	INIT_LIST_HEAD(&vm->evicted_user);
2584 	INIT_LIST_HEAD(&vm->relocated);
2585 	INIT_LIST_HEAD(&vm->moved);
2586 	INIT_LIST_HEAD(&vm->idle);
2587 	INIT_LIST_HEAD(&vm->invalidated);
2588 	spin_lock_init(&vm->status_lock);
2589 	INIT_LIST_HEAD(&vm->freed);
2590 	INIT_LIST_HEAD(&vm->done);
2591 	INIT_KFIFO(vm->faults);
2592 
2593 	r = amdgpu_vm_init_entities(adev, vm);
2594 	if (r)
2595 		return r;
2596 
2597 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2598 
2599 	vm->is_compute_context = false;
2600 
2601 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2602 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2603 
2604 	dev_dbg(adev->dev, "VM update mode is %s\n",
2605 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2606 	WARN_ONCE((vm->use_cpu_for_update &&
2607 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2608 		  "CPU update of VM recommended only for large BAR system\n");
2609 
2610 	if (vm->use_cpu_for_update)
2611 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2612 	else
2613 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2614 
2615 	vm->last_update = dma_fence_get_stub();
2616 	vm->last_unlocked = dma_fence_get_stub();
2617 	vm->last_tlb_flush = dma_fence_get_stub();
2618 	vm->generation = amdgpu_vm_generation(adev, NULL);
2619 
2620 	mutex_init(&vm->eviction_lock);
2621 	vm->evicting = false;
2622 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2623 
2624 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2625 				false, &root, xcp_id);
2626 	if (r)
2627 		goto error_free_delayed;
2628 
2629 	root_bo = amdgpu_bo_ref(&root->bo);
2630 	r = amdgpu_bo_reserve(root_bo, true);
2631 	if (r) {
2632 		amdgpu_bo_unref(&root_bo);
2633 		goto error_free_delayed;
2634 	}
2635 
2636 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2637 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2638 	if (r)
2639 		goto error_free_root;
2640 
2641 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2642 	if (r)
2643 		goto error_free_root;
2644 
2645 	r = amdgpu_vm_create_task_info(vm);
2646 	if (r)
2647 		dev_dbg(adev->dev, "Failed to create task info for VM\n");
2648 
2649 	/* Store new PASID in XArray (if non-zero) */
2650 	if (pasid != 0) {
2651 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
2652 		if (r < 0)
2653 			goto error_free_root;
2654 
2655 		vm->pasid = pasid;
2656 	}
2657 
2658 	amdgpu_bo_unreserve(vm->root.bo);
2659 	amdgpu_bo_unref(&root_bo);
2660 
2661 	return 0;
2662 
2663 error_free_root:
2664 	/* If PASID was partially set, erase it from XArray before failing */
2665 	if (vm->pasid != 0) {
2666 		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2667 		vm->pasid = 0;
2668 	}
2669 	amdgpu_vm_pt_free_root(adev, vm);
2670 	amdgpu_bo_unreserve(vm->root.bo);
2671 	amdgpu_bo_unref(&root_bo);
2672 
2673 error_free_delayed:
2674 	dma_fence_put(vm->last_tlb_flush);
2675 	dma_fence_put(vm->last_unlocked);
2676 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2677 	amdgpu_vm_fini_entities(vm);
2678 
2679 	return r;
2680 }
2681 
2682 /**
2683  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2684  *
2685  * @adev: amdgpu_device pointer
2686  * @vm: requested vm
2687  *
2688  * This only works on GFX VMs that don't have any BOs added and no
2689  * page tables allocated yet.
2690  *
2691  * Changes the following VM parameters:
2692  * - use_cpu_for_update
2693  *
2694  * Also switches the page table update functions between CPU and SDMA
2695  * accordingly.
2697  *
2698  * Returns:
2699  * 0 for success, -errno for errors.
2700  */
2701 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2702 {
2703 	int r;
2704 
2705 	r = amdgpu_bo_reserve(vm->root.bo, true);
2706 	if (r)
2707 		return r;
2708 
2709 	/* Update VM state */
2710 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2711 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2712 	dev_dbg(adev->dev, "VM update mode is %s\n",
2713 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2714 	WARN_ONCE((vm->use_cpu_for_update &&
2715 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2716 		  "CPU update of VM recommended only for large BAR system\n");
2717 
2718 	if (vm->use_cpu_for_update) {
2719 		/* Sync with last SDMA update/clear before switching to CPU */
2720 		r = amdgpu_bo_sync_wait(vm->root.bo,
2721 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2722 		if (r)
2723 			goto unreserve_bo;
2724 
2725 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2726 		r = amdgpu_vm_pt_map_tables(adev, vm);
2727 		if (r)
2728 			goto unreserve_bo;
2729 
2730 	} else {
2731 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2732 	}
2733 
2734 	dma_fence_put(vm->last_update);
2735 	vm->last_update = dma_fence_get_stub();
2736 	vm->is_compute_context = true;
2737 
2738 unreserve_bo:
2739 	amdgpu_bo_unreserve(vm->root.bo);
2740 	return r;
2741 }
2742 
2743 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2744 {
2745 	for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2746 		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2747 		      vm->stats[i].evicted == 0))
2748 			return false;
2749 	}
2750 	return true;
2751 }
2752 
2753 /**
2754  * amdgpu_vm_fini - tear down a vm instance
2755  *
2756  * @adev: amdgpu_device pointer
2757  * @vm: requested vm
2758  *
2759  * Tear down @vm.
2760  * Unbind the VM and remove all BOs from the VM bo list.
2761  */
2762 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2763 {
2764 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2765 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2766 	struct amdgpu_bo *root;
2767 	unsigned long flags;
2768 	int i;
2769 
2770 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2771 
2772 	root = amdgpu_bo_ref(vm->root.bo);
2773 	amdgpu_bo_reserve(root, true);
2774 	/* Remove PASID mapping before destroying VM */
2775 	if (vm->pasid != 0) {
2776 		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2777 		vm->pasid = 0;
2778 	}
2779 	dma_fence_wait(vm->last_unlocked, false);
2780 	dma_fence_put(vm->last_unlocked);
2781 	dma_fence_wait(vm->last_tlb_flush, false);
2782 	/* Make sure that all fence callbacks have completed */
2783 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2784 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2785 	dma_fence_put(vm->last_tlb_flush);
2786 
2787 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2788 		if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
2789 			amdgpu_vm_prt_fini(adev, vm);
2790 			prt_fini_needed = false;
2791 		}
2792 
2793 		list_del(&mapping->list);
2794 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2795 	}
2796 
2797 	amdgpu_vm_pt_free_root(adev, vm);
2798 	amdgpu_bo_unreserve(root);
2799 	amdgpu_bo_unref(&root);
2800 	WARN_ON(vm->root.bo);
2801 
2802 	amdgpu_vm_fini_entities(vm);
2803 
2804 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2805 		dev_err(adev->dev, "still active bo inside vm\n");
2807 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2808 					     &vm->va.rb_root, rb) {
2809 		/* Don't remove the mapping here, we don't want to trigger a
2810 		 * rebalance and the tree is about to be destroyed anyway.
2811 		 */
2812 		list_del(&mapping->list);
2813 		kfree(mapping);
2814 	}
2815 
2816 	dma_fence_put(vm->last_update);
2817 
2818 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2819 		amdgpu_vmid_free_reserved(adev, vm, i);
2821 
2822 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2823 
2824 	if (!amdgpu_vm_stats_is_zero(vm)) {
2825 		struct amdgpu_task_info *ti = vm->task_info;
2826 
2827 		dev_warn(adev->dev,
2828 			 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2829 			 ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
2830 	}
2831 
2832 	amdgpu_vm_put_task_info(vm->task_info);
2833 }
2834 
2835 /**
2836  * amdgpu_vm_manager_init - init the VM manager
2837  *
2838  * @adev: amdgpu_device pointer
2839  *
2840  * Initialize the VM manager structures
2841  */
2842 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2843 {
2844 	unsigned i;
2845 
2846 	/* Concurrent flushes are only possible starting with Vega10 and
2847 	 * are broken on Navi10 and Navi14.
2848 	 */
2849 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2850 					      adev->asic_type == CHIP_NAVI10 ||
2851 					      adev->asic_type == CHIP_NAVI14);
2852 	amdgpu_vmid_mgr_init(adev);
2853 
2854 	adev->vm_manager.fence_context =
2855 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2856 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2857 		adev->vm_manager.seqno[i] = 0;
2858 
2859 	spin_lock_init(&adev->vm_manager.prt_lock);
2860 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2861 
2862 	/* Unless overridden by the user, compute VM tables are only updated
2863 	 * by the CPU on large BAR systems.
2864 	 */
2865 #ifdef CONFIG_X86_64
2866 	if (amdgpu_vm_update_mode == -1) {
2867 		/* For asic with VF MMIO access protection
2868 		 * avoid using CPU for VM table updates
2869 		 */
2870 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2871 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2872 			adev->vm_manager.vm_update_mode =
2873 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2874 		else
2875 			adev->vm_manager.vm_update_mode = 0;
2876 	} else
2877 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2878 #else
2879 	adev->vm_manager.vm_update_mode = 0;
2880 #endif
2881 
2882 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2883 }
2884 
2885 /**
2886  * amdgpu_vm_manager_fini - cleanup VM manager
2887  *
2888  * @adev: amdgpu_device pointer
2889  *
2890  * Cleanup the VM manager and free resources.
2891  */
2892 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2893 {
2894 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2895 	xa_destroy(&adev->vm_manager.pasids);
2896 
2897 	amdgpu_vmid_mgr_fini(adev);
2898 }
2899 
2900 /**
2901  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2902  *
2903  * @dev: drm device pointer
2904  * @data: drm_amdgpu_vm
2905  * @filp: drm file pointer
2906  *
2907  * Returns:
2908  * 0 for success, -errno for errors.
2909  */
2910 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2911 {
2912 	union drm_amdgpu_vm *args = data;
2913 	struct amdgpu_device *adev = drm_to_adev(dev);
2914 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2915 	struct amdgpu_vm *vm = &fpriv->vm;
2916 
2917 	/* No valid flags defined yet */
2918 	if (args->in.flags)
2919 		return -EINVAL;
2920 
2921 	switch (args->in.op) {
2922 	case AMDGPU_VM_OP_RESERVE_VMID:
2923 		/* We only need to reserve a VMID for the gfxhub */
2924 		amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
2925 		break;
2926 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2927 		amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
2928 		break;
2929 	default:
2930 		return -EINVAL;
2931 	}
2932 
2933 	return 0;
2934 }
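/*
 * Illustrative sketch of a user space caller (an assumption, using libdrm's
 * generic command helper, not part of this file):
 *
 *	union drm_amdgpu_vm args = { 0 };
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */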
2935 
2936 /**
2937  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2938  * @adev: amdgpu device pointer
2939  * @pasid: PASID of the VM
2940  * @ts: Timestamp of the fault
2941  * @vmid: VMID, only used for GFX 9.4.3.
2942  * @node_id: Node_id received in IH cookie. Only applicable for
2943  *           GFX 9.4.3.
2944  * @addr: Address of the fault
2945  * @write_fault: true is write fault, false is read fault
2946  *
2947  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2948  * shouldn't be reported any more.
2949  */
2950 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2951 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2952 			    bool write_fault)
2953 {
2954 	bool is_compute_context = false;
2955 	struct amdgpu_bo *root;
2956 	unsigned long irqflags;
2957 	uint64_t value, flags;
2958 	struct amdgpu_vm *vm;
2959 	int r;
2960 
2961 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2962 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2963 	if (vm) {
2964 		root = amdgpu_bo_ref(vm->root.bo);
2965 		is_compute_context = vm->is_compute_context;
2966 	} else {
2967 		root = NULL;
2968 	}
2969 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2970 
2971 	if (!root)
2972 		return false;
2973 
2974 	addr /= AMDGPU_GPU_PAGE_SIZE;
2975 
2976 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2977 	    node_id, addr, ts, write_fault)) {
2978 		amdgpu_bo_unref(&root);
2979 		return true;
2980 	}
2981 
2982 	r = amdgpu_bo_reserve(root, true);
2983 	if (r)
2984 		goto error_unref;
2985 
2986 	/* Double check that the VM still exists */
2987 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2988 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2989 	if (vm && vm->root.bo != root)
2990 		vm = NULL;
2991 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2992 	if (!vm)
2993 		goto error_unlock;
2994 
2995 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2996 		AMDGPU_PTE_SYSTEM;
2997 
2998 	if (is_compute_context) {
2999 		/* Intentionally setting invalid PTE flag
3000 		 * combination to force a no-retry-fault
3001 		 */
3002 		flags = AMDGPU_VM_NORETRY_FLAGS;
3003 		value = 0;
3004 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3005 		/* Redirect the access to the dummy page */
3006 		value = adev->dummy_page_addr;
3007 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3008 			AMDGPU_PTE_WRITEABLE;
3009 
3010 	} else {
3011 		/* Let the hw retry silently on the PTE */
3012 		value = 0;
3013 	}
3014 
3015 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
3016 	if (r) {
3017 		pr_debug("failed %d to reserve fence slot\n", r);
3018 		goto error_unlock;
3019 	}
3020 
3021 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
3022 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
3023 	if (r)
3024 		goto error_unlock;
3025 
3026 	r = amdgpu_vm_update_pdes(adev, vm, true);
3027 
3028 error_unlock:
3029 	amdgpu_bo_unreserve(root);
3030 	if (r < 0)
3031 		dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
3032 
3033 error_unref:
3034 	amdgpu_bo_unref(&root);
3035 
3036 	return false;
3037 }
3038 
3039 #if defined(CONFIG_DEBUG_FS)
3040 /**
3041  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3042  *
3043  * @vm: Requested VM for printing BO info
3044  * @m: debugfs file
3045  *
3046  * Print BO information in debugfs file for the VM
3047  */
3048 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3049 {
3050 	struct amdgpu_bo_va *bo_va, *tmp;
3051 	u64 total_idle = 0;
3052 	u64 total_evicted = 0;
3053 	u64 total_relocated = 0;
3054 	u64 total_moved = 0;
3055 	u64 total_invalidated = 0;
3056 	u64 total_done = 0;
3057 	unsigned int total_idle_objs = 0;
3058 	unsigned int total_evicted_objs = 0;
3059 	unsigned int total_relocated_objs = 0;
3060 	unsigned int total_moved_objs = 0;
3061 	unsigned int total_invalidated_objs = 0;
3062 	unsigned int total_done_objs = 0;
3063 	unsigned int id = 0;
3064 
3065 	amdgpu_vm_assert_locked(vm);
3066 
3067 	spin_lock(&vm->status_lock);
3068 	seq_puts(m, "\tIdle BOs:\n");
3069 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3070 		if (!bo_va->base.bo)
3071 			continue;
3072 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3073 	}
3074 	total_idle_objs = id;
3075 	id = 0;
3076 
3077 	seq_puts(m, "\tEvicted BOs:\n");
3078 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3079 		if (!bo_va->base.bo)
3080 			continue;
3081 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3082 	}
3083 	total_evicted_objs = id;
3084 	id = 0;
3085 
3086 	seq_puts(m, "\tRelocated BOs:\n");
3087 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3088 		if (!bo_va->base.bo)
3089 			continue;
3090 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3091 	}
3092 	total_relocated_objs = id;
3093 	id = 0;
3094 
3095 	seq_puts(m, "\tMoved BOs:\n");
3096 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3097 		if (!bo_va->base.bo)
3098 			continue;
3099 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3100 	}
3101 	total_moved_objs = id;
3102 	id = 0;
3103 
3104 	seq_puts(m, "\tInvalidated BOs:\n");
3105 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3106 		if (!bo_va->base.bo)
3107 			continue;
3108 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3109 	}
3110 	total_invalidated_objs = id;
3111 	id = 0;
3112 
3113 	seq_puts(m, "\tDone BOs:\n");
3114 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3115 		if (!bo_va->base.bo)
3116 			continue;
3117 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3118 	}
3119 	spin_unlock(&vm->status_lock);
3120 	total_done_objs = id;
3121 
3122 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3123 		   total_idle_objs);
3124 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3125 		   total_evicted_objs);
3126 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3127 		   total_relocated_objs);
3128 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3129 		   total_moved_objs);
3130 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3131 		   total_invalidated_objs);
3132 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3133 		   total_done_objs);
3134 }
3135 #endif
3136 
3137 /**
3138  * amdgpu_vm_update_fault_cache - update cached fault info.
3139  * @adev: amdgpu device pointer
3140  * @pasid: PASID of the VM
3141  * @addr: Address of the fault
3142  * @status: GPUVM fault status register
3143  * @vmhub: which vmhub got the fault
3144  *
3145  * Cache the fault info for later use by userspace in debugging.
3146  */
3147 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3148 				  unsigned int pasid,
3149 				  uint64_t addr,
3150 				  uint32_t status,
3151 				  unsigned int vmhub)
3152 {
3153 	struct amdgpu_vm *vm;
3154 	unsigned long flags;
3155 
3156 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3157 
3158 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3159 	/* Don't update the fault cache if status is 0.  In the multiple
3160 	 * fault case, subsequent faults will return a 0 status which is
3161 	 * useless for userspace and replaces the useful fault status, so
3162 	 * only update if status is non-0.
3163 	 */
3164 	if (vm && status) {
3165 		vm->fault_info.addr = addr;
3166 		vm->fault_info.status = status;
3167 		/*
3168 		 * Update the fault information globally for later usage
3169 		 * when vm could be stale or freed.
3170 		 */
3171 		adev->vm_manager.fault_info.addr = addr;
3172 		adev->vm_manager.fault_info.vmhub = vmhub;
3173 		adev->vm_manager.fault_info.status = status;
3174 
3175 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3176 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3177 			vm->fault_info.vmhub |=
3178 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3179 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3180 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3181 			vm->fault_info.vmhub |=
3182 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3183 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3184 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3185 			vm->fault_info.vmhub |=
3186 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3187 		} else {
3188 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3189 		}
3190 	}
3191 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3192 }
3193 
3194 /**
3195  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3196  *
3197  * @vm: VM to test against.
3198  * @bo: BO to be tested.
3199  *
3200  * Returns true if the BO shares the dma_resv object with the root PD and is
3201  * always guaranteed to be valid inside the VM.
3202  */
3203 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3204 {
3205 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3206 }
3207 
3208 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
3209 			       struct amdgpu_task_info *task_info)
3210 {
3211 	dev_err(adev->dev,
3212 		" Process %s pid %d thread %s pid %d\n",
3213 		task_info->process_name, task_info->tgid,
3214 		task_info->task.comm, task_info->task.pid);
3215 }
3216