xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 02ba7543f261a4c938d984d980d5c4690ba21b7e)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47 
48 /**
49  * DOC: GPUVM
50  *
51  * GPUVM is the MMU functionality provided on the GPU.
52  * GPUVM is similar to the legacy GART on older asics, however
53  * rather than there being a single global GART table
54  * for the entire GPU, there can be multiple GPUVM page tables active
55  * at any given time.  The GPUVM page tables can contain a mix of
56  * VRAM pages and system pages (both memory and MMIO) and system pages
57  * can be mapped as snooped (cached system pages) or unsnooped
58  * (uncached system pages).
59  *
60  * Each active GPUVM has an ID associated with it and there is a page table
61  * linked with each VMID.  When executing a command buffer,
62  * the kernel tells the engine what VMID to use for that command
63  * buffer.  VMIDs are allocated dynamically as commands are submitted.
64  * The userspace drivers maintain their own address space and the kernel
65  * sets up their page tables accordingly when they submit their
66  * command buffers and a VMID is assigned.
67  * The hardware supports up to 16 active GPUVMs at any given time.
68  *
69  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
71  * as other features such as encryption and caching attributes.
72  *
73  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
74  * addition to an aperture managed by a page table, VMID 0 also has
75  * several other apertures.  There is an aperture for direct access to VRAM
76  * and there is a legacy AGP aperture which just forwards accesses directly
77  * to the matching system physical addresses (or IOVAs when an IOMMU is
78  * present).  These apertures provide direct access to these memories without
79  * incurring the overhead of a page table.  VMID 0 is used by the kernel
80  * driver for tasks like memory management.
81  *
82  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83  * For user applications, each application can have their own unique GPUVM
84  * address space.  The application manages the address space and the kernel
85  * driver manages the GPUVM page tables for each process.  If a GPU client
86  * accesses an invalid page, it will generate a GPU page fault, similar to
87  * accessing an invalid page on a CPU.
88  */
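
/*
 * Illustrative sketch of how a GPUVM virtual address could be decomposed
 * into page directory/page table indices.  The level count and bit widths
 * below are assumptions for illustration only; the real layout depends on
 * the ASIC family and the configured block size:
 *
 *   pte_index  = (va >> 12) & 0x1ff;   hypothetical 4 KiB pages
 *   pde0_index = (va >> 21) & 0x1ff;   hypothetical 9 bits per level
 *   pde1_index = (va >> 30) & 0x1ff;
 *   pde2_index = (va >> 39) & 0x1ff;
 */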
89 
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92 
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 		     START, LAST, static, amdgpu_vm_it)
95 
96 #undef START
97 #undef LAST
98 
99 /**
100  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
101  */
102 struct amdgpu_prt_cb {
103 
104 	/**
105 	 * @adev: amdgpu device
106 	 */
107 	struct amdgpu_device *adev;
108 
109 	/**
110 	 * @cb: callback
111 	 */
112 	struct dma_fence_cb cb;
113 };
114 
115 /**
116  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117  */
118 struct amdgpu_vm_tlb_seq_struct {
119 	/**
120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 	 */
122 	struct amdgpu_vm *vm;
123 
124 	/**
125 	 * @cb: callback
126 	 */
127 	struct dma_fence_cb cb;
128 };
129 
130 /**
131  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
132  *
133  * @adev: amdgpu_device pointer
134  * @vm: amdgpu_vm pointer
135  * @pasid: the pasid the VM is using on this GPU
136  *
137  * Set the pasid this VM is using on this GPU; can also be used to remove the
138  * pasid by passing in zero.
139  *
140  */
141 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
142 			u32 pasid)
143 {
144 	int r;
145 
146 	if (vm->pasid == pasid)
147 		return 0;
148 
149 	if (vm->pasid) {
150 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
151 		if (r < 0)
152 			return r;
153 
154 		vm->pasid = 0;
155 	}
156 
157 	if (pasid) {
158 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
159 					GFP_KERNEL));
160 		if (r < 0)
161 			return r;
162 
163 		vm->pasid = pasid;
164 	}
165 
166 
167 	return 0;
168 }
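
/*
 * Usage sketch for amdgpu_vm_set_pasid() (illustrative only, error handling
 * elided and the pasid value is an assumption):
 *
 *   r = amdgpu_vm_set_pasid(adev, vm, pasid);  associate vm with pasid
 *   ...
 *   r = amdgpu_vm_set_pasid(adev, vm, 0);      drop the association again
 */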
169 
170 /**
171  * amdgpu_vm_bo_evicted - vm_bo is evicted
172  *
173  * @vm_bo: vm_bo which is evicted
174  *
175  * State for PDs/PTs and per VM BOs which are not at the location they should
176  * be.
177  */
178 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
179 {
180 	struct amdgpu_vm *vm = vm_bo->vm;
181 	struct amdgpu_bo *bo = vm_bo->bo;
182 
183 	vm_bo->moved = true;
184 	spin_lock(&vm_bo->vm->status_lock);
185 	if (bo->tbo.type == ttm_bo_type_kernel)
186 		list_move(&vm_bo->vm_status, &vm->evicted);
187 	else
188 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
189 	spin_unlock(&vm_bo->vm->status_lock);
190 }
191 /**
192  * amdgpu_vm_bo_moved - vm_bo is moved
193  *
194  * @vm_bo: vm_bo which is moved
195  *
196  * State for per VM BOs which are moved, but that change is not yet reflected
197  * in the page tables.
198  */
199 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
200 {
201 	spin_lock(&vm_bo->vm->status_lock);
202 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
203 	spin_unlock(&vm_bo->vm->status_lock);
204 }
205 
206 /**
207  * amdgpu_vm_bo_idle - vm_bo is idle
208  *
209  * @vm_bo: vm_bo which is now idle
210  *
211  * State for PDs/PTs and per VM BOs which have gone through the state machine
212  * and are now idle.
213  */
214 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
215 {
216 	spin_lock(&vm_bo->vm->status_lock);
217 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
218 	spin_unlock(&vm_bo->vm->status_lock);
219 	vm_bo->moved = false;
220 }
221 
222 /**
223  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
224  *
225  * @vm_bo: vm_bo which is now invalidated
226  *
227  * State for normal BOs which are invalidated and that change is not yet
228  * reflected in the PTs.
229  */
230 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
231 {
232 	spin_lock(&vm_bo->vm->status_lock);
233 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
234 	spin_unlock(&vm_bo->vm->status_lock);
235 }
236 
237 /**
238  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
239  *
240  * @vm_bo: vm_bo which is evicted
241  *
242  * State for BOs used by user mode queues which are not at the location they
243  * should be.
244  */
245 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
246 {
247 	vm_bo->moved = true;
248 	spin_lock(&vm_bo->vm->status_lock);
249 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
250 	spin_unlock(&vm_bo->vm->status_lock);
251 }
252 
253 /**
254  * amdgpu_vm_bo_relocated - vm_bo is relocated
255  *
256  * @vm_bo: vm_bo which is relocated
257  *
258  * State for PDs/PTs which need to update their parent PD.
259  * For the root PD, just move to idle state.
260  */
261 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
262 {
263 	if (vm_bo->bo->parent) {
264 		spin_lock(&vm_bo->vm->status_lock);
265 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
266 		spin_unlock(&vm_bo->vm->status_lock);
267 	} else {
268 		amdgpu_vm_bo_idle(vm_bo);
269 	}
270 }
271 
272 /**
273  * amdgpu_vm_bo_done - vm_bo is done
274  *
275  * @vm_bo: vm_bo which is now done
276  *
277  * State for normal BOs which are invalidated and that change has been updated
278  * in the PTs.
279  */
280 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
281 {
282 	spin_lock(&vm_bo->vm->status_lock);
283 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
284 	spin_unlock(&vm_bo->vm->status_lock);
285 }
286 
287 /**
288  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
289  * @vm: the VM which state machine to reset
290  *
291  * Move all vm_bo objects in the VM into a state where they will be updated
292  * again during validation.
293  */
294 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
295 {
296 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
297 
298 	spin_lock(&vm->status_lock);
299 	list_splice_init(&vm->done, &vm->invalidated);
300 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
301 		vm_bo->moved = true;
302 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
303 		struct amdgpu_bo *bo = vm_bo->bo;
304 
305 		vm_bo->moved = true;
306 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
307 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
308 		else if (bo->parent)
309 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
310 	}
311 	spin_unlock(&vm->status_lock);
312 }
313 
314 /**
315  * amdgpu_vm_update_shared - helper to update shared memory stat
316  * @base: base structure for tracking BO usage in a VM
317  *
318  * Takes the vm status_lock and updates the shared memory stat. If the basic
319  * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called
320  * as well.
321  */
322 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
323 {
324 	struct amdgpu_vm *vm = base->vm;
325 	struct amdgpu_bo *bo = base->bo;
326 	uint64_t size = amdgpu_bo_size(bo);
327 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
328 	bool shared;
329 
330 	spin_lock(&vm->status_lock);
331 	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
332 	if (base->shared != shared) {
333 		base->shared = shared;
334 		if (shared) {
335 			vm->stats[bo_memtype].drm.shared += size;
336 			vm->stats[bo_memtype].drm.private -= size;
337 		} else {
338 			vm->stats[bo_memtype].drm.shared -= size;
339 			vm->stats[bo_memtype].drm.private += size;
340 		}
341 	}
342 	spin_unlock(&vm->status_lock);
343 }
344 
345 /**
346  * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
347  * @bo: amdgpu buffer object
348  *
349  * Update the per VM stats for all the VMs this BO belongs to, if needed, when
350  * it changes from private to shared or vice versa.
351  */
352 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
353 {
354 	struct amdgpu_vm_bo_base *base;
355 
356 	for (base = bo->vm_bo; base; base = base->next)
357 		amdgpu_vm_update_shared(base);
358 }
359 
360 /**
361  * amdgpu_vm_update_stats_locked - helper to update normal memory stat
362  * @base: base structure for tracking BO usage in a VM
363  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
364  *        be bo->tbo.resource
365  * @sign: if we should add (+1) or subtract (-1) from the stat
366  *
367  * Caller needs to have the vm status_lock held. Useful for when multiple
368  * updates need to happen at the same time.
369  */
370 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
371 			    struct ttm_resource *res, int sign)
372 {
373 	struct amdgpu_vm *vm = base->vm;
374 	struct amdgpu_bo *bo = base->bo;
375 	int64_t size = sign * amdgpu_bo_size(bo);
376 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
377 
378 	/* For drm-total- and drm-shared-, BOs are accounted by their preferred
379 	 * placement, see also amdgpu_bo_mem_stats_placement.
380 	 */
381 	if (base->shared)
382 		vm->stats[bo_memtype].drm.shared += size;
383 	else
384 		vm->stats[bo_memtype].drm.private += size;
385 
386 	if (res && res->mem_type < __AMDGPU_PL_NUM) {
387 		uint32_t res_memtype = res->mem_type;
388 
389 		vm->stats[res_memtype].drm.resident += size;
390 		/* A BO only counts as purgeable if it is resident,
391 		 * since otherwise there's nothing to purge.
392 		 */
393 		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
394 			vm->stats[res_memtype].drm.purgeable += size;
395 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
396 			vm->stats[bo_memtype].evicted += size;
397 	}
398 }
399 
400 /**
401  * amdgpu_vm_update_stats - helper to update normal memory stat
402  * @base: base structure for tracking BO usage in a VM
403  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
404  *        be bo->tbo.resource
405  * @sign: if we should add (+1) or subtract (-1) from the stat
406  *
407  * Updates the basic memory stat when bo is added/deleted/moved.
408  */
409 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
410 			    struct ttm_resource *res, int sign)
411 {
412 	struct amdgpu_vm *vm = base->vm;
413 
414 	spin_lock(&vm->status_lock);
415 	amdgpu_vm_update_stats_locked(base, res, sign);
416 	spin_unlock(&vm->status_lock);
417 }
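
/*
 * Illustrative sketch: when a BO changes placement the caller is expected to
 * account the old resource with sign -1 and the new one with sign +1
 * (old_res/new_res are placeholder names):
 *
 *   amdgpu_vm_update_stats(base, old_res, -1);
 *   amdgpu_vm_update_stats(base, new_res, +1);
 */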
418 
419 /**
420  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
421  *
422  * @base: base structure for tracking BO usage in a VM
423  * @vm: vm to which bo is to be added
424  * @bo: amdgpu buffer object
425  *
426  * Initialize a bo_va_base structure and add it to the appropriate lists
427  *
428  */
429 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
430 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
431 {
432 	base->vm = vm;
433 	base->bo = bo;
434 	base->next = NULL;
435 	INIT_LIST_HEAD(&base->vm_status);
436 
437 	if (!bo)
438 		return;
439 	base->next = bo->vm_bo;
440 	bo->vm_bo = base;
441 
442 	spin_lock(&vm->status_lock);
443 	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
444 	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
445 	spin_unlock(&vm->status_lock);
446 
447 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
448 		return;
449 
450 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
451 
452 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
453 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
454 		amdgpu_vm_bo_relocated(base);
455 	else
456 		amdgpu_vm_bo_idle(base);
457 
458 	if (bo->preferred_domains &
459 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
460 		return;
461 
462 	/*
463 	 * We checked all the prerequisites, but it looks like this per VM BO
464 	 * is currently evicted. Add the BO to the evicted list to make sure it
465 	 * is validated on next VM use to avoid a fault.
466 	 */
467 	amdgpu_vm_bo_evicted(base);
468 }
469 
470 /**
471  * amdgpu_vm_lock_pd - lock PD in drm_exec
472  *
473  * @vm: vm providing the BOs
474  * @exec: drm execution context
475  * @num_fences: number of extra fences to reserve
476  *
477  * Lock the VM root PD in the DRM execution context.
478  */
479 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
480 		      unsigned int num_fences)
481 {
482 	/* We need at least two fences for the VM PD/PT updates */
483 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
484 				    2 + num_fences);
485 }
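
/*
 * Usage sketch for amdgpu_vm_lock_pd(), assuming the usual drm_exec retry
 * loop (illustrative only, error handling shortened):
 *
 *   drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *   drm_exec_until_all_locked(&exec) {
 *           r = amdgpu_vm_lock_pd(vm, &exec, 2);
 *           drm_exec_retry_on_contention(&exec);
 *           if (r)
 *                   goto error;
 *   }
 */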
486 
487 /**
488  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
489  *
490  * @adev: amdgpu device pointer
491  * @vm: vm providing the BOs
492  *
493  * Move all BOs to the end of LRU and remember their positions to put them
494  * together.
495  */
496 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
497 				struct amdgpu_vm *vm)
498 {
499 	spin_lock(&adev->mman.bdev.lru_lock);
500 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
501 	spin_unlock(&adev->mman.bdev.lru_lock);
502 }
503 
504 /* Create scheduler entities for page table updates */
505 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
506 				   struct amdgpu_vm *vm)
507 {
508 	int r;
509 
510 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
511 				  adev->vm_manager.vm_pte_scheds,
512 				  adev->vm_manager.vm_pte_num_scheds, NULL);
513 	if (r)
514 		goto error;
515 
516 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
517 				     adev->vm_manager.vm_pte_scheds,
518 				     adev->vm_manager.vm_pte_num_scheds, NULL);
519 
520 error:
521 	drm_sched_entity_destroy(&vm->immediate);
522 	return r;
523 }
524 
525 /* Destroy the entities for page table updates again */
526 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
527 {
528 	drm_sched_entity_destroy(&vm->immediate);
529 	drm_sched_entity_destroy(&vm->delayed);
530 }
531 
532 /**
533  * amdgpu_vm_generation - return the page table re-generation counter
534  * @adev: the amdgpu_device
535  * @vm: optional VM to check, might be NULL
536  *
537  * Returns a page table re-generation token to allow checking if submissions
538  * are still valid to use this VM. The VM parameter might be NULL in which case
539  * just the VRAM lost counter will be used.
540  */
541 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
542 {
543 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
544 
545 	if (!vm)
546 		return result;
547 
548 	result += lower_32_bits(vm->generation);
549 	/* Add one if the page tables will be re-generated on next CS */
550 	if (drm_sched_entity_error(&vm->delayed))
551 		++result;
552 
553 	return result;
554 }
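
/*
 * Illustrative note: the token packs the VRAM lost counter into the upper
 * 32 bits and (roughly) the VM generation into the lower 32 bits, so a
 * caller like amdgpu_vm_validate() below can simply cache the value and
 * compare it for equality later:
 *
 *   u64 gen = amdgpu_vm_generation(adev, vm);
 *   ...
 *   if (gen != amdgpu_vm_generation(adev, vm))
 *           the page tables need to be rebuilt
 */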
555 
556 /**
557  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
558  *
559  * @adev: amdgpu device pointer
560  * @vm: vm providing the BOs
561  * @ticket: optional reservation ticket used to reserve the VM
562  * @validate: callback to do the validation
563  * @param: parameter for the validation callback
564  *
565  * Validate the page table BOs and per-VM BOs on command submission if
566  * necessary. If a ticket is given, also try to validate evicted user queue
567  * BOs. They must already be reserved with the given ticket.
568  *
569  * Returns:
570  * Validation result.
571  */
572 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
573 		       struct ww_acquire_ctx *ticket,
574 		       int (*validate)(void *p, struct amdgpu_bo *bo),
575 		       void *param)
576 {
577 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
578 	struct amdgpu_vm_bo_base *bo_base;
579 	struct amdgpu_bo *bo;
580 	int r;
581 
582 	if (vm->generation != new_vm_generation) {
583 		vm->generation = new_vm_generation;
584 		amdgpu_vm_bo_reset_state_machine(vm);
585 		amdgpu_vm_fini_entities(vm);
586 		r = amdgpu_vm_init_entities(adev, vm);
587 		if (r)
588 			return r;
589 	}
590 
591 	spin_lock(&vm->status_lock);
592 	while (!list_empty(&vm->evicted)) {
593 		bo_base = list_first_entry(&vm->evicted,
594 					   struct amdgpu_vm_bo_base,
595 					   vm_status);
596 		spin_unlock(&vm->status_lock);
597 
598 		bo = bo_base->bo;
599 
600 		r = validate(param, bo);
601 		if (r)
602 			return r;
603 
604 		if (bo->tbo.type != ttm_bo_type_kernel) {
605 			amdgpu_vm_bo_moved(bo_base);
606 		} else {
607 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
608 			amdgpu_vm_bo_relocated(bo_base);
609 		}
610 		spin_lock(&vm->status_lock);
611 	}
612 	while (ticket && !list_empty(&vm->evicted_user)) {
613 		bo_base = list_first_entry(&vm->evicted_user,
614 					   struct amdgpu_vm_bo_base,
615 					   vm_status);
616 		spin_unlock(&vm->status_lock);
617 
618 		bo = bo_base->bo;
619 
620 		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
621 			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
622 
623 			pr_warn_ratelimited("Evicted user BO is not reserved\n");
624 			if (ti) {
625 				pr_warn_ratelimited("pid %d\n", ti->pid);
626 				amdgpu_vm_put_task_info(ti);
627 			}
628 
629 			return -EINVAL;
630 		}
631 
632 		r = validate(param, bo);
633 		if (r)
634 			return r;
635 
636 		amdgpu_vm_bo_invalidated(bo_base);
637 
638 		spin_lock(&vm->status_lock);
639 	}
640 	spin_unlock(&vm->status_lock);
641 
642 	amdgpu_vm_eviction_lock(vm);
643 	vm->evicting = false;
644 	amdgpu_vm_eviction_unlock(vm);
645 
646 	return 0;
647 }
648 
649 /**
650  * amdgpu_vm_ready - check VM is ready for updates
651  *
652  * @vm: VM to check
653  *
654  * Check if all VM PDs/PTs are ready for updates
655  *
656  * Returns:
657  * True if VM is not evicting.
658  */
659 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
660 {
661 	bool empty;
662 	bool ret;
663 
664 	amdgpu_vm_eviction_lock(vm);
665 	ret = !vm->evicting;
666 	amdgpu_vm_eviction_unlock(vm);
667 
668 	spin_lock(&vm->status_lock);
669 	empty = list_empty(&vm->evicted);
670 	spin_unlock(&vm->status_lock);
671 
672 	return ret && empty;
673 }
674 
675 /**
676  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
677  *
678  * @adev: amdgpu_device pointer
679  */
680 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
681 {
682 	const struct amdgpu_ip_block *ip_block;
683 	bool has_compute_vm_bug;
684 	struct amdgpu_ring *ring;
685 	int i;
686 
687 	has_compute_vm_bug = false;
688 
689 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
690 	if (ip_block) {
691 		/* Compute has a VM bug for GFX version < 7.
692 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
693 		if (ip_block->version->major <= 7)
694 			has_compute_vm_bug = true;
695 		else if (ip_block->version->major == 8)
696 			if (adev->gfx.mec_fw_version < 673)
697 				has_compute_vm_bug = true;
698 	}
699 
700 	for (i = 0; i < adev->num_rings; i++) {
701 		ring = adev->rings[i];
702 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
703 			/* only compute rings */
704 			ring->has_compute_vm_bug = has_compute_vm_bug;
705 		else
706 			ring->has_compute_vm_bug = false;
707 	}
708 }
709 
710 /**
711  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
712  *
713  * @ring: ring on which the job will be submitted
714  * @job: job to submit
715  *
716  * Returns:
717  * True if sync is needed.
718  */
719 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
720 				  struct amdgpu_job *job)
721 {
722 	struct amdgpu_device *adev = ring->adev;
723 	unsigned vmhub = ring->vm_hub;
724 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
725 
726 	if (job->vmid == 0)
727 		return false;
728 
729 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
730 		return true;
731 
732 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
733 		return true;
734 
735 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
736 		return true;
737 
738 	return false;
739 }
740 
741 /**
742  * amdgpu_vm_flush - hardware flush the vm
743  *
744  * @ring: ring to use for flush
745  * @job:  related job
746  * @need_pipe_sync: is pipe sync needed
747  *
748  * Emit a VM flush when it is necessary.
749  *
750  * Returns:
751  * 0 on success, errno otherwise.
752  */
753 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
754 		    bool need_pipe_sync)
755 {
756 	struct amdgpu_device *adev = ring->adev;
757 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
758 	unsigned vmhub = ring->vm_hub;
759 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
760 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
761 	bool spm_update_needed = job->spm_update_needed;
762 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
763 		job->gds_switch_needed;
764 	bool vm_flush_needed = job->vm_needs_flush;
765 	bool cleaner_shader_needed = false;
766 	bool pasid_mapping_needed = false;
767 	struct dma_fence *fence = NULL;
768 	unsigned int patch;
769 	int r;
770 
771 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
772 		gds_switch_needed = true;
773 		vm_flush_needed = true;
774 		pasid_mapping_needed = true;
775 		spm_update_needed = true;
776 	}
777 
778 	mutex_lock(&id_mgr->lock);
779 	if (id->pasid != job->pasid || !id->pasid_mapping ||
780 	    !dma_fence_is_signaled(id->pasid_mapping))
781 		pasid_mapping_needed = true;
782 	mutex_unlock(&id_mgr->lock);
783 
784 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
785 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
786 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
787 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
788 		ring->funcs->emit_wreg;
789 
790 	cleaner_shader_needed = adev->gfx.enable_cleaner_shader &&
791 		ring->funcs->emit_cleaner_shader && job->base.s_fence &&
792 		&job->base.s_fence->scheduled == isolation->spearhead;
793 
794 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
795 	    !cleaner_shader_needed)
796 		return 0;
797 
798 	amdgpu_ring_ib_begin(ring);
799 	if (ring->funcs->init_cond_exec)
800 		patch = amdgpu_ring_init_cond_exec(ring,
801 						   ring->cond_exe_gpu_addr);
802 
803 	if (need_pipe_sync)
804 		amdgpu_ring_emit_pipeline_sync(ring);
805 
806 	if (cleaner_shader_needed)
807 		ring->funcs->emit_cleaner_shader(ring);
808 
809 	if (vm_flush_needed) {
810 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
811 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
812 	}
813 
814 	if (pasid_mapping_needed)
815 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
816 
817 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
818 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
819 
820 	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
821 	    gds_switch_needed) {
822 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
823 					    job->gds_size, job->gws_base,
824 					    job->gws_size, job->oa_base,
825 					    job->oa_size);
826 	}
827 
828 	if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
829 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
830 		if (r)
831 			return r;
832 	}
833 
834 	if (vm_flush_needed) {
835 		mutex_lock(&id_mgr->lock);
836 		dma_fence_put(id->last_flush);
837 		id->last_flush = dma_fence_get(fence);
838 		id->current_gpu_reset_count =
839 			atomic_read(&adev->gpu_reset_counter);
840 		mutex_unlock(&id_mgr->lock);
841 	}
842 
843 	if (pasid_mapping_needed) {
844 		mutex_lock(&id_mgr->lock);
845 		id->pasid = job->pasid;
846 		dma_fence_put(id->pasid_mapping);
847 		id->pasid_mapping = dma_fence_get(fence);
848 		mutex_unlock(&id_mgr->lock);
849 	}
850 
851 	/*
852 	 * Make sure that all other submissions wait for the cleaner shader to
853 	 * finish before we push them to the HW.
854 	 */
855 	if (cleaner_shader_needed) {
856 		trace_amdgpu_cleaner_shader(ring, fence);
857 		mutex_lock(&adev->enforce_isolation_mutex);
858 		dma_fence_put(isolation->spearhead);
859 		isolation->spearhead = dma_fence_get(fence);
860 		mutex_unlock(&adev->enforce_isolation_mutex);
861 	}
862 	dma_fence_put(fence);
863 
864 	amdgpu_ring_patch_cond_exec(ring, patch);
865 
866 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
867 	if (ring->funcs->emit_switch_buffer) {
868 		amdgpu_ring_emit_switch_buffer(ring);
869 		amdgpu_ring_emit_switch_buffer(ring);
870 	}
871 
872 	amdgpu_ring_ib_end(ring);
873 	return 0;
874 }
875 
876 /**
877  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
878  *
879  * @vm: requested vm
880  * @bo: requested buffer object
881  *
882  * Find @bo inside the requested vm.
883  * Search inside the @bo's vm list for the requested vm
884  * Returns the found bo_va or NULL if none is found
885  *
886  * Object has to be reserved!
887  *
888  * Returns:
889  * Found bo_va or NULL.
890  */
891 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
892 				       struct amdgpu_bo *bo)
893 {
894 	struct amdgpu_vm_bo_base *base;
895 
896 	for (base = bo->vm_bo; base; base = base->next) {
897 		if (base->vm != vm)
898 			continue;
899 
900 		return container_of(base, struct amdgpu_bo_va, base);
901 	}
902 	return NULL;
903 }
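
/*
 * Usage sketch (illustrative only): look up the bo_va of a reserved BO in a
 * given VM, creating it if it does not exist yet:
 *
 *   bo_va = amdgpu_vm_bo_find(vm, bo);
 *   if (!bo_va)
 *           bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 */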
904 
905 /**
906  * amdgpu_vm_map_gart - Resolve gart mapping of addr
907  *
908  * @pages_addr: optional DMA address to use for lookup
909  * @addr: the unmapped addr
910  *
911  * Look up the physical address of the page that the pte resolves
912  * to.
913  *
914  * Returns:
915  * The pointer for the page table entry.
916  */
917 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
918 {
919 	uint64_t result;
920 
921 	/* page table offset */
922 	result = pages_addr[addr >> PAGE_SHIFT];
923 
924 	/* in case cpu page size != gpu page size */
925 	result |= addr & (~PAGE_MASK);
926 
927 	result &= 0xFFFFFFFFFFFFF000ULL;
928 
929 	return result;
930 }
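
/*
 * Worked example (illustrative): with 64 KiB CPU pages and 4 KiB GPU pages,
 * addr = 0x13000 resolves to pages_addr[1] for the CPU page, the low 16 bits
 * (0x3000) select the GPU page inside it, and the final mask keeps the
 * result 4 KiB aligned, i.e. pages_addr[1] + 0x3000.
 */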
931 
932 /**
933  * amdgpu_vm_update_pdes - make sure that all directories are valid
934  *
935  * @adev: amdgpu_device pointer
936  * @vm: requested vm
937  * @immediate: submit immediately to the paging queue
938  *
939  * Makes sure all directories are up to date.
940  *
941  * Returns:
942  * 0 for success, error for failure.
943  */
944 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
945 			  struct amdgpu_vm *vm, bool immediate)
946 {
947 	struct amdgpu_vm_update_params params;
948 	struct amdgpu_vm_bo_base *entry;
949 	bool flush_tlb_needed = false;
950 	LIST_HEAD(relocated);
951 	int r, idx;
952 
953 	spin_lock(&vm->status_lock);
954 	list_splice_init(&vm->relocated, &relocated);
955 	spin_unlock(&vm->status_lock);
956 
957 	if (list_empty(&relocated))
958 		return 0;
959 
960 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
961 		return -ENODEV;
962 
963 	memset(&params, 0, sizeof(params));
964 	params.adev = adev;
965 	params.vm = vm;
966 	params.immediate = immediate;
967 
968 	r = vm->update_funcs->prepare(&params, NULL);
969 	if (r)
970 		goto error;
971 
972 	list_for_each_entry(entry, &relocated, vm_status) {
973 		/* vm_flush_needed after updating moved PDEs */
974 		flush_tlb_needed |= entry->moved;
975 
976 		r = amdgpu_vm_pde_update(&params, entry);
977 		if (r)
978 			goto error;
979 	}
980 
981 	r = vm->update_funcs->commit(&params, &vm->last_update);
982 	if (r)
983 		goto error;
984 
985 	if (flush_tlb_needed)
986 		atomic64_inc(&vm->tlb_seq);
987 
988 	while (!list_empty(&relocated)) {
989 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
990 					 vm_status);
991 		amdgpu_vm_bo_idle(entry);
992 	}
993 
994 error:
995 	drm_dev_exit(idx);
996 	return r;
997 }
998 
999 /**
1000  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1001  * @fence: unused
1002  * @cb: the callback structure
1003  *
1004  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1005  */
1006 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1007 				 struct dma_fence_cb *cb)
1008 {
1009 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1010 
1011 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1012 	atomic64_inc(&tlb_cb->vm->tlb_seq);
1013 	kfree(tlb_cb);
1014 }
1015 
1016 /**
1017  * amdgpu_vm_tlb_flush - prepare TLB flush
1018  *
1019  * @params: parameters for update
1020  * @fence: input fence to sync TLB flush with
1021  * @tlb_cb: the callback structure
1022  *
1023  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1024  */
1025 static void
1026 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1027 		    struct dma_fence **fence,
1028 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1029 {
1030 	struct amdgpu_vm *vm = params->vm;
1031 
1032 	tlb_cb->vm = vm;
1033 	if (!fence || !*fence) {
1034 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1035 		return;
1036 	}
1037 
1038 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1039 				    amdgpu_vm_tlb_seq_cb)) {
1040 		dma_fence_put(vm->last_tlb_flush);
1041 		vm->last_tlb_flush = dma_fence_get(*fence);
1042 	} else {
1043 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1044 	}
1045 
1046 	/* Prepare a TLB flush fence to be attached to PTs */
1047 	if (!params->unlocked && vm->is_compute_context) {
1048 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1049 
1050 		/* Makes sure no PD/PT is freed before the flush */
1051 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1052 				   DMA_RESV_USAGE_BOOKKEEP);
1053 	}
1054 }
1055 
1056 /**
1057  * amdgpu_vm_update_range - update a range in the vm page table
1058  *
1059  * @adev: amdgpu_device pointer to use for commands
1060  * @vm: the VM to update the range
1061  * @immediate: immediate submission in a page fault
1062  * @unlocked: unlocked invalidation during MM callback
1063  * @flush_tlb: trigger tlb invalidation after update completed
1064  * @allow_override: change MTYPE for local NUMA nodes
1065  * @sync: fences we need to sync to
1066  * @start: start of mapped range
1067  * @last: last mapped entry
1068  * @flags: flags for the entries
1069  * @offset: offset into nodes and pages_addr
1070  * @vram_base: base for vram mappings
1071  * @res: ttm_resource to map
1072  * @pages_addr: DMA addresses to use for mapping
1073  * @fence: optional resulting fence
1074  *
1075  * Fill in the page table entries between @start and @last.
1076  *
1077  * Returns:
1078  * 0 for success, negative error code for failure.
1079  */
1080 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1081 			   bool immediate, bool unlocked, bool flush_tlb,
1082 			   bool allow_override, struct amdgpu_sync *sync,
1083 			   uint64_t start, uint64_t last, uint64_t flags,
1084 			   uint64_t offset, uint64_t vram_base,
1085 			   struct ttm_resource *res, dma_addr_t *pages_addr,
1086 			   struct dma_fence **fence)
1087 {
1088 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1089 	struct amdgpu_vm_update_params params;
1090 	struct amdgpu_res_cursor cursor;
1091 	int r, idx;
1092 
1093 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1094 		return -ENODEV;
1095 
1096 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
1097 	if (!tlb_cb) {
1098 		drm_dev_exit(idx);
1099 		return -ENOMEM;
1100 	}
1101 
1102 	/* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2 texture
1103 	 * cache, so do a heavy-weight TLB flush unconditionally.
1104 	 */
1105 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1106 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1107 
1108 	/*
1109 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1110 	 */
1111 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1112 
1113 	memset(&params, 0, sizeof(params));
1114 	params.adev = adev;
1115 	params.vm = vm;
1116 	params.immediate = immediate;
1117 	params.pages_addr = pages_addr;
1118 	params.unlocked = unlocked;
1119 	params.needs_flush = flush_tlb;
1120 	params.allow_override = allow_override;
1121 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1122 
1123 	amdgpu_vm_eviction_lock(vm);
1124 	if (vm->evicting) {
1125 		r = -EBUSY;
1126 		goto error_free;
1127 	}
1128 
1129 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1130 		struct dma_fence *tmp = dma_fence_get_stub();
1131 
1132 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1133 		swap(vm->last_unlocked, tmp);
1134 		dma_fence_put(tmp);
1135 	}
1136 
1137 	r = vm->update_funcs->prepare(&params, sync);
1138 	if (r)
1139 		goto error_free;
1140 
1141 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1142 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1143 	while (cursor.remaining) {
1144 		uint64_t tmp, num_entries, addr;
1145 
1146 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1147 		if (pages_addr) {
1148 			bool contiguous = true;
1149 
1150 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1151 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1152 				uint64_t count;
1153 
1154 				contiguous = pages_addr[pfn + 1] ==
1155 					pages_addr[pfn] + PAGE_SIZE;
1156 
1157 				tmp = num_entries /
1158 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1159 				for (count = 2; count < tmp; ++count) {
1160 					uint64_t idx = pfn + count;
1161 
1162 					if (contiguous != (pages_addr[idx] ==
1163 					    pages_addr[idx - 1] + PAGE_SIZE))
1164 						break;
1165 				}
1166 				if (!contiguous)
1167 					count--;
1168 				num_entries = count *
1169 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1170 			}
1171 
1172 			if (!contiguous) {
1173 				addr = cursor.start;
1174 				params.pages_addr = pages_addr;
1175 			} else {
1176 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1177 				params.pages_addr = NULL;
1178 			}
1179 
1180 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1181 			addr = vram_base + cursor.start;
1182 		} else {
1183 			addr = 0;
1184 		}
1185 
1186 		tmp = start + num_entries;
1187 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1188 		if (r)
1189 			goto error_free;
1190 
1191 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1192 		start = tmp;
1193 	}
1194 
1195 	r = vm->update_funcs->commit(&params, fence);
1196 	if (r)
1197 		goto error_free;
1198 
1199 	if (params.needs_flush) {
1200 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1201 		tlb_cb = NULL;
1202 	}
1203 
1204 	amdgpu_vm_pt_free_list(adev, &params);
1205 
1206 error_free:
1207 	kfree(tlb_cb);
1208 	amdgpu_vm_eviction_unlock(vm);
1209 	drm_dev_exit(idx);
1210 	return r;
1211 }
1212 
1213 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1214 			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1215 {
1216 	spin_lock(&vm->status_lock);
1217 	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1218 	spin_unlock(&vm->status_lock);
1219 }
1220 
1221 /**
1222  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1223  *
1224  * @adev: amdgpu_device pointer
1225  * @bo_va: requested BO and VM object
1226  * @clear: if true clear the entries
1227  *
1228  * Fill in the page table entries for @bo_va.
1229  *
1230  * Returns:
1231  * 0 for success, -EINVAL for failure.
1232  */
1233 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1234 			bool clear)
1235 {
1236 	struct amdgpu_bo *bo = bo_va->base.bo;
1237 	struct amdgpu_vm *vm = bo_va->base.vm;
1238 	struct amdgpu_bo_va_mapping *mapping;
1239 	struct dma_fence **last_update;
1240 	dma_addr_t *pages_addr = NULL;
1241 	struct ttm_resource *mem;
1242 	struct amdgpu_sync sync;
1243 	bool flush_tlb = clear;
1244 	uint64_t vram_base;
1245 	uint64_t flags;
1246 	bool uncached;
1247 	int r;
1248 
1249 	amdgpu_sync_create(&sync);
1250 	if (clear) {
1251 		mem = NULL;
1252 
1253 		/* Implicitly sync to command submissions in the same VM before
1254 		 * unmapping.
1255 		 */
1256 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1257 				     AMDGPU_SYNC_EQ_OWNER, vm);
1258 		if (r)
1259 			goto error_free;
1260 		if (bo) {
1261 			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1262 			if (r)
1263 				goto error_free;
1264 		}
1265 	} else if (!bo) {
1266 		mem = NULL;
1267 
1268 		/* PRT map operations don't need to sync to anything. */
1269 
1270 	} else {
1271 		struct drm_gem_object *obj = &bo->tbo.base;
1272 
1273 		if (obj->import_attach && bo_va->is_xgmi) {
1274 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1275 			struct drm_gem_object *gobj = dma_buf->priv;
1276 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1277 
1278 			if (abo->tbo.resource &&
1279 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1280 				bo = gem_to_amdgpu_bo(gobj);
1281 		}
1282 		mem = bo->tbo.resource;
1283 		if (mem && (mem->mem_type == TTM_PL_TT ||
1284 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1285 			pages_addr = bo->tbo.ttm->dma_address;
1286 
1287 		/* Implicitly sync to moving fences before mapping anything */
1288 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1289 				     AMDGPU_SYNC_EXPLICIT, vm);
1290 		if (r)
1291 			goto error_free;
1292 	}
1293 
1294 	if (bo) {
1295 		struct amdgpu_device *bo_adev;
1296 
1297 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1298 
1299 		if (amdgpu_bo_encrypted(bo))
1300 			flags |= AMDGPU_PTE_TMZ;
1301 
1302 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1303 		vram_base = bo_adev->vm_manager.vram_base_offset;
1304 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1305 	} else {
1306 		flags = 0x0;
1307 		vram_base = 0;
1308 		uncached = false;
1309 	}
1310 
1311 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1312 		last_update = &vm->last_update;
1313 	else
1314 		last_update = &bo_va->last_pt_update;
1315 
1316 	if (!clear && bo_va->base.moved) {
1317 		flush_tlb = true;
1318 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1319 
1320 	} else if (bo_va->cleared != clear) {
1321 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1322 	}
1323 
1324 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1325 		uint64_t update_flags = flags;
1326 
1327 		/* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits
1328 		 * here, but just in case we filter the flags first.
1329 		 */
1330 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1331 			update_flags &= ~AMDGPU_PTE_READABLE;
1332 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1333 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1334 
1335 		/* Apply ASIC specific mapping flags */
1336 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1337 
1338 		trace_amdgpu_vm_bo_update(mapping);
1339 
1340 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1341 					   !uncached, &sync, mapping->start,
1342 					   mapping->last, update_flags,
1343 					   mapping->offset, vram_base, mem,
1344 					   pages_addr, last_update);
1345 		if (r)
1346 			goto error_free;
1347 	}
1348 
1349 	/* If the BO is not in its preferred location add it back to
1350 	 * the evicted list so that it gets validated again on the
1351 	 * next command submission.
1352 	 */
1353 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1354 		if (bo->tbo.resource &&
1355 		    !(bo->preferred_domains &
1356 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1357 			amdgpu_vm_bo_evicted(&bo_va->base);
1358 		else
1359 			amdgpu_vm_bo_idle(&bo_va->base);
1360 	} else {
1361 		amdgpu_vm_bo_done(&bo_va->base);
1362 	}
1363 
1364 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1365 	bo_va->cleared = clear;
1366 	bo_va->base.moved = false;
1367 
1368 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1369 		list_for_each_entry(mapping, &bo_va->valids, list)
1370 			trace_amdgpu_vm_bo_mapping(mapping);
1371 	}
1372 
1373 error_free:
1374 	amdgpu_sync_free(&sync);
1375 	return r;
1376 }
1377 
1378 /**
1379  * amdgpu_vm_update_prt_state - update the global PRT state
1380  *
1381  * @adev: amdgpu_device pointer
1382  */
1383 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1384 {
1385 	unsigned long flags;
1386 	bool enable;
1387 
1388 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1389 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1390 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1391 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1392 }
1393 
1394 /**
1395  * amdgpu_vm_prt_get - add a PRT user
1396  *
1397  * @adev: amdgpu_device pointer
1398  */
1399 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1400 {
1401 	if (!adev->gmc.gmc_funcs->set_prt)
1402 		return;
1403 
1404 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1405 		amdgpu_vm_update_prt_state(adev);
1406 }
1407 
1408 /**
1409  * amdgpu_vm_prt_put - drop a PRT user
1410  *
1411  * @adev: amdgpu_device pointer
1412  */
1413 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1414 {
1415 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1416 		amdgpu_vm_update_prt_state(adev);
1417 }
1418 
1419 /**
1420  * amdgpu_vm_prt_cb - callback for updating the PRT status
1421  *
1422  * @fence: fence for the callback
1423  * @_cb: the callback function
1424  */
1425 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1426 {
1427 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1428 
1429 	amdgpu_vm_prt_put(cb->adev);
1430 	kfree(cb);
1431 }
1432 
1433 /**
1434  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1435  *
1436  * @adev: amdgpu_device pointer
1437  * @fence: fence for the callback
1438  */
1439 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1440 				 struct dma_fence *fence)
1441 {
1442 	struct amdgpu_prt_cb *cb;
1443 
1444 	if (!adev->gmc.gmc_funcs->set_prt)
1445 		return;
1446 
1447 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1448 	if (!cb) {
1449 		/* Last resort when we are OOM */
1450 		if (fence)
1451 			dma_fence_wait(fence, false);
1452 
1453 		amdgpu_vm_prt_put(adev);
1454 	} else {
1455 		cb->adev = adev;
1456 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1457 						     amdgpu_vm_prt_cb))
1458 			amdgpu_vm_prt_cb(fence, &cb->cb);
1459 	}
1460 }
1461 
1462 /**
1463  * amdgpu_vm_free_mapping - free a mapping
1464  *
1465  * @adev: amdgpu_device pointer
1466  * @vm: requested vm
1467  * @mapping: mapping to be freed
1468  * @fence: fence of the unmap operation
1469  *
1470  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1471  */
1472 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1473 				   struct amdgpu_vm *vm,
1474 				   struct amdgpu_bo_va_mapping *mapping,
1475 				   struct dma_fence *fence)
1476 {
1477 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1478 		amdgpu_vm_add_prt_cb(adev, fence);
1479 	kfree(mapping);
1480 }
1481 
1482 /**
1483  * amdgpu_vm_prt_fini - finish all prt mappings
1484  *
1485  * @adev: amdgpu_device pointer
1486  * @vm: requested vm
1487  *
1488  * Register a cleanup callback to disable PRT support after VM dies.
1489  */
1490 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1491 {
1492 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1493 	struct dma_resv_iter cursor;
1494 	struct dma_fence *fence;
1495 
1496 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1497 		/* Add a callback for each fence in the reservation object */
1498 		amdgpu_vm_prt_get(adev);
1499 		amdgpu_vm_add_prt_cb(adev, fence);
1500 	}
1501 }
1502 
1503 /**
1504  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1505  *
1506  * @adev: amdgpu_device pointer
1507  * @vm: requested vm
1508  * @fence: optional resulting fence (unchanged if no work needed to be done
1509  * or if an error occurred)
1510  *
1511  * Make sure all freed BOs are cleared in the PT.
1512  * PTs have to be reserved and mutex must be locked!
1513  *
1514  * Returns:
1515  * 0 for success.
1516  *
1517  */
1518 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1519 			  struct amdgpu_vm *vm,
1520 			  struct dma_fence **fence)
1521 {
1522 	struct amdgpu_bo_va_mapping *mapping;
1523 	struct dma_fence *f = NULL;
1524 	struct amdgpu_sync sync;
1525 	int r;
1526 
1527 
1528 	/*
1529 	 * Implicitly sync to command submissions in the same VM before
1530 	 * unmapping.
1531 	 */
1532 	amdgpu_sync_create(&sync);
1533 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1534 			     AMDGPU_SYNC_EQ_OWNER, vm);
1535 	if (r)
1536 		goto error_free;
1537 
1538 	while (!list_empty(&vm->freed)) {
1539 		mapping = list_first_entry(&vm->freed,
1540 			struct amdgpu_bo_va_mapping, list);
1541 		list_del(&mapping->list);
1542 
1543 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1544 					   &sync, mapping->start, mapping->last,
1545 					   0, 0, 0, NULL, NULL, &f);
1546 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1547 		if (r) {
1548 			dma_fence_put(f);
1549 			goto error_free;
1550 		}
1551 	}
1552 
1553 	if (fence && f) {
1554 		dma_fence_put(*fence);
1555 		*fence = f;
1556 	} else {
1557 		dma_fence_put(f);
1558 	}
1559 
1560 error_free:
1561 	amdgpu_sync_free(&sync);
1562 	return r;
1563 
1564 }
1565 
1566 /**
1567  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1568  *
1569  * @adev: amdgpu_device pointer
1570  * @vm: requested vm
1571  * @ticket: optional reservation ticket used to reserve the VM
1572  *
1573  * Make sure all BOs which are moved are updated in the PTs.
1574  *
1575  * Returns:
1576  * 0 for success.
1577  *
1578  * PTs have to be reserved!
1579  */
1580 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1581 			   struct amdgpu_vm *vm,
1582 			   struct ww_acquire_ctx *ticket)
1583 {
1584 	struct amdgpu_bo_va *bo_va;
1585 	struct dma_resv *resv;
1586 	bool clear, unlock;
1587 	int r;
1588 
1589 	spin_lock(&vm->status_lock);
1590 	while (!list_empty(&vm->moved)) {
1591 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1592 					 base.vm_status);
1593 		spin_unlock(&vm->status_lock);
1594 
1595 		/* Per VM BOs never need to be cleared in the page tables */
1596 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1597 		if (r)
1598 			return r;
1599 		spin_lock(&vm->status_lock);
1600 	}
1601 
1602 	while (!list_empty(&vm->invalidated)) {
1603 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1604 					 base.vm_status);
1605 		resv = bo_va->base.bo->tbo.base.resv;
1606 		spin_unlock(&vm->status_lock);
1607 
1608 		/* Try to reserve the BO to avoid clearing its ptes */
1609 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1610 			clear = false;
1611 			unlock = true;
1612 		/* The caller is already holding the reservation lock */
1613 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1614 			clear = false;
1615 			unlock = false;
1616 		/* Somebody else is using the BO right now */
1617 		} else {
1618 			clear = true;
1619 			unlock = false;
1620 		}
1621 
1622 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1623 
1624 		if (unlock)
1625 			dma_resv_unlock(resv);
1626 		if (r)
1627 			return r;
1628 
1629 		/* Remember evicted DMABuf imports in compute VMs for later
1630 		 * validation
1631 		 */
1632 		if (vm->is_compute_context &&
1633 		    bo_va->base.bo->tbo.base.import_attach &&
1634 		    (!bo_va->base.bo->tbo.resource ||
1635 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1636 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1637 
1638 		spin_lock(&vm->status_lock);
1639 	}
1640 	spin_unlock(&vm->status_lock);
1641 
1642 	return 0;
1643 }
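
/*
 * Illustrative ordering of a typical command submission using the helpers in
 * this file (simplified, error handling elided, validate_cb is a placeholder):
 *
 *   amdgpu_vm_validate(adev, vm, ticket, validate_cb, param);
 *   amdgpu_vm_clear_freed(adev, vm, NULL);
 *   amdgpu_vm_handle_moved(adev, vm, ticket);
 *   amdgpu_vm_update_pdes(adev, vm, false);
 *   ... submit the job, amdgpu_vm_flush() runs as part of it ...
 */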
1644 
1645 /**
1646  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1647  *
1648  * @adev: amdgpu_device pointer
1649  * @vm: requested vm
1650  * @flush_type: flush type
1651  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1652  *
1653  * Flush TLB if needed for a compute VM.
1654  *
1655  * Returns:
1656  * 0 for success.
1657  */
1658 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1659 				struct amdgpu_vm *vm,
1660 				uint32_t flush_type,
1661 				uint32_t xcc_mask)
1662 {
1663 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1664 	bool all_hub = false;
1665 	int xcc = 0, r = 0;
1666 
1667 	WARN_ON_ONCE(!vm->is_compute_context);
1668 
1669 	/*
1670 	 * It can be that we race and lose here, but that is extremely unlikely
1671 	 * and the worst thing which could happen is that we flush the changes
1672 	 * into the TLB once more which is harmless.
1673 	 */
1674 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1675 		return 0;
1676 
1677 	if (adev->family == AMDGPU_FAMILY_AI ||
1678 	    adev->family == AMDGPU_FAMILY_RV)
1679 		all_hub = true;
1680 
1681 	for_each_inst(xcc, xcc_mask) {
1682 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1683 						   all_hub, xcc);
1684 		if (r)
1685 			break;
1686 	}
1687 	return r;
1688 }
1689 
1690 /**
1691  * amdgpu_vm_bo_add - add a bo to a specific vm
1692  *
1693  * @adev: amdgpu_device pointer
1694  * @vm: requested vm
1695  * @bo: amdgpu buffer object
1696  *
1697  * Add @bo into the requested vm.
1698  * Add @bo to the list of bos associated with the vm
1699  *
1700  * Returns:
1701  * Newly added bo_va or NULL for failure
1702  *
1703  * Object has to be reserved!
1704  */
1705 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1706 				      struct amdgpu_vm *vm,
1707 				      struct amdgpu_bo *bo)
1708 {
1709 	struct amdgpu_bo_va *bo_va;
1710 
1711 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1712 	if (bo_va == NULL) {
1713 		return NULL;
1714 	}
1715 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1716 
1717 	bo_va->ref_count = 1;
1718 	bo_va->last_pt_update = dma_fence_get_stub();
1719 	INIT_LIST_HEAD(&bo_va->valids);
1720 	INIT_LIST_HEAD(&bo_va->invalids);
1721 
1722 	if (!bo)
1723 		return bo_va;
1724 
1725 	dma_resv_assert_held(bo->tbo.base.resv);
1726 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1727 		bo_va->is_xgmi = true;
1728 		/* Power up XGMI if it can be potentially used */
1729 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1730 	}
1731 
1732 	return bo_va;
1733 }
1734 
1735 
1736 /**
1737  * amdgpu_vm_bo_insert_map - insert a new mapping
1738  *
1739  * @adev: amdgpu_device pointer
1740  * @bo_va: bo_va to store the address
1741  * @mapping: the mapping to insert
1742  *
1743  * Insert a new mapping into all structures.
1744  */
1745 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1746 				    struct amdgpu_bo_va *bo_va,
1747 				    struct amdgpu_bo_va_mapping *mapping)
1748 {
1749 	struct amdgpu_vm *vm = bo_va->base.vm;
1750 	struct amdgpu_bo *bo = bo_va->base.bo;
1751 
1752 	mapping->bo_va = bo_va;
1753 	list_add(&mapping->list, &bo_va->invalids);
1754 	amdgpu_vm_it_insert(mapping, &vm->va);
1755 
1756 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1757 		amdgpu_vm_prt_get(adev);
1758 
1759 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1760 		amdgpu_vm_bo_moved(&bo_va->base);
1761 
1762 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1763 }
1764 
1765 /* Validate operation parameters to prevent potential abuse */
1766 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1767 					  struct amdgpu_bo *bo,
1768 					  uint64_t saddr,
1769 					  uint64_t offset,
1770 					  uint64_t size)
1771 {
1772 	uint64_t tmp, lpfn;
1773 
1774 	if (saddr & AMDGPU_GPU_PAGE_MASK
1775 	    || offset & AMDGPU_GPU_PAGE_MASK
1776 	    || size & AMDGPU_GPU_PAGE_MASK)
1777 		return -EINVAL;
1778 
1779 	if (check_add_overflow(saddr, size, &tmp)
1780 	    || check_add_overflow(offset, size, &tmp)
1781 	    || size == 0 /* which also leads to end < begin */)
1782 		return -EINVAL;
1783 
1784 	/* make sure object fit at this offset */
1785 	if (bo && offset + size > amdgpu_bo_size(bo))
1786 		return -EINVAL;
1787 
1788 	/* Ensure last pfn not exceed max_pfn */
1789 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1790 	if (lpfn >= adev->vm_manager.max_pfn)
1791 		return -EINVAL;
1792 
1793 	return 0;
1794 }
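
/*
 * Illustrative examples of parameters rejected above (values hypothetical):
 * an unaligned address such as saddr = 0x1001, a size of zero, an
 * offset + size that overflows or reaches past amdgpu_bo_size(bo), or a
 * range whose last page exceeds vm_manager.max_pfn all return -EINVAL.
 */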
1795 
1796 /**
1797  * amdgpu_vm_bo_map - map bo inside a vm
1798  *
1799  * @adev: amdgpu_device pointer
1800  * @bo_va: bo_va to store the address
1801  * @saddr: where to map the BO
1802  * @offset: requested offset in the BO
1803  * @size: BO size in bytes
1804  * @flags: attributes of pages (read/write/valid/etc.)
1805  *
1806  * Add a mapping of the BO at the specified addr into the VM.
1807  *
1808  * Returns:
1809  * 0 for success, error for failure.
1810  *
1811  * Object has to be reserved and unreserved outside!
1812  */
1813 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1814 		     struct amdgpu_bo_va *bo_va,
1815 		     uint64_t saddr, uint64_t offset,
1816 		     uint64_t size, uint64_t flags)
1817 {
1818 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1819 	struct amdgpu_bo *bo = bo_va->base.bo;
1820 	struct amdgpu_vm *vm = bo_va->base.vm;
1821 	uint64_t eaddr;
1822 	int r;
1823 
1824 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1825 	if (r)
1826 		return r;
1827 
1828 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1829 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1830 
1831 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1832 	if (tmp) {
1833 		/* bo and tmp overlap, invalid addr */
1834 		dev_err(adev->dev,
1835 			"bo %p va 0x%010Lx-0x%010Lx conflict with 0x%010Lx-0x%010Lx\n",
1836 			bo, saddr, eaddr, tmp->start, tmp->last + 1);
1837 		return -EINVAL;
1838 	}
1839 
1840 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1841 	if (!mapping)
1842 		return -ENOMEM;
1843 
1844 	mapping->start = saddr;
1845 	mapping->last = eaddr;
1846 	mapping->offset = offset;
1847 	mapping->flags = flags;
1848 
1849 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1850 
1851 	return 0;
1852 }
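
/*
 * Illustrative caller sketch (not lifted verbatim from the ioctl paths;
 * va_addr stands for a page aligned VA chosen by the caller and error
 * handling is trimmed).  The BO has to be reserved around the call and
 * the mapping parameters have to be GPU page aligned:
 *
 *	r = amdgpu_bo_reserve(bo, true);
 *	if (r)
 *		return r;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	amdgpu_bo_unreserve(bo);
 */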
1853 
1854 /**
1855  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1856  *
1857  * @adev: amdgpu_device pointer
1858  * @bo_va: bo_va to store the address
1859  * @saddr: where to map the BO
1860  * @offset: requested offset in the BO
1861  * @size: size of the mapping in bytes
1862  * @flags: attributes of pages (read/write/valid/etc.)
1863  *
1864  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1865  * mappings as we do so.
1866  *
1867  * Returns:
1868  * 0 for success, error for failure.
1869  *
1870  * Object has to be reserved and unreserved outside!
1871  */
1872 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1873 			     struct amdgpu_bo_va *bo_va,
1874 			     uint64_t saddr, uint64_t offset,
1875 			     uint64_t size, uint64_t flags)
1876 {
1877 	struct amdgpu_bo_va_mapping *mapping;
1878 	struct amdgpu_bo *bo = bo_va->base.bo;
1879 	uint64_t eaddr;
1880 	int r;
1881 
1882 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1883 	if (r)
1884 		return r;
1885 
1886 	/* Allocate all the needed memory */
1887 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1888 	if (!mapping)
1889 		return -ENOMEM;
1890 
1891 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1892 	if (r) {
1893 		kfree(mapping);
1894 		return r;
1895 	}
1896 
1897 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1898 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1899 
1900 	mapping->start = saddr;
1901 	mapping->last = eaddr;
1902 	mapping->offset = offset;
1903 	mapping->flags = flags;
1904 
1905 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1906 
1907 	return 0;
1908 }
1909 
1910 /**
1911  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1912  *
1913  * @adev: amdgpu_device pointer
1914  * @bo_va: bo_va to remove the address from
1915  * @saddr: where the BO is mapped
1916  *
1917  * Remove a mapping of the BO at the specified addr from the VM.
1918  *
1919  * Returns:
1920  * 0 for success, error for failure.
1921  *
1922  * Object has to be reserved and unreserved outside!
1923  */
1924 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1925 		       struct amdgpu_bo_va *bo_va,
1926 		       uint64_t saddr)
1927 {
1928 	struct amdgpu_bo_va_mapping *mapping;
1929 	struct amdgpu_vm *vm = bo_va->base.vm;
1930 	bool valid = true;
1931 
1932 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1933 
1934 	list_for_each_entry(mapping, &bo_va->valids, list) {
1935 		if (mapping->start == saddr)
1936 			break;
1937 	}
1938 
1939 	if (&mapping->list == &bo_va->valids) {
1940 		valid = false;
1941 
1942 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1943 			if (mapping->start == saddr)
1944 				break;
1945 		}
1946 
1947 		if (&mapping->list == &bo_va->invalids)
1948 			return -ENOENT;
1949 	}
1950 
1951 	list_del(&mapping->list);
1952 	amdgpu_vm_it_remove(mapping, &vm->va);
1953 	mapping->bo_va = NULL;
1954 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1955 
1956 	if (valid)
1957 		list_add(&mapping->list, &vm->freed);
1958 	else
1959 		amdgpu_vm_free_mapping(adev, vm, mapping,
1960 				       bo_va->last_pt_update);
1961 
1962 	return 0;
1963 }
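
/*
 * Note that @saddr has to match the start address of an existing mapping
 * exactly; e.g. a mapping added with amdgpu_vm_bo_map(..., va_addr, ...)
 * is removed again with amdgpu_vm_bo_unmap(adev, bo_va, va_addr), while an
 * address that merely falls inside the mapped range returns -ENOENT.
 */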
1964 
1965 /**
1966  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1967  *
1968  * @adev: amdgpu_device pointer
1969  * @vm: VM structure to use
1970  * @saddr: start of the range
1971  * @size: size of the range
1972  *
1973  * Remove all mappings in a range, split them as appropriate.
1974  *
1975  * Returns:
1976  * 0 for success, error for failure.
1977  */
1978 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1979 				struct amdgpu_vm *vm,
1980 				uint64_t saddr, uint64_t size)
1981 {
1982 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1983 	LIST_HEAD(removed);
1984 	uint64_t eaddr;
1985 	int r;
1986 
1987 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1988 	if (r)
1989 		return r;
1990 
1991 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1992 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1993 
1994 	/* Allocate all the needed memory */
1995 	before = kzalloc(sizeof(*before), GFP_KERNEL);
1996 	if (!before)
1997 		return -ENOMEM;
1998 	INIT_LIST_HEAD(&before->list);
1999 
2000 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2001 	if (!after) {
2002 		kfree(before);
2003 		return -ENOMEM;
2004 	}
2005 	INIT_LIST_HEAD(&after->list);
2006 
2007 	/* Now gather all removed mappings */
2008 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2009 	while (tmp) {
2010 		/* Remember mapping split at the start */
2011 		if (tmp->start < saddr) {
2012 			before->start = tmp->start;
2013 			before->last = saddr - 1;
2014 			before->offset = tmp->offset;
2015 			before->flags = tmp->flags;
2016 			before->bo_va = tmp->bo_va;
2017 			list_add(&before->list, &tmp->bo_va->invalids);
2018 		}
2019 
2020 		/* Remember mapping split at the end */
2021 		if (tmp->last > eaddr) {
2022 			after->start = eaddr + 1;
2023 			after->last = tmp->last;
2024 			after->offset = tmp->offset;
2025 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2026 			after->flags = tmp->flags;
2027 			after->bo_va = tmp->bo_va;
2028 			list_add(&after->list, &tmp->bo_va->invalids);
2029 		}
2030 
2031 		list_del(&tmp->list);
2032 		list_add(&tmp->list, &removed);
2033 
2034 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2035 	}
2036 
2037 	/* And free them up */
2038 	list_for_each_entry_safe(tmp, next, &removed, list) {
2039 		amdgpu_vm_it_remove(tmp, &vm->va);
2040 		list_del(&tmp->list);
2041 
2042 		if (tmp->start < saddr)
2043 			tmp->start = saddr;
2044 		if (tmp->last > eaddr)
2045 			tmp->last = eaddr;
2046 
2047 		tmp->bo_va = NULL;
2048 		list_add(&tmp->list, &vm->freed);
2049 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2050 	}
2051 
2052 	/* Insert partial mapping before the range */
2053 	if (!list_empty(&before->list)) {
2054 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2055 
2056 		amdgpu_vm_it_insert(before, &vm->va);
2057 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2058 			amdgpu_vm_prt_get(adev);
2059 
2060 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2061 		    !before->bo_va->base.moved)
2062 			amdgpu_vm_bo_moved(&before->bo_va->base);
2063 	} else {
2064 		kfree(before);
2065 	}
2066 
2067 	/* Insert partial mapping after the range */
2068 	if (!list_empty(&after->list)) {
2069 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2070 
2071 		amdgpu_vm_it_insert(after, &vm->va);
2072 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2073 			amdgpu_vm_prt_get(adev);
2074 
2075 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2076 		    !after->bo_va->base.moved)
2077 			amdgpu_vm_bo_moved(&after->bo_va->base);
2078 	} else {
2079 		kfree(after);
2080 	}
2081 
2082 	return 0;
2083 }
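
/*
 * Worked example of the splitting above: with a single mapping covering
 * GPU pages [0x000, 0x3ff] and a clear request for [0x100, 0x2ff], the
 * original mapping ends up on the freed list trimmed to [0x100, 0x2ff],
 * while "before" keeps [0x000, 0x0ff] and "after" keeps [0x300, 0x3ff]
 * with its offset advanced past the removed pages.
 */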
2084 
2085 /**
2086  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2087  *
2088  * @vm: the requested VM
2089  * @addr: the address
2090  *
2091  * Find a mapping by its address.
2092  *
2093  * Returns:
2094  * The amdgpu_bo_va_mapping matching addr, or NULL if none is found
2095  *
2096  */
2097 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2098 							 uint64_t addr)
2099 {
2100 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2101 }
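
/*
 * @addr is expected in GPU page units, matching how the interval tree is
 * keyed; a caller holding a byte address would pass
 * addr / AMDGPU_GPU_PAGE_SIZE, as the command submission path does before
 * its lookups.
 */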
2102 
2103 /**
2104  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2105  *
2106  * @vm: the requested vm
2107  * @ticket: CS ticket
2108  *
2109  * Trace all mappings of BOs reserved during a command submission.
2110  */
2111 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2112 {
2113 	struct amdgpu_bo_va_mapping *mapping;
2114 
2115 	if (!trace_amdgpu_vm_bo_cs_enabled())
2116 		return;
2117 
2118 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2119 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2120 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2121 			struct amdgpu_bo *bo;
2122 
2123 			bo = mapping->bo_va->base.bo;
2124 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2125 			    ticket)
2126 				continue;
2127 		}
2128 
2129 		trace_amdgpu_vm_bo_cs(mapping);
2130 	}
2131 }
2132 
2133 /**
2134  * amdgpu_vm_bo_del - remove a bo from a specific vm
2135  *
2136  * @adev: amdgpu_device pointer
2137  * @bo_va: requested bo_va
2138  *
2139  * Remove @bo_va->bo from the requested vm.
2140  *
2141  * Object has to be reserved!
2142  */
2143 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2144 		      struct amdgpu_bo_va *bo_va)
2145 {
2146 	struct amdgpu_bo_va_mapping *mapping, *next;
2147 	struct amdgpu_bo *bo = bo_va->base.bo;
2148 	struct amdgpu_vm *vm = bo_va->base.vm;
2149 	struct amdgpu_vm_bo_base **base;
2150 
2151 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2152 
2153 	if (bo) {
2154 		dma_resv_assert_held(bo->tbo.base.resv);
2155 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2156 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2157 
2158 		for (base = &bo_va->base.bo->vm_bo; *base;
2159 		     base = &(*base)->next) {
2160 			if (*base != &bo_va->base)
2161 				continue;
2162 
2163 			amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2164 			*base = bo_va->base.next;
2165 			break;
2166 		}
2167 	}
2168 
2169 	spin_lock(&vm->status_lock);
2170 	list_del(&bo_va->base.vm_status);
2171 	spin_unlock(&vm->status_lock);
2172 
2173 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2174 		list_del(&mapping->list);
2175 		amdgpu_vm_it_remove(mapping, &vm->va);
2176 		mapping->bo_va = NULL;
2177 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2178 		list_add(&mapping->list, &vm->freed);
2179 	}
2180 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2181 		list_del(&mapping->list);
2182 		amdgpu_vm_it_remove(mapping, &vm->va);
2183 		amdgpu_vm_free_mapping(adev, vm, mapping,
2184 				       bo_va->last_pt_update);
2185 	}
2186 
2187 	dma_fence_put(bo_va->last_pt_update);
2188 
2189 	if (bo && bo_va->is_xgmi)
2190 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2191 
2192 	kfree(bo_va);
2193 }
2194 
2195 /**
2196  * amdgpu_vm_evictable - check if we can evict a VM
2197  *
2198  * @bo: A page table of the VM.
2199  *
2200  * Check if it is possible to evict a VM.
2201  */
2202 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2203 {
2204 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2205 
2206 	/* Page tables of a destroyed VM can go away immediately */
2207 	if (!bo_base || !bo_base->vm)
2208 		return true;
2209 
2210 	/* Don't evict VM page tables while they are busy */
2211 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2212 		return false;
2213 
2214 	/* Try to block ongoing updates */
2215 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2216 		return false;
2217 
2218 	/* Don't evict VM page tables while they are updated */
2219 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2220 		amdgpu_vm_eviction_unlock(bo_base->vm);
2221 		return false;
2222 	}
2223 
2224 	bo_base->vm->evicting = true;
2225 	amdgpu_vm_eviction_unlock(bo_base->vm);
2226 	return true;
2227 }
2228 
2229 /**
2230  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2231  *
2232  * @bo: amdgpu buffer object
2233  * @evicted: is the BO evicted
2234  *
2235  * Mark @bo as invalid.
2236  */
2237 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2238 {
2239 	struct amdgpu_vm_bo_base *bo_base;
2240 
2241 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2242 		struct amdgpu_vm *vm = bo_base->vm;
2243 
2244 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2245 			amdgpu_vm_bo_evicted(bo_base);
2246 			continue;
2247 		}
2248 
2249 		if (bo_base->moved)
2250 			continue;
2251 		bo_base->moved = true;
2252 
2253 		if (bo->tbo.type == ttm_bo_type_kernel)
2254 			amdgpu_vm_bo_relocated(bo_base);
2255 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2256 			amdgpu_vm_bo_moved(bo_base);
2257 		else
2258 			amdgpu_vm_bo_invalidated(bo_base);
2259 	}
2260 }
2261 
2262 /**
2263  * amdgpu_vm_bo_move - handle BO move
2264  *
2265  * @bo: amdgpu buffer object
2266  * @new_mem: the new placement of the BO after the move
2267  * @evicted: is the BO evicted
2268  *
2269  * Update the memory stats for the new placement and mark @bo as invalid.
2270  */
2271 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2272 		       bool evicted)
2273 {
2274 	struct amdgpu_vm_bo_base *bo_base;
2275 
2276 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2277 		struct amdgpu_vm *vm = bo_base->vm;
2278 
2279 		spin_lock(&vm->status_lock);
2280 		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2281 		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2282 		spin_unlock(&vm->status_lock);
2283 	}
2284 
2285 	amdgpu_vm_bo_invalidate(bo, evicted);
2286 }
2287 
2288 /**
2289  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2290  *
2291  * @vm_size: VM size
2292  *
2293  * Returns:
2294  * VM page table size as a power of two
2295  */
2296 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2297 {
2298 	/* Total bits covered by PD + PTs */
2299 	unsigned bits = ilog2(vm_size) + 18;
2300 
2301 	/* Make sure the PD is 4K in size up to 8GB address space.
2302 	 * Above that, split equally between PD and PTs */
2303 	if (vm_size <= 8)
2304 		return (bits - 9);
2305 	else
2306 		return ((bits + 3) / 2);
2307 }
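
/*
 * Example of the computation above: vm_size = 8 GB gives bits =
 * ilog2(8) + 18 = 21 and a block size of 21 - 9 = 12, while vm_size =
 * 256 GB gives bits = 26 and a block size of (26 + 3) / 2 = 14.
 */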
2308 
2309 /**
2310  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2311  *
2312  * @adev: amdgpu_device pointer
2313  * @min_vm_size: the minimum vm size in GB if it's set auto
2314  * @fragment_size_default: Default PTE fragment size
2315  * @max_level: max VMPT level
2316  * @max_bits: max address space size in bits
2317  *
2318  */
2319 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2320 			   uint32_t fragment_size_default, unsigned max_level,
2321 			   unsigned max_bits)
2322 {
2323 	unsigned int max_size = 1 << (max_bits - 30);
2324 	unsigned int vm_size;
2325 	uint64_t tmp;
2326 
2327 	/* adjust vm size first */
2328 	if (amdgpu_vm_size != -1) {
2329 		vm_size = amdgpu_vm_size;
2330 		if (vm_size > max_size) {
2331 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2332 				 amdgpu_vm_size, max_size);
2333 			vm_size = max_size;
2334 		}
2335 	} else {
2336 		struct sysinfo si;
2337 		unsigned int phys_ram_gb;
2338 
2339 		/* Optimal VM size depends on the amount of physical
2340 		 * RAM available. Underlying requirements and
2341 		 * assumptions:
2342 		 *
2343 		 *  - Need to map system memory and VRAM from all GPUs
2344 		 *     - VRAM from other GPUs not known here
2345 		 *     - Assume VRAM <= system memory
2346 		 *  - On GFX8 and older, VM space can be segmented for
2347 		 *    different MTYPEs
2348 		 *  - Need to allow room for fragmentation, guard pages etc.
2349 		 *
2350 		 * This adds up to a rough guess of system memory x3.
2351 		 * Round up to power of two to maximize the available
2352 		 * VM size with the given page table size.
2353 		 */
2354 		si_meminfo(&si);
2355 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2356 			       (1 << 30) - 1) >> 30;
2357 		vm_size = roundup_pow_of_two(
2358 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2359 	}
2360 
2361 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2362 
2363 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2364 	if (amdgpu_vm_block_size != -1)
2365 		tmp >>= amdgpu_vm_block_size - 9;
2366 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2367 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2368 	switch (adev->vm_manager.num_level) {
2369 	case 3:
2370 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2371 		break;
2372 	case 2:
2373 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2374 		break;
2375 	case 1:
2376 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2377 		break;
2378 	default:
2379 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2380 	}
2381 	/* block size depends on vm size and hw setup */
2382 	if (amdgpu_vm_block_size != -1)
2383 		adev->vm_manager.block_size =
2384 			min((unsigned)amdgpu_vm_block_size, max_bits
2385 			    - AMDGPU_GPU_PAGE_SHIFT
2386 			    - 9 * adev->vm_manager.num_level);
2387 	else if (adev->vm_manager.num_level > 1)
2388 		adev->vm_manager.block_size = 9;
2389 	else
2390 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2391 
2392 	if (amdgpu_vm_fragment_size == -1)
2393 		adev->vm_manager.fragment_size = fragment_size_default;
2394 	else
2395 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2396 
2397 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2398 		 vm_size, adev->vm_manager.num_level + 1,
2399 		 adev->vm_manager.block_size,
2400 		 adev->vm_manager.fragment_size);
2401 }
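
/*
 * Example of the sizing above: vm_size = 256 GB results in
 * max_pfn = 256 << 18, i.e. roughly 67 million 4 KiB GPU pages, which
 * covers exactly 256 GB of virtual address space.
 */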
2402 
2403 /**
2404  * amdgpu_vm_wait_idle - wait for the VM to become idle
2405  *
2406  * @vm: VM object to wait for
2407  * @timeout: timeout to wait for VM to become idle
2408  */
2409 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2410 {
2411 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2412 					DMA_RESV_USAGE_BOOKKEEP,
2413 					true, timeout);
2414 	if (timeout <= 0)
2415 		return timeout;
2416 
2417 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2418 }
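
/*
 * Illustrative caller sketch, assuming the caller wants a bounded wait
 * and maps a timeout to -ETIMEDOUT itself:
 *
 *	timeout = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(100));
 *	if (timeout <= 0)
 *		return timeout ? timeout : -ETIMEDOUT;
 */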
2419 
2420 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2421 {
2422 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2423 
2424 	kfree(ti);
2425 }
2426 
2427 static inline struct amdgpu_vm *
2428 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2429 {
2430 	struct amdgpu_vm *vm;
2431 	unsigned long flags;
2432 
2433 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2434 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2435 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2436 
2437 	return vm;
2438 }
2439 
2440 /**
2441  * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2442  *
2443  * @task_info: task_info struct under discussion.
2444  *
2445  * frees the vm task_info ptr at the last put
2446  */
2447 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2448 {
2449 	kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2450 }
2451 
2452 /**
2453  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2454  *
2455  * @vm: VM to get info from
2456  *
2457  * Returns the reference counted task_info structure, which must be
2458  * referenced down with amdgpu_vm_put_task_info.
2459  */
2460 struct amdgpu_task_info *
2461 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2462 {
2463 	struct amdgpu_task_info *ti = NULL;
2464 
2465 	if (vm) {
2466 		ti = vm->task_info;
2467 		kref_get(&vm->task_info->refcount);
2468 	}
2469 
2470 	return ti;
2471 }
2472 
2473 /**
2474  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2475  *
2476  * @adev: drm device pointer
2477  * @pasid: PASID identifier for VM
2478  *
2479  * Returns the reference counted task_info structure, which must be
2480  * referenced down with amdgpu_vm_put_task_info.
2481  */
2482 struct amdgpu_task_info *
2483 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2484 {
2485 	return amdgpu_vm_get_task_info_vm(
2486 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2487 }
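
/*
 * Illustrative use of the get/put pair (a NULL result is simply skipped;
 * the message is only an example):
 *
 *	struct amdgpu_task_info *ti;
 *
 *	ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *	if (ti) {
 *		dev_info(adev->dev, "fault in process %s pid %d\n",
 *			 ti->process_name, ti->pid);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 */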
2488 
2489 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2490 {
2491 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2492 	if (!vm->task_info)
2493 		return -ENOMEM;
2494 
2495 	kref_init(&vm->task_info->refcount);
2496 	return 0;
2497 }
2498 
2499 /**
2500  * amdgpu_vm_set_task_info - Sets VMs task info.
2501  *
2502  * @vm: vm for which to set the info
2503  */
2504 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2505 {
2506 	if (!vm->task_info)
2507 		return;
2508 
2509 	if (vm->task_info->pid == current->pid)
2510 		return;
2511 
2512 	vm->task_info->pid = current->pid;
2513 	get_task_comm(vm->task_info->task_name, current);
2514 
2515 	if (current->group_leader->mm != current->mm)
2516 		return;
2517 
2518 	vm->task_info->tgid = current->group_leader->pid;
2519 	get_task_comm(vm->task_info->process_name, current->group_leader);
2520 }
2521 
2522 /**
2523  * amdgpu_vm_init - initialize a vm instance
2524  *
2525  * @adev: amdgpu_device pointer
2526  * @vm: requested vm
2527  * @xcp_id: GPU partition selection id
2528  *
2529  * Init @vm fields.
2530  *
2531  * Returns:
2532  * 0 for success, error for failure.
2533  */
2534 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2535 		   int32_t xcp_id)
2536 {
2537 	struct amdgpu_bo *root_bo;
2538 	struct amdgpu_bo_vm *root;
2539 	int r, i;
2540 
2541 	vm->va = RB_ROOT_CACHED;
2542 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2543 		vm->reserved_vmid[i] = NULL;
2544 	INIT_LIST_HEAD(&vm->evicted);
2545 	INIT_LIST_HEAD(&vm->evicted_user);
2546 	INIT_LIST_HEAD(&vm->relocated);
2547 	INIT_LIST_HEAD(&vm->moved);
2548 	INIT_LIST_HEAD(&vm->idle);
2549 	INIT_LIST_HEAD(&vm->invalidated);
2550 	spin_lock_init(&vm->status_lock);
2551 	INIT_LIST_HEAD(&vm->freed);
2552 	INIT_LIST_HEAD(&vm->done);
2553 	INIT_KFIFO(vm->faults);
2554 
2555 	r = amdgpu_vm_init_entities(adev, vm);
2556 	if (r)
2557 		return r;
2558 
2559 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2560 
2561 	vm->is_compute_context = false;
2562 
2563 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2564 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2565 
2566 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2567 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2568 	WARN_ONCE((vm->use_cpu_for_update &&
2569 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2570 		  "CPU update of VM recommended only for large BAR system\n");
2571 
2572 	if (vm->use_cpu_for_update)
2573 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2574 	else
2575 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2576 
2577 	vm->last_update = dma_fence_get_stub();
2578 	vm->last_unlocked = dma_fence_get_stub();
2579 	vm->last_tlb_flush = dma_fence_get_stub();
2580 	vm->generation = amdgpu_vm_generation(adev, NULL);
2581 
2582 	mutex_init(&vm->eviction_lock);
2583 	vm->evicting = false;
2584 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2585 
2586 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2587 				false, &root, xcp_id);
2588 	if (r)
2589 		goto error_free_delayed;
2590 
2591 	root_bo = amdgpu_bo_ref(&root->bo);
2592 	r = amdgpu_bo_reserve(root_bo, true);
2593 	if (r) {
2594 		amdgpu_bo_unref(&root_bo);
2595 		goto error_free_delayed;
2596 	}
2597 
2598 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2599 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2600 	if (r)
2601 		goto error_free_root;
2602 
2603 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2604 	if (r)
2605 		goto error_free_root;
2606 
2607 	r = amdgpu_vm_create_task_info(vm);
2608 	if (r)
2609 		DRM_DEBUG("Failed to create task info for VM\n");
2610 
2611 	amdgpu_bo_unreserve(vm->root.bo);
2612 	amdgpu_bo_unref(&root_bo);
2613 
2614 	return 0;
2615 
2616 error_free_root:
2617 	amdgpu_vm_pt_free_root(adev, vm);
2618 	amdgpu_bo_unreserve(vm->root.bo);
2619 	amdgpu_bo_unref(&root_bo);
2620 
2621 error_free_delayed:
2622 	dma_fence_put(vm->last_tlb_flush);
2623 	dma_fence_put(vm->last_unlocked);
2624 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2625 	amdgpu_vm_fini_entities(vm);
2626 
2627 	return r;
2628 }
2629 
2630 /**
2631  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2632  *
2633  * @adev: amdgpu_device pointer
2634  * @vm: requested vm
2635  *
2636  * This only works on GFX VMs that don't have any BOs added and no
2637  * page tables allocated yet.
2638  *
2639  * Changes the following VM parameters:
2640  * - use_cpu_for_update
2641  *
2642  * Switches the page table update method between CPU and SDMA and
2643  * syncs with any outstanding SDMA update before switching over to
2644  * CPU based updates.
2645  *
2646  * Returns:
2647  * 0 for success, -errno for errors.
2648  */
2649 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2650 {
2651 	int r;
2652 
2653 	r = amdgpu_bo_reserve(vm->root.bo, true);
2654 	if (r)
2655 		return r;
2656 
2657 	/* Update VM state */
2658 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2659 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2660 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2661 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2662 	WARN_ONCE((vm->use_cpu_for_update &&
2663 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2664 		  "CPU update of VM recommended only for large BAR system\n");
2665 
2666 	if (vm->use_cpu_for_update) {
2667 		/* Sync with last SDMA update/clear before switching to CPU */
2668 		r = amdgpu_bo_sync_wait(vm->root.bo,
2669 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2670 		if (r)
2671 			goto unreserve_bo;
2672 
2673 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2674 		r = amdgpu_vm_pt_map_tables(adev, vm);
2675 		if (r)
2676 			goto unreserve_bo;
2677 
2678 	} else {
2679 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2680 	}
2681 
2682 	dma_fence_put(vm->last_update);
2683 	vm->last_update = dma_fence_get_stub();
2684 	vm->is_compute_context = true;
2685 
2686 unreserve_bo:
2687 	amdgpu_bo_unreserve(vm->root.bo);
2688 	return r;
2689 }
2690 
2691 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2692 {
2693 	for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2694 		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2695 		      vm->stats[i].evicted == 0))
2696 			return false;
2697 	}
2698 	return true;
2699 }
2700 
2701 /**
2702  * amdgpu_vm_fini - tear down a vm instance
2703  *
2704  * @adev: amdgpu_device pointer
2705  * @vm: requested vm
2706  *
2707  * Tear down @vm.
2708  * Unbind the VM and remove all bos from the vm bo list
2709  */
2710 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2711 {
2712 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2713 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2714 	struct amdgpu_bo *root;
2715 	unsigned long flags;
2716 	int i;
2717 
2718 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2719 
2720 	root = amdgpu_bo_ref(vm->root.bo);
2721 	amdgpu_bo_reserve(root, true);
2722 	amdgpu_vm_set_pasid(adev, vm, 0);
2723 	dma_fence_wait(vm->last_unlocked, false);
2724 	dma_fence_put(vm->last_unlocked);
2725 	dma_fence_wait(vm->last_tlb_flush, false);
2726 	/* Make sure that all fence callbacks have completed */
2727 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2728 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2729 	dma_fence_put(vm->last_tlb_flush);
2730 
2731 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2732 		if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2733 			amdgpu_vm_prt_fini(adev, vm);
2734 			prt_fini_needed = false;
2735 		}
2736 
2737 		list_del(&mapping->list);
2738 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2739 	}
2740 
2741 	amdgpu_vm_pt_free_root(adev, vm);
2742 	amdgpu_bo_unreserve(root);
2743 	amdgpu_bo_unref(&root);
2744 	WARN_ON(vm->root.bo);
2745 
2746 	amdgpu_vm_fini_entities(vm);
2747 
2748 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2749 		dev_err(adev->dev, "still active bo inside vm\n");
2750 	}
2751 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2752 					     &vm->va.rb_root, rb) {
2753 		/* Don't remove the mapping here, we don't want to trigger a
2754 		 * rebalance and the tree is about to be destroyed anyway.
2755 		 */
2756 		list_del(&mapping->list);
2757 		kfree(mapping);
2758 	}
2759 
2760 	dma_fence_put(vm->last_update);
2761 
2762 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2763 		if (vm->reserved_vmid[i]) {
2764 			amdgpu_vmid_free_reserved(adev, i);
2765 			vm->reserved_vmid[i] = false;
2766 		}
2767 	}
2768 
2769 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2770 
2771 	if (!amdgpu_vm_stats_is_zero(vm)) {
2772 		struct amdgpu_task_info *ti = vm->task_info;
2773 
2774 		dev_warn(adev->dev,
2775 			 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2776 			 ti->process_name, ti->pid, ti->task_name, ti->tgid);
2777 	}
2778 
2779 	amdgpu_vm_put_task_info(vm->task_info);
2780 }
2781 
2782 /**
2783  * amdgpu_vm_manager_init - init the VM manager
2784  *
2785  * @adev: amdgpu_device pointer
2786  *
2787  * Initialize the VM manager structures
2788  */
2789 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2790 {
2791 	unsigned i;
2792 
2793 	/* Concurrent flushes are only possible starting with Vega10 and
2794 	 * are broken on Navi10 and Navi14.
2795 	 */
2796 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2797 					      adev->asic_type == CHIP_NAVI10 ||
2798 					      adev->asic_type == CHIP_NAVI14);
2799 	amdgpu_vmid_mgr_init(adev);
2800 
2801 	adev->vm_manager.fence_context =
2802 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2803 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2804 		adev->vm_manager.seqno[i] = 0;
2805 
2806 	spin_lock_init(&adev->vm_manager.prt_lock);
2807 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2808 
2809 	/* Unless overridden by the user, compute VM page tables are only
2810 	 * updated by the CPU on large BAR systems
2811 	 */
2812 #ifdef CONFIG_X86_64
2813 	if (amdgpu_vm_update_mode == -1) {
2814 		/* For ASICs with VF MMIO access protection,
2815 		 * avoid using the CPU for VM table updates
2816 		 */
2817 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2818 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2819 			adev->vm_manager.vm_update_mode =
2820 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2821 		else
2822 			adev->vm_manager.vm_update_mode = 0;
2823 	} else
2824 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2825 #else
2826 	adev->vm_manager.vm_update_mode = 0;
2827 #endif
2828 
2829 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2830 }
2831 
2832 /**
2833  * amdgpu_vm_manager_fini - cleanup VM manager
2834  *
2835  * @adev: amdgpu_device pointer
2836  *
2837  * Cleanup the VM manager and free resources.
2838  */
2839 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2840 {
2841 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2842 	xa_destroy(&adev->vm_manager.pasids);
2843 
2844 	amdgpu_vmid_mgr_fini(adev);
2845 }
2846 
2847 /**
2848  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2849  *
2850  * @dev: drm device pointer
2851  * @data: drm_amdgpu_vm
2852  * @filp: drm file pointer
2853  *
2854  * Returns:
2855  * 0 for success, -errno for errors.
2856  */
2857 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2858 {
2859 	union drm_amdgpu_vm *args = data;
2860 	struct amdgpu_device *adev = drm_to_adev(dev);
2861 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2862 
2863 	/* No valid flags defined yet */
2864 	if (args->in.flags)
2865 		return -EINVAL;
2866 
2867 	switch (args->in.op) {
2868 	case AMDGPU_VM_OP_RESERVE_VMID:
2869 		/* We only have requirement to reserve vmid from gfxhub */
2870 		/* We only need to reserve a VMID from the gfxhub */
2871 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2872 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2873 		}
2874 
2875 		break;
2876 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2877 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2878 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2879 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2880 		}
2881 		break;
2882 	default:
2883 		return -EINVAL;
2884 	}
2885 
2886 	return 0;
2887 }
2888 
2889 /**
2890  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2891  * @adev: amdgpu device pointer
2892  * @pasid: PASID of the VM
2893  * @ts: Timestamp of the fault
2894  * @vmid: VMID, only used for GFX 9.4.3.
2895  * @node_id: Node_id received in IH cookie. Only applicable for
2896  *           GFX 9.4.3.
2897  * @addr: Address of the fault
2898  * @write_fault: true is write fault, false is read fault
2899  *
2900  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2901  * shouldn't be reported any more.
2902  */
2903 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2904 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2905 			    bool write_fault)
2906 {
2907 	bool is_compute_context = false;
2908 	struct amdgpu_bo *root;
2909 	unsigned long irqflags;
2910 	uint64_t value, flags;
2911 	struct amdgpu_vm *vm;
2912 	int r;
2913 
2914 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2915 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2916 	if (vm) {
2917 		root = amdgpu_bo_ref(vm->root.bo);
2918 		is_compute_context = vm->is_compute_context;
2919 	} else {
2920 		root = NULL;
2921 	}
2922 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2923 
2924 	if (!root)
2925 		return false;
2926 
2927 	addr /= AMDGPU_GPU_PAGE_SIZE;
2928 
2929 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2930 	    node_id, addr, ts, write_fault)) {
2931 		amdgpu_bo_unref(&root);
2932 		return true;
2933 	}
2934 
2935 	r = amdgpu_bo_reserve(root, true);
2936 	if (r)
2937 		goto error_unref;
2938 
2939 	/* Double check that the VM still exists */
2940 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2941 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2942 	if (vm && vm->root.bo != root)
2943 		vm = NULL;
2944 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2945 	if (!vm)
2946 		goto error_unlock;
2947 
2948 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2949 		AMDGPU_PTE_SYSTEM;
2950 
2951 	if (is_compute_context) {
2952 		/* Intentionally setting invalid PTE flag
2953 		 * combination to force a no-retry-fault
2954 		 */
2955 		flags = AMDGPU_VM_NORETRY_FLAGS;
2956 		value = 0;
2957 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2958 		/* Redirect the access to the dummy page */
2959 		value = adev->dummy_page_addr;
2960 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2961 			AMDGPU_PTE_WRITEABLE;
2962 
2963 	} else {
2964 		/* Let the hw retry silently on the PTE */
2965 		value = 0;
2966 	}
2967 
2968 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2969 	if (r) {
2970 		pr_debug("failed %d to reserve fence slot\n", r);
2971 		goto error_unlock;
2972 	}
2973 
2974 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2975 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2976 	if (r)
2977 		goto error_unlock;
2978 
2979 	r = amdgpu_vm_update_pdes(adev, vm, true);
2980 
2981 error_unlock:
2982 	amdgpu_bo_unreserve(root);
2983 	if (r < 0)
2984 		DRM_ERROR("Can't handle page fault (%d)\n", r);
2985 
2986 error_unref:
2987 	amdgpu_bo_unref(&root);
2988 
2989 	return false;
2990 }
2991 
2992 #if defined(CONFIG_DEBUG_FS)
2993 /**
2994  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
2995  *
2996  * @vm: Requested VM for printing BO info
2997  * @m: debugfs file
2998  *
2999  * Print BO information in debugfs file for the VM
3000  */
3001 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3002 {
3003 	struct amdgpu_bo_va *bo_va, *tmp;
3004 	u64 total_idle = 0;
3005 	u64 total_evicted = 0;
3006 	u64 total_relocated = 0;
3007 	u64 total_moved = 0;
3008 	u64 total_invalidated = 0;
3009 	u64 total_done = 0;
3010 	unsigned int total_idle_objs = 0;
3011 	unsigned int total_evicted_objs = 0;
3012 	unsigned int total_relocated_objs = 0;
3013 	unsigned int total_moved_objs = 0;
3014 	unsigned int total_invalidated_objs = 0;
3015 	unsigned int total_done_objs = 0;
3016 	unsigned int id = 0;
3017 
3018 	spin_lock(&vm->status_lock);
3019 	seq_puts(m, "\tIdle BOs:\n");
3020 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3021 		if (!bo_va->base.bo)
3022 			continue;
3023 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3024 	}
3025 	total_idle_objs = id;
3026 	id = 0;
3027 
3028 	seq_puts(m, "\tEvicted BOs:\n");
3029 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3030 		if (!bo_va->base.bo)
3031 			continue;
3032 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3033 	}
3034 	total_evicted_objs = id;
3035 	id = 0;
3036 
3037 	seq_puts(m, "\tRelocated BOs:\n");
3038 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3039 		if (!bo_va->base.bo)
3040 			continue;
3041 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3042 	}
3043 	total_relocated_objs = id;
3044 	id = 0;
3045 
3046 	seq_puts(m, "\tMoved BOs:\n");
3047 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3048 		if (!bo_va->base.bo)
3049 			continue;
3050 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3051 	}
3052 	total_moved_objs = id;
3053 	id = 0;
3054 
3055 	seq_puts(m, "\tInvalidated BOs:\n");
3056 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3057 		if (!bo_va->base.bo)
3058 			continue;
3059 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3060 	}
3061 	total_invalidated_objs = id;
3062 	id = 0;
3063 
3064 	seq_puts(m, "\tDone BOs:\n");
3065 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3066 		if (!bo_va->base.bo)
3067 			continue;
3068 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3069 	}
3070 	spin_unlock(&vm->status_lock);
3071 	total_done_objs = id;
3072 
3073 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3074 		   total_idle_objs);
3075 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3076 		   total_evicted_objs);
3077 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3078 		   total_relocated_objs);
3079 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3080 		   total_moved_objs);
3081 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3082 		   total_invalidated_objs);
3083 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3084 		   total_done_objs);
3085 }
3086 #endif
3087 
3088 /**
3089  * amdgpu_vm_update_fault_cache - update cached fault info.
3090  * @adev: amdgpu device pointer
3091  * @pasid: PASID of the VM
3092  * @addr: Address of the fault
3093  * @status: GPUVM fault status register
3094  * @vmhub: which vmhub got the fault
3095  *
3096  * Cache the fault info for later use by userspace in debugging.
3097  */
3098 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3099 				  unsigned int pasid,
3100 				  uint64_t addr,
3101 				  uint32_t status,
3102 				  unsigned int vmhub)
3103 {
3104 	struct amdgpu_vm *vm;
3105 	unsigned long flags;
3106 
3107 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3108 
3109 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3110 	/* Don't update the fault cache if status is 0.  In the multiple
3111 	 * fault case, subsequent faults will return a 0 status which is
3112 	 * useless for userspace and replaces the useful fault status, so
3113 	 * only update if status is non-0.
3114 	 */
3115 	if (vm && status) {
3116 		vm->fault_info.addr = addr;
3117 		vm->fault_info.status = status;
3118 		/*
3119 		 * Update the fault information globally for later usage
3120 		 * when vm could be stale or freed.
3121 		 */
3122 		adev->vm_manager.fault_info.addr = addr;
3123 		adev->vm_manager.fault_info.vmhub = vmhub;
3124 		adev->vm_manager.fault_info.status = status;
3125 
3126 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3127 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3128 			vm->fault_info.vmhub |=
3129 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3130 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3131 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3132 			vm->fault_info.vmhub |=
3133 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3134 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3135 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3136 			vm->fault_info.vmhub |=
3137 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3138 		} else {
3139 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3140 		}
3141 	}
3142 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3143 }
3144 
3145 /**
3146  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3147  *
3148  * @vm: VM to test against.
3149  * @bo: BO to be tested.
3150  *
3151  * Returns true if the BO shares the dma_resv object with the root PD and is
3152  * always guaranteed to be valid inside the VM.
3153  */
3154 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3155 {
3156 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3157 }
3158