xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47 
48 /**
49  * DOC: GPUVM
50  *
51  * GPUVM is the MMU functionality provided on the GPU.
52  * GPUVM is similar to the legacy GART on older asics, however
53  * rather than there being a single global GART table
54  * for the entire GPU, there can be multiple GPUVM page tables active
55  * at any given time.  The GPUVM page tables can contain a mix of
56  * VRAM pages and system pages (both memory and MMIO) and system pages
57  * can be mapped as snooped (cached system pages) or unsnooped
58  * (uncached system pages).
59  *
60  * Each active GPUVM has an ID associated with it and there is a page table
61  * linked with each VMID.  When executing a command buffer,
62  * the kernel tells the engine what VMID to use for that command
63  * buffer.  VMIDs are allocated dynamically as commands are submitted.
64  * The userspace drivers maintain their own address space and the kernel
65  * sets up their page tables accordingly when they submit their
66  * command buffers and a VMID is assigned.
67  * The hardware supports up to 16 active GPUVMs at any given time.
68  *
69  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
71  * as other features such as encryption and caching attributes.
72  *
73  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
74  * addition to an aperture managed by a page table, VMID 0 also has
75  * several other apertures.  There is an aperture for direct access to VRAM
76  * and there is a legacy AGP aperture which just forwards accesses directly
77  * to the matching system physical addresses (or IOVAs when an IOMMU is
78  * present).  These apertures provide direct access to these memories without
79  * incurring the overhead of a page table.  VMID 0 is used by the kernel
80  * driver for tasks like memory management.
81  *
82  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83  * For user applications, each application can have its own unique GPUVM
84  * address space.  The application manages the address space and the kernel
85  * driver manages the GPUVM page tables for each process.  If a GPU client
86  * accesses an invalid page, it will generate a GPU page fault, similar to
87  * accessing an invalid page on a CPU.
88  */
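
/*
 * Illustrative flow (not a literal call chain): on a typical command
 * submission the kernel roughly walks through
 *
 *   amdgpu_vm_validate()    - revalidate evicted PDs/PTs and per-VM BOs
 *   amdgpu_vm_update_pdes() - write out updated page directory entries
 *   amdgpu_vm_bo_update()   - update the PTEs of moved/invalidated BOs
 *   amdgpu_vm_flush()       - emit the VM flush on the ring if necessary
 *
 * The vm_bo state machine implemented below tracks which of these steps
 * a BO still needs.
 */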
89 
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92 
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 		     START, LAST, static, amdgpu_vm_it)
95 
96 #undef START
97 #undef LAST
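
/*
 * INTERVAL_TREE_DEFINE() above generates, among others, the static
 * helpers amdgpu_vm_it_insert(), amdgpu_vm_it_remove() and
 * amdgpu_vm_it_iter_first() used throughout this file; e.g. the
 * overlap check in amdgpu_vm_bo_map() boils down to:
 *
 *   if (amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr))
 *           return -EINVAL; (range already mapped)
 */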
98 
99 /**
100  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
101  */
102 struct amdgpu_prt_cb {
103 
104 	/**
105 	 * @adev: amdgpu device
106 	 */
107 	struct amdgpu_device *adev;
108 
109 	/**
110 	 * @cb: callback
111 	 */
112 	struct dma_fence_cb cb;
113 };
114 
115 /**
116  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117  */
118 struct amdgpu_vm_tlb_seq_struct {
119 	/**
120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 	 */
122 	struct amdgpu_vm *vm;
123 
124 	/**
125 	 * @cb: callback
126 	 */
127 	struct dma_fence_cb cb;
128 };
129 
130 /**
131  * amdgpu_vm_assert_locked - check if VM is correctly locked
132  * @vm: the VM which should be tested
133  *
134  * Asserts that the VM root PD is locked.
135  */
136 static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
137 {
138 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
139 }
140 
141 /**
142  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
143  *
144  * @adev: amdgpu_device pointer
145  * @vm: amdgpu_vm pointer
146  * @pasid: the pasid the VM is using on this GPU
147  *
148  * Set the pasid this VM is using on this GPU; this can also be used to
149  * remove the pasid by passing in zero.
150  *
151  */
152 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
153 			u32 pasid)
154 {
155 	int r;
156 
157 	amdgpu_vm_assert_locked(vm);
158 
159 	if (vm->pasid == pasid)
160 		return 0;
161 
162 	if (vm->pasid) {
163 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
164 		if (r < 0)
165 			return r;
166 
167 		vm->pasid = 0;
168 	}
169 
170 	if (pasid) {
171 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
172 					GFP_KERNEL));
173 		if (r < 0)
174 			return r;
175 
176 		vm->pasid = pasid;
177 	}
178 
179 
180 	return 0;
181 }
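
/*
 * Example (illustrative): the pasid mapping is removed again by passing
 * zero, e.g. before the VM is torn down:
 *
 *   r = amdgpu_vm_set_pasid(adev, vm, 0);
 */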
182 
183 /**
184  * amdgpu_vm_bo_evicted - vm_bo is evicted
185  *
186  * @vm_bo: vm_bo which is evicted
187  *
188  * State for PDs/PTs and per VM BOs which are not at the location they should
189  * be.
190  */
191 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
192 {
193 	struct amdgpu_vm *vm = vm_bo->vm;
194 	struct amdgpu_bo *bo = vm_bo->bo;
195 
196 	vm_bo->moved = true;
197 	amdgpu_vm_assert_locked(vm);
198 	if (bo->tbo.type == ttm_bo_type_kernel)
199 		list_move(&vm_bo->vm_status, &vm->evicted);
200 	else
201 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
202 }

203 /**
204  * amdgpu_vm_bo_moved - vm_bo is moved
205  *
206  * @vm_bo: vm_bo which is moved
207  *
208  * State for per VM BOs which are moved, but that change is not yet reflected
209  * in the page tables.
210  */
211 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
212 {
213 	amdgpu_vm_assert_locked(vm_bo->vm);
214 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
215 }
216 
217 /**
218  * amdgpu_vm_bo_idle - vm_bo is idle
219  *
220  * @vm_bo: vm_bo which is now idle
221  *
222  * State for PDs/PTs and per VM BOs which have gone through the state machine
223  * and are now idle.
224  */
225 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
226 {
227 	amdgpu_vm_assert_locked(vm_bo->vm);
228 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
229 	vm_bo->moved = false;
230 }
231 
232 /**
233  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
234  *
235  * @vm_bo: vm_bo which is now invalidated
236  *
237  * State for normal BOs which are invalidated and that change is not yet
238  * reflected in the PTs.
239  */
240 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
241 {
242 	spin_lock(&vm_bo->vm->invalidated_lock);
243 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
244 	spin_unlock(&vm_bo->vm->invalidated_lock);
245 }
246 
247 /**
248  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
249  *
250  * @vm_bo: vm_bo which is evicted
251  *
252  * State for BOs used by user mode queues which are not at the location they
253  * should be.
254  */
255 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
256 {
257 	amdgpu_vm_assert_locked(vm_bo->vm);
258 	vm_bo->moved = true;
259 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
260 }
261 
262 /**
263  * amdgpu_vm_bo_relocated - vm_bo is relocated
264  *
265  * @vm_bo: vm_bo which is relocated
266  *
267  * State for PDs/PTs which need to update their parent PD.
268  * For the root PD, just move to idle state.
269  */
270 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
271 {
272 	amdgpu_vm_assert_locked(vm_bo->vm);
273 	if (vm_bo->bo->parent)
274 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
275 	else
276 		amdgpu_vm_bo_idle(vm_bo);
277 }
278 
279 /**
280  * amdgpu_vm_bo_done - vm_bo is done
281  *
282  * @vm_bo: vm_bo which is now done
283  *
284  * State for normal BOs which are invalidated and that change has been updated
285  * in the PTs.
286  */
287 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
288 {
289 	amdgpu_vm_assert_locked(vm_bo->vm);
290 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
291 }
292 
293 /**
294  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
295  * @vm: the VM which state machine to reset
296  *
297  * Move all vm_bo objects in the VM into a state where they will be updated
298  * again during validation.
299  */
300 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
301 {
302 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
303 
304 	spin_lock(&vm->invalidated_lock);
305 	list_splice_init(&vm->done, &vm->invalidated);
306 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
307 		vm_bo->moved = true;
308 	spin_unlock(&vm->invalidated_lock);
309 
310 	amdgpu_vm_assert_locked(vm);
311 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
312 		struct amdgpu_bo *bo = vm_bo->bo;
313 
314 		vm_bo->moved = true;
315 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
316 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
317 		else if (bo->parent)
318 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
319 	}
320 }
321 
322 /**
323  * amdgpu_vm_update_shared - helper to update shared memory stat
324  * @base: base structure for tracking BO usage in a VM
325  *
326  * Takes the vm stats_lock and updates the shared memory stat. If the basic
327  * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be
328  * called as well.
329  */
330 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
331 {
332 	struct amdgpu_vm *vm = base->vm;
333 	struct amdgpu_bo *bo = base->bo;
334 	uint64_t size = amdgpu_bo_size(bo);
335 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
336 	bool shared;
337 
338 	dma_resv_assert_held(bo->tbo.base.resv);
339 	spin_lock(&vm->stats_lock);
340 	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
341 	if (base->shared != shared) {
342 		base->shared = shared;
343 		if (shared) {
344 			vm->stats[bo_memtype].drm.shared += size;
345 			vm->stats[bo_memtype].drm.private -= size;
346 		} else {
347 			vm->stats[bo_memtype].drm.shared -= size;
348 			vm->stats[bo_memtype].drm.private += size;
349 		}
350 	}
351 	spin_unlock(&vm->stats_lock);
352 }
353 
354 /**
355  * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
356  * @bo: amdgpu buffer object
357  *
358  * Update the per-VM stats in all VMs this BO belongs to, moving the
359  * accounting from private to shared or vice versa as needed.
360  */
361 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
362 {
363 	struct amdgpu_vm_bo_base *base;
364 
365 	for (base = bo->vm_bo; base; base = base->next)
366 		amdgpu_vm_update_shared(base);
367 }
368 
369 /**
370  * amdgpu_vm_update_stats_locked - helper to update normal memory stat
371  * @base: base structure for tracking BO usage in a VM
372  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
373  *        be bo->tbo.resource
374  * @sign: if we should add (+1) or subtract (-1) from the stat
375  *
376  * Caller needs to hold the vm stats_lock. Useful when multiple updates
377  * need to happen at the same time.
378  */
379 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
380 					  struct ttm_resource *res, int sign)
381 {
382 	struct amdgpu_vm *vm = base->vm;
383 	struct amdgpu_bo *bo = base->bo;
384 	int64_t size = sign * amdgpu_bo_size(bo);
385 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
386 
387 	/* For drm-total- and drm-shared-, BOs are accounted by their preferred
388 	 * placement, see also amdgpu_bo_mem_stats_placement.
389 	 */
390 	if (base->shared)
391 		vm->stats[bo_memtype].drm.shared += size;
392 	else
393 		vm->stats[bo_memtype].drm.private += size;
394 
395 	if (res && res->mem_type < __AMDGPU_PL_NUM) {
396 		uint32_t res_memtype = res->mem_type;
397 
398 		vm->stats[res_memtype].drm.resident += size;
399 		/* A BO only counts as purgeable if it is resident,
400 		 * since otherwise there's nothing to purge.
401 		 */
402 		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
403 			vm->stats[res_memtype].drm.purgeable += size;
404 		if (!(bo->preferred_domains &
405 		      amdgpu_mem_type_to_domain(res_memtype)))
406 			vm->stats[bo_memtype].evicted += size;
407 	}
408 }
409 
410 /**
411  * amdgpu_vm_update_stats - helper to update normal memory stat
412  * @base: base structure for tracking BO usage in a VM
413  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
414  *        be bo->tbo.resource
415  * @sign: if we should add (+1) or subtract (-1) from the stat
416  *
417  * Updates the basic memory stat when bo is added/deleted/moved.
418  */
419 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
420 			    struct ttm_resource *res, int sign)
421 {
422 	struct amdgpu_vm *vm = base->vm;
423 
424 	spin_lock(&vm->stats_lock);
425 	amdgpu_vm_update_stats_locked(base, res, sign);
426 	spin_unlock(&vm->stats_lock);
427 }
428 
429 /**
430  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
431  *
432  * @base: base structure for tracking BO usage in a VM
433  * @vm: vm to which bo is to be added
434  * @bo: amdgpu buffer object
435  *
436  * Initialize a bo_va_base structure and add it to the appropriate lists.
438  */
439 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
440 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
441 {
442 	base->vm = vm;
443 	base->bo = bo;
444 	base->next = NULL;
445 	INIT_LIST_HEAD(&base->vm_status);
446 
447 	if (!bo)
448 		return;
449 	base->next = bo->vm_bo;
450 	bo->vm_bo = base;
451 
452 	spin_lock(&vm->stats_lock);
453 	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
454 	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
455 	spin_unlock(&vm->stats_lock);
456 
457 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
458 		return;
459 
460 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
461 
462 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
463 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
464 		amdgpu_vm_bo_relocated(base);
465 	else
466 		amdgpu_vm_bo_idle(base);
467 
468 	if (bo->preferred_domains &
469 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
470 		return;
471 
472 	/*
473 	 * We checked all the prerequisites, but it looks like this per VM BO
474 	 * is currently evicted. Add the BO to the evicted list to make sure it
475 	 * is validated on next VM use to avoid faults.
476 	 */
477 	amdgpu_vm_bo_evicted(base);
478 }
479 
480 /**
481  * amdgpu_vm_lock_pd - lock PD in drm_exec
482  *
483  * @vm: vm providing the BOs
484  * @exec: drm execution context
485  * @num_fences: number of extra fences to reserve
486  *
487  * Lock the VM root PD in the DRM execution context.
488  */
489 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
490 		      unsigned int num_fences)
491 {
492 	/* We need at least two fences for the VM PD/PT updates */
493 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
494 				    2 + num_fences);
495 }
496 
497 /**
498  * amdgpu_vm_lock_done_list - lock all BOs on the done list
499  * @vm: vm providing the BOs
500  * @exec: drm execution context
501  * @num_fences: number of extra fences to reserve
502  *
503  * Lock the BOs on the done list in the DRM execution context.
504  */
505 int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
506 			     unsigned int num_fences)
507 {
508 	struct list_head *prev = &vm->done;
509 	struct amdgpu_bo_va *bo_va;
510 	struct amdgpu_bo *bo;
511 	int ret;
512 
513 	/* We can only trust prev->next while holding the lock */
514 	spin_lock(&vm->invalidated_lock);
515 	while (!list_is_head(prev->next, &vm->done)) {
516 		bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
517 		spin_unlock(&vm->invalidated_lock);
518 
519 		bo = bo_va->base.bo;
520 		if (bo) {
521 			ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
522 			if (unlikely(ret))
523 				return ret;
524 		}
525 		spin_lock(&vm->invalidated_lock);
526 		prev = prev->next;
527 	}
528 	spin_unlock(&vm->invalidated_lock);
529 
530 	return 0;
531 }
532 
533 /**
534  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
535  *
536  * @adev: amdgpu device pointer
537  * @vm: vm providing the BOs
538  *
539  * Move all BOs to the end of LRU and remember their positions to put them
540  * together.
541  */
542 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
543 				struct amdgpu_vm *vm)
544 {
545 	spin_lock(&adev->mman.bdev.lru_lock);
546 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
547 	spin_unlock(&adev->mman.bdev.lru_lock);
548 }
549 
550 /* Create scheduler entities for page table updates */
551 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
552 				   struct amdgpu_vm *vm)
553 {
554 	int r;
555 
556 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
557 				  adev->vm_manager.vm_pte_scheds,
558 				  adev->vm_manager.vm_pte_num_scheds, NULL);
559 	if (r)
560 		goto error;
561 
562 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
563 				     adev->vm_manager.vm_pte_scheds,
564 				     adev->vm_manager.vm_pte_num_scheds, NULL);
565 
566 error:
567 	drm_sched_entity_destroy(&vm->immediate);
568 	return r;
569 }
570 
571 /* Destroy the entities for page table updates again */
572 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
573 {
574 	drm_sched_entity_destroy(&vm->immediate);
575 	drm_sched_entity_destroy(&vm->delayed);
576 }
577 
578 /**
579  * amdgpu_vm_generation - return the page table re-generation counter
580  * @adev: the amdgpu_device
581  * @vm: optional VM to check, might be NULL
582  *
583  * Returns a page table re-generation token to allow checking if submissions
584  * are still valid to use this VM. The VM parameter might be NULL in which case
585  * just the VRAM lost counter will be used.
586  */
587 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
588 {
589 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
590 
591 	if (!vm)
592 		return result;
593 
594 	result += lower_32_bits(vm->generation);
595 	/* Add one if the page tables will be re-generated on next CS */
596 	if (drm_sched_entity_error(&vm->delayed))
597 		++result;
598 
599 	return result;
600 }
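
/*
 * Example (illustrative): bits 63..32 of the token hold the VRAM lost
 * counter, bits 31..0 the VM generation (plus one while the delayed
 * entity carries an error). A caller can snapshot the token and compare
 * it later to detect that its page table setup became stale:
 *
 *   uint64_t gen = amdgpu_vm_generation(adev, vm);
 *   ...
 *   if (gen != amdgpu_vm_generation(adev, vm))
 *           reject the submission / rebuild the page tables
 */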
601 
602 /**
603  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
604  *
605  * @adev: amdgpu device pointer
606  * @vm: vm providing the BOs
607  * @ticket: optional reservation ticket used to reserve the VM
608  * @validate: callback to do the validation
609  * @param: parameter for the validation callback
610  *
611  * Validate the page table BOs and per-VM BOs on command submission if
612  * necessary. If a ticket is given, also try to validate evicted user queue
613  * BOs. They must already be reserved with the given ticket.
614  *
615  * Returns:
616  * Validation result.
617  */
618 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
619 		       struct ww_acquire_ctx *ticket,
620 		       int (*validate)(void *p, struct amdgpu_bo *bo),
621 		       void *param)
622 {
623 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
624 	struct amdgpu_vm_bo_base *bo_base, *tmp;
625 	struct amdgpu_bo *bo;
626 	int r;
627 
628 	if (vm->generation != new_vm_generation) {
629 		vm->generation = new_vm_generation;
630 		amdgpu_vm_bo_reset_state_machine(vm);
631 		amdgpu_vm_fini_entities(vm);
632 		r = amdgpu_vm_init_entities(adev, vm);
633 		if (r)
634 			return r;
635 	}
636 
637 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
638 		bo = bo_base->bo;
639 
640 		r = validate(param, bo);
641 		if (r)
642 			return r;
643 
644 		if (bo->tbo.type != ttm_bo_type_kernel) {
645 			amdgpu_vm_bo_moved(bo_base);
646 		} else {
647 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
648 			amdgpu_vm_bo_relocated(bo_base);
649 		}
650 	}
651 
652 	if (ticket) {
653 		list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user,
654 					 vm_status) {
655 			bo = bo_base->bo;
656 			dma_resv_assert_held(bo->tbo.base.resv);
657 
658 			r = validate(param, bo);
659 			if (r)
660 				return r;
661 
662 			amdgpu_vm_bo_invalidated(bo_base);
663 		}
664 	}
665 
666 	amdgpu_vm_eviction_lock(vm);
667 	vm->evicting = false;
668 	amdgpu_vm_eviction_unlock(vm);
669 
670 	return 0;
671 }
672 
673 /**
674  * amdgpu_vm_ready - check VM is ready for updates
675  *
676  * @vm: VM to check
677  *
678  * Check if all VM PDs/PTs are ready for updates
679  *
680  * Returns:
681  * True if VM is not evicting and all VM entities are not stopped
682  */
683 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
684 {
685 	bool ret;
686 
687 	amdgpu_vm_assert_locked(vm);
688 
689 	amdgpu_vm_eviction_lock(vm);
690 	ret = !vm->evicting;
691 	amdgpu_vm_eviction_unlock(vm);
692 
693 	ret &= list_empty(&vm->evicted);
694 
695 	spin_lock(&vm->immediate.lock);
696 	ret &= !vm->immediate.stopped;
697 	spin_unlock(&vm->immediate.lock);
698 
699 	spin_lock(&vm->delayed.lock);
700 	ret &= !vm->delayed.stopped;
701 	spin_unlock(&vm->delayed.lock);
702 
703 	return ret;
704 }
705 
706 /**
707  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
708  *
709  * @adev: amdgpu_device pointer
710  */
711 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
712 {
713 	const struct amdgpu_ip_block *ip_block;
714 	bool has_compute_vm_bug;
715 	struct amdgpu_ring *ring;
716 	int i;
717 
718 	has_compute_vm_bug = false;
719 
720 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
721 	if (ip_block) {
722 		/* Compute has a VM bug for GFX version < 7.
723 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
724 		if (ip_block->version->major <= 7)
725 			has_compute_vm_bug = true;
726 		else if (ip_block->version->major == 8)
727 			if (adev->gfx.mec_fw_version < 673)
728 				has_compute_vm_bug = true;
729 	}
730 
731 	for (i = 0; i < adev->num_rings; i++) {
732 		ring = adev->rings[i];
733 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
734 			/* only compute rings */
735 			ring->has_compute_vm_bug = has_compute_vm_bug;
736 		else
737 			ring->has_compute_vm_bug = false;
738 	}
739 }
740 
741 /**
742  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
743  *
744  * @ring: ring on which the job will be submitted
745  * @job: job to submit
746  *
747  * Returns:
748  * True if sync is needed.
749  */
750 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
751 				  struct amdgpu_job *job)
752 {
753 	struct amdgpu_device *adev = ring->adev;
754 	unsigned vmhub = ring->vm_hub;
755 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
756 
757 	if (job->vmid == 0)
758 		return false;
759 
760 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
761 		return true;
762 
763 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
764 		return true;
765 
766 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
767 		return true;
768 
769 	return false;
770 }
771 
772 /**
773  * amdgpu_vm_flush - hardware flush the vm
774  *
775  * @ring: ring to use for flush
776  * @job:  related job
777  * @need_pipe_sync: is pipe sync needed
778  *
779  * Emit a VM flush when it is necessary.
780  *
781  * Returns:
782  * 0 on success, errno otherwise.
783  */
784 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
785 		    bool need_pipe_sync)
786 {
787 	struct amdgpu_device *adev = ring->adev;
788 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
789 	unsigned vmhub = ring->vm_hub;
790 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
791 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
792 	bool spm_update_needed = job->spm_update_needed;
793 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
794 		job->gds_switch_needed;
795 	bool vm_flush_needed = job->vm_needs_flush;
796 	bool cleaner_shader_needed = false;
797 	bool pasid_mapping_needed = false;
798 	struct dma_fence *fence = NULL;
799 	struct amdgpu_fence *af;
800 	unsigned int patch;
801 	int r;
802 
803 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
804 		gds_switch_needed = true;
805 		vm_flush_needed = true;
806 		pasid_mapping_needed = true;
807 		spm_update_needed = true;
808 	}
809 
810 	mutex_lock(&id_mgr->lock);
811 	if (id->pasid != job->pasid || !id->pasid_mapping ||
812 	    !dma_fence_is_signaled(id->pasid_mapping))
813 		pasid_mapping_needed = true;
814 	mutex_unlock(&id_mgr->lock);
815 
816 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
817 	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
818 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
819 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
820 		ring->funcs->emit_wreg;
821 
822 	cleaner_shader_needed = job->run_cleaner_shader &&
823 		adev->gfx.enable_cleaner_shader &&
824 		ring->funcs->emit_cleaner_shader && job->base.s_fence &&
825 		&job->base.s_fence->scheduled == isolation->spearhead;
826 
827 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
828 	    !cleaner_shader_needed)
829 		return 0;
830 
831 	amdgpu_ring_ib_begin(ring);
832 	if (ring->funcs->init_cond_exec)
833 		patch = amdgpu_ring_init_cond_exec(ring,
834 						   ring->cond_exe_gpu_addr);
835 
836 	if (need_pipe_sync)
837 		amdgpu_ring_emit_pipeline_sync(ring);
838 
839 	if (cleaner_shader_needed)
840 		ring->funcs->emit_cleaner_shader(ring);
841 
842 	if (vm_flush_needed) {
843 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
844 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
845 	}
846 
847 	if (pasid_mapping_needed)
848 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
849 
850 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
851 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
852 
853 	if (ring->funcs->emit_gds_switch &&
854 	    gds_switch_needed) {
855 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
856 					    job->gds_size, job->gws_base,
857 					    job->gws_size, job->oa_base,
858 					    job->oa_size);
859 	}
860 
861 	if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
862 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
863 		if (r)
864 			return r;
865 		/* this is part of the job's context */
866 		af = container_of(fence, struct amdgpu_fence, base);
867 		af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
868 	}
869 
870 	if (vm_flush_needed) {
871 		mutex_lock(&id_mgr->lock);
872 		dma_fence_put(id->last_flush);
873 		id->last_flush = dma_fence_get(fence);
874 		id->current_gpu_reset_count =
875 			atomic_read(&adev->gpu_reset_counter);
876 		mutex_unlock(&id_mgr->lock);
877 	}
878 
879 	if (pasid_mapping_needed) {
880 		mutex_lock(&id_mgr->lock);
881 		id->pasid = job->pasid;
882 		dma_fence_put(id->pasid_mapping);
883 		id->pasid_mapping = dma_fence_get(fence);
884 		mutex_unlock(&id_mgr->lock);
885 	}
886 
887 	/*
888 	 * Make sure that all other submissions wait for the cleaner shader to
889 	 * finish before we push them to the HW.
890 	 */
891 	if (cleaner_shader_needed) {
892 		trace_amdgpu_cleaner_shader(ring, fence);
893 		mutex_lock(&adev->enforce_isolation_mutex);
894 		dma_fence_put(isolation->spearhead);
895 		isolation->spearhead = dma_fence_get(fence);
896 		mutex_unlock(&adev->enforce_isolation_mutex);
897 	}
898 	dma_fence_put(fence);
899 
900 	amdgpu_ring_patch_cond_exec(ring, patch);
901 
902 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
903 	if (ring->funcs->emit_switch_buffer) {
904 		amdgpu_ring_emit_switch_buffer(ring);
905 		amdgpu_ring_emit_switch_buffer(ring);
906 	}
907 
908 	amdgpu_ring_ib_end(ring);
909 	return 0;
910 }
911 
912 /**
913  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
914  *
915  * @vm: requested vm
916  * @bo: requested buffer object
917  *
918  * Find @bo inside the requested vm.
919  * Search inside the @bo's vm list for the requested vm.
921  *
922  * Object has to be reserved!
923  *
924  * Returns:
925  * Found bo_va or NULL.
926  */
927 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
928 				       struct amdgpu_bo *bo)
929 {
930 	struct amdgpu_vm_bo_base *base;
931 
932 	for (base = bo->vm_bo; base; base = base->next) {
933 		if (base->vm != vm)
934 			continue;
935 
936 		return container_of(base, struct amdgpu_bo_va, base);
937 	}
938 	return NULL;
939 }
940 
941 /**
942  * amdgpu_vm_map_gart - Resolve gart mapping of addr
943  *
944  * @pages_addr: optional DMA address to use for lookup
945  * @addr: the unmapped addr
946  *
947  * Look up the physical address of the page that the pte resolves
948  * to.
949  *
950  * Returns:
951  * The pointer for the page table entry.
952  */
953 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
954 {
955 	uint64_t result;
956 
957 	/* page table offset */
958 	result = pages_addr[addr >> PAGE_SHIFT];
959 
960 	/* in case cpu page size != gpu page size */
961 	result |= addr & (~PAGE_MASK);
962 
963 	result &= 0xFFFFFFFFFFFFF000ULL;
964 
965 	return result;
966 }
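
/*
 * Worked example (illustrative): with 64 KiB CPU pages and 4 KiB GPU
 * pages, addr = 0x13000 resolves to pages_addr[0x1] OR'ed with the
 * in-page offset 0x3000; the final mask then clears the sub-4 KiB bits
 * so the result is a 4 KiB aligned address suitable for a PTE. With
 * 4 KiB CPU pages the OR'ed offset is always cleared again by the mask.
 */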
967 
968 /**
969  * amdgpu_vm_update_pdes - make sure that all directories are valid
970  *
971  * @adev: amdgpu_device pointer
972  * @vm: requested vm
973  * @immediate: submit immediately to the paging queue
974  *
975  * Makes sure all directories are up to date.
976  *
977  * Returns:
978  * 0 for success, error for failure.
979  */
980 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
981 			  struct amdgpu_vm *vm, bool immediate)
982 {
983 	struct amdgpu_vm_update_params params;
984 	struct amdgpu_vm_bo_base *entry, *tmp;
985 	bool flush_tlb_needed = false;
986 	int r, idx;
987 
988 	amdgpu_vm_assert_locked(vm);
989 
990 	if (list_empty(&vm->relocated))
991 		return 0;
992 
993 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
994 		return -ENODEV;
995 
996 	memset(&params, 0, sizeof(params));
997 	params.adev = adev;
998 	params.vm = vm;
999 	params.immediate = immediate;
1000 
1001 	r = vm->update_funcs->prepare(&params, NULL,
1002 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
1003 	if (r)
1004 		goto error;
1005 
1006 	list_for_each_entry(entry, &vm->relocated, vm_status) {
1007 		/* vm_flush_needed after updating moved PDEs */
1008 		flush_tlb_needed |= entry->moved;
1009 
1010 		r = amdgpu_vm_pde_update(&params, entry);
1011 		if (r)
1012 			goto error;
1013 	}
1014 
1015 	r = vm->update_funcs->commit(&params, &vm->last_update);
1016 	if (r)
1017 		goto error;
1018 
1019 	if (flush_tlb_needed)
1020 		atomic64_inc(&vm->tlb_seq);
1021 
1022 	list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) {
1023 		amdgpu_vm_bo_idle(entry);
1024 	}
1025 
1026 error:
1027 	drm_dev_exit(idx);
1028 	return r;
1029 }
1030 
1031 /**
1032  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1033  * @fence: unused
1034  * @cb: the callback structure
1035  *
1036  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1037  */
1038 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1039 				 struct dma_fence_cb *cb)
1040 {
1041 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1042 
1043 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1044 	atomic64_inc(&tlb_cb->vm->tlb_seq);
1045 	kfree(tlb_cb);
1046 }
1047 
1048 /**
1049  * amdgpu_vm_tlb_flush - prepare TLB flush
1050  *
1051  * @params: parameters for update
1052  * @fence: input fence to sync TLB flush with
1053  * @tlb_cb: the callback structure
1054  *
1055  * Increments the tlb sequence once @fence signals so future CS execute a VM flush.
1056  */
1057 static void
1058 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1059 		    struct dma_fence **fence,
1060 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1061 {
1062 	struct amdgpu_vm *vm = params->vm;
1063 
1064 	tlb_cb->vm = vm;
1065 	if (!fence || !*fence) {
1066 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1067 		return;
1068 	}
1069 
1070 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1071 				    amdgpu_vm_tlb_seq_cb)) {
1072 		dma_fence_put(vm->last_tlb_flush);
1073 		vm->last_tlb_flush = dma_fence_get(*fence);
1074 	} else {
1075 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1076 	}
1077 
1078 	/* Prepare a TLB flush fence to be attached to PTs */
1079 	if (!params->unlocked && vm->is_compute_context) {
1080 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1081 
1082 		/* Makes sure no PD/PT is freed before the flush */
1083 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1084 				   DMA_RESV_USAGE_BOOKKEEP);
1085 	}
1086 }
1087 
1088 /**
1089  * amdgpu_vm_update_range - update a range in the vm page table
1090  *
1091  * @adev: amdgpu_device pointer to use for commands
1092  * @vm: the VM to update the range
1093  * @immediate: immediate submission in a page fault
1094  * @unlocked: unlocked invalidation during MM callback
1095  * @flush_tlb: trigger tlb invalidation after update completed
1096  * @allow_override: change MTYPE for local NUMA nodes
1097  * @sync: fences we need to sync to
1098  * @start: start of mapped range
1099  * @last: last mapped entry
1100  * @flags: flags for the entries
1101  * @offset: offset into nodes and pages_addr
1102  * @vram_base: base for vram mappings
1103  * @res: ttm_resource to map
1104  * @pages_addr: DMA addresses to use for mapping
1105  * @fence: optional resulting fence
1106  *
1107  * Fill in the page table entries between @start and @last.
1108  *
1109  * Returns:
1110  * 0 for success, negative error code for failure.
1111  */
1112 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1113 			   bool immediate, bool unlocked, bool flush_tlb,
1114 			   bool allow_override, struct amdgpu_sync *sync,
1115 			   uint64_t start, uint64_t last, uint64_t flags,
1116 			   uint64_t offset, uint64_t vram_base,
1117 			   struct ttm_resource *res, dma_addr_t *pages_addr,
1118 			   struct dma_fence **fence)
1119 {
1120 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1121 	struct amdgpu_vm_update_params params;
1122 	struct amdgpu_res_cursor cursor;
1123 	int r, idx;
1124 
1125 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1126 		return -ENODEV;
1127 
1128 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
1129 	if (!tlb_cb) {
1130 		drm_dev_exit(idx);
1131 		return -ENOMEM;
1132 	}
1133 
1134 	/* On Vega20+XGMI PTEs can get inadvertently cached in the L2 texture
1135 	 * cache, so do a heavy-weight TLB flush unconditionally.
1136 	 */
1137 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1138 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1139 
1140 	/*
1141 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1142 	 */
1143 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1144 
1145 	memset(&params, 0, sizeof(params));
1146 	params.adev = adev;
1147 	params.vm = vm;
1148 	params.immediate = immediate;
1149 	params.pages_addr = pages_addr;
1150 	params.unlocked = unlocked;
1151 	params.needs_flush = flush_tlb;
1152 	params.allow_override = allow_override;
1153 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1154 
1155 	amdgpu_vm_eviction_lock(vm);
1156 	if (vm->evicting) {
1157 		r = -EBUSY;
1158 		goto error_free;
1159 	}
1160 
1161 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1162 		struct dma_fence *tmp = dma_fence_get_stub();
1163 
1164 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1165 		swap(vm->last_unlocked, tmp);
1166 		dma_fence_put(tmp);
1167 	}
1168 
1169 	r = vm->update_funcs->prepare(&params, sync,
1170 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
1171 	if (r)
1172 		goto error_free;
1173 
1174 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1175 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1176 	while (cursor.remaining) {
1177 		uint64_t tmp, num_entries, addr;
1178 
1179 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1180 		if (pages_addr) {
1181 			bool contiguous = true;
1182 
1183 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1184 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1185 				uint64_t count;
1186 
1187 				contiguous = pages_addr[pfn + 1] ==
1188 					pages_addr[pfn] + PAGE_SIZE;
1189 
1190 				tmp = num_entries /
1191 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1192 				for (count = 2; count < tmp; ++count) {
1193 					uint64_t idx = pfn + count;
1194 
1195 					if (contiguous != (pages_addr[idx] ==
1196 					    pages_addr[idx - 1] + PAGE_SIZE))
1197 						break;
1198 				}
1199 				if (!contiguous)
1200 					count--;
1201 				num_entries = count *
1202 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1203 			}
1204 
1205 			if (!contiguous) {
1206 				addr = cursor.start;
1207 				params.pages_addr = pages_addr;
1208 			} else {
1209 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1210 				params.pages_addr = NULL;
1211 			}
1212 
1213 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1214 			addr = vram_base + cursor.start;
1215 		} else {
1216 			addr = 0;
1217 		}
1218 
1219 		tmp = start + num_entries;
1220 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1221 		if (r)
1222 			goto error_free;
1223 
1224 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1225 		start = tmp;
1226 	}
1227 
1228 	r = vm->update_funcs->commit(&params, fence);
1229 	if (r)
1230 		goto error_free;
1231 
1232 	if (params.needs_flush) {
1233 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1234 		tlb_cb = NULL;
1235 	}
1236 
1237 	amdgpu_vm_pt_free_list(adev, &params);
1238 
1239 error_free:
1240 	kfree(tlb_cb);
1241 	amdgpu_vm_eviction_unlock(vm);
1242 	drm_dev_exit(idx);
1243 	return r;
1244 }
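
/*
 * Usage note (see the callers in this file): amdgpu_vm_bo_update()
 * passes the BO's ttm_resource and PTE flags to map a range, while
 * amdgpu_vm_clear_freed() passes flags = 0 and no resource so that the
 * range is turned back into invalid (or PRT) entries.
 */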
1245 
1246 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1247 			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1248 {
1249 	spin_lock(&vm->stats_lock);
1250 	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1251 	spin_unlock(&vm->stats_lock);
1252 }
1253 
1254 /**
1255  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1256  *
1257  * @adev: amdgpu_device pointer
1258  * @bo_va: requested BO and VM object
1259  * @clear: if true clear the entries
1260  *
1261  * Fill in the page table entries for @bo_va.
1262  *
1263  * Returns:
1264  * 0 for success, -EINVAL for failure.
1265  */
1266 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1267 			bool clear)
1268 {
1269 	struct amdgpu_bo *bo = bo_va->base.bo;
1270 	struct amdgpu_vm *vm = bo_va->base.vm;
1271 	struct amdgpu_bo_va_mapping *mapping;
1272 	struct dma_fence **last_update;
1273 	dma_addr_t *pages_addr = NULL;
1274 	struct ttm_resource *mem;
1275 	struct amdgpu_sync sync;
1276 	bool flush_tlb = clear;
1277 	uint64_t vram_base;
1278 	uint64_t flags;
1279 	bool uncached;
1280 	int r;
1281 
1282 	amdgpu_sync_create(&sync);
1283 	if (clear) {
1284 		mem = NULL;
1285 
1286 		/* Implicitly sync to command submissions in the same VM before
1287 		 * unmapping.
1288 		 */
1289 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1290 				     AMDGPU_SYNC_EQ_OWNER, vm);
1291 		if (r)
1292 			goto error_free;
1293 		if (bo) {
1294 			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1295 			if (r)
1296 				goto error_free;
1297 		}
1298 	} else if (!bo) {
1299 		mem = NULL;
1300 
1301 		/* PRT map operations don't need to sync to anything. */
1302 
1303 	} else {
1304 		struct drm_gem_object *obj = &bo->tbo.base;
1305 
1306 		if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
1307 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1308 			struct drm_gem_object *gobj = dma_buf->priv;
1309 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1310 
1311 			if (abo->tbo.resource &&
1312 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1313 				bo = gem_to_amdgpu_bo(gobj);
1314 		}
1315 		mem = bo->tbo.resource;
1316 		if (mem && (mem->mem_type == TTM_PL_TT ||
1317 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1318 			pages_addr = bo->tbo.ttm->dma_address;
1319 
1320 		/* Implicitly sync to moving fences before mapping anything */
1321 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1322 				     AMDGPU_SYNC_EXPLICIT, vm);
1323 		if (r)
1324 			goto error_free;
1325 	}
1326 
1327 	if (bo) {
1328 		struct amdgpu_device *bo_adev;
1329 
1330 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1331 
1332 		if (amdgpu_bo_encrypted(bo))
1333 			flags |= AMDGPU_PTE_TMZ;
1334 
1335 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1336 		vram_base = bo_adev->vm_manager.vram_base_offset;
1337 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1338 	} else {
1339 		flags = 0x0;
1340 		vram_base = 0;
1341 		uncached = false;
1342 	}
1343 
1344 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1345 		last_update = &vm->last_update;
1346 	else
1347 		last_update = &bo_va->last_pt_update;
1348 
1349 	if (!clear && bo_va->base.moved) {
1350 		flush_tlb = true;
1351 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1352 
1353 	} else if (bo_va->cleared != clear) {
1354 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1355 	}
1356 
1357 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1358 		uint64_t update_flags = flags;
1359 
1360 		/* Normally bo_va->flags only contains the READABLE and
1361 		 * WRITEABLE bits, but filter the flags here just in case.
1362 		 */
1363 		if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
1364 			update_flags &= ~AMDGPU_PTE_READABLE;
1365 		if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
1366 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1367 
1368 		/* Apply ASIC specific mapping flags */
1369 		amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
1370 				      &update_flags);
1371 
1372 		trace_amdgpu_vm_bo_update(mapping);
1373 
1374 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1375 					   !uncached, &sync, mapping->start,
1376 					   mapping->last, update_flags,
1377 					   mapping->offset, vram_base, mem,
1378 					   pages_addr, last_update);
1379 		if (r)
1380 			goto error_free;
1381 	}
1382 
1383 	/* If the BO is not in its preferred location, add it back to
1384 	 * the evicted list so that it gets validated again on the
1385 	 * next command submission.
1386 	 */
1387 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1388 		if (bo->tbo.resource &&
1389 		    !(bo->preferred_domains &
1390 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1391 			amdgpu_vm_bo_evicted(&bo_va->base);
1392 		else
1393 			amdgpu_vm_bo_idle(&bo_va->base);
1394 	} else {
1395 		amdgpu_vm_bo_done(&bo_va->base);
1396 	}
1397 
1398 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1399 	bo_va->cleared = clear;
1400 	bo_va->base.moved = false;
1401 
1402 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1403 		list_for_each_entry(mapping, &bo_va->valids, list)
1404 			trace_amdgpu_vm_bo_mapping(mapping);
1405 	}
1406 
1407 error_free:
1408 	amdgpu_sync_free(&sync);
1409 	return r;
1410 }
1411 
1412 /**
1413  * amdgpu_vm_update_prt_state - update the global PRT state
1414  *
1415  * @adev: amdgpu_device pointer
1416  */
1417 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1418 {
1419 	unsigned long flags;
1420 	bool enable;
1421 
1422 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1423 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1424 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1425 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1426 }
1427 
1428 /**
1429  * amdgpu_vm_prt_get - add a PRT user
1430  *
1431  * @adev: amdgpu_device pointer
1432  */
1433 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1434 {
1435 	if (!adev->gmc.gmc_funcs->set_prt)
1436 		return;
1437 
1438 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1439 		amdgpu_vm_update_prt_state(adev);
1440 }
1441 
1442 /**
1443  * amdgpu_vm_prt_put - drop a PRT user
1444  *
1445  * @adev: amdgpu_device pointer
1446  */
1447 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1448 {
1449 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1450 		amdgpu_vm_update_prt_state(adev);
1451 }
1452 
1453 /**
1454  * amdgpu_vm_prt_cb - callback for updating the PRT status
1455  *
1456  * @fence: fence for the callback
1457  * @_cb: the callback function
1458  */
1459 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1460 {
1461 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1462 
1463 	amdgpu_vm_prt_put(cb->adev);
1464 	kfree(cb);
1465 }
1466 
1467 /**
1468  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1469  *
1470  * @adev: amdgpu_device pointer
1471  * @fence: fence for the callback
1472  */
1473 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1474 				 struct dma_fence *fence)
1475 {
1476 	struct amdgpu_prt_cb *cb;
1477 
1478 	if (!adev->gmc.gmc_funcs->set_prt)
1479 		return;
1480 
1481 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1482 	if (!cb) {
1483 		/* Last resort when we are OOM */
1484 		if (fence)
1485 			dma_fence_wait(fence, false);
1486 
1487 		amdgpu_vm_prt_put(adev);
1488 	} else {
1489 		cb->adev = adev;
1490 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1491 						     amdgpu_vm_prt_cb))
1492 			amdgpu_vm_prt_cb(fence, &cb->cb);
1493 	}
1494 }
1495 
1496 /**
1497  * amdgpu_vm_free_mapping - free a mapping
1498  *
1499  * @adev: amdgpu_device pointer
1500  * @vm: requested vm
1501  * @mapping: mapping to be freed
1502  * @fence: fence of the unmap operation
1503  *
1504  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1505  */
1506 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1507 				   struct amdgpu_vm *vm,
1508 				   struct amdgpu_bo_va_mapping *mapping,
1509 				   struct dma_fence *fence)
1510 {
1511 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1512 		amdgpu_vm_add_prt_cb(adev, fence);
1513 	kfree(mapping);
1514 }
1515 
1516 /**
1517  * amdgpu_vm_prt_fini - finish all prt mappings
1518  *
1519  * @adev: amdgpu_device pointer
1520  * @vm: requested vm
1521  *
1522  * Register a cleanup callback to disable PRT support after VM dies.
1523  */
1524 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1525 {
1526 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1527 	struct dma_resv_iter cursor;
1528 	struct dma_fence *fence;
1529 
1530 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1531 		/* Add a callback for each fence in the reservation object */
1532 		amdgpu_vm_prt_get(adev);
1533 		amdgpu_vm_add_prt_cb(adev, fence);
1534 	}
1535 }
1536 
1537 /**
1538  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1539  *
1540  * @adev: amdgpu_device pointer
1541  * @vm: requested vm
1542  * @fence: optional resulting fence (unchanged if no work needed to be done
1543  * or if an error occurred)
1544  *
1545  * Make sure all freed BOs are cleared in the PT.
1546  * PTs have to be reserved and mutex must be locked!
1547  *
1548  * Returns:
1549  * 0 for success.
1550  *
1551  */
1552 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1553 			  struct amdgpu_vm *vm,
1554 			  struct dma_fence **fence)
1555 {
1556 	struct amdgpu_bo_va_mapping *mapping;
1557 	struct dma_fence *f = NULL;
1558 	struct amdgpu_sync sync;
1559 	int r;
1560 
1561 
1562 	/*
1563 	 * Implicitly sync to command submissions in the same VM before
1564 	 * unmapping.
1565 	 */
1566 	amdgpu_sync_create(&sync);
1567 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1568 			     AMDGPU_SYNC_EQ_OWNER, vm);
1569 	if (r)
1570 		goto error_free;
1571 
1572 	while (!list_empty(&vm->freed)) {
1573 		mapping = list_first_entry(&vm->freed,
1574 			struct amdgpu_bo_va_mapping, list);
1575 		list_del(&mapping->list);
1576 
1577 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1578 					   &sync, mapping->start, mapping->last,
1579 					   0, 0, 0, NULL, NULL, &f);
1580 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1581 		if (r) {
1582 			dma_fence_put(f);
1583 			goto error_free;
1584 		}
1585 	}
1586 
1587 	if (fence && f) {
1588 		dma_fence_put(*fence);
1589 		*fence = f;
1590 	} else {
1591 		dma_fence_put(f);
1592 	}
1593 
1594 error_free:
1595 	amdgpu_sync_free(&sync);
1596 	return r;
1597 
1598 }
1599 
1600 /**
1601  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1602  *
1603  * @adev: amdgpu_device pointer
1604  * @vm: requested vm
1605  * @ticket: optional reservation ticket used to reserve the VM
1606  *
1607  * Make sure all BOs which are moved are updated in the PTs.
1608  *
1609  * Returns:
1610  * 0 for success.
1611  *
1612  * PTs have to be reserved!
1613  */
1614 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1615 			   struct amdgpu_vm *vm,
1616 			   struct ww_acquire_ctx *ticket)
1617 {
1618 	struct amdgpu_bo_va *bo_va, *tmp;
1619 	struct dma_resv *resv;
1620 	bool clear, unlock;
1621 	int r;
1622 
1623 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
1624 		/* Per VM BOs never need to be cleared in the page tables */
1625 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1626 		if (r)
1627 			return r;
1628 	}
1629 
1630 	spin_lock(&vm->invalidated_lock);
1631 	while (!list_empty(&vm->invalidated)) {
1632 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1633 					 base.vm_status);
1634 		resv = bo_va->base.bo->tbo.base.resv;
1635 		spin_unlock(&vm->invalidated_lock);
1636 
1637 		/* Try to reserve the BO to avoid clearing its ptes */
1638 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1639 			clear = false;
1640 			unlock = true;
1641 		/* The caller is already holding the reservation lock */
1642 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1643 			clear = false;
1644 			unlock = false;
1645 		/* Somebody else is using the BO right now */
1646 		} else {
1647 			clear = true;
1648 			unlock = false;
1649 		}
1650 
1651 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1652 
1653 		if (unlock)
1654 			dma_resv_unlock(resv);
1655 		if (r)
1656 			return r;
1657 
1658 		/* Remember evicted DMABuf imports in compute VMs for later
1659 		 * validation
1660 		 */
1661 		if (vm->is_compute_context &&
1662 		    drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
1663 		    (!bo_va->base.bo->tbo.resource ||
1664 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1665 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1666 
1667 		spin_lock(&vm->invalidated_lock);
1668 	}
1669 	spin_unlock(&vm->invalidated_lock);
1670 
1671 	return 0;
1672 }
1673 
1674 /**
1675  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1676  *
1677  * @adev: amdgpu_device pointer
1678  * @vm: requested vm
1679  * @flush_type: flush type
1680  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1681  *
1682  * Flush TLB if needed for a compute VM.
1683  *
1684  * Returns:
1685  * 0 for success.
1686  */
1687 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1688 				struct amdgpu_vm *vm,
1689 				uint32_t flush_type,
1690 				uint32_t xcc_mask)
1691 {
1692 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1693 	bool all_hub = false;
1694 	int xcc = 0, r = 0;
1695 
1696 	WARN_ON_ONCE(!vm->is_compute_context);
1697 
1698 	/*
1699 	 * It can be that we race and lose here, but that is extremely unlikely
1700 	 * and the worst thing which could happen is that we flush the changes
1701 	 * into the TLB once more which is harmless.
1702 	 */
1703 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1704 		return 0;
1705 
1706 	if (adev->family == AMDGPU_FAMILY_AI ||
1707 	    adev->family == AMDGPU_FAMILY_RV)
1708 		all_hub = true;
1709 
1710 	for_each_inst(xcc, xcc_mask) {
1711 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1712 						   all_hub, xcc);
1713 		if (r)
1714 			break;
1715 	}
1716 	return r;
1717 }
1718 
1719 /**
1720  * amdgpu_vm_bo_add - add a bo to a specific vm
1721  *
1722  * @adev: amdgpu_device pointer
1723  * @vm: requested vm
1724  * @bo: amdgpu buffer object
1725  *
1726  * Add @bo into the requested vm.
1727  * Add @bo to the list of bos associated with the vm.
1728  *
1729  * Returns:
1730  * Newly added bo_va or NULL for failure
1731  *
1732  * Object has to be reserved!
1733  */
1734 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1735 				      struct amdgpu_vm *vm,
1736 				      struct amdgpu_bo *bo)
1737 {
1738 	struct amdgpu_bo_va *bo_va;
1739 
1740 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1741 	if (!bo_va)
1742 		return NULL;
1743 
1744 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1745 
1746 	bo_va->ref_count = 1;
1747 	bo_va->last_pt_update = dma_fence_get_stub();
1748 	INIT_LIST_HEAD(&bo_va->valids);
1749 	INIT_LIST_HEAD(&bo_va->invalids);
1750 
1751 	if (!bo)
1752 		return bo_va;
1753 
1754 	dma_resv_assert_held(bo->tbo.base.resv);
1755 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1756 		bo_va->is_xgmi = true;
1757 		/* Power up XGMI if it can be potentially used */
1758 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1759 	}
1760 
1761 	return bo_va;
1762 }
1763 
1764 
1765 /**
1766  * amdgpu_vm_bo_insert_map - insert a new mapping
1767  *
1768  * @adev: amdgpu_device pointer
1769  * @bo_va: bo_va to store the address
1770  * @mapping: the mapping to insert
1771  *
1772  * Insert a new mapping into all structures.
1773  */
1774 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1775 				    struct amdgpu_bo_va *bo_va,
1776 				    struct amdgpu_bo_va_mapping *mapping)
1777 {
1778 	struct amdgpu_vm *vm = bo_va->base.vm;
1779 	struct amdgpu_bo *bo = bo_va->base.bo;
1780 
1781 	mapping->bo_va = bo_va;
1782 	list_add(&mapping->list, &bo_va->invalids);
1783 	amdgpu_vm_it_insert(mapping, &vm->va);
1784 
1785 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1786 		amdgpu_vm_prt_get(adev);
1787 
1788 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1789 		amdgpu_vm_bo_moved(&bo_va->base);
1790 
1791 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1792 }
1793 
1794 /* Validate operation parameters to prevent potential abuse */
1795 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1796 					  struct amdgpu_bo *bo,
1797 					  uint64_t saddr,
1798 					  uint64_t offset,
1799 					  uint64_t size)
1800 {
1801 	uint64_t tmp, lpfn;
1802 
1803 	if (saddr & AMDGPU_GPU_PAGE_MASK ||
1804 	    offset & AMDGPU_GPU_PAGE_MASK ||
1805 	    size & AMDGPU_GPU_PAGE_MASK)
1806 		return -EINVAL;
1807 
1808 	if (check_add_overflow(saddr, size, &tmp) ||
1809 	    check_add_overflow(offset, size, &tmp) ||
1810 	    size == 0 /* which also leads to end < begin */)
1811 		return -EINVAL;
1812 
1813 	/* make sure object fit at this offset */
1814 	if (bo && offset + size > amdgpu_bo_size(bo))
1815 		return -EINVAL;
1816 
1817 	/* Ensure last pfn not exceed max_pfn */
1818 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1819 	if (lpfn >= adev->vm_manager.max_pfn)
1820 		return -EINVAL;
1821 
1822 	return 0;
1823 }
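
/*
 * Example (illustrative): mapping three GPU pages at a 4 KiB aligned VA
 *
 *   saddr = 0x100000, offset = 0, size = 0x3000  -> accepted
 *   saddr = 0x100800 (unaligned)                 -> -EINVAL
 *   offset + size > amdgpu_bo_size(bo)           -> -EINVAL
 */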
1824 
1825 /**
1826  * amdgpu_vm_bo_map - map bo inside a vm
1827  *
1828  * @adev: amdgpu_device pointer
1829  * @bo_va: bo_va to store the address
1830  * @saddr: where to map the BO
1831  * @offset: requested offset in the BO
1832  * @size: size of the mapping in bytes
1833  * @flags: attributes of pages (read/write/valid/etc.)
1834  *
1835  * Add a mapping of the BO at the specified addr into the VM.
1836  *
1837  * Returns:
1838  * 0 for success, error for failure.
1839  *
1840  * Object has to be reserved and unreserved outside!
1841  */
1842 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1843 		     struct amdgpu_bo_va *bo_va,
1844 		     uint64_t saddr, uint64_t offset,
1845 		     uint64_t size, uint32_t flags)
1846 {
1847 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1848 	struct amdgpu_bo *bo = bo_va->base.bo;
1849 	struct amdgpu_vm *vm = bo_va->base.vm;
1850 	uint64_t eaddr;
1851 	int r;
1852 
1853 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1854 	if (r)
1855 		return r;
1856 
1857 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1858 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1859 
1860 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1861 	if (tmp) {
1862 		/* bo and tmp overlap, invalid addr */
1863 		dev_err(adev->dev,
1864 			"bo %p va 0x%010Lx-0x%010Lx conflict with 0x%010Lx-0x%010Lx\n",
1865 			bo, saddr, eaddr, tmp->start, tmp->last + 1);
1866 		return -EINVAL;
1867 	}
1868 
1869 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1870 	if (!mapping)
1871 		return -ENOMEM;
1872 
1873 	mapping->start = saddr;
1874 	mapping->last = eaddr;
1875 	mapping->offset = offset;
1876 	mapping->flags = flags;
1877 
1878 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1879 
1880 	return 0;
1881 }
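
/*
 * A minimal caller-side sketch (addresses arbitrary, BO reserved by the
 * caller, @bo_va obtained from amdgpu_vm_bo_add()):
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x2000,
 *			     AMDGPU_VM_PAGE_READABLE |
 *			     AMDGPU_VM_PAGE_WRITEABLE);
 *
 * With 4 KiB GPU pages the division above yields saddr = 0x100 and
 * eaddr = 0x101, i.e. the mapping occupies the inclusive page interval
 * [0x100, 0x101] in the VA interval tree.
 */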
1882 
1883 /**
1884  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1885  *
1886  * @adev: amdgpu_device pointer
1887  * @bo_va: bo_va to store the address
1888  * @saddr: where to map the BO
1889  * @offset: requested offset in the BO
1890  * @size: size of the mapping in bytes
1891  * @flags: attributes of pages (read/write/valid/etc.)
1892  *
1893  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1894  * mappings as we do so.
1895  *
1896  * Returns:
1897  * 0 for success, error for failure.
1898  *
1899  * Object has to be reserved and unreserved outside!
1900  */
1901 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1902 			     struct amdgpu_bo_va *bo_va,
1903 			     uint64_t saddr, uint64_t offset,
1904 			     uint64_t size, uint32_t flags)
1905 {
1906 	struct amdgpu_bo_va_mapping *mapping;
1907 	struct amdgpu_bo *bo = bo_va->base.bo;
1908 	uint64_t eaddr;
1909 	int r;
1910 
1911 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1912 	if (r)
1913 		return r;
1914 
1915 	/* Allocate all the needed memory */
1916 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1917 	if (!mapping)
1918 		return -ENOMEM;
1919 
1920 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1921 	if (r) {
1922 		kfree(mapping);
1923 		return r;
1924 	}
1925 
1926 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1927 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1928 
1929 	mapping->start = saddr;
1930 	mapping->last = eaddr;
1931 	mapping->offset = offset;
1932 	mapping->flags = flags;
1933 
1934 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1935 
1936 	return 0;
1937 }
1938 
1939 /**
1940  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1941  *
1942  * @adev: amdgpu_device pointer
1943  * @bo_va: bo_va to remove the address from
1944  * @saddr: where the BO is mapped
1945  *
1946  * Remove a mapping of the BO at the specified addr from the VM.
1947  *
1948  * Returns:
1949  * 0 for success, error for failure.
1950  *
1951  * Object has to be reserved and unreserved outside!
1952  */
1953 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1954 		       struct amdgpu_bo_va *bo_va,
1955 		       uint64_t saddr)
1956 {
1957 	struct amdgpu_bo_va_mapping *mapping;
1958 	struct amdgpu_vm *vm = bo_va->base.vm;
1959 	bool valid = true;
1960 
1961 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1962 
1963 	list_for_each_entry(mapping, &bo_va->valids, list) {
1964 		if (mapping->start == saddr)
1965 			break;
1966 	}
1967 
1968 	if (&mapping->list == &bo_va->valids) {
1969 		valid = false;
1970 
1971 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1972 			if (mapping->start == saddr)
1973 				break;
1974 		}
1975 
1976 		if (&mapping->list == &bo_va->invalids)
1977 			return -ENOENT;
1978 	}
1979 
1980 	list_del(&mapping->list);
1981 	amdgpu_vm_it_remove(mapping, &vm->va);
1982 	mapping->bo_va = NULL;
1983 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1984 
1985 	if (valid)
1986 		list_add(&mapping->list, &vm->freed);
1987 	else
1988 		amdgpu_vm_free_mapping(adev, vm, mapping,
1989 				       bo_va->last_pt_update);
1990 
1991 	return 0;
1992 }
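
/*
 * Example, continuing the amdgpu_vm_bo_map() sketch above: unmapping is
 * keyed on the exact start address of an existing mapping,
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, 0x100000);
 *
 * succeeds and queues the mapping on vm->freed (it was on the valids
 * list), while an address inside but not at the start of the mapping,
 * e.g. 0x101000, yields -ENOENT.
 */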
1993 
1994 /**
1995  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1996  *
1997  * @adev: amdgpu_device pointer
1998  * @vm: VM structure to use
1999  * @saddr: start of the range
2000  * @size: size of the range
2001  *
2002  * Remove all mappings in a range, splitting them as appropriate.
2003  *
2004  * Returns:
2005  * 0 for success, error for failure.
2006  */
2007 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2008 				struct amdgpu_vm *vm,
2009 				uint64_t saddr, uint64_t size)
2010 {
2011 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2012 	LIST_HEAD(removed);
2013 	uint64_t eaddr;
2014 	int r;
2015 
2016 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
2017 	if (r)
2018 		return r;
2019 
2020 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2021 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
2022 
2023 	/* Allocate all the needed memory */
2024 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2025 	if (!before)
2026 		return -ENOMEM;
2027 	INIT_LIST_HEAD(&before->list);
2028 
2029 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2030 	if (!after) {
2031 		kfree(before);
2032 		return -ENOMEM;
2033 	}
2034 	INIT_LIST_HEAD(&after->list);
2035 
2036 	/* Now gather all removed mappings */
2037 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2038 	while (tmp) {
2039 		/* Remember mapping split at the start */
2040 		if (tmp->start < saddr) {
2041 			before->start = tmp->start;
2042 			before->last = saddr - 1;
2043 			before->offset = tmp->offset;
2044 			before->flags = tmp->flags;
2045 			before->bo_va = tmp->bo_va;
2046 			list_add(&before->list, &tmp->bo_va->invalids);
2047 		}
2048 
2049 		/* Remember mapping split at the end */
2050 		if (tmp->last > eaddr) {
2051 			after->start = eaddr + 1;
2052 			after->last = tmp->last;
2053 			after->offset = tmp->offset;
2054 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2055 			after->flags = tmp->flags;
2056 			after->bo_va = tmp->bo_va;
2057 			list_add(&after->list, &tmp->bo_va->invalids);
2058 		}
2059 
2060 		list_move(&tmp->list, &removed);
2062 
2063 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2064 	}
2065 
2066 	/* And free them up */
2067 	list_for_each_entry_safe(tmp, next, &removed, list) {
2068 		amdgpu_vm_it_remove(tmp, &vm->va);
2069 		list_del(&tmp->list);
2070 
2071 		if (tmp->start < saddr)
2072 			tmp->start = saddr;
2073 		if (tmp->last > eaddr)
2074 			tmp->last = eaddr;
2075 
2076 		tmp->bo_va = NULL;
2077 		list_add(&tmp->list, &vm->freed);
2078 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2079 	}
2080 
2081 	/* Insert partial mapping before the range */
2082 	if (!list_empty(&before->list)) {
2083 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2084 
2085 		amdgpu_vm_it_insert(before, &vm->va);
2086 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2087 			amdgpu_vm_prt_get(adev);
2088 
2089 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2090 		    !before->bo_va->base.moved)
2091 			amdgpu_vm_bo_moved(&before->bo_va->base);
2092 	} else {
2093 		kfree(before);
2094 	}
2095 
2096 	/* Insert partial mapping after the range */
2097 	if (!list_empty(&after->list)) {
2098 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2099 
2100 		amdgpu_vm_it_insert(after, &vm->va);
2101 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2102 			amdgpu_vm_prt_get(adev);
2103 
2104 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2105 		    !after->bo_va->base.moved)
2106 			amdgpu_vm_bo_moved(&after->bo_va->base);
2107 	} else {
2108 		kfree(after);
2109 	}
2110 
2111 	return 0;
2112 }
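
/*
 * Split semantics by example: given one mapping covering pages
 * [0x100, 0x1ff], clearing the byte range corresponding to pages
 * [0x140, 0x17f] leaves a "before" remainder [0x100, 0x13f] and an
 * "after" remainder [0x180, 0x1ff], with the offset of the "after"
 * part advanced past the removed middle. If the range only clips one
 * end of the mapping, the unused before/after template is freed again.
 */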
2113 
2114 /**
2115  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2116  *
2117  * @vm: the requested VM
2118  * @addr: the address
2119  *
2120  * Find a mapping by its address.
2121  *
2122  * Returns:
2123  * The amdgpu_bo_va_mapping matching @addr, or NULL if none is found.
2125  */
2126 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2127 							 uint64_t addr)
2128 {
2129 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2130 }
2131 
2132 /**
2133  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2134  *
2135  * @vm: the requested vm
2136  * @ticket: CS ticket
2137  *
2138  * Trace all mappings of BOs reserved during a command submission.
2139  */
2140 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2141 {
2142 	struct amdgpu_bo_va_mapping *mapping;
2143 
2144 	if (!trace_amdgpu_vm_bo_cs_enabled())
2145 		return;
2146 
2147 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2148 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2149 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2150 			struct amdgpu_bo *bo;
2151 
2152 			bo = mapping->bo_va->base.bo;
2153 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2154 			    ticket)
2155 				continue;
2156 		}
2157 
2158 		trace_amdgpu_vm_bo_cs(mapping);
2159 	}
2160 }
2161 
2162 /**
2163  * amdgpu_vm_bo_del - remove a bo from a specific vm
2164  *
2165  * @adev: amdgpu_device pointer
2166  * @bo_va: requested bo_va
2167  *
2168  * Remove @bo_va->bo from the requested vm.
2169  *
2170  * Object has to be reserved!
2171  */
2172 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2173 		      struct amdgpu_bo_va *bo_va)
2174 {
2175 	struct amdgpu_bo_va_mapping *mapping, *next;
2176 	struct amdgpu_bo *bo = bo_va->base.bo;
2177 	struct amdgpu_vm *vm = bo_va->base.vm;
2178 	struct amdgpu_vm_bo_base **base;
2179 
2180 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2181 
2182 	if (bo) {
2183 		dma_resv_assert_held(bo->tbo.base.resv);
2184 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2185 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2186 
2187 		for (base = &bo_va->base.bo->vm_bo; *base;
2188 		     base = &(*base)->next) {
2189 			if (*base != &bo_va->base)
2190 				continue;
2191 
2192 			amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2193 			*base = bo_va->base.next;
2194 			break;
2195 		}
2196 	}
2197 
2198 	spin_lock(&vm->invalidated_lock);
2199 	list_del(&bo_va->base.vm_status);
2200 	spin_unlock(&vm->invalidated_lock);
2201 
2202 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2203 		list_del(&mapping->list);
2204 		amdgpu_vm_it_remove(mapping, &vm->va);
2205 		mapping->bo_va = NULL;
2206 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2207 		list_add(&mapping->list, &vm->freed);
2208 	}
2209 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2210 		list_del(&mapping->list);
2211 		amdgpu_vm_it_remove(mapping, &vm->va);
2212 		amdgpu_vm_free_mapping(adev, vm, mapping,
2213 				       bo_va->last_pt_update);
2214 	}
2215 
2216 	dma_fence_put(bo_va->last_pt_update);
2217 
2218 	if (bo && bo_va->is_xgmi)
2219 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2220 
2221 	kfree(bo_va);
2222 }
2223 
2224 /**
2225  * amdgpu_vm_evictable - check if we can evict a VM
2226  *
2227  * @bo: A page table of the VM.
2228  *
2229  * Check if it is possible to evict a VM.
2230  */
2231 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2232 {
2233 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2234 
2235 	/* Page tables of a destroyed VM can go away immediately */
2236 	if (!bo_base || !bo_base->vm)
2237 		return true;
2238 
2239 	/* Don't evict VM page tables while they are busy */
2240 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2241 		return false;
2242 
2243 	/* Try to block ongoing updates */
2244 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2245 		return false;
2246 
2247 	/* Don't evict VM page tables while they are updated */
2248 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2249 		amdgpu_vm_eviction_unlock(bo_base->vm);
2250 		return false;
2251 	}
2252 
2253 	bo_base->vm->evicting = true;
2254 	amdgpu_vm_eviction_unlock(bo_base->vm);
2255 	return true;
2256 }
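
/*
 * Caller-side sketch (simplified; the real consumer is amdgpu's TTM
 * eviction_valuable callback, amdgpu_ttm_bo_eviction_valuable()):
 *
 *	if (bo->type == ttm_bo_type_kernel &&
 *	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
 *		return false;
 *
 * A true return has already latched vm->evicting under the eviction
 * lock, so concurrent unlocked page-table updates will back off.
 */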
2257 
2258 /**
2259  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2260  *
2261  * @bo: amdgpu buffer object
2262  * @evicted: is the BO evicted
2263  *
2264  * Mark @bo as invalid.
2265  */
2266 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2267 {
2268 	struct amdgpu_vm_bo_base *bo_base;
2269 
2270 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2271 		struct amdgpu_vm *vm = bo_base->vm;
2272 
2273 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2274 			amdgpu_vm_bo_evicted(bo_base);
2275 			continue;
2276 		}
2277 
2278 		if (bo_base->moved)
2279 			continue;
2280 		bo_base->moved = true;
2281 
2282 		if (bo->tbo.type == ttm_bo_type_kernel)
2283 			amdgpu_vm_bo_relocated(bo_base);
2284 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2285 			amdgpu_vm_bo_moved(bo_base);
2286 		else
2287 			amdgpu_vm_bo_invalidated(bo_base);
2288 	}
2289 }
2290 
2291 /**
2292  * amdgpu_vm_bo_move - handle BO move
2293  *
2294  * @bo: amdgpu buffer object
2295  * @new_mem: the new placement of the BO move
2296  * @evicted: is the BO evicted
2297  *
2298  * Update the memory stats for the new placement and mark @bo as invalid.
2299  */
2300 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2301 		       bool evicted)
2302 {
2303 	struct amdgpu_vm_bo_base *bo_base;
2304 
2305 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2306 		struct amdgpu_vm *vm = bo_base->vm;
2307 
2308 		spin_lock(&vm->stats_lock);
2309 		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2310 		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2311 		spin_unlock(&vm->stats_lock);
2312 	}
2313 
2314 	amdgpu_vm_bo_invalidate(bo, evicted);
2315 }
2316 
2317 /**
2318  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2319  *
2320  * @vm_size: VM size
2321  *
2322  * Returns:
2323  * VM page table as power of two
2324  */
2325 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2326 {
2327 	/* Total bits covered by PD + PTs */
2328 	unsigned int bits = ilog2(vm_size) + 18;
2329 
2330 	/* Make sure the PD is 4K in size up to 8GB of address space.
2331 	 * Above that, split equally between PD and PTs. */
2332 	if (vm_size <= 8)
2333 		return bits - 9;
2334 	else
2335 		return (bits + 3) / 2;
2336 }
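
/*
 * Worked example: for vm_size = 64 (GB), bits = ilog2(64) + 18 = 24 and
 * the function returns (24 + 3) / 2 = 13. For vm_size = 8 it returns
 * 3 + 18 - 9 = 12, which keeps the PD itself at 4K.
 */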
2337 
2338 /**
2339  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2340  *
2341  * @adev: amdgpu_device pointer
2342  * @min_vm_size: the minimum vm size in GB if it's set to auto
2343  * @fragment_size_default: Default PTE fragment size
2344  * @max_level: max VMPT level
2345  * @max_bits: max address space size in bits
2346  *
2347  */
2348 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2349 			   uint32_t fragment_size_default, unsigned max_level,
2350 			   unsigned max_bits)
2351 {
2352 	unsigned int max_size = 1 << (max_bits - 30);
2353 	unsigned int vm_size;
2354 	uint64_t tmp;
2355 
2356 	/* adjust vm size first */
2357 	if (amdgpu_vm_size != -1) {
2358 		vm_size = amdgpu_vm_size;
2359 		if (vm_size > max_size) {
2360 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2361 				 amdgpu_vm_size, max_size);
2362 			vm_size = max_size;
2363 		}
2364 	} else {
2365 		struct sysinfo si;
2366 		unsigned int phys_ram_gb;
2367 
2368 		/* Optimal VM size depends on the amount of physical
2369 		 * RAM available. Underlying requirements and
2370 		 * assumptions:
2371 		 *
2372 		 *  - Need to map system memory and VRAM from all GPUs
2373 		 *     - VRAM from other GPUs not known here
2374 		 *     - Assume VRAM <= system memory
2375 		 *  - On GFX8 and older, VM space can be segmented for
2376 		 *    different MTYPEs
2377 		 *  - Need to allow room for fragmentation, guard pages etc.
2378 		 *
2379 		 * This adds up to a rough guess of system memory x3.
2380 		 * Round up to power of two to maximize the available
2381 		 * VM size with the given page table size.
2382 		 */
2383 		si_meminfo(&si);
2384 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2385 			       (1 << 30) - 1) >> 30;
2386 		vm_size = roundup_pow_of_two(
2387 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2388 	}
2389 
2390 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2391 
2392 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2393 	if (amdgpu_vm_block_size != -1)
2394 		tmp >>= amdgpu_vm_block_size - 9;
2395 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2396 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2397 	switch (adev->vm_manager.num_level) {
2398 	case 3:
2399 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2400 		break;
2401 	case 2:
2402 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2403 		break;
2404 	case 1:
2405 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2406 		break;
2407 	default:
2408 		dev_err(adev->dev, "VMPT only supports 2 to 4+1 levels\n");
2409 	}
2410 	/* block size depends on vm size and hw setup */
2411 	if (amdgpu_vm_block_size != -1)
2412 		adev->vm_manager.block_size =
2413 			min((unsigned)amdgpu_vm_block_size, max_bits
2414 			    - AMDGPU_GPU_PAGE_SHIFT
2415 			    - 9 * adev->vm_manager.num_level);
2416 	else if (adev->vm_manager.num_level > 1)
2417 		adev->vm_manager.block_size = 9;
2418 	else
2419 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2420 
2421 	if (amdgpu_vm_fragment_size == -1)
2422 		adev->vm_manager.fragment_size = fragment_size_default;
2423 	else
2424 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2425 
2426 	dev_info(
2427 		adev->dev,
2428 		"vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2429 		vm_size, adev->vm_manager.num_level + 1,
2430 		adev->vm_manager.block_size, adev->vm_manager.fragment_size);
2431 }
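
/*
 * Worked example of the sizing math above, assuming vm_size ends up as
 * 256 GB and max_level = 3: max_pfn = 256 << 18 = 1 << 26 pages of 4 KiB,
 * fls64(1 << 26) - 1 = 26 page-address bits, DIV_ROUND_UP(26, 9) - 1 = 2,
 * so num_level = 2 and root_level = AMDGPU_VM_PDB1, with the default
 * block_size of 9 (512 PTEs per page table) since num_level > 1.
 */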
2432 
2433 /**
2434  * amdgpu_vm_wait_idle - wait for the VM to become idle
2435  *
2436  * @vm: VM object to wait for
2437  * @timeout: timeout to wait for VM to become idle
2438  */
2439 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2440 {
2441 	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
2442 	if (timeout <= 0)
2443 		return timeout;
2444 
2445 	return drm_sched_entity_flush(&vm->delayed, timeout);
2446 }
2447 
2448 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2449 {
2450 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2451 
2452 	kfree(ti);
2453 }
2454 
2455 static inline struct amdgpu_vm *
2456 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2457 {
2458 	struct amdgpu_vm *vm;
2459 	unsigned long flags;
2460 
2461 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2462 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2463 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2464 
2465 	return vm;
2466 }
2467 
2468 /**
2469  * amdgpu_vm_put_task_info - drop a reference to the vm task_info ptr
2470  *
2471  * @task_info: task_info struct under discussion.
2472  *
2473  * Frees the vm task_info ptr at the last put.
2474  */
2475 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2476 {
2477 	if (task_info)
2478 		kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2479 }
2480 
2481 /**
2482  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2483  *
2484  * @vm: VM to get info from
2485  *
2486  * Returns the reference counted task_info structure, which must be
2487  * released with amdgpu_vm_put_task_info().
2488  */
2489 struct amdgpu_task_info *
2490 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2491 {
2492 	struct amdgpu_task_info *ti = NULL;
2493 
2494 	if (vm) {
2495 		ti = vm->task_info;
2496 		kref_get(&vm->task_info->refcount);
2497 	}
2498 
2499 	return ti;
2500 }
2501 
2502 /**
2503  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2504  *
2505  * @adev: drm device pointer
2506  * @pasid: PASID identifier for VM
2507  *
2508  * Returns the reference counted task_info structure, which must be
2509  * released with amdgpu_vm_put_task_info().
2510  */
2511 struct amdgpu_task_info *
2512 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2513 {
2514 	return amdgpu_vm_get_task_info_vm(
2515 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2516 }
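
/*
 * Refcounting sketch for the task_info accessors:
 *
 *	struct amdgpu_task_info *ti;
 *
 *	ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *	if (ti) {
 *		dev_info(adev->dev, "fault in process %s pid %d\n",
 *			 ti->process_name, ti->task.pid);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 *
 * Every successful get must be balanced by a put; the structure is
 * freed only when the last reference is dropped.
 */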
2517 
2518 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2519 {
2520 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2521 	if (!vm->task_info)
2522 		return -ENOMEM;
2523 
2524 	kref_init(&vm->task_info->refcount);
2525 	return 0;
2526 }
2527 
2528 /**
2529  * amdgpu_vm_set_task_info - Sets the VM's task info.
2530  *
2531  * @vm: vm for which to set the info
2532  */
2533 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2534 {
2535 	if (!vm->task_info)
2536 		return;
2537 
2538 	if (vm->task_info->task.pid == current->pid)
2539 		return;
2540 
2541 	vm->task_info->task.pid = current->pid;
2542 	get_task_comm(vm->task_info->task.comm, current);
2543 
2544 	if (current->group_leader->mm != current->mm)
2545 		return;
2546 
2547 	vm->task_info->tgid = current->group_leader->pid;
2548 	get_task_comm(vm->task_info->process_name, current->group_leader);
2549 }
2550 
2551 /**
2552  * amdgpu_vm_init - initialize a vm instance
2553  *
2554  * @adev: amdgpu_device pointer
2555  * @vm: requested vm
2556  * @xcp_id: GPU partition selection id
2557  *
2558  * Init @vm fields.
2559  *
2560  * Returns:
2561  * 0 for success, error for failure.
2562  */
2563 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2564 		   int32_t xcp_id)
2565 {
2566 	struct amdgpu_bo *root_bo;
2567 	struct amdgpu_bo_vm *root;
2568 	int r, i;
2569 
2570 	vm->va = RB_ROOT_CACHED;
2571 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2572 		vm->reserved_vmid[i] = NULL;
2573 	INIT_LIST_HEAD(&vm->evicted);
2574 	INIT_LIST_HEAD(&vm->evicted_user);
2575 	INIT_LIST_HEAD(&vm->relocated);
2576 	INIT_LIST_HEAD(&vm->moved);
2577 	INIT_LIST_HEAD(&vm->idle);
2578 	spin_lock_init(&vm->invalidated_lock);
2579 	INIT_LIST_HEAD(&vm->invalidated);
2580 	INIT_LIST_HEAD(&vm->freed);
2581 	INIT_LIST_HEAD(&vm->done);
2582 	INIT_KFIFO(vm->faults);
2583 	spin_lock_init(&vm->stats_lock);
2584 
2585 	r = amdgpu_vm_init_entities(adev, vm);
2586 	if (r)
2587 		return r;
2588 
2589 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2590 
2591 	vm->is_compute_context = false;
2592 
2593 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2594 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2595 
2596 	dev_dbg(adev->dev, "VM update mode is %s\n",
2597 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2598 	WARN_ONCE((vm->use_cpu_for_update &&
2599 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2600 		  "CPU update of VM recommended only for large BAR system\n");
2601 
2602 	if (vm->use_cpu_for_update)
2603 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2604 	else
2605 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2606 
2607 	vm->last_update = dma_fence_get_stub();
2608 	vm->last_unlocked = dma_fence_get_stub();
2609 	vm->last_tlb_flush = dma_fence_get_stub();
2610 	vm->generation = amdgpu_vm_generation(adev, NULL);
2611 
2612 	mutex_init(&vm->eviction_lock);
2613 	vm->evicting = false;
2614 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2615 
2616 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2617 				false, &root, xcp_id);
2618 	if (r)
2619 		goto error_free_delayed;
2620 
2621 	root_bo = amdgpu_bo_ref(&root->bo);
2622 	r = amdgpu_bo_reserve(root_bo, true);
2623 	if (r) {
2624 		amdgpu_bo_unref(&root_bo);
2625 		goto error_free_delayed;
2626 	}
2627 
2628 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2629 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2630 	if (r)
2631 		goto error_free_root;
2632 
2633 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2634 	if (r)
2635 		goto error_free_root;
2636 
2637 	r = amdgpu_vm_create_task_info(vm);
2638 	if (r)
2639 		dev_dbg(adev->dev, "Failed to create task info for VM\n");
2640 
2641 	amdgpu_bo_unreserve(vm->root.bo);
2642 	amdgpu_bo_unref(&root_bo);
2643 
2644 	return 0;
2645 
2646 error_free_root:
2647 	amdgpu_vm_pt_free_root(adev, vm);
2648 	amdgpu_bo_unreserve(vm->root.bo);
2649 	amdgpu_bo_unref(&root_bo);
2650 
2651 error_free_delayed:
2652 	dma_fence_put(vm->last_tlb_flush);
2653 	dma_fence_put(vm->last_unlocked);
2654 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2655 	amdgpu_vm_fini_entities(vm);
2656 
2657 	return r;
2658 }
2659 
2660 /**
2661  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2662  *
2663  * @adev: amdgpu_device pointer
2664  * @vm: requested vm
2665  *
2666  * This only works on GFX VMs that have no BOs added and no page
2667  * tables allocated yet.
2668  *
2669  * Changes the following VM parameters:
2670  * - use_cpu_for_update
2671  * - pte_supports_ats
2672  *
2673  * Reinitializes the page directory to reflect the changed ATS
2674  * setting.
2675  *
2676  * Returns:
2677  * 0 for success, -errno for errors.
2678  */
2679 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2680 {
2681 	int r;
2682 
2683 	r = amdgpu_bo_reserve(vm->root.bo, true);
2684 	if (r)
2685 		return r;
2686 
2687 	/* Update VM state */
2688 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2689 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2690 	dev_dbg(adev->dev, "VM update mode is %s\n",
2691 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2692 	WARN_ONCE((vm->use_cpu_for_update &&
2693 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2694 		  "CPU update of VM recommended only for large BAR system\n");
2695 
2696 	if (vm->use_cpu_for_update) {
2697 		/* Sync with last SDMA update/clear before switching to CPU */
2698 		r = amdgpu_bo_sync_wait(vm->root.bo,
2699 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2700 		if (r)
2701 			goto unreserve_bo;
2702 
2703 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2704 		r = amdgpu_vm_pt_map_tables(adev, vm);
2705 		if (r)
2706 			goto unreserve_bo;
2707 
2708 	} else {
2709 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2710 	}
2711 
2712 	dma_fence_put(vm->last_update);
2713 	vm->last_update = dma_fence_get_stub();
2714 	vm->is_compute_context = true;
2715 
2716 unreserve_bo:
2717 	amdgpu_bo_unreserve(vm->root.bo);
2718 	return r;
2719 }
2720 
2721 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2722 {
2723 	for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2724 		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2725 		      vm->stats[i].evicted == 0))
2726 			return false;
2727 	}
2728 	return true;
2729 }
2730 
2731 /**
2732  * amdgpu_vm_fini - tear down a vm instance
2733  *
2734  * @adev: amdgpu_device pointer
2735  * @vm: requested vm
2736  *
2737  * Tear down @vm.
2738  * Unbind the VM and remove all bos from the vm bo list
2739  */
2740 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2741 {
2742 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2743 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2744 	struct amdgpu_bo *root;
2745 	unsigned long flags;
2746 	int i;
2747 
2748 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2749 
2750 	root = amdgpu_bo_ref(vm->root.bo);
2751 	amdgpu_bo_reserve(root, true);
2752 	amdgpu_vm_set_pasid(adev, vm, 0);
2753 	dma_fence_wait(vm->last_unlocked, false);
2754 	dma_fence_put(vm->last_unlocked);
2755 	dma_fence_wait(vm->last_tlb_flush, false);
2756 	/* Make sure that all fence callbacks have completed */
2757 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2758 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2759 	dma_fence_put(vm->last_tlb_flush);
2760 
2761 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2762 		if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
2763 			amdgpu_vm_prt_fini(adev, vm);
2764 			prt_fini_needed = false;
2765 		}
2766 
2767 		list_del(&mapping->list);
2768 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2769 	}
2770 
2771 	amdgpu_vm_pt_free_root(adev, vm);
2772 	amdgpu_bo_unreserve(root);
2773 	amdgpu_bo_unref(&root);
2774 	WARN_ON(vm->root.bo);
2775 
2776 	amdgpu_vm_fini_entities(vm);
2777 
2778 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2779 		dev_err(adev->dev, "still active bo inside vm\n");
2781 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2782 					     &vm->va.rb_root, rb) {
2783 		/* Don't remove the mapping here, we don't want to trigger a
2784 		 * rebalance and the tree is about to be destroyed anyway.
2785 		 */
2786 		list_del(&mapping->list);
2787 		kfree(mapping);
2788 	}
2789 
2790 	dma_fence_put(vm->last_update);
2791 
2792 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2793 		amdgpu_vmid_free_reserved(adev, vm, i);
2795 
2796 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2797 
2798 	if (!amdgpu_vm_stats_is_zero(vm)) {
2799 		struct amdgpu_task_info *ti = vm->task_info;
2800 
2801 		dev_warn(adev->dev,
2802 			 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2803 			 ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
2804 	}
2805 
2806 	amdgpu_vm_put_task_info(vm->task_info);
2807 }
2808 
2809 /**
2810  * amdgpu_vm_manager_init - init the VM manager
2811  *
2812  * @adev: amdgpu_device pointer
2813  *
2814  * Initialize the VM manager structures
2815  */
2816 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2817 {
2818 	unsigned i;
2819 
2820 	/* Concurrent flushes are only possible starting with Vega10 and
2821 	 * are broken on Navi10 and Navi14.
2822 	 */
2823 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2824 					      adev->asic_type == CHIP_NAVI10 ||
2825 					      adev->asic_type == CHIP_NAVI14);
2826 	amdgpu_vmid_mgr_init(adev);
2827 
2828 	adev->vm_manager.fence_context =
2829 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2830 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2831 		adev->vm_manager.seqno[i] = 0;
2832 
2833 	spin_lock_init(&adev->vm_manager.prt_lock);
2834 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2835 
2836 	/* If not overridden by the user, compute VM tables are updated by
2837 	 * the CPU only on large BAR systems by default.
2838 	 */
2839 #ifdef CONFIG_X86_64
2840 	if (amdgpu_vm_update_mode == -1) {
2841 		/* For asic with VF MMIO access protection
2842 		 * avoid using CPU for VM table updates
2843 		 */
2844 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2845 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2846 			adev->vm_manager.vm_update_mode =
2847 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2848 		else
2849 			adev->vm_manager.vm_update_mode = 0;
2850 	} else
2851 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2852 #else
2853 	adev->vm_manager.vm_update_mode = 0;
2854 #endif
2855 
2856 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2857 }
2858 
2859 /**
2860  * amdgpu_vm_manager_fini - cleanup VM manager
2861  *
2862  * @adev: amdgpu_device pointer
2863  *
2864  * Cleanup the VM manager and free resources.
2865  */
2866 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2867 {
2868 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2869 	xa_destroy(&adev->vm_manager.pasids);
2870 
2871 	amdgpu_vmid_mgr_fini(adev);
2872 }
2873 
2874 /**
2875  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2876  *
2877  * @dev: drm device pointer
2878  * @data: drm_amdgpu_vm
2879  * @filp: drm file pointer
2880  *
2881  * Returns:
2882  * 0 for success, -errno for errors.
2883  */
2884 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2885 {
2886 	union drm_amdgpu_vm *args = data;
2887 	struct amdgpu_device *adev = drm_to_adev(dev);
2888 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2889 	struct amdgpu_vm *vm = &fpriv->vm;
2890 
2891 	/* No valid flags defined yet */
2892 	if (args->in.flags)
2893 		return -EINVAL;
2894 
2895 	switch (args->in.op) {
2896 	case AMDGPU_VM_OP_RESERVE_VMID:
2897 		/* We only need to reserve a VMID from the gfxhub */
2898 		amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
2899 		break;
2900 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2901 		amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
2902 		break;
2903 	default:
2904 		return -EINVAL;
2905 	}
2906 
2907 	return 0;
2908 }
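
/*
 * Userspace reaches this through DRM_IOCTL_AMDGPU_VM. A hedged sketch of
 * a VMID reservation request, with field names as in amdgpu_drm.h and
 * drmCommandWriteRead() from libdrm:
 *
 *	union drm_amdgpu_vm args = {
 *		.in.op = AMDGPU_VM_OP_RESERVE_VMID,
 *		.in.flags = 0,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */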
2909 
2910 /**
2911  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2912  * @adev: amdgpu device pointer
2913  * @pasid: PASID of the VM
2914  * @vmid: VMID, only used for GFX 9.4.3.
2915  * @node_id: Node_id received in IH cookie. Only applicable for
2916  *           GFX 9.4.3.
2917  * @addr: Address of the fault
2918  * @ts: Timestamp of the fault
2919  * @write_fault: true if write fault, false if read fault
2920  *
2921  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2922  * shouldn't be reported any more.
2923  */
2924 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2925 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2926 			    bool write_fault)
2927 {
2928 	bool is_compute_context = false;
2929 	struct amdgpu_bo *root;
2930 	unsigned long irqflags;
2931 	uint64_t value, flags;
2932 	struct amdgpu_vm *vm;
2933 	int r;
2934 
2935 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2936 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2937 	if (vm) {
2938 		root = amdgpu_bo_ref(vm->root.bo);
2939 		is_compute_context = vm->is_compute_context;
2940 	} else {
2941 		root = NULL;
2942 	}
2943 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2944 
2945 	if (!root)
2946 		return false;
2947 
2948 	addr /= AMDGPU_GPU_PAGE_SIZE;
2949 
2950 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2951 	    node_id, addr, ts, write_fault)) {
2952 		amdgpu_bo_unref(&root);
2953 		return true;
2954 	}
2955 
2956 	r = amdgpu_bo_reserve(root, true);
2957 	if (r)
2958 		goto error_unref;
2959 
2960 	/* Double check that the VM still exists */
2961 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2962 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2963 	if (vm && vm->root.bo != root)
2964 		vm = NULL;
2965 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2966 	if (!vm)
2967 		goto error_unlock;
2968 
2969 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2970 		AMDGPU_PTE_SYSTEM;
2971 
2972 	if (is_compute_context) {
2973 		/* Intentionally setting invalid PTE flag
2974 		 * combination to force a no-retry-fault
2975 		 */
2976 		flags = AMDGPU_VM_NORETRY_FLAGS;
2977 		value = 0;
2978 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2979 		/* Redirect the access to the dummy page */
2980 		value = adev->dummy_page_addr;
2981 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2982 			AMDGPU_PTE_WRITEABLE;
2983 
2984 	} else {
2985 		/* Let the hw retry silently on the PTE */
2986 		value = 0;
2987 	}
2988 
2989 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2990 	if (r) {
2991 		pr_debug("failed %d to reserve fence slot\n", r);
2992 		goto error_unlock;
2993 	}
2994 
2995 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2996 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2997 	if (r)
2998 		goto error_unlock;
2999 
3000 	r = amdgpu_vm_update_pdes(adev, vm, true);
3001 
3002 error_unlock:
3003 	amdgpu_bo_unreserve(root);
3004 	if (r < 0)
3005 		dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
3006 
3007 error_unref:
3008 	amdgpu_bo_unref(&root);
3009 
3010 	return false;
3011 }
3012 
3013 #if defined(CONFIG_DEBUG_FS)
3014 /**
3015  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3016  *
3017  * @vm: Requested VM for printing BO info
3018  * @m: debugfs file
3019  *
3020  * Print BO information in debugfs file for the VM
3021  */
3022 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3023 {
3024 	struct amdgpu_bo_va *bo_va, *tmp;
3025 	u64 total_idle = 0;
3026 	u64 total_evicted = 0;
3027 	u64 total_relocated = 0;
3028 	u64 total_moved = 0;
3029 	u64 total_invalidated = 0;
3030 	u64 total_done = 0;
3031 	unsigned int total_idle_objs = 0;
3032 	unsigned int total_evicted_objs = 0;
3033 	unsigned int total_relocated_objs = 0;
3034 	unsigned int total_moved_objs = 0;
3035 	unsigned int total_invalidated_objs = 0;
3036 	unsigned int total_done_objs = 0;
3037 	unsigned int id = 0;
3038 
3039 	amdgpu_vm_assert_locked(vm);
3040 
3041 	seq_puts(m, "\tIdle BOs:\n");
3042 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3043 		if (!bo_va->base.bo)
3044 			continue;
3045 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3046 	}
3047 	total_idle_objs = id;
3048 	id = 0;
3049 
3050 	seq_puts(m, "\tEvicted BOs:\n");
3051 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3052 		if (!bo_va->base.bo)
3053 			continue;
3054 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3055 	}
3056 	total_evicted_objs = id;
3057 	id = 0;
3058 
3059 	seq_puts(m, "\tRelocated BOs:\n");
3060 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3061 		if (!bo_va->base.bo)
3062 			continue;
3063 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3064 	}
3065 	total_relocated_objs = id;
3066 	id = 0;
3067 
3068 	seq_puts(m, "\tMoved BOs:\n");
3069 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3070 		if (!bo_va->base.bo)
3071 			continue;
3072 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3073 	}
3074 	total_moved_objs = id;
3075 	id = 0;
3076 
3077 	seq_puts(m, "\tInvalidated BOs:\n");
3078 	spin_lock(&vm->invalidated_lock);
3079 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3080 		if (!bo_va->base.bo)
3081 			continue;
3082 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3083 	}
3084 	spin_unlock(&vm->invalidated_lock);
3085 	total_invalidated_objs = id;
3086 	id = 0;
3087 
3088 	seq_puts(m, "\tDone BOs:\n");
3089 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3090 		if (!bo_va->base.bo)
3091 			continue;
3092 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3093 	}
3094 	total_done_objs = id;
3095 
3096 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3097 		   total_idle_objs);
3098 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3099 		   total_evicted_objs);
3100 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3101 		   total_relocated_objs);
3102 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3103 		   total_moved_objs);
3104 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3105 		   total_invalidated_objs);
3106 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3107 		   total_done_objs);
3108 }
3109 #endif
3110 
3111 /**
3112  * amdgpu_vm_update_fault_cache - update cached fault info.
3113  * @adev: amdgpu device pointer
3114  * @pasid: PASID of the VM
3115  * @addr: Address of the fault
3116  * @status: GPUVM fault status register
3117  * @vmhub: which vmhub got the fault
3118  *
3119  * Cache the fault info for later use by userspace in debugging.
3120  */
3121 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3122 				  unsigned int pasid,
3123 				  uint64_t addr,
3124 				  uint32_t status,
3125 				  unsigned int vmhub)
3126 {
3127 	struct amdgpu_vm *vm;
3128 	unsigned long flags;
3129 
3130 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3131 
3132 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3133 	/* Don't update the fault cache if status is 0.  In the multiple
3134 	 * fault case, subsequent faults will return a 0 status which is
3135 	 * useless for userspace and replaces the useful fault status, so
3136 	 * only update if status is non-0.
3137 	 */
3138 	if (vm && status) {
3139 		vm->fault_info.addr = addr;
3140 		vm->fault_info.status = status;
3141 		/*
3142 		 * Update the fault information globally for later usage
3143 		 * when vm could be stale or freed.
3144 		 */
3145 		adev->vm_manager.fault_info.addr = addr;
3146 		adev->vm_manager.fault_info.vmhub = vmhub;
3147 		adev->vm_manager.fault_info.status = status;
3148 
3149 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3150 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3151 			vm->fault_info.vmhub |=
3152 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3153 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3154 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3155 			vm->fault_info.vmhub |=
3156 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3157 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3158 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3159 			vm->fault_info.vmhub |=
3160 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3161 		} else {
3162 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3163 		}
3164 	}
3165 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3166 }
3167 
3168 /**
3169  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3170  *
3171  * @vm: VM to test against.
3172  * @bo: BO to be tested.
3173  *
3174  * Returns true if the BO shares the dma_resv object with the root PD and is
3175  * always guaranteed to be valid inside the VM.
3176  */
3177 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3178 {
3179 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3180 }
3181 
3182 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
3183 			       struct amdgpu_task_info *task_info)
3184 {
3185 	dev_err(adev->dev,
3186 		" Process %s pid %d thread %s pid %d\n",
3187 		task_info->process_name, task_info->tgid,
3188 		task_info->task.comm, task_info->task.pid);
3189 }
3190