xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision d728fd03e5f2117853d91b3626d434a97fe896d1)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47 
48 /**
49  * DOC: GPUVM
50  *
51  * GPUVM is the MMU functionality provided on the GPU.
52  * GPUVM is similar to the legacy GART on older ASICs; however,
53  * rather than there being a single global GART table
54  * for the entire GPU, there can be multiple GPUVM page tables active
55  * at any given time.  The GPUVM page tables can contain a mix of
56  * VRAM pages and system pages (both memory and MMIO), and system pages
57  * can be mapped as snooped (cached system pages) or unsnooped
58  * (uncached system pages).
59  *
60  * Each active GPUVM has an ID associated with it and there is a page table
61  * linked with each VMID.  When executing a command buffer,
62  * the kernel tells the engine what VMID to use for that command
63  * buffer.  VMIDs are allocated dynamically as commands are submitted.
64  * The userspace drivers maintain their own address space and the kernel
65  * sets up their page tables accordingly when they submit their
66  * command buffers and a VMID is assigned.
67  * The hardware supports up to 16 active GPUVMs at any given time.
68  *
69  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
71  * as other features such as encryption and caching attributes.
72  *
73  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
74  * addition to an aperture managed by a page table, VMID 0 also has
75  * several other apertures.  There is an aperture for direct access to VRAM
76  * and there is a legacy AGP aperture which just forwards accesses directly
77  * to the matching system physical addresses (or IOVAs when an IOMMU is
78  * present).  These apertures provide direct access to these memories without
79  * incurring the overhead of a page table.  VMID 0 is used by the kernel
80  * driver for tasks like memory management.
81  *
82  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83  * For user applications, each application can have its own unique GPUVM
84  * address space.  The application manages the address space and the kernel
85  * driver manages the GPUVM page tables for each process.  If a GPU client
86  * accesses an invalid page, it will generate a GPU page fault, similar to
87  * accessing an invalid page on a CPU.
88  */
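/*
 * A rough, simplified sketch of how a command submission typically exercises
 * this interface (not literal driver code; drm_exec setup, error handling and
 * the caller's validation callback "validate_cb"/"param" are placeholders):
 *
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 1);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	r = amdgpu_vm_validate(adev, vm, NULL, validate_cb, param);
 *	r = amdgpu_vm_handle_moved(adev, vm, NULL);
 *	r = amdgpu_vm_update_pdes(adev, vm, false);
 *	r = amdgpu_vm_flush(ring, job, amdgpu_vm_need_pipeline_sync(ring, job));
 */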
89 
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92 
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 		     START, LAST, static, amdgpu_vm_it)
95 
96 #undef START
97 #undef LAST
98 
99 /**
100  * struct amdgpu_prt_cb - Helper to disable the partially resident texture (PRT) feature from a fence callback
101  */
102 struct amdgpu_prt_cb {
103 
104 	/**
105 	 * @adev: amdgpu device
106 	 */
107 	struct amdgpu_device *adev;
108 
109 	/**
110 	 * @cb: callback
111 	 */
112 	struct dma_fence_cb cb;
113 };
114 
115 /**
116  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117  */
118 struct amdgpu_vm_tlb_seq_struct {
119 	/**
120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 	 */
122 	struct amdgpu_vm *vm;
123 
124 	/**
125 	 * @cb: callback
126 	 */
127 	struct dma_fence_cb cb;
128 };
129 
130 /**
131  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
132  *
133  * @adev: amdgpu_device pointer
134  * @vm: amdgpu_vm pointer
135  * @pasid: the pasid the VM is using on this GPU
136  *
137  * Set the pasid this VM is using on this GPU; this can also be used to remove the
138  * pasid by passing in zero.
139  *
140  */
141 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
142 			u32 pasid)
143 {
144 	int r;
145 
146 	if (vm->pasid == pasid)
147 		return 0;
148 
149 	if (vm->pasid) {
150 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
151 		if (r < 0)
152 			return r;
153 
154 		vm->pasid = 0;
155 	}
156 
157 	if (pasid) {
158 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
159 					GFP_KERNEL));
160 		if (r < 0)
161 			return r;
162 
163 		vm->pasid = pasid;
164 	}
165 
166 
167 	return 0;
168 }
169 
170 /**
171  * amdgpu_vm_bo_evicted - vm_bo is evicted
172  *
173  * @vm_bo: vm_bo which is evicted
174  *
175  * State for PDs/PTs and per VM BOs which are not at the location they should
176  * be.
177  */
178 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
179 {
180 	struct amdgpu_vm *vm = vm_bo->vm;
181 	struct amdgpu_bo *bo = vm_bo->bo;
182 
183 	vm_bo->moved = true;
184 	spin_lock(&vm_bo->vm->status_lock);
185 	if (bo->tbo.type == ttm_bo_type_kernel)
186 		list_move(&vm_bo->vm_status, &vm->evicted);
187 	else
188 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
189 	spin_unlock(&vm_bo->vm->status_lock);
190 }
191 /**
192  * amdgpu_vm_bo_moved - vm_bo is moved
193  *
194  * @vm_bo: vm_bo which is moved
195  *
196  * State for per VM BOs which are moved, but that change is not yet reflected
197  * in the page tables.
198  */
199 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
200 {
201 	spin_lock(&vm_bo->vm->status_lock);
202 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
203 	spin_unlock(&vm_bo->vm->status_lock);
204 }
205 
206 /**
207  * amdgpu_vm_bo_idle - vm_bo is idle
208  *
209  * @vm_bo: vm_bo which is now idle
210  *
211  * State for PDs/PTs and per VM BOs which have gone through the state machine
212  * and are now idle.
213  */
214 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
215 {
216 	spin_lock(&vm_bo->vm->status_lock);
217 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
218 	spin_unlock(&vm_bo->vm->status_lock);
219 	vm_bo->moved = false;
220 }
221 
222 /**
223  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
224  *
225  * @vm_bo: vm_bo which is now invalidated
226  *
227  * State for normal BOs which are invalidated and that change is not yet reflected
228  * in the PTs.
229  */
230 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
231 {
232 	spin_lock(&vm_bo->vm->status_lock);
233 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
234 	spin_unlock(&vm_bo->vm->status_lock);
235 }
236 
237 /**
238  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
239  *
240  * @vm_bo: vm_bo which is evicted
241  *
242  * State for BOs used by user mode queues which are not at the location they
243  * should be.
244  */
245 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
246 {
247 	vm_bo->moved = true;
248 	spin_lock(&vm_bo->vm->status_lock);
249 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
250 	spin_unlock(&vm_bo->vm->status_lock);
251 }
252 
253 /**
254  * amdgpu_vm_bo_relocated - vm_bo is relocated
255  *
256  * @vm_bo: vm_bo which is relocated
257  *
258  * State for PDs/PTs which need to update their parent PD.
259  * For the root PD, just move to idle state.
260  */
261 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
262 {
263 	if (vm_bo->bo->parent) {
264 		spin_lock(&vm_bo->vm->status_lock);
265 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
266 		spin_unlock(&vm_bo->vm->status_lock);
267 	} else {
268 		amdgpu_vm_bo_idle(vm_bo);
269 	}
270 }
271 
272 /**
273  * amdgpu_vm_bo_done - vm_bo is done
274  *
275  * @vm_bo: vm_bo which is now done
276  *
277  * State for normal BOs which are invalidated and that change has been updated
278  * in the PTs.
279  */
280 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
281 {
282 	spin_lock(&vm_bo->vm->status_lock);
283 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
284 	spin_unlock(&vm_bo->vm->status_lock);
285 }
286 
287 /**
288  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
289  * @vm: the VM which state machine to reset
290  *
291  * Move all vm_bo objects in the VM into a state where they will be updated
292  * again during validation.
293  */
294 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
295 {
296 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
297 
298 	spin_lock(&vm->status_lock);
299 	list_splice_init(&vm->done, &vm->invalidated);
300 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
301 		vm_bo->moved = true;
302 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
303 		struct amdgpu_bo *bo = vm_bo->bo;
304 
305 		vm_bo->moved = true;
306 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
307 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
308 		else if (bo->parent)
309 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
310 	}
311 	spin_unlock(&vm->status_lock);
312 }
313 
314 /**
315  * amdgpu_vm_update_shared - helper to update shared memory stat
316  * @base: base structure for tracking BO usage in a VM
317  *
318  * Takes the vm status_lock and updates the shared memory stat. If the basic
319  * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called
320  * as well.
321  */
322 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
323 {
324 	struct amdgpu_vm *vm = base->vm;
325 	struct amdgpu_bo *bo = base->bo;
326 	uint64_t size = amdgpu_bo_size(bo);
327 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
328 	bool shared;
329 
330 	spin_lock(&vm->status_lock);
331 	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
332 	if (base->shared != shared) {
333 		base->shared = shared;
334 		if (shared) {
335 			vm->stats[bo_memtype].drm.shared += size;
336 			vm->stats[bo_memtype].drm.private -= size;
337 		} else {
338 			vm->stats[bo_memtype].drm.shared -= size;
339 			vm->stats[bo_memtype].drm.private += size;
340 		}
341 	}
342 	spin_unlock(&vm->status_lock);
343 }
344 
345 /**
346  * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
347  * @bo: amdgpu buffer object
348  *
349  * Update the per-VM stats in all VMs this BO belongs to, if needed, when it
350  * changes from private to shared or vice versa.
351  */
352 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
353 {
354 	struct amdgpu_vm_bo_base *base;
355 
356 	for (base = bo->vm_bo; base; base = base->next)
357 		amdgpu_vm_update_shared(base);
358 }
359 
360 /**
361  * amdgpu_vm_update_stats_locked - helper to update normal memory stat
362  * @base: base structure for tracking BO usage in a VM
363  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
364  *        be bo->tbo.resource
365  * @sign: if we should add (+1) or subtract (-1) from the stat
366  *
367  * Caller needs to have the vm status_lock held. Useful when multiple updates
368  * need to happen at the same time.
369  */
370 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
371 			    struct ttm_resource *res, int sign)
372 {
373 	struct amdgpu_vm *vm = base->vm;
374 	struct amdgpu_bo *bo = base->bo;
375 	int64_t size = sign * amdgpu_bo_size(bo);
376 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
377 
378 	/* For drm-total- and drm-shared-, BOs are accounted by their preferred
379 	 * placement, see also amdgpu_bo_mem_stats_placement.
380 	 */
381 	if (base->shared)
382 		vm->stats[bo_memtype].drm.shared += size;
383 	else
384 		vm->stats[bo_memtype].drm.private += size;
385 
386 	if (res && res->mem_type < __AMDGPU_PL_NUM) {
387 		uint32_t res_memtype = res->mem_type;
388 
389 		vm->stats[res_memtype].drm.resident += size;
390 		/* A BO only counts as purgeable if it is resident,
391 		 * since otherwise there's nothing to purge.
392 		 */
393 		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
394 			vm->stats[res_memtype].drm.purgeable += size;
395 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
396 			vm->stats[bo_memtype].evicted += size;
397 	}
398 }
399 
400 /**
401  * amdgpu_vm_update_stats - helper to update normal memory stat
402  * @base: base structure for tracking BO usage in a VM
403  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
404  *        be bo->tbo.resource
405  * @sign: if we should add (+1) or subtract (-1) from the stat
406  *
407  * Updates the basic memory stat when bo is added/deleted/moved.
408  */
409 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
410 			    struct ttm_resource *res, int sign)
411 {
412 	struct amdgpu_vm *vm = base->vm;
413 
414 	spin_lock(&vm->status_lock);
415 	amdgpu_vm_update_stats_locked(base, res, sign);
416 	spin_unlock(&vm->status_lock);
417 }
418 
419 /**
420  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
421  *
422  * @base: base structure for tracking BO usage in a VM
423  * @vm: vm to which bo is to be added
424  * @bo: amdgpu buffer object
425  *
426  * Initialize a bo_va_base structure and add it to the appropriate lists
427  *
428  */
429 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
430 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
431 {
432 	base->vm = vm;
433 	base->bo = bo;
434 	base->next = NULL;
435 	INIT_LIST_HEAD(&base->vm_status);
436 
437 	if (!bo)
438 		return;
439 	base->next = bo->vm_bo;
440 	bo->vm_bo = base;
441 
442 	spin_lock(&vm->status_lock);
443 	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
444 	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
445 	spin_unlock(&vm->status_lock);
446 
447 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
448 		return;
449 
450 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
451 
452 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
453 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
454 		amdgpu_vm_bo_relocated(base);
455 	else
456 		amdgpu_vm_bo_idle(base);
457 
458 	if (bo->preferred_domains &
459 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
460 		return;
461 
462 	/*
463 	 * We checked all the prerequisites, but it looks like this per VM BO
464 	 * is currently evicted. Add the BO to the evicted list to make sure it
465 	 * is validated on next VM use to avoid faults.
466 	 */
467 	amdgpu_vm_bo_evicted(base);
468 }
469 
470 /**
471  * amdgpu_vm_lock_pd - lock PD in drm_exec
472  *
473  * @vm: vm providing the BOs
474  * @exec: drm execution context
475  * @num_fences: number of extra fences to reserve
476  *
477  * Lock the VM root PD in the DRM execution context.
478  */
479 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
480 		      unsigned int num_fences)
481 {
482 	/* We need at least two fences for the VM PD/PT updates */
483 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
484 				    2 + num_fences);
485 }
486 
487 /**
488  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
489  *
490  * @adev: amdgpu device pointer
491  * @vm: vm providing the BOs
492  *
493  * Move all BOs to the end of LRU and remember their positions to put them
494  * together.
495  */
496 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
497 				struct amdgpu_vm *vm)
498 {
499 	spin_lock(&adev->mman.bdev.lru_lock);
500 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
501 	spin_unlock(&adev->mman.bdev.lru_lock);
502 }
503 
504 /* Create scheduler entities for page table updates */
505 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
506 				   struct amdgpu_vm *vm)
507 {
508 	int r;
509 
510 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
511 				  adev->vm_manager.vm_pte_scheds,
512 				  adev->vm_manager.vm_pte_num_scheds, NULL);
513 	if (r)
514 		goto error;
515 
516 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
517 				     adev->vm_manager.vm_pte_scheds,
518 				     adev->vm_manager.vm_pte_num_scheds, NULL);
519 
520 error:
521 	drm_sched_entity_destroy(&vm->immediate);
522 	return r;
523 }
524 
525 /* Destroy the entities for page table updates again */
526 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
527 {
528 	drm_sched_entity_destroy(&vm->immediate);
529 	drm_sched_entity_destroy(&vm->delayed);
530 }
531 
532 /**
533  * amdgpu_vm_generation - return the page table re-generation counter
534  * @adev: the amdgpu_device
535  * @vm: optional VM to check, might be NULL
536  *
537  * Returns a page table re-generation token to allow checking if submissions
538  * are still valid to use this VM. The VM parameter might be NULL in which case
539  * just the VRAM lost counter will be used.
540  */
541 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
542 {
543 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
544 
545 	if (!vm)
546 		return result;
547 
548 	result += lower_32_bits(vm->generation);
549 	/* Add one if the page tables will be re-generated on next CS */
550 	if (drm_sched_entity_error(&vm->delayed))
551 		++result;
552 
553 	return result;
554 }
555 
556 /**
557  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
558  *
559  * @adev: amdgpu device pointer
560  * @vm: vm providing the BOs
561  * @ticket: optional reservation ticket used to reserve the VM
562  * @validate: callback to do the validation
563  * @param: parameter for the validation callback
564  *
565  * Validate the page table BOs and per-VM BOs on command submission if
566  * necessary. If a ticket is given, also try to validate evicted user queue
567  * BOs. They must already be reserved with the given ticket.
568  *
569  * Returns:
570  * Validation result.
571  */
572 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
573 		       struct ww_acquire_ctx *ticket,
574 		       int (*validate)(void *p, struct amdgpu_bo *bo),
575 		       void *param)
576 {
577 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
578 	struct amdgpu_vm_bo_base *bo_base;
579 	struct amdgpu_bo *bo;
580 	int r;
581 
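	/* If the page tables were re-generated (e.g. after VRAM loss or a
	 * scheduler entity error), re-create the update entities and reset the
	 * BO state machine so everything gets revalidated.
	 */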
582 	if (vm->generation != new_vm_generation) {
583 		vm->generation = new_vm_generation;
584 		amdgpu_vm_bo_reset_state_machine(vm);
585 		amdgpu_vm_fini_entities(vm);
586 		r = amdgpu_vm_init_entities(adev, vm);
587 		if (r)
588 			return r;
589 	}
590 
591 	spin_lock(&vm->status_lock);
592 	while (!list_empty(&vm->evicted)) {
593 		bo_base = list_first_entry(&vm->evicted,
594 					   struct amdgpu_vm_bo_base,
595 					   vm_status);
596 		spin_unlock(&vm->status_lock);
597 
598 		bo = bo_base->bo;
599 
600 		r = validate(param, bo);
601 		if (r)
602 			return r;
603 
604 		if (bo->tbo.type != ttm_bo_type_kernel) {
605 			amdgpu_vm_bo_moved(bo_base);
606 		} else {
607 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
608 			amdgpu_vm_bo_relocated(bo_base);
609 		}
610 		spin_lock(&vm->status_lock);
611 	}
612 	while (ticket && !list_empty(&vm->evicted_user)) {
613 		bo_base = list_first_entry(&vm->evicted_user,
614 					   struct amdgpu_vm_bo_base,
615 					   vm_status);
616 		spin_unlock(&vm->status_lock);
617 
618 		bo = bo_base->bo;
619 
620 		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
621 			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
622 
623 			pr_warn_ratelimited("Evicted user BO is not reserved\n");
624 			if (ti) {
625 				pr_warn_ratelimited("pid %d\n", ti->task.pid);
626 				amdgpu_vm_put_task_info(ti);
627 			}
628 
629 			return -EINVAL;
630 		}
631 
632 		r = validate(param, bo);
633 		if (r)
634 			return r;
635 
636 		amdgpu_vm_bo_invalidated(bo_base);
637 
638 		spin_lock(&vm->status_lock);
639 	}
640 	spin_unlock(&vm->status_lock);
641 
642 	amdgpu_vm_eviction_lock(vm);
643 	vm->evicting = false;
644 	amdgpu_vm_eviction_unlock(vm);
645 
646 	return 0;
647 }
648 
649 /**
650  * amdgpu_vm_ready - check VM is ready for updates
651  *
652  * @vm: VM to check
653  *
654  * Check if all VM PDs/PTs are ready for updates
655  *
656  * Returns:
657  * True if VM is not evicting and all VM entities are not stopped
658  */
659 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
660 {
661 	bool ret;
662 
663 	amdgpu_vm_eviction_lock(vm);
664 	ret = !vm->evicting;
665 	amdgpu_vm_eviction_unlock(vm);
666 
667 	spin_lock(&vm->status_lock);
668 	ret &= list_empty(&vm->evicted);
669 	spin_unlock(&vm->status_lock);
670 
671 	spin_lock(&vm->immediate.lock);
672 	ret &= !vm->immediate.stopped;
673 	spin_unlock(&vm->immediate.lock);
674 
675 	spin_lock(&vm->delayed.lock);
676 	ret &= !vm->delayed.stopped;
677 	spin_unlock(&vm->delayed.lock);
678 
679 	return ret;
680 }
681 
682 /**
683  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
684  *
685  * @adev: amdgpu_device pointer
686  */
687 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
688 {
689 	const struct amdgpu_ip_block *ip_block;
690 	bool has_compute_vm_bug;
691 	struct amdgpu_ring *ring;
692 	int i;
693 
694 	has_compute_vm_bug = false;
695 
696 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
697 	if (ip_block) {
698 		/* Compute has a VM bug for GFX version < 7.
699 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
700 		if (ip_block->version->major <= 7)
701 			has_compute_vm_bug = true;
702 		else if (ip_block->version->major == 8)
703 			if (adev->gfx.mec_fw_version < 673)
704 				has_compute_vm_bug = true;
705 	}
706 
707 	for (i = 0; i < adev->num_rings; i++) {
708 		ring = adev->rings[i];
709 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
710 			/* only compute rings */
711 			ring->has_compute_vm_bug = has_compute_vm_bug;
712 		else
713 			ring->has_compute_vm_bug = false;
714 	}
715 }
716 
717 /**
718  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
719  *
720  * @ring: ring on which the job will be submitted
721  * @job: job to submit
722  *
723  * Returns:
724  * True if sync is needed.
725  */
726 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
727 				  struct amdgpu_job *job)
728 {
729 	struct amdgpu_device *adev = ring->adev;
730 	unsigned vmhub = ring->vm_hub;
731 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
732 
733 	if (job->vmid == 0)
734 		return false;
735 
736 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
737 		return true;
738 
739 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
740 		return true;
741 
742 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
743 		return true;
744 
745 	return false;
746 }
747 
748 /**
749  * amdgpu_vm_flush - hardware flush the vm
750  *
751  * @ring: ring to use for flush
752  * @job:  related job
753  * @need_pipe_sync: is pipe sync needed
754  *
755  * Emit a VM flush when it is necessary.
756  *
757  * Returns:
758  * 0 on success, errno otherwise.
759  */
760 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
761 		    bool need_pipe_sync)
762 {
763 	struct amdgpu_device *adev = ring->adev;
764 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
765 	unsigned vmhub = ring->vm_hub;
766 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
767 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
768 	bool spm_update_needed = job->spm_update_needed;
769 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
770 		job->gds_switch_needed;
771 	bool vm_flush_needed = job->vm_needs_flush;
772 	bool cleaner_shader_needed = false;
773 	bool pasid_mapping_needed = false;
774 	struct dma_fence *fence = NULL;
775 	struct amdgpu_fence *af;
776 	unsigned int patch;
777 	int r;
778 
779 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
780 		gds_switch_needed = true;
781 		vm_flush_needed = true;
782 		pasid_mapping_needed = true;
783 		spm_update_needed = true;
784 	}
785 
786 	mutex_lock(&id_mgr->lock);
787 	if (id->pasid != job->pasid || !id->pasid_mapping ||
788 	    !dma_fence_is_signaled(id->pasid_mapping))
789 		pasid_mapping_needed = true;
790 	mutex_unlock(&id_mgr->lock);
791 
792 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
793 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
794 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
795 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
796 		ring->funcs->emit_wreg;
797 
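	/* Only the job whose scheduled fence was picked as the isolation
	 * spearhead runs the cleaner shader, see the spearhead update below.
	 */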
798 	cleaner_shader_needed = job->run_cleaner_shader &&
799 		adev->gfx.enable_cleaner_shader &&
800 		ring->funcs->emit_cleaner_shader && job->base.s_fence &&
801 		&job->base.s_fence->scheduled == isolation->spearhead;
802 
803 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
804 	    !cleaner_shader_needed)
805 		return 0;
806 
807 	amdgpu_ring_ib_begin(ring);
808 	if (ring->funcs->init_cond_exec)
809 		patch = amdgpu_ring_init_cond_exec(ring,
810 						   ring->cond_exe_gpu_addr);
811 
812 	if (need_pipe_sync)
813 		amdgpu_ring_emit_pipeline_sync(ring);
814 
815 	if (cleaner_shader_needed)
816 		ring->funcs->emit_cleaner_shader(ring);
817 
818 	if (vm_flush_needed) {
819 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
820 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
821 	}
822 
823 	if (pasid_mapping_needed)
824 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
825 
826 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
827 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
828 
829 	if (ring->funcs->emit_gds_switch &&
830 	    gds_switch_needed) {
831 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
832 					    job->gds_size, job->gws_base,
833 					    job->gws_size, job->oa_base,
834 					    job->oa_size);
835 	}
836 
837 	if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
838 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
839 		if (r)
840 			return r;
841 		/* this is part of the job's context */
842 		af = container_of(fence, struct amdgpu_fence, base);
843 		af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
844 	}
845 
846 	if (vm_flush_needed) {
847 		mutex_lock(&id_mgr->lock);
848 		dma_fence_put(id->last_flush);
849 		id->last_flush = dma_fence_get(fence);
850 		id->current_gpu_reset_count =
851 			atomic_read(&adev->gpu_reset_counter);
852 		mutex_unlock(&id_mgr->lock);
853 	}
854 
855 	if (pasid_mapping_needed) {
856 		mutex_lock(&id_mgr->lock);
857 		id->pasid = job->pasid;
858 		dma_fence_put(id->pasid_mapping);
859 		id->pasid_mapping = dma_fence_get(fence);
860 		mutex_unlock(&id_mgr->lock);
861 	}
862 
863 	/*
864 	 * Make sure that all other submissions wait for the cleaner shader to
865 	 * finish before we push them to the HW.
866 	 */
867 	if (cleaner_shader_needed) {
868 		trace_amdgpu_cleaner_shader(ring, fence);
869 		mutex_lock(&adev->enforce_isolation_mutex);
870 		dma_fence_put(isolation->spearhead);
871 		isolation->spearhead = dma_fence_get(fence);
872 		mutex_unlock(&adev->enforce_isolation_mutex);
873 	}
874 	dma_fence_put(fence);
875 
876 	amdgpu_ring_patch_cond_exec(ring, patch);
877 
878 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
879 	if (ring->funcs->emit_switch_buffer) {
880 		amdgpu_ring_emit_switch_buffer(ring);
881 		amdgpu_ring_emit_switch_buffer(ring);
882 	}
883 
884 	amdgpu_ring_ib_end(ring);
885 	return 0;
886 }
887 
888 /**
889  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
890  *
891  * @vm: requested vm
892  * @bo: requested buffer object
893  *
894  * Find @bo inside the requested vm.
895  * Search inside the @bo's vm list for the requested vm.
896  * Returns the found bo_va or NULL if none is found.
897  *
898  * Object has to be reserved!
899  *
900  * Returns:
901  * Found bo_va or NULL.
902  */
903 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
904 				       struct amdgpu_bo *bo)
905 {
906 	struct amdgpu_vm_bo_base *base;
907 
908 	for (base = bo->vm_bo; base; base = base->next) {
909 		if (base->vm != vm)
910 			continue;
911 
912 		return container_of(base, struct amdgpu_bo_va, base);
913 	}
914 	return NULL;
915 }
916 
917 /**
918  * amdgpu_vm_map_gart - Resolve gart mapping of addr
919  *
920  * @pages_addr: optional DMA address to use for lookup
921  * @addr: the unmapped addr
922  *
923  * Look up the physical address of the page that the pte resolves
924  * to.
925  *
926  * Returns:
927  * The pointer for the page table entry.
928  */
929 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
930 {
931 	uint64_t result;
932 
933 	/* page table offset */
934 	result = pages_addr[addr >> PAGE_SHIFT];
935 
936 	/* in case cpu page size != gpu page size */
937 	result |= addr & (~PAGE_MASK);
938 
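	/* the GPU page tables use 4KB granularity, drop any bits below that */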
939 	result &= 0xFFFFFFFFFFFFF000ULL;
940 
941 	return result;
942 }
943 
944 /**
945  * amdgpu_vm_update_pdes - make sure that all directories are valid
946  *
947  * @adev: amdgpu_device pointer
948  * @vm: requested vm
949  * @immediate: submit immediately to the paging queue
950  *
951  * Makes sure all directories are up to date.
952  *
953  * Returns:
954  * 0 for success, error for failure.
955  */
956 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
957 			  struct amdgpu_vm *vm, bool immediate)
958 {
959 	struct amdgpu_vm_update_params params;
960 	struct amdgpu_vm_bo_base *entry;
961 	bool flush_tlb_needed = false;
962 	LIST_HEAD(relocated);
963 	int r, idx;
964 
965 	spin_lock(&vm->status_lock);
966 	list_splice_init(&vm->relocated, &relocated);
967 	spin_unlock(&vm->status_lock);
968 
969 	if (list_empty(&relocated))
970 		return 0;
971 
972 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
973 		return -ENODEV;
974 
975 	memset(&params, 0, sizeof(params));
976 	params.adev = adev;
977 	params.vm = vm;
978 	params.immediate = immediate;
979 
980 	r = vm->update_funcs->prepare(&params, NULL);
981 	if (r)
982 		goto error;
983 
984 	list_for_each_entry(entry, &relocated, vm_status) {
985 		/* vm_flush_needed after updating moved PDEs */
986 		flush_tlb_needed |= entry->moved;
987 
988 		r = amdgpu_vm_pde_update(&params, entry);
989 		if (r)
990 			goto error;
991 	}
992 
993 	r = vm->update_funcs->commit(&params, &vm->last_update);
994 	if (r)
995 		goto error;
996 
997 	if (flush_tlb_needed)
998 		atomic64_inc(&vm->tlb_seq);
999 
1000 	while (!list_empty(&relocated)) {
1001 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
1002 					 vm_status);
1003 		amdgpu_vm_bo_idle(entry);
1004 	}
1005 
1006 error:
1007 	drm_dev_exit(idx);
1008 	return r;
1009 }
1010 
1011 /**
1012  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1013  * @fence: unused
1014  * @cb: the callback structure
1015  *
1016  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1017  */
1018 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1019 				 struct dma_fence_cb *cb)
1020 {
1021 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1022 
1023 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1024 	atomic64_inc(&tlb_cb->vm->tlb_seq);
1025 	kfree(tlb_cb);
1026 }
1027 
1028 /**
1029  * amdgpu_vm_tlb_flush - prepare TLB flush
1030  *
1031  * @params: parameters for update
1032  * @fence: input fence to sync TLB flush with
1033  * @tlb_cb: the callback structure
1034  *
1035  * Increments the tlb sequence (once @fence signals) to make sure that future CS execute a VM flush.
1036  */
1037 static void
1038 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1039 		    struct dma_fence **fence,
1040 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1041 {
1042 	struct amdgpu_vm *vm = params->vm;
1043 
1044 	tlb_cb->vm = vm;
1045 	if (!fence || !*fence) {
1046 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1047 		return;
1048 	}
1049 
1050 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1051 				    amdgpu_vm_tlb_seq_cb)) {
1052 		dma_fence_put(vm->last_tlb_flush);
1053 		vm->last_tlb_flush = dma_fence_get(*fence);
1054 	} else {
1055 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1056 	}
1057 
1058 	/* Prepare a TLB flush fence to be attached to PTs */
1059 	if (!params->unlocked && vm->is_compute_context) {
1060 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1061 
1062 		/* Makes sure no PD/PT is freed before the flush */
1063 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1064 				   DMA_RESV_USAGE_BOOKKEEP);
1065 	}
1066 }
1067 
1068 /**
1069  * amdgpu_vm_update_range - update a range in the vm page table
1070  *
1071  * @adev: amdgpu_device pointer to use for commands
1072  * @vm: the VM to update the range
1073  * @immediate: immediate submission in a page fault
1074  * @unlocked: unlocked invalidation during MM callback
1075  * @flush_tlb: trigger tlb invalidation after update completed
1076  * @allow_override: change MTYPE for local NUMA nodes
1077  * @sync: fences we need to sync to
1078  * @start: start of mapped range
1079  * @last: last mapped entry
1080  * @flags: flags for the entries
1081  * @offset: offset into nodes and pages_addr
1082  * @vram_base: base for vram mappings
1083  * @res: ttm_resource to map
1084  * @pages_addr: DMA addresses to use for mapping
1085  * @fence: optional resulting fence
1086  *
1087  * Fill in the page table entries between @start and @last.
1088  *
1089  * Returns:
1090  * 0 for success, negative error code for failure.
1091  */
1092 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1093 			   bool immediate, bool unlocked, bool flush_tlb,
1094 			   bool allow_override, struct amdgpu_sync *sync,
1095 			   uint64_t start, uint64_t last, uint64_t flags,
1096 			   uint64_t offset, uint64_t vram_base,
1097 			   struct ttm_resource *res, dma_addr_t *pages_addr,
1098 			   struct dma_fence **fence)
1099 {
1100 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1101 	struct amdgpu_vm_update_params params;
1102 	struct amdgpu_res_cursor cursor;
1103 	int r, idx;
1104 
1105 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1106 		return -ENODEV;
1107 
1108 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
1109 	if (!tlb_cb) {
1110 		drm_dev_exit(idx);
1111 		return -ENOMEM;
1112 	}
1113 
1114 	/* On Vega20+XGMI, PTEs get inadvertently cached in the L2 texture cache,
1115 	 * so do a heavy-weight TLB flush unconditionally.
1116 	 */
1117 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1118 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1119 
1120 	/*
1121 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1122 	 */
1123 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1124 
1125 	memset(&params, 0, sizeof(params));
1126 	params.adev = adev;
1127 	params.vm = vm;
1128 	params.immediate = immediate;
1129 	params.pages_addr = pages_addr;
1130 	params.unlocked = unlocked;
1131 	params.needs_flush = flush_tlb;
1132 	params.allow_override = allow_override;
1133 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1134 
1135 	amdgpu_vm_eviction_lock(vm);
1136 	if (vm->evicting) {
1137 		r = -EBUSY;
1138 		goto error_free;
1139 	}
1140 
1141 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1142 		struct dma_fence *tmp = dma_fence_get_stub();
1143 
1144 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1145 		swap(vm->last_unlocked, tmp);
1146 		dma_fence_put(tmp);
1147 	}
1148 
1149 	r = vm->update_funcs->prepare(&params, sync);
1150 	if (r)
1151 		goto error_free;
1152 
1153 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1154 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1155 	while (cursor.remaining) {
1156 		uint64_t tmp, num_entries, addr;
1157 
1158 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1159 		if (pages_addr) {
1160 			bool contiguous = true;
1161 
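			/* Check how many of the following CPU pages are physically
			 * contiguous, so the run can be mapped linearly from a single
			 * base address instead of looking up each page individually.
			 */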
1162 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1163 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1164 				uint64_t count;
1165 
1166 				contiguous = pages_addr[pfn + 1] ==
1167 					pages_addr[pfn] + PAGE_SIZE;
1168 
1169 				tmp = num_entries /
1170 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1171 				for (count = 2; count < tmp; ++count) {
1172 					uint64_t idx = pfn + count;
1173 
1174 					if (contiguous != (pages_addr[idx] ==
1175 					    pages_addr[idx - 1] + PAGE_SIZE))
1176 						break;
1177 				}
1178 				if (!contiguous)
1179 					count--;
1180 				num_entries = count *
1181 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1182 			}
1183 
1184 			if (!contiguous) {
1185 				addr = cursor.start;
1186 				params.pages_addr = pages_addr;
1187 			} else {
1188 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1189 				params.pages_addr = NULL;
1190 			}
1191 
1192 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1193 			addr = vram_base + cursor.start;
1194 		} else {
1195 			addr = 0;
1196 		}
1197 
1198 		tmp = start + num_entries;
1199 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1200 		if (r)
1201 			goto error_free;
1202 
1203 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1204 		start = tmp;
1205 	}
1206 
1207 	r = vm->update_funcs->commit(&params, fence);
1208 	if (r)
1209 		goto error_free;
1210 
1211 	if (params.needs_flush) {
1212 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1213 		tlb_cb = NULL;
1214 	}
1215 
1216 	amdgpu_vm_pt_free_list(adev, &params);
1217 
1218 error_free:
1219 	kfree(tlb_cb);
1220 	amdgpu_vm_eviction_unlock(vm);
1221 	drm_dev_exit(idx);
1222 	return r;
1223 }
1224 
1225 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1226 			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1227 {
1228 	spin_lock(&vm->status_lock);
1229 	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1230 	spin_unlock(&vm->status_lock);
1231 }
1232 
1233 /**
1234  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1235  *
1236  * @adev: amdgpu_device pointer
1237  * @bo_va: requested BO and VM object
1238  * @clear: if true clear the entries
1239  *
1240  * Fill in the page table entries for @bo_va.
1241  *
1242  * Returns:
1243  * 0 for success, -EINVAL for failure.
1244  */
1245 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1246 			bool clear)
1247 {
1248 	struct amdgpu_bo *bo = bo_va->base.bo;
1249 	struct amdgpu_vm *vm = bo_va->base.vm;
1250 	struct amdgpu_bo_va_mapping *mapping;
1251 	struct dma_fence **last_update;
1252 	dma_addr_t *pages_addr = NULL;
1253 	struct ttm_resource *mem;
1254 	struct amdgpu_sync sync;
1255 	bool flush_tlb = clear;
1256 	uint64_t vram_base;
1257 	uint64_t flags;
1258 	bool uncached;
1259 	int r;
1260 
1261 	amdgpu_sync_create(&sync);
1262 	if (clear) {
1263 		mem = NULL;
1264 
1265 		/* Implicitly sync to command submissions in the same VM before
1266 		 * unmapping.
1267 		 */
1268 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1269 				     AMDGPU_SYNC_EQ_OWNER, vm);
1270 		if (r)
1271 			goto error_free;
1272 		if (bo) {
1273 			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1274 			if (r)
1275 				goto error_free;
1276 		}
1277 	} else if (!bo) {
1278 		mem = NULL;
1279 
1280 		/* PRT map operations don't need to sync to anything. */
1281 
1282 	} else {
1283 		struct drm_gem_object *obj = &bo->tbo.base;
1284 
1285 		if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
1286 			struct dma_buf *dma_buf = obj->dma_buf;
1287 			struct drm_gem_object *gobj = dma_buf->priv;
1288 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1289 
1290 			if (abo->tbo.resource &&
1291 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1292 				bo = gem_to_amdgpu_bo(gobj);
1293 		}
1294 		mem = bo->tbo.resource;
1295 		if (mem && (mem->mem_type == TTM_PL_TT ||
1296 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1297 			pages_addr = bo->tbo.ttm->dma_address;
1298 
1299 		/* Implicitly sync to moving fences before mapping anything */
1300 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1301 				     AMDGPU_SYNC_EXPLICIT, vm);
1302 		if (r)
1303 			goto error_free;
1304 	}
1305 
1306 	if (bo) {
1307 		struct amdgpu_device *bo_adev;
1308 
1309 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1310 
1311 		if (amdgpu_bo_encrypted(bo))
1312 			flags |= AMDGPU_PTE_TMZ;
1313 
1314 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1315 		vram_base = bo_adev->vm_manager.vram_base_offset;
1316 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1317 	} else {
1318 		flags = 0x0;
1319 		vram_base = 0;
1320 		uncached = false;
1321 	}
1322 
1323 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1324 		last_update = &vm->last_update;
1325 	else
1326 		last_update = &bo_va->last_pt_update;
1327 
1328 	if (!clear && bo_va->base.moved) {
1329 		flush_tlb = true;
1330 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1331 
1332 	} else if (bo_va->cleared != clear) {
1333 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1334 	}
1335 
1336 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1337 		uint64_t update_flags = flags;
1338 
1339 		/* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits here,
1340 		 * but just in case, we filter the flags in the first place.
1341 		 */
1342 		if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
1343 			update_flags &= ~AMDGPU_PTE_READABLE;
1344 		if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
1345 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1346 
1347 		/* Apply ASIC specific mapping flags */
1348 		amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
1349 				      &update_flags);
1350 
1351 		trace_amdgpu_vm_bo_update(mapping);
1352 
1353 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1354 					   !uncached, &sync, mapping->start,
1355 					   mapping->last, update_flags,
1356 					   mapping->offset, vram_base, mem,
1357 					   pages_addr, last_update);
1358 		if (r)
1359 			goto error_free;
1360 	}
1361 
1362 	/* If the BO is not in its preferred location add it back to
1363 	 * the evicted list so that it gets validated again on the
1364 	 * next command submission.
1365 	 */
1366 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1367 		if (bo->tbo.resource &&
1368 		    !(bo->preferred_domains &
1369 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1370 			amdgpu_vm_bo_evicted(&bo_va->base);
1371 		else
1372 			amdgpu_vm_bo_idle(&bo_va->base);
1373 	} else {
1374 		amdgpu_vm_bo_done(&bo_va->base);
1375 	}
1376 
1377 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1378 	bo_va->cleared = clear;
1379 	bo_va->base.moved = false;
1380 
1381 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1382 		list_for_each_entry(mapping, &bo_va->valids, list)
1383 			trace_amdgpu_vm_bo_mapping(mapping);
1384 	}
1385 
1386 error_free:
1387 	amdgpu_sync_free(&sync);
1388 	return r;
1389 }
1390 
1391 /**
1392  * amdgpu_vm_update_prt_state - update the global PRT state
1393  *
1394  * @adev: amdgpu_device pointer
1395  */
1396 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1397 {
1398 	unsigned long flags;
1399 	bool enable;
1400 
1401 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1402 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1403 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1404 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1405 }
1406 
1407 /**
1408  * amdgpu_vm_prt_get - add a PRT user
1409  *
1410  * @adev: amdgpu_device pointer
1411  */
1412 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1413 {
1414 	if (!adev->gmc.gmc_funcs->set_prt)
1415 		return;
1416 
1417 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1418 		amdgpu_vm_update_prt_state(adev);
1419 }
1420 
1421 /**
1422  * amdgpu_vm_prt_put - drop a PRT user
1423  *
1424  * @adev: amdgpu_device pointer
1425  */
1426 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1427 {
1428 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1429 		amdgpu_vm_update_prt_state(adev);
1430 }
1431 
1432 /**
1433  * amdgpu_vm_prt_cb - callback for updating the PRT status
1434  *
1435  * @fence: fence for the callback
1436  * @_cb: the callback function
1437  */
1438 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1439 {
1440 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1441 
1442 	amdgpu_vm_prt_put(cb->adev);
1443 	kfree(cb);
1444 }
1445 
1446 /**
1447  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1448  *
1449  * @adev: amdgpu_device pointer
1450  * @fence: fence for the callback
1451  */
1452 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1453 				 struct dma_fence *fence)
1454 {
1455 	struct amdgpu_prt_cb *cb;
1456 
1457 	if (!adev->gmc.gmc_funcs->set_prt)
1458 		return;
1459 
1460 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1461 	if (!cb) {
1462 		/* Last resort when we are OOM */
1463 		if (fence)
1464 			dma_fence_wait(fence, false);
1465 
1466 		amdgpu_vm_prt_put(adev);
1467 	} else {
1468 		cb->adev = adev;
1469 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1470 						     amdgpu_vm_prt_cb))
1471 			amdgpu_vm_prt_cb(fence, &cb->cb);
1472 	}
1473 }
1474 
1475 /**
1476  * amdgpu_vm_free_mapping - free a mapping
1477  *
1478  * @adev: amdgpu_device pointer
1479  * @vm: requested vm
1480  * @mapping: mapping to be freed
1481  * @fence: fence of the unmap operation
1482  *
1483  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1484  */
1485 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1486 				   struct amdgpu_vm *vm,
1487 				   struct amdgpu_bo_va_mapping *mapping,
1488 				   struct dma_fence *fence)
1489 {
1490 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1491 		amdgpu_vm_add_prt_cb(adev, fence);
1492 	kfree(mapping);
1493 }
1494 
1495 /**
1496  * amdgpu_vm_prt_fini - finish all prt mappings
1497  *
1498  * @adev: amdgpu_device pointer
1499  * @vm: requested vm
1500  *
1501  * Register a cleanup callback to disable PRT support after VM dies.
1502  */
1503 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1504 {
1505 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1506 	struct dma_resv_iter cursor;
1507 	struct dma_fence *fence;
1508 
1509 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1510 		/* Add a callback for each fence in the reservation object */
1511 		amdgpu_vm_prt_get(adev);
1512 		amdgpu_vm_add_prt_cb(adev, fence);
1513 	}
1514 }
1515 
1516 /**
1517  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1518  *
1519  * @adev: amdgpu_device pointer
1520  * @vm: requested vm
1521  * @fence: optional resulting fence (unchanged if no work needed to be done
1522  * or if an error occurred)
1523  *
1524  * Make sure all freed BOs are cleared in the PT.
1525  * PTs have to be reserved and mutex must be locked!
1526  *
1527  * Returns:
1528  * 0 for success.
1529  *
1530  */
1531 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1532 			  struct amdgpu_vm *vm,
1533 			  struct dma_fence **fence)
1534 {
1535 	struct amdgpu_bo_va_mapping *mapping;
1536 	struct dma_fence *f = NULL;
1537 	struct amdgpu_sync sync;
1538 	int r;
1539 
1540 
1541 	/*
1542 	 * Implicitly sync to command submissions in the same VM before
1543 	 * unmapping.
1544 	 */
1545 	amdgpu_sync_create(&sync);
1546 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1547 			     AMDGPU_SYNC_EQ_OWNER, vm);
1548 	if (r)
1549 		goto error_free;
1550 
1551 	while (!list_empty(&vm->freed)) {
1552 		mapping = list_first_entry(&vm->freed,
1553 			struct amdgpu_bo_va_mapping, list);
1554 		list_del(&mapping->list);
1555 
1556 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1557 					   &sync, mapping->start, mapping->last,
1558 					   0, 0, 0, NULL, NULL, &f);
1559 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1560 		if (r) {
1561 			dma_fence_put(f);
1562 			goto error_free;
1563 		}
1564 	}
1565 
1566 	if (fence && f) {
1567 		dma_fence_put(*fence);
1568 		*fence = f;
1569 	} else {
1570 		dma_fence_put(f);
1571 	}
1572 
1573 error_free:
1574 	amdgpu_sync_free(&sync);
1575 	return r;
1576 
1577 }
1578 
1579 /**
1580  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1581  *
1582  * @adev: amdgpu_device pointer
1583  * @vm: requested vm
1584  * @ticket: optional reservation ticket used to reserve the VM
1585  *
1586  * Make sure all BOs which are moved are updated in the PTs.
1587  *
1588  * Returns:
1589  * 0 for success.
1590  *
1591  * PTs have to be reserved!
1592  */
1593 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1594 			   struct amdgpu_vm *vm,
1595 			   struct ww_acquire_ctx *ticket)
1596 {
1597 	struct amdgpu_bo_va *bo_va;
1598 	struct dma_resv *resv;
1599 	bool clear, unlock;
1600 	int r;
1601 
1602 	spin_lock(&vm->status_lock);
1603 	while (!list_empty(&vm->moved)) {
1604 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1605 					 base.vm_status);
1606 		spin_unlock(&vm->status_lock);
1607 
1608 		/* Per VM BOs never need to be cleared in the page tables */
1609 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1610 		if (r)
1611 			return r;
1612 		spin_lock(&vm->status_lock);
1613 	}
1614 
1615 	while (!list_empty(&vm->invalidated)) {
1616 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1617 					 base.vm_status);
1618 		resv = bo_va->base.bo->tbo.base.resv;
1619 		spin_unlock(&vm->status_lock);
1620 
1621 		/* Try to reserve the BO to avoid clearing its ptes */
1622 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1623 			clear = false;
1624 			unlock = true;
1625 		/* The caller is already holding the reservation lock */
1626 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1627 			clear = false;
1628 			unlock = false;
1629 		/* Somebody else is using the BO right now */
1630 		} else {
1631 			clear = true;
1632 			unlock = false;
1633 		}
1634 
1635 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1636 
1637 		if (unlock)
1638 			dma_resv_unlock(resv);
1639 		if (r)
1640 			return r;
1641 
1642 		/* Remember evicted DMABuf imports in compute VMs for later
1643 		 * validation
1644 		 */
1645 		if (vm->is_compute_context &&
1646 		    drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
1647 		    (!bo_va->base.bo->tbo.resource ||
1648 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1649 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1650 
1651 		spin_lock(&vm->status_lock);
1652 	}
1653 	spin_unlock(&vm->status_lock);
1654 
1655 	return 0;
1656 }
1657 
1658 /**
1659  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1660  *
1661  * @adev: amdgpu_device pointer
1662  * @vm: requested vm
1663  * @flush_type: flush type
1664  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1665  *
1666  * Flush TLB if needed for a compute VM.
1667  *
1668  * Returns:
1669  * 0 for success.
1670  */
1671 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1672 				struct amdgpu_vm *vm,
1673 				uint32_t flush_type,
1674 				uint32_t xcc_mask)
1675 {
1676 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1677 	bool all_hub = false;
1678 	int xcc = 0, r = 0;
1679 
1680 	WARN_ON_ONCE(!vm->is_compute_context);
1681 
1682 	/*
1683 	 * It can be that we race and lose here, but that is extremely unlikely
1684 	 * and the worst thing which could happen is that we flush the changes
1685 	 * into the TLB once more which is harmless.
1686 	 */
1687 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1688 		return 0;
1689 
1690 	if (adev->family == AMDGPU_FAMILY_AI ||
1691 	    adev->family == AMDGPU_FAMILY_RV)
1692 		all_hub = true;
1693 
1694 	for_each_inst(xcc, xcc_mask) {
1695 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1696 						   all_hub, xcc);
1697 		if (r)
1698 			break;
1699 	}
1700 	return r;
1701 }
1702 
1703 /**
1704  * amdgpu_vm_bo_add - add a bo to a specific vm
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @vm: requested vm
1708  * @bo: amdgpu buffer object
1709  *
1710  * Add @bo into the requested vm.
1711  * Add @bo to the list of bos associated with the vm
1712  *
1713  * Returns:
1714  * Newly added bo_va or NULL for failure
1715  *
1716  * Object has to be reserved!
1717  */
1718 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1719 				      struct amdgpu_vm *vm,
1720 				      struct amdgpu_bo *bo)
1721 {
1722 	struct amdgpu_bo_va *bo_va;
1723 
1724 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1725 	if (bo_va == NULL) {
1726 		return NULL;
1727 	}
1728 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1729 
1730 	bo_va->ref_count = 1;
1731 	bo_va->last_pt_update = dma_fence_get_stub();
1732 	INIT_LIST_HEAD(&bo_va->valids);
1733 	INIT_LIST_HEAD(&bo_va->invalids);
1734 
1735 	if (!bo)
1736 		return bo_va;
1737 
1738 	dma_resv_assert_held(bo->tbo.base.resv);
1739 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1740 		bo_va->is_xgmi = true;
1741 		/* Power up XGMI if it can be potentially used */
1742 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1743 	}
1744 
1745 	return bo_va;
1746 }
1747 
1748 
1749 /**
1750  * amdgpu_vm_bo_insert_map - insert a new mapping
1751  *
1752  * @adev: amdgpu_device pointer
1753  * @bo_va: bo_va to store the address
1754  * @mapping: the mapping to insert
1755  *
1756  * Insert a new mapping into all structures.
1757  */
1758 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1759 				    struct amdgpu_bo_va *bo_va,
1760 				    struct amdgpu_bo_va_mapping *mapping)
1761 {
1762 	struct amdgpu_vm *vm = bo_va->base.vm;
1763 	struct amdgpu_bo *bo = bo_va->base.bo;
1764 
1765 	mapping->bo_va = bo_va;
1766 	list_add(&mapping->list, &bo_va->invalids);
1767 	amdgpu_vm_it_insert(mapping, &vm->va);
1768 
1769 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1770 		amdgpu_vm_prt_get(adev);
1771 
1772 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1773 		amdgpu_vm_bo_moved(&bo_va->base);
1774 
1775 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1776 }
1777 
1778 /* Validate operation parameters to prevent potential abuse */
1779 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1780 					  struct amdgpu_bo *bo,
1781 					  uint64_t saddr,
1782 					  uint64_t offset,
1783 					  uint64_t size)
1784 {
1785 	uint64_t tmp, lpfn;
1786 
1787 	if (saddr & AMDGPU_GPU_PAGE_MASK
1788 	    || offset & AMDGPU_GPU_PAGE_MASK
1789 	    || size & AMDGPU_GPU_PAGE_MASK)
1790 		return -EINVAL;
1791 
1792 	if (check_add_overflow(saddr, size, &tmp)
1793 	    || check_add_overflow(offset, size, &tmp)
1794 	    || size == 0 /* which also leads to end < begin */)
1795 		return -EINVAL;
1796 
1797 	/* make sure object fit at this offset */
1798 	if (bo && offset + size > amdgpu_bo_size(bo))
1799 		return -EINVAL;
1800 
1801 	/* Ensure the last pfn does not exceed max_pfn */
1802 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1803 	if (lpfn >= adev->vm_manager.max_pfn)
1804 		return -EINVAL;
1805 
1806 	return 0;
1807 }
1808 
1809 /**
1810  * amdgpu_vm_bo_map - map bo inside a vm
1811  *
1812  * @adev: amdgpu_device pointer
1813  * @bo_va: bo_va to store the address
1814  * @saddr: where to map the BO
1815  * @offset: requested offset in the BO
1816  * @size: BO size in bytes
1817  * @flags: attributes of pages (read/write/valid/etc.)
1818  *
1819  * Add a mapping of the BO at the specified addr into the VM.
1820  *
1821  * Returns:
1822  * 0 for success, error for failure.
1823  *
1824  * Object has to be reserved and unreserved outside!
1825  */
1826 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1827 		     struct amdgpu_bo_va *bo_va,
1828 		     uint64_t saddr, uint64_t offset,
1829 		     uint64_t size, uint32_t flags)
1830 {
1831 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1832 	struct amdgpu_bo *bo = bo_va->base.bo;
1833 	struct amdgpu_vm *vm = bo_va->base.vm;
1834 	uint64_t eaddr;
1835 	int r;
1836 
1837 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1838 	if (r)
1839 		return r;
1840 
1841 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1842 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1843 
1844 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1845 	if (tmp) {
1846 		/* bo and tmp overlap, invalid addr */
1847 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1848 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1849 			tmp->start, tmp->last + 1);
1850 		return -EINVAL;
1851 	}
1852 
1853 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1854 	if (!mapping)
1855 		return -ENOMEM;
1856 
1857 	mapping->start = saddr;
1858 	mapping->last = eaddr;
1859 	mapping->offset = offset;
1860 	mapping->flags = flags;
1861 
1862 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1863 
1864 	return 0;
1865 }
1866 
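/*
 * Illustrative sketch (not part of the driver; gpu_va and the choice of
 * uapi AMDGPU_VM_PAGE_* flags are assumptions): mapping a whole BO at a
 * GPU virtual address while the reservation is held.
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_VM_PAGE_READABLE |
 *			     AMDGPU_VM_PAGE_WRITEABLE);
 *
 * The call fails with -EINVAL if the range overlaps an existing mapping;
 * see amdgpu_vm_bo_replace_map() below for the replacing variant.
 */
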
1867 /**
1868  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1869  *
1870  * @adev: amdgpu_device pointer
1871  * @bo_va: bo_va to store the address
1872  * @saddr: where to map the BO
1873  * @offset: requested offset in the BO
1874  * @size: size of the mapping in bytes
1875  * @flags: attributes of pages (read/write/valid/etc.)
1876  *
1877  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1878  * mappings as we do so.
1879  *
1880  * Returns:
1881  * 0 for success, error for failure.
1882  *
1883  * Object has to be reserved and unreserved outside!
1884  */
1885 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1886 			     struct amdgpu_bo_va *bo_va,
1887 			     uint64_t saddr, uint64_t offset,
1888 			     uint64_t size, uint32_t flags)
1889 {
1890 	struct amdgpu_bo_va_mapping *mapping;
1891 	struct amdgpu_bo *bo = bo_va->base.bo;
1892 	uint64_t eaddr;
1893 	int r;
1894 
1895 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1896 	if (r)
1897 		return r;
1898 
1899 	/* Allocate all the needed memory */
1900 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1901 	if (!mapping)
1902 		return -ENOMEM;
1903 
1904 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1905 	if (r) {
1906 		kfree(mapping);
1907 		return r;
1908 	}
1909 
1910 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1911 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1912 
1913 	mapping->start = saddr;
1914 	mapping->last = eaddr;
1915 	mapping->offset = offset;
1916 	mapping->flags = flags;
1917 
1918 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1919 
1920 	return 0;
1921 }
1922 
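/*
 * Sketch of the difference to amdgpu_vm_bo_map() (gpu_va and flags are
 * assumed for the example): instead of failing with -EINVAL on an overlap,
 * the conflicting range is cleared first and the new mapping inserted:
 *
 *	r = amdgpu_vm_bo_replace_map(adev, bo_va, gpu_va, 0,
 *				     amdgpu_bo_size(bo), flags);
 */
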
1923 /**
1924  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1925  *
1926  * @adev: amdgpu_device pointer
1927  * @bo_va: bo_va to remove the address from
1928  * @saddr: where the BO is mapped
1929  *
1930  * Remove a mapping of the BO at the specified addr from the VM.
1931  *
1932  * Returns:
1933  * 0 for success, error for failure.
1934  *
1935  * Object has to be reserved and unreserved outside!
1936  */
1937 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1938 		       struct amdgpu_bo_va *bo_va,
1939 		       uint64_t saddr)
1940 {
1941 	struct amdgpu_bo_va_mapping *mapping;
1942 	struct amdgpu_vm *vm = bo_va->base.vm;
1943 	bool valid = true;
1944 
1945 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1946 
1947 	list_for_each_entry(mapping, &bo_va->valids, list) {
1948 		if (mapping->start == saddr)
1949 			break;
1950 	}
1951 
1952 	if (&mapping->list == &bo_va->valids) {
1953 		valid = false;
1954 
1955 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1956 			if (mapping->start == saddr)
1957 				break;
1958 		}
1959 
1960 		if (&mapping->list == &bo_va->invalids)
1961 			return -ENOENT;
1962 	}
1963 
1964 	list_del(&mapping->list);
1965 	amdgpu_vm_it_remove(mapping, &vm->va);
1966 	mapping->bo_va = NULL;
1967 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1968 
1969 	if (valid)
1970 		list_add(&mapping->list, &vm->freed);
1971 	else
1972 		amdgpu_vm_free_mapping(adev, vm, mapping,
1973 				       bo_va->last_pt_update);
1974 
1975 	return 0;
1976 }
1977 
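/*
 * Illustrative sketch (gpu_va is assumed): tearing a mapping down again
 * uses the same start address that was passed to amdgpu_vm_bo_map().
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_va);
 *
 * Mappings that were already committed to the page tables are moved to
 * vm->freed and cleaned up later together with the page table updates,
 * rather than being freed immediately.
 */
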
1978 /**
1979  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1980  *
1981  * @adev: amdgpu_device pointer
1982  * @vm: VM structure to use
1983  * @saddr: start of the range
1984  * @size: size of the range
1985  *
1986  * Remove all mappings in a range, split them as appropriate.
1987  *
1988  * Returns:
1989  * 0 for success, error for failure.
1990  */
1991 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1992 				struct amdgpu_vm *vm,
1993 				uint64_t saddr, uint64_t size)
1994 {
1995 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1996 	LIST_HEAD(removed);
1997 	uint64_t eaddr;
1998 	int r;
1999 
2000 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
2001 	if (r)
2002 		return r;
2003 
2004 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2005 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
2006 
2007 	/* Allocate all the needed memory */
2008 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2009 	if (!before)
2010 		return -ENOMEM;
2011 	INIT_LIST_HEAD(&before->list);
2012 
2013 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2014 	if (!after) {
2015 		kfree(before);
2016 		return -ENOMEM;
2017 	}
2018 	INIT_LIST_HEAD(&after->list);
2019 
2020 	/* Now gather all removed mappings */
2021 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2022 	while (tmp) {
2023 		/* Remember mapping split at the start */
2024 		if (tmp->start < saddr) {
2025 			before->start = tmp->start;
2026 			before->last = saddr - 1;
2027 			before->offset = tmp->offset;
2028 			before->flags = tmp->flags;
2029 			before->bo_va = tmp->bo_va;
2030 			list_add(&before->list, &tmp->bo_va->invalids);
2031 		}
2032 
2033 		/* Remember mapping split at the end */
2034 		if (tmp->last > eaddr) {
2035 			after->start = eaddr + 1;
2036 			after->last = tmp->last;
2037 			after->offset = tmp->offset;
2038 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2039 			after->flags = tmp->flags;
2040 			after->bo_va = tmp->bo_va;
2041 			list_add(&after->list, &tmp->bo_va->invalids);
2042 		}
2043 
2044 		list_del(&tmp->list);
2045 		list_add(&tmp->list, &removed);
2046 
2047 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2048 	}
2049 
2050 	/* And free them up */
2051 	list_for_each_entry_safe(tmp, next, &removed, list) {
2052 		amdgpu_vm_it_remove(tmp, &vm->va);
2053 		list_del(&tmp->list);
2054 
2055 		if (tmp->start < saddr)
2056 			tmp->start = saddr;
2057 		if (tmp->last > eaddr)
2058 			tmp->last = eaddr;
2059 
2060 		tmp->bo_va = NULL;
2061 		list_add(&tmp->list, &vm->freed);
2062 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2063 	}
2064 
2065 	/* Insert partial mapping before the range */
2066 	if (!list_empty(&before->list)) {
2067 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2068 
2069 		amdgpu_vm_it_insert(before, &vm->va);
2070 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2071 			amdgpu_vm_prt_get(adev);
2072 
2073 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2074 		    !before->bo_va->base.moved)
2075 			amdgpu_vm_bo_moved(&before->bo_va->base);
2076 	} else {
2077 		kfree(before);
2078 	}
2079 
2080 	/* Insert partial mapping after the range */
2081 	if (!list_empty(&after->list)) {
2082 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2083 
2084 		amdgpu_vm_it_insert(after, &vm->va);
2085 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2086 			amdgpu_vm_prt_get(adev);
2087 
2088 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2089 		    !after->bo_va->base.moved)
2090 			amdgpu_vm_bo_moved(&after->bo_va->base);
2091 	} else {
2092 		kfree(after);
2093 	}
2094 
2095 	return 0;
2096 }
2097 
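/*
 * Worked example for the splitting above (page numbers are made up): with
 * an existing mapping covering pages [0x1000, 0x4fff], clearing
 * [0x2000, 0x2fff] moves the original mapping, trimmed to the cleared
 * range, to vm->freed and inserts two new mappings:
 *
 *	before: [0x1000, 0x1fff], offset unchanged
 *	after:  [0x3000, 0x4fff], offset advanced by the skipped pages
 *
 * If the cleared range only overlaps the start or the end of a mapping,
 * only one of the two pre-allocated structures is used and the other is
 * freed again.
 */
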
2098 /**
2099  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2100  *
2101  * @vm: the requested VM
2102  * @addr: the address
2103  *
2104  * Find a mapping by its address.
2105  *
2106  * Returns:
2107  * The amdgpu_bo_va_mapping matching addr, or NULL
2108  *
2109  */
2110 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2111 							 uint64_t addr)
2112 {
2113 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2114 }
2115 
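/*
 * Note that the interval tree stores addresses in GPU page units, so a
 * caller starting from a byte address converts first (a sketch; addr is an
 * assumed variable):
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (!mapping || !mapping->bo_va)
 *		return -EINVAL;
 */
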
2116 /**
2117  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2118  *
2119  * @vm: the requested vm
2120  * @ticket: CS ticket
2121  *
2122  * Trace all mappings of BOs reserved during a command submission.
2123  */
2124 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2125 {
2126 	struct amdgpu_bo_va_mapping *mapping;
2127 
2128 	if (!trace_amdgpu_vm_bo_cs_enabled())
2129 		return;
2130 
2131 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2132 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2133 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2134 			struct amdgpu_bo *bo;
2135 
2136 			bo = mapping->bo_va->base.bo;
2137 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2138 			    ticket)
2139 				continue;
2140 		}
2141 
2142 		trace_amdgpu_vm_bo_cs(mapping);
2143 	}
2144 }
2145 
2146 /**
2147  * amdgpu_vm_bo_del - remove a bo from a specific vm
2148  *
2149  * @adev: amdgpu_device pointer
2150  * @bo_va: requested bo_va
2151  *
2152  * Remove @bo_va->bo from the requested vm.
2153  *
2154  * Object has to be reserved!
2155  */
2156 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2157 		      struct amdgpu_bo_va *bo_va)
2158 {
2159 	struct amdgpu_bo_va_mapping *mapping, *next;
2160 	struct amdgpu_bo *bo = bo_va->base.bo;
2161 	struct amdgpu_vm *vm = bo_va->base.vm;
2162 	struct amdgpu_vm_bo_base **base;
2163 
2164 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2165 
2166 	if (bo) {
2167 		dma_resv_assert_held(bo->tbo.base.resv);
2168 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2169 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2170 
2171 		for (base = &bo_va->base.bo->vm_bo; *base;
2172 		     base = &(*base)->next) {
2173 			if (*base != &bo_va->base)
2174 				continue;
2175 
2176 			amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2177 			*base = bo_va->base.next;
2178 			break;
2179 		}
2180 	}
2181 
2182 	spin_lock(&vm->status_lock);
2183 	list_del(&bo_va->base.vm_status);
2184 	spin_unlock(&vm->status_lock);
2185 
2186 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2187 		list_del(&mapping->list);
2188 		amdgpu_vm_it_remove(mapping, &vm->va);
2189 		mapping->bo_va = NULL;
2190 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2191 		list_add(&mapping->list, &vm->freed);
2192 	}
2193 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2194 		list_del(&mapping->list);
2195 		amdgpu_vm_it_remove(mapping, &vm->va);
2196 		amdgpu_vm_free_mapping(adev, vm, mapping,
2197 				       bo_va->last_pt_update);
2198 	}
2199 
2200 	dma_fence_put(bo_va->last_pt_update);
2201 
2202 	if (bo && bo_va->is_xgmi)
2203 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2204 
2205 	kfree(bo_va);
2206 }
2207 
2208 /**
2209  * amdgpu_vm_evictable - check if we can evict a VM
2210  *
2211  * @bo: A page table of the VM.
2212  *
2213  * Check if it is possible to evict a VM.
2214  */
2215 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2216 {
2217 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2218 
2219 	/* Page tables of a destroyed VM can go away immediately */
2220 	if (!bo_base || !bo_base->vm)
2221 		return true;
2222 
2223 	/* Don't evict VM page tables while they are busy */
2224 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2225 		return false;
2226 
2227 	/* Try to block ongoing updates */
2228 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2229 		return false;
2230 
2231 	/* Don't evict VM page tables while they are updated */
2232 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2233 		amdgpu_vm_eviction_unlock(bo_base->vm);
2234 		return false;
2235 	}
2236 
2237 	bo_base->vm->evicting = true;
2238 	amdgpu_vm_eviction_unlock(bo_base->vm);
2239 	return true;
2240 }
2241 
2242 /**
2243  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2244  *
2245  * @bo: amdgpu buffer object
2246  * @evicted: is the BO evicted
2247  *
2248  * Mark @bo as invalid.
2249  */
2250 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2251 {
2252 	struct amdgpu_vm_bo_base *bo_base;
2253 
2254 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2255 		struct amdgpu_vm *vm = bo_base->vm;
2256 
2257 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2258 			amdgpu_vm_bo_evicted(bo_base);
2259 			continue;
2260 		}
2261 
2262 		if (bo_base->moved)
2263 			continue;
2264 		bo_base->moved = true;
2265 
2266 		if (bo->tbo.type == ttm_bo_type_kernel)
2267 			amdgpu_vm_bo_relocated(bo_base);
2268 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2269 			amdgpu_vm_bo_moved(bo_base);
2270 		else
2271 			amdgpu_vm_bo_invalidated(bo_base);
2272 	}
2273 }
2274 
2275 /**
2276  * amdgpu_vm_bo_move - handle BO move
2277  *
2278  * @bo: amdgpu buffer object
2279  * @new_mem: the new placement of the BO move
2280  * @evicted: is the BO evicted
2281  *
2282  * Update the memory stats for the new placement and mark @bo as invalid.
2283  */
2284 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2285 		       bool evicted)
2286 {
2287 	struct amdgpu_vm_bo_base *bo_base;
2288 
2289 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2290 		struct amdgpu_vm *vm = bo_base->vm;
2291 
2292 		spin_lock(&vm->status_lock);
2293 		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2294 		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2295 		spin_unlock(&vm->status_lock);
2296 	}
2297 
2298 	amdgpu_vm_bo_invalidate(bo, evicted);
2299 }
2300 
2301 /**
2302  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2303  *
2304  * @vm_size: VM size
2305  *
2306  * Returns:
2307  * VM page table size as a power of two
2308  */
2309 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2310 {
2311 	/* Total bits covered by PD + PTs */
2312 	unsigned bits = ilog2(vm_size) + 18;
2313 
2314 	/* Make sure the PD is 4K in size up to 8GB address space.
2315 	 * Above that, split equally between PD and PTs. */
2316 	if (vm_size <= 8)
2317 		return (bits - 9);
2318 	else
2319 		return ((bits + 3) / 2);
2320 }
2321 
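/*
 * Worked example: for a 4 GB VM, bits = ilog2(4) + 18 = 20 and, since the
 * size is below 8 GB, the block size is 20 - 9 = 11.  That leaves 9 bits
 * for the page directory, i.e. 512 entries of 8 bytes = one 4K PD.  For a
 * 256 GB VM, bits = 26 and the block size becomes (26 + 3) / 2 = 14,
 * splitting the bits roughly equally between PD and PTs.
 */
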
2322 /**
2323  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2324  *
2325  * @adev: amdgpu_device pointer
2326  * @min_vm_size: the minimum vm size in GB if it's set auto
2327  * @fragment_size_default: Default PTE fragment size
2328  * @max_level: max VMPT level
2329  * @max_bits: max address space size in bits
2330  *
2331  */
2332 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2333 			   uint32_t fragment_size_default, unsigned max_level,
2334 			   unsigned max_bits)
2335 {
2336 	unsigned int max_size = 1 << (max_bits - 30);
2337 	unsigned int vm_size;
2338 	uint64_t tmp;
2339 
2340 	/* adjust vm size first */
2341 	if (amdgpu_vm_size != -1) {
2342 		vm_size = amdgpu_vm_size;
2343 		if (vm_size > max_size) {
2344 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2345 				 amdgpu_vm_size, max_size);
2346 			vm_size = max_size;
2347 		}
2348 	} else {
2349 		struct sysinfo si;
2350 		unsigned int phys_ram_gb;
2351 
2352 		/* Optimal VM size depends on the amount of physical
2353 		 * RAM available. Underlying requirements and
2354 		 * assumptions:
2355 		 *
2356 		 *  - Need to map system memory and VRAM from all GPUs
2357 		 *     - VRAM from other GPUs not known here
2358 		 *     - Assume VRAM <= system memory
2359 		 *  - On GFX8 and older, VM space can be segmented for
2360 		 *    different MTYPEs
2361 		 *  - Need to allow room for fragmentation, guard pages etc.
2362 		 *
2363 		 * This adds up to a rough guess of system memory x3.
2364 		 * Round up to power of two to maximize the available
2365 		 * VM size with the given page table size.
2366 		 */
2367 		si_meminfo(&si);
2368 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2369 			       (1 << 30) - 1) >> 30;
2370 		vm_size = roundup_pow_of_two(
2371 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2372 	}
2373 
2374 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2375 
2376 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2377 	if (amdgpu_vm_block_size != -1)
2378 		tmp >>= amdgpu_vm_block_size - 9;
2379 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2380 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2381 	switch (adev->vm_manager.num_level) {
2382 	case 3:
2383 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2384 		break;
2385 	case 2:
2386 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2387 		break;
2388 	case 1:
2389 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2390 		break;
2391 	default:
2392 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2393 	}
2394 	/* block size depends on vm size and hw setup */
2395 	if (amdgpu_vm_block_size != -1)
2396 		adev->vm_manager.block_size =
2397 			min((unsigned)amdgpu_vm_block_size, max_bits
2398 			    - AMDGPU_GPU_PAGE_SHIFT
2399 			    - 9 * adev->vm_manager.num_level);
2400 	else if (adev->vm_manager.num_level > 1)
2401 		adev->vm_manager.block_size = 9;
2402 	else
2403 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2404 
2405 	if (amdgpu_vm_fragment_size == -1)
2406 		adev->vm_manager.fragment_size = fragment_size_default;
2407 	else
2408 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2409 
2410 	dev_info(
2411 		adev->dev,
2412 		"vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2413 		vm_size, adev->vm_manager.num_level + 1,
2414 		adev->vm_manager.block_size, adev->vm_manager.fragment_size);
2415 }
2416 
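/*
 * Worked example for the automatic sizing above (the RAM size is an
 * assumption): with 16 GB of system memory and no module parameter
 * overrides, phys_ram_gb = 16, 16 * 3 = 48 GB is clamped to
 * [min_vm_size, max_size] and rounded up to the next power of two, giving
 * a 64 GB VM.  max_pfn then becomes 64 << 18, i.e. 16M pages of 4 KB each.
 */
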
2417 /**
2418  * amdgpu_vm_wait_idle - wait for the VM to become idle
2419  *
2420  * @vm: VM object to wait for
2421  * @timeout: timeout to wait for VM to become idle
2422  */
2423 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2424 {
2425 	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
2426 	if (timeout <= 0)
2427 		return timeout;
2428 
2429 	return drm_sched_entity_flush(&vm->delayed, timeout);
2430 }
2431 
2432 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2433 {
2434 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2435 
2436 	kfree(ti);
2437 }
2438 
2439 static inline struct amdgpu_vm *
2440 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2441 {
2442 	struct amdgpu_vm *vm;
2443 	unsigned long flags;
2444 
2445 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2446 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2447 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2448 
2449 	return vm;
2450 }
2451 
2452 /**
2453  * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2454  *
2455  * @task_info: task_info struct under discussion.
2456  *
2457  * Frees the vm task_info ptr at the last put.
2458  */
2459 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2460 {
2461 	if (task_info)
2462 		kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2463 }
2464 
2465 /**
2466  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2467  *
2468  * @vm: VM to get info from
2469  *
2470  * Returns the reference counted task_info structure, which must be
2471  * referenced down with amdgpu_vm_put_task_info.
2472  */
2473 struct amdgpu_task_info *
2474 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2475 {
2476 	struct amdgpu_task_info *ti = NULL;
2477 
2478 	if (vm) {
2479 		ti = vm->task_info;
2480 		kref_get(&vm->task_info->refcount);
2481 	}
2482 
2483 	return ti;
2484 }
2485 
2486 /**
2487  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2488  *
2489  * @adev: drm device pointer
2490  * @pasid: PASID identifier for VM
2491  *
2492  * Returns the reference counted task_info structure, which must be
2493  * referenced down with amdgpu_vm_put_task_info.
2494  */
2495 struct amdgpu_task_info *
2496 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2497 {
2498 	return amdgpu_vm_get_task_info_vm(
2499 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2500 }
2501 
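/*
 * Illustrative sketch (not part of the driver): fault handling code
 * typically pairs the lookup with a put once the information was used.
 *
 *	struct amdgpu_task_info *ti;
 *
 *	ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *	if (ti) {
 *		amdgpu_vm_print_task_info(adev, ti);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 */
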
2502 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2503 {
2504 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2505 	if (!vm->task_info)
2506 		return -ENOMEM;
2507 
2508 	kref_init(&vm->task_info->refcount);
2509 	return 0;
2510 }
2511 
2512 /**
2513  * amdgpu_vm_set_task_info - Sets VMs task info.
2514  *
2515  * @vm: vm for which to set the info
2516  */
2517 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2518 {
2519 	if (!vm->task_info)
2520 		return;
2521 
2522 	if (vm->task_info->task.pid == current->pid)
2523 		return;
2524 
2525 	vm->task_info->task.pid = current->pid;
2526 	get_task_comm(vm->task_info->task.comm, current);
2527 
2528 	if (current->group_leader->mm != current->mm)
2529 		return;
2530 
2531 	vm->task_info->tgid = current->group_leader->pid;
2532 	get_task_comm(vm->task_info->process_name, current->group_leader);
2533 }
2534 
2535 /**
2536  * amdgpu_vm_init - initialize a vm instance
2537  *
2538  * @adev: amdgpu_device pointer
2539  * @vm: requested vm
2540  * @xcp_id: GPU partition selection id
2541  *
2542  * Init @vm fields.
2543  *
2544  * Returns:
2545  * 0 for success, error for failure.
2546  */
2547 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2548 		   int32_t xcp_id)
2549 {
2550 	struct amdgpu_bo *root_bo;
2551 	struct amdgpu_bo_vm *root;
2552 	int r, i;
2553 
2554 	vm->va = RB_ROOT_CACHED;
2555 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2556 		vm->reserved_vmid[i] = NULL;
2557 	INIT_LIST_HEAD(&vm->evicted);
2558 	INIT_LIST_HEAD(&vm->evicted_user);
2559 	INIT_LIST_HEAD(&vm->relocated);
2560 	INIT_LIST_HEAD(&vm->moved);
2561 	INIT_LIST_HEAD(&vm->idle);
2562 	INIT_LIST_HEAD(&vm->invalidated);
2563 	spin_lock_init(&vm->status_lock);
2564 	INIT_LIST_HEAD(&vm->freed);
2565 	INIT_LIST_HEAD(&vm->done);
2566 	INIT_KFIFO(vm->faults);
2567 
2568 	r = amdgpu_vm_init_entities(adev, vm);
2569 	if (r)
2570 		return r;
2571 
2572 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2573 
2574 	vm->is_compute_context = false;
2575 
2576 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2577 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2578 
2579 	dev_dbg(adev->dev, "VM update mode is %s\n",
2580 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2581 	WARN_ONCE((vm->use_cpu_for_update &&
2582 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2583 		  "CPU update of VM recommended only for large BAR system\n");
2584 
2585 	if (vm->use_cpu_for_update)
2586 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2587 	else
2588 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2589 
2590 	vm->last_update = dma_fence_get_stub();
2591 	vm->last_unlocked = dma_fence_get_stub();
2592 	vm->last_tlb_flush = dma_fence_get_stub();
2593 	vm->generation = amdgpu_vm_generation(adev, NULL);
2594 
2595 	mutex_init(&vm->eviction_lock);
2596 	vm->evicting = false;
2597 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2598 
2599 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2600 				false, &root, xcp_id);
2601 	if (r)
2602 		goto error_free_delayed;
2603 
2604 	root_bo = amdgpu_bo_ref(&root->bo);
2605 	r = amdgpu_bo_reserve(root_bo, true);
2606 	if (r) {
2607 		amdgpu_bo_unref(&root_bo);
2608 		goto error_free_delayed;
2609 	}
2610 
2611 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2612 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2613 	if (r)
2614 		goto error_free_root;
2615 
2616 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2617 	if (r)
2618 		goto error_free_root;
2619 
2620 	r = amdgpu_vm_create_task_info(vm);
2621 	if (r)
2622 		dev_dbg(adev->dev, "Failed to create task info for VM\n");
2623 
2624 	amdgpu_bo_unreserve(vm->root.bo);
2625 	amdgpu_bo_unref(&root_bo);
2626 
2627 	return 0;
2628 
2629 error_free_root:
2630 	amdgpu_vm_pt_free_root(adev, vm);
2631 	amdgpu_bo_unreserve(vm->root.bo);
2632 	amdgpu_bo_unref(&root_bo);
2633 
2634 error_free_delayed:
2635 	dma_fence_put(vm->last_tlb_flush);
2636 	dma_fence_put(vm->last_unlocked);
2637 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2638 	amdgpu_vm_fini_entities(vm);
2639 
2640 	return r;
2641 }
2642 
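/*
 * Illustrative sketch (not part of the driver; vm and xcp_id are
 * assumptions): a per-file VM is typically created on open and destroyed
 * again on close.
 *
 *	r = amdgpu_vm_init(adev, vm, xcp_id);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, vm);
 */
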
2643 /**
2644  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2645  *
2646  * @adev: amdgpu_device pointer
2647  * @vm: requested vm
2648  *
2649  * This only works on GFX VMs that don't have any BOs added and no
2650  * page tables allocated yet.
2651  *
2652  * Changes the following VM parameters:
2653  * - use_cpu_for_update
2654  * - update_funcs (CPU or SDMA page table updates)
2655  *
2656  * When switching to CPU updates this waits for outstanding SDMA updates
2657  * and maps the page tables for CPU access.
2658  *
2659  * Returns:
2660  * 0 for success, -errno for errors.
2661  */
2662 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2663 {
2664 	int r;
2665 
2666 	r = amdgpu_bo_reserve(vm->root.bo, true);
2667 	if (r)
2668 		return r;
2669 
2670 	/* Update VM state */
2671 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2672 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2673 	dev_dbg(adev->dev, "VM update mode is %s\n",
2674 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2675 	WARN_ONCE((vm->use_cpu_for_update &&
2676 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2677 		  "CPU update of VM recommended only for large BAR system\n");
2678 
2679 	if (vm->use_cpu_for_update) {
2680 		/* Sync with last SDMA update/clear before switching to CPU */
2681 		r = amdgpu_bo_sync_wait(vm->root.bo,
2682 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2683 		if (r)
2684 			goto unreserve_bo;
2685 
2686 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2687 		r = amdgpu_vm_pt_map_tables(adev, vm);
2688 		if (r)
2689 			goto unreserve_bo;
2690 
2691 	} else {
2692 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2693 	}
2694 
2695 	dma_fence_put(vm->last_update);
2696 	vm->last_update = dma_fence_get_stub();
2697 	vm->is_compute_context = true;
2698 
2699 unreserve_bo:
2700 	amdgpu_bo_unreserve(vm->root.bo);
2701 	return r;
2702 }
2703 
2704 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2705 {
2706 	for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2707 		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2708 		      vm->stats[i].evicted == 0))
2709 			return false;
2710 	}
2711 	return true;
2712 }
2713 
2714 /**
2715  * amdgpu_vm_fini - tear down a vm instance
2716  *
2717  * @adev: amdgpu_device pointer
2718  * @vm: requested vm
2719  *
2720  * Tear down @vm.
2721  * Unbind the VM and remove all BOs from the vm bo list.
2722  */
2723 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2724 {
2725 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2726 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2727 	struct amdgpu_bo *root;
2728 	unsigned long flags;
2729 	int i;
2730 
2731 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2732 
2733 	root = amdgpu_bo_ref(vm->root.bo);
2734 	amdgpu_bo_reserve(root, true);
2735 	amdgpu_vm_set_pasid(adev, vm, 0);
2736 	dma_fence_wait(vm->last_unlocked, false);
2737 	dma_fence_put(vm->last_unlocked);
2738 	dma_fence_wait(vm->last_tlb_flush, false);
2739 	/* Make sure that all fence callbacks have completed */
2740 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2741 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2742 	dma_fence_put(vm->last_tlb_flush);
2743 
2744 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2745 		if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
2746 			amdgpu_vm_prt_fini(adev, vm);
2747 			prt_fini_needed = false;
2748 		}
2749 
2750 		list_del(&mapping->list);
2751 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2752 	}
2753 
2754 	amdgpu_vm_pt_free_root(adev, vm);
2755 	amdgpu_bo_unreserve(root);
2756 	amdgpu_bo_unref(&root);
2757 	WARN_ON(vm->root.bo);
2758 
2759 	amdgpu_vm_fini_entities(vm);
2760 
2761 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2762 		dev_err(adev->dev, "still active bo inside vm\n");
2763 	}
2764 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2765 					     &vm->va.rb_root, rb) {
2766 		/* Don't remove the mapping here, we don't want to trigger a
2767 		 * rebalance and the tree is about to be destroyed anyway.
2768 		 */
2769 		list_del(&mapping->list);
2770 		kfree(mapping);
2771 	}
2772 
2773 	dma_fence_put(vm->last_update);
2774 
2775 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2776 		if (vm->reserved_vmid[i]) {
2777 			amdgpu_vmid_free_reserved(adev, i);
2778 			vm->reserved_vmid[i] = false;
2779 		}
2780 	}
2781 
2782 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2783 
2784 	if (!amdgpu_vm_stats_is_zero(vm)) {
2785 		struct amdgpu_task_info *ti = vm->task_info;
2786 
2787 		dev_warn(adev->dev,
2788 			 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2789 			 ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
2790 	}
2791 
2792 	amdgpu_vm_put_task_info(vm->task_info);
2793 }
2794 
2795 /**
2796  * amdgpu_vm_manager_init - init the VM manager
2797  *
2798  * @adev: amdgpu_device pointer
2799  *
2800  * Initialize the VM manager structures
2801  */
2802 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2803 {
2804 	unsigned i;
2805 
2806 	/* Concurrent flushes are only possible starting with Vega10 and
2807 	 * are broken on Navi10 and Navi14.
2808 	 */
2809 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2810 					      adev->asic_type == CHIP_NAVI10 ||
2811 					      adev->asic_type == CHIP_NAVI14);
2812 	amdgpu_vmid_mgr_init(adev);
2813 
2814 	adev->vm_manager.fence_context =
2815 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2816 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2817 		adev->vm_manager.seqno[i] = 0;
2818 
2819 	spin_lock_init(&adev->vm_manager.prt_lock);
2820 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2821 
2822 	/* If not overridden by the user, compute VM page tables are only
2823 	 * updated by the CPU on large BAR systems.
2824 	 */
2825 #ifdef CONFIG_X86_64
2826 	if (amdgpu_vm_update_mode == -1) {
2827 		/* For asic with VF MMIO access protection
2828 		 * avoid using CPU for VM table updates
2829 		 */
2830 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2831 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2832 			adev->vm_manager.vm_update_mode =
2833 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2834 		else
2835 			adev->vm_manager.vm_update_mode = 0;
2836 	} else
2837 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2838 #else
2839 	adev->vm_manager.vm_update_mode = 0;
2840 #endif
2841 
2842 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2843 }
2844 
2845 /**
2846  * amdgpu_vm_manager_fini - cleanup VM manager
2847  *
2848  * @adev: amdgpu_device pointer
2849  *
2850  * Cleanup the VM manager and free resources.
2851  */
2852 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2853 {
2854 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2855 	xa_destroy(&adev->vm_manager.pasids);
2856 
2857 	amdgpu_vmid_mgr_fini(adev);
2858 }
2859 
2860 /**
2861  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2862  *
2863  * @dev: drm device pointer
2864  * @data: drm_amdgpu_vm
2865  * @filp: drm file pointer
2866  *
2867  * Returns:
2868  * 0 for success, -errno for errors.
2869  */
2870 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2871 {
2872 	union drm_amdgpu_vm *args = data;
2873 	struct amdgpu_device *adev = drm_to_adev(dev);
2874 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2875 
2876 	/* No valid flags defined yet */
2877 	if (args->in.flags)
2878 		return -EINVAL;
2879 
2880 	switch (args->in.op) {
2881 	case AMDGPU_VM_OP_RESERVE_VMID:
2882 		/* We only need to reserve a VMID from the gfxhub */
2883 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2884 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2885 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2886 		}
2887 
2888 		break;
2889 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2890 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2891 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2892 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2893 		}
2894 		break;
2895 	default:
2896 		return -EINVAL;
2897 	}
2898 
2899 	return 0;
2900 }
2901 
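/*
 * Userspace sketch (assumptions: a libdrm-style file descriptor fd; not
 * part of the driver): reserving a VMID on the GFX hub through this ioctl.
 *
 *	union drm_amdgpu_vm args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */
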
2902 /**
2903  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2904  * @adev: amdgpu device pointer
2905  * @pasid: PASID of the VM
2906  * @vmid: VMID, only used for GFX 9.4.3.
2907  * @node_id: Node_id received in IH cookie. Only applicable for
2908  *           GFX 9.4.3.
2909  * @addr: Address of the fault
2910  * @ts: Timestamp of the fault
2911  * @write_fault: true for a write fault, false for a read fault
2912  *
2913  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2914  * shouldn't be reported any more.
2915  */
2916 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2917 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2918 			    bool write_fault)
2919 {
2920 	bool is_compute_context = false;
2921 	struct amdgpu_bo *root;
2922 	unsigned long irqflags;
2923 	uint64_t value, flags;
2924 	struct amdgpu_vm *vm;
2925 	int r;
2926 
2927 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2928 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2929 	if (vm) {
2930 		root = amdgpu_bo_ref(vm->root.bo);
2931 		is_compute_context = vm->is_compute_context;
2932 	} else {
2933 		root = NULL;
2934 	}
2935 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2936 
2937 	if (!root)
2938 		return false;
2939 
2940 	addr /= AMDGPU_GPU_PAGE_SIZE;
2941 
2942 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2943 	    node_id, addr, ts, write_fault)) {
2944 		amdgpu_bo_unref(&root);
2945 		return true;
2946 	}
2947 
2948 	r = amdgpu_bo_reserve(root, true);
2949 	if (r)
2950 		goto error_unref;
2951 
2952 	/* Double check that the VM still exists */
2953 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2954 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2955 	if (vm && vm->root.bo != root)
2956 		vm = NULL;
2957 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2958 	if (!vm)
2959 		goto error_unlock;
2960 
2961 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2962 		AMDGPU_PTE_SYSTEM;
2963 
2964 	if (is_compute_context) {
2965 		/* Intentionally setting invalid PTE flag
2966 		 * combination to force a no-retry-fault
2967 		 */
2968 		flags = AMDGPU_VM_NORETRY_FLAGS;
2969 		value = 0;
2970 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2971 		/* Redirect the access to the dummy page */
2972 		value = adev->dummy_page_addr;
2973 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2974 			AMDGPU_PTE_WRITEABLE;
2975 
2976 	} else {
2977 		/* Let the hw retry silently on the PTE */
2978 		value = 0;
2979 	}
2980 
2981 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2982 	if (r) {
2983 		pr_debug("failed %d to reserve fence slot\n", r);
2984 		goto error_unlock;
2985 	}
2986 
2987 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2988 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2989 	if (r)
2990 		goto error_unlock;
2991 
2992 	r = amdgpu_vm_update_pdes(adev, vm, true);
2993 
2994 error_unlock:
2995 	amdgpu_bo_unreserve(root);
2996 	if (r < 0)
2997 		dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
2998 
2999 error_unref:
3000 	amdgpu_bo_unref(&root);
3001 
3002 	return false;
3003 }
3004 
3005 #if defined(CONFIG_DEBUG_FS)
3006 /**
3007  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
3008  *
3009  * @vm: Requested VM for printing BO info
3010  * @m: debugfs file
3011  *
3012  * Print BO information in debugfs file for the VM
3013  */
3014 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3015 {
3016 	struct amdgpu_bo_va *bo_va, *tmp;
3017 	u64 total_idle = 0;
3018 	u64 total_evicted = 0;
3019 	u64 total_relocated = 0;
3020 	u64 total_moved = 0;
3021 	u64 total_invalidated = 0;
3022 	u64 total_done = 0;
3023 	unsigned int total_idle_objs = 0;
3024 	unsigned int total_evicted_objs = 0;
3025 	unsigned int total_relocated_objs = 0;
3026 	unsigned int total_moved_objs = 0;
3027 	unsigned int total_invalidated_objs = 0;
3028 	unsigned int total_done_objs = 0;
3029 	unsigned int id = 0;
3030 
3031 	spin_lock(&vm->status_lock);
3032 	seq_puts(m, "\tIdle BOs:\n");
3033 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3034 		if (!bo_va->base.bo)
3035 			continue;
3036 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3037 	}
3038 	total_idle_objs = id;
3039 	id = 0;
3040 
3041 	seq_puts(m, "\tEvicted BOs:\n");
3042 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3043 		if (!bo_va->base.bo)
3044 			continue;
3045 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3046 	}
3047 	total_evicted_objs = id;
3048 	id = 0;
3049 
3050 	seq_puts(m, "\tRelocated BOs:\n");
3051 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3052 		if (!bo_va->base.bo)
3053 			continue;
3054 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3055 	}
3056 	total_relocated_objs = id;
3057 	id = 0;
3058 
3059 	seq_puts(m, "\tMoved BOs:\n");
3060 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3061 		if (!bo_va->base.bo)
3062 			continue;
3063 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3064 	}
3065 	total_moved_objs = id;
3066 	id = 0;
3067 
3068 	seq_puts(m, "\tInvalidated BOs:\n");
3069 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3070 		if (!bo_va->base.bo)
3071 			continue;
3072 		total_invalidated += amdgpu_bo_print_info(id++,	bo_va->base.bo, m);
3073 	}
3074 	total_invalidated_objs = id;
3075 	id = 0;
3076 
3077 	seq_puts(m, "\tDone BOs:\n");
3078 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3079 		if (!bo_va->base.bo)
3080 			continue;
3081 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3082 	}
3083 	spin_unlock(&vm->status_lock);
3084 	total_done_objs = id;
3085 
3086 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3087 		   total_idle_objs);
3088 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3089 		   total_evicted_objs);
3090 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3091 		   total_relocated_objs);
3092 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3093 		   total_moved_objs);
3094 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3095 		   total_invalidated_objs);
3096 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3097 		   total_done_objs);
3098 }
3099 #endif
3100 
3101 /**
3102  * amdgpu_vm_update_fault_cache - update cached fault info.
3103  * @adev: amdgpu device pointer
3104  * @pasid: PASID of the VM
3105  * @addr: Address of the fault
3106  * @status: GPUVM fault status register
3107  * @vmhub: which vmhub got the fault
3108  *
3109  * Cache the fault info for later use by userspace in debugging.
3110  */
3111 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3112 				  unsigned int pasid,
3113 				  uint64_t addr,
3114 				  uint32_t status,
3115 				  unsigned int vmhub)
3116 {
3117 	struct amdgpu_vm *vm;
3118 	unsigned long flags;
3119 
3120 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3121 
3122 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3123 	/* Don't update the fault cache if status is 0.  In the multiple
3124 	 * fault case, subsequent faults will return a 0 status which is
3125 	 * useless for userspace and replaces the useful fault status, so
3126 	 * only update if status is non-0.
3127 	 */
3128 	if (vm && status) {
3129 		vm->fault_info.addr = addr;
3130 		vm->fault_info.status = status;
3131 		/*
3132 		 * Update the fault information globally for later usage
3133 		 * when vm could be stale or freed.
3134 		 */
3135 		adev->vm_manager.fault_info.addr = addr;
3136 		adev->vm_manager.fault_info.vmhub = vmhub;
3137 		adev->vm_manager.fault_info.status = status;
3138 
3139 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3140 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3141 			vm->fault_info.vmhub |=
3142 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3143 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3144 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3145 			vm->fault_info.vmhub |=
3146 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3147 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3148 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3149 			vm->fault_info.vmhub |=
3150 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3151 		} else {
3152 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3153 		}
3154 	}
3155 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3156 }
3157 
3158 /**
3159  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3160  *
3161  * @vm: VM to test against.
3162  * @bo: BO to be tested.
3163  *
3164  * Returns true if the BO shares the dma_resv object with the root PD and is
3165  * always guaranteed to be valid inside the VM.
3166  */
3167 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3168 {
3169 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3170 }
3171 
3172 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
3173 			       struct amdgpu_task_info *task_info)
3174 {
3175 	dev_err(adev->dev,
3176 		" Process %s pid %d thread %s pid %d\n",
3177 		task_info->process_name, task_info->tgid,
3178 		task_info->task.comm, task_info->task.pid);
3179 }
3180