xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision bfe68975768a983d4e59c7fb465301999b863a7e)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47 
48 /**
49  * DOC: GPUVM
50  *
51  * GPUVM is the MMU functionality provided on the GPU.
52  * GPUVM is similar to the legacy GART on older ASICs; however,
53  * rather than there being a single global GART table
54  * for the entire GPU, there can be multiple GPUVM page tables active
55  * at any given time.  The GPUVM page tables can contain a mix of
56  * VRAM pages and system pages (both memory and MMIO) and system pages
57  * can be mapped as snooped (cached system pages) or unsnooped
58  * (uncached system pages).
59  *
60  * Each active GPUVM has an ID associated with it and there is a page table
61  * linked with each VMID.  When executing a command buffer,
62  * the kernel tells the engine what VMID to use for that command
63  * buffer.  VMIDs are allocated dynamically as commands are submitted.
64  * The userspace drivers maintain their own address space and the kernel
65  * sets up their page tables accordingly when they submit their
66  * command buffers and a VMID is assigned.
67  * The hardware supports up to 16 active GPUVMs at any given time.
68  *
69  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
71  * as other features such as encryption and caching attributes.
72  *
73  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
74  * addition to an aperture managed by a page table, VMID 0 also has
75  * several other apertures.  There is an aperture for direct access to VRAM
76  * and there is a legacy AGP aperture which just forwards accesses directly
77  * to the matching system physical addresses (or IOVAs when an IOMMU is
78  * present).  These apertures provide direct access to these memories without
79  * incurring the overhead of a page table.  VMID 0 is used by the kernel
80  * driver for tasks like memory management.
81  *
82  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83  * For user applications, each application can have its own unique GPUVM
84  * address space.  The application manages the address space and the kernel
85  * driver manages the GPUVM page tables for each process.  If a GPU client
86  * accesses an invalid page, it will generate a GPU page fault, similar to
87  * accessing an invalid page on a CPU.
88  */
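
/*
 * Illustrative sketch (not taken from a real call site in this file): wiring
 * a BO into a per-process GPUVM address space uses the helpers defined later
 * in this file.  "va_addr" below is a hypothetical address chosen by user
 * space.
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	int r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *				 AMDGPU_VM_PAGE_READABLE |
 *				 AMDGPU_VM_PAGE_WRITEABLE);
 *
 * The actual PTEs are only filled in later, e.g. by amdgpu_vm_bo_update().
 */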
89 
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92 
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 		     START, LAST, static, amdgpu_vm_it)
95 
96 #undef START
97 #undef LAST
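
/*
 * Note: INTERVAL_TREE_DEFINE() above generates the static helpers
 * amdgpu_vm_it_insert(), amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next(), which keep the BO VA mappings of a VM sorted by
 * their [start, last] GPU page range.
 */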
98 
99 /**
100  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
101  */
102 struct amdgpu_prt_cb {
103 
104 	/**
105 	 * @adev: amdgpu device
106 	 */
107 	struct amdgpu_device *adev;
108 
109 	/**
110 	 * @cb: callback
111 	 */
112 	struct dma_fence_cb cb;
113 };
114 
115 /**
116  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117  */
118 struct amdgpu_vm_tlb_seq_struct {
119 	/**
120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 	 */
122 	struct amdgpu_vm *vm;
123 
124 	/**
125 	 * @cb: callback
126 	 */
127 	struct dma_fence_cb cb;
128 };
129 
130 /**
131  * amdgpu_vm_assert_locked - check if VM is correctly locked
132  * @vm: the VM which should be tested
133  *
134  * Asserts that the VM root PD is locked.
135  */
136 static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
137 {
138 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
139 }
140 
141 /**
142  * amdgpu_vm_bo_evicted - vm_bo is evicted
143  *
144  * @vm_bo: vm_bo which is evicted
145  *
146  * State for PDs/PTs and per VM BOs which are not at the location they should
147  * be.
148  */
149 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
150 {
151 	struct amdgpu_vm *vm = vm_bo->vm;
152 	struct amdgpu_bo *bo = vm_bo->bo;
153 
154 	vm_bo->moved = true;
155 	amdgpu_vm_assert_locked(vm);
156 	spin_lock(&vm_bo->vm->status_lock);
157 	if (bo->tbo.type == ttm_bo_type_kernel)
158 		list_move(&vm_bo->vm_status, &vm->evicted);
159 	else
160 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
161 	spin_unlock(&vm_bo->vm->status_lock);
162 }

163 /**
164  * amdgpu_vm_bo_moved - vm_bo is moved
165  *
166  * @vm_bo: vm_bo which is moved
167  *
168  * State for per VM BOs which are moved, but that change is not yet reflected
169  * in the page tables.
170  */
171 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
172 {
173 	amdgpu_vm_assert_locked(vm_bo->vm);
174 	spin_lock(&vm_bo->vm->status_lock);
175 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
176 	spin_unlock(&vm_bo->vm->status_lock);
177 }
178 
179 /**
180  * amdgpu_vm_bo_idle - vm_bo is idle
181  *
182  * @vm_bo: vm_bo which is now idle
183  *
184  * State for PDs/PTs and per VM BOs which have gone through the state machine
185  * and are now idle.
186  */
187 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
188 {
189 	amdgpu_vm_assert_locked(vm_bo->vm);
190 	spin_lock(&vm_bo->vm->status_lock);
191 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
192 	spin_unlock(&vm_bo->vm->status_lock);
193 	vm_bo->moved = false;
194 }
195 
196 /**
197  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
198  *
199  * @vm_bo: vm_bo which is now invalidated
200  *
201  * State for normal BOs which are invalidated and that change is not yet reflected
202  * in the PTs.
203  */
204 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
205 {
206 	spin_lock(&vm_bo->vm->status_lock);
207 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
208 	spin_unlock(&vm_bo->vm->status_lock);
209 }
210 
211 /**
212  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
213  *
214  * @vm_bo: vm_bo which is evicted
215  *
216  * State for BOs used by user mode queues which are not at the location they
217  * should be.
218  */
219 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
220 {
221 	vm_bo->moved = true;
222 	spin_lock(&vm_bo->vm->status_lock);
223 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
224 	spin_unlock(&vm_bo->vm->status_lock);
225 }
226 
227 /**
228  * amdgpu_vm_bo_relocated - vm_bo is relocated
229  *
230  * @vm_bo: vm_bo which is relocated
231  *
232  * State for PDs/PTs which need to update their parent PD.
233  * For the root PD, just move to idle state.
234  */
235 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
236 {
237 	amdgpu_vm_assert_locked(vm_bo->vm);
238 	if (vm_bo->bo->parent) {
239 		spin_lock(&vm_bo->vm->status_lock);
240 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
241 		spin_unlock(&vm_bo->vm->status_lock);
242 	} else {
243 		amdgpu_vm_bo_idle(vm_bo);
244 	}
245 }
246 
247 /**
248  * amdgpu_vm_bo_done - vm_bo is done
249  *
250  * @vm_bo: vm_bo which is now done
251  *
252  * State for normal BOs which are invalidated and that change has been updated
253  * in the PTs.
254  */
255 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
256 {
257 	amdgpu_vm_assert_locked(vm_bo->vm);
258 	spin_lock(&vm_bo->vm->status_lock);
259 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
260 	spin_unlock(&vm_bo->vm->status_lock);
261 }
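
/*
 * Informational summary, derived from the per-state comments above: a vm_bo
 * normally moves through these lists roughly as
 *
 *	evicted      -> moved (user BOs) or relocated (PDs/PTs) after validation
 *	relocated    -> idle once the parent PD has been updated
 *	moved        -> idle (per VM BOs) once the PTs have been updated
 *	invalidated  -> done once the change has been written to the PTs
 *	evicted_user -> invalidated after validation with the caller's ticket
 *
 * amdgpu_vm_bo_reset_state_machine() below pushes BOs back to the start of
 * this cycle when the page tables need to be rebuilt.
 */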
262 
263 /**
264  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
265  * @vm: the VM which state machine to reset
266  *
267  * Move all vm_bo objects in the VM into a state where they will be updated
268  * again during validation.
269  */
270 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
271 {
272 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
273 
274 	amdgpu_vm_assert_locked(vm);
275 
276 	spin_lock(&vm->status_lock);
277 	list_splice_init(&vm->done, &vm->invalidated);
278 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
279 		vm_bo->moved = true;
280 
281 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
282 		struct amdgpu_bo *bo = vm_bo->bo;
283 
284 		vm_bo->moved = true;
285 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
286 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
287 		else if (bo->parent)
288 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
289 	}
290 	spin_unlock(&vm->status_lock);
291 }
292 
293 /**
294  * amdgpu_vm_update_shared - helper to update shared memory stat
295  * @base: base structure for tracking BO usage in a VM
296  *
297  * Takes the vm status_lock and updates the shared memory stat. If the basic
298  * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called
299  * as well.
300  */
301 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
302 {
303 	struct amdgpu_vm *vm = base->vm;
304 	struct amdgpu_bo *bo = base->bo;
305 	uint64_t size = amdgpu_bo_size(bo);
306 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
307 	bool shared;
308 
309 	dma_resv_assert_held(bo->tbo.base.resv);
310 	spin_lock(&vm->status_lock);
311 	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
312 	if (base->shared != shared) {
313 		base->shared = shared;
314 		if (shared) {
315 			vm->stats[bo_memtype].drm.shared += size;
316 			vm->stats[bo_memtype].drm.private -= size;
317 		} else {
318 			vm->stats[bo_memtype].drm.shared -= size;
319 			vm->stats[bo_memtype].drm.private += size;
320 		}
321 	}
322 	spin_unlock(&vm->status_lock);
323 }
324 
325 /**
326  * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
327  * @bo: amdgpu buffer object
328  *
329  * Update the per VM stats of all the VMs this BO belongs to, if needed, from
330  * private to shared or vice versa.
331  */
332 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
333 {
334 	struct amdgpu_vm_bo_base *base;
335 
336 	for (base = bo->vm_bo; base; base = base->next)
337 		amdgpu_vm_update_shared(base);
338 }
339 
340 /**
341  * amdgpu_vm_update_stats_locked - helper to update normal memory stat
342  * @base: base structure for tracking BO usage in a VM
343  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
344  *        be bo->tbo.resource
345  * @sign: if we should add (+1) or subtract (-1) from the stat
346  *
347  * Caller needs to hold the vm status_lock. Useful when multiple updates
348  * need to happen at the same time.
349  */
350 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
351 			    struct ttm_resource *res, int sign)
352 {
353 	struct amdgpu_vm *vm = base->vm;
354 	struct amdgpu_bo *bo = base->bo;
355 	int64_t size = sign * amdgpu_bo_size(bo);
356 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
357 
358 	/* For drm-total- and drm-shared-, BOs are accounted for by their preferred
359 	 * placement, see also amdgpu_bo_mem_stats_placement.
360 	 */
361 	if (base->shared)
362 		vm->stats[bo_memtype].drm.shared += size;
363 	else
364 		vm->stats[bo_memtype].drm.private += size;
365 
366 	if (res && res->mem_type < __AMDGPU_PL_NUM) {
367 		uint32_t res_memtype = res->mem_type;
368 
369 		vm->stats[res_memtype].drm.resident += size;
370 		/* A BO only counts as purgeable if it is resident,
371 		 * since otherwise there's nothing to purge.
372 		 */
373 		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
374 			vm->stats[res_memtype].drm.purgeable += size;
375 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
376 			vm->stats[bo_memtype].evicted += size;
377 	}
378 }
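
/*
 * Example, derived from the accounting above: a BO that prefers VRAM but is
 * currently resident in GTT adds its size to the VRAM drm.private (or
 * drm.shared) counter, to the GTT drm.resident counter and to the VRAM
 * evicted counter.
 */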
379 
380 /**
381  * amdgpu_vm_update_stats - helper to update normal memory stat
382  * @base: base structure for tracking BO usage in a VM
383  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
384  *        be bo->tbo.resource
385  * @sign: if we should add (+1) or subtract (-1) from the stat
386  *
387  * Updates the basic memory stat when bo is added/deleted/moved.
388  */
389 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
390 			    struct ttm_resource *res, int sign)
391 {
392 	struct amdgpu_vm *vm = base->vm;
393 
394 	spin_lock(&vm->status_lock);
395 	amdgpu_vm_update_stats_locked(base, res, sign);
396 	spin_unlock(&vm->status_lock);
397 }
398 
399 /**
400  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
401  *
402  * @base: base structure for tracking BO usage in a VM
403  * @vm: vm to which bo is to be added
404  * @bo: amdgpu buffer object
405  *
406  * Initialize a bo_va_base structure and add it to the appropriate lists.
408  */
409 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
410 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
411 {
412 	base->vm = vm;
413 	base->bo = bo;
414 	base->next = NULL;
415 	INIT_LIST_HEAD(&base->vm_status);
416 
417 	if (!bo)
418 		return;
419 	base->next = bo->vm_bo;
420 	bo->vm_bo = base;
421 
422 	spin_lock(&vm->status_lock);
423 	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
424 	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
425 	spin_unlock(&vm->status_lock);
426 
427 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
428 		return;
429 
430 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
431 
432 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
433 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
434 		amdgpu_vm_bo_relocated(base);
435 	else
436 		amdgpu_vm_bo_idle(base);
437 
438 	if (bo->preferred_domains &
439 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
440 		return;
441 
442 	/*
443 	 * We checked all the prerequisites, but it looks like this per VM BO
444 	 * is currently evicted. Add the BO to the evicted list to make sure it
445 	 * is validated on next VM use to avoid a fault.
446 	 */
447 	amdgpu_vm_bo_evicted(base);
448 }
449 
450 /**
451  * amdgpu_vm_lock_pd - lock PD in drm_exec
452  *
453  * @vm: vm providing the BOs
454  * @exec: drm execution context
455  * @num_fences: number of extra fences to reserve
456  *
457  * Lock the VM root PD in the DRM execution context.
458  */
459 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
460 		      unsigned int num_fences)
461 {
462 	/* We need at least two fences for the VM PD/PT updates */
463 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
464 				    2 + num_fences);
465 }
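
/*
 * Illustrative sketch (not a real call site): callers typically take this
 * lock inside a drm_exec retry loop, roughly like
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 *
 * and release everything again with drm_exec_fini() when done.
 */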
466 
467 /**
468  * amdgpu_vm_lock_done_list - lock all BOs on the done list
469  * @vm: vm providing the BOs
470  * @exec: drm execution context
471  * @num_fences: number of extra fences to reserve
472  *
473  * Lock the BOs on the done list in the DRM execution context.
474  */
475 int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
476 			     unsigned int num_fences)
477 {
478 	struct list_head *prev = &vm->done;
479 	struct amdgpu_bo_va *bo_va;
480 	struct amdgpu_bo *bo;
481 	int ret;
482 
483 	/* We can only trust prev->next while holding the lock */
484 	spin_lock(&vm->status_lock);
485 	while (!list_is_head(prev->next, &vm->done)) {
486 		bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
487 		spin_unlock(&vm->status_lock);
488 
489 		bo = bo_va->base.bo;
490 		if (bo) {
491 			ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
492 			if (unlikely(ret))
493 				return ret;
494 		}
495 		spin_lock(&vm->status_lock);
496 		prev = prev->next;
497 	}
498 	spin_unlock(&vm->status_lock);
499 
500 	return 0;
501 }
502 
503 /**
504  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
505  *
506  * @adev: amdgpu device pointer
507  * @vm: vm providing the BOs
508  *
509  * Move all BOs to the end of LRU and remember their positions to put them
510  * together.
511  */
512 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
513 				struct amdgpu_vm *vm)
514 {
515 	spin_lock(&adev->mman.bdev.lru_lock);
516 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
517 	spin_unlock(&adev->mman.bdev.lru_lock);
518 }
519 
520 /* Create scheduler entities for page table updates */
521 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
522 				   struct amdgpu_vm *vm)
523 {
524 	int r;
525 
526 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
527 				  adev->vm_manager.vm_pte_scheds,
528 				  adev->vm_manager.vm_pte_num_scheds, NULL);
529 	if (r)
530 		goto error;
531 
532 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
533 				     adev->vm_manager.vm_pte_scheds,
534 				     adev->vm_manager.vm_pte_num_scheds, NULL);
535 
536 error:
537 	drm_sched_entity_destroy(&vm->immediate);
538 	return r;
539 }
540 
541 /* Destroy the entities for page table updates again */
542 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
543 {
544 	drm_sched_entity_destroy(&vm->immediate);
545 	drm_sched_entity_destroy(&vm->delayed);
546 }
547 
548 /**
549  * amdgpu_vm_generation - return the page table re-generation counter
550  * @adev: the amdgpu_device
551  * @vm: optional VM to check, might be NULL
552  *
553  * Returns a page table re-generation token to allow checking if submissions
554  * are still valid to use this VM. The VM parameter might be NULL in which case
555  * just the VRAM lost counter will be used.
556  */
557 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
558 {
559 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
560 
561 	if (!vm)
562 		return result;
563 
564 	result += lower_32_bits(vm->generation);
565 	/* Add one if the page tables will be re-generated on next CS */
566 	if (drm_sched_entity_error(&vm->delayed))
567 		++result;
568 
569 	return result;
570 }
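
/*
 * Illustrative use (assumption, not a real call site): a submission path can
 * snapshot the token and later detect that the page tables were re-generated
 * in the meantime; -ESTALE below is only a hypothetical choice of error code.
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *	...
 *	if (gen != amdgpu_vm_generation(adev, vm))
 *		return -ESTALE;
 */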
571 
572 /**
573  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
574  *
575  * @adev: amdgpu device pointer
576  * @vm: vm providing the BOs
577  * @ticket: optional reservation ticket used to reserve the VM
578  * @validate: callback to do the validation
579  * @param: parameter for the validation callback
580  *
581  * Validate the page table BOs and per-VM BOs on command submission if
582  * necessary. If a ticket is given, also try to validate evicted user queue
583  * BOs. They must already be reserved with the given ticket.
584  *
585  * Returns:
586  * Validation result.
587  */
588 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
589 		       struct ww_acquire_ctx *ticket,
590 		       int (*validate)(void *p, struct amdgpu_bo *bo),
591 		       void *param)
592 {
593 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
594 	struct amdgpu_vm_bo_base *bo_base;
595 	struct amdgpu_bo *bo;
596 	int r;
597 
598 	if (vm->generation != new_vm_generation) {
599 		vm->generation = new_vm_generation;
600 		amdgpu_vm_bo_reset_state_machine(vm);
601 		amdgpu_vm_fini_entities(vm);
602 		r = amdgpu_vm_init_entities(adev, vm);
603 		if (r)
604 			return r;
605 	}
606 
607 	spin_lock(&vm->status_lock);
608 	while (!list_empty(&vm->evicted)) {
609 		bo_base = list_first_entry(&vm->evicted,
610 					   struct amdgpu_vm_bo_base,
611 					   vm_status);
612 		spin_unlock(&vm->status_lock);
613 
614 		bo = bo_base->bo;
615 
616 		r = validate(param, bo);
617 		if (r)
618 			return r;
619 
620 		if (bo->tbo.type != ttm_bo_type_kernel) {
621 			amdgpu_vm_bo_moved(bo_base);
622 		} else {
623 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
624 			amdgpu_vm_bo_relocated(bo_base);
625 		}
626 		spin_lock(&vm->status_lock);
627 	}
628 	while (ticket && !list_empty(&vm->evicted_user)) {
629 		bo_base = list_first_entry(&vm->evicted_user,
630 					   struct amdgpu_vm_bo_base,
631 					   vm_status);
632 		spin_unlock(&vm->status_lock);
633 
634 		bo = bo_base->bo;
635 		dma_resv_assert_held(bo->tbo.base.resv);
636 
637 		r = validate(param, bo);
638 		if (r)
639 			return r;
640 
641 		amdgpu_vm_bo_invalidated(bo_base);
642 
643 		spin_lock(&vm->status_lock);
644 	}
645 	spin_unlock(&vm->status_lock);
646 
647 	amdgpu_vm_eviction_lock(vm);
648 	vm->evicting = false;
649 	amdgpu_vm_eviction_unlock(vm);
650 
651 	return 0;
652 }
653 
654 /**
655  * amdgpu_vm_ready - check VM is ready for updates
656  *
657  * @vm: VM to check
658  *
659  * Check if all VM PDs/PTs are ready for updates
660  *
661  * Returns:
662  * True if VM is not evicting and all VM entities are not stopped
663  */
664 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
665 {
666 	bool ret;
667 
668 	amdgpu_vm_assert_locked(vm);
669 
670 	amdgpu_vm_eviction_lock(vm);
671 	ret = !vm->evicting;
672 	amdgpu_vm_eviction_unlock(vm);
673 
674 	spin_lock(&vm->status_lock);
675 	ret &= list_empty(&vm->evicted);
676 	spin_unlock(&vm->status_lock);
677 
678 	spin_lock(&vm->immediate.lock);
679 	ret &= !vm->immediate.stopped;
680 	spin_unlock(&vm->immediate.lock);
681 
682 	spin_lock(&vm->delayed.lock);
683 	ret &= !vm->delayed.stopped;
684 	spin_unlock(&vm->delayed.lock);
685 
686 	return ret;
687 }
688 
689 /**
690  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
691  *
692  * @adev: amdgpu_device pointer
693  */
694 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
695 {
696 	const struct amdgpu_ip_block *ip_block;
697 	bool has_compute_vm_bug;
698 	struct amdgpu_ring *ring;
699 	int i;
700 
701 	has_compute_vm_bug = false;
702 
703 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
704 	if (ip_block) {
705 		/* Compute has a VM bug for GFX version < 7.
706 		   Compute has a VM bug for GFX 8 MEC firmware version < 673. */
707 		if (ip_block->version->major <= 7)
708 			has_compute_vm_bug = true;
709 		else if (ip_block->version->major == 8)
710 			if (adev->gfx.mec_fw_version < 673)
711 				has_compute_vm_bug = true;
712 	}
713 
714 	for (i = 0; i < adev->num_rings; i++) {
715 		ring = adev->rings[i];
716 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
717 			/* only compute rings */
718 			ring->has_compute_vm_bug = has_compute_vm_bug;
719 		else
720 			ring->has_compute_vm_bug = false;
721 	}
722 }
723 
724 /**
725  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
726  *
727  * @ring: ring on which the job will be submitted
728  * @job: job to submit
729  *
730  * Returns:
731  * True if sync is needed.
732  */
733 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
734 				  struct amdgpu_job *job)
735 {
736 	struct amdgpu_device *adev = ring->adev;
737 	unsigned vmhub = ring->vm_hub;
738 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
739 
740 	if (job->vmid == 0)
741 		return false;
742 
743 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
744 		return true;
745 
746 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
747 		return true;
748 
749 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
750 		return true;
751 
752 	return false;
753 }
754 
755 /**
756  * amdgpu_vm_flush - hardware flush the vm
757  *
758  * @ring: ring to use for flush
759  * @job:  related job
760  * @need_pipe_sync: is pipe sync needed
761  *
762  * Emit a VM flush when it is necessary.
763  *
764  * Returns:
765  * 0 on success, errno otherwise.
766  */
767 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
768 		    bool need_pipe_sync)
769 {
770 	struct amdgpu_device *adev = ring->adev;
771 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
772 	unsigned vmhub = ring->vm_hub;
773 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
774 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
775 	bool spm_update_needed = job->spm_update_needed;
776 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
777 		job->gds_switch_needed;
778 	bool vm_flush_needed = job->vm_needs_flush;
779 	bool cleaner_shader_needed = false;
780 	bool pasid_mapping_needed = false;
781 	struct dma_fence *fence = NULL;
782 	unsigned int patch;
783 	int r;
784 
785 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
786 		gds_switch_needed = true;
787 		vm_flush_needed = true;
788 		pasid_mapping_needed = true;
789 		spm_update_needed = true;
790 	}
791 
792 	mutex_lock(&id_mgr->lock);
793 	if (id->pasid != job->pasid || !id->pasid_mapping ||
794 	    !dma_fence_is_signaled(id->pasid_mapping))
795 		pasid_mapping_needed = true;
796 	mutex_unlock(&id_mgr->lock);
797 
798 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
799 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
800 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
801 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
802 		ring->funcs->emit_wreg;
803 
804 	cleaner_shader_needed = job->run_cleaner_shader &&
805 		adev->gfx.enable_cleaner_shader &&
806 		ring->funcs->emit_cleaner_shader && job->base.s_fence &&
807 		&job->base.s_fence->scheduled == isolation->spearhead;
808 
809 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
810 	    !cleaner_shader_needed)
811 		return 0;
812 
813 	amdgpu_ring_ib_begin(ring);
814 	if (ring->funcs->init_cond_exec)
815 		patch = amdgpu_ring_init_cond_exec(ring,
816 						   ring->cond_exe_gpu_addr);
817 
818 	if (need_pipe_sync)
819 		amdgpu_ring_emit_pipeline_sync(ring);
820 
821 	if (cleaner_shader_needed)
822 		ring->funcs->emit_cleaner_shader(ring);
823 
824 	if (vm_flush_needed) {
825 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
826 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
827 	}
828 
829 	if (pasid_mapping_needed)
830 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
831 
832 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
833 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
834 
835 	if (ring->funcs->emit_gds_switch &&
836 	    gds_switch_needed) {
837 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
838 					    job->gds_size, job->gws_base,
839 					    job->gws_size, job->oa_base,
840 					    job->oa_size);
841 	}
842 
843 	if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
844 		r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
845 		if (r)
846 			return r;
847 		fence = &job->hw_vm_fence->base;
848 	}
849 
850 	if (vm_flush_needed) {
851 		mutex_lock(&id_mgr->lock);
852 		dma_fence_put(id->last_flush);
853 		id->last_flush = dma_fence_get(fence);
854 		id->current_gpu_reset_count =
855 			atomic_read(&adev->gpu_reset_counter);
856 		mutex_unlock(&id_mgr->lock);
857 	}
858 
859 	if (pasid_mapping_needed) {
860 		mutex_lock(&id_mgr->lock);
861 		id->pasid = job->pasid;
862 		dma_fence_put(id->pasid_mapping);
863 		id->pasid_mapping = dma_fence_get(fence);
864 		mutex_unlock(&id_mgr->lock);
865 	}
866 
867 	/*
868 	 * Make sure that all other submissions wait for the cleaner shader to
869 	 * finish before we push them to the HW.
870 	 */
871 	if (cleaner_shader_needed) {
872 		trace_amdgpu_cleaner_shader(ring, fence);
873 		mutex_lock(&adev->enforce_isolation_mutex);
874 		dma_fence_put(isolation->spearhead);
875 		isolation->spearhead = dma_fence_get(fence);
876 		mutex_unlock(&adev->enforce_isolation_mutex);
877 	}
878 	dma_fence_put(fence);
879 
880 	amdgpu_ring_patch_cond_exec(ring, patch);
881 
882 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
883 	if (ring->funcs->emit_switch_buffer) {
884 		amdgpu_ring_emit_switch_buffer(ring);
885 		amdgpu_ring_emit_switch_buffer(ring);
886 	}
887 
888 	amdgpu_ring_ib_end(ring);
889 	return 0;
890 }
891 
892 /**
893  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
894  *
895  * @vm: requested vm
896  * @bo: requested buffer object
897  *
898  * Find @bo inside the requested vm.
899  * Search inside the @bo's vm list for the requested vm.
900  * Returns the found bo_va or NULL if none is found.
901  *
902  * Object has to be reserved!
903  *
904  * Returns:
905  * Found bo_va or NULL.
906  */
907 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
908 				       struct amdgpu_bo *bo)
909 {
910 	struct amdgpu_vm_bo_base *base;
911 
912 	for (base = bo->vm_bo; base; base = base->next) {
913 		if (base->vm != vm)
914 			continue;
915 
916 		return container_of(base, struct amdgpu_bo_va, base);
917 	}
918 	return NULL;
919 }
920 
921 /**
922  * amdgpu_vm_map_gart - Resolve gart mapping of addr
923  *
924  * @pages_addr: optional DMA address to use for lookup
925  * @addr: the unmapped addr
926  *
927  * Look up the physical address of the page that the pte resolves
928  * to.
929  *
930  * Returns:
931  * The pointer for the page table entry.
932  */
933 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
934 {
935 	uint64_t result;
936 
937 	/* page table offset */
938 	result = pages_addr[addr >> PAGE_SHIFT];
939 
940 	/* in case cpu page size != gpu page size */
941 	result |= addr & (~PAGE_MASK);
942 
943 	result &= 0xFFFFFFFFFFFFF000ULL;
944 
945 	return result;
946 }
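
/*
 * Worked example (assuming 64KiB CPU pages and 4KiB GPU pages): for
 * addr = 0x12345 the lookup reads pages_addr[0x1] (addr >> PAGE_SHIFT), adds
 * the offset inside the CPU page (0x2345) and then clears the low 12 bits,
 * so the returned address is pages_addr[1] + 0x2000, i.e. the DMA address of
 * the GPU page containing addr.
 */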
947 
948 /**
949  * amdgpu_vm_update_pdes - make sure that all directories are valid
950  *
951  * @adev: amdgpu_device pointer
952  * @vm: requested vm
953  * @immediate: submit immediately to the paging queue
954  *
955  * Makes sure all directories are up to date.
956  *
957  * Returns:
958  * 0 for success, error for failure.
959  */
960 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
961 			  struct amdgpu_vm *vm, bool immediate)
962 {
963 	struct amdgpu_vm_update_params params;
964 	struct amdgpu_vm_bo_base *entry;
965 	bool flush_tlb_needed = false;
966 	LIST_HEAD(relocated);
967 	int r, idx;
968 
969 	amdgpu_vm_assert_locked(vm);
970 
971 	spin_lock(&vm->status_lock);
972 	list_splice_init(&vm->relocated, &relocated);
973 	spin_unlock(&vm->status_lock);
974 
975 	if (list_empty(&relocated))
976 		return 0;
977 
978 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
979 		return -ENODEV;
980 
981 	memset(&params, 0, sizeof(params));
982 	params.adev = adev;
983 	params.vm = vm;
984 	params.immediate = immediate;
985 
986 	r = vm->update_funcs->prepare(&params, NULL,
987 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
988 	if (r)
989 		goto error;
990 
991 	list_for_each_entry(entry, &relocated, vm_status) {
992 		/* vm_flush_needed after updating moved PDEs */
993 		flush_tlb_needed |= entry->moved;
994 
995 		r = amdgpu_vm_pde_update(&params, entry);
996 		if (r)
997 			goto error;
998 	}
999 
1000 	r = vm->update_funcs->commit(&params, &vm->last_update);
1001 	if (r)
1002 		goto error;
1003 
1004 	if (flush_tlb_needed)
1005 		atomic64_inc(&vm->tlb_seq);
1006 
1007 	while (!list_empty(&relocated)) {
1008 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
1009 					 vm_status);
1010 		amdgpu_vm_bo_idle(entry);
1011 	}
1012 
1013 error:
1014 	drm_dev_exit(idx);
1015 	return r;
1016 }
1017 
1018 /**
1019  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1020  * @fence: unused
1021  * @cb: the callback structure
1022  *
1023  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1024  */
1025 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1026 				 struct dma_fence_cb *cb)
1027 {
1028 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1029 
1030 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1031 	atomic64_inc(&tlb_cb->vm->tlb_seq);
1032 	kfree(tlb_cb);
1033 }
1034 
1035 /**
1036  * amdgpu_vm_tlb_flush - prepare TLB flush
1037  *
1038  * @params: parameters for update
1039  * @fence: input fence to sync TLB flush with
1040  * @tlb_cb: the callback structure
1041  *
1042  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1043  */
1044 static void
1045 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1046 		    struct dma_fence **fence,
1047 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1048 {
1049 	struct amdgpu_vm *vm = params->vm;
1050 
1051 	tlb_cb->vm = vm;
1052 	if (!fence || !*fence) {
1053 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1054 		return;
1055 	}
1056 
1057 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1058 				    amdgpu_vm_tlb_seq_cb)) {
1059 		dma_fence_put(vm->last_tlb_flush);
1060 		vm->last_tlb_flush = dma_fence_get(*fence);
1061 	} else {
1062 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1063 	}
1064 
1065 	/* Prepare a TLB flush fence to be attached to PTs */
1066 	if (!params->unlocked && vm->is_compute_context) {
1067 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1068 
1069 		/* Makes sure no PD/PT is freed before the flush */
1070 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1071 				   DMA_RESV_USAGE_BOOKKEEP);
1072 	}
1073 }
1074 
1075 /**
1076  * amdgpu_vm_update_range - update a range in the vm page table
1077  *
1078  * @adev: amdgpu_device pointer to use for commands
1079  * @vm: the VM to update the range
1080  * @immediate: immediate submission in a page fault
1081  * @unlocked: unlocked invalidation during MM callback
1082  * @flush_tlb: trigger tlb invalidation after update completed
1083  * @allow_override: change MTYPE for local NUMA nodes
1084  * @sync: fences we need to sync to
1085  * @start: start of mapped range
1086  * @last: last mapped entry
1087  * @flags: flags for the entries
1088  * @offset: offset into nodes and pages_addr
1089  * @vram_base: base for vram mappings
1090  * @res: ttm_resource to map
1091  * @pages_addr: DMA addresses to use for mapping
1092  * @fence: optional resulting fence
1093  *
1094  * Fill in the page table entries between @start and @last.
1095  *
1096  * Returns:
1097  * 0 for success, negative error code for failure.
1098  */
1099 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1100 			   bool immediate, bool unlocked, bool flush_tlb,
1101 			   bool allow_override, struct amdgpu_sync *sync,
1102 			   uint64_t start, uint64_t last, uint64_t flags,
1103 			   uint64_t offset, uint64_t vram_base,
1104 			   struct ttm_resource *res, dma_addr_t *pages_addr,
1105 			   struct dma_fence **fence)
1106 {
1107 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1108 	struct amdgpu_vm_update_params params;
1109 	struct amdgpu_res_cursor cursor;
1110 	int r, idx;
1111 
1112 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1113 		return -ENODEV;
1114 
1115 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
1116 	if (!tlb_cb) {
1117 		drm_dev_exit(idx);
1118 		return -ENOMEM;
1119 	}
1120 
1121 	/* On Vega20+XGMI, PTEs get inadvertently cached in the L2 texture cache,
1122 	 * so do a heavy-weight TLB flush unconditionally.
1123 	 */
1124 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1125 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1126 
1127 	/*
1128 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1129 	 */
1130 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1131 
1132 	memset(&params, 0, sizeof(params));
1133 	params.adev = adev;
1134 	params.vm = vm;
1135 	params.immediate = immediate;
1136 	params.pages_addr = pages_addr;
1137 	params.unlocked = unlocked;
1138 	params.needs_flush = flush_tlb;
1139 	params.allow_override = allow_override;
1140 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1141 
1142 	amdgpu_vm_eviction_lock(vm);
1143 	if (vm->evicting) {
1144 		r = -EBUSY;
1145 		goto error_free;
1146 	}
1147 
1148 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1149 		struct dma_fence *tmp = dma_fence_get_stub();
1150 
1151 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1152 		swap(vm->last_unlocked, tmp);
1153 		dma_fence_put(tmp);
1154 	}
1155 
1156 	r = vm->update_funcs->prepare(&params, sync,
1157 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
1158 	if (r)
1159 		goto error_free;
1160 
1161 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1162 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1163 	while (cursor.remaining) {
1164 		uint64_t tmp, num_entries, addr;
1165 
1166 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1167 		if (pages_addr) {
1168 			bool contiguous = true;
1169 
1170 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1171 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1172 				uint64_t count;
1173 
1174 				contiguous = pages_addr[pfn + 1] ==
1175 					pages_addr[pfn] + PAGE_SIZE;
1176 
1177 				tmp = num_entries /
1178 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1179 				for (count = 2; count < tmp; ++count) {
1180 					uint64_t idx = pfn + count;
1181 
1182 					if (contiguous != (pages_addr[idx] ==
1183 					    pages_addr[idx - 1] + PAGE_SIZE))
1184 						break;
1185 				}
1186 				if (!contiguous)
1187 					count--;
1188 				num_entries = count *
1189 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1190 			}
1191 
1192 			if (!contiguous) {
1193 				addr = cursor.start;
1194 				params.pages_addr = pages_addr;
1195 			} else {
1196 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1197 				params.pages_addr = NULL;
1198 			}
1199 
1200 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1201 			addr = vram_base + cursor.start;
1202 		} else {
1203 			addr = 0;
1204 		}
1205 
1206 		tmp = start + num_entries;
1207 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1208 		if (r)
1209 			goto error_free;
1210 
1211 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1212 		start = tmp;
1213 	}
1214 
1215 	r = vm->update_funcs->commit(&params, fence);
1216 	if (r)
1217 		goto error_free;
1218 
1219 	if (params.needs_flush) {
1220 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1221 		tlb_cb = NULL;
1222 	}
1223 
1224 	amdgpu_vm_pt_free_list(adev, &params);
1225 
1226 error_free:
1227 	kfree(tlb_cb);
1228 	amdgpu_vm_eviction_unlock(vm);
1229 	drm_dev_exit(idx);
1230 	return r;
1231 }
1232 
1233 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1234 			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1235 {
1236 	spin_lock(&vm->status_lock);
1237 	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1238 	spin_unlock(&vm->status_lock);
1239 }
1240 
1241 /**
1242  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1243  *
1244  * @adev: amdgpu_device pointer
1245  * @bo_va: requested BO and VM object
1246  * @clear: if true clear the entries
1247  *
1248  * Fill in the page table entries for @bo_va.
1249  *
1250  * Returns:
1251  * 0 for success, -EINVAL for failure.
1252  */
1253 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1254 			bool clear)
1255 {
1256 	struct amdgpu_bo *bo = bo_va->base.bo;
1257 	struct amdgpu_vm *vm = bo_va->base.vm;
1258 	struct amdgpu_bo_va_mapping *mapping;
1259 	struct dma_fence **last_update;
1260 	dma_addr_t *pages_addr = NULL;
1261 	struct ttm_resource *mem;
1262 	struct amdgpu_sync sync;
1263 	bool flush_tlb = clear;
1264 	uint64_t vram_base;
1265 	uint64_t flags;
1266 	bool uncached;
1267 	int r;
1268 
1269 	amdgpu_sync_create(&sync);
1270 	if (clear) {
1271 		mem = NULL;
1272 
1273 		/* Implicitly sync to command submissions in the same VM before
1274 		 * unmapping.
1275 		 */
1276 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1277 				     AMDGPU_SYNC_EQ_OWNER, vm);
1278 		if (r)
1279 			goto error_free;
1280 		if (bo) {
1281 			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1282 			if (r)
1283 				goto error_free;
1284 		}
1285 	} else if (!bo) {
1286 		mem = NULL;
1287 
1288 		/* PRT map operations don't need to sync to anything. */
1289 
1290 	} else {
1291 		struct drm_gem_object *obj = &bo->tbo.base;
1292 
1293 		if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
1294 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1295 			struct drm_gem_object *gobj = dma_buf->priv;
1296 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1297 
1298 			if (abo->tbo.resource &&
1299 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1300 				bo = gem_to_amdgpu_bo(gobj);
1301 		}
1302 		mem = bo->tbo.resource;
1303 		if (mem && (mem->mem_type == TTM_PL_TT ||
1304 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1305 			pages_addr = bo->tbo.ttm->dma_address;
1306 
1307 		/* Implicitly sync to moving fences before mapping anything */
1308 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1309 				     AMDGPU_SYNC_EXPLICIT, vm);
1310 		if (r)
1311 			goto error_free;
1312 	}
1313 
1314 	if (bo) {
1315 		struct amdgpu_device *bo_adev;
1316 
1317 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1318 
1319 		if (amdgpu_bo_encrypted(bo))
1320 			flags |= AMDGPU_PTE_TMZ;
1321 
1322 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1323 		vram_base = bo_adev->vm_manager.vram_base_offset;
1324 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1325 	} else {
1326 		flags = 0x0;
1327 		vram_base = 0;
1328 		uncached = false;
1329 	}
1330 
1331 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1332 		last_update = &vm->last_update;
1333 	else
1334 		last_update = &bo_va->last_pt_update;
1335 
1336 	if (!clear && bo_va->base.moved) {
1337 		flush_tlb = true;
1338 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1339 
1340 	} else if (bo_va->cleared != clear) {
1341 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1342 	}
1343 
1344 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1345 		uint64_t update_flags = flags;
1346 
1347 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits
1348 		 * here, but just in case we filter the flags first.
1349 		 */
1350 		if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
1351 			update_flags &= ~AMDGPU_PTE_READABLE;
1352 		if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
1353 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1354 
1355 		/* Apply ASIC specific mapping flags */
1356 		amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
1357 				      &update_flags);
1358 
1359 		trace_amdgpu_vm_bo_update(mapping);
1360 
1361 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1362 					   !uncached, &sync, mapping->start,
1363 					   mapping->last, update_flags,
1364 					   mapping->offset, vram_base, mem,
1365 					   pages_addr, last_update);
1366 		if (r)
1367 			goto error_free;
1368 	}
1369 
1370 	/* If the BO is not in its preferred location add it back to
1371 	 * the evicted list so that it gets validated again on the
1372 	 * next command submission.
1373 	 */
1374 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1375 		if (bo->tbo.resource &&
1376 		    !(bo->preferred_domains &
1377 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1378 			amdgpu_vm_bo_evicted(&bo_va->base);
1379 		else
1380 			amdgpu_vm_bo_idle(&bo_va->base);
1381 	} else {
1382 		amdgpu_vm_bo_done(&bo_va->base);
1383 	}
1384 
1385 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1386 	bo_va->cleared = clear;
1387 	bo_va->base.moved = false;
1388 
1389 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1390 		list_for_each_entry(mapping, &bo_va->valids, list)
1391 			trace_amdgpu_vm_bo_mapping(mapping);
1392 	}
1393 
1394 error_free:
1395 	amdgpu_sync_free(&sync);
1396 	return r;
1397 }
1398 
1399 /**
1400  * amdgpu_vm_update_prt_state - update the global PRT state
1401  *
1402  * @adev: amdgpu_device pointer
1403  */
1404 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1405 {
1406 	unsigned long flags;
1407 	bool enable;
1408 
1409 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1410 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1411 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1412 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1413 }
1414 
1415 /**
1416  * amdgpu_vm_prt_get - add a PRT user
1417  *
1418  * @adev: amdgpu_device pointer
1419  */
1420 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1421 {
1422 	if (!adev->gmc.gmc_funcs->set_prt)
1423 		return;
1424 
1425 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1426 		amdgpu_vm_update_prt_state(adev);
1427 }
1428 
1429 /**
1430  * amdgpu_vm_prt_put - drop a PRT user
1431  *
1432  * @adev: amdgpu_device pointer
1433  */
1434 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1435 {
1436 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1437 		amdgpu_vm_update_prt_state(adev);
1438 }
1439 
1440 /**
1441  * amdgpu_vm_prt_cb - callback for updating the PRT status
1442  *
1443  * @fence: fence for the callback
1444  * @_cb: the callback function
1445  */
1446 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1447 {
1448 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1449 
1450 	amdgpu_vm_prt_put(cb->adev);
1451 	kfree(cb);
1452 }
1453 
1454 /**
1455  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1456  *
1457  * @adev: amdgpu_device pointer
1458  * @fence: fence for the callback
1459  */
1460 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1461 				 struct dma_fence *fence)
1462 {
1463 	struct amdgpu_prt_cb *cb;
1464 
1465 	if (!adev->gmc.gmc_funcs->set_prt)
1466 		return;
1467 
1468 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1469 	if (!cb) {
1470 		/* Last resort when we are OOM */
1471 		if (fence)
1472 			dma_fence_wait(fence, false);
1473 
1474 		amdgpu_vm_prt_put(adev);
1475 	} else {
1476 		cb->adev = adev;
1477 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1478 						     amdgpu_vm_prt_cb))
1479 			amdgpu_vm_prt_cb(fence, &cb->cb);
1480 	}
1481 }
1482 
1483 /**
1484  * amdgpu_vm_free_mapping - free a mapping
1485  *
1486  * @adev: amdgpu_device pointer
1487  * @vm: requested vm
1488  * @mapping: mapping to be freed
1489  * @fence: fence of the unmap operation
1490  *
1491  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1492  */
1493 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1494 				   struct amdgpu_vm *vm,
1495 				   struct amdgpu_bo_va_mapping *mapping,
1496 				   struct dma_fence *fence)
1497 {
1498 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1499 		amdgpu_vm_add_prt_cb(adev, fence);
1500 	kfree(mapping);
1501 }
1502 
1503 /**
1504  * amdgpu_vm_prt_fini - finish all prt mappings
1505  *
1506  * @adev: amdgpu_device pointer
1507  * @vm: requested vm
1508  *
1509  * Register a cleanup callback to disable PRT support after VM dies.
1510  */
1511 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1512 {
1513 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1514 	struct dma_resv_iter cursor;
1515 	struct dma_fence *fence;
1516 
1517 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1518 		/* Add a callback for each fence in the reservation object */
1519 		amdgpu_vm_prt_get(adev);
1520 		amdgpu_vm_add_prt_cb(adev, fence);
1521 	}
1522 }
1523 
1524 /**
1525  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1526  *
1527  * @adev: amdgpu_device pointer
1528  * @vm: requested vm
1529  * @fence: optional resulting fence (unchanged if no work needed to be done
1530  * or if an error occurred)
1531  *
1532  * Make sure all freed BOs are cleared in the PT.
1533  * PTs have to be reserved and mutex must be locked!
1534  *
1535  * Returns:
1536  * 0 for success.
1537  *
1538  */
1539 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1540 			  struct amdgpu_vm *vm,
1541 			  struct dma_fence **fence)
1542 {
1543 	struct amdgpu_bo_va_mapping *mapping;
1544 	struct dma_fence *f = NULL;
1545 	struct amdgpu_sync sync;
1546 	int r;
1547 
1548 
1549 	/*
1550 	 * Implicitly sync to command submissions in the same VM before
1551 	 * unmapping.
1552 	 */
1553 	amdgpu_sync_create(&sync);
1554 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1555 			     AMDGPU_SYNC_EQ_OWNER, vm);
1556 	if (r)
1557 		goto error_free;
1558 
1559 	while (!list_empty(&vm->freed)) {
1560 		mapping = list_first_entry(&vm->freed,
1561 			struct amdgpu_bo_va_mapping, list);
1562 		list_del(&mapping->list);
1563 
1564 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1565 					   &sync, mapping->start, mapping->last,
1566 					   0, 0, 0, NULL, NULL, &f);
1567 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1568 		if (r) {
1569 			dma_fence_put(f);
1570 			goto error_free;
1571 		}
1572 	}
1573 
1574 	if (fence && f) {
1575 		dma_fence_put(*fence);
1576 		*fence = f;
1577 	} else {
1578 		dma_fence_put(f);
1579 	}
1580 
1581 error_free:
1582 	amdgpu_sync_free(&sync);
1583 	return r;
1584 
1585 }
1586 
1587 /**
1588  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1589  *
1590  * @adev: amdgpu_device pointer
1591  * @vm: requested vm
1592  * @ticket: optional reservation ticket used to reserve the VM
1593  *
1594  * Make sure all BOs which are moved are updated in the PTs.
1595  *
1596  * Returns:
1597  * 0 for success.
1598  *
1599  * PTs have to be reserved!
1600  */
1601 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1602 			   struct amdgpu_vm *vm,
1603 			   struct ww_acquire_ctx *ticket)
1604 {
1605 	struct amdgpu_bo_va *bo_va;
1606 	struct dma_resv *resv;
1607 	bool clear, unlock;
1608 	int r;
1609 
1610 	spin_lock(&vm->status_lock);
1611 	while (!list_empty(&vm->moved)) {
1612 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1613 					 base.vm_status);
1614 		spin_unlock(&vm->status_lock);
1615 
1616 		/* Per VM BOs never need to be cleared in the page tables */
1617 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1618 		if (r)
1619 			return r;
1620 		spin_lock(&vm->status_lock);
1621 	}
1622 
1623 	while (!list_empty(&vm->invalidated)) {
1624 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1625 					 base.vm_status);
1626 		resv = bo_va->base.bo->tbo.base.resv;
1627 		spin_unlock(&vm->status_lock);
1628 
1629 		/* Try to reserve the BO to avoid clearing its ptes */
1630 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1631 			clear = false;
1632 			unlock = true;
1633 		/* The caller is already holding the reservation lock */
1634 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1635 			clear = false;
1636 			unlock = false;
1637 		/* Somebody else is using the BO right now */
1638 		} else {
1639 			clear = true;
1640 			unlock = false;
1641 		}
1642 
1643 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1644 
1645 		if (unlock)
1646 			dma_resv_unlock(resv);
1647 		if (r)
1648 			return r;
1649 
1650 		/* Remember evicted DMABuf imports in compute VMs for later
1651 		 * validation
1652 		 */
1653 		if (vm->is_compute_context &&
1654 		    drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
1655 		    (!bo_va->base.bo->tbo.resource ||
1656 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1657 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1658 
1659 		spin_lock(&vm->status_lock);
1660 	}
1661 	spin_unlock(&vm->status_lock);
1662 
1663 	return 0;
1664 }
1665 
1666 /**
1667  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1668  *
1669  * @adev: amdgpu_device pointer
1670  * @vm: requested vm
1671  * @flush_type: flush type
1672  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1673  *
1674  * Flush TLB if needed for a compute VM.
1675  *
1676  * Returns:
1677  * 0 for success.
1678  */
1679 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1680 				struct amdgpu_vm *vm,
1681 				uint32_t flush_type,
1682 				uint32_t xcc_mask)
1683 {
1684 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1685 	bool all_hub = false;
1686 	int xcc = 0, r = 0;
1687 
1688 	WARN_ON_ONCE(!vm->is_compute_context);
1689 
1690 	/*
1691 	 * It can be that we race and lose here, but that is extremely unlikely
1692 	 * and the worst thing which could happen is that we flush the changes
1693 	 * into the TLB once more which is harmless.
1694 	 */
1695 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1696 		return 0;
1697 
1698 	if (adev->family == AMDGPU_FAMILY_AI ||
1699 	    adev->family == AMDGPU_FAMILY_RV)
1700 		all_hub = true;
1701 
1702 	for_each_inst(xcc, xcc_mask) {
1703 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1704 						   all_hub, xcc);
1705 		if (r)
1706 			break;
1707 	}
1708 	return r;
1709 }
1710 
1711 /**
1712  * amdgpu_vm_bo_add - add a bo to a specific vm
1713  *
1714  * @adev: amdgpu_device pointer
1715  * @vm: requested vm
1716  * @bo: amdgpu buffer object
1717  *
1718  * Add @bo into the requested vm.
1719  * Add @bo to the list of bos associated with the vm
1720  *
1721  * Returns:
1722  * Newly added bo_va or NULL for failure
1723  *
1724  * Object has to be reserved!
1725  */
1726 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1727 				      struct amdgpu_vm *vm,
1728 				      struct amdgpu_bo *bo)
1729 {
1730 	struct amdgpu_bo_va *bo_va;
1731 
1732 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1733 	if (bo_va == NULL) {
1734 		return NULL;
1735 	}
1736 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1737 
1738 	bo_va->ref_count = 1;
1739 	bo_va->last_pt_update = dma_fence_get_stub();
1740 	INIT_LIST_HEAD(&bo_va->valids);
1741 	INIT_LIST_HEAD(&bo_va->invalids);
1742 
1743 	if (!bo)
1744 		return bo_va;
1745 
1746 	dma_resv_assert_held(bo->tbo.base.resv);
1747 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1748 		bo_va->is_xgmi = true;
1749 		/* Power up XGMI if it can be potentially used */
1750 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1751 	}
1752 
1753 	return bo_va;
1754 }
1755 
1756 
1757 /**
1758  * amdgpu_vm_bo_insert_map - insert a new mapping
1759  *
1760  * @adev: amdgpu_device pointer
1761  * @bo_va: bo_va to store the address
1762  * @mapping: the mapping to insert
1763  *
1764  * Insert a new mapping into all structures.
1765  */
1766 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1767 				    struct amdgpu_bo_va *bo_va,
1768 				    struct amdgpu_bo_va_mapping *mapping)
1769 {
1770 	struct amdgpu_vm *vm = bo_va->base.vm;
1771 	struct amdgpu_bo *bo = bo_va->base.bo;
1772 
1773 	mapping->bo_va = bo_va;
1774 	list_add(&mapping->list, &bo_va->invalids);
1775 	amdgpu_vm_it_insert(mapping, &vm->va);
1776 
1777 	if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1778 		amdgpu_vm_prt_get(adev);
1779 
1780 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1781 		amdgpu_vm_bo_moved(&bo_va->base);
1782 
1783 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1784 }
1785 
1786 /* Validate operation parameters to prevent potential abuse */
1787 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1788 					  struct amdgpu_bo *bo,
1789 					  uint64_t saddr,
1790 					  uint64_t offset,
1791 					  uint64_t size)
1792 {
1793 	uint64_t tmp, lpfn;
1794 
1795 	if (saddr & AMDGPU_GPU_PAGE_MASK
1796 	    || offset & AMDGPU_GPU_PAGE_MASK
1797 	    || size & AMDGPU_GPU_PAGE_MASK)
1798 		return -EINVAL;
1799 
1800 	if (check_add_overflow(saddr, size, &tmp)
1801 	    || check_add_overflow(offset, size, &tmp)
1802 	    || size == 0 /* which also leads to end < begin */)
1803 		return -EINVAL;
1804 
1805 	/* make sure the object fits at this offset */
1806 	if (bo && offset + size > amdgpu_bo_size(bo))
1807 		return -EINVAL;
1808 
1809 	/* Ensure the last pfn does not exceed max_pfn */
1810 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1811 	if (lpfn >= adev->vm_manager.max_pfn)
1812 		return -EINVAL;
1813 
1814 	return 0;
1815 }
1816 
1817 /**
1818  * amdgpu_vm_bo_map - map bo inside a vm
1819  *
1820  * @adev: amdgpu_device pointer
1821  * @bo_va: bo_va to store the address
1822  * @saddr: where to map the BO
1823  * @offset: requested offset in the BO
1824  * @size: size of the mapping in bytes
1825  * @flags: attributes of pages (read/write/valid/etc.)
1826  *
1827  * Add a mapping of the BO at the specified addr into the VM.
1828  *
1829  * Returns:
1830  * 0 for success, error for failure.
1831  *
1832  * Object has to be reserved and unreserved outside!
1833  */
1834 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1835 		     struct amdgpu_bo_va *bo_va,
1836 		     uint64_t saddr, uint64_t offset,
1837 		     uint64_t size, uint32_t flags)
1838 {
1839 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1840 	struct amdgpu_bo *bo = bo_va->base.bo;
1841 	struct amdgpu_vm *vm = bo_va->base.vm;
1842 	uint64_t eaddr;
1843 	int r;
1844 
1845 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1846 	if (r)
1847 		return r;
1848 
1849 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1850 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1851 
1852 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1853 	if (tmp) {
1854 		/* bo and tmp overlap, invalid addr */
1855 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1856 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1857 			tmp->start, tmp->last + 1);
1858 		return -EINVAL;
1859 	}
1860 
1861 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1862 	if (!mapping)
1863 		return -ENOMEM;
1864 
1865 	mapping->start = saddr;
1866 	mapping->last = eaddr;
1867 	mapping->offset = offset;
1868 	mapping->flags = flags;
1869 
1870 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1871 
1872 	return 0;
1873 }
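
/*
 * Illustrative sketch (not part of the driver): mapping a whole BO at a
 * GPU-page-aligned virtual address, with the BO reserved and "bo_va" obtained
 * from amdgpu_vm_bo_add(); "va_addr" and the flags are example choices.
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_VM_PAGE_READABLE |
 *			     AMDGPU_VM_PAGE_WRITEABLE);
 */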
1874 
1875 /**
1876  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1877  *
1878  * @adev: amdgpu_device pointer
1879  * @bo_va: bo_va to store the address
1880  * @saddr: where to map the BO
1881  * @offset: requested offset in the BO
1882  * @size: size of the mapping in bytes
1883  * @flags: attributes of pages (read/write/valid/etc.)
1884  *
1885  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1886  * mappings as we do so.
1887  *
1888  * Returns:
1889  * 0 for success, error for failure.
1890  *
1891  * Object has to be reserved and unreserved outside!
1892  */
1893 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1894 			     struct amdgpu_bo_va *bo_va,
1895 			     uint64_t saddr, uint64_t offset,
1896 			     uint64_t size, uint32_t flags)
1897 {
1898 	struct amdgpu_bo_va_mapping *mapping;
1899 	struct amdgpu_bo *bo = bo_va->base.bo;
1900 	uint64_t eaddr;
1901 	int r;
1902 
1903 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1904 	if (r)
1905 		return r;
1906 
1907 	/* Allocate all the needed memory */
1908 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1909 	if (!mapping)
1910 		return -ENOMEM;
1911 
1912 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1913 	if (r) {
1914 		kfree(mapping);
1915 		return r;
1916 	}
1917 
1918 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1919 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1920 
1921 	mapping->start = saddr;
1922 	mapping->last = eaddr;
1923 	mapping->offset = offset;
1924 	mapping->flags = flags;
1925 
1926 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1927 
1928 	return 0;
1929 }
1930 
1931 /**
1932  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1933  *
1934  * @adev: amdgpu_device pointer
1935  * @bo_va: bo_va to remove the address from
1936  * @saddr: where the BO is mapped
1937  *
1938  * Remove a mapping of the BO at the specified addr from the VM.
1939  *
1940  * Returns:
1941  * 0 for success, error for failure.
1942  *
1943  * Object has to be reserved and unreserved outside!
1944  */
1945 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1946 		       struct amdgpu_bo_va *bo_va,
1947 		       uint64_t saddr)
1948 {
1949 	struct amdgpu_bo_va_mapping *mapping;
1950 	struct amdgpu_vm *vm = bo_va->base.vm;
1951 	bool valid = true;
1952 	int r;
1953 
1954 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1955 
1956 	list_for_each_entry(mapping, &bo_va->valids, list) {
1957 		if (mapping->start == saddr)
1958 			break;
1959 	}
1960 
1961 	if (&mapping->list == &bo_va->valids) {
1962 		valid = false;
1963 
1964 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1965 			if (mapping->start == saddr)
1966 				break;
1967 		}
1968 
1969 		if (&mapping->list == &bo_va->invalids)
1970 			return -ENOENT;
1971 	}
1972 
1973 	/* It is unlikely that the userq mapping has not been idled by the
1974 	 * time userspace issues the GEM unmap IOCTL, except when the unmap
1975 	 * is forced from user space.
1976 	 */
1977 	if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
1978 		r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
1979 		if (unlikely(r == -EBUSY))
1980 			dev_warn_once(adev->dev,
1981 				      "Attempt to unmap an active userq buffer\n");
1982 	}
1983 
1984 	list_del(&mapping->list);
1985 	amdgpu_vm_it_remove(mapping, &vm->va);
1986 	mapping->bo_va = NULL;
1987 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1988 
1989 	if (valid)
1990 		list_add(&mapping->list, &vm->freed);
1991 	else
1992 		amdgpu_vm_free_mapping(adev, vm, mapping,
1993 				       bo_va->last_pt_update);
1994 
1995 	return 0;
1996 }
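
/*
 * Illustrative note (not part of the driver): @saddr must be the exact start
 * address previously passed to amdgpu_vm_bo_map() or
 * amdgpu_vm_bo_replace_map(); an address that merely falls inside a mapping
 * does not match and the call returns -ENOENT.
 */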
1997 
1998 /**
1999  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2000  *
2001  * @adev: amdgpu_device pointer
2002  * @vm: VM structure to use
2003  * @saddr: start of the range
2004  * @size: size of the range
2005  *
2006  * Remove all mappings in a range, splitting them as appropriate.
2007  *
2008  * Returns:
2009  * 0 for success, error for failure.
2010  */
2011 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2012 				struct amdgpu_vm *vm,
2013 				uint64_t saddr, uint64_t size)
2014 {
2015 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2016 	LIST_HEAD(removed);
2017 	uint64_t eaddr;
2018 	int r;
2019 
2020 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
2021 	if (r)
2022 		return r;
2023 
2024 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2025 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
2026 
2027 	/* Allocate all the needed memory */
2028 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2029 	if (!before)
2030 		return -ENOMEM;
2031 	INIT_LIST_HEAD(&before->list);
2032 
2033 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2034 	if (!after) {
2035 		kfree(before);
2036 		return -ENOMEM;
2037 	}
2038 	INIT_LIST_HEAD(&after->list);
2039 
2040 	/* Now gather all removed mappings */
2041 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2042 	while (tmp) {
2043 		/* Remember mapping split at the start */
2044 		if (tmp->start < saddr) {
2045 			before->start = tmp->start;
2046 			before->last = saddr - 1;
2047 			before->offset = tmp->offset;
2048 			before->flags = tmp->flags;
2049 			before->bo_va = tmp->bo_va;
2050 			list_add(&before->list, &tmp->bo_va->invalids);
2051 		}
2052 
2053 		/* Remember mapping split at the end */
2054 		if (tmp->last > eaddr) {
2055 			after->start = eaddr + 1;
2056 			after->last = tmp->last;
2057 			after->offset = tmp->offset;
2058 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2059 			after->flags = tmp->flags;
2060 			after->bo_va = tmp->bo_va;
2061 			list_add(&after->list, &tmp->bo_va->invalids);
2062 		}
2063 
2064 		list_del(&tmp->list);
2065 		list_add(&tmp->list, &removed);
2066 
2067 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2068 	}
2069 
2070 	/* And free them up */
2071 	list_for_each_entry_safe(tmp, next, &removed, list) {
2072 		amdgpu_vm_it_remove(tmp, &vm->va);
2073 		list_del(&tmp->list);
2074 
2075 		if (tmp->start < saddr)
2076 			tmp->start = saddr;
2077 		if (tmp->last > eaddr)
2078 			tmp->last = eaddr;
2079 
2080 		tmp->bo_va = NULL;
2081 		list_add(&tmp->list, &vm->freed);
2082 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2083 	}
2084 
2085 	/* Insert partial mapping before the range */
2086 	if (!list_empty(&before->list)) {
2087 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2088 
2089 		amdgpu_vm_it_insert(before, &vm->va);
2090 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2091 			amdgpu_vm_prt_get(adev);
2092 
2093 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2094 		    !before->bo_va->base.moved)
2095 			amdgpu_vm_bo_moved(&before->bo_va->base);
2096 	} else {
2097 		kfree(before);
2098 	}
2099 
2100 	/* Insert partial mapping after the range */
2101 	if (!list_empty(&after->list)) {
2102 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2103 
2104 		amdgpu_vm_it_insert(after, &vm->va);
2105 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2106 			amdgpu_vm_prt_get(adev);
2107 
2108 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2109 		    !after->bo_va->base.moved)
2110 			amdgpu_vm_bo_moved(&after->bo_va->base);
2111 	} else {
2112 		kfree(after);
2113 	}
2114 
2115 	return 0;
2116 }
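
/*
 * Worked example (illustrative): if an existing mapping covers GPU pages
 * [0x1000, 0x1fff] and the cleared byte range translates to pages
 * [0x1400, 0x17ff], the mapping is replaced by a "before" remainder
 * [0x1000, 0x13ff] and an "after" remainder [0x1800, 0x1fff], with the
 * "after" offset advanced by the number of skipped pages converted to bytes.
 */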
2117 
2118 /**
2119  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2120  *
2121  * @vm: the requested VM
2122  * @addr: the address
2123  *
2124  * Find a mapping by its address.
2125  *
2126  * Returns:
2127  * The amdgpu_bo_va_mapping matching @addr, or NULL if none was found.
2128  *
2129  */
2130 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2131 							 uint64_t addr)
2132 {
2133 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2134 }
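
/*
 * Illustrative sketch (not part of the driver): the interval tree is keyed in
 * GPU pages, so a hypothetical caller holding a byte address converts first;
 * "byte_addr" is a placeholder.
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm,
 *					      byte_addr / AMDGPU_GPU_PAGE_SIZE);
 */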
2135 
2136 /**
2137  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2138  *
2139  * @vm: the requested vm
2140  * @ticket: CS ticket
2141  *
2142  * Trace all mappings of BOs reserved during a command submission.
2143  */
2144 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2145 {
2146 	struct amdgpu_bo_va_mapping *mapping;
2147 
2148 	if (!trace_amdgpu_vm_bo_cs_enabled())
2149 		return;
2150 
2151 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2152 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2153 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2154 			struct amdgpu_bo *bo;
2155 
2156 			bo = mapping->bo_va->base.bo;
2157 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2158 			    ticket)
2159 				continue;
2160 		}
2161 
2162 		trace_amdgpu_vm_bo_cs(mapping);
2163 	}
2164 }
2165 
2166 /**
2167  * amdgpu_vm_bo_del - remove a bo from a specific vm
2168  *
2169  * @adev: amdgpu_device pointer
2170  * @bo_va: requested bo_va
2171  *
2172  * Remove @bo_va->bo from the requested vm.
2173  *
2174  * Object has to be reserved!
2175  */
2176 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2177 		      struct amdgpu_bo_va *bo_va)
2178 {
2179 	struct amdgpu_bo_va_mapping *mapping, *next;
2180 	struct amdgpu_bo *bo = bo_va->base.bo;
2181 	struct amdgpu_vm *vm = bo_va->base.vm;
2182 	struct amdgpu_vm_bo_base **base;
2183 
2184 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2185 
2186 	if (bo) {
2187 		dma_resv_assert_held(bo->tbo.base.resv);
2188 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2189 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2190 
2191 		for (base = &bo_va->base.bo->vm_bo; *base;
2192 		     base = &(*base)->next) {
2193 			if (*base != &bo_va->base)
2194 				continue;
2195 
2196 			amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2197 			*base = bo_va->base.next;
2198 			break;
2199 		}
2200 	}
2201 
2202 	spin_lock(&vm->status_lock);
2203 	list_del(&bo_va->base.vm_status);
2204 	spin_unlock(&vm->status_lock);
2205 
2206 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2207 		list_del(&mapping->list);
2208 		amdgpu_vm_it_remove(mapping, &vm->va);
2209 		mapping->bo_va = NULL;
2210 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2211 		list_add(&mapping->list, &vm->freed);
2212 	}
2213 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2214 		list_del(&mapping->list);
2215 		amdgpu_vm_it_remove(mapping, &vm->va);
2216 		amdgpu_vm_free_mapping(adev, vm, mapping,
2217 				       bo_va->last_pt_update);
2218 	}
2219 
2220 	dma_fence_put(bo_va->last_pt_update);
2221 
2222 	if (bo && bo_va->is_xgmi)
2223 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2224 
2225 	kfree(bo_va);
2226 }
2227 
2228 /**
2229  * amdgpu_vm_evictable - check if we can evict a VM
2230  *
2231  * @bo: A page table of the VM.
2232  *
2233  * Check if it is possible to evict a VM.
2234  */
2235 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2236 {
2237 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2238 
2239 	/* Page tables of a destroyed VM can go away immediately */
2240 	if (!bo_base || !bo_base->vm)
2241 		return true;
2242 
2243 	/* Don't evict VM page tables while they are busy */
2244 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2245 		return false;
2246 
2247 	/* Try to block ongoing updates */
2248 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2249 		return false;
2250 
2251 	/* Don't evict VM page tables while they are updated */
2252 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2253 		amdgpu_vm_eviction_unlock(bo_base->vm);
2254 		return false;
2255 	}
2256 
2257 	bo_base->vm->evicting = true;
2258 	amdgpu_vm_eviction_unlock(bo_base->vm);
2259 	return true;
2260 }
2261 
2262 /**
2263  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2264  *
2265  * @bo: amdgpu buffer object
2266  * @evicted: is the BO evicted
2267  *
2268  * Mark @bo as invalid.
2269  */
2270 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2271 {
2272 	struct amdgpu_vm_bo_base *bo_base;
2273 
2274 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2275 		struct amdgpu_vm *vm = bo_base->vm;
2276 
2277 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2278 			amdgpu_vm_bo_evicted(bo_base);
2279 			continue;
2280 		}
2281 
2282 		if (bo_base->moved)
2283 			continue;
2284 		bo_base->moved = true;
2285 
2286 		if (bo->tbo.type == ttm_bo_type_kernel)
2287 			amdgpu_vm_bo_relocated(bo_base);
2288 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2289 			amdgpu_vm_bo_moved(bo_base);
2290 		else
2291 			amdgpu_vm_bo_invalidated(bo_base);
2292 	}
2293 }
2294 
2295 /**
2296  * amdgpu_vm_bo_move - handle BO move
2297  *
2298  * @bo: amdgpu buffer object
2299  * @new_mem: the new placement of the BO move
2300  * @evicted: is the BO evicted
2301  *
2302  * Update the memory stats for the new placement and mark @bo as invalid.
2303  */
2304 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2305 		       bool evicted)
2306 {
2307 	struct amdgpu_vm_bo_base *bo_base;
2308 
2309 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2310 		struct amdgpu_vm *vm = bo_base->vm;
2311 
2312 		spin_lock(&vm->status_lock);
2313 		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2314 		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2315 		spin_unlock(&vm->status_lock);
2316 	}
2317 
2318 	amdgpu_vm_bo_invalidate(bo, evicted);
2319 }
2320 
2321 /**
2322  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2323  *
2324  * @vm_size: VM size
2325  *
2326  * Returns:
2327  * VM page table size as a power of two
2328  */
2329 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2330 {
2331 	/* Total bits covered by PD + PTs */
2332 	unsigned bits = ilog2(vm_size) + 18;
2333 
2334 	/* Make sure the PD is 4K in size up to an 8GB address space.
2335 	 * Above that, split the bits equally between PD and PTs. */
2336 	if (vm_size <= 8)
2337 		return bits - 9;
2338 	else
2339 		return (bits + 3) / 2;
2340 }
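
/*
 * Worked example (illustrative): for vm_size = 256 GB, bits = ilog2(256) +
 * 18 = 26; since 256 > 8 the result is (26 + 3) / 2 = 14 bits of block size,
 * leaving 12 bits for the PD. For vm_size = 8 GB, bits = 21 and the result is
 * 21 - 9 = 12, keeping the PD at 9 bits (512 entries, i.e. 4K).
 */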
2341 
2342 /**
2343  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2344  *
2345  * @adev: amdgpu_device pointer
2346  * @min_vm_size: the minimum vm size in GB when the size is set to auto
2347  * @fragment_size_default: Default PTE fragment size
2348  * @max_level: max VMPT level
2349  * @max_bits: max address space size in bits
2350  *
2351  */
2352 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2353 			   uint32_t fragment_size_default, unsigned max_level,
2354 			   unsigned max_bits)
2355 {
2356 	unsigned int max_size = 1 << (max_bits - 30);
2357 	unsigned int vm_size;
2358 	uint64_t tmp;
2359 
2360 	/* adjust vm size first */
2361 	if (amdgpu_vm_size != -1) {
2362 		vm_size = amdgpu_vm_size;
2363 		if (vm_size > max_size) {
2364 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2365 				 amdgpu_vm_size, max_size);
2366 			vm_size = max_size;
2367 		}
2368 	} else {
2369 		struct sysinfo si;
2370 		unsigned int phys_ram_gb;
2371 
2372 		/* Optimal VM size depends on the amount of physical
2373 		 * RAM available. Underlying requirements and
2374 		 * assumptions:
2375 		 *
2376 		 *  - Need to map system memory and VRAM from all GPUs
2377 		 *     - VRAM from other GPUs not known here
2378 		 *     - Assume VRAM <= system memory
2379 		 *  - On GFX8 and older, VM space can be segmented for
2380 		 *    different MTYPEs
2381 		 *  - Need to allow room for fragmentation, guard pages etc.
2382 		 *
2383 		 * This adds up to a rough guess of system memory x3.
2384 		 * Round up to power of two to maximize the available
2385 		 * VM size with the given page table size.
2386 		 */
2387 		si_meminfo(&si);
2388 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2389 			       (1 << 30) - 1) >> 30;
2390 		vm_size = roundup_pow_of_two(
2391 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2392 	}
2393 
2394 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2395 
2396 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2397 	if (amdgpu_vm_block_size != -1)
2398 		tmp >>= amdgpu_vm_block_size - 9;
2399 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2400 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2401 	switch (adev->vm_manager.num_level) {
2402 	case 3:
2403 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2404 		break;
2405 	case 2:
2406 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2407 		break;
2408 	case 1:
2409 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2410 		break;
2411 	default:
2412 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2413 	}
2414 	/* block size depends on vm size and hw setup */
2415 	if (amdgpu_vm_block_size != -1)
2416 		adev->vm_manager.block_size =
2417 			min((unsigned)amdgpu_vm_block_size, max_bits
2418 			    - AMDGPU_GPU_PAGE_SHIFT
2419 			    - 9 * adev->vm_manager.num_level);
2420 	else if (adev->vm_manager.num_level > 1)
2421 		adev->vm_manager.block_size = 9;
2422 	else
2423 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2424 
2425 	if (amdgpu_vm_fragment_size == -1)
2426 		adev->vm_manager.fragment_size = fragment_size_default;
2427 	else
2428 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2429 
2430 	dev_info(
2431 		adev->dev,
2432 		"vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2433 		vm_size, adev->vm_manager.num_level + 1,
2434 		adev->vm_manager.block_size, adev->vm_manager.fragment_size);
2435 }
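
/*
 * Worked example (illustrative, assuming amdgpu_vm_size and
 * amdgpu_vm_block_size are left at -1 and the clamp limits are not hit): with
 * 48 GiB of system RAM, phys_ram_gb * 3 = 144 rounds up to a 256 GiB VM, so
 * max_pfn = 256 << 18 = 2^26 pages. fls64(2^26) - 1 = 26 and
 * DIV_ROUND_UP(26, 9) - 1 = 2, so num_level becomes 2 (reported as 3 levels,
 * subject to max_level) with the default 9-bit block size.
 */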
2436 
2437 /**
2438  * amdgpu_vm_wait_idle - wait for the VM to become idle
2439  *
2440  * @vm: VM object to wait for
2441  * @timeout: timeout to wait for VM to become idle
2442  */
2443 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2444 {
2445 	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
2446 	if (timeout <= 0)
2447 		return timeout;
2448 
2449 	return drm_sched_entity_flush(&vm->delayed, timeout);
2450 }
2451 
2452 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2453 {
2454 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2455 
2456 	kfree(ti);
2457 }
2458 
2459 static inline struct amdgpu_vm *
2460 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2461 {
2462 	struct amdgpu_vm *vm;
2463 	unsigned long flags;
2464 
2465 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2466 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2467 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2468 
2469 	return vm;
2470 }
2471 
2472 /**
2473  * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2474  *
2475  * @task_info: task_info struct under discussion.
2476  *
2477  * Frees the vm task_info ptr at the last put.
2478  */
2479 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2480 {
2481 	if (task_info)
2482 		kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2483 }
2484 
2485 /**
2486  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2487  *
2488  * @vm: VM to get info from
2489  *
2490  * Returns the reference counted task_info structure, which must be
2491  * referenced down with amdgpu_vm_put_task_info.
2492  */
2493 struct amdgpu_task_info *
2494 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2495 {
2496 	struct amdgpu_task_info *ti = NULL;
2497 
2498 	if (vm) {
2499 		ti = vm->task_info;
2500 		kref_get(&vm->task_info->refcount);
2501 	}
2502 
2503 	return ti;
2504 }
2505 
2506 /**
2507  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2508  *
2509  * @adev: amdgpu_device pointer
2510  * @pasid: PASID identifier for VM
2511  *
2512  * Returns the reference counted task_info structure, which must be
2513  * referenced down with amdgpu_vm_put_task_info.
2514  */
2515 struct amdgpu_task_info *
2516 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2517 {
2518 	return amdgpu_vm_get_task_info_vm(
2519 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2520 }
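
/*
 * Illustrative sketch (not part of the driver): task info is reference
 * counted, so a hypothetical fault handler pairs the get with a put; "adev"
 * and "pasid" are placeholders.
 *
 *	struct amdgpu_task_info *ti;
 *
 *	ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *	if (ti) {
 *		amdgpu_vm_print_task_info(adev, ti);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 */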
2521 
2522 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2523 {
2524 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2525 	if (!vm->task_info)
2526 		return -ENOMEM;
2527 
2528 	kref_init(&vm->task_info->refcount);
2529 	return 0;
2530 }
2531 
2532 /**
2533  * amdgpu_vm_set_task_info - Sets VMs task info.
2534  *
2535  * @vm: vm for which to set the info
2536  */
2537 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2538 {
2539 	if (!vm->task_info)
2540 		return;
2541 
2542 	if (vm->task_info->task.pid == current->pid)
2543 		return;
2544 
2545 	vm->task_info->task.pid = current->pid;
2546 	get_task_comm(vm->task_info->task.comm, current);
2547 
2548 	if (current->group_leader->mm != current->mm)
2549 		return;
2550 
2551 	vm->task_info->tgid = current->group_leader->pid;
2552 	get_task_comm(vm->task_info->process_name, current->group_leader);
2553 }
2554 
2555 /**
2556  * amdgpu_vm_init - initialize a vm instance
2557  *
2558  * @adev: amdgpu_device pointer
2559  * @vm: requested vm
2560  * @xcp_id: GPU partition selection id
2561  * @pasid: the pasid the VM is using on this GPU
2562  *
2563  * Init @vm fields.
2564  *
2565  * Returns:
2566  * 0 for success, error for failure.
2567  */
2568 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2569 		   int32_t xcp_id, uint32_t pasid)
2570 {
2571 	struct amdgpu_bo *root_bo;
2572 	struct amdgpu_bo_vm *root;
2573 	int r, i;
2574 
2575 	vm->va = RB_ROOT_CACHED;
2576 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2577 		vm->reserved_vmid[i] = NULL;
2578 	INIT_LIST_HEAD(&vm->evicted);
2579 	INIT_LIST_HEAD(&vm->evicted_user);
2580 	INIT_LIST_HEAD(&vm->relocated);
2581 	INIT_LIST_HEAD(&vm->moved);
2582 	INIT_LIST_HEAD(&vm->idle);
2583 	INIT_LIST_HEAD(&vm->invalidated);
2584 	spin_lock_init(&vm->status_lock);
2585 	INIT_LIST_HEAD(&vm->freed);
2586 	INIT_LIST_HEAD(&vm->done);
2587 	INIT_KFIFO(vm->faults);
2588 
2589 	r = amdgpu_vm_init_entities(adev, vm);
2590 	if (r)
2591 		return r;
2592 
2593 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2594 
2595 	vm->is_compute_context = false;
2596 
2597 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2598 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2599 
2600 	dev_dbg(adev->dev, "VM update mode is %s\n",
2601 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2602 	WARN_ONCE((vm->use_cpu_for_update &&
2603 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2604 		  "CPU update of VM recommended only for large BAR system\n");
2605 
2606 	if (vm->use_cpu_for_update)
2607 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2608 	else
2609 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2610 
2611 	vm->last_update = dma_fence_get_stub();
2612 	vm->last_unlocked = dma_fence_get_stub();
2613 	vm->last_tlb_flush = dma_fence_get_stub();
2614 	vm->generation = amdgpu_vm_generation(adev, NULL);
2615 
2616 	mutex_init(&vm->eviction_lock);
2617 	vm->evicting = false;
2618 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2619 
2620 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2621 				false, &root, xcp_id);
2622 	if (r)
2623 		goto error_free_delayed;
2624 
2625 	root_bo = amdgpu_bo_ref(&root->bo);
2626 	r = amdgpu_bo_reserve(root_bo, true);
2627 	if (r) {
2628 		amdgpu_bo_unref(&root_bo);
2629 		goto error_free_delayed;
2630 	}
2631 
2632 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2633 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2634 	if (r)
2635 		goto error_free_root;
2636 
2637 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2638 	if (r)
2639 		goto error_free_root;
2640 
2641 	r = amdgpu_vm_create_task_info(vm);
2642 	if (r)
2643 		dev_dbg(adev->dev, "Failed to create task info for VM\n");
2644 
2645 	/* Store new PASID in XArray (if non-zero) */
2646 	if (pasid != 0) {
2647 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
2648 		if (r < 0)
2649 			goto error_free_root;
2650 
2651 		vm->pasid = pasid;
2652 	}
2653 
2654 	amdgpu_bo_unreserve(vm->root.bo);
2655 	amdgpu_bo_unref(&root_bo);
2656 
2657 	return 0;
2658 
2659 error_free_root:
2660 	/* If PASID was partially set, erase it from XArray before failing */
2661 	if (vm->pasid != 0) {
2662 		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2663 		vm->pasid = 0;
2664 	}
2665 	amdgpu_vm_pt_free_root(adev, vm);
2666 	amdgpu_bo_unreserve(vm->root.bo);
2667 	amdgpu_bo_unref(&root_bo);
2668 
2669 error_free_delayed:
2670 	dma_fence_put(vm->last_tlb_flush);
2671 	dma_fence_put(vm->last_unlocked);
2672 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2673 	amdgpu_vm_fini_entities(vm);
2674 
2675 	return r;
2676 }
2677 
2678 /**
2679  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2680  *
2681  * @adev: amdgpu_device pointer
2682  * @vm: requested vm
2683  *
2684  * This only works on GFX VMs that don't have any BOs added and no
2685  * page tables allocated yet.
2686  *
2687  * Changes the following VM parameters:
2688  * - use_cpu_for_update
2689  *
2690  * Switches the page table update functions and, when switching to CPU
2691  * updates, syncs with the last SDMA update and maps the page tables for
2692  * CPU access.
2693  *
2694  * Returns:
2695  * 0 for success, -errno for errors.
2696  */
2697 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2698 {
2699 	int r;
2700 
2701 	r = amdgpu_bo_reserve(vm->root.bo, true);
2702 	if (r)
2703 		return r;
2704 
2705 	/* Update VM state */
2706 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2707 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2708 	dev_dbg(adev->dev, "VM update mode is %s\n",
2709 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2710 	WARN_ONCE((vm->use_cpu_for_update &&
2711 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2712 		  "CPU update of VM recommended only for large BAR system\n");
2713 
2714 	if (vm->use_cpu_for_update) {
2715 		/* Sync with last SDMA update/clear before switching to CPU */
2716 		r = amdgpu_bo_sync_wait(vm->root.bo,
2717 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2718 		if (r)
2719 			goto unreserve_bo;
2720 
2721 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2722 		r = amdgpu_vm_pt_map_tables(adev, vm);
2723 		if (r)
2724 			goto unreserve_bo;
2725 
2726 	} else {
2727 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2728 	}
2729 
2730 	dma_fence_put(vm->last_update);
2731 	vm->last_update = dma_fence_get_stub();
2732 	vm->is_compute_context = true;
2733 
2734 unreserve_bo:
2735 	amdgpu_bo_unreserve(vm->root.bo);
2736 	return r;
2737 }
2738 
2739 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2740 {
2741 	for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2742 		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2743 		      vm->stats[i].evicted == 0))
2744 			return false;
2745 	}
2746 	return true;
2747 }
2748 
2749 /**
2750  * amdgpu_vm_fini - tear down a vm instance
2751  *
2752  * @adev: amdgpu_device pointer
2753  * @vm: requested vm
2754  *
2755  * Tear down @vm.
2756  * Unbind the VM and remove all bos from the vm bo list
2757  */
2758 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2759 {
2760 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2761 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2762 	struct amdgpu_bo *root;
2763 	unsigned long flags;
2764 	int i;
2765 
2766 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2767 
2768 	root = amdgpu_bo_ref(vm->root.bo);
2769 	amdgpu_bo_reserve(root, true);
2770 	/* Remove PASID mapping before destroying VM */
2771 	if (vm->pasid != 0) {
2772 		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2773 		vm->pasid = 0;
2774 	}
2775 	dma_fence_wait(vm->last_unlocked, false);
2776 	dma_fence_put(vm->last_unlocked);
2777 	dma_fence_wait(vm->last_tlb_flush, false);
2778 	/* Make sure that all fence callbacks have completed */
2779 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2780 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2781 	dma_fence_put(vm->last_tlb_flush);
2782 
2783 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2784 		if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
2785 			amdgpu_vm_prt_fini(adev, vm);
2786 			prt_fini_needed = false;
2787 		}
2788 
2789 		list_del(&mapping->list);
2790 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2791 	}
2792 
2793 	amdgpu_vm_pt_free_root(adev, vm);
2794 	amdgpu_bo_unreserve(root);
2795 	amdgpu_bo_unref(&root);
2796 	WARN_ON(vm->root.bo);
2797 
2798 	amdgpu_vm_fini_entities(vm);
2799 
2800 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2801 		dev_err(adev->dev, "still active bo inside vm\n");
2802 
2803 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2804 					     &vm->va.rb_root, rb) {
2805 		/* Don't remove the mapping here, we don't want to trigger a
2806 		 * rebalance and the tree is about to be destroyed anyway.
2807 		 */
2808 		list_del(&mapping->list);
2809 		kfree(mapping);
2810 	}
2811 
2812 	dma_fence_put(vm->last_update);
2813 
2814 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2815 		amdgpu_vmid_free_reserved(adev, vm, i);
2816 	}
2817 
2818 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2819 
2820 	if (!amdgpu_vm_stats_is_zero(vm)) {
2821 		struct amdgpu_task_info *ti = vm->task_info;
2822 
2823 		dev_warn(adev->dev,
2824 			 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2825 			 ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
2826 	}
2827 
2828 	amdgpu_vm_put_task_info(vm->task_info);
2829 }
2830 
2831 /**
2832  * amdgpu_vm_manager_init - init the VM manager
2833  *
2834  * @adev: amdgpu_device pointer
2835  *
2836  * Initialize the VM manager structures
2837  */
2838 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2839 {
2840 	unsigned i;
2841 
2842 	/* Concurrent flushes are only possible starting with Vega10 and
2843 	 * are broken on Navi10 and Navi14.
2844 	 */
2845 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2846 					      adev->asic_type == CHIP_NAVI10 ||
2847 					      adev->asic_type == CHIP_NAVI14);
2848 	amdgpu_vmid_mgr_init(adev);
2849 
2850 	adev->vm_manager.fence_context =
2851 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2852 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2853 		adev->vm_manager.seqno[i] = 0;
2854 
2855 	spin_lock_init(&adev->vm_manager.prt_lock);
2856 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2857 
2858 	/* Unless overridden by the user, compute VM page tables are only
2859 	 * updated by the CPU on large-BAR systems.
2860 	 */
2861 #ifdef CONFIG_X86_64
2862 	if (amdgpu_vm_update_mode == -1) {
2863 		/* For ASICs with VF MMIO access protection,
2864 		 * avoid using the CPU for VM table updates.
2865 		 */
2866 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2867 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2868 			adev->vm_manager.vm_update_mode =
2869 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2870 		else
2871 			adev->vm_manager.vm_update_mode = 0;
2872 	} else
2873 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2874 #else
2875 	adev->vm_manager.vm_update_mode = 0;
2876 #endif
2877 
2878 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2879 }
2880 
2881 /**
2882  * amdgpu_vm_manager_fini - cleanup VM manager
2883  *
2884  * @adev: amdgpu_device pointer
2885  *
2886  * Cleanup the VM manager and free resources.
2887  */
2888 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2889 {
2890 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2891 	xa_destroy(&adev->vm_manager.pasids);
2892 
2893 	amdgpu_vmid_mgr_fini(adev);
2894 }
2895 
2896 /**
2897  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2898  *
2899  * @dev: drm device pointer
2900  * @data: drm_amdgpu_vm
2901  * @filp: drm file pointer
2902  *
2903  * Returns:
2904  * 0 for success, -errno for errors.
2905  */
2906 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2907 {
2908 	union drm_amdgpu_vm *args = data;
2909 	struct amdgpu_device *adev = drm_to_adev(dev);
2910 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2911 	struct amdgpu_vm *vm = &fpriv->vm;
2912 
2913 	/* No valid flags defined yet */
2914 	if (args->in.flags)
2915 		return -EINVAL;
2916 
2917 	switch (args->in.op) {
2918 	case AMDGPU_VM_OP_RESERVE_VMID:
2919 		/* We only need to reserve a VMID from the gfxhub */
2920 		amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
2921 		break;
2922 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2923 		amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
2924 		break;
2925 	default:
2926 		return -EINVAL;
2927 	}
2928 
2929 	return 0;
2930 }
2931 
2932 /**
2933  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2934  * @adev: amdgpu device pointer
2935  * @pasid: PASID of the VM
2936  * @ts: Timestamp of the fault
2937  * @vmid: VMID, only used for GFX 9.4.3.
2938  * @node_id: Node_id received in IH cookie. Only applicable for
2939  *           GFX 9.4.3.
2940  * @addr: Address of the fault
2941  * @write_fault: true if write fault, false if read fault
2942  *
2943  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2944  * shouldn't be reported any more.
2945  */
2946 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2947 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2948 			    bool write_fault)
2949 {
2950 	bool is_compute_context = false;
2951 	struct amdgpu_bo *root;
2952 	unsigned long irqflags;
2953 	uint64_t value, flags;
2954 	struct amdgpu_vm *vm;
2955 	int r;
2956 
2957 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2958 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2959 	if (vm) {
2960 		root = amdgpu_bo_ref(vm->root.bo);
2961 		is_compute_context = vm->is_compute_context;
2962 	} else {
2963 		root = NULL;
2964 	}
2965 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2966 
2967 	if (!root)
2968 		return false;
2969 
2970 	addr /= AMDGPU_GPU_PAGE_SIZE;
2971 
2972 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2973 	    node_id, addr, ts, write_fault)) {
2974 		amdgpu_bo_unref(&root);
2975 		return true;
2976 	}
2977 
2978 	r = amdgpu_bo_reserve(root, true);
2979 	if (r)
2980 		goto error_unref;
2981 
2982 	/* Double check that the VM still exists */
2983 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2984 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2985 	if (vm && vm->root.bo != root)
2986 		vm = NULL;
2987 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2988 	if (!vm)
2989 		goto error_unlock;
2990 
2991 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2992 		AMDGPU_PTE_SYSTEM;
2993 
2994 	if (is_compute_context) {
2995 		/* Intentionally setting invalid PTE flag
2996 		 * combination to force a no-retry-fault
2997 		 */
2998 		flags = AMDGPU_VM_NORETRY_FLAGS;
2999 		value = 0;
3000 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3001 		/* Redirect the access to the dummy page */
3002 		value = adev->dummy_page_addr;
3003 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3004 			AMDGPU_PTE_WRITEABLE;
3005 
3006 	} else {
3007 		/* Let the hw retry silently on the PTE */
3008 		value = 0;
3009 	}
3010 
3011 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
3012 	if (r) {
3013 		pr_debug("failed %d to reserve fence slot\n", r);
3014 		goto error_unlock;
3015 	}
3016 
3017 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
3018 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
3019 	if (r)
3020 		goto error_unlock;
3021 
3022 	r = amdgpu_vm_update_pdes(adev, vm, true);
3023 
3024 error_unlock:
3025 	amdgpu_bo_unreserve(root);
3026 	if (r < 0)
3027 		dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
3028 
3029 error_unref:
3030 	amdgpu_bo_unref(&root);
3031 
3032 	return false;
3033 }
3034 
3035 #if defined(CONFIG_DEBUG_FS)
3036 /**
3037  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
3038  *
3039  * @vm: Requested VM for printing BO info
3040  * @m: debugfs file
3041  *
3042  * Print BO information in debugfs file for the VM
3043  */
3044 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3045 {
3046 	struct amdgpu_bo_va *bo_va, *tmp;
3047 	u64 total_idle = 0;
3048 	u64 total_evicted = 0;
3049 	u64 total_relocated = 0;
3050 	u64 total_moved = 0;
3051 	u64 total_invalidated = 0;
3052 	u64 total_done = 0;
3053 	unsigned int total_idle_objs = 0;
3054 	unsigned int total_evicted_objs = 0;
3055 	unsigned int total_relocated_objs = 0;
3056 	unsigned int total_moved_objs = 0;
3057 	unsigned int total_invalidated_objs = 0;
3058 	unsigned int total_done_objs = 0;
3059 	unsigned int id = 0;
3060 
3061 	amdgpu_vm_assert_locked(vm);
3062 
3063 	spin_lock(&vm->status_lock);
3064 	seq_puts(m, "\tIdle BOs:\n");
3065 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3066 		if (!bo_va->base.bo)
3067 			continue;
3068 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3069 	}
3070 	total_idle_objs = id;
3071 	id = 0;
3072 
3073 	seq_puts(m, "\tEvicted BOs:\n");
3074 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3075 		if (!bo_va->base.bo)
3076 			continue;
3077 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3078 	}
3079 	total_evicted_objs = id;
3080 	id = 0;
3081 
3082 	seq_puts(m, "\tRelocated BOs:\n");
3083 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3084 		if (!bo_va->base.bo)
3085 			continue;
3086 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3087 	}
3088 	total_relocated_objs = id;
3089 	id = 0;
3090 
3091 	seq_puts(m, "\tMoved BOs:\n");
3092 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3093 		if (!bo_va->base.bo)
3094 			continue;
3095 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3096 	}
3097 	total_moved_objs = id;
3098 	id = 0;
3099 
3100 	seq_puts(m, "\tInvalidated BOs:\n");
3101 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3102 		if (!bo_va->base.bo)
3103 			continue;
3104 		total_invalidated += amdgpu_bo_print_info(id++,	bo_va->base.bo, m);
3105 	}
3106 	total_invalidated_objs = id;
3107 	id = 0;
3108 
3109 	seq_puts(m, "\tDone BOs:\n");
3110 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3111 		if (!bo_va->base.bo)
3112 			continue;
3113 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3114 	}
3115 	spin_unlock(&vm->status_lock);
3116 	total_done_objs = id;
3117 
3118 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3119 		   total_idle_objs);
3120 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3121 		   total_evicted_objs);
3122 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3123 		   total_relocated_objs);
3124 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3125 		   total_moved_objs);
3126 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3127 		   total_invalidated_objs);
3128 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3129 		   total_done_objs);
3130 }
3131 #endif
3132 
3133 /**
3134  * amdgpu_vm_update_fault_cache - update cached fault info.
3135  * @adev: amdgpu device pointer
3136  * @pasid: PASID of the VM
3137  * @addr: Address of the fault
3138  * @status: GPUVM fault status register
3139  * @vmhub: which vmhub got the fault
3140  *
3141  * Cache the fault info for later use by userspace in debugging.
3142  */
3143 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3144 				  unsigned int pasid,
3145 				  uint64_t addr,
3146 				  uint32_t status,
3147 				  unsigned int vmhub)
3148 {
3149 	struct amdgpu_vm *vm;
3150 	unsigned long flags;
3151 
3152 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3153 
3154 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3155 	/* Don't update the fault cache if status is 0.  In the multiple
3156 	 * fault case, subsequent faults will return a 0 status which is
3157 	 * useless for userspace and replaces the useful fault status, so
3158 	 * only update if status is non-0.
3159 	 */
3160 	if (vm && status) {
3161 		vm->fault_info.addr = addr;
3162 		vm->fault_info.status = status;
3163 		/*
3164 		 * Update the fault information globally for later usage
3165 		 * when vm could be stale or freed.
3166 		 */
3167 		adev->vm_manager.fault_info.addr = addr;
3168 		adev->vm_manager.fault_info.vmhub = vmhub;
3169 		adev->vm_manager.fault_info.status = status;
3170 
3171 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3172 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3173 			vm->fault_info.vmhub |=
3174 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3175 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3176 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3177 			vm->fault_info.vmhub |=
3178 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3179 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3180 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3181 			vm->fault_info.vmhub |=
3182 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3183 		} else {
3184 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3185 		}
3186 	}
3187 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3188 }
3189 
3190 /**
3191  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3192  *
3193  * @vm: VM to test against.
3194  * @bo: BO to be tested.
3195  *
3196  * Returns true if the BO shares the dma_resv object with the root PD and is
3197  * always guaranteed to be valid inside the VM.
3198  */
3199 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3200 {
3201 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3202 }
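
/*
 * Illustrative note (not part of the driver): a BO typically becomes "always
 * valid" by being created with its reservation object shared with the root
 * PD, e.g. a hypothetical creation path that passes
 * vm->root.bo->tbo.base.resv as the resv of the new BO.
 */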
3203 
3204 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
3205 			       struct amdgpu_task_info *task_info)
3206 {
3207 	dev_err(adev->dev,
3208 		" Process %s pid %d thread %s pid %d\n",
3209 		task_info->process_name, task_info->tgid,
3210 		task_info->task.comm, task_info->task.pid);
3211 }
3212