xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 2a1eea8fd601db4c52f0d14f8871663b7b052c91)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47 
48 /**
49  * DOC: GPUVM
50  *
51  * GPUVM is the MMU functionality provided on the GPU.
52  * GPUVM is similar to the legacy GART on older asics; however,
53  * rather than there being a single global GART table
54  * for the entire GPU, there can be multiple GPUVM page tables active
55  * at any given time.  The GPUVM page tables can contain a mix of
56  * VRAM pages and system pages (both memory and MMIO), and system pages
57  * can be mapped as snooped (cached system pages) or unsnooped
58  * (uncached system pages).
59  *
60  * Each active GPUVM has an ID associated with it and there is a page table
61  * linked with each VMID.  When executing a command buffer,
62  * the kernel tells the engine what VMID to use for that command
63  * buffer.  VMIDs are allocated dynamically as commands are submitted.
64  * The userspace drivers maintain their own address space and the kernel
65  * sets up their page tables accordingly when they submit their
66  * command buffers and a VMID is assigned.
67  * The hardware supports up to 16 active GPUVMs at any given time.
68  *
69  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
71  * as other features such as encryption and caching attributes.
72  *
73  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
74  * addition to an aperture managed by a page table, VMID 0 also has
75  * several other apertures.  There is an aperture for direct access to VRAM
76  * and there is a legacy AGP aperture which just forwards accesses directly
77  * to the matching system physical addresses (or IOVAs when an IOMMU is
78  * present).  These apertures provide direct access to these memories without
79  * incurring the overhead of a page table.  VMID 0 is used by the kernel
80  * driver for tasks like memory management.
81  *
82  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83  * For user applications, each application can have its own unique GPUVM
84  * address space.  The application manages the address space and the kernel
85  * driver manages the GPUVM page tables for each process.  If a GPU client
86  * accesses an invalid page, it will generate a GPU page fault, similar to
87  * accessing an invalid page on a CPU.
88  */
89 
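/*
 * Illustrative sketch (simplified, not copied from an in-tree caller) of how
 * a typical command submission uses the helpers in this file.  The function
 * names are real; exec, num_fences, validate_cb and param are placeholders,
 * and error handling as well as BO list handling are omitted:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		// ... reserve the other BOs of the submission ...
 *	}
 *	r = amdgpu_vm_validate(adev, vm, NULL, validate_cb, param);
 *	r = amdgpu_vm_handle_moved(adev, vm, NULL);
 *	r = amdgpu_vm_update_pdes(adev, vm, false);
 *	// amdgpu_vm_flush() later emits the VM flush on the ring if needed
 */
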
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92 
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 		     START, LAST, static, amdgpu_vm_it)
95 
96 #undef START
97 #undef LAST
98 
99 /**
100  * struct amdgpu_prt_cb - Helper to disable the partially resident texture feature from a fence callback
101  */
102 struct amdgpu_prt_cb {
103 
104 	/**
105 	 * @adev: amdgpu device
106 	 */
107 	struct amdgpu_device *adev;
108 
109 	/**
110 	 * @cb: callback
111 	 */
112 	struct dma_fence_cb cb;
113 };
114 
115 /**
116  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117  */
118 struct amdgpu_vm_tlb_seq_struct {
119 	/**
120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 	 */
122 	struct amdgpu_vm *vm;
123 
124 	/**
125 	 * @cb: callback
126 	 */
127 	struct dma_fence_cb cb;
128 };
129 
130 /**
131  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
132  *
133  * @adev: amdgpu_device pointer
134  * @vm: amdgpu_vm pointer
135  * @pasid: the pasid the VM is using on this GPU
136  *
137  * Set the pasid this VM is using on this GPU; can also be used to remove the
138  * pasid by passing in zero.
139  *
140  */
141 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
142 			u32 pasid)
143 {
144 	int r;
145 
146 	if (vm->pasid == pasid)
147 		return 0;
148 
149 	if (vm->pasid) {
150 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
151 		if (r < 0)
152 			return r;
153 
154 		vm->pasid = 0;
155 	}
156 
157 	if (pasid) {
158 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
159 					GFP_KERNEL));
160 		if (r < 0)
161 			return r;
162 
163 		vm->pasid = pasid;
164 	}
165 
166 
167 	return 0;
168 }
169 
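/*
 * Example: amdgpu_vm_set_pasid(adev, vm, pasid) installs the pasid->VM
 * mapping, amdgpu_vm_set_pasid(adev, vm, 0) removes it again, and passing a
 * different pasid replaces the old mapping with the new one.
 */
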
170 /**
171  * amdgpu_vm_bo_evicted - vm_bo is evicted
172  *
173  * @vm_bo: vm_bo which is evicted
174  *
175  * State for PDs/PTs and per VM BOs which are not at the location they should
176  * be.
177  */
178 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
179 {
180 	struct amdgpu_vm *vm = vm_bo->vm;
181 	struct amdgpu_bo *bo = vm_bo->bo;
182 
183 	vm_bo->moved = true;
184 	spin_lock(&vm_bo->vm->status_lock);
185 	if (bo->tbo.type == ttm_bo_type_kernel)
186 		list_move(&vm_bo->vm_status, &vm->evicted);
187 	else
188 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
189 	spin_unlock(&vm_bo->vm->status_lock);
190 }
191 /**
192  * amdgpu_vm_bo_moved - vm_bo is moved
193  *
194  * @vm_bo: vm_bo which is moved
195  *
196  * State for per VM BOs which are moved, but that change is not yet reflected
197  * in the page tables.
198  */
199 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
200 {
201 	spin_lock(&vm_bo->vm->status_lock);
202 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
203 	spin_unlock(&vm_bo->vm->status_lock);
204 }
205 
206 /**
207  * amdgpu_vm_bo_idle - vm_bo is idle
208  *
209  * @vm_bo: vm_bo which is now idle
210  *
211  * State for PDs/PTs and per VM BOs which have gone through the state machine
212  * and are now idle.
213  */
214 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
215 {
216 	spin_lock(&vm_bo->vm->status_lock);
217 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
218 	spin_unlock(&vm_bo->vm->status_lock);
219 	vm_bo->moved = false;
220 }
221 
222 /**
223  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
224  *
225  * @vm_bo: vm_bo which is now invalidated
226  *
227  * State for normal BOs which are invalidated and that change is not yet reflected
228  * in the PTs.
229  */
230 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
231 {
232 	spin_lock(&vm_bo->vm->status_lock);
233 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
234 	spin_unlock(&vm_bo->vm->status_lock);
235 }
236 
237 /**
238  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
239  *
240  * @vm_bo: vm_bo which is evicted
241  *
242  * State for BOs used by user mode queues which are not at the location they
243  * should be.
244  */
245 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
246 {
247 	vm_bo->moved = true;
248 	spin_lock(&vm_bo->vm->status_lock);
249 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
250 	spin_unlock(&vm_bo->vm->status_lock);
251 }
252 
253 /**
254  * amdgpu_vm_bo_relocated - vm_bo is relocated
255  *
256  * @vm_bo: vm_bo which is relocated
257  *
258  * State for PDs/PTs which need to update their parent PD.
259  * For the root PD, just move to idle state.
260  */
261 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
262 {
263 	if (vm_bo->bo->parent) {
264 		spin_lock(&vm_bo->vm->status_lock);
265 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
266 		spin_unlock(&vm_bo->vm->status_lock);
267 	} else {
268 		amdgpu_vm_bo_idle(vm_bo);
269 	}
270 }
271 
272 /**
273  * amdgpu_vm_bo_done - vm_bo is done
274  *
275  * @vm_bo: vm_bo which is now done
276  *
277  * State for normal BOs which are invalidated and that change has been updated
278  * in the PTs.
279  */
280 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
281 {
282 	spin_lock(&vm_bo->vm->status_lock);
283 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
284 	spin_unlock(&vm_bo->vm->status_lock);
285 }
286 
287 /**
288  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
289  * @vm: the VM which state machine to reset
290  *
291  * Move all vm_bo objects in the VM into a state where they will be updated
292  * again during validation.
293  */
294 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
295 {
296 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
297 
298 	spin_lock(&vm->status_lock);
299 	list_splice_init(&vm->done, &vm->invalidated);
300 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
301 		vm_bo->moved = true;
302 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
303 		struct amdgpu_bo *bo = vm_bo->bo;
304 
305 		vm_bo->moved = true;
306 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
307 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
308 		else if (bo->parent)
309 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
310 	}
311 	spin_unlock(&vm->status_lock);
312 }
313 
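/*
 * Rough sketch of the vm_bo state machine driven by the helpers above
 * (simplified; the individual functions document the exact conditions):
 *
 *	evicted/evicted_user --amdgpu_vm_validate--> moved/relocated/invalidated
 *	relocated            --amdgpu_vm_update_pdes--> idle
 *	moved/invalidated    --amdgpu_vm_bo_update--> idle or done
 *	idle/done            --BO move or reset--> evicted/moved/invalidated
 */
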
314 /**
315  * amdgpu_vm_update_shared - helper to update shared memory stat
316  * @base: base structure for tracking BO usage in a VM
317  *
318  * Takes the vm status_lock and updates the shared memory stat. If the basic
319  * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called
320  * as well.
321  */
322 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
323 {
324 	struct amdgpu_vm *vm = base->vm;
325 	struct amdgpu_bo *bo = base->bo;
326 	uint64_t size = amdgpu_bo_size(bo);
327 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
328 	bool shared;
329 
330 	spin_lock(&vm->status_lock);
331 	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
332 	if (base->shared != shared) {
333 		base->shared = shared;
334 		if (shared) {
335 			vm->stats[bo_memtype].drm.shared += size;
336 			vm->stats[bo_memtype].drm.private -= size;
337 		} else {
338 			vm->stats[bo_memtype].drm.shared -= size;
339 			vm->stats[bo_memtype].drm.private += size;
340 		}
341 	}
342 	spin_unlock(&vm->status_lock);
343 }
344 
345 /**
346  * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
347  * @bo: amdgpu buffer object
348  *
349  * Update the per VM stats of every VM this BO belongs to when it changes
350  * from private to shared or vice versa.
351  */
352 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
353 {
354 	struct amdgpu_vm_bo_base *base;
355 
356 	for (base = bo->vm_bo; base; base = base->next)
357 		amdgpu_vm_update_shared(base);
358 }
359 
360 /**
361  * amdgpu_vm_update_stats_locked - helper to update normal memory stat
362  * @base: base structure for tracking BO usage in a VM
363  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
364  *        be bo->tbo.resource
365  * @sign: if we should add (+1) or subtract (-1) from the stat
366  *
367  * Caller needs to hold the vm status_lock. Useful when multiple updates
368  * need to happen at the same time.
369  */
370 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
371 			    struct ttm_resource *res, int sign)
372 {
373 	struct amdgpu_vm *vm = base->vm;
374 	struct amdgpu_bo *bo = base->bo;
375 	int64_t size = sign * amdgpu_bo_size(bo);
376 	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
377 
378 	/* For drm-total- and drm-shared-, BOs are accounted by their preferred
379 	 * placement, see also amdgpu_bo_mem_stats_placement.
380 	 */
381 	if (base->shared)
382 		vm->stats[bo_memtype].drm.shared += size;
383 	else
384 		vm->stats[bo_memtype].drm.private += size;
385 
386 	if (res && res->mem_type < __AMDGPU_PL_NUM) {
387 		uint32_t res_memtype = res->mem_type;
388 
389 		vm->stats[res_memtype].drm.resident += size;
390 		/* A BO only counts as purgeable if it is resident,
391 		 * since otherwise there's nothing to purge.
392 		 */
393 		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
394 			vm->stats[res_memtype].drm.purgeable += size;
395 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
396 			vm->stats[bo_memtype].evicted += size;
397 	}
398 }
399 
400 /**
401  * amdgpu_vm_update_stats - helper to update normal memory stat
402  * @base: base structure for tracking BO usage in a VM
403  * @res:  the ttm_resource to use for the purpose of accounting, may or may not
404  *        be bo->tbo.resource
405  * @sign: if we should add (+1) or subtract (-1) from the stat
406  *
407  * Updates the basic memory stat when bo is added/deleted/moved.
408  */
409 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
410 			    struct ttm_resource *res, int sign)
411 {
412 	struct amdgpu_vm *vm = base->vm;
413 
414 	spin_lock(&vm->status_lock);
415 	amdgpu_vm_update_stats_locked(base, res, sign);
416 	spin_unlock(&vm->status_lock);
417 }
418 
419 /**
420  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
421  *
422  * @base: base structure for tracking BO usage in a VM
423  * @vm: vm to which bo is to be added
424  * @bo: amdgpu buffer object
425  *
426  * Initialize a bo_va_base structure and add it to the appropriate lists
427  *
428  */
429 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
430 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
431 {
432 	base->vm = vm;
433 	base->bo = bo;
434 	base->next = NULL;
435 	INIT_LIST_HEAD(&base->vm_status);
436 
437 	if (!bo)
438 		return;
439 	base->next = bo->vm_bo;
440 	bo->vm_bo = base;
441 
442 	spin_lock(&vm->status_lock);
443 	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
444 	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
445 	spin_unlock(&vm->status_lock);
446 
447 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
448 		return;
449 
450 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
451 
452 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
453 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
454 		amdgpu_vm_bo_relocated(base);
455 	else
456 		amdgpu_vm_bo_idle(base);
457 
458 	if (bo->preferred_domains &
459 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
460 		return;
461 
462 	/*
463 	 * We checked all the prerequisites, but it looks like this per VM BO
464 	 * is currently evicted. Add the BO to the evicted list to make sure it
465 	 * is validated on next VM use to avoid faults.
466 	 */
467 	amdgpu_vm_bo_evicted(base);
468 }
469 
470 /**
471  * amdgpu_vm_lock_pd - lock PD in drm_exec
472  *
473  * @vm: vm providing the BOs
474  * @exec: drm execution context
475  * @num_fences: number of extra fences to reserve
476  *
477  * Lock the VM root PD in the DRM execution context.
478  */
479 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
480 		      unsigned int num_fences)
481 {
482 	/* We need at least two fences for the VM PD/PT updates */
483 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
484 				    2 + num_fences);
485 }
486 
487 /**
488  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
489  *
490  * @adev: amdgpu device pointer
491  * @vm: vm providing the BOs
492  *
493  * Move all BOs to the end of LRU and remember their positions to put them
494  * together.
495  */
496 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
497 				struct amdgpu_vm *vm)
498 {
499 	spin_lock(&adev->mman.bdev.lru_lock);
500 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
501 	spin_unlock(&adev->mman.bdev.lru_lock);
502 }
503 
504 /* Create scheduler entities for page table updates */
505 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
506 				   struct amdgpu_vm *vm)
507 {
508 	int r;
509 
510 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
511 				  adev->vm_manager.vm_pte_scheds,
512 				  adev->vm_manager.vm_pte_num_scheds, NULL);
513 	if (r)
514 		goto error;
515 
516 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
517 				     adev->vm_manager.vm_pte_scheds,
518 				     adev->vm_manager.vm_pte_num_scheds, NULL);
519 
520 error:
521 	drm_sched_entity_destroy(&vm->immediate);
522 	return r;
523 }
524 
525 /* Destroy the entities for page table updates again */
526 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
527 {
528 	drm_sched_entity_destroy(&vm->immediate);
529 	drm_sched_entity_destroy(&vm->delayed);
530 }
531 
532 /**
533  * amdgpu_vm_generation - return the page table re-generation counter
534  * @adev: the amdgpu_device
535  * @vm: optional VM to check, might be NULL
536  *
537  * Returns a page table re-generation token to allow checking if submissions
538  * are still valid to use this VM. The VM parameter might be NULL in which case
539  * just the VRAM lost counter will be used.
540  */
541 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
542 {
543 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
544 
545 	if (!vm)
546 		return result;
547 
548 	result += lower_32_bits(vm->generation);
549 	/* Add one if the page tables will be re-generated on next CS */
550 	if (drm_sched_entity_error(&vm->delayed))
551 		++result;
552 
553 	return result;
554 }
555 
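/*
 * Illustrative example with hypothetical values: with vram_lost_counter == 2
 * and vm->generation == 5 the token is (2ull << 32) | 5, plus one if the
 * delayed entity carries an error and the page tables will be re-generated.
 * A caller can snapshot the token when building a submission and compare it
 * against amdgpu_vm_generation(adev, vm) later; a mismatch means the VM page
 * tables or VRAM contents can no longer be relied upon.
 */
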
556 /**
557  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
558  *
559  * @adev: amdgpu device pointer
560  * @vm: vm providing the BOs
561  * @ticket: optional reservation ticket used to reserve the VM
562  * @validate: callback to do the validation
563  * @param: parameter for the validation callback
564  *
565  * Validate the page table BOs and per-VM BOs on command submission if
566  * necessary. If a ticket is given, also try to validate evicted user queue
567  * BOs. They must already be reserved with the given ticket.
568  *
569  * Returns:
570  * Validation result.
571  */
572 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
573 		       struct ww_acquire_ctx *ticket,
574 		       int (*validate)(void *p, struct amdgpu_bo *bo),
575 		       void *param)
576 {
577 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
578 	struct amdgpu_vm_bo_base *bo_base;
579 	struct amdgpu_bo *bo;
580 	int r;
581 
582 	if (vm->generation != new_vm_generation) {
583 		vm->generation = new_vm_generation;
584 		amdgpu_vm_bo_reset_state_machine(vm);
585 		amdgpu_vm_fini_entities(vm);
586 		r = amdgpu_vm_init_entities(adev, vm);
587 		if (r)
588 			return r;
589 	}
590 
591 	spin_lock(&vm->status_lock);
592 	while (!list_empty(&vm->evicted)) {
593 		bo_base = list_first_entry(&vm->evicted,
594 					   struct amdgpu_vm_bo_base,
595 					   vm_status);
596 		spin_unlock(&vm->status_lock);
597 
598 		bo = bo_base->bo;
599 
600 		r = validate(param, bo);
601 		if (r)
602 			return r;
603 
604 		if (bo->tbo.type != ttm_bo_type_kernel) {
605 			amdgpu_vm_bo_moved(bo_base);
606 		} else {
607 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
608 			amdgpu_vm_bo_relocated(bo_base);
609 		}
610 		spin_lock(&vm->status_lock);
611 	}
612 	while (ticket && !list_empty(&vm->evicted_user)) {
613 		bo_base = list_first_entry(&vm->evicted_user,
614 					   struct amdgpu_vm_bo_base,
615 					   vm_status);
616 		spin_unlock(&vm->status_lock);
617 
618 		bo = bo_base->bo;
619 
620 		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
621 			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
622 
623 			pr_warn_ratelimited("Evicted user BO is not reserved\n");
624 			if (ti) {
625 				pr_warn_ratelimited("pid %d\n", ti->task.pid);
626 				amdgpu_vm_put_task_info(ti);
627 			}
628 
629 			return -EINVAL;
630 		}
631 
632 		r = validate(param, bo);
633 		if (r)
634 			return r;
635 
636 		amdgpu_vm_bo_invalidated(bo_base);
637 
638 		spin_lock(&vm->status_lock);
639 	}
640 	spin_unlock(&vm->status_lock);
641 
642 	amdgpu_vm_eviction_lock(vm);
643 	vm->evicting = false;
644 	amdgpu_vm_eviction_unlock(vm);
645 
646 	return 0;
647 }
648 
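/*
 * Minimal sketch of a @validate callback (example_validate is hypothetical;
 * it assumes the caller has already set up the desired placement, e.g. with
 * amdgpu_bo_placement_from_domain()):
 *
 *	static int example_validate(void *param, struct amdgpu_bo *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { true, false };
 *
 *		return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 */
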
649 /**
650  * amdgpu_vm_ready - check VM is ready for updates
651  *
652  * @vm: VM to check
653  *
654  * Check if all VM PDs/PTs are ready for updates
655  *
656  * Returns:
657  * True if the VM is not evicting and none of the VM entities are stopped
658  */
659 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
660 {
661 	bool ret;
662 
663 	amdgpu_vm_eviction_lock(vm);
664 	ret = !vm->evicting;
665 	amdgpu_vm_eviction_unlock(vm);
666 
667 	spin_lock(&vm->status_lock);
668 	ret &= list_empty(&vm->evicted);
669 	spin_unlock(&vm->status_lock);
670 
671 	spin_lock(&vm->immediate.lock);
672 	ret &= !vm->immediate.stopped;
673 	spin_unlock(&vm->immediate.lock);
674 
675 	spin_lock(&vm->delayed.lock);
676 	ret &= !vm->delayed.stopped;
677 	spin_unlock(&vm->delayed.lock);
678 
679 	return ret;
680 }
681 
682 /**
683  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
684  *
685  * @adev: amdgpu_device pointer
686  */
687 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
688 {
689 	const struct amdgpu_ip_block *ip_block;
690 	bool has_compute_vm_bug;
691 	struct amdgpu_ring *ring;
692 	int i;
693 
694 	has_compute_vm_bug = false;
695 
696 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
697 	if (ip_block) {
698 		/* Compute has a VM bug for GFX version < 7.
699 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
700 		if (ip_block->version->major <= 7)
701 			has_compute_vm_bug = true;
702 		else if (ip_block->version->major == 8)
703 			if (adev->gfx.mec_fw_version < 673)
704 				has_compute_vm_bug = true;
705 	}
706 
707 	for (i = 0; i < adev->num_rings; i++) {
708 		ring = adev->rings[i];
709 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
710 			/* only compute rings */
711 			ring->has_compute_vm_bug = has_compute_vm_bug;
712 		else
713 			ring->has_compute_vm_bug = false;
714 	}
715 }
716 
717 /**
718  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
719  *
720  * @ring: ring on which the job will be submitted
721  * @job: job to submit
722  *
723  * Returns:
724  * True if sync is needed.
725  */
726 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
727 				  struct amdgpu_job *job)
728 {
729 	struct amdgpu_device *adev = ring->adev;
730 	unsigned vmhub = ring->vm_hub;
731 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
732 
733 	if (job->vmid == 0)
734 		return false;
735 
736 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
737 		return true;
738 
739 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
740 		return true;
741 
742 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
743 		return true;
744 
745 	return false;
746 }
747 
748 /**
749  * amdgpu_vm_flush - hardware flush the vm
750  *
751  * @ring: ring to use for flush
752  * @job:  related job
753  * @need_pipe_sync: is pipe sync needed
754  *
755  * Emit a VM flush when it is necessary.
756  *
757  * Returns:
758  * 0 on success, errno otherwise.
759  */
760 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
761 		    bool need_pipe_sync)
762 {
763 	struct amdgpu_device *adev = ring->adev;
764 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
765 	unsigned vmhub = ring->vm_hub;
766 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
767 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
768 	bool spm_update_needed = job->spm_update_needed;
769 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
770 		job->gds_switch_needed;
771 	bool vm_flush_needed = job->vm_needs_flush;
772 	bool cleaner_shader_needed = false;
773 	bool pasid_mapping_needed = false;
774 	struct dma_fence *fence = NULL;
775 	struct amdgpu_fence *af;
776 	unsigned int patch;
777 	int r;
778 
779 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
780 		gds_switch_needed = true;
781 		vm_flush_needed = true;
782 		pasid_mapping_needed = true;
783 		spm_update_needed = true;
784 	}
785 
786 	mutex_lock(&id_mgr->lock);
787 	if (id->pasid != job->pasid || !id->pasid_mapping ||
788 	    !dma_fence_is_signaled(id->pasid_mapping))
789 		pasid_mapping_needed = true;
790 	mutex_unlock(&id_mgr->lock);
791 
792 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
793 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
794 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
795 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
796 		ring->funcs->emit_wreg;
797 
798 	cleaner_shader_needed = job->run_cleaner_shader &&
799 		adev->gfx.enable_cleaner_shader &&
800 		ring->funcs->emit_cleaner_shader && job->base.s_fence &&
801 		&job->base.s_fence->scheduled == isolation->spearhead;
802 
803 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
804 	    !cleaner_shader_needed)
805 		return 0;
806 
807 	amdgpu_ring_ib_begin(ring);
808 	if (ring->funcs->init_cond_exec)
809 		patch = amdgpu_ring_init_cond_exec(ring,
810 						   ring->cond_exe_gpu_addr);
811 
812 	if (need_pipe_sync)
813 		amdgpu_ring_emit_pipeline_sync(ring);
814 
815 	if (cleaner_shader_needed)
816 		ring->funcs->emit_cleaner_shader(ring);
817 
818 	if (vm_flush_needed) {
819 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
820 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
821 	}
822 
823 	if (pasid_mapping_needed)
824 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
825 
826 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
827 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
828 
829 	if (ring->funcs->emit_gds_switch &&
830 	    gds_switch_needed) {
831 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
832 					    job->gds_size, job->gws_base,
833 					    job->gws_size, job->oa_base,
834 					    job->oa_size);
835 	}
836 
837 	if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
838 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
839 		if (r)
840 			return r;
841 		/* this is part of the job's context */
842 		af = container_of(fence, struct amdgpu_fence, base);
843 		af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
844 	}
845 
846 	if (vm_flush_needed) {
847 		mutex_lock(&id_mgr->lock);
848 		dma_fence_put(id->last_flush);
849 		id->last_flush = dma_fence_get(fence);
850 		id->current_gpu_reset_count =
851 			atomic_read(&adev->gpu_reset_counter);
852 		mutex_unlock(&id_mgr->lock);
853 	}
854 
855 	if (pasid_mapping_needed) {
856 		mutex_lock(&id_mgr->lock);
857 		id->pasid = job->pasid;
858 		dma_fence_put(id->pasid_mapping);
859 		id->pasid_mapping = dma_fence_get(fence);
860 		mutex_unlock(&id_mgr->lock);
861 	}
862 
863 	/*
864 	 * Make sure that all other submissions wait for the cleaner shader to
865 	 * finish before we push them to the HW.
866 	 */
867 	if (cleaner_shader_needed) {
868 		trace_amdgpu_cleaner_shader(ring, fence);
869 		mutex_lock(&adev->enforce_isolation_mutex);
870 		dma_fence_put(isolation->spearhead);
871 		isolation->spearhead = dma_fence_get(fence);
872 		mutex_unlock(&adev->enforce_isolation_mutex);
873 	}
874 	dma_fence_put(fence);
875 
876 	amdgpu_ring_patch_cond_exec(ring, patch);
877 
878 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
879 	if (ring->funcs->emit_switch_buffer) {
880 		amdgpu_ring_emit_switch_buffer(ring);
881 		amdgpu_ring_emit_switch_buffer(ring);
882 	}
883 
884 	amdgpu_ring_ib_end(ring);
885 	return 0;
886 }
887 
888 /**
889  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
890  *
891  * @vm: requested vm
892  * @bo: requested buffer object
893  *
894  * Find @bo inside the requested vm.
895  * Search inside the @bo's vm list for the requested vm.
896  * Returns the found bo_va or NULL if none is found
897  *
898  * Object has to be reserved!
899  *
900  * Returns:
901  * Found bo_va or NULL.
902  */
903 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
904 				       struct amdgpu_bo *bo)
905 {
906 	struct amdgpu_vm_bo_base *base;
907 
908 	for (base = bo->vm_bo; base; base = base->next) {
909 		if (base->vm != vm)
910 			continue;
911 
912 		return container_of(base, struct amdgpu_bo_va, base);
913 	}
914 	return NULL;
915 }
916 
917 /**
918  * amdgpu_vm_map_gart - Resolve gart mapping of addr
919  *
920  * @pages_addr: optional DMA address to use for lookup
921  * @addr: the unmapped addr
922  *
923  * Look up the physical address of the page that the pte resolves
924  * to.
925  *
926  * Returns:
927  * The pointer for the page table entry.
928  */
929 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
930 {
931 	uint64_t result;
932 
933 	/* page table offset */
934 	result = pages_addr[addr >> PAGE_SHIFT];
935 
936 	/* in case cpu page size != gpu page size */
937 	result |= addr & (~PAGE_MASK);
938 
939 	result &= 0xFFFFFFFFFFFFF000ULL;
940 
941 	return result;
942 }
943 
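/*
 * Worked example, assuming 64KiB CPU pages (PAGE_SHIFT == 16): for
 * addr == 0x25000 the CPU page index is 0x2, so the DMA address is taken
 * from pages_addr[2]; the in-page offset 0x5000 is OR'ed back in and the
 * final mask keeps the result aligned to the 4KiB GPU page size. With 4KiB
 * CPU pages the GPU and CPU page sizes match, so the result is simply the
 * DMA address of the page.
 */
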
944 /**
945  * amdgpu_vm_update_pdes - make sure that all directories are valid
946  *
947  * @adev: amdgpu_device pointer
948  * @vm: requested vm
949  * @immediate: submit immediately to the paging queue
950  *
951  * Makes sure all directories are up to date.
952  *
953  * Returns:
954  * 0 for success, error for failure.
955  */
956 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
957 			  struct amdgpu_vm *vm, bool immediate)
958 {
959 	struct amdgpu_vm_update_params params;
960 	struct amdgpu_vm_bo_base *entry;
961 	bool flush_tlb_needed = false;
962 	LIST_HEAD(relocated);
963 	int r, idx;
964 
965 	spin_lock(&vm->status_lock);
966 	list_splice_init(&vm->relocated, &relocated);
967 	spin_unlock(&vm->status_lock);
968 
969 	if (list_empty(&relocated))
970 		return 0;
971 
972 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
973 		return -ENODEV;
974 
975 	memset(&params, 0, sizeof(params));
976 	params.adev = adev;
977 	params.vm = vm;
978 	params.immediate = immediate;
979 
980 	r = vm->update_funcs->prepare(&params, NULL,
981 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
982 	if (r)
983 		goto error;
984 
985 	list_for_each_entry(entry, &relocated, vm_status) {
986 		/* vm_flush_needed after updating moved PDEs */
987 		flush_tlb_needed |= entry->moved;
988 
989 		r = amdgpu_vm_pde_update(&params, entry);
990 		if (r)
991 			goto error;
992 	}
993 
994 	r = vm->update_funcs->commit(&params, &vm->last_update);
995 	if (r)
996 		goto error;
997 
998 	if (flush_tlb_needed)
999 		atomic64_inc(&vm->tlb_seq);
1000 
1001 	while (!list_empty(&relocated)) {
1002 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
1003 					 vm_status);
1004 		amdgpu_vm_bo_idle(entry);
1005 	}
1006 
1007 error:
1008 	drm_dev_exit(idx);
1009 	return r;
1010 }
1011 
1012 /**
1013  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1014  * @fence: unused
1015  * @cb: the callback structure
1016  *
1017  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1018  */
1019 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1020 				 struct dma_fence_cb *cb)
1021 {
1022 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1023 
1024 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1025 	atomic64_inc(&tlb_cb->vm->tlb_seq);
1026 	kfree(tlb_cb);
1027 }
1028 
1029 /**
1030  * amdgpu_vm_tlb_flush - prepare TLB flush
1031  *
1032  * @params: parameters for update
1033  * @fence: input fence to sync TLB flush with
1034  * @tlb_cb: the callback structure
1035  *
1036  * Increments the tlb sequence to make sure that future CS execute a VM flush.
1037  */
1038 static void
1039 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1040 		    struct dma_fence **fence,
1041 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1042 {
1043 	struct amdgpu_vm *vm = params->vm;
1044 
1045 	tlb_cb->vm = vm;
1046 	if (!fence || !*fence) {
1047 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1048 		return;
1049 	}
1050 
1051 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1052 				    amdgpu_vm_tlb_seq_cb)) {
1053 		dma_fence_put(vm->last_tlb_flush);
1054 		vm->last_tlb_flush = dma_fence_get(*fence);
1055 	} else {
1056 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1057 	}
1058 
1059 	/* Prepare a TLB flush fence to be attached to PTs */
1060 	if (!params->unlocked && vm->is_compute_context) {
1061 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1062 
1063 		/* Makes sure no PD/PT is freed before the flush */
1064 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1065 				   DMA_RESV_USAGE_BOOKKEEP);
1066 	}
1067 }
1068 
1069 /**
1070  * amdgpu_vm_update_range - update a range in the vm page table
1071  *
1072  * @adev: amdgpu_device pointer to use for commands
1073  * @vm: the VM to update the range
1074  * @immediate: immediate submission in a page fault
1075  * @unlocked: unlocked invalidation during MM callback
1076  * @flush_tlb: trigger tlb invalidation after update completed
1077  * @allow_override: change MTYPE for local NUMA nodes
1078  * @sync: fences we need to sync to
1079  * @start: start of mapped range
1080  * @last: last mapped entry
1081  * @flags: flags for the entries
1082  * @offset: offset into nodes and pages_addr
1083  * @vram_base: base for vram mappings
1084  * @res: ttm_resource to map
1085  * @pages_addr: DMA addresses to use for mapping
1086  * @fence: optional resulting fence
1087  *
1088  * Fill in the page table entries between @start and @last.
1089  *
1090  * Returns:
1091  * 0 for success, negative error code for failure.
1092  */
1093 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1094 			   bool immediate, bool unlocked, bool flush_tlb,
1095 			   bool allow_override, struct amdgpu_sync *sync,
1096 			   uint64_t start, uint64_t last, uint64_t flags,
1097 			   uint64_t offset, uint64_t vram_base,
1098 			   struct ttm_resource *res, dma_addr_t *pages_addr,
1099 			   struct dma_fence **fence)
1100 {
1101 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1102 	struct amdgpu_vm_update_params params;
1103 	struct amdgpu_res_cursor cursor;
1104 	int r, idx;
1105 
1106 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
1107 		return -ENODEV;
1108 
1109 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
1110 	if (!tlb_cb) {
1111 		drm_dev_exit(idx);
1112 		return -ENOMEM;
1113 	}
1114 
1115 	/* On Vega20+XGMI, PTEs get inadvertently cached in the L2 texture cache,
1116 	 * so do a heavy-weight TLB flush unconditionally.
1117 	 */
1118 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1119 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1120 
1121 	/*
1122 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1123 	 */
1124 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1125 
1126 	memset(&params, 0, sizeof(params));
1127 	params.adev = adev;
1128 	params.vm = vm;
1129 	params.immediate = immediate;
1130 	params.pages_addr = pages_addr;
1131 	params.unlocked = unlocked;
1132 	params.needs_flush = flush_tlb;
1133 	params.allow_override = allow_override;
1134 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1135 
1136 	amdgpu_vm_eviction_lock(vm);
1137 	if (vm->evicting) {
1138 		r = -EBUSY;
1139 		goto error_free;
1140 	}
1141 
1142 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1143 		struct dma_fence *tmp = dma_fence_get_stub();
1144 
1145 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1146 		swap(vm->last_unlocked, tmp);
1147 		dma_fence_put(tmp);
1148 	}
1149 
1150 	r = vm->update_funcs->prepare(&params, sync,
1151 				      AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
1152 	if (r)
1153 		goto error_free;
1154 
1155 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1156 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1157 	while (cursor.remaining) {
1158 		uint64_t tmp, num_entries, addr;
1159 
1160 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1161 		if (pages_addr) {
1162 			bool contiguous = true;
1163 
1164 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1165 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1166 				uint64_t count;
1167 
1168 				contiguous = pages_addr[pfn + 1] ==
1169 					pages_addr[pfn] + PAGE_SIZE;
1170 
1171 				tmp = num_entries /
1172 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1173 				for (count = 2; count < tmp; ++count) {
1174 					uint64_t idx = pfn + count;
1175 
1176 					if (contiguous != (pages_addr[idx] ==
1177 					    pages_addr[idx - 1] + PAGE_SIZE))
1178 						break;
1179 				}
1180 				if (!contiguous)
1181 					count--;
1182 				num_entries = count *
1183 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1184 			}
1185 
1186 			if (!contiguous) {
1187 				addr = cursor.start;
1188 				params.pages_addr = pages_addr;
1189 			} else {
1190 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1191 				params.pages_addr = NULL;
1192 			}
1193 
1194 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1195 			addr = vram_base + cursor.start;
1196 		} else {
1197 			addr = 0;
1198 		}
1199 
1200 		tmp = start + num_entries;
1201 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1202 		if (r)
1203 			goto error_free;
1204 
1205 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1206 		start = tmp;
1207 	}
1208 
1209 	r = vm->update_funcs->commit(&params, fence);
1210 	if (r)
1211 		goto error_free;
1212 
1213 	if (params.needs_flush) {
1214 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1215 		tlb_cb = NULL;
1216 	}
1217 
1218 	amdgpu_vm_pt_free_list(adev, &params);
1219 
1220 error_free:
1221 	kfree(tlb_cb);
1222 	amdgpu_vm_eviction_unlock(vm);
1223 	drm_dev_exit(idx);
1224 	return r;
1225 }
1226 
1227 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1228 			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1229 {
1230 	spin_lock(&vm->status_lock);
1231 	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1232 	spin_unlock(&vm->status_lock);
1233 }
1234 
1235 /**
1236  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1237  *
1238  * @adev: amdgpu_device pointer
1239  * @bo_va: requested BO and VM object
1240  * @clear: if true clear the entries
1241  *
1242  * Fill in the page table entries for @bo_va.
1243  *
1244  * Returns:
1245  * 0 for success, -EINVAL for failure.
1246  */
1247 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1248 			bool clear)
1249 {
1250 	struct amdgpu_bo *bo = bo_va->base.bo;
1251 	struct amdgpu_vm *vm = bo_va->base.vm;
1252 	struct amdgpu_bo_va_mapping *mapping;
1253 	struct dma_fence **last_update;
1254 	dma_addr_t *pages_addr = NULL;
1255 	struct ttm_resource *mem;
1256 	struct amdgpu_sync sync;
1257 	bool flush_tlb = clear;
1258 	uint64_t vram_base;
1259 	uint64_t flags;
1260 	bool uncached;
1261 	int r;
1262 
1263 	amdgpu_sync_create(&sync);
1264 	if (clear) {
1265 		mem = NULL;
1266 
1267 		/* Implicitly sync to command submissions in the same VM before
1268 		 * unmapping.
1269 		 */
1270 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1271 				     AMDGPU_SYNC_EQ_OWNER, vm);
1272 		if (r)
1273 			goto error_free;
1274 		if (bo) {
1275 			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1276 			if (r)
1277 				goto error_free;
1278 		}
1279 	} else if (!bo) {
1280 		mem = NULL;
1281 
1282 		/* PRT map operations don't need to sync to anything. */
1283 
1284 	} else {
1285 		struct drm_gem_object *obj = &bo->tbo.base;
1286 
1287 		if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
1288 			struct dma_buf *dma_buf = obj->dma_buf;
1289 			struct drm_gem_object *gobj = dma_buf->priv;
1290 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1291 
1292 			if (abo->tbo.resource &&
1293 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1294 				bo = gem_to_amdgpu_bo(gobj);
1295 		}
1296 		mem = bo->tbo.resource;
1297 		if (mem && (mem->mem_type == TTM_PL_TT ||
1298 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1299 			pages_addr = bo->tbo.ttm->dma_address;
1300 
1301 		/* Implicitly sync to moving fences before mapping anything */
1302 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1303 				     AMDGPU_SYNC_EXPLICIT, vm);
1304 		if (r)
1305 			goto error_free;
1306 	}
1307 
1308 	if (bo) {
1309 		struct amdgpu_device *bo_adev;
1310 
1311 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1312 
1313 		if (amdgpu_bo_encrypted(bo))
1314 			flags |= AMDGPU_PTE_TMZ;
1315 
1316 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1317 		vram_base = bo_adev->vm_manager.vram_base_offset;
1318 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1319 	} else {
1320 		flags = 0x0;
1321 		vram_base = 0;
1322 		uncached = false;
1323 	}
1324 
1325 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1326 		last_update = &vm->last_update;
1327 	else
1328 		last_update = &bo_va->last_pt_update;
1329 
1330 	if (!clear && bo_va->base.moved) {
1331 		flush_tlb = true;
1332 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1333 
1334 	} else if (bo_va->cleared != clear) {
1335 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1336 	}
1337 
1338 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1339 		uint64_t update_flags = flags;
1340 
1341 		/* Normally, bo_va->flags only contains the READABLE and WRITEABLE
1342 		 * bits here, but just in case we filter the flags anyway
1343 		 */
1344 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1345 			update_flags &= ~AMDGPU_PTE_READABLE;
1346 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1347 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1348 
1349 		/* Apply ASIC specific mapping flags */
1350 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1351 
1352 		trace_amdgpu_vm_bo_update(mapping);
1353 
1354 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1355 					   !uncached, &sync, mapping->start,
1356 					   mapping->last, update_flags,
1357 					   mapping->offset, vram_base, mem,
1358 					   pages_addr, last_update);
1359 		if (r)
1360 			goto error_free;
1361 	}
1362 
1363 	/* If the BO is not in its preferred location, add it back to
1364 	 * the evicted list so that it gets validated again on the
1365 	 * next command submission.
1366 	 */
1367 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1368 		if (bo->tbo.resource &&
1369 		    !(bo->preferred_domains &
1370 		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1371 			amdgpu_vm_bo_evicted(&bo_va->base);
1372 		else
1373 			amdgpu_vm_bo_idle(&bo_va->base);
1374 	} else {
1375 		amdgpu_vm_bo_done(&bo_va->base);
1376 	}
1377 
1378 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1379 	bo_va->cleared = clear;
1380 	bo_va->base.moved = false;
1381 
1382 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1383 		list_for_each_entry(mapping, &bo_va->valids, list)
1384 			trace_amdgpu_vm_bo_mapping(mapping);
1385 	}
1386 
1387 error_free:
1388 	amdgpu_sync_free(&sync);
1389 	return r;
1390 }
1391 
1392 /**
1393  * amdgpu_vm_update_prt_state - update the global PRT state
1394  *
1395  * @adev: amdgpu_device pointer
1396  */
1397 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1398 {
1399 	unsigned long flags;
1400 	bool enable;
1401 
1402 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1403 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1404 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1405 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1406 }
1407 
1408 /**
1409  * amdgpu_vm_prt_get - add a PRT user
1410  *
1411  * @adev: amdgpu_device pointer
1412  */
1413 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1414 {
1415 	if (!adev->gmc.gmc_funcs->set_prt)
1416 		return;
1417 
1418 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1419 		amdgpu_vm_update_prt_state(adev);
1420 }
1421 
1422 /**
1423  * amdgpu_vm_prt_put - drop a PRT user
1424  *
1425  * @adev: amdgpu_device pointer
1426  */
1427 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1428 {
1429 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1430 		amdgpu_vm_update_prt_state(adev);
1431 }
1432 
1433 /**
1434  * amdgpu_vm_prt_cb - callback for updating the PRT status
1435  *
1436  * @fence: fence for the callback
1437  * @_cb: the callback structure
1438  */
1439 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1440 {
1441 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1442 
1443 	amdgpu_vm_prt_put(cb->adev);
1444 	kfree(cb);
1445 }
1446 
1447 /**
1448  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1449  *
1450  * @adev: amdgpu_device pointer
1451  * @fence: fence for the callback
1452  */
1453 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1454 				 struct dma_fence *fence)
1455 {
1456 	struct amdgpu_prt_cb *cb;
1457 
1458 	if (!adev->gmc.gmc_funcs->set_prt)
1459 		return;
1460 
1461 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1462 	if (!cb) {
1463 		/* Last resort when we are OOM */
1464 		if (fence)
1465 			dma_fence_wait(fence, false);
1466 
1467 		amdgpu_vm_prt_put(adev);
1468 	} else {
1469 		cb->adev = adev;
1470 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1471 						     amdgpu_vm_prt_cb))
1472 			amdgpu_vm_prt_cb(fence, &cb->cb);
1473 	}
1474 }
1475 
1476 /**
1477  * amdgpu_vm_free_mapping - free a mapping
1478  *
1479  * @adev: amdgpu_device pointer
1480  * @vm: requested vm
1481  * @mapping: mapping to be freed
1482  * @fence: fence of the unmap operation
1483  *
1484  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1485  */
1486 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1487 				   struct amdgpu_vm *vm,
1488 				   struct amdgpu_bo_va_mapping *mapping,
1489 				   struct dma_fence *fence)
1490 {
1491 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1492 		amdgpu_vm_add_prt_cb(adev, fence);
1493 	kfree(mapping);
1494 }
1495 
1496 /**
1497  * amdgpu_vm_prt_fini - finish all prt mappings
1498  *
1499  * @adev: amdgpu_device pointer
1500  * @vm: requested vm
1501  *
1502  * Register a cleanup callback to disable PRT support after the VM dies.
1503  */
1504 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1505 {
1506 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1507 	struct dma_resv_iter cursor;
1508 	struct dma_fence *fence;
1509 
1510 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1511 		/* Add a callback for each fence in the reservation object */
1512 		amdgpu_vm_prt_get(adev);
1513 		amdgpu_vm_add_prt_cb(adev, fence);
1514 	}
1515 }
1516 
1517 /**
1518  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1519  *
1520  * @adev: amdgpu_device pointer
1521  * @vm: requested vm
1522  * @fence: optional resulting fence (unchanged if no work needed to be done
1523  * or if an error occurred)
1524  *
1525  * Make sure all freed BOs are cleared in the PT.
1526  * PTs have to be reserved and mutex must be locked!
1527  *
1528  * Returns:
1529  * 0 for success.
1530  *
1531  */
1532 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1533 			  struct amdgpu_vm *vm,
1534 			  struct dma_fence **fence)
1535 {
1536 	struct amdgpu_bo_va_mapping *mapping;
1537 	struct dma_fence *f = NULL;
1538 	struct amdgpu_sync sync;
1539 	int r;
1540 
1541 
1542 	/*
1543 	 * Implicitly sync to command submissions in the same VM before
1544 	 * unmapping.
1545 	 */
1546 	amdgpu_sync_create(&sync);
1547 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1548 			     AMDGPU_SYNC_EQ_OWNER, vm);
1549 	if (r)
1550 		goto error_free;
1551 
1552 	while (!list_empty(&vm->freed)) {
1553 		mapping = list_first_entry(&vm->freed,
1554 			struct amdgpu_bo_va_mapping, list);
1555 		list_del(&mapping->list);
1556 
1557 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1558 					   &sync, mapping->start, mapping->last,
1559 					   0, 0, 0, NULL, NULL, &f);
1560 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1561 		if (r) {
1562 			dma_fence_put(f);
1563 			goto error_free;
1564 		}
1565 	}
1566 
1567 	if (fence && f) {
1568 		dma_fence_put(*fence);
1569 		*fence = f;
1570 	} else {
1571 		dma_fence_put(f);
1572 	}
1573 
1574 error_free:
1575 	amdgpu_sync_free(&sync);
1576 	return r;
1577 
1578 }
1579 
1580 /**
1581  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1582  *
1583  * @adev: amdgpu_device pointer
1584  * @vm: requested vm
1585  * @ticket: optional reservation ticket used to reserve the VM
1586  *
1587  * Make sure all BOs which are moved are updated in the PTs.
1588  *
1589  * Returns:
1590  * 0 for success.
1591  *
1592  * PTs have to be reserved!
1593  */
1594 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1595 			   struct amdgpu_vm *vm,
1596 			   struct ww_acquire_ctx *ticket)
1597 {
1598 	struct amdgpu_bo_va *bo_va;
1599 	struct dma_resv *resv;
1600 	bool clear, unlock;
1601 	int r;
1602 
1603 	spin_lock(&vm->status_lock);
1604 	while (!list_empty(&vm->moved)) {
1605 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1606 					 base.vm_status);
1607 		spin_unlock(&vm->status_lock);
1608 
1609 		/* Per VM BOs never need to be cleared in the page tables */
1610 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1611 		if (r)
1612 			return r;
1613 		spin_lock(&vm->status_lock);
1614 	}
1615 
1616 	while (!list_empty(&vm->invalidated)) {
1617 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1618 					 base.vm_status);
1619 		resv = bo_va->base.bo->tbo.base.resv;
1620 		spin_unlock(&vm->status_lock);
1621 
1622 		/* Try to reserve the BO to avoid clearing its ptes */
1623 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1624 			clear = false;
1625 			unlock = true;
1626 		/* The caller is already holding the reservation lock */
1627 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1628 			clear = false;
1629 			unlock = false;
1630 		/* Somebody else is using the BO right now */
1631 		} else {
1632 			clear = true;
1633 			unlock = false;
1634 		}
1635 
1636 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1637 
1638 		if (unlock)
1639 			dma_resv_unlock(resv);
1640 		if (r)
1641 			return r;
1642 
1643 		/* Remember evicted DMABuf imports in compute VMs for later
1644 		 * validation
1645 		 */
1646 		if (vm->is_compute_context &&
1647 		    drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
1648 		    (!bo_va->base.bo->tbo.resource ||
1649 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1650 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1651 
1652 		spin_lock(&vm->status_lock);
1653 	}
1654 	spin_unlock(&vm->status_lock);
1655 
1656 	return 0;
1657 }
1658 
1659 /**
1660  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1661  *
1662  * @adev: amdgpu_device pointer
1663  * @vm: requested vm
1664  * @flush_type: flush type
1665  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1666  *
1667  * Flush TLB if needed for a compute VM.
1668  *
1669  * Returns:
1670  * 0 for success.
1671  */
1672 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1673 				struct amdgpu_vm *vm,
1674 				uint32_t flush_type,
1675 				uint32_t xcc_mask)
1676 {
1677 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1678 	bool all_hub = false;
1679 	int xcc = 0, r = 0;
1680 
1681 	WARN_ON_ONCE(!vm->is_compute_context);
1682 
1683 	/*
1684 	 * It can be that we race and lose here, but that is extremely unlikely
1685 	 * and the worst thing which could happen is that we flush the changes
1686 	 * into the TLB once more which is harmless.
1687 	 */
1688 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1689 		return 0;
1690 
1691 	if (adev->family == AMDGPU_FAMILY_AI ||
1692 	    adev->family == AMDGPU_FAMILY_RV)
1693 		all_hub = true;
1694 
1695 	for_each_inst(xcc, xcc_mask) {
1696 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1697 						   all_hub, xcc);
1698 		if (r)
1699 			break;
1700 	}
1701 	return r;
1702 }
1703 
1704 /**
1705  * amdgpu_vm_bo_add - add a bo to a specific vm
1706  *
1707  * @adev: amdgpu_device pointer
1708  * @vm: requested vm
1709  * @bo: amdgpu buffer object
1710  *
1711  * Add @bo into the requested vm and to the list of bos associated
1712  * with the vm.
1713  *
1714  * Returns:
1715  * Newly added bo_va or NULL for failure
1716  *
1717  * Object has to be reserved!
1718  */
1719 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1720 				      struct amdgpu_vm *vm,
1721 				      struct amdgpu_bo *bo)
1722 {
1723 	struct amdgpu_bo_va *bo_va;
1724 
1725 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1726 	if (bo_va == NULL) {
1727 		return NULL;
1728 	}
1729 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1730 
1731 	bo_va->ref_count = 1;
1732 	bo_va->last_pt_update = dma_fence_get_stub();
1733 	INIT_LIST_HEAD(&bo_va->valids);
1734 	INIT_LIST_HEAD(&bo_va->invalids);
1735 
1736 	if (!bo)
1737 		return bo_va;
1738 
1739 	dma_resv_assert_held(bo->tbo.base.resv);
1740 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1741 		bo_va->is_xgmi = true;
1742 		/* Power up XGMI if it can be potentially used */
1743 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1744 	}
1745 
1746 	return bo_va;
1747 }
1748 
1749 
1750 /**
1751  * amdgpu_vm_bo_insert_map - insert a new mapping
1752  *
1753  * @adev: amdgpu_device pointer
1754  * @bo_va: bo_va to store the address
1755  * @mapping: the mapping to insert
1756  *
1757  * Insert a new mapping into all structures.
1758  */
1759 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1760 				    struct amdgpu_bo_va *bo_va,
1761 				    struct amdgpu_bo_va_mapping *mapping)
1762 {
1763 	struct amdgpu_vm *vm = bo_va->base.vm;
1764 	struct amdgpu_bo *bo = bo_va->base.bo;
1765 
1766 	mapping->bo_va = bo_va;
1767 	list_add(&mapping->list, &bo_va->invalids);
1768 	amdgpu_vm_it_insert(mapping, &vm->va);
1769 
1770 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1771 		amdgpu_vm_prt_get(adev);
1772 
1773 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1774 		amdgpu_vm_bo_moved(&bo_va->base);
1775 
1776 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1777 }
1778 
1779 /* Validate operation parameters to prevent potential abuse */
1780 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1781 					  struct amdgpu_bo *bo,
1782 					  uint64_t saddr,
1783 					  uint64_t offset,
1784 					  uint64_t size)
1785 {
1786 	uint64_t tmp, lpfn;
1787 
1788 	if (saddr & AMDGPU_GPU_PAGE_MASK
1789 	    || offset & AMDGPU_GPU_PAGE_MASK
1790 	    || size & AMDGPU_GPU_PAGE_MASK)
1791 		return -EINVAL;
1792 
1793 	if (check_add_overflow(saddr, size, &tmp)
1794 	    || check_add_overflow(offset, size, &tmp)
1795 	    || size == 0 /* which also leads to end < begin */)
1796 		return -EINVAL;
1797 
1798 	/* make sure the object fits at this offset */
1799 	if (bo && offset + size > amdgpu_bo_size(bo))
1800 		return -EINVAL;
1801 
1802 	/* Ensure the last pfn does not exceed max_pfn */
1803 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1804 	if (lpfn >= adev->vm_manager.max_pfn)
1805 		return -EINVAL;
1806 
1807 	return 0;
1808 }
1809 
1810 /**
1811  * amdgpu_vm_bo_map - map bo inside a vm
1812  *
1813  * @adev: amdgpu_device pointer
1814  * @bo_va: bo_va to store the address
1815  * @saddr: where to map the BO
1816  * @offset: requested offset in the BO
1817  * @size: BO size in bytes
1818  * @flags: attributes of pages (read/write/valid/etc.)
1819  *
1820  * Add a mapping of the BO at the specified addr into the VM.
1821  *
1822  * Returns:
1823  * 0 for success, error for failure.
1824  *
1825  * Object has to be reserved and unreserved outside!
1826  */
1827 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1828 		     struct amdgpu_bo_va *bo_va,
1829 		     uint64_t saddr, uint64_t offset,
1830 		     uint64_t size, uint64_t flags)
1831 {
1832 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1833 	struct amdgpu_bo *bo = bo_va->base.bo;
1834 	struct amdgpu_vm *vm = bo_va->base.vm;
1835 	uint64_t eaddr;
1836 	int r;
1837 
1838 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1839 	if (r)
1840 		return r;
1841 
1842 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1843 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1844 
1845 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1846 	if (tmp) {
1847 		/* bo and tmp overlap, invalid addr */
1848 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1849 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1850 			tmp->start, tmp->last + 1);
1851 		return -EINVAL;
1852 	}
1853 
1854 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1855 	if (!mapping)
1856 		return -ENOMEM;
1857 
1858 	mapping->start = saddr;
1859 	mapping->last = eaddr;
1860 	mapping->offset = offset;
1861 	mapping->flags = flags;
1862 
1863 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1864 
1865 	return 0;
1866 }
1867 
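/*
 * Editor's sketch (not part of the driver): a typical caller that has already
 * reserved @bo and the VM root PD pairs amdgpu_vm_bo_add() with
 * amdgpu_vm_bo_map().  The virtual address and flag combination below are
 * assumptions for illustration only.
 *
 *	struct amdgpu_bo_va *bo_va;
 *	int r;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (r)
 *		amdgpu_vm_bo_del(adev, bo_va);
 */
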
1868 /**
1869  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1870  *
1871  * @adev: amdgpu_device pointer
1872  * @bo_va: bo_va to store the address
1873  * @saddr: where to map the BO
1874  * @offset: requested offset in the BO
1875  * @size: size of the mapping in bytes
1876  * @flags: attributes of pages (read/write/valid/etc.)
1877  *
1878  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1879  * mappings as we do so.
1880  *
1881  * Returns:
1882  * 0 for success, error for failure.
1883  *
1884  * Object has to be reserved and unreserved outside!
1885  */
1886 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1887 			     struct amdgpu_bo_va *bo_va,
1888 			     uint64_t saddr, uint64_t offset,
1889 			     uint64_t size, uint64_t flags)
1890 {
1891 	struct amdgpu_bo_va_mapping *mapping;
1892 	struct amdgpu_bo *bo = bo_va->base.bo;
1893 	uint64_t eaddr;
1894 	int r;
1895 
1896 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1897 	if (r)
1898 		return r;
1899 
1900 	/* Allocate all the needed memory */
1901 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1902 	if (!mapping)
1903 		return -ENOMEM;
1904 
1905 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1906 	if (r) {
1907 		kfree(mapping);
1908 		return r;
1909 	}
1910 
1911 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1912 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1913 
1914 	mapping->start = saddr;
1915 	mapping->last = eaddr;
1916 	mapping->offset = offset;
1917 	mapping->flags = flags;
1918 
1919 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1920 
1921 	return 0;
1922 }
1923 
1924 /**
1925  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1926  *
1927  * @adev: amdgpu_device pointer
1928  * @bo_va: bo_va to remove the address from
1929  * @saddr: where the BO is mapped
1930  *
1931  * Remove a mapping of the BO at the specified addr from the VM.
1932  *
1933  * Returns:
1934  * 0 for success, error for failure.
1935  *
1936  * Object has to be reserved and unreserved outside!
1937  */
1938 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1939 		       struct amdgpu_bo_va *bo_va,
1940 		       uint64_t saddr)
1941 {
1942 	struct amdgpu_bo_va_mapping *mapping;
1943 	struct amdgpu_vm *vm = bo_va->base.vm;
1944 	bool valid = true;
1945 
1946 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1947 
1948 	list_for_each_entry(mapping, &bo_va->valids, list) {
1949 		if (mapping->start == saddr)
1950 			break;
1951 	}
1952 
1953 	if (&mapping->list == &bo_va->valids) {
1954 		valid = false;
1955 
1956 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1957 			if (mapping->start == saddr)
1958 				break;
1959 		}
1960 
1961 		if (&mapping->list == &bo_va->invalids)
1962 			return -ENOENT;
1963 	}
1964 
1965 	list_del(&mapping->list);
1966 	amdgpu_vm_it_remove(mapping, &vm->va);
1967 	mapping->bo_va = NULL;
1968 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1969 
1970 	if (valid)
1971 		list_add(&mapping->list, &vm->freed);
1972 	else
1973 		amdgpu_vm_free_mapping(adev, vm, mapping,
1974 				       bo_va->last_pt_update);
1975 
1976 	return 0;
1977 }
1978 
1979 /**
1980  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1981  *
1982  * @adev: amdgpu_device pointer
1983  * @vm: VM structure to use
1984  * @saddr: start of the range
1985  * @size: size of the range
1986  *
1987  * Remove all mappings in a range, splitting them as appropriate.
1988  *
1989  * Returns:
1990  * 0 for success, error for failure.
1991  */
1992 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1993 				struct amdgpu_vm *vm,
1994 				uint64_t saddr, uint64_t size)
1995 {
1996 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1997 	LIST_HEAD(removed);
1998 	uint64_t eaddr;
1999 	int r;
2000 
2001 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
2002 	if (r)
2003 		return r;
2004 
2005 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2006 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
2007 
2008 	/* Allocate all the needed memory */
2009 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2010 	if (!before)
2011 		return -ENOMEM;
2012 	INIT_LIST_HEAD(&before->list);
2013 
2014 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2015 	if (!after) {
2016 		kfree(before);
2017 		return -ENOMEM;
2018 	}
2019 	INIT_LIST_HEAD(&after->list);
2020 
2021 	/* Now gather all removed mappings */
2022 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2023 	while (tmp) {
2024 		/* Remember mapping split at the start */
2025 		if (tmp->start < saddr) {
2026 			before->start = tmp->start;
2027 			before->last = saddr - 1;
2028 			before->offset = tmp->offset;
2029 			before->flags = tmp->flags;
2030 			before->bo_va = tmp->bo_va;
2031 			list_add(&before->list, &tmp->bo_va->invalids);
2032 		}
2033 
2034 		/* Remember mapping split at the end */
2035 		if (tmp->last > eaddr) {
2036 			after->start = eaddr + 1;
2037 			after->last = tmp->last;
2038 			after->offset = tmp->offset;
2039 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2040 			after->flags = tmp->flags;
2041 			after->bo_va = tmp->bo_va;
2042 			list_add(&after->list, &tmp->bo_va->invalids);
2043 		}
2044 
2045 		list_del(&tmp->list);
2046 		list_add(&tmp->list, &removed);
2047 
2048 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2049 	}
2050 
2051 	/* And free them up */
2052 	list_for_each_entry_safe(tmp, next, &removed, list) {
2053 		amdgpu_vm_it_remove(tmp, &vm->va);
2054 		list_del(&tmp->list);
2055 
2056 		if (tmp->start < saddr)
2057 			tmp->start = saddr;
2058 		if (tmp->last > eaddr)
2059 			tmp->last = eaddr;
2060 
2061 		tmp->bo_va = NULL;
2062 		list_add(&tmp->list, &vm->freed);
2063 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2064 	}
2065 
2066 	/* Insert partial mapping before the range */
2067 	if (!list_empty(&before->list)) {
2068 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2069 
2070 		amdgpu_vm_it_insert(before, &vm->va);
2071 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2072 			amdgpu_vm_prt_get(adev);
2073 
2074 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2075 		    !before->bo_va->base.moved)
2076 			amdgpu_vm_bo_moved(&before->bo_va->base);
2077 	} else {
2078 		kfree(before);
2079 	}
2080 
2081 	/* Insert partial mapping after the range */
2082 	if (!list_empty(&after->list)) {
2083 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2084 
2085 		amdgpu_vm_it_insert(after, &vm->va);
2086 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2087 			amdgpu_vm_prt_get(adev);
2088 
2089 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2090 		    !after->bo_va->base.moved)
2091 			amdgpu_vm_bo_moved(&after->bo_va->base);
2092 	} else {
2093 		kfree(after);
2094 	}
2095 
2096 	return 0;
2097 }
2098 
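/*
 * Worked example (editor's note): assume an existing mapping covers GPU pages
 * [0x100, 0x3ff] and amdgpu_vm_bo_clear_mappings() is called for pages
 * [0x200, 0x2ff].  The loop above keeps a "before" remainder [0x100, 0x1ff]
 * with the original BO offset, an "after" remainder [0x300, 0x3ff] whose
 * offset is advanced by the 0x200 pages that were skipped, and moves the
 * clipped middle [0x200, 0x2ff] to &vm->freed so its PTEs are invalidated on
 * the next VM update.
 */
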
2099 /**
2100  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2101  *
2102  * @vm: the requested VM
2103  * @addr: the address
2104  *
2105  * Find a mapping by its address.
2106  *
2107  * Returns:
2108  * The amdgpu_bo_va_mapping matching @addr or NULL
2109  *
2110  */
2111 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2112 							 uint64_t addr)
2113 {
2114 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2115 }
2116 
2117 /**
2118  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2119  *
2120  * @vm: the requested vm
2121  * @ticket: CS ticket
2122  *
2123  * Trace all mappings of BOs reserved during a command submission.
2124  */
2125 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2126 {
2127 	struct amdgpu_bo_va_mapping *mapping;
2128 
2129 	if (!trace_amdgpu_vm_bo_cs_enabled())
2130 		return;
2131 
2132 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2133 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2134 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2135 			struct amdgpu_bo *bo;
2136 
2137 			bo = mapping->bo_va->base.bo;
2138 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2139 			    ticket)
2140 				continue;
2141 		}
2142 
2143 		trace_amdgpu_vm_bo_cs(mapping);
2144 	}
2145 }
2146 
2147 /**
2148  * amdgpu_vm_bo_del - remove a bo from a specific vm
2149  *
2150  * @adev: amdgpu_device pointer
2151  * @bo_va: requested bo_va
2152  *
2153  * Remove @bo_va->bo from the requested vm.
2154  *
2155  * Object has to be reserved!
2156  */
2157 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2158 		      struct amdgpu_bo_va *bo_va)
2159 {
2160 	struct amdgpu_bo_va_mapping *mapping, *next;
2161 	struct amdgpu_bo *bo = bo_va->base.bo;
2162 	struct amdgpu_vm *vm = bo_va->base.vm;
2163 	struct amdgpu_vm_bo_base **base;
2164 
2165 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2166 
2167 	if (bo) {
2168 		dma_resv_assert_held(bo->tbo.base.resv);
2169 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2170 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2171 
2172 		for (base = &bo_va->base.bo->vm_bo; *base;
2173 		     base = &(*base)->next) {
2174 			if (*base != &bo_va->base)
2175 				continue;
2176 
2177 			amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2178 			*base = bo_va->base.next;
2179 			break;
2180 		}
2181 	}
2182 
2183 	spin_lock(&vm->status_lock);
2184 	list_del(&bo_va->base.vm_status);
2185 	spin_unlock(&vm->status_lock);
2186 
2187 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2188 		list_del(&mapping->list);
2189 		amdgpu_vm_it_remove(mapping, &vm->va);
2190 		mapping->bo_va = NULL;
2191 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2192 		list_add(&mapping->list, &vm->freed);
2193 	}
2194 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2195 		list_del(&mapping->list);
2196 		amdgpu_vm_it_remove(mapping, &vm->va);
2197 		amdgpu_vm_free_mapping(adev, vm, mapping,
2198 				       bo_va->last_pt_update);
2199 	}
2200 
2201 	dma_fence_put(bo_va->last_pt_update);
2202 
2203 	if (bo && bo_va->is_xgmi)
2204 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2205 
2206 	kfree(bo_va);
2207 }
2208 
2209 /**
2210  * amdgpu_vm_evictable - check if we can evict a VM
2211  *
2212  * @bo: A page table of the VM.
2213  *
2214  * Check if it is possible to evict a VM.
2215  */
2216 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2217 {
2218 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2219 
2220 	/* Page tables of a destroyed VM can go away immediately */
2221 	if (!bo_base || !bo_base->vm)
2222 		return true;
2223 
2224 	/* Don't evict VM page tables while they are busy */
2225 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2226 		return false;
2227 
2228 	/* Try to block ongoing updates */
2229 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2230 		return false;
2231 
2232 	/* Don't evict VM page tables while they are updated */
2233 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2234 		amdgpu_vm_eviction_unlock(bo_base->vm);
2235 		return false;
2236 	}
2237 
2238 	bo_base->vm->evicting = true;
2239 	amdgpu_vm_eviction_unlock(bo_base->vm);
2240 	return true;
2241 }
2242 
2243 /**
2244  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2245  *
2246  * @bo: amdgpu buffer object
2247  * @evicted: is the BO evicted
2248  *
2249  * Mark @bo as invalid.
2250  */
2251 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2252 {
2253 	struct amdgpu_vm_bo_base *bo_base;
2254 
2255 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2256 		struct amdgpu_vm *vm = bo_base->vm;
2257 
2258 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2259 			amdgpu_vm_bo_evicted(bo_base);
2260 			continue;
2261 		}
2262 
2263 		if (bo_base->moved)
2264 			continue;
2265 		bo_base->moved = true;
2266 
2267 		if (bo->tbo.type == ttm_bo_type_kernel)
2268 			amdgpu_vm_bo_relocated(bo_base);
2269 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2270 			amdgpu_vm_bo_moved(bo_base);
2271 		else
2272 			amdgpu_vm_bo_invalidated(bo_base);
2273 	}
2274 }
2275 
2276 /**
2277  * amdgpu_vm_bo_move - handle BO move
2278  *
2279  * @bo: amdgpu buffer object
2280  * @new_mem: the new placement of the BO
2281  * @evicted: is the BO evicted
2282  *
2283  * Update the memory stats for the new placement and mark @bo as invalid.
2284  */
2285 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2286 		       bool evicted)
2287 {
2288 	struct amdgpu_vm_bo_base *bo_base;
2289 
2290 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2291 		struct amdgpu_vm *vm = bo_base->vm;
2292 
2293 		spin_lock(&vm->status_lock);
2294 		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2295 		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2296 		spin_unlock(&vm->status_lock);
2297 	}
2298 
2299 	amdgpu_vm_bo_invalidate(bo, evicted);
2300 }
2301 
2302 /**
2303  * amdgpu_vm_get_block_size - calculate VM page table size as a power of two
2304  *
2305  * @vm_size: VM size
2306  *
2307  * Returns:
2308  * VM page table size as a power of two
2309  */
2310 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2311 {
2312 	/* Total bits covered by PD + PTs */
2313 	unsigned bits = ilog2(vm_size) + 18;
2314 
2315 	/* Make sure the PD is 4K in size up to 8GB address space.
2316 	   Above that, split equally between PD and PTs */
2317 	if (vm_size <= 8)
2318 		return (bits - 9);
2319 	else
2320 		return ((bits + 3) / 2);
2321 }
2322 
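/*
 * Editor's note, two hedged examples of the calculation above: an 8 GB VM has
 * ilog2(8) + 18 = 21 bits of page address, so the block size is 21 - 9 = 12
 * and the root PD stays at 512 entries (4K).  A 256 GB VM has 26 bits, which
 * is split as (26 + 3) / 2 = 14 bits per PT block, leaving 12 bits for the PD.
 */
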
2323 /**
2324  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2325  *
2326  * @adev: amdgpu_device pointer
2327  * @min_vm_size: the minimum vm size in GB if it is set to auto
2328  * @fragment_size_default: Default PTE fragment size
2329  * @max_level: max VMPT level
2330  * @max_bits: max address space size in bits
2331  *
2332  */
2333 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2334 			   uint32_t fragment_size_default, unsigned max_level,
2335 			   unsigned max_bits)
2336 {
2337 	unsigned int max_size = 1 << (max_bits - 30);
2338 	unsigned int vm_size;
2339 	uint64_t tmp;
2340 
2341 	/* adjust vm size first */
2342 	if (amdgpu_vm_size != -1) {
2343 		vm_size = amdgpu_vm_size;
2344 		if (vm_size > max_size) {
2345 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2346 				 amdgpu_vm_size, max_size);
2347 			vm_size = max_size;
2348 		}
2349 	} else {
2350 		struct sysinfo si;
2351 		unsigned int phys_ram_gb;
2352 
2353 		/* Optimal VM size depends on the amount of physical
2354 		 * RAM available. Underlying requirements and
2355 		 * assumptions:
2356 		 *
2357 		 *  - Need to map system memory and VRAM from all GPUs
2358 		 *     - VRAM from other GPUs not known here
2359 		 *     - Assume VRAM <= system memory
2360 		 *  - On GFX8 and older, VM space can be segmented for
2361 		 *    different MTYPEs
2362 		 *  - Need to allow room for fragmentation, guard pages etc.
2363 		 *
2364 		 * This adds up to a rough guess of system memory x3.
2365 		 * Round up to power of two to maximize the available
2366 		 * VM size with the given page table size.
2367 		 */
2368 		si_meminfo(&si);
2369 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2370 			       (1 << 30) - 1) >> 30;
2371 		vm_size = roundup_pow_of_two(
2372 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2373 	}
2374 
2375 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2376 
2377 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2378 	if (amdgpu_vm_block_size != -1)
2379 		tmp >>= amdgpu_vm_block_size - 9;
2380 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2381 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2382 	switch (adev->vm_manager.num_level) {
2383 	case 3:
2384 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2385 		break;
2386 	case 2:
2387 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2388 		break;
2389 	case 1:
2390 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2391 		break;
2392 	default:
2393 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2394 	}
2395 	/* block size depends on vm size and hw setup */
2396 	if (amdgpu_vm_block_size != -1)
2397 		adev->vm_manager.block_size =
2398 			min((unsigned)amdgpu_vm_block_size, max_bits
2399 			    - AMDGPU_GPU_PAGE_SHIFT
2400 			    - 9 * adev->vm_manager.num_level);
2401 	else if (adev->vm_manager.num_level > 1)
2402 		adev->vm_manager.block_size = 9;
2403 	else
2404 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2405 
2406 	if (amdgpu_vm_fragment_size == -1)
2407 		adev->vm_manager.fragment_size = fragment_size_default;
2408 	else
2409 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2410 
2411 	dev_info(
2412 		adev->dev,
2413 		"vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2414 		vm_size, adev->vm_manager.num_level + 1,
2415 		adev->vm_manager.block_size, adev->vm_manager.fragment_size);
2416 }
2417 
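/*
 * Worked example (editor's note): with a resolved vm_size of 256 GB the code
 * above sets max_pfn = 256 << 18 = 0x4000000 4 KiB pages, i.e. exactly
 * 256 GiB of GPU virtual address space, and then derives the number of page
 * table levels and the block size from that value unless the module
 * parameters override them.
 */
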
2418 /**
2419  * amdgpu_vm_wait_idle - wait for the VM to become idle
2420  *
2421  * @vm: VM object to wait for
2422  * @timeout: timeout to wait for VM to become idle
2423  */
2424 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2425 {
2426 	timeout = drm_sched_entity_flush(&vm->immediate, timeout);
2427 	if (timeout <= 0)
2428 		return timeout;
2429 
2430 	return drm_sched_entity_flush(&vm->delayed, timeout);
2431 }
2432 
2433 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2434 {
2435 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2436 
2437 	kfree(ti);
2438 }
2439 
2440 static inline struct amdgpu_vm *
2441 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2442 {
2443 	struct amdgpu_vm *vm;
2444 	unsigned long flags;
2445 
2446 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2447 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2448 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2449 
2450 	return vm;
2451 }
2452 
2453 /**
2454  * amdgpu_vm_put_task_info - drop a reference to the vm task_info ptr
2455  *
2456  * @task_info: task_info struct under discussion.
2457  *
2458  * Frees the vm task_info ptr at the last put.
2459  */
2460 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2461 {
2462 	if (task_info)
2463 		kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2464 }
2465 
2466 /**
2467  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2468  *
2469  * @vm: VM to get info from
2470  *
2471  * Returns the reference counted task_info structure, which must be
2472  * referenced down with amdgpu_vm_put_task_info.
2473  */
2474 struct amdgpu_task_info *
2475 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2476 {
2477 	struct amdgpu_task_info *ti = NULL;
2478 
2479 	if (vm) {
2480 		ti = vm->task_info;
2481 		kref_get(&vm->task_info->refcount);
2482 	}
2483 
2484 	return ti;
2485 }
2486 
2487 /**
2488  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2489  *
2490  * @adev: drm device pointer
2491  * @pasid: PASID identifier for VM
2492  *
2493  * Returns the reference counted task_info structure, which must be
2494  * referenced down with amdgpu_vm_put_task_info.
2495  */
2496 struct amdgpu_task_info *
2497 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2498 {
2499 	return amdgpu_vm_get_task_info_vm(
2500 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2501 }
2502 
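/*
 * Editor's sketch of the get/put pattern (not from the driver): look up the
 * task behind a faulting PASID, log it, and drop the reference again.
 *
 *	struct amdgpu_task_info *ti;
 *
 *	ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *	if (ti) {
 *		dev_info(adev->dev, "fault from %s (pid %d)\n",
 *			 ti->process_name, ti->task.pid);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 */
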
2503 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2504 {
2505 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2506 	if (!vm->task_info)
2507 		return -ENOMEM;
2508 
2509 	kref_init(&vm->task_info->refcount);
2510 	return 0;
2511 }
2512 
2513 /**
2514  * amdgpu_vm_set_task_info - Sets VMs task info.
2515  *
2516  * @vm: vm for which to set the info
2517  */
2518 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2519 {
2520 	if (!vm->task_info)
2521 		return;
2522 
2523 	if (vm->task_info->task.pid == current->pid)
2524 		return;
2525 
2526 	vm->task_info->task.pid = current->pid;
2527 	get_task_comm(vm->task_info->task.comm, current);
2528 
2529 	if (current->group_leader->mm != current->mm)
2530 		return;
2531 
2532 	vm->task_info->tgid = current->group_leader->pid;
2533 	get_task_comm(vm->task_info->process_name, current->group_leader);
2534 }
2535 
2536 /**
2537  * amdgpu_vm_init - initialize a vm instance
2538  *
2539  * @adev: amdgpu_device pointer
2540  * @vm: requested vm
2541  * @xcp_id: GPU partition selection id
2542  *
2543  * Init @vm fields.
2544  *
2545  * Returns:
2546  * 0 for success, error for failure.
2547  */
2548 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2549 		   int32_t xcp_id)
2550 {
2551 	struct amdgpu_bo *root_bo;
2552 	struct amdgpu_bo_vm *root;
2553 	int r, i;
2554 
2555 	vm->va = RB_ROOT_CACHED;
2556 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2557 		vm->reserved_vmid[i] = NULL;
2558 	INIT_LIST_HEAD(&vm->evicted);
2559 	INIT_LIST_HEAD(&vm->evicted_user);
2560 	INIT_LIST_HEAD(&vm->relocated);
2561 	INIT_LIST_HEAD(&vm->moved);
2562 	INIT_LIST_HEAD(&vm->idle);
2563 	INIT_LIST_HEAD(&vm->invalidated);
2564 	spin_lock_init(&vm->status_lock);
2565 	INIT_LIST_HEAD(&vm->freed);
2566 	INIT_LIST_HEAD(&vm->done);
2567 	INIT_KFIFO(vm->faults);
2568 
2569 	r = amdgpu_vm_init_entities(adev, vm);
2570 	if (r)
2571 		return r;
2572 
2573 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2574 
2575 	vm->is_compute_context = false;
2576 
2577 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2578 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2579 
2580 	dev_dbg(adev->dev, "VM update mode is %s\n",
2581 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2582 	WARN_ONCE((vm->use_cpu_for_update &&
2583 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2584 		  "CPU update of VM recommended only for large BAR system\n");
2585 
2586 	if (vm->use_cpu_for_update)
2587 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2588 	else
2589 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2590 
2591 	vm->last_update = dma_fence_get_stub();
2592 	vm->last_unlocked = dma_fence_get_stub();
2593 	vm->last_tlb_flush = dma_fence_get_stub();
2594 	vm->generation = amdgpu_vm_generation(adev, NULL);
2595 
2596 	mutex_init(&vm->eviction_lock);
2597 	vm->evicting = false;
2598 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2599 
2600 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2601 				false, &root, xcp_id);
2602 	if (r)
2603 		goto error_free_delayed;
2604 
2605 	root_bo = amdgpu_bo_ref(&root->bo);
2606 	r = amdgpu_bo_reserve(root_bo, true);
2607 	if (r) {
2608 		amdgpu_bo_unref(&root_bo);
2609 		goto error_free_delayed;
2610 	}
2611 
2612 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2613 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2614 	if (r)
2615 		goto error_free_root;
2616 
2617 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2618 	if (r)
2619 		goto error_free_root;
2620 
2621 	r = amdgpu_vm_create_task_info(vm);
2622 	if (r)
2623 		dev_dbg(adev->dev, "Failed to create task info for VM\n");
2624 
2625 	amdgpu_bo_unreserve(vm->root.bo);
2626 	amdgpu_bo_unref(&root_bo);
2627 
2628 	return 0;
2629 
2630 error_free_root:
2631 	amdgpu_vm_pt_free_root(adev, vm);
2632 	amdgpu_bo_unreserve(vm->root.bo);
2633 	amdgpu_bo_unref(&root_bo);
2634 
2635 error_free_delayed:
2636 	dma_fence_put(vm->last_tlb_flush);
2637 	dma_fence_put(vm->last_unlocked);
2638 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2639 	amdgpu_vm_fini_entities(vm);
2640 
2641 	return r;
2642 }
2643 
2644 /**
2645  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2646  *
2647  * @adev: amdgpu_device pointer
2648  * @vm: requested vm
2649  *
2650  * This only works on GFX VMs that don't have any BOs added and no
2651  * page tables allocated yet.
2652  *
2653  * Changes the following VM parameters:
2654  * - use_cpu_for_update
2655  *
2656  * Switches the page table update backend to match the compute VM
2657  * update mode and waits for any pending SDMA updates before the
2658  * page tables can be modified by the CPU.
2659  *
2660  * Returns:
2661  * 0 for success, -errno for errors.
2662  */
2663 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2664 {
2665 	int r;
2666 
2667 	r = amdgpu_bo_reserve(vm->root.bo, true);
2668 	if (r)
2669 		return r;
2670 
2671 	/* Update VM state */
2672 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2673 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2674 	dev_dbg(adev->dev, "VM update mode is %s\n",
2675 		vm->use_cpu_for_update ? "CPU" : "SDMA");
2676 	WARN_ONCE((vm->use_cpu_for_update &&
2677 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2678 		  "CPU update of VM recommended only for large BAR system\n");
2679 
2680 	if (vm->use_cpu_for_update) {
2681 		/* Sync with last SDMA update/clear before switching to CPU */
2682 		r = amdgpu_bo_sync_wait(vm->root.bo,
2683 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2684 		if (r)
2685 			goto unreserve_bo;
2686 
2687 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2688 		r = amdgpu_vm_pt_map_tables(adev, vm);
2689 		if (r)
2690 			goto unreserve_bo;
2691 
2692 	} else {
2693 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2694 	}
2695 
2696 	dma_fence_put(vm->last_update);
2697 	vm->last_update = dma_fence_get_stub();
2698 	vm->is_compute_context = true;
2699 
2700 unreserve_bo:
2701 	amdgpu_bo_unreserve(vm->root.bo);
2702 	return r;
2703 }
2704 
2705 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2706 {
2707 	for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2708 		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2709 		      vm->stats[i].evicted == 0))
2710 			return false;
2711 	}
2712 	return true;
2713 }
2714 
2715 /**
2716  * amdgpu_vm_fini - tear down a vm instance
2717  *
2718  * @adev: amdgpu_device pointer
2719  * @vm: requested vm
2720  *
2721  * Tear down @vm.
2722  * Unbind the VM and remove all bos from the vm bo list
2723  */
2724 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2725 {
2726 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2727 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2728 	struct amdgpu_bo *root;
2729 	unsigned long flags;
2730 	int i;
2731 
2732 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2733 
2734 	root = amdgpu_bo_ref(vm->root.bo);
2735 	amdgpu_bo_reserve(root, true);
2736 	amdgpu_vm_set_pasid(adev, vm, 0);
2737 	dma_fence_wait(vm->last_unlocked, false);
2738 	dma_fence_put(vm->last_unlocked);
2739 	dma_fence_wait(vm->last_tlb_flush, false);
2740 	/* Make sure that all fence callbacks have completed */
2741 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2742 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2743 	dma_fence_put(vm->last_tlb_flush);
2744 
2745 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2746 		if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2747 			amdgpu_vm_prt_fini(adev, vm);
2748 			prt_fini_needed = false;
2749 		}
2750 
2751 		list_del(&mapping->list);
2752 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2753 	}
2754 
2755 	amdgpu_vm_pt_free_root(adev, vm);
2756 	amdgpu_bo_unreserve(root);
2757 	amdgpu_bo_unref(&root);
2758 	WARN_ON(vm->root.bo);
2759 
2760 	amdgpu_vm_fini_entities(vm);
2761 
2762 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2763 		dev_err(adev->dev, "still active bo inside vm\n");
2764 
2765 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2766 					     &vm->va.rb_root, rb) {
2767 		/* Don't remove the mapping here, we don't want to trigger a
2768 		 * rebalance and the tree is about to be destroyed anyway.
2769 		 */
2770 		list_del(&mapping->list);
2771 		kfree(mapping);
2772 	}
2773 
2774 	dma_fence_put(vm->last_update);
2775 
2776 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2777 		if (vm->reserved_vmid[i]) {
2778 			amdgpu_vmid_free_reserved(adev, i);
2779 			vm->reserved_vmid[i] = false;
2780 		}
2781 	}
2782 
2783 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2784 
2785 	if (!amdgpu_vm_stats_is_zero(vm)) {
2786 		struct amdgpu_task_info *ti = vm->task_info;
2787 
2788 		dev_warn(adev->dev,
2789 			 "VM memory stats for proc %s(%d) task %s(%d) are non-zero at fini\n",
2790 			 ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
2791 	}
2792 
2793 	amdgpu_vm_put_task_info(vm->task_info);
2794 }
2795 
2796 /**
2797  * amdgpu_vm_manager_init - init the VM manager
2798  *
2799  * @adev: amdgpu_device pointer
2800  *
2801  * Initialize the VM manager structures
2802  */
2803 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2804 {
2805 	unsigned i;
2806 
2807 	/* Concurrent flushes are only possible starting with Vega10 and
2808 	 * are broken on Navi10 and Navi14.
2809 	 */
2810 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2811 					      adev->asic_type == CHIP_NAVI10 ||
2812 					      adev->asic_type == CHIP_NAVI14);
2813 	amdgpu_vmid_mgr_init(adev);
2814 
2815 	adev->vm_manager.fence_context =
2816 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2817 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2818 		adev->vm_manager.seqno[i] = 0;
2819 
2820 	spin_lock_init(&adev->vm_manager.prt_lock);
2821 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2822 
2823 	/* If not overridden by the user, compute VM page tables are only
2824 	 * updated by the CPU on large BAR systems by default.
2825 	 */
2826 #ifdef CONFIG_X86_64
2827 	if (amdgpu_vm_update_mode == -1) {
2828 		/* For asic with VF MMIO access protection
2829 		 * avoid using CPU for VM table updates
2830 		 */
2831 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2832 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2833 			adev->vm_manager.vm_update_mode =
2834 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2835 		else
2836 			adev->vm_manager.vm_update_mode = 0;
2837 	} else
2838 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2839 #else
2840 	adev->vm_manager.vm_update_mode = 0;
2841 #endif
2842 
2843 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2844 }
2845 
2846 /**
2847  * amdgpu_vm_manager_fini - cleanup VM manager
2848  *
2849  * @adev: amdgpu_device pointer
2850  *
2851  * Cleanup the VM manager and free resources.
2852  */
2853 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2854 {
2855 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2856 	xa_destroy(&adev->vm_manager.pasids);
2857 
2858 	amdgpu_vmid_mgr_fini(adev);
2859 }
2860 
2861 /**
2862  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2863  *
2864  * @dev: drm device pointer
2865  * @data: drm_amdgpu_vm
2866  * @filp: drm file pointer
2867  *
2868  * Returns:
2869  * 0 for success, -errno for errors.
2870  */
2871 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2872 {
2873 	union drm_amdgpu_vm *args = data;
2874 	struct amdgpu_device *adev = drm_to_adev(dev);
2875 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2876 
2877 	/* No valid flags defined yet */
2878 	if (args->in.flags)
2879 		return -EINVAL;
2880 
2881 	switch (args->in.op) {
2882 	case AMDGPU_VM_OP_RESERVE_VMID:
2883 		/* We only need to reserve a VMID from the gfxhub */
2884 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2885 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2886 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2887 		}
2888 
2889 		break;
2890 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2891 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2892 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2893 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2894 		}
2895 		break;
2896 	default:
2897 		return -EINVAL;
2898 	}
2899 
2900 	return 0;
2901 }
2902 
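/*
 * Userspace view (editor's sketch): reserving a VMID boils down to a single
 * ioctl on the device file descriptor; the snippet below is ordinary uAPI
 * plumbing shown only for illustration.
 *
 *	union drm_amdgpu_vm args = {
 *		.in.op = AMDGPU_VM_OP_RESERVE_VMID,
 *		.in.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_VM, &args))
 *		perror("AMDGPU_VM_OP_RESERVE_VMID");
 */
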
2903 /**
2904  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2905  * @adev: amdgpu device pointer
2906  * @pasid: PASID of the VM
2907  * @vmid: VMID, only used for GFX 9.4.3.
2908  * @node_id: Node_id received in IH cookie. Only applicable for
2909  *           GFX 9.4.3.
2910  * @addr: Address of the fault
2911  * @ts: Timestamp of the fault
2912  * @write_fault: true if it is a write fault, false if it is a read fault
2913  *
2914  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2915  * shouldn't be reported any more.
2916  */
2917 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2918 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2919 			    bool write_fault)
2920 {
2921 	bool is_compute_context = false;
2922 	struct amdgpu_bo *root;
2923 	unsigned long irqflags;
2924 	uint64_t value, flags;
2925 	struct amdgpu_vm *vm;
2926 	int r;
2927 
2928 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2929 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2930 	if (vm) {
2931 		root = amdgpu_bo_ref(vm->root.bo);
2932 		is_compute_context = vm->is_compute_context;
2933 	} else {
2934 		root = NULL;
2935 	}
2936 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2937 
2938 	if (!root)
2939 		return false;
2940 
2941 	addr /= AMDGPU_GPU_PAGE_SIZE;
2942 
2943 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2944 	    node_id, addr, ts, write_fault)) {
2945 		amdgpu_bo_unref(&root);
2946 		return true;
2947 	}
2948 
2949 	r = amdgpu_bo_reserve(root, true);
2950 	if (r)
2951 		goto error_unref;
2952 
2953 	/* Double check that the VM still exists */
2954 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2955 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2956 	if (vm && vm->root.bo != root)
2957 		vm = NULL;
2958 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2959 	if (!vm)
2960 		goto error_unlock;
2961 
2962 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2963 		AMDGPU_PTE_SYSTEM;
2964 
2965 	if (is_compute_context) {
2966 		/* Intentionally setting invalid PTE flag
2967 		 * combination to force a no-retry-fault
2968 		 */
2969 		flags = AMDGPU_VM_NORETRY_FLAGS;
2970 		value = 0;
2971 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2972 		/* Redirect the access to the dummy page */
2973 		value = adev->dummy_page_addr;
2974 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2975 			AMDGPU_PTE_WRITEABLE;
2976 
2977 	} else {
2978 		/* Let the hw retry silently on the PTE */
2979 		value = 0;
2980 	}
2981 
2982 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2983 	if (r) {
2984 		pr_debug("failed %d to reserve fence slot\n", r);
2985 		goto error_unlock;
2986 	}
2987 
2988 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2989 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2990 	if (r)
2991 		goto error_unlock;
2992 
2993 	r = amdgpu_vm_update_pdes(adev, vm, true);
2994 
2995 error_unlock:
2996 	amdgpu_bo_unreserve(root);
2997 	if (r < 0)
2998 		dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
2999 
3000 error_unref:
3001 	amdgpu_bo_unref(&root);
3002 
3003 	return false;
3004 }
3005 
3006 #if defined(CONFIG_DEBUG_FS)
3007 /**
3008  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3009  *
3010  * @vm: Requested VM for printing BO info
3011  * @m: debugfs file
3012  *
3013  * Print BO information in debugfs file for the VM
3014  */
3015 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3016 {
3017 	struct amdgpu_bo_va *bo_va, *tmp;
3018 	u64 total_idle = 0;
3019 	u64 total_evicted = 0;
3020 	u64 total_relocated = 0;
3021 	u64 total_moved = 0;
3022 	u64 total_invalidated = 0;
3023 	u64 total_done = 0;
3024 	unsigned int total_idle_objs = 0;
3025 	unsigned int total_evicted_objs = 0;
3026 	unsigned int total_relocated_objs = 0;
3027 	unsigned int total_moved_objs = 0;
3028 	unsigned int total_invalidated_objs = 0;
3029 	unsigned int total_done_objs = 0;
3030 	unsigned int id = 0;
3031 
3032 	spin_lock(&vm->status_lock);
3033 	seq_puts(m, "\tIdle BOs:\n");
3034 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3035 		if (!bo_va->base.bo)
3036 			continue;
3037 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3038 	}
3039 	total_idle_objs = id;
3040 	id = 0;
3041 
3042 	seq_puts(m, "\tEvicted BOs:\n");
3043 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3044 		if (!bo_va->base.bo)
3045 			continue;
3046 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3047 	}
3048 	total_evicted_objs = id;
3049 	id = 0;
3050 
3051 	seq_puts(m, "\tRelocated BOs:\n");
3052 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3053 		if (!bo_va->base.bo)
3054 			continue;
3055 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3056 	}
3057 	total_relocated_objs = id;
3058 	id = 0;
3059 
3060 	seq_puts(m, "\tMoved BOs:\n");
3061 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3062 		if (!bo_va->base.bo)
3063 			continue;
3064 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3065 	}
3066 	total_moved_objs = id;
3067 	id = 0;
3068 
3069 	seq_puts(m, "\tInvalidated BOs:\n");
3070 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3071 		if (!bo_va->base.bo)
3072 			continue;
3073 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3074 	}
3075 	total_invalidated_objs = id;
3076 	id = 0;
3077 
3078 	seq_puts(m, "\tDone BOs:\n");
3079 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3080 		if (!bo_va->base.bo)
3081 			continue;
3082 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3083 	}
3084 	spin_unlock(&vm->status_lock);
3085 	total_done_objs = id;
3086 
3087 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3088 		   total_idle_objs);
3089 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3090 		   total_evicted_objs);
3091 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3092 		   total_relocated_objs);
3093 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3094 		   total_moved_objs);
3095 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3096 		   total_invalidated_objs);
3097 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3098 		   total_done_objs);
3099 }
3100 #endif
3101 
3102 /**
3103  * amdgpu_vm_update_fault_cache - update cached fault info.
3104  * @adev: amdgpu device pointer
3105  * @pasid: PASID of the VM
3106  * @addr: Address of the fault
3107  * @status: GPUVM fault status register
3108  * @vmhub: which vmhub got the fault
3109  *
3110  * Cache the fault info for later use by userspace in debugging.
3111  */
3112 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3113 				  unsigned int pasid,
3114 				  uint64_t addr,
3115 				  uint32_t status,
3116 				  unsigned int vmhub)
3117 {
3118 	struct amdgpu_vm *vm;
3119 	unsigned long flags;
3120 
3121 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3122 
3123 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3124 	/* Don't update the fault cache if status is 0.  In the multiple
3125 	 * fault case, subsequent faults will return a 0 status which is
3126 	 * useless for userspace and replaces the useful fault status, so
3127 	 * only update if status is non-0.
3128 	 */
3129 	if (vm && status) {
3130 		vm->fault_info.addr = addr;
3131 		vm->fault_info.status = status;
3132 		/*
3133 		 * Update the fault information globally for later usage
3134 		 * when vm could be stale or freed.
3135 		 */
3136 		adev->vm_manager.fault_info.addr = addr;
3137 		adev->vm_manager.fault_info.vmhub = vmhub;
3138 		adev->vm_manager.fault_info.status = status;
3139 
3140 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3141 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3142 			vm->fault_info.vmhub |=
3143 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3144 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3145 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3146 			vm->fault_info.vmhub |=
3147 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3148 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3149 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3150 			vm->fault_info.vmhub |=
3151 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3152 		} else {
3153 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3154 		}
3155 	}
3156 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3157 }
3158 
3159 /**
3160  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3161  *
3162  * @vm: VM to test against.
3163  * @bo: BO to be tested.
3164  *
3165  * Returns true if the BO shares the dma_resv object with the root PD and is
3166  * always guaranteed to be valid inside the VM.
3167  */
3168 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3169 {
3170 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3171 }
3172 
3173 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
3174 			       struct amdgpu_task_info *task_info)
3175 {
3176 	dev_err(adev->dev,
3177 		" Process %s pid %d thread %s pid %d\n",
3178 		task_info->process_name, task_info->tgid,
3179 		task_info->task.comm, task_info->task.pid);
3180 }
3181