xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 297fef494d78d00fa563ead08396da6b4ba58172)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_amdkfd.h"
41 #include "amdgpu_gmc.h"
42 #include "amdgpu_xgmi.h"
43 #include "amdgpu_dma_buf.h"
44 #include "amdgpu_res_cursor.h"
45 #include "kfd_svm.h"
46 
47 /**
48  * DOC: GPUVM
49  *
50  * GPUVM is the MMU functionality provided on the GPU.
51  * GPUVM is similar to the legacy GART on older asics, however
52  * rather than there being a single global GART table
53  * for the entire GPU, there can be multiple GPUVM page tables active
54  * at any given time.  The GPUVM page tables can contain a mix of
55  * VRAM pages and system pages (both memory and MMIO) and system pages
56  * can be mapped as snooped (cached system pages) or unsnooped
57  * (uncached system pages).
58  *
59  * Each active GPUVM has an ID associated with it and there is a page table
60  * linked with each VMID.  When executing a command buffer,
61  * the kernel tells the engine what VMID to use for that command
62  * buffer.  VMIDs are allocated dynamically as commands are submitted.
63  * The userspace drivers maintain their own address space and the kernel
64  * sets up their page tables accordingly when they submit their
65  * command buffers and a VMID is assigned.
66  * The hardware supports up to 16 active GPUVMs at any given time.
67  *
68  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
69  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
70  * as other features such as encryption and caching attributes.
71  *
72  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
73  * addition to an aperture managed by a page table, VMID 0 also has
74  * several other apertures.  There is an aperture for direct access to VRAM
75  * and there is a legacy AGP aperture which just forwards accesses directly
76  * to the matching system physical addresses (or IOVAs when an IOMMU is
77  * present).  These apertures provide direct access to these memories without
78  * incurring the overhead of a page table.  VMID 0 is used by the kernel
79  * driver for tasks like memory management.
80  *
81  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
82  * For user applications, each application can have their own unique GPUVM
83  * address space.  The application manages the address space and the kernel
84  * driver manages the GPUVM page tables for each process.  If a GPU client
85  * accesses an invalid page, it will generate a GPU page fault, similar to
86  * accessing an invalid page on a CPU.
87  */
88 
89 #define START(node) ((node)->start)
90 #define LAST(node) ((node)->last)
91 
92 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
93 		     START, LAST, static, amdgpu_vm_it)
94 
95 #undef START
96 #undef LAST
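
/*
 * The INTERVAL_TREE_DEFINE() above generates the static
 * amdgpu_vm_it_insert/remove/iter_first/iter_next helpers used throughout
 * this file, keyed on the [start, last] GPU page range of each
 * struct amdgpu_bo_va_mapping.  A minimal sketch of iterating all mappings
 * overlapping a range (saddr and eaddr assumed set up by the caller):
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
 *	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr)) {
 *		// m overlaps [saddr, eaddr]
 *	}
 */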
97 
98 /**
99  * struct amdgpu_prt_cb - Helper to disable the partially resident texture feature from a fence callback
100  */
101 struct amdgpu_prt_cb {
102 
103 	/**
104 	 * @adev: amdgpu device
105 	 */
106 	struct amdgpu_device *adev;
107 
108 	/**
109 	 * @cb: callback
110 	 */
111 	struct dma_fence_cb cb;
112 };
113 
114 /**
115  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
116  */
117 struct amdgpu_vm_tlb_seq_struct {
118 	/**
119 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
120 	 */
121 	struct amdgpu_vm *vm;
122 
123 	/**
124 	 * @cb: callback
125 	 */
126 	struct dma_fence_cb cb;
127 };
128 
129 /**
130  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
131  *
132  * @adev: amdgpu_device pointer
133  * @vm: amdgpu_vm pointer
134  * @pasid: the pasid the VM is using on this GPU
135  *
136  * Set the pasid this VM is using on this GPU; it can also be used to remove
137  * the pasid by passing in zero.
138  *
139  */
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
141 			u32 pasid)
142 {
143 	int r;
144 
145 	if (vm->pasid == pasid)
146 		return 0;
147 
148 	if (vm->pasid) {
149 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
150 		if (r < 0)
151 			return r;
152 
153 		vm->pasid = 0;
154 	}
155 
156 	if (pasid) {
157 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
158 					GFP_KERNEL));
159 		if (r < 0)
160 			return r;
161 
162 		vm->pasid = pasid;
163 	}
164 
165 
166 	return 0;
167 }
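
/*
 * A minimal usage sketch for amdgpu_vm_set_pasid(), error handling trimmed
 * and the pasid value purely illustrative: install a pasid -> vm mapping and
 * remove it again later by passing zero.
 *
 *	r = amdgpu_vm_set_pasid(adev, vm, pasid);	// install pasid -> vm
 *	...
 *	r = amdgpu_vm_set_pasid(adev, vm, 0);		// remove the mapping
 *
 * Calling it again with the pasid the VM already uses is a no-op.
 */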
168 
169 /**
170  * amdgpu_vm_bo_evicted - vm_bo is evicted
171  *
172  * @vm_bo: vm_bo which is evicted
173  *
174  * State for PDs/PTs and per VM BOs which are not at the location they should
175  * be.
176  */
177 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
178 {
179 	struct amdgpu_vm *vm = vm_bo->vm;
180 	struct amdgpu_bo *bo = vm_bo->bo;
181 
182 	vm_bo->moved = true;
183 	spin_lock(&vm_bo->vm->status_lock);
184 	if (bo->tbo.type == ttm_bo_type_kernel)
185 		list_move(&vm_bo->vm_status, &vm->evicted);
186 	else
187 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
188 	spin_unlock(&vm_bo->vm->status_lock);
189 }
190 /**
191  * amdgpu_vm_bo_moved - vm_bo is moved
192  *
193  * @vm_bo: vm_bo which is moved
194  *
195  * State for per VM BOs which are moved, but that change is not yet reflected
196  * in the page tables.
197  */
198 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
199 {
200 	spin_lock(&vm_bo->vm->status_lock);
201 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
202 	spin_unlock(&vm_bo->vm->status_lock);
203 }
204 
205 /**
206  * amdgpu_vm_bo_idle - vm_bo is idle
207  *
208  * @vm_bo: vm_bo which is now idle
209  *
210  * State for PDs/PTs and per VM BOs which have gone through the state machine
211  * and are now idle.
212  */
213 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
214 {
215 	spin_lock(&vm_bo->vm->status_lock);
216 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
217 	spin_unlock(&vm_bo->vm->status_lock);
218 	vm_bo->moved = false;
219 }
220 
221 /**
222  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
223  *
224  * @vm_bo: vm_bo which is now invalidated
225  *
226  * State for normal BOs which are invalidated and whose change is not yet
227  * reflected in the PTs.
228  */
229 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
230 {
231 	spin_lock(&vm_bo->vm->status_lock);
232 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
233 	spin_unlock(&vm_bo->vm->status_lock);
234 }
235 
236 /**
237  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
238  *
239  * @vm_bo: vm_bo which is evicted
240  *
241  * State for BOs used by user mode queues which are not at the location they
242  * should be.
243  */
244 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
245 {
246 	vm_bo->moved = true;
247 	spin_lock(&vm_bo->vm->status_lock);
248 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
249 	spin_unlock(&vm_bo->vm->status_lock);
250 }
251 
252 /**
253  * amdgpu_vm_bo_relocated - vm_bo is relocated
254  *
255  * @vm_bo: vm_bo which is relocated
256  *
257  * State for PDs/PTs which need to update their parent PD.
258  * For the root PD, just move to idle state.
259  */
260 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
261 {
262 	if (vm_bo->bo->parent) {
263 		spin_lock(&vm_bo->vm->status_lock);
264 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
265 		spin_unlock(&vm_bo->vm->status_lock);
266 	} else {
267 		amdgpu_vm_bo_idle(vm_bo);
268 	}
269 }
270 
271 /**
272  * amdgpu_vm_bo_done - vm_bo is done
273  *
274  * @vm_bo: vm_bo which is now done
275  *
276  * State for normal BOs which were invalidated and whose change has already
277  * been reflected in the PTs.
278  */
279 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
280 {
281 	spin_lock(&vm_bo->vm->status_lock);
282 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
283 	spin_unlock(&vm_bo->vm->status_lock);
284 }
285 
286 /**
287  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
288  * @vm: the VM which state machine to reset
289  *
290  * Move all vm_bo objects in the VM into a state where they will be updated
291  * again during validation.
292  */
293 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
294 {
295 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
296 
297 	spin_lock(&vm->status_lock);
298 	list_splice_init(&vm->done, &vm->invalidated);
299 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
300 		vm_bo->moved = true;
301 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
302 		struct amdgpu_bo *bo = vm_bo->bo;
303 
304 		vm_bo->moved = true;
305 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
306 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
307 		else if (bo->parent)
308 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
309 	}
310 	spin_unlock(&vm->status_lock);
311 }
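
/*
 * Rough summary of the per-BO state machine handled by the helpers above:
 * BOs land on evicted/evicted_user when their backing store is not where it
 * should be, on moved/invalidated/relocated when they (or their parent PDs)
 * still need a page table update, and on idle (PDs/PTs and per-VM BOs) or
 * done (other BOs) once the page tables reflect their current location.
 * amdgpu_vm_bo_reset_state_machine() rewinds everything to the
 * moved/relocated/invalidated states after the page tables were
 * re-generated.
 */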
312 
313 /**
314  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
315  *
316  * @base: base structure for tracking BO usage in a VM
317  * @vm: vm to which bo is to be added
318  * @bo: amdgpu buffer object
319  *
320  * Initialize a bo_va_base structure and add it to the appropriate lists
321  *
322  */
323 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
324 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
325 {
326 	base->vm = vm;
327 	base->bo = bo;
328 	base->next = NULL;
329 	INIT_LIST_HEAD(&base->vm_status);
330 
331 	if (!bo)
332 		return;
333 	base->next = bo->vm_bo;
334 	bo->vm_bo = base;
335 
336 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
337 		return;
338 
339 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
340 
341 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
342 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
343 		amdgpu_vm_bo_relocated(base);
344 	else
345 		amdgpu_vm_bo_idle(base);
346 
347 	if (bo->preferred_domains &
348 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
349 		return;
350 
351 	/*
352 	 * We checked all the prerequisites, but it looks like this per-VM BO
353 	 * is currently evicted. Add the BO to the evicted list to make sure it
354 	 * is validated on next VM use to avoid a fault.
355 	 */
356 	amdgpu_vm_bo_evicted(base);
357 }
358 
359 /**
360  * amdgpu_vm_lock_pd - lock PD in drm_exec
361  *
362  * @vm: vm providing the BOs
363  * @exec: drm execution context
364  * @num_fences: number of extra fences to reserve
365  *
366  * Lock the VM root PD in the DRM execution context.
367  */
368 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
369 		      unsigned int num_fences)
370 {
371 	/* We need at least two fences for the VM PD/PT updates */
372 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
373 				    2 + num_fences);
374 }
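
/*
 * Typical call pattern, sketched only (exact caller context varies): the
 * root PD is locked together with the other BOs of a submission inside a
 * drm_exec retry loop.
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 *	...
 *	drm_exec_fini(&exec);
 */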
375 
376 /**
377  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
378  *
379  * @adev: amdgpu device pointer
380  * @vm: vm providing the BOs
381  *
382  * Move all BOs to the end of LRU and remember their positions to put them
383  * together.
384  */
385 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
386 				struct amdgpu_vm *vm)
387 {
388 	spin_lock(&adev->mman.bdev.lru_lock);
389 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
390 	spin_unlock(&adev->mman.bdev.lru_lock);
391 }
392 
393 /* Create scheduler entities for page table updates */
394 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
395 				   struct amdgpu_vm *vm)
396 {
397 	int r;
398 
399 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
400 				  adev->vm_manager.vm_pte_scheds,
401 				  adev->vm_manager.vm_pte_num_scheds, NULL);
402 	if (r)
403 		goto error;
404 
405 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
406 				     adev->vm_manager.vm_pte_scheds,
407 				     adev->vm_manager.vm_pte_num_scheds, NULL);
408 
409 error:
410 	drm_sched_entity_destroy(&vm->immediate);
411 	return r;
412 }
413 
414 /* Destroy the entities for page table updates again */
415 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
416 {
417 	drm_sched_entity_destroy(&vm->immediate);
418 	drm_sched_entity_destroy(&vm->delayed);
419 }
420 
421 /**
422  * amdgpu_vm_generation - return the page table re-generation counter
423  * @adev: the amdgpu_device
424  * @vm: optional VM to check, might be NULL
425  *
426  * Returns a page table re-generation token to allow checking if submissions
427  * are still valid to use this VM. The VM parameter might be NULL in which case
428  * just the VRAM lost counter will be used.
429  */
430 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
431 {
432 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
433 
434 	if (!vm)
435 		return result;
436 
437 	result += lower_32_bits(vm->generation);
438 	/* Add one if the page tables will be re-generated on next CS */
439 	if (drm_sched_entity_error(&vm->delayed))
440 		++result;
441 
442 	return result;
443 }
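
/*
 * Sketch of how the generation token is meant to be used; the caller-side
 * variable name and error code are illustrative only.  Record the token when
 * a submission is prepared and compare it again before trusting the page
 * tables:
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *	...
 *	if (gen != amdgpu_vm_generation(adev, vm))
 *		return -ESTALE;	// VRAM lost or PTs re-generated
 *
 * amdgpu_vm_validate() below resets the per-BO state machine and re-creates
 * the scheduler entities when it sees a token mismatch.
 */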
444 
445 /**
446  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
447  *
448  * @adev: amdgpu device pointer
449  * @vm: vm providing the BOs
450  * @ticket: optional reservation ticket used to reserve the VM
451  * @validate: callback to do the validation
452  * @param: parameter for the validation callback
453  *
454  * Validate the page table BOs and per-VM BOs on command submission if
455  * necessary. If a ticket is given, also try to validate evicted user queue
456  * BOs. They must already be reserved with the given ticket.
457  *
458  * Returns:
459  * Validation result.
460  */
461 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
462 		       struct ww_acquire_ctx *ticket,
463 		       int (*validate)(void *p, struct amdgpu_bo *bo),
464 		       void *param)
465 {
466 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
467 	struct amdgpu_vm_bo_base *bo_base;
468 	struct amdgpu_bo *shadow;
469 	struct amdgpu_bo *bo;
470 	int r;
471 
472 	if (vm->generation != new_vm_generation) {
473 		vm->generation = new_vm_generation;
474 		amdgpu_vm_bo_reset_state_machine(vm);
475 		amdgpu_vm_fini_entities(vm);
476 		r = amdgpu_vm_init_entities(adev, vm);
477 		if (r)
478 			return r;
479 	}
480 
481 	spin_lock(&vm->status_lock);
482 	while (!list_empty(&vm->evicted)) {
483 		bo_base = list_first_entry(&vm->evicted,
484 					   struct amdgpu_vm_bo_base,
485 					   vm_status);
486 		spin_unlock(&vm->status_lock);
487 
488 		bo = bo_base->bo;
489 		shadow = amdgpu_bo_shadowed(bo);
490 
491 		r = validate(param, bo);
492 		if (r)
493 			return r;
494 		if (shadow) {
495 			r = validate(param, shadow);
496 			if (r)
497 				return r;
498 		}
499 
500 		if (bo->tbo.type != ttm_bo_type_kernel) {
501 			amdgpu_vm_bo_moved(bo_base);
502 		} else {
503 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
504 			amdgpu_vm_bo_relocated(bo_base);
505 		}
506 		spin_lock(&vm->status_lock);
507 	}
508 	while (ticket && !list_empty(&vm->evicted_user)) {
509 		bo_base = list_first_entry(&vm->evicted_user,
510 					   struct amdgpu_vm_bo_base,
511 					   vm_status);
512 		spin_unlock(&vm->status_lock);
513 
514 		bo = bo_base->bo;
515 
516 		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
517 			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
518 
519 			pr_warn_ratelimited("Evicted user BO is not reserved\n");
520 			if (ti) {
521 				pr_warn_ratelimited("pid %d\n", ti->pid);
522 				amdgpu_vm_put_task_info(ti);
523 			}
524 
525 			return -EINVAL;
526 		}
527 
528 		r = validate(param, bo);
529 		if (r)
530 			return r;
531 
532 		amdgpu_vm_bo_invalidated(bo_base);
533 
534 		spin_lock(&vm->status_lock);
535 	}
536 	spin_unlock(&vm->status_lock);
537 
538 	amdgpu_vm_eviction_lock(vm);
539 	vm->evicting = false;
540 	amdgpu_vm_eviction_unlock(vm);
541 
542 	return 0;
543 }
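
/*
 * The @validate callback only has to move a BO back into a placement the VM
 * can use.  A minimal sketch of such a callback; the function name is
 * hypothetical and the ttm_operation_ctx setup is caller specific:
 *
 *	static int example_validate(void *param, struct amdgpu_bo *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { true, false };
 *
 *		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
 *		return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 */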
544 
545 /**
546  * amdgpu_vm_ready - check VM is ready for updates
547  *
548  * @vm: VM to check
549  *
550  * Check if all VM PDs/PTs are ready for updates
551  *
552  * Returns:
553  * True if VM is not evicting.
554  */
555 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
556 {
557 	bool empty;
558 	bool ret;
559 
560 	amdgpu_vm_eviction_lock(vm);
561 	ret = !vm->evicting;
562 	amdgpu_vm_eviction_unlock(vm);
563 
564 	spin_lock(&vm->status_lock);
565 	empty = list_empty(&vm->evicted);
566 	spin_unlock(&vm->status_lock);
567 
568 	return ret && empty;
569 }
570 
571 /**
572  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
573  *
574  * @adev: amdgpu_device pointer
575  */
576 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
577 {
578 	const struct amdgpu_ip_block *ip_block;
579 	bool has_compute_vm_bug;
580 	struct amdgpu_ring *ring;
581 	int i;
582 
583 	has_compute_vm_bug = false;
584 
585 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
586 	if (ip_block) {
587 		/* Compute has a VM bug for GFX version < 7.
588 		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
589 		if (ip_block->version->major <= 7)
590 			has_compute_vm_bug = true;
591 		else if (ip_block->version->major == 8)
592 			if (adev->gfx.mec_fw_version < 673)
593 				has_compute_vm_bug = true;
594 	}
595 
596 	for (i = 0; i < adev->num_rings; i++) {
597 		ring = adev->rings[i];
598 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
599 			/* only compute rings */
600 			ring->has_compute_vm_bug = has_compute_vm_bug;
601 		else
602 			ring->has_compute_vm_bug = false;
603 	}
604 }
605 
606 /**
607  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
608  *
609  * @ring: ring on which the job will be submitted
610  * @job: job to submit
611  *
612  * Returns:
613  * True if sync is needed.
614  */
615 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
616 				  struct amdgpu_job *job)
617 {
618 	struct amdgpu_device *adev = ring->adev;
619 	unsigned vmhub = ring->vm_hub;
620 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
621 
622 	if (job->vmid == 0)
623 		return false;
624 
625 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
626 		return true;
627 
628 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
629 		return true;
630 
631 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
632 		return true;
633 
634 	return false;
635 }
636 
637 /**
638  * amdgpu_vm_flush - hardware flush the vm
639  *
640  * @ring: ring to use for flush
641  * @job:  related job
642  * @need_pipe_sync: is pipe sync needed
643  *
644  * Emit a VM flush when it is necessary.
645  *
646  * Returns:
647  * 0 on success, errno otherwise.
648  */
649 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
650 		    bool need_pipe_sync)
651 {
652 	struct amdgpu_device *adev = ring->adev;
653 	unsigned vmhub = ring->vm_hub;
654 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
655 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
656 	bool spm_update_needed = job->spm_update_needed;
657 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
658 		job->gds_switch_needed;
659 	bool vm_flush_needed = job->vm_needs_flush;
660 	struct dma_fence *fence = NULL;
661 	bool pasid_mapping_needed = false;
662 	unsigned int patch;
663 	int r;
664 
665 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
666 		gds_switch_needed = true;
667 		vm_flush_needed = true;
668 		pasid_mapping_needed = true;
669 		spm_update_needed = true;
670 	}
671 
672 	mutex_lock(&id_mgr->lock);
673 	if (id->pasid != job->pasid || !id->pasid_mapping ||
674 	    !dma_fence_is_signaled(id->pasid_mapping))
675 		pasid_mapping_needed = true;
676 	mutex_unlock(&id_mgr->lock);
677 
678 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
679 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
680 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
681 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
682 		ring->funcs->emit_wreg;
683 
684 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
685 		return 0;
686 
687 	amdgpu_ring_ib_begin(ring);
688 	if (ring->funcs->init_cond_exec)
689 		patch = amdgpu_ring_init_cond_exec(ring,
690 						   ring->cond_exe_gpu_addr);
691 
692 	if (need_pipe_sync)
693 		amdgpu_ring_emit_pipeline_sync(ring);
694 
695 	if (vm_flush_needed) {
696 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
697 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
698 	}
699 
700 	if (pasid_mapping_needed)
701 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
702 
703 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
704 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
705 
706 	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
707 	    gds_switch_needed) {
708 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
709 					    job->gds_size, job->gws_base,
710 					    job->gws_size, job->oa_base,
711 					    job->oa_size);
712 	}
713 
714 	if (vm_flush_needed || pasid_mapping_needed) {
715 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
716 		if (r)
717 			return r;
718 	}
719 
720 	if (vm_flush_needed) {
721 		mutex_lock(&id_mgr->lock);
722 		dma_fence_put(id->last_flush);
723 		id->last_flush = dma_fence_get(fence);
724 		id->current_gpu_reset_count =
725 			atomic_read(&adev->gpu_reset_counter);
726 		mutex_unlock(&id_mgr->lock);
727 	}
728 
729 	if (pasid_mapping_needed) {
730 		mutex_lock(&id_mgr->lock);
731 		id->pasid = job->pasid;
732 		dma_fence_put(id->pasid_mapping);
733 		id->pasid_mapping = dma_fence_get(fence);
734 		mutex_unlock(&id_mgr->lock);
735 	}
736 	dma_fence_put(fence);
737 
738 	amdgpu_ring_patch_cond_exec(ring, patch);
739 
740 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
741 	if (ring->funcs->emit_switch_buffer) {
742 		amdgpu_ring_emit_switch_buffer(ring);
743 		amdgpu_ring_emit_switch_buffer(ring);
744 	}
745 	amdgpu_ring_ib_end(ring);
746 	return 0;
747 }
748 
749 /**
750  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
751  *
752  * @vm: requested vm
753  * @bo: requested buffer object
754  *
755  * Find @bo inside the requested vm.
756  * Search inside the @bo's vm list for the requested vm
757  * Returns the found bo_va or NULL if none is found
758  *
759  * Object has to be reserved!
760  *
761  * Returns:
762  * Found bo_va or NULL.
763  */
764 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
765 				       struct amdgpu_bo *bo)
766 {
767 	struct amdgpu_vm_bo_base *base;
768 
769 	for (base = bo->vm_bo; base; base = base->next) {
770 		if (base->vm != vm)
771 			continue;
772 
773 		return container_of(base, struct amdgpu_bo_va, base);
774 	}
775 	return NULL;
776 }
777 
778 /**
779  * amdgpu_vm_map_gart - Resolve gart mapping of addr
780  *
781  * @pages_addr: optional DMA address to use for lookup
782  * @addr: the unmapped addr
783  *
784  * Look up the physical address of the page that the pte resolves
785  * to.
786  *
787  * Returns:
788  * The pointer for the page table entry.
789  */
790 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
791 {
792 	uint64_t result;
793 
794 	/* page table offset */
795 	result = pages_addr[addr >> PAGE_SHIFT];
796 
797 	/* in case cpu page size != gpu page size */
798 	result |= addr & (~PAGE_MASK);
799 
800 	result &= 0xFFFFFFFFFFFFF000ULL;
801 
802 	return result;
803 }
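
/*
 * Worked example with illustrative numbers: with 64K CPU pages
 * (PAGE_SHIFT == 16) and addr == 0x15345, pages_addr[0x1] supplies the DMA
 * address of the CPU page, say 0xabcd0000.  ORing in the in-page offset
 * (addr & ~PAGE_MASK == 0x5345) and masking off the low 12 bits yields
 * 0xabcd5000, i.e. the 4K aligned GPU page inside the larger CPU page.  With
 * 4K CPU pages the offset bits are masked away again and the result is just
 * the DMA address of the page.
 */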
804 
805 /**
806  * amdgpu_vm_update_pdes - make sure that all directories are valid
807  *
808  * @adev: amdgpu_device pointer
809  * @vm: requested vm
810  * @immediate: submit immediately to the paging queue
811  *
812  * Makes sure all directories are up to date.
813  *
814  * Returns:
815  * 0 for success, error for failure.
816  */
817 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
818 			  struct amdgpu_vm *vm, bool immediate)
819 {
820 	struct amdgpu_vm_update_params params;
821 	struct amdgpu_vm_bo_base *entry;
822 	bool flush_tlb_needed = false;
823 	LIST_HEAD(relocated);
824 	int r, idx;
825 
826 	spin_lock(&vm->status_lock);
827 	list_splice_init(&vm->relocated, &relocated);
828 	spin_unlock(&vm->status_lock);
829 
830 	if (list_empty(&relocated))
831 		return 0;
832 
833 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
834 		return -ENODEV;
835 
836 	memset(&params, 0, sizeof(params));
837 	params.adev = adev;
838 	params.vm = vm;
839 	params.immediate = immediate;
840 
841 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
842 	if (r)
843 		goto error;
844 
845 	list_for_each_entry(entry, &relocated, vm_status) {
846 		/* vm_flush_needed after updating moved PDEs */
847 		flush_tlb_needed |= entry->moved;
848 
849 		r = amdgpu_vm_pde_update(&params, entry);
850 		if (r)
851 			goto error;
852 	}
853 
854 	r = vm->update_funcs->commit(&params, &vm->last_update);
855 	if (r)
856 		goto error;
857 
858 	if (flush_tlb_needed)
859 		atomic64_inc(&vm->tlb_seq);
860 
861 	while (!list_empty(&relocated)) {
862 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
863 					 vm_status);
864 		amdgpu_vm_bo_idle(entry);
865 	}
866 
867 error:
868 	drm_dev_exit(idx);
869 	return r;
870 }
871 
872 /**
873  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
874  * @fence: unused
875  * @cb: the callback structure
876  *
877  * Increments the tlb sequence to make sure that future CS execute a VM flush.
878  */
879 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
880 				 struct dma_fence_cb *cb)
881 {
882 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
883 
884 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
885 	atomic64_inc(&tlb_cb->vm->tlb_seq);
886 	kfree(tlb_cb);
887 }
888 
889 /**
890  * amdgpu_vm_tlb_flush - prepare TLB flush
891  *
892  * @params: parameters for update
893  * @fence: input fence to sync TLB flush with
894  * @tlb_cb: the callback structure
895  *
896  * Increments the tlb sequence once @fence signals so future CS execute a VM flush.
897  */
898 static void
899 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
900 		    struct dma_fence **fence,
901 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
902 {
903 	struct amdgpu_vm *vm = params->vm;
904 
905 	if (!fence || !*fence)
906 		return;
907 
908 	tlb_cb->vm = vm;
909 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
910 				    amdgpu_vm_tlb_seq_cb)) {
911 		dma_fence_put(vm->last_tlb_flush);
912 		vm->last_tlb_flush = dma_fence_get(*fence);
913 	} else {
914 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
915 	}
916 
917 	/* Prepare a TLB flush fence to be attached to PTs */
918 	if (!params->unlocked && vm->is_compute_context) {
919 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
920 
921 		/* Makes sure no PD/PT is freed before the flush */
922 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
923 				   DMA_RESV_USAGE_BOOKKEEP);
924 	}
925 }
926 
927 /**
928  * amdgpu_vm_update_range - update a range in the vm page table
929  *
930  * @adev: amdgpu_device pointer to use for commands
931  * @vm: the VM to update the range
932  * @immediate: immediate submission in a page fault
933  * @unlocked: unlocked invalidation during MM callback
934  * @flush_tlb: trigger tlb invalidation after update completed
935  * @allow_override: change MTYPE for local NUMA nodes
936  * @resv: fences we need to sync to
937  * @start: start of mapped range
938  * @last: last mapped entry
939  * @flags: flags for the entries
940  * @offset: offset into nodes and pages_addr
941  * @vram_base: base for vram mappings
942  * @res: ttm_resource to map
943  * @pages_addr: DMA addresses to use for mapping
944  * @fence: optional resulting fence
945  *
946  * Fill in the page table entries between @start and @last.
947  *
948  * Returns:
949  * 0 for success, negative error code for failure.
950  */
951 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
952 			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
953 			   struct dma_resv *resv, uint64_t start, uint64_t last,
954 			   uint64_t flags, uint64_t offset, uint64_t vram_base,
955 			   struct ttm_resource *res, dma_addr_t *pages_addr,
956 			   struct dma_fence **fence)
957 {
958 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
959 	struct amdgpu_vm_update_params params;
960 	struct amdgpu_res_cursor cursor;
961 	enum amdgpu_sync_mode sync_mode;
962 	int r, idx;
963 
964 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
965 		return -ENODEV;
966 
967 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
968 	if (!tlb_cb) {
969 		drm_dev_exit(idx);
970 		return -ENOMEM;
971 	}
972 
973 	/* On Vega20+XGMI, PTEs get inadvertently cached in the L2 texture cache,
974 	 * so always use a heavy-weight TLB flush.
975 	 */
976 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
977 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
978 
979 	/*
980 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
981 	 */
982 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
983 
984 	memset(&params, 0, sizeof(params));
985 	params.adev = adev;
986 	params.vm = vm;
987 	params.immediate = immediate;
988 	params.pages_addr = pages_addr;
989 	params.unlocked = unlocked;
990 	params.needs_flush = flush_tlb;
991 	params.allow_override = allow_override;
992 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
993 
994 	/* Implicitly sync to command submissions in the same VM before
995 	 * unmapping. Sync to moving fences before mapping.
996 	 */
997 	if (!(flags & AMDGPU_PTE_VALID))
998 		sync_mode = AMDGPU_SYNC_EQ_OWNER;
999 	else
1000 		sync_mode = AMDGPU_SYNC_EXPLICIT;
1001 
1002 	amdgpu_vm_eviction_lock(vm);
1003 	if (vm->evicting) {
1004 		r = -EBUSY;
1005 		goto error_free;
1006 	}
1007 
1008 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1009 		struct dma_fence *tmp = dma_fence_get_stub();
1010 
1011 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1012 		swap(vm->last_unlocked, tmp);
1013 		dma_fence_put(tmp);
1014 	}
1015 
1016 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
1017 	if (r)
1018 		goto error_free;
1019 
1020 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1021 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1022 	while (cursor.remaining) {
1023 		uint64_t tmp, num_entries, addr;
1024 
1025 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1026 		if (pages_addr) {
1027 			bool contiguous = true;
1028 
1029 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1030 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1031 				uint64_t count;
1032 
1033 				contiguous = pages_addr[pfn + 1] ==
1034 					pages_addr[pfn] + PAGE_SIZE;
1035 
1036 				tmp = num_entries /
1037 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1038 				for (count = 2; count < tmp; ++count) {
1039 					uint64_t idx = pfn + count;
1040 
1041 					if (contiguous != (pages_addr[idx] ==
1042 					    pages_addr[idx - 1] + PAGE_SIZE))
1043 						break;
1044 				}
1045 				if (!contiguous)
1046 					count--;
1047 				num_entries = count *
1048 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1049 			}
1050 
1051 			if (!contiguous) {
1052 				addr = cursor.start;
1053 				params.pages_addr = pages_addr;
1054 			} else {
1055 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1056 				params.pages_addr = NULL;
1057 			}
1058 
1059 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1060 			addr = vram_base + cursor.start;
1061 		} else {
1062 			addr = 0;
1063 		}
1064 
1065 		tmp = start + num_entries;
1066 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1067 		if (r)
1068 			goto error_free;
1069 
1070 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1071 		start = tmp;
1072 	}
1073 
1074 	r = vm->update_funcs->commit(&params, fence);
1075 	if (r)
1076 		goto error_free;
1077 
1078 	if (params.needs_flush) {
1079 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1080 		tlb_cb = NULL;
1081 	}
1082 
1083 	amdgpu_vm_pt_free_list(adev, &params);
1084 
1085 error_free:
1086 	kfree(tlb_cb);
1087 	amdgpu_vm_eviction_unlock(vm);
1088 	drm_dev_exit(idx);
1089 	return r;
1090 }
1091 
1092 static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1093 				    struct amdgpu_mem_stats *stats)
1094 {
1095 	struct amdgpu_vm *vm = bo_va->base.vm;
1096 	struct amdgpu_bo *bo = bo_va->base.bo;
1097 
1098 	if (!bo)
1099 		return;
1100 
1101 	/*
1102 	 * For now ignore BOs which are currently locked and potentially
1103 	 * changing their location.
1104 	 */
1105 	if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
1106 	    !dma_resv_trylock(bo->tbo.base.resv))
1107 		return;
1108 
1109 	amdgpu_bo_get_memory(bo, stats);
1110 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
1111 		dma_resv_unlock(bo->tbo.base.resv);
1112 }
1113 
1114 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1115 			  struct amdgpu_mem_stats *stats)
1116 {
1117 	struct amdgpu_bo_va *bo_va, *tmp;
1118 
1119 	spin_lock(&vm->status_lock);
1120 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1121 		amdgpu_vm_bo_get_memory(bo_va, stats);
1122 
1123 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1124 		amdgpu_vm_bo_get_memory(bo_va, stats);
1125 
1126 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1127 		amdgpu_vm_bo_get_memory(bo_va, stats);
1128 
1129 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1130 		amdgpu_vm_bo_get_memory(bo_va, stats);
1131 
1132 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1133 		amdgpu_vm_bo_get_memory(bo_va, stats);
1134 
1135 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1136 		amdgpu_vm_bo_get_memory(bo_va, stats);
1137 	spin_unlock(&vm->status_lock);
1138 }
1139 
1140 /**
1141  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1142  *
1143  * @adev: amdgpu_device pointer
1144  * @bo_va: requested BO and VM object
1145  * @clear: if true clear the entries
1146  *
1147  * Fill in the page table entries for @bo_va.
1148  *
1149  * Returns:
1150  * 0 for success, -EINVAL for failure.
1151  */
1152 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1153 			bool clear)
1154 {
1155 	struct amdgpu_bo *bo = bo_va->base.bo;
1156 	struct amdgpu_vm *vm = bo_va->base.vm;
1157 	struct amdgpu_bo_va_mapping *mapping;
1158 	dma_addr_t *pages_addr = NULL;
1159 	struct ttm_resource *mem;
1160 	struct dma_fence **last_update;
1161 	bool flush_tlb = clear;
1162 	bool uncached;
1163 	struct dma_resv *resv;
1164 	uint64_t vram_base;
1165 	uint64_t flags;
1166 	int r;
1167 
1168 	if (clear || !bo) {
1169 		mem = NULL;
1170 		resv = vm->root.bo->tbo.base.resv;
1171 	} else {
1172 		struct drm_gem_object *obj = &bo->tbo.base;
1173 
1174 		resv = bo->tbo.base.resv;
1175 		if (obj->import_attach && bo_va->is_xgmi) {
1176 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1177 			struct drm_gem_object *gobj = dma_buf->priv;
1178 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1179 
1180 			if (abo->tbo.resource &&
1181 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1182 				bo = gem_to_amdgpu_bo(gobj);
1183 		}
1184 		mem = bo->tbo.resource;
1185 		if (mem && (mem->mem_type == TTM_PL_TT ||
1186 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1187 			pages_addr = bo->tbo.ttm->dma_address;
1188 	}
1189 
1190 	if (bo) {
1191 		struct amdgpu_device *bo_adev;
1192 
1193 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1194 
1195 		if (amdgpu_bo_encrypted(bo))
1196 			flags |= AMDGPU_PTE_TMZ;
1197 
1198 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1199 		vram_base = bo_adev->vm_manager.vram_base_offset;
1200 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1201 	} else {
1202 		flags = 0x0;
1203 		vram_base = 0;
1204 		uncached = false;
1205 	}
1206 
1207 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1208 		last_update = &vm->last_update;
1209 	else
1210 		last_update = &bo_va->last_pt_update;
1211 
1212 	if (!clear && bo_va->base.moved) {
1213 		flush_tlb = true;
1214 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1215 
1216 	} else if (bo_va->cleared != clear) {
1217 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1218 	}
1219 
1220 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1221 		uint64_t update_flags = flags;
1222 
1223 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1224 		 * bits, but filter the flags here anyway just in case.
1225 		 */
1226 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1227 			update_flags &= ~AMDGPU_PTE_READABLE;
1228 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1229 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1230 
1231 		/* Apply ASIC specific mapping flags */
1232 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1233 
1234 		trace_amdgpu_vm_bo_update(mapping);
1235 
1236 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1237 					   !uncached, resv, mapping->start, mapping->last,
1238 					   update_flags, mapping->offset,
1239 					   vram_base, mem, pages_addr,
1240 					   last_update);
1241 		if (r)
1242 			return r;
1243 	}
1244 
1245 	/* If the BO is not in its preferred location add it back to
1246 	 * the evicted list so that it gets validated again on the
1247 	 * next command submission.
1248 	 */
1249 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1250 		uint32_t mem_type = bo->tbo.resource->mem_type;
1251 
1252 		if (!(bo->preferred_domains &
1253 		      amdgpu_mem_type_to_domain(mem_type)))
1254 			amdgpu_vm_bo_evicted(&bo_va->base);
1255 		else
1256 			amdgpu_vm_bo_idle(&bo_va->base);
1257 	} else {
1258 		amdgpu_vm_bo_done(&bo_va->base);
1259 	}
1260 
1261 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1262 	bo_va->cleared = clear;
1263 	bo_va->base.moved = false;
1264 
1265 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1266 		list_for_each_entry(mapping, &bo_va->valids, list)
1267 			trace_amdgpu_vm_bo_mapping(mapping);
1268 	}
1269 
1270 	return 0;
1271 }
1272 
1273 /**
1274  * amdgpu_vm_update_prt_state - update the global PRT state
1275  *
1276  * @adev: amdgpu_device pointer
1277  */
1278 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1279 {
1280 	unsigned long flags;
1281 	bool enable;
1282 
1283 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1284 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1285 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1286 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1287 }
1288 
1289 /**
1290  * amdgpu_vm_prt_get - add a PRT user
1291  *
1292  * @adev: amdgpu_device pointer
1293  */
1294 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1295 {
1296 	if (!adev->gmc.gmc_funcs->set_prt)
1297 		return;
1298 
1299 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1300 		amdgpu_vm_update_prt_state(adev);
1301 }
1302 
1303 /**
1304  * amdgpu_vm_prt_put - drop a PRT user
1305  *
1306  * @adev: amdgpu_device pointer
1307  */
1308 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1309 {
1310 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1311 		amdgpu_vm_update_prt_state(adev);
1312 }
1313 
1314 /**
1315  * amdgpu_vm_prt_cb - callback for updating the PRT status
1316  *
1317  * @fence: fence for the callback
1318  * @_cb: the callback function
1319  */
1320 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1321 {
1322 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1323 
1324 	amdgpu_vm_prt_put(cb->adev);
1325 	kfree(cb);
1326 }
1327 
1328 /**
1329  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1330  *
1331  * @adev: amdgpu_device pointer
1332  * @fence: fence for the callback
1333  */
1334 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1335 				 struct dma_fence *fence)
1336 {
1337 	struct amdgpu_prt_cb *cb;
1338 
1339 	if (!adev->gmc.gmc_funcs->set_prt)
1340 		return;
1341 
1342 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1343 	if (!cb) {
1344 		/* Last resort when we are OOM */
1345 		if (fence)
1346 			dma_fence_wait(fence, false);
1347 
1348 		amdgpu_vm_prt_put(adev);
1349 	} else {
1350 		cb->adev = adev;
1351 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1352 						     amdgpu_vm_prt_cb))
1353 			amdgpu_vm_prt_cb(fence, &cb->cb);
1354 	}
1355 }
1356 
1357 /**
1358  * amdgpu_vm_free_mapping - free a mapping
1359  *
1360  * @adev: amdgpu_device pointer
1361  * @vm: requested vm
1362  * @mapping: mapping to be freed
1363  * @fence: fence of the unmap operation
1364  *
1365  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1366  */
1367 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1368 				   struct amdgpu_vm *vm,
1369 				   struct amdgpu_bo_va_mapping *mapping,
1370 				   struct dma_fence *fence)
1371 {
1372 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1373 		amdgpu_vm_add_prt_cb(adev, fence);
1374 	kfree(mapping);
1375 }
1376 
1377 /**
1378  * amdgpu_vm_prt_fini - finish all prt mappings
1379  *
1380  * @adev: amdgpu_device pointer
1381  * @vm: requested vm
1382  *
1383  * Register a cleanup callback to disable PRT support after VM dies.
1384  */
1385 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1386 {
1387 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1388 	struct dma_resv_iter cursor;
1389 	struct dma_fence *fence;
1390 
1391 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1392 		/* Add a callback for each fence in the reservation object */
1393 		amdgpu_vm_prt_get(adev);
1394 		amdgpu_vm_add_prt_cb(adev, fence);
1395 	}
1396 }
1397 
1398 /**
1399  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1400  *
1401  * @adev: amdgpu_device pointer
1402  * @vm: requested vm
1403  * @fence: optional resulting fence (unchanged if no work needed to be done
1404  * or if an error occurred)
1405  *
1406  * Make sure all freed BOs are cleared in the PT.
1407  * PTs have to be reserved and mutex must be locked!
1408  *
1409  * Returns:
1410  * 0 for success.
1411  *
1412  */
1413 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1414 			  struct amdgpu_vm *vm,
1415 			  struct dma_fence **fence)
1416 {
1417 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1418 	struct amdgpu_bo_va_mapping *mapping;
1419 	uint64_t init_pte_value = 0;
1420 	struct dma_fence *f = NULL;
1421 	int r;
1422 
1423 	while (!list_empty(&vm->freed)) {
1424 		mapping = list_first_entry(&vm->freed,
1425 			struct amdgpu_bo_va_mapping, list);
1426 		list_del(&mapping->list);
1427 
1428 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1429 					   resv, mapping->start, mapping->last,
1430 					   init_pte_value, 0, 0, NULL, NULL,
1431 					   &f);
1432 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1433 		if (r) {
1434 			dma_fence_put(f);
1435 			return r;
1436 		}
1437 	}
1438 
1439 	if (fence && f) {
1440 		dma_fence_put(*fence);
1441 		*fence = f;
1442 	} else {
1443 		dma_fence_put(f);
1444 	}
1445 
1446 	return 0;
1447 
1448 }
1449 
1450 /**
1451  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1452  *
1453  * @adev: amdgpu_device pointer
1454  * @vm: requested vm
1455  * @ticket: optional reservation ticket used to reserve the VM
1456  *
1457  * Make sure all BOs which are moved are updated in the PTs.
1458  *
1459  * Returns:
1460  * 0 for success.
1461  *
1462  * PTs have to be reserved!
1463  */
1464 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1465 			   struct amdgpu_vm *vm,
1466 			   struct ww_acquire_ctx *ticket)
1467 {
1468 	struct amdgpu_bo_va *bo_va;
1469 	struct dma_resv *resv;
1470 	bool clear, unlock;
1471 	int r;
1472 
1473 	spin_lock(&vm->status_lock);
1474 	while (!list_empty(&vm->moved)) {
1475 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1476 					 base.vm_status);
1477 		spin_unlock(&vm->status_lock);
1478 
1479 		/* Per VM BOs never need to be cleared in the page tables */
1480 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1481 		if (r)
1482 			return r;
1483 		spin_lock(&vm->status_lock);
1484 	}
1485 
1486 	while (!list_empty(&vm->invalidated)) {
1487 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1488 					 base.vm_status);
1489 		resv = bo_va->base.bo->tbo.base.resv;
1490 		spin_unlock(&vm->status_lock);
1491 
1492 		/* Try to reserve the BO to avoid clearing its ptes */
1493 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1494 			clear = false;
1495 			unlock = true;
1496 		/* The caller is already holding the reservation lock */
1497 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1498 			clear = false;
1499 			unlock = false;
1500 		/* Somebody else is using the BO right now */
1501 		} else {
1502 			clear = true;
1503 			unlock = false;
1504 		}
1505 
1506 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1507 
1508 		if (unlock)
1509 			dma_resv_unlock(resv);
1510 		if (r)
1511 			return r;
1512 
1513 		/* Remember evicted DMABuf imports in compute VMs for later
1514 		 * validation
1515 		 */
1516 		if (vm->is_compute_context &&
1517 		    bo_va->base.bo->tbo.base.import_attach &&
1518 		    (!bo_va->base.bo->tbo.resource ||
1519 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1520 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1521 
1522 		spin_lock(&vm->status_lock);
1523 	}
1524 	spin_unlock(&vm->status_lock);
1525 
1526 	return 0;
1527 }
1528 
1529 /**
1530  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1531  *
1532  * @adev: amdgpu_device pointer
1533  * @vm: requested vm
1534  * @flush_type: flush type
1535  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1536  *
1537  * Flush TLB if needed for a compute VM.
1538  *
1539  * Returns:
1540  * 0 for success.
1541  */
1542 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1543 				struct amdgpu_vm *vm,
1544 				uint32_t flush_type,
1545 				uint32_t xcc_mask)
1546 {
1547 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1548 	bool all_hub = false;
1549 	int xcc = 0, r = 0;
1550 
1551 	WARN_ON_ONCE(!vm->is_compute_context);
1552 
1553 	/*
1554 	 * It can be that we race and lose here, but that is extremely unlikely
1555 	 * and the worst thing which could happen is that we flush the changes
1556 	 * into the TLB once more which is harmless.
1557 	 */
1558 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1559 		return 0;
1560 
1561 	if (adev->family == AMDGPU_FAMILY_AI ||
1562 	    adev->family == AMDGPU_FAMILY_RV)
1563 		all_hub = true;
1564 
1565 	for_each_inst(xcc, xcc_mask) {
1566 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1567 						   all_hub, xcc);
1568 		if (r)
1569 			break;
1570 	}
1571 	return r;
1572 }
1573 
1574 /**
1575  * amdgpu_vm_bo_add - add a bo to a specific vm
1576  *
1577  * @adev: amdgpu_device pointer
1578  * @vm: requested vm
1579  * @bo: amdgpu buffer object
1580  *
1581  * Add @bo into the requested vm.
1582  * Add @bo to the list of bos associated with the vm
1583  *
1584  * Returns:
1585  * Newly added bo_va or NULL for failure
1586  *
1587  * Object has to be reserved!
1588  */
1589 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1590 				      struct amdgpu_vm *vm,
1591 				      struct amdgpu_bo *bo)
1592 {
1593 	struct amdgpu_bo_va *bo_va;
1594 
1595 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1596 	if (bo_va == NULL) {
1597 		return NULL;
1598 	}
1599 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1600 
1601 	bo_va->ref_count = 1;
1602 	bo_va->last_pt_update = dma_fence_get_stub();
1603 	INIT_LIST_HEAD(&bo_va->valids);
1604 	INIT_LIST_HEAD(&bo_va->invalids);
1605 
1606 	if (!bo)
1607 		return bo_va;
1608 
1609 	dma_resv_assert_held(bo->tbo.base.resv);
1610 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1611 		bo_va->is_xgmi = true;
1612 		/* Power up XGMI if it can be potentially used */
1613 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1614 	}
1615 
1616 	return bo_va;
1617 }
1618 
1619 
1620 /**
1621  * amdgpu_vm_bo_insert_map - insert a new mapping
1622  *
1623  * @adev: amdgpu_device pointer
1624  * @bo_va: bo_va to store the address
1625  * @mapping: the mapping to insert
1626  *
1627  * Insert a new mapping into all structures.
1628  */
1629 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1630 				    struct amdgpu_bo_va *bo_va,
1631 				    struct amdgpu_bo_va_mapping *mapping)
1632 {
1633 	struct amdgpu_vm *vm = bo_va->base.vm;
1634 	struct amdgpu_bo *bo = bo_va->base.bo;
1635 
1636 	mapping->bo_va = bo_va;
1637 	list_add(&mapping->list, &bo_va->invalids);
1638 	amdgpu_vm_it_insert(mapping, &vm->va);
1639 
1640 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1641 		amdgpu_vm_prt_get(adev);
1642 
1643 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1644 		amdgpu_vm_bo_moved(&bo_va->base);
1645 
1646 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1647 }
1648 
1649 /* Validate operation parameters to prevent potential abuse */
1650 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1651 					  struct amdgpu_bo *bo,
1652 					  uint64_t saddr,
1653 					  uint64_t offset,
1654 					  uint64_t size)
1655 {
1656 	uint64_t tmp, lpfn;
1657 
1658 	if (saddr & AMDGPU_GPU_PAGE_MASK
1659 	    || offset & AMDGPU_GPU_PAGE_MASK
1660 	    || size & AMDGPU_GPU_PAGE_MASK)
1661 		return -EINVAL;
1662 
1663 	if (check_add_overflow(saddr, size, &tmp)
1664 	    || check_add_overflow(offset, size, &tmp)
1665 	    || size == 0 /* which also leads to end < begin */)
1666 		return -EINVAL;
1667 
1668 	/* make sure object fit at this offset */
1669 	if (bo && offset + size > amdgpu_bo_size(bo))
1670 		return -EINVAL;
1671 
1672 	/* Ensure the last pfn does not exceed max_pfn */
1673 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1674 	if (lpfn >= adev->vm_manager.max_pfn)
1675 		return -EINVAL;
1676 
1677 	return 0;
1678 }
1679 
1680 /**
1681  * amdgpu_vm_bo_map - map bo inside a vm
1682  *
1683  * @adev: amdgpu_device pointer
1684  * @bo_va: bo_va to store the address
1685  * @saddr: where to map the BO
1686  * @offset: requested offset in the BO
1687  * @size: BO size in bytes
1688  * @flags: attributes of pages (read/write/valid/etc.)
1689  *
1690  * Add a mapping of the BO at the specified addr into the VM.
1691  *
1692  * Returns:
1693  * 0 for success, error for failure.
1694  *
1695  * Object has to be reserved and unreserved outside!
1696  */
1697 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1698 		     struct amdgpu_bo_va *bo_va,
1699 		     uint64_t saddr, uint64_t offset,
1700 		     uint64_t size, uint64_t flags)
1701 {
1702 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1703 	struct amdgpu_bo *bo = bo_va->base.bo;
1704 	struct amdgpu_vm *vm = bo_va->base.vm;
1705 	uint64_t eaddr;
1706 	int r;
1707 
1708 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1709 	if (r)
1710 		return r;
1711 
1712 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1713 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1714 
1715 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1716 	if (tmp) {
1717 		/* bo and tmp overlap, invalid addr */
1718 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1719 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1720 			tmp->start, tmp->last + 1);
1721 		return -EINVAL;
1722 	}
1723 
1724 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1725 	if (!mapping)
1726 		return -ENOMEM;
1727 
1728 	mapping->start = saddr;
1729 	mapping->last = eaddr;
1730 	mapping->offset = offset;
1731 	mapping->flags = flags;
1732 
1733 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1734 
1735 	return 0;
1736 }
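
/*
 * Usage sketch with illustrative values only: map the first 1MB of a
 * reserved BO read/write at GPU VA 0x100000 and tear the mapping down again
 * with amdgpu_vm_bo_unmap() below.  All addresses and sizes have to be GPU
 * page aligned, see amdgpu_vm_verify_parameters().
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *			     AMDGPU_PTE_VALID);
 *	...
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, 0x100000);
 */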
1737 
1738 /**
1739  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1740  *
1741  * @adev: amdgpu_device pointer
1742  * @bo_va: bo_va to store the address
1743  * @saddr: where to map the BO
1744  * @offset: requested offset in the BO
1745  * @size: BO size in bytes
1746  * @flags: attributes of pages (read/write/valid/etc.)
1747  *
1748  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1749  * mappings as we do so.
1750  *
1751  * Returns:
1752  * 0 for success, error for failure.
1753  *
1754  * Object has to be reserved and unreserved outside!
1755  */
1756 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1757 			     struct amdgpu_bo_va *bo_va,
1758 			     uint64_t saddr, uint64_t offset,
1759 			     uint64_t size, uint64_t flags)
1760 {
1761 	struct amdgpu_bo_va_mapping *mapping;
1762 	struct amdgpu_bo *bo = bo_va->base.bo;
1763 	uint64_t eaddr;
1764 	int r;
1765 
1766 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1767 	if (r)
1768 		return r;
1769 
1770 	/* Allocate all the needed memory */
1771 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1772 	if (!mapping)
1773 		return -ENOMEM;
1774 
1775 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1776 	if (r) {
1777 		kfree(mapping);
1778 		return r;
1779 	}
1780 
1781 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1782 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1783 
1784 	mapping->start = saddr;
1785 	mapping->last = eaddr;
1786 	mapping->offset = offset;
1787 	mapping->flags = flags;
1788 
1789 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1790 
1791 	return 0;
1792 }
1793 
1794 /**
1795  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1796  *
1797  * @adev: amdgpu_device pointer
1798  * @bo_va: bo_va to remove the address from
1799  * @saddr: where the BO is mapped
1800  *
1801  * Remove a mapping of the BO at the specified addr from the VM.
1802  *
1803  * Returns:
1804  * 0 for success, error for failure.
1805  *
1806  * Object has to be reserved and unreserved outside!
1807  */
1808 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1809 		       struct amdgpu_bo_va *bo_va,
1810 		       uint64_t saddr)
1811 {
1812 	struct amdgpu_bo_va_mapping *mapping;
1813 	struct amdgpu_vm *vm = bo_va->base.vm;
1814 	bool valid = true;
1815 
1816 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1817 
1818 	list_for_each_entry(mapping, &bo_va->valids, list) {
1819 		if (mapping->start == saddr)
1820 			break;
1821 	}
1822 
1823 	if (&mapping->list == &bo_va->valids) {
1824 		valid = false;
1825 
1826 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1827 			if (mapping->start == saddr)
1828 				break;
1829 		}
1830 
1831 		if (&mapping->list == &bo_va->invalids)
1832 			return -ENOENT;
1833 	}
1834 
1835 	list_del(&mapping->list);
1836 	amdgpu_vm_it_remove(mapping, &vm->va);
1837 	mapping->bo_va = NULL;
1838 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1839 
1840 	if (valid)
1841 		list_add(&mapping->list, &vm->freed);
1842 	else
1843 		amdgpu_vm_free_mapping(adev, vm, mapping,
1844 				       bo_va->last_pt_update);
1845 
1846 	return 0;
1847 }
1848 
1849 /**
1850  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1851  *
1852  * @adev: amdgpu_device pointer
1853  * @vm: VM structure to use
1854  * @saddr: start of the range
1855  * @size: size of the range
1856  *
1857  * Remove all mappings in a range, split them as appropriate.
1858  *
1859  * Returns:
1860  * 0 for success, error for failure.
1861  */
1862 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1863 				struct amdgpu_vm *vm,
1864 				uint64_t saddr, uint64_t size)
1865 {
1866 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1867 	LIST_HEAD(removed);
1868 	uint64_t eaddr;
1869 	int r;
1870 
1871 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1872 	if (r)
1873 		return r;
1874 
1875 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1876 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1877 
1878 	/* Allocate all the needed memory */
1879 	before = kzalloc(sizeof(*before), GFP_KERNEL);
1880 	if (!before)
1881 		return -ENOMEM;
1882 	INIT_LIST_HEAD(&before->list);
1883 
1884 	after = kzalloc(sizeof(*after), GFP_KERNEL);
1885 	if (!after) {
1886 		kfree(before);
1887 		return -ENOMEM;
1888 	}
1889 	INIT_LIST_HEAD(&after->list);
1890 
1891 	/* Now gather all removed mappings */
1892 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1893 	while (tmp) {
1894 		/* Remember mapping split at the start */
1895 		if (tmp->start < saddr) {
1896 			before->start = tmp->start;
1897 			before->last = saddr - 1;
1898 			before->offset = tmp->offset;
1899 			before->flags = tmp->flags;
1900 			before->bo_va = tmp->bo_va;
1901 			list_add(&before->list, &tmp->bo_va->invalids);
1902 		}
1903 
1904 		/* Remember mapping split at the end */
1905 		if (tmp->last > eaddr) {
1906 			after->start = eaddr + 1;
1907 			after->last = tmp->last;
1908 			after->offset = tmp->offset;
1909 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1910 			after->flags = tmp->flags;
1911 			after->bo_va = tmp->bo_va;
1912 			list_add(&after->list, &tmp->bo_va->invalids);
1913 		}
1914 
1915 		list_del(&tmp->list);
1916 		list_add(&tmp->list, &removed);
1917 
1918 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1919 	}
1920 
1921 	/* And free them up */
1922 	list_for_each_entry_safe(tmp, next, &removed, list) {
1923 		amdgpu_vm_it_remove(tmp, &vm->va);
1924 		list_del(&tmp->list);
1925 
1926 		if (tmp->start < saddr)
1927 			tmp->start = saddr;
1928 		if (tmp->last > eaddr)
1929 			tmp->last = eaddr;
1930 
1931 		tmp->bo_va = NULL;
1932 		list_add(&tmp->list, &vm->freed);
1933 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1934 	}
1935 
1936 	/* Insert partial mapping before the range */
1937 	if (!list_empty(&before->list)) {
1938 		struct amdgpu_bo *bo = before->bo_va->base.bo;
1939 
1940 		amdgpu_vm_it_insert(before, &vm->va);
1941 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
1942 			amdgpu_vm_prt_get(adev);
1943 
1944 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1945 		    !before->bo_va->base.moved)
1946 			amdgpu_vm_bo_moved(&before->bo_va->base);
1947 	} else {
1948 		kfree(before);
1949 	}
1950 
1951 	/* Insert partial mapping after the range */
1952 	if (!list_empty(&after->list)) {
1953 		struct amdgpu_bo *bo = after->bo_va->base.bo;
1954 
1955 		amdgpu_vm_it_insert(after, &vm->va);
1956 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
1957 			amdgpu_vm_prt_get(adev);
1958 
1959 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1960 		    !after->bo_va->base.moved)
1961 			amdgpu_vm_bo_moved(&after->bo_va->base);
1962 	} else {
1963 		kfree(after);
1964 	}
1965 
1966 	return 0;
1967 }
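
/*
 * Worked example (page numbers are illustrative): clearing GPU pages
 * [0x1000, 0x2fff] out of a mapping that covers [0x0800, 0x3fff] keeps a
 * "before" mapping [0x0800, 0x0fff] and an "after" mapping [0x3000, 0x3fff],
 * while the overlapping middle part is clamped to [0x1000, 0x2fff] and moved
 * to vm->freed so its page table entries are invalidated on the next update.
 */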
1968 
1969 /**
1970  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1971  *
1972  * @vm: the requested VM
1973  * @addr: the address
1974  *
1975  * Find a mapping by its address.
1976  *
1977  * Returns:
1978  * The amdgpu_bo_va_mapping matching addr or NULL
1979  *
1980  */
1981 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1982 							 uint64_t addr)
1983 {
1984 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1985 }
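
/*
 * Illustrative sketch: the address is expected in GPU pages, so a caller
 * working with byte addresses might do something like:
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (mapping && mapping->bo_va)
 *		bo = mapping->bo_va->base.bo;
 */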
1986 
1987 /**
1988  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1989  *
1990  * @vm: the requested vm
1991  * @ticket: CS ticket
1992  *
1993  * Trace all mappings of BOs reserved during a command submission.
1994  */
1995 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1996 {
1997 	struct amdgpu_bo_va_mapping *mapping;
1998 
1999 	if (!trace_amdgpu_vm_bo_cs_enabled())
2000 		return;
2001 
2002 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2003 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2004 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2005 			struct amdgpu_bo *bo;
2006 
2007 			bo = mapping->bo_va->base.bo;
2008 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2009 			    ticket)
2010 				continue;
2011 		}
2012 
2013 		trace_amdgpu_vm_bo_cs(mapping);
2014 	}
2015 }
2016 
2017 /**
2018  * amdgpu_vm_bo_del - remove a bo from a specific vm
2019  *
2020  * @adev: amdgpu_device pointer
2021  * @bo_va: requested bo_va
2022  *
2023  * Remove @bo_va->bo from the requested vm.
2024  *
2025  * Object has to be reserved!
2026  */
2027 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2028 		      struct amdgpu_bo_va *bo_va)
2029 {
2030 	struct amdgpu_bo_va_mapping *mapping, *next;
2031 	struct amdgpu_bo *bo = bo_va->base.bo;
2032 	struct amdgpu_vm *vm = bo_va->base.vm;
2033 	struct amdgpu_vm_bo_base **base;
2034 
2035 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2036 
2037 	if (bo) {
2038 		dma_resv_assert_held(bo->tbo.base.resv);
2039 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2040 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2041 
2042 		for (base = &bo_va->base.bo->vm_bo; *base;
2043 		     base = &(*base)->next) {
2044 			if (*base != &bo_va->base)
2045 				continue;
2046 
2047 			*base = bo_va->base.next;
2048 			break;
2049 		}
2050 	}
2051 
2052 	spin_lock(&vm->status_lock);
2053 	list_del(&bo_va->base.vm_status);
2054 	spin_unlock(&vm->status_lock);
2055 
2056 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2057 		list_del(&mapping->list);
2058 		amdgpu_vm_it_remove(mapping, &vm->va);
2059 		mapping->bo_va = NULL;
2060 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2061 		list_add(&mapping->list, &vm->freed);
2062 	}
2063 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2064 		list_del(&mapping->list);
2065 		amdgpu_vm_it_remove(mapping, &vm->va);
2066 		amdgpu_vm_free_mapping(adev, vm, mapping,
2067 				       bo_va->last_pt_update);
2068 	}
2069 
2070 	dma_fence_put(bo_va->last_pt_update);
2071 
2072 	if (bo && bo_va->is_xgmi)
2073 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2074 
2075 	kfree(bo_va);
2076 }
2077 
2078 /**
2079  * amdgpu_vm_evictable - check if we can evict a VM
2080  *
2081  * @bo: A page table of the VM.
2082  *
2083  * Check if it is possible to evict a VM.
2084  */
2085 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2086 {
2087 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2088 
2089 	/* Page tables of a destroyed VM can go away immediately */
2090 	if (!bo_base || !bo_base->vm)
2091 		return true;
2092 
2093 	/* Don't evict VM page tables while they are busy */
2094 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2095 		return false;
2096 
2097 	/* Try to block ongoing updates */
2098 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2099 		return false;
2100 
2101 	/* Don't evict VM page tables while they are updated */
2102 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2103 		amdgpu_vm_eviction_unlock(bo_base->vm);
2104 		return false;
2105 	}
2106 
2107 	bo_base->vm->evicting = true;
2108 	amdgpu_vm_eviction_unlock(bo_base->vm);
2109 	return true;
2110 }
2111 
2112 /**
2113  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2114  *
2115  * @adev: amdgpu_device pointer
2116  * @bo: amdgpu buffer object
2117  * @evicted: is the BO evicted
2118  *
2119  * Mark @bo as invalid.
2120  */
2121 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2122 			     struct amdgpu_bo *bo, bool evicted)
2123 {
2124 	struct amdgpu_vm_bo_base *bo_base;
2125 
2126 	/* shadow bo doesn't have bo base, its validation needs its parent */
2127 	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2128 		bo = bo->parent;
2129 
2130 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2131 		struct amdgpu_vm *vm = bo_base->vm;
2132 
2133 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2134 			amdgpu_vm_bo_evicted(bo_base);
2135 			continue;
2136 		}
2137 
2138 		if (bo_base->moved)
2139 			continue;
2140 		bo_base->moved = true;
2141 
2142 		if (bo->tbo.type == ttm_bo_type_kernel)
2143 			amdgpu_vm_bo_relocated(bo_base);
2144 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2145 			amdgpu_vm_bo_moved(bo_base);
2146 		else
2147 			amdgpu_vm_bo_invalidated(bo_base);
2148 	}
2149 }
2150 
2151 /**
2152  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2153  *
2154  * @vm_size: VM size
2155  *
2156  * Returns:
2157  * VM page table size as a power of two
2158  */
2159 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2160 {
2161 	/* Total bits covered by PD + PTs */
2162 	unsigned bits = ilog2(vm_size) + 18;
2163 
2164 	/* Make sure the PD is 4K in size up to 8GB address space.
2165 	 * Above that, split equally between PD and PTs. */
2166 	if (vm_size <= 8)
2167 		return (bits - 9);
2168 	else
2169 		return ((bits + 3) / 2);
2170 }
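
/*
 * Worked example: for vm_size = 256 GB the PD and PTs together cover
 * ilog2(256) + 18 = 26 bits of page addresses, so the block size becomes
 * (26 + 3) / 2 = 14 bits for the PTs, leaving 12 bits for the PD. For
 * vm_size = 8 GB (21 bits) the result is 21 - 9 = 12, which keeps the PD
 * at 512 entries, i.e. 4K in size.
 */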
2171 
2172 /**
2173  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2174  *
2175  * @adev: amdgpu_device pointer
2176  * @min_vm_size: the minimum vm size in GB if it's set to auto
2177  * @fragment_size_default: Default PTE fragment size
2178  * @max_level: max VMPT level
2179  * @max_bits: max address space size in bits
2180  *
2181  */
2182 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2183 			   uint32_t fragment_size_default, unsigned max_level,
2184 			   unsigned max_bits)
2185 {
2186 	unsigned int max_size = 1 << (max_bits - 30);
2187 	unsigned int vm_size;
2188 	uint64_t tmp;
2189 
2190 	/* adjust vm size first */
2191 	if (amdgpu_vm_size != -1) {
2192 		vm_size = amdgpu_vm_size;
2193 		if (vm_size > max_size) {
2194 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2195 				 amdgpu_vm_size, max_size);
2196 			vm_size = max_size;
2197 		}
2198 	} else {
2199 		struct sysinfo si;
2200 		unsigned int phys_ram_gb;
2201 
2202 		/* Optimal VM size depends on the amount of physical
2203 		 * RAM available. Underlying requirements and
2204 		 * assumptions:
2205 		 *
2206 		 *  - Need to map system memory and VRAM from all GPUs
2207 		 *     - VRAM from other GPUs not known here
2208 		 *     - Assume VRAM <= system memory
2209 		 *  - On GFX8 and older, VM space can be segmented for
2210 		 *    different MTYPEs
2211 		 *  - Need to allow room for fragmentation, guard pages etc.
2212 		 *
2213 		 * This adds up to a rough guess of system memory x3.
2214 		 * Round up to power of two to maximize the available
2215 		 * VM size with the given page table size.
2216 		 */
2217 		si_meminfo(&si);
2218 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2219 			       (1 << 30) - 1) >> 30;
2220 		vm_size = roundup_pow_of_two(
2221 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2222 	}
2223 
2224 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2225 
2226 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2227 	if (amdgpu_vm_block_size != -1)
2228 		tmp >>= amdgpu_vm_block_size - 9;
2229 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2230 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2231 	switch (adev->vm_manager.num_level) {
2232 	case 3:
2233 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2234 		break;
2235 	case 2:
2236 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2237 		break;
2238 	case 1:
2239 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2240 		break;
2241 	default:
2242 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2243 	}
2244 	/* block size depends on vm size and hw setup */
2245 	if (amdgpu_vm_block_size != -1)
2246 		adev->vm_manager.block_size =
2247 			min((unsigned)amdgpu_vm_block_size, max_bits
2248 			    - AMDGPU_GPU_PAGE_SHIFT
2249 			    - 9 * adev->vm_manager.num_level);
2250 	else if (adev->vm_manager.num_level > 1)
2251 		adev->vm_manager.block_size = 9;
2252 	else
2253 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2254 
2255 	if (amdgpu_vm_fragment_size == -1)
2256 		adev->vm_manager.fragment_size = fragment_size_default;
2257 	else
2258 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2259 
2260 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2261 		 vm_size, adev->vm_manager.num_level + 1,
2262 		 adev->vm_manager.block_size,
2263 		 adev->vm_manager.fragment_size);
2264 }
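
/*
 * Worked example (numbers are illustrative): with vm_size = 256 GB,
 * max_pfn = 256 << 18 = 2^26 pages. With the block size left on auto,
 * fls64(2^26) - 1 = 26 and DIV_ROUND_UP(26, 9) - 1 = 2, so two levels below
 * the root are used (subject to the @max_level cap), the root level becomes
 * AMDGPU_VM_PDB1 and the block size defaults to 9 bits.
 */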
2265 
2266 /**
2267  * amdgpu_vm_wait_idle - wait for the VM to become idle
2268  *
2269  * @vm: VM object to wait for
2270  * @timeout: timeout to wait for VM to become idle
2271  */
2272 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2273 {
2274 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2275 					DMA_RESV_USAGE_BOOKKEEP,
2276 					true, timeout);
2277 	if (timeout <= 0)
2278 		return timeout;
2279 
2280 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2281 }
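
/*
 * Illustrative sketch, e.g. for a caller that must not proceed while page
 * table updates are still in flight (the timeout value is just an example):
 *
 *	timeout = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(100));
 *	if (timeout <= 0)
 *		return timeout ? timeout : -ETIMEDOUT;
 */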
2282 
2283 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2284 {
2285 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2286 
2287 	kfree(ti);
2288 }
2289 
2290 static inline struct amdgpu_vm *
2291 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2292 {
2293 	struct amdgpu_vm *vm;
2294 	unsigned long flags;
2295 
2296 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2297 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2298 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2299 
2300 	return vm;
2301 }
2302 
2303 /**
2304  * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2305  *
2306  * @task_info: task_info struct under discussion.
2307  *
2308  * frees the vm task_info ptr at the last put
2309  */
2310 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2311 {
2312 	kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2313 }
2314 
2315 /**
2316  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2317  *
2318  * @vm: VM to get info from
2319  *
2320  * Returns the reference counted task_info structure, which must be
2321  * referenced down with amdgpu_vm_put_task_info.
2322  */
2323 struct amdgpu_task_info *
2324 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2325 {
2326 	struct amdgpu_task_info *ti = NULL;
2327 
2328 	if (vm) {
2329 		ti = vm->task_info;
2330 		kref_get(&vm->task_info->refcount);
2331 	}
2332 
2333 	return ti;
2334 }
2335 
2336 /**
2337  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2338  *
2339  * @adev: amdgpu device pointer
2340  * @pasid: PASID identifier for VM
2341  *
2342  * Returns the reference counted task_info structure, which must be
2343  * referenced down with amdgpu_vm_put_task_info.
2344  */
2345 struct amdgpu_task_info *
2346 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2347 {
2348 	return amdgpu_vm_get_task_info_vm(
2349 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2350 }
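
/*
 * Illustrative sketch: fault handlers typically pair the lookup with a put
 * once they are done with the task information, e.g.:
 *
 *	struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *
 *	if (ti) {
 *		dev_err(adev->dev, "faulting process %s pid %d\n",
 *			ti->process_name, ti->pid);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 */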
2351 
2352 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2353 {
2354 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2355 	if (!vm->task_info)
2356 		return -ENOMEM;
2357 
2358 	kref_init(&vm->task_info->refcount);
2359 	return 0;
2360 }
2361 
2362 /**
2363  * amdgpu_vm_set_task_info - Sets the VM's task info.
2364  *
2365  * @vm: vm for which to set the info
2366  */
2367 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2368 {
2369 	if (!vm->task_info)
2370 		return;
2371 
2372 	if (vm->task_info->pid == current->pid)
2373 		return;
2374 
2375 	vm->task_info->pid = current->pid;
2376 	get_task_comm(vm->task_info->task_name, current);
2377 
2378 	if (current->group_leader->mm != current->mm)
2379 		return;
2380 
2381 	vm->task_info->tgid = current->group_leader->pid;
2382 	get_task_comm(vm->task_info->process_name, current->group_leader);
2383 }
2384 
2385 /**
2386  * amdgpu_vm_init - initialize a vm instance
2387  *
2388  * @adev: amdgpu_device pointer
2389  * @vm: requested vm
2390  * @xcp_id: GPU partition selection id
2391  *
2392  * Init @vm fields.
2393  *
2394  * Returns:
2395  * 0 for success, error for failure.
2396  */
2397 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2398 		   int32_t xcp_id)
2399 {
2400 	struct amdgpu_bo *root_bo;
2401 	struct amdgpu_bo_vm *root;
2402 	int r, i;
2403 
2404 	vm->va = RB_ROOT_CACHED;
2405 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2406 		vm->reserved_vmid[i] = false;
2407 	INIT_LIST_HEAD(&vm->evicted);
2408 	INIT_LIST_HEAD(&vm->evicted_user);
2409 	INIT_LIST_HEAD(&vm->relocated);
2410 	INIT_LIST_HEAD(&vm->moved);
2411 	INIT_LIST_HEAD(&vm->idle);
2412 	INIT_LIST_HEAD(&vm->invalidated);
2413 	spin_lock_init(&vm->status_lock);
2414 	INIT_LIST_HEAD(&vm->freed);
2415 	INIT_LIST_HEAD(&vm->done);
2416 	INIT_LIST_HEAD(&vm->pt_freed);
2417 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2418 	INIT_KFIFO(vm->faults);
2419 
2420 	r = amdgpu_vm_init_entities(adev, vm);
2421 	if (r)
2422 		return r;
2423 
2424 	vm->is_compute_context = false;
2425 
2426 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2427 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2428 
2429 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2430 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2431 	WARN_ONCE((vm->use_cpu_for_update &&
2432 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2433 		  "CPU update of VM recommended only for large BAR system\n");
2434 
2435 	if (vm->use_cpu_for_update)
2436 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2437 	else
2438 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2439 
2440 	vm->last_update = dma_fence_get_stub();
2441 	vm->last_unlocked = dma_fence_get_stub();
2442 	vm->last_tlb_flush = dma_fence_get_stub();
2443 	vm->generation = amdgpu_vm_generation(adev, NULL);
2444 
2445 	mutex_init(&vm->eviction_lock);
2446 	vm->evicting = false;
2447 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2448 
2449 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2450 				false, &root, xcp_id);
2451 	if (r)
2452 		goto error_free_delayed;
2453 
2454 	root_bo = amdgpu_bo_ref(&root->bo);
2455 	r = amdgpu_bo_reserve(root_bo, true);
2456 	if (r) {
2457 		amdgpu_bo_unref(&root->shadow);
2458 		amdgpu_bo_unref(&root_bo);
2459 		goto error_free_delayed;
2460 	}
2461 
2462 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2463 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2464 	if (r)
2465 		goto error_free_root;
2466 
2467 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2468 	if (r)
2469 		goto error_free_root;
2470 
2471 	r = amdgpu_vm_create_task_info(vm);
2472 	if (r)
2473 		DRM_DEBUG("Failed to create task info for VM\n");
2474 
2475 	amdgpu_bo_unreserve(vm->root.bo);
2476 	amdgpu_bo_unref(&root_bo);
2477 
2478 	return 0;
2479 
2480 error_free_root:
2481 	amdgpu_vm_pt_free_root(adev, vm);
2482 	amdgpu_bo_unreserve(vm->root.bo);
2483 	amdgpu_bo_unref(&root_bo);
2484 
2485 error_free_delayed:
2486 	dma_fence_put(vm->last_tlb_flush);
2487 	dma_fence_put(vm->last_unlocked);
2488 	amdgpu_vm_fini_entities(vm);
2489 
2490 	return r;
2491 }
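
/*
 * Illustrative sketch of the usual pairing in the per-file-private setup and
 * teardown paths (error handling trimmed):
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */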
2492 
2493 /**
2494  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2495  *
2496  * @adev: amdgpu_device pointer
2497  * @vm: requested vm
2498  *
2499  * This only works on GFX VMs that have no BOs added and no page tables
2500  * allocated yet.
2501  *
2502  * Changes the following VM parameters:
2503  * - use_cpu_for_update
2508  *
2509  * Returns:
2510  * 0 for success, -errno for errors.
2511  */
2512 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2513 {
2514 	int r;
2515 
2516 	r = amdgpu_bo_reserve(vm->root.bo, true);
2517 	if (r)
2518 		return r;
2519 
2520 	/* Update VM state */
2521 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2522 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2523 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2524 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2525 	WARN_ONCE((vm->use_cpu_for_update &&
2526 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2527 		  "CPU update of VM recommended only for large BAR system\n");
2528 
2529 	if (vm->use_cpu_for_update) {
2530 		/* Sync with last SDMA update/clear before switching to CPU */
2531 		r = amdgpu_bo_sync_wait(vm->root.bo,
2532 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2533 		if (r)
2534 			goto unreserve_bo;
2535 
2536 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2537 		r = amdgpu_vm_pt_map_tables(adev, vm);
2538 		if (r)
2539 			goto unreserve_bo;
2540 
2541 	} else {
2542 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2543 	}
2544 
2545 	dma_fence_put(vm->last_update);
2546 	vm->last_update = dma_fence_get_stub();
2547 	vm->is_compute_context = true;
2548 
2549 	/* Free the shadow bo for compute VM */
2550 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2551 
2552 	goto unreserve_bo;
2553 
2554 unreserve_bo:
2555 	amdgpu_bo_unreserve(vm->root.bo);
2556 	return r;
2557 }
2558 
2559 /**
2560  * amdgpu_vm_release_compute - release a compute vm
2561  * @adev: amdgpu_device pointer
2562  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2563  *
2564  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2565  * compute pasid from the vm. Compute should stop using the vm after this call.
2566  */
2567 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2568 {
2569 	amdgpu_vm_set_pasid(adev, vm, 0);
2570 	vm->is_compute_context = false;
2571 }
2572 
2573 /**
2574  * amdgpu_vm_fini - tear down a vm instance
2575  *
2576  * @adev: amdgpu_device pointer
2577  * @vm: requested vm
2578  *
2579  * Tear down @vm.
2580  * Unbind the VM and remove all bos from the vm bo list
2581  */
2582 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2583 {
2584 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2585 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2586 	struct amdgpu_bo *root;
2587 	unsigned long flags;
2588 	int i;
2589 
2590 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2591 
2592 	flush_work(&vm->pt_free_work);
2593 
2594 	root = amdgpu_bo_ref(vm->root.bo);
2595 	amdgpu_bo_reserve(root, true);
2596 	amdgpu_vm_put_task_info(vm->task_info);
2597 	amdgpu_vm_set_pasid(adev, vm, 0);
2598 	dma_fence_wait(vm->last_unlocked, false);
2599 	dma_fence_put(vm->last_unlocked);
2600 	dma_fence_wait(vm->last_tlb_flush, false);
2601 	/* Make sure that all fence callbacks have completed */
2602 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2603 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2604 	dma_fence_put(vm->last_tlb_flush);
2605 
2606 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2607 		if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2608 			amdgpu_vm_prt_fini(adev, vm);
2609 			prt_fini_needed = false;
2610 		}
2611 
2612 		list_del(&mapping->list);
2613 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2614 	}
2615 
2616 	amdgpu_vm_pt_free_root(adev, vm);
2617 	amdgpu_bo_unreserve(root);
2618 	amdgpu_bo_unref(&root);
2619 	WARN_ON(vm->root.bo);
2620 
2621 	amdgpu_vm_fini_entities(vm);
2622 
2623 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2624 		dev_err(adev->dev, "still active bo inside vm\n");
2626 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2627 					     &vm->va.rb_root, rb) {
2628 		/* Don't remove the mapping here, we don't want to trigger a
2629 		 * rebalance and the tree is about to be destroyed anyway.
2630 		 */
2631 		list_del(&mapping->list);
2632 		kfree(mapping);
2633 	}
2634 
2635 	dma_fence_put(vm->last_update);
2636 
2637 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2638 		if (vm->reserved_vmid[i]) {
2639 			amdgpu_vmid_free_reserved(adev, i);
2640 			vm->reserved_vmid[i] = false;
2641 		}
2642 	}
2643 
2644 }
2645 
2646 /**
2647  * amdgpu_vm_manager_init - init the VM manager
2648  *
2649  * @adev: amdgpu_device pointer
2650  *
2651  * Initialize the VM manager structures
2652  */
2653 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2654 {
2655 	unsigned i;
2656 
2657 	/* Concurrent flushes are only possible starting with Vega10 and
2658 	 * are broken on Navi10 and Navi14.
2659 	 */
2660 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2661 					      adev->asic_type == CHIP_NAVI10 ||
2662 					      adev->asic_type == CHIP_NAVI14);
2663 	amdgpu_vmid_mgr_init(adev);
2664 
2665 	adev->vm_manager.fence_context =
2666 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2667 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2668 		adev->vm_manager.seqno[i] = 0;
2669 
2670 	spin_lock_init(&adev->vm_manager.prt_lock);
2671 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2672 
2673 	/* If not overridden by the user, compute VM tables are updated by the
2674 	 * CPU only on large BAR systems by default.
2675 	 */
2676 #ifdef CONFIG_X86_64
2677 	if (amdgpu_vm_update_mode == -1) {
2678 		/* For asic with VF MMIO access protection
2679 		 * avoid using CPU for VM table updates
2680 		 */
2681 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2682 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2683 			adev->vm_manager.vm_update_mode =
2684 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2685 		else
2686 			adev->vm_manager.vm_update_mode = 0;
2687 	} else
2688 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2689 #else
2690 	adev->vm_manager.vm_update_mode = 0;
2691 #endif
2692 
2693 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2694 }
2695 
2696 /**
2697  * amdgpu_vm_manager_fini - cleanup VM manager
2698  *
2699  * @adev: amdgpu_device pointer
2700  *
2701  * Cleanup the VM manager and free resources.
2702  */
2703 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2704 {
2705 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2706 	xa_destroy(&adev->vm_manager.pasids);
2707 
2708 	amdgpu_vmid_mgr_fini(adev);
2709 }
2710 
2711 /**
2712  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2713  *
2714  * @dev: drm device pointer
2715  * @data: drm_amdgpu_vm
2716  * @filp: drm file pointer
2717  *
2718  * Returns:
2719  * 0 for success, -errno for errors.
2720  */
2721 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2722 {
2723 	union drm_amdgpu_vm *args = data;
2724 	struct amdgpu_device *adev = drm_to_adev(dev);
2725 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2726 
2727 	/* No valid flags defined yet */
2728 	if (args->in.flags)
2729 		return -EINVAL;
2730 
2731 	switch (args->in.op) {
2732 	case AMDGPU_VM_OP_RESERVE_VMID:
2733 		/* We only need to reserve VMIDs from the gfxhub */
2734 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2735 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2736 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2737 		}
2738 
2739 		break;
2740 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2741 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2742 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2743 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2744 		}
2745 		break;
2746 	default:
2747 		return -EINVAL;
2748 	}
2749 
2750 	return 0;
2751 }
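
/*
 * Illustrative userspace sketch (libdrm-style, error handling omitted) for
 * reserving a VMID through this ioctl:
 *
 *	union drm_amdgpu_vm args = { .in.op = AMDGPU_VM_OP_RESERVE_VMID };
 *
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */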
2752 
2753 /**
2754  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2755  * @adev: amdgpu device pointer
2756  * @pasid: PASID of the VM
2757  * @vmid: VMID, only used for GFX 9.4.3.
2758  * @node_id: Node_id received in IH cookie. Only applicable for
2759  *           GFX 9.4.3.
2760  * @addr: Address of the fault
2761  * @write_fault: true if write fault, false if read fault
2762  *
2763  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2764  * shouldn't be reported any more.
2765  */
2766 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2767 			    u32 vmid, u32 node_id, uint64_t addr,
2768 			    bool write_fault)
2769 {
2770 	bool is_compute_context = false;
2771 	struct amdgpu_bo *root;
2772 	unsigned long irqflags;
2773 	uint64_t value, flags;
2774 	struct amdgpu_vm *vm;
2775 	int r;
2776 
2777 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2778 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2779 	if (vm) {
2780 		root = amdgpu_bo_ref(vm->root.bo);
2781 		is_compute_context = vm->is_compute_context;
2782 	} else {
2783 		root = NULL;
2784 	}
2785 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2786 
2787 	if (!root)
2788 		return false;
2789 
2790 	addr /= AMDGPU_GPU_PAGE_SIZE;
2791 
2792 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2793 	    node_id, addr, write_fault)) {
2794 		amdgpu_bo_unref(&root);
2795 		return true;
2796 	}
2797 
2798 	r = amdgpu_bo_reserve(root, true);
2799 	if (r)
2800 		goto error_unref;
2801 
2802 	/* Double check that the VM still exists */
2803 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2804 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2805 	if (vm && vm->root.bo != root)
2806 		vm = NULL;
2807 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2808 	if (!vm)
2809 		goto error_unlock;
2810 
2811 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2812 		AMDGPU_PTE_SYSTEM;
2813 
2814 	if (is_compute_context) {
2815 		/* Intentionally setting invalid PTE flag
2816 		 * combination to force a no-retry-fault
2817 		 */
2818 		flags = AMDGPU_VM_NORETRY_FLAGS;
2819 		value = 0;
2820 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2821 		/* Redirect the access to the dummy page */
2822 		value = adev->dummy_page_addr;
2823 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2824 			AMDGPU_PTE_WRITEABLE;
2825 
2826 	} else {
2827 		/* Let the hw retry silently on the PTE */
2828 		value = 0;
2829 	}
2830 
2831 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2832 	if (r) {
2833 		pr_debug("failed %d to reserve fence slot\n", r);
2834 		goto error_unlock;
2835 	}
2836 
2837 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2838 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2839 	if (r)
2840 		goto error_unlock;
2841 
2842 	r = amdgpu_vm_update_pdes(adev, vm, true);
2843 
2844 error_unlock:
2845 	amdgpu_bo_unreserve(root);
2846 	if (r < 0)
2847 		DRM_ERROR("Can't handle page fault (%d)\n", r);
2848 
2849 error_unref:
2850 	amdgpu_bo_unref(&root);
2851 
2852 	return false;
2853 }
2854 
2855 #if defined(CONFIG_DEBUG_FS)
2856 /**
2857  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2858  *
2859  * @vm: Requested VM for printing BO info
2860  * @m: debugfs file
2861  *
2862  * Print BO information in debugfs file for the VM
2863  */
2864 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2865 {
2866 	struct amdgpu_bo_va *bo_va, *tmp;
2867 	u64 total_idle = 0;
2868 	u64 total_evicted = 0;
2869 	u64 total_relocated = 0;
2870 	u64 total_moved = 0;
2871 	u64 total_invalidated = 0;
2872 	u64 total_done = 0;
2873 	unsigned int total_idle_objs = 0;
2874 	unsigned int total_evicted_objs = 0;
2875 	unsigned int total_relocated_objs = 0;
2876 	unsigned int total_moved_objs = 0;
2877 	unsigned int total_invalidated_objs = 0;
2878 	unsigned int total_done_objs = 0;
2879 	unsigned int id = 0;
2880 
2881 	spin_lock(&vm->status_lock);
2882 	seq_puts(m, "\tIdle BOs:\n");
2883 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2884 		if (!bo_va->base.bo)
2885 			continue;
2886 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2887 	}
2888 	total_idle_objs = id;
2889 	id = 0;
2890 
2891 	seq_puts(m, "\tEvicted BOs:\n");
2892 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2893 		if (!bo_va->base.bo)
2894 			continue;
2895 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2896 	}
2897 	total_evicted_objs = id;
2898 	id = 0;
2899 
2900 	seq_puts(m, "\tRelocated BOs:\n");
2901 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2902 		if (!bo_va->base.bo)
2903 			continue;
2904 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2905 	}
2906 	total_relocated_objs = id;
2907 	id = 0;
2908 
2909 	seq_puts(m, "\tMoved BOs:\n");
2910 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2911 		if (!bo_va->base.bo)
2912 			continue;
2913 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2914 	}
2915 	total_moved_objs = id;
2916 	id = 0;
2917 
2918 	seq_puts(m, "\tInvalidated BOs:\n");
2919 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2920 		if (!bo_va->base.bo)
2921 			continue;
2922 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2923 	}
2924 	total_invalidated_objs = id;
2925 	id = 0;
2926 
2927 	seq_puts(m, "\tDone BOs:\n");
2928 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2929 		if (!bo_va->base.bo)
2930 			continue;
2931 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2932 	}
2933 	spin_unlock(&vm->status_lock);
2934 	total_done_objs = id;
2935 
2936 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2937 		   total_idle_objs);
2938 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2939 		   total_evicted_objs);
2940 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2941 		   total_relocated_objs);
2942 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2943 		   total_moved_objs);
2944 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2945 		   total_invalidated_objs);
2946 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2947 		   total_done_objs);
2948 }
2949 #endif
2950 
2951 /**
2952  * amdgpu_vm_update_fault_cache - update cached fault info.
2953  * @adev: amdgpu device pointer
2954  * @pasid: PASID of the VM
2955  * @addr: Address of the fault
2956  * @status: GPUVM fault status register
2957  * @vmhub: which vmhub got the fault
2958  *
2959  * Cache the fault info for later use by userspace in debugging.
2960  */
2961 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
2962 				  unsigned int pasid,
2963 				  uint64_t addr,
2964 				  uint32_t status,
2965 				  unsigned int vmhub)
2966 {
2967 	struct amdgpu_vm *vm;
2968 	unsigned long flags;
2969 
2970 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2971 
2972 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2973 	/* Don't update the fault cache if status is 0.  In the multiple
2974 	 * fault case, subsequent faults will return a 0 status which is
2975 	 * useless for userspace and replaces the useful fault status, so
2976 	 * only update if status is non-0.
2977 	 */
2978 	if (vm && status) {
2979 		vm->fault_info.addr = addr;
2980 		vm->fault_info.status = status;
2981 		/*
2982 		 * Update the fault information globally for later usage
2983 		 * when vm could be stale or freed.
2984 		 */
2985 		adev->vm_manager.fault_info.addr = addr;
2986 		adev->vm_manager.fault_info.vmhub = vmhub;
2987 		adev->vm_manager.fault_info.status = status;
2988 
2989 		if (AMDGPU_IS_GFXHUB(vmhub)) {
2990 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
2991 			vm->fault_info.vmhub |=
2992 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
2993 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
2994 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
2995 			vm->fault_info.vmhub |=
2996 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
2997 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
2998 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
2999 			vm->fault_info.vmhub |=
3000 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3001 		} else {
3002 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3003 		}
3004 	}
3005 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3006 }
3007 
3008 /**
3009  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3010  *
3011  * @vm: VM to test against.
3012  * @bo: BO to be tested.
3013  *
3014  * Returns true if the BO shares the dma_resv object with the root PD and is
3015  * always guaranteed to be valid inside the VM.
3016  */
3017 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3018 {
3019 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3020 }
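
/*
 * Illustrative note: per-VM BOs created with AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
 * share the root PD's reservation object from the start, which is what this
 * check detects, e.g. the pattern used above when re-inserting split mappings:
 *
 *	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
 *		amdgpu_vm_bo_moved(&bo_va->base);
 */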
3021