xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision de848da12f752170c2ebe114804a985314fd5a6a)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_amdkfd.h"
41 #include "amdgpu_gmc.h"
42 #include "amdgpu_xgmi.h"
43 #include "amdgpu_dma_buf.h"
44 #include "amdgpu_res_cursor.h"
45 #include "kfd_svm.h"
46 
47 /**
48  * DOC: GPUVM
49  *
50  * GPUVM is the MMU functionality provided on the GPU.
51  * GPUVM is similar to the legacy GART on older asics, however
52  * rather than there being a single global GART table
53  * for the entire GPU, there can be multiple GPUVM page tables active
54  * at any given time.  The GPUVM page tables can contain a mix of
55  * VRAM pages and system pages (both memory and MMIO) and system pages
56  * can be mapped as snooped (cached system pages) or unsnooped
57  * (uncached system pages).
58  *
59  * Each active GPUVM has an ID associated with it and there is a page table
60  * linked with each VMID.  When executing a command buffer,
61  * the kernel tells the engine what VMID to use for that command
62  * buffer.  VMIDs are allocated dynamically as commands are submitted.
63  * The userspace drivers maintain their own address space and the kernel
64  * sets up their page tables accordingly when they submit their
65  * command buffers and a VMID is assigned.
66  * The hardware supports up to 16 active GPUVMs at any given time.
67  *
68  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
69  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
70  * as other features such as encryption and caching attributes.
71  *
72  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
73  * addition to an aperture managed by a page table, VMID 0 also has
74  * several other apertures.  There is an aperture for direct access to VRAM
75  * and there is a legacy AGP aperture which just forwards accesses directly
76  * to the matching system physical addresses (or IOVAs when an IOMMU is
77  * present).  These apertures provide direct access to these memories without
78  * incurring the overhead of a page table.  VMID 0 is used by the kernel
79  * driver for tasks like memory management.
80  *
81  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
82  * For user applications, each application can have their own unique GPUVM
83  * address space.  The application manages the address space and the kernel
84  * driver manages the GPUVM page tables for each process.  If a GPU client
85  * accesses an invalid page, it will generate a GPU page fault, similar to
86  * accessing an invalid page on a CPU.
87  */
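
/*
 * Illustrative sketch (not driver code): making a BO accessible in a GPUVM
 * combines the helpers defined below.  Reservation locking and error handling
 * are omitted; the BO and the VM root PD must be reserved when calling these
 * functions, and gpu_va is an assumed, page-aligned virtual address.
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (!r)
 *		r = amdgpu_vm_bo_update(adev, bo_va, false);
 */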
88 
89 #define START(node) ((node)->start)
90 #define LAST(node) ((node)->last)
91 
92 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
93 		     START, LAST, static, amdgpu_vm_it)
94 
95 #undef START
96 #undef LAST
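
/*
 * INTERVAL_TREE_DEFINE() above instantiates the mapping lookup helpers used
 * throughout this file (amdgpu_vm_it_insert/remove/iter_first/iter_next).
 * A typical walk over all mappings intersecting [start, last] looks like
 * this sketch, where handle() stands in for whatever the caller does:
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, start, last); m;
 *	     m = amdgpu_vm_it_iter_next(m, start, last))
 *		handle(m);
 */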
97 
98 /**
99  * struct amdgpu_prt_cb - Helper to disable the partially resident texture (PRT) feature from a fence callback
100  */
101 struct amdgpu_prt_cb {
102 
103 	/**
104 	 * @adev: amdgpu device
105 	 */
106 	struct amdgpu_device *adev;
107 
108 	/**
109 	 * @cb: callback
110 	 */
111 	struct dma_fence_cb cb;
112 };
113 
114 /**
115  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
116  */
117 struct amdgpu_vm_tlb_seq_struct {
118 	/**
119 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
120 	 */
121 	struct amdgpu_vm *vm;
122 
123 	/**
124 	 * @cb: callback
125 	 */
126 	struct dma_fence_cb cb;
127 };
128 
129 /**
130  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
131  *
132  * @adev: amdgpu_device pointer
133  * @vm: amdgpu_vm pointer
134  * @pasid: the pasid the VM is using on this GPU
135  *
136  * Set the pasid this VM is using on this GPU, can also be used to remove the
137  * pasid by passing in zero.
138  *
139  */
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
141 			u32 pasid)
142 {
143 	int r;
144 
145 	if (vm->pasid == pasid)
146 		return 0;
147 
148 	if (vm->pasid) {
149 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
150 		if (r < 0)
151 			return r;
152 
153 		vm->pasid = 0;
154 	}
155 
156 	if (pasid) {
157 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
158 					GFP_KERNEL));
159 		if (r < 0)
160 			return r;
161 
162 		vm->pasid = pasid;
163 	}
164 
165 
166 	return 0;
167 }
168 
169 /**
170  * amdgpu_vm_bo_evicted - vm_bo is evicted
171  *
172  * @vm_bo: vm_bo which is evicted
173  *
174  * State for PDs/PTs and per VM BOs which are not at the location they should
175  * be.
176  */
177 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
178 {
179 	struct amdgpu_vm *vm = vm_bo->vm;
180 	struct amdgpu_bo *bo = vm_bo->bo;
181 
182 	vm_bo->moved = true;
183 	spin_lock(&vm_bo->vm->status_lock);
184 	if (bo->tbo.type == ttm_bo_type_kernel)
185 		list_move(&vm_bo->vm_status, &vm->evicted);
186 	else
187 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
188 	spin_unlock(&vm_bo->vm->status_lock);
189 }
190 /**
191  * amdgpu_vm_bo_moved - vm_bo is moved
192  *
193  * @vm_bo: vm_bo which is moved
194  *
195  * State for per VM BOs which are moved, but that change is not yet reflected
196  * in the page tables.
197  */
198 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
199 {
200 	spin_lock(&vm_bo->vm->status_lock);
201 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
202 	spin_unlock(&vm_bo->vm->status_lock);
203 }
204 
205 /**
206  * amdgpu_vm_bo_idle - vm_bo is idle
207  *
208  * @vm_bo: vm_bo which is now idle
209  *
210  * State for PDs/PTs and per VM BOs which have gone through the state machine
211  * and are now idle.
212  */
213 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
214 {
215 	spin_lock(&vm_bo->vm->status_lock);
216 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
217 	spin_unlock(&vm_bo->vm->status_lock);
218 	vm_bo->moved = false;
219 }
220 
221 /**
222  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
223  *
224  * @vm_bo: vm_bo which is now invalidated
225  *
226  * State for normal BOs which are invalidated and that change is not yet reflected
227  * in the PTs.
228  */
229 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
230 {
231 	spin_lock(&vm_bo->vm->status_lock);
232 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
233 	spin_unlock(&vm_bo->vm->status_lock);
234 }
235 
236 /**
237  * amdgpu_vm_bo_evicted_user - vm_bo is evicted
238  *
239  * @vm_bo: vm_bo which is evicted
240  *
241  * State for BOs used by user mode queues which are not at the location they
242  * should be.
243  */
244 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
245 {
246 	vm_bo->moved = true;
247 	spin_lock(&vm_bo->vm->status_lock);
248 	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
249 	spin_unlock(&vm_bo->vm->status_lock);
250 }
251 
252 /**
253  * amdgpu_vm_bo_relocated - vm_bo is relocated
254  *
255  * @vm_bo: vm_bo which is relocated
256  *
257  * State for PDs/PTs which need to update their parent PD.
258  * For the root PD, just move to idle state.
259  */
260 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
261 {
262 	if (vm_bo->bo->parent) {
263 		spin_lock(&vm_bo->vm->status_lock);
264 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
265 		spin_unlock(&vm_bo->vm->status_lock);
266 	} else {
267 		amdgpu_vm_bo_idle(vm_bo);
268 	}
269 }
270 
271 /**
272  * amdgpu_vm_bo_done - vm_bo is done
273  *
274  * @vm_bo: vm_bo which is now done
275  *
276  * State for normal BOs which are invalidated and that change has been updated
277  * in the PTs.
278  */
279 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
280 {
281 	spin_lock(&vm_bo->vm->status_lock);
282 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
283 	spin_unlock(&vm_bo->vm->status_lock);
284 }
285 
286 /**
287  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
288  * @vm: the VM which state machine to reset
289  *
290  * Move all vm_bo object in the VM into a state where they will be updated
291  * again during validation.
292  */
293 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
294 {
295 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
296 
297 	spin_lock(&vm->status_lock);
298 	list_splice_init(&vm->done, &vm->invalidated);
299 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
300 		vm_bo->moved = true;
301 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
302 		struct amdgpu_bo *bo = vm_bo->bo;
303 
304 		vm_bo->moved = true;
305 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
306 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
307 		else if (bo->parent)
308 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
309 	}
310 	spin_unlock(&vm->status_lock);
311 }
312 
313 /**
314  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
315  *
316  * @base: base structure for tracking BO usage in a VM
317  * @vm: vm to which bo is to be added
318  * @bo: amdgpu buffer object
319  *
320  * Initialize an amdgpu_vm_bo_base structure and add it to the appropriate lists
321  *
322  */
323 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
324 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
325 {
326 	base->vm = vm;
327 	base->bo = bo;
328 	base->next = NULL;
329 	INIT_LIST_HEAD(&base->vm_status);
330 
331 	if (!bo)
332 		return;
333 	base->next = bo->vm_bo;
334 	bo->vm_bo = base;
335 
336 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
337 		return;
338 
339 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
340 
341 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
342 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
343 		amdgpu_vm_bo_relocated(base);
344 	else
345 		amdgpu_vm_bo_idle(base);
346 
347 	if (bo->preferred_domains &
348 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
349 		return;
350 
351 	/*
352 	 * We checked all the prerequisites, but it looks like this per VM BO
353 	 * is currently evicted. Add the BO to the evicted list to make sure it
354 	 * is validated on next VM use to avoid faults.
355 	 */
356 	amdgpu_vm_bo_evicted(base);
357 }
358 
359 /**
360  * amdgpu_vm_lock_pd - lock PD in drm_exec
361  *
362  * @vm: vm providing the BOs
363  * @exec: drm execution context
364  * @num_fences: number of extra fences to reserve
365  *
366  * Lock the VM root PD in the DRM execution context.
367  */
368 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
369 		      unsigned int num_fences)
370 {
371 	/* We need at least two fences for the VM PD/PT updates */
372 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
373 				    2 + num_fences);
374 }
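
/*
 * Typical usage together with drm_exec (illustrative sketch; the real users
 * live in the CS, GEM and KFD code):
 *
 *	struct drm_exec exec;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 */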
375 
376 /**
377  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
378  *
379  * @adev: amdgpu device pointer
380  * @vm: vm providing the BOs
381  *
382  * Move all BOs to the end of LRU and remember their positions to put them
383  * together.
384  */
385 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
386 				struct amdgpu_vm *vm)
387 {
388 	spin_lock(&adev->mman.bdev.lru_lock);
389 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
390 	spin_unlock(&adev->mman.bdev.lru_lock);
391 }
392 
393 /* Create scheduler entities for page table updates */
394 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
395 				   struct amdgpu_vm *vm)
396 {
397 	int r;
398 
399 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
400 				  adev->vm_manager.vm_pte_scheds,
401 				  adev->vm_manager.vm_pte_num_scheds, NULL);
402 	if (r)
403 		goto error;
404 
405 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
406 				     adev->vm_manager.vm_pte_scheds,
407 				     adev->vm_manager.vm_pte_num_scheds, NULL);
408 
409 error:
410 	drm_sched_entity_destroy(&vm->immediate);
411 	return r;
412 }
413 
414 /* Destroy the entities for page table updates again */
415 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
416 {
417 	drm_sched_entity_destroy(&vm->immediate);
418 	drm_sched_entity_destroy(&vm->delayed);
419 }
420 
421 /**
422  * amdgpu_vm_generation - return the page table re-generation counter
423  * @adev: the amdgpu_device
424  * @vm: optional VM to check, might be NULL
425  *
426  * Returns a page table re-generation token to allow checking if submissions
427  * are still valid to use this VM. The VM parameter might be NULL in which case
428  * just the VRAM lost counter will be used.
429  */
430 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
431 {
432 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
433 
434 	if (!vm)
435 		return result;
436 
437 	result += lower_32_bits(vm->generation);
438 	/* Add one if the page tables will be re-generated on next CS */
439 	if (drm_sched_entity_error(&vm->delayed))
440 		++result;
441 
442 	return result;
443 }
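
/*
 * Sketch of how the token is meant to be used (illustrative): sample it when
 * work is created and compare again before relying on the page tables.
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *
 *	... prepare submission ...
 *
 *	if (gen != amdgpu_vm_generation(adev, vm)) {
 *		... page tables were re-generated, revalidate ...
 *	}
 */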
444 
445 /**
446  * amdgpu_vm_validate - validate evicted BOs tracked in the VM
447  *
448  * @adev: amdgpu device pointer
449  * @vm: vm providing the BOs
450  * @ticket: optional reservation ticket used to reserve the VM
451  * @validate: callback to do the validation
452  * @param: parameter for the validation callback
453  *
454  * Validate the page table BOs and per-VM BOs on command submission if
455  * necessary. If a ticket is given, also try to validate evicted user queue
456  * BOs. They must already be reserved with the given ticket.
457  *
458  * Returns:
459  * Validation result.
460  */
461 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
462 		       struct ww_acquire_ctx *ticket,
463 		       int (*validate)(void *p, struct amdgpu_bo *bo),
464 		       void *param)
465 {
466 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
467 	struct amdgpu_vm_bo_base *bo_base;
468 	struct amdgpu_bo *shadow;
469 	struct amdgpu_bo *bo;
470 	int r;
471 
472 	if (vm->generation != new_vm_generation) {
473 		vm->generation = new_vm_generation;
474 		amdgpu_vm_bo_reset_state_machine(vm);
475 		amdgpu_vm_fini_entities(vm);
476 		r = amdgpu_vm_init_entities(adev, vm);
477 		if (r)
478 			return r;
479 	}
480 
481 	spin_lock(&vm->status_lock);
482 	while (!list_empty(&vm->evicted)) {
483 		bo_base = list_first_entry(&vm->evicted,
484 					   struct amdgpu_vm_bo_base,
485 					   vm_status);
486 		spin_unlock(&vm->status_lock);
487 
488 		bo = bo_base->bo;
489 		shadow = amdgpu_bo_shadowed(bo);
490 
491 		r = validate(param, bo);
492 		if (r)
493 			return r;
494 		if (shadow) {
495 			r = validate(param, shadow);
496 			if (r)
497 				return r;
498 		}
499 
500 		if (bo->tbo.type != ttm_bo_type_kernel) {
501 			amdgpu_vm_bo_moved(bo_base);
502 		} else {
503 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
504 			amdgpu_vm_bo_relocated(bo_base);
505 		}
506 		spin_lock(&vm->status_lock);
507 	}
508 	while (ticket && !list_empty(&vm->evicted_user)) {
509 		bo_base = list_first_entry(&vm->evicted_user,
510 					   struct amdgpu_vm_bo_base,
511 					   vm_status);
512 		spin_unlock(&vm->status_lock);
513 
514 		bo = bo_base->bo;
515 
516 		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
517 			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
518 
519 			pr_warn_ratelimited("Evicted user BO is not reserved\n");
520 			if (ti) {
521 				pr_warn_ratelimited("pid %d\n", ti->pid);
522 				amdgpu_vm_put_task_info(ti);
523 			}
524 
525 			return -EINVAL;
526 		}
527 
528 		r = validate(param, bo);
529 		if (r)
530 			return r;
531 
532 		amdgpu_vm_bo_invalidated(bo_base);
533 
534 		spin_lock(&vm->status_lock);
535 	}
536 	spin_unlock(&vm->status_lock);
537 
538 	amdgpu_vm_eviction_lock(vm);
539 	vm->evicting = false;
540 	amdgpu_vm_eviction_unlock(vm);
541 
542 	return 0;
543 }
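
/*
 * Illustrative sketch of a validation callback as passed to
 * amdgpu_vm_validate() above; example_validate() is an assumed name, the
 * real callbacks live in the CS and KFD code:
 *
 *	static int example_validate(void *param, struct amdgpu_bo *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { true, false };
 *
 *		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
 *		return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 */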
544 
545 /**
546  * amdgpu_vm_ready - check VM is ready for updates
547  *
548  * @vm: VM to check
549  *
550  * Check if all VM PDs/PTs are ready for updates
551  *
552  * Returns:
553  * True if the VM is not evicting and no evicted PDs/PTs are pending.
554  */
555 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
556 {
557 	bool empty;
558 	bool ret;
559 
560 	amdgpu_vm_eviction_lock(vm);
561 	ret = !vm->evicting;
562 	amdgpu_vm_eviction_unlock(vm);
563 
564 	spin_lock(&vm->status_lock);
565 	empty = list_empty(&vm->evicted);
566 	spin_unlock(&vm->status_lock);
567 
568 	return ret && empty;
569 }
570 
571 /**
572  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
573  *
574  * @adev: amdgpu_device pointer
575  */
576 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
577 {
578 	const struct amdgpu_ip_block *ip_block;
579 	bool has_compute_vm_bug;
580 	struct amdgpu_ring *ring;
581 	int i;
582 
583 	has_compute_vm_bug = false;
584 
585 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
586 	if (ip_block) {
587 		/* Compute has a VM bug for GFX version < 7.
588 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
589 		if (ip_block->version->major <= 7)
590 			has_compute_vm_bug = true;
591 		else if (ip_block->version->major == 8)
592 			if (adev->gfx.mec_fw_version < 673)
593 				has_compute_vm_bug = true;
594 	}
595 
596 	for (i = 0; i < adev->num_rings; i++) {
597 		ring = adev->rings[i];
598 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
599 			/* only compute rings */
600 			ring->has_compute_vm_bug = has_compute_vm_bug;
601 		else
602 			ring->has_compute_vm_bug = false;
603 	}
604 }
605 
606 /**
607  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
608  *
609  * @ring: ring on which the job will be submitted
610  * @job: job to submit
611  *
612  * Returns:
613  * True if sync is needed.
614  */
615 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
616 				  struct amdgpu_job *job)
617 {
618 	struct amdgpu_device *adev = ring->adev;
619 	unsigned vmhub = ring->vm_hub;
620 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
621 
622 	if (job->vmid == 0)
623 		return false;
624 
625 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
626 		return true;
627 
628 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
629 		return true;
630 
631 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
632 		return true;
633 
634 	return false;
635 }
636 
637 /**
638  * amdgpu_vm_flush - hardware flush the vm
639  *
640  * @ring: ring to use for flush
641  * @job:  related job
642  * @need_pipe_sync: is pipe sync needed
643  *
644  * Emit a VM flush when it is necessary.
645  *
646  * Returns:
647  * 0 on success, errno otherwise.
648  */
649 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
650 		    bool need_pipe_sync)
651 {
652 	struct amdgpu_device *adev = ring->adev;
653 	unsigned vmhub = ring->vm_hub;
654 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
655 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
656 	bool spm_update_needed = job->spm_update_needed;
657 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
658 		job->gds_switch_needed;
659 	bool vm_flush_needed = job->vm_needs_flush;
660 	struct dma_fence *fence = NULL;
661 	bool pasid_mapping_needed = false;
662 	unsigned int patch;
663 	int r;
664 
665 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
666 		gds_switch_needed = true;
667 		vm_flush_needed = true;
668 		pasid_mapping_needed = true;
669 		spm_update_needed = true;
670 	}
671 
672 	mutex_lock(&id_mgr->lock);
673 	if (id->pasid != job->pasid || !id->pasid_mapping ||
674 	    !dma_fence_is_signaled(id->pasid_mapping))
675 		pasid_mapping_needed = true;
676 	mutex_unlock(&id_mgr->lock);
677 
678 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
679 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
680 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
681 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
682 		ring->funcs->emit_wreg;
683 
684 	if (adev->gfx.enable_cleaner_shader &&
685 	    ring->funcs->emit_cleaner_shader &&
686 	    job->enforce_isolation)
687 		ring->funcs->emit_cleaner_shader(ring);
688 
689 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
690 		return 0;
691 
692 	amdgpu_ring_ib_begin(ring);
693 	if (ring->funcs->init_cond_exec)
694 		patch = amdgpu_ring_init_cond_exec(ring,
695 						   ring->cond_exe_gpu_addr);
696 
697 	if (need_pipe_sync)
698 		amdgpu_ring_emit_pipeline_sync(ring);
699 
700 	if (vm_flush_needed) {
701 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
702 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
703 	}
704 
705 	if (pasid_mapping_needed)
706 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
707 
708 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
709 		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
710 
711 	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
712 	    gds_switch_needed) {
713 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
714 					    job->gds_size, job->gws_base,
715 					    job->gws_size, job->oa_base,
716 					    job->oa_size);
717 	}
718 
719 	if (vm_flush_needed || pasid_mapping_needed) {
720 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
721 		if (r)
722 			return r;
723 	}
724 
725 	if (vm_flush_needed) {
726 		mutex_lock(&id_mgr->lock);
727 		dma_fence_put(id->last_flush);
728 		id->last_flush = dma_fence_get(fence);
729 		id->current_gpu_reset_count =
730 			atomic_read(&adev->gpu_reset_counter);
731 		mutex_unlock(&id_mgr->lock);
732 	}
733 
734 	if (pasid_mapping_needed) {
735 		mutex_lock(&id_mgr->lock);
736 		id->pasid = job->pasid;
737 		dma_fence_put(id->pasid_mapping);
738 		id->pasid_mapping = dma_fence_get(fence);
739 		mutex_unlock(&id_mgr->lock);
740 	}
741 	dma_fence_put(fence);
742 
743 	amdgpu_ring_patch_cond_exec(ring, patch);
744 
745 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
746 	if (ring->funcs->emit_switch_buffer) {
747 		amdgpu_ring_emit_switch_buffer(ring);
748 		amdgpu_ring_emit_switch_buffer(ring);
749 	}
750 
751 	amdgpu_ring_ib_end(ring);
752 	return 0;
753 }
754 
755 /**
756  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
757  *
758  * @vm: requested vm
759  * @bo: requested buffer object
760  *
761  * Find @bo inside the requested vm.
762  * Search inside the @bo's vm list for the requested vm
763  * Returns the found bo_va or NULL if none is found
764  *
765  * Object has to be reserved!
766  *
767  * Returns:
768  * Found bo_va or NULL.
769  */
770 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
771 				       struct amdgpu_bo *bo)
772 {
773 	struct amdgpu_vm_bo_base *base;
774 
775 	for (base = bo->vm_bo; base; base = base->next) {
776 		if (base->vm != vm)
777 			continue;
778 
779 		return container_of(base, struct amdgpu_bo_va, base);
780 	}
781 	return NULL;
782 }
783 
784 /**
785  * amdgpu_vm_map_gart - Resolve gart mapping of addr
786  *
787  * @pages_addr: optional DMA address to use for lookup
788  * @addr: the unmapped addr
789  *
790  * Look up the physical address of the page that the pte resolves
791  * to.
792  *
793  * Returns:
794  * The pointer for the page table entry.
795  */
796 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
797 {
798 	uint64_t result;
799 
800 	/* page table offset */
801 	result = pages_addr[addr >> PAGE_SHIFT];
802 
803 	/* in case cpu page size != gpu page size */
804 	result |= addr & (~PAGE_MASK);
805 
806 	result &= 0xFFFFFFFFFFFFF000ULL;
807 
808 	return result;
809 }
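
/*
 * Worked example (assuming 4K CPU pages): for addr = 0x12345678 the helper
 * reads pages_addr[0x12345] and ORs in the in-page offset 0x678, so the
 * result points at the same byte inside the DMA-mapped CPU page.
 */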
810 
811 /**
812  * amdgpu_vm_update_pdes - make sure that all directories are valid
813  *
814  * @adev: amdgpu_device pointer
815  * @vm: requested vm
816  * @immediate: submit immediately to the paging queue
817  *
818  * Makes sure all directories are up to date.
819  *
820  * Returns:
821  * 0 for success, error for failure.
822  */
823 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
824 			  struct amdgpu_vm *vm, bool immediate)
825 {
826 	struct amdgpu_vm_update_params params;
827 	struct amdgpu_vm_bo_base *entry;
828 	bool flush_tlb_needed = false;
829 	LIST_HEAD(relocated);
830 	int r, idx;
831 
832 	spin_lock(&vm->status_lock);
833 	list_splice_init(&vm->relocated, &relocated);
834 	spin_unlock(&vm->status_lock);
835 
836 	if (list_empty(&relocated))
837 		return 0;
838 
839 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
840 		return -ENODEV;
841 
842 	memset(&params, 0, sizeof(params));
843 	params.adev = adev;
844 	params.vm = vm;
845 	params.immediate = immediate;
846 
847 	r = vm->update_funcs->prepare(&params, NULL);
848 	if (r)
849 		goto error;
850 
851 	list_for_each_entry(entry, &relocated, vm_status) {
852 		/* vm_flush_needed after updating moved PDEs */
853 		flush_tlb_needed |= entry->moved;
854 
855 		r = amdgpu_vm_pde_update(&params, entry);
856 		if (r)
857 			goto error;
858 	}
859 
860 	r = vm->update_funcs->commit(&params, &vm->last_update);
861 	if (r)
862 		goto error;
863 
864 	if (flush_tlb_needed)
865 		atomic64_inc(&vm->tlb_seq);
866 
867 	while (!list_empty(&relocated)) {
868 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
869 					 vm_status);
870 		amdgpu_vm_bo_idle(entry);
871 	}
872 
873 error:
874 	drm_dev_exit(idx);
875 	return r;
876 }
877 
878 /**
879  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
880  * @fence: unused
881  * @cb: the callback structure
882  *
883  * Increments the tlb sequence to make sure that future CS execute a VM flush.
884  */
885 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
886 				 struct dma_fence_cb *cb)
887 {
888 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
889 
890 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
891 	atomic64_inc(&tlb_cb->vm->tlb_seq);
892 	kfree(tlb_cb);
893 }
894 
895 /**
896  * amdgpu_vm_tlb_flush - prepare TLB flush
897  *
898  * @params: parameters for update
899  * @fence: input fence to sync TLB flush with
900  * @tlb_cb: the callback structure
901  *
902  * Increments the tlb sequence, directly or once @fence signals, so future CS execute a VM flush.
903  */
904 static void
905 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
906 		    struct dma_fence **fence,
907 		    struct amdgpu_vm_tlb_seq_struct *tlb_cb)
908 {
909 	struct amdgpu_vm *vm = params->vm;
910 
911 	tlb_cb->vm = vm;
912 	if (!fence || !*fence) {
913 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
914 		return;
915 	}
916 
917 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
918 				    amdgpu_vm_tlb_seq_cb)) {
919 		dma_fence_put(vm->last_tlb_flush);
920 		vm->last_tlb_flush = dma_fence_get(*fence);
921 	} else {
922 		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
923 	}
924 
925 	/* Prepare a TLB flush fence to be attached to PTs */
926 	if (!params->unlocked && vm->is_compute_context) {
927 		amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
928 
929 		/* Makes sure no PD/PT is freed before the flush */
930 		dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
931 				   DMA_RESV_USAGE_BOOKKEEP);
932 	}
933 }
934 
935 /**
936  * amdgpu_vm_update_range - update a range in the vm page table
937  *
938  * @adev: amdgpu_device pointer to use for commands
939  * @vm: the VM to update the range
940  * @immediate: immediate submission in a page fault
941  * @unlocked: unlocked invalidation during MM callback
942  * @flush_tlb: trigger tlb invalidation after update completed
943  * @allow_override: change MTYPE for local NUMA nodes
944  * @sync: fences we need to sync to
945  * @start: start of mapped range
946  * @last: last mapped entry
947  * @flags: flags for the entries
948  * @offset: offset into nodes and pages_addr
949  * @vram_base: base for vram mappings
950  * @res: ttm_resource to map
951  * @pages_addr: DMA addresses to use for mapping
952  * @fence: optional resulting fence
953  *
954  * Fill in the page table entries between @start and @last.
955  *
956  * Returns:
957  * 0 for success, negative error code for failure.
958  */
959 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
960 			   bool immediate, bool unlocked, bool flush_tlb,
961 			   bool allow_override, struct amdgpu_sync *sync,
962 			   uint64_t start, uint64_t last, uint64_t flags,
963 			   uint64_t offset, uint64_t vram_base,
964 			   struct ttm_resource *res, dma_addr_t *pages_addr,
965 			   struct dma_fence **fence)
966 {
967 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
968 	struct amdgpu_vm_update_params params;
969 	struct amdgpu_res_cursor cursor;
970 	int r, idx;
971 
972 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
973 		return -ENODEV;
974 
975 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
976 	if (!tlb_cb) {
977 		drm_dev_exit(idx);
978 		return -ENOMEM;
979 	}
980 
981 	/* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2
982 	 * texture cache, so use a heavy-weight TLB flush there unconditionally.
983 	 */
984 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
985 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
986 
987 	/*
988 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
989 	 */
990 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
991 
992 	memset(&params, 0, sizeof(params));
993 	params.adev = adev;
994 	params.vm = vm;
995 	params.immediate = immediate;
996 	params.pages_addr = pages_addr;
997 	params.unlocked = unlocked;
998 	params.needs_flush = flush_tlb;
999 	params.allow_override = allow_override;
1000 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1001 
1002 	amdgpu_vm_eviction_lock(vm);
1003 	if (vm->evicting) {
1004 		r = -EBUSY;
1005 		goto error_free;
1006 	}
1007 
1008 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1009 		struct dma_fence *tmp = dma_fence_get_stub();
1010 
1011 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1012 		swap(vm->last_unlocked, tmp);
1013 		dma_fence_put(tmp);
1014 	}
1015 
1016 	r = vm->update_funcs->prepare(&params, sync);
1017 	if (r)
1018 		goto error_free;
1019 
1020 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1021 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1022 	while (cursor.remaining) {
1023 		uint64_t tmp, num_entries, addr;
1024 
1025 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1026 		if (pages_addr) {
1027 			bool contiguous = true;
1028 
1029 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1030 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1031 				uint64_t count;
1032 
1033 				contiguous = pages_addr[pfn + 1] ==
1034 					pages_addr[pfn] + PAGE_SIZE;
1035 
1036 				tmp = num_entries /
1037 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1038 				for (count = 2; count < tmp; ++count) {
1039 					uint64_t idx = pfn + count;
1040 
1041 					if (contiguous != (pages_addr[idx] ==
1042 					    pages_addr[idx - 1] + PAGE_SIZE))
1043 						break;
1044 				}
1045 				if (!contiguous)
1046 					count--;
1047 				num_entries = count *
1048 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1049 			}
1050 
1051 			if (!contiguous) {
1052 				addr = cursor.start;
1053 				params.pages_addr = pages_addr;
1054 			} else {
1055 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1056 				params.pages_addr = NULL;
1057 			}
1058 
1059 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1060 			addr = vram_base + cursor.start;
1061 		} else {
1062 			addr = 0;
1063 		}
1064 
1065 		tmp = start + num_entries;
1066 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1067 		if (r)
1068 			goto error_free;
1069 
1070 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1071 		start = tmp;
1072 	}
1073 
1074 	r = vm->update_funcs->commit(&params, fence);
1075 	if (r)
1076 		goto error_free;
1077 
1078 	if (params.needs_flush) {
1079 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1080 		tlb_cb = NULL;
1081 	}
1082 
1083 	amdgpu_vm_pt_free_list(adev, &params);
1084 
1085 error_free:
1086 	kfree(tlb_cb);
1087 	amdgpu_vm_eviction_unlock(vm);
1088 	drm_dev_exit(idx);
1089 	return r;
1090 }
1091 
1092 static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1093 				    struct amdgpu_mem_stats *stats)
1094 {
1095 	struct amdgpu_vm *vm = bo_va->base.vm;
1096 	struct amdgpu_bo *bo = bo_va->base.bo;
1097 
1098 	if (!bo)
1099 		return;
1100 
1101 	/*
1102 	 * For now ignore BOs which are currently locked and potentially
1103 	 * changing their location.
1104 	 */
1105 	if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
1106 	    !dma_resv_trylock(bo->tbo.base.resv))
1107 		return;
1108 
1109 	amdgpu_bo_get_memory(bo, stats);
1110 	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
1111 		dma_resv_unlock(bo->tbo.base.resv);
1112 }
1113 
1114 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1115 			  struct amdgpu_mem_stats *stats)
1116 {
1117 	struct amdgpu_bo_va *bo_va, *tmp;
1118 
1119 	spin_lock(&vm->status_lock);
1120 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1121 		amdgpu_vm_bo_get_memory(bo_va, stats);
1122 
1123 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1124 		amdgpu_vm_bo_get_memory(bo_va, stats);
1125 
1126 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1127 		amdgpu_vm_bo_get_memory(bo_va, stats);
1128 
1129 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1130 		amdgpu_vm_bo_get_memory(bo_va, stats);
1131 
1132 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1133 		amdgpu_vm_bo_get_memory(bo_va, stats);
1134 
1135 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1136 		amdgpu_vm_bo_get_memory(bo_va, stats);
1137 	spin_unlock(&vm->status_lock);
1138 }
1139 
1140 /**
1141  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1142  *
1143  * @adev: amdgpu_device pointer
1144  * @bo_va: requested BO and VM object
1145  * @clear: if true clear the entries
1146  *
1147  * Fill in the page table entries for @bo_va.
1148  *
1149  * Returns:
1150  * 0 for success, -EINVAL for failure.
1151  */
1152 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1153 			bool clear)
1154 {
1155 	struct amdgpu_bo *bo = bo_va->base.bo;
1156 	struct amdgpu_vm *vm = bo_va->base.vm;
1157 	struct amdgpu_bo_va_mapping *mapping;
1158 	struct dma_fence **last_update;
1159 	dma_addr_t *pages_addr = NULL;
1160 	struct ttm_resource *mem;
1161 	struct amdgpu_sync sync;
1162 	bool flush_tlb = clear;
1163 	uint64_t vram_base;
1164 	uint64_t flags;
1165 	bool uncached;
1166 	int r;
1167 
1168 	amdgpu_sync_create(&sync);
1169 	if (clear || !bo) {
1170 		mem = NULL;
1171 
1172 		/* Implicitly sync to command submissions in the same VM before
1173 		 * unmapping.
1174 		 */
1175 		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1176 				     AMDGPU_SYNC_EQ_OWNER, vm);
1177 		if (r)
1178 			goto error_free;
1179 	} else {
1180 		struct drm_gem_object *obj = &bo->tbo.base;
1181 
1182 		if (obj->import_attach && bo_va->is_xgmi) {
1183 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1184 			struct drm_gem_object *gobj = dma_buf->priv;
1185 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1186 
1187 			if (abo->tbo.resource &&
1188 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1189 				bo = gem_to_amdgpu_bo(gobj);
1190 		}
1191 		mem = bo->tbo.resource;
1192 		if (mem && (mem->mem_type == TTM_PL_TT ||
1193 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1194 			pages_addr = bo->tbo.ttm->dma_address;
1195 
1196 		/* Implicitly sync to moving fences before mapping anything */
1197 		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1198 				     AMDGPU_SYNC_EXPLICIT, vm);
1199 		if (r)
1200 			goto error_free;
1201 	}
1202 
1203 	if (bo) {
1204 		struct amdgpu_device *bo_adev;
1205 
1206 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1207 
1208 		if (amdgpu_bo_encrypted(bo))
1209 			flags |= AMDGPU_PTE_TMZ;
1210 
1211 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1212 		vram_base = bo_adev->vm_manager.vram_base_offset;
1213 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1214 	} else {
1215 		flags = 0x0;
1216 		vram_base = 0;
1217 		uncached = false;
1218 	}
1219 
1220 	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1221 		last_update = &vm->last_update;
1222 	else
1223 		last_update = &bo_va->last_pt_update;
1224 
1225 	if (!clear && bo_va->base.moved) {
1226 		flush_tlb = true;
1227 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1228 
1229 	} else if (bo_va->cleared != clear) {
1230 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1231 	}
1232 
1233 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1234 		uint64_t update_flags = flags;
1235 
1236 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1237 		 * bits, but just in case we filter the flags here first.
1238 		 */
1239 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1240 			update_flags &= ~AMDGPU_PTE_READABLE;
1241 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1242 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1243 
1244 		/* Apply ASIC specific mapping flags */
1245 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1246 
1247 		trace_amdgpu_vm_bo_update(mapping);
1248 
1249 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1250 					   !uncached, &sync, mapping->start,
1251 					   mapping->last, update_flags,
1252 					   mapping->offset, vram_base, mem,
1253 					   pages_addr, last_update);
1254 		if (r)
1255 			goto error_free;
1256 	}
1257 
1258 	/* If the BO is not in its preferred location add it back to
1259 	 * the evicted list so that it gets validated again on the
1260 	 * next command submission.
1261 	 */
1262 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1263 		uint32_t mem_type = bo->tbo.resource->mem_type;
1264 
1265 		if (!(bo->preferred_domains &
1266 		      amdgpu_mem_type_to_domain(mem_type)))
1267 			amdgpu_vm_bo_evicted(&bo_va->base);
1268 		else
1269 			amdgpu_vm_bo_idle(&bo_va->base);
1270 	} else {
1271 		amdgpu_vm_bo_done(&bo_va->base);
1272 	}
1273 
1274 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1275 	bo_va->cleared = clear;
1276 	bo_va->base.moved = false;
1277 
1278 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1279 		list_for_each_entry(mapping, &bo_va->valids, list)
1280 			trace_amdgpu_vm_bo_mapping(mapping);
1281 	}
1282 
1283 error_free:
1284 	amdgpu_sync_free(&sync);
1285 	return r;
1286 }
1287 
1288 /**
1289  * amdgpu_vm_update_prt_state - update the global PRT state
1290  *
1291  * @adev: amdgpu_device pointer
1292  */
1293 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1294 {
1295 	unsigned long flags;
1296 	bool enable;
1297 
1298 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1299 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1300 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1301 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1302 }
1303 
1304 /**
1305  * amdgpu_vm_prt_get - add a PRT user
1306  *
1307  * @adev: amdgpu_device pointer
1308  */
1309 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1310 {
1311 	if (!adev->gmc.gmc_funcs->set_prt)
1312 		return;
1313 
1314 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1315 		amdgpu_vm_update_prt_state(adev);
1316 }
1317 
1318 /**
1319  * amdgpu_vm_prt_put - drop a PRT user
1320  *
1321  * @adev: amdgpu_device pointer
1322  */
1323 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1324 {
1325 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1326 		amdgpu_vm_update_prt_state(adev);
1327 }
1328 
1329 /**
1330  * amdgpu_vm_prt_cb - callback for updating the PRT status
1331  *
1332  * @fence: fence for the callback
1333  * @_cb: the callback function
1334  */
1335 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1336 {
1337 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1338 
1339 	amdgpu_vm_prt_put(cb->adev);
1340 	kfree(cb);
1341 }
1342 
1343 /**
1344  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1345  *
1346  * @adev: amdgpu_device pointer
1347  * @fence: fence for the callback
1348  */
1349 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1350 				 struct dma_fence *fence)
1351 {
1352 	struct amdgpu_prt_cb *cb;
1353 
1354 	if (!adev->gmc.gmc_funcs->set_prt)
1355 		return;
1356 
1357 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1358 	if (!cb) {
1359 		/* Last resort when we are OOM */
1360 		if (fence)
1361 			dma_fence_wait(fence, false);
1362 
1363 		amdgpu_vm_prt_put(adev);
1364 	} else {
1365 		cb->adev = adev;
1366 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1367 						     amdgpu_vm_prt_cb))
1368 			amdgpu_vm_prt_cb(fence, &cb->cb);
1369 	}
1370 }
1371 
1372 /**
1373  * amdgpu_vm_free_mapping - free a mapping
1374  *
1375  * @adev: amdgpu_device pointer
1376  * @vm: requested vm
1377  * @mapping: mapping to be freed
1378  * @fence: fence of the unmap operation
1379  *
1380  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1381  */
1382 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1383 				   struct amdgpu_vm *vm,
1384 				   struct amdgpu_bo_va_mapping *mapping,
1385 				   struct dma_fence *fence)
1386 {
1387 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1388 		amdgpu_vm_add_prt_cb(adev, fence);
1389 	kfree(mapping);
1390 }
1391 
1392 /**
1393  * amdgpu_vm_prt_fini - finish all prt mappings
1394  *
1395  * @adev: amdgpu_device pointer
1396  * @vm: requested vm
1397  *
1398  * Register a cleanup callback to disable PRT support after VM dies.
1399  */
1400 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1401 {
1402 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1403 	struct dma_resv_iter cursor;
1404 	struct dma_fence *fence;
1405 
1406 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1407 		/* Add a callback for each fence in the reservation object */
1408 		amdgpu_vm_prt_get(adev);
1409 		amdgpu_vm_add_prt_cb(adev, fence);
1410 	}
1411 }
1412 
1413 /**
1414  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1415  *
1416  * @adev: amdgpu_device pointer
1417  * @vm: requested vm
1418  * @fence: optional resulting fence (unchanged if no work needed to be done
1419  * or if an error occurred)
1420  *
1421  * Make sure all freed BOs are cleared in the PT.
1422  * PTs have to be reserved and mutex must be locked!
1423  *
1424  * Returns:
1425  * 0 for success.
1426  *
1427  */
1428 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1429 			  struct amdgpu_vm *vm,
1430 			  struct dma_fence **fence)
1431 {
1432 	struct amdgpu_bo_va_mapping *mapping;
1433 	struct dma_fence *f = NULL;
1434 	struct amdgpu_sync sync;
1435 	int r;
1436 
1437 
1438 	/*
1439 	 * Implicitly sync to command submissions in the same VM before
1440 	 * unmapping.
1441 	 */
1442 	amdgpu_sync_create(&sync);
1443 	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1444 			     AMDGPU_SYNC_EQ_OWNER, vm);
1445 	if (r)
1446 		goto error_free;
1447 
1448 	while (!list_empty(&vm->freed)) {
1449 		mapping = list_first_entry(&vm->freed,
1450 			struct amdgpu_bo_va_mapping, list);
1451 		list_del(&mapping->list);
1452 
1453 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1454 					   &sync, mapping->start, mapping->last,
1455 					   0, 0, 0, NULL, NULL, &f);
1456 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1457 		if (r) {
1458 			dma_fence_put(f);
1459 			goto error_free;
1460 		}
1461 	}
1462 
1463 	if (fence && f) {
1464 		dma_fence_put(*fence);
1465 		*fence = f;
1466 	} else {
1467 		dma_fence_put(f);
1468 	}
1469 
1470 error_free:
1471 	amdgpu_sync_free(&sync);
1472 	return r;
1473 
1474 }
1475 
1476 /**
1477  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1478  *
1479  * @adev: amdgpu_device pointer
1480  * @vm: requested vm
1481  * @ticket: optional reservation ticket used to reserve the VM
1482  *
1483  * Make sure all BOs which are moved are updated in the PTs.
1484  *
1485  * Returns:
1486  * 0 for success.
1487  *
1488  * PTs have to be reserved!
1489  */
1490 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1491 			   struct amdgpu_vm *vm,
1492 			   struct ww_acquire_ctx *ticket)
1493 {
1494 	struct amdgpu_bo_va *bo_va;
1495 	struct dma_resv *resv;
1496 	bool clear, unlock;
1497 	int r;
1498 
1499 	spin_lock(&vm->status_lock);
1500 	while (!list_empty(&vm->moved)) {
1501 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1502 					 base.vm_status);
1503 		spin_unlock(&vm->status_lock);
1504 
1505 		/* Per VM BOs never need to be cleared in the page tables */
1506 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1507 		if (r)
1508 			return r;
1509 		spin_lock(&vm->status_lock);
1510 	}
1511 
1512 	while (!list_empty(&vm->invalidated)) {
1513 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1514 					 base.vm_status);
1515 		resv = bo_va->base.bo->tbo.base.resv;
1516 		spin_unlock(&vm->status_lock);
1517 
1518 		/* Try to reserve the BO to avoid clearing its ptes */
1519 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1520 			clear = false;
1521 			unlock = true;
1522 		/* The caller is already holding the reservation lock */
1523 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1524 			clear = false;
1525 			unlock = false;
1526 		/* Somebody else is using the BO right now */
1527 		} else {
1528 			clear = true;
1529 			unlock = false;
1530 		}
1531 
1532 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1533 
1534 		if (unlock)
1535 			dma_resv_unlock(resv);
1536 		if (r)
1537 			return r;
1538 
1539 		/* Remember evicted DMABuf imports in compute VMs for later
1540 		 * validation
1541 		 */
1542 		if (vm->is_compute_context &&
1543 		    bo_va->base.bo->tbo.base.import_attach &&
1544 		    (!bo_va->base.bo->tbo.resource ||
1545 		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1546 			amdgpu_vm_bo_evicted_user(&bo_va->base);
1547 
1548 		spin_lock(&vm->status_lock);
1549 	}
1550 	spin_unlock(&vm->status_lock);
1551 
1552 	return 0;
1553 }
1554 
1555 /**
1556  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1557  *
1558  * @adev: amdgpu_device pointer
1559  * @vm: requested vm
1560  * @flush_type: flush type
1561  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1562  *
1563  * Flush TLB if needed for a compute VM.
1564  *
1565  * Returns:
1566  * 0 for success.
1567  */
1568 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1569 				struct amdgpu_vm *vm,
1570 				uint32_t flush_type,
1571 				uint32_t xcc_mask)
1572 {
1573 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1574 	bool all_hub = false;
1575 	int xcc = 0, r = 0;
1576 
1577 	WARN_ON_ONCE(!vm->is_compute_context);
1578 
1579 	/*
1580 	 * It can be that we race and lose here, but that is extremely unlikely
1581 	 * and the worst thing which could happen is that we flush the changes
1582 	 * into the TLB once more which is harmless.
1583 	 */
1584 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1585 		return 0;
1586 
1587 	if (adev->family == AMDGPU_FAMILY_AI ||
1588 	    adev->family == AMDGPU_FAMILY_RV)
1589 		all_hub = true;
1590 
1591 	for_each_inst(xcc, xcc_mask) {
1592 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1593 						   all_hub, xcc);
1594 		if (r)
1595 			break;
1596 	}
1597 	return r;
1598 }
1599 
1600 /**
1601  * amdgpu_vm_bo_add - add a bo to a specific vm
1602  *
1603  * @adev: amdgpu_device pointer
1604  * @vm: requested vm
1605  * @bo: amdgpu buffer object
1606  *
1607  * Add @bo into the requested vm and to the list of bos
1608  * associated with the vm.
1609  *
1610  * Returns:
1611  * Newly added bo_va or NULL for failure
1612  *
1613  * Object has to be reserved!
1614  */
1615 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1616 				      struct amdgpu_vm *vm,
1617 				      struct amdgpu_bo *bo)
1618 {
1619 	struct amdgpu_bo_va *bo_va;
1620 
1621 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1622 	if (!bo_va)
1623 		return NULL;
1624 
1625 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1626 
1627 	bo_va->ref_count = 1;
1628 	bo_va->last_pt_update = dma_fence_get_stub();
1629 	INIT_LIST_HEAD(&bo_va->valids);
1630 	INIT_LIST_HEAD(&bo_va->invalids);
1631 
1632 	if (!bo)
1633 		return bo_va;
1634 
1635 	dma_resv_assert_held(bo->tbo.base.resv);
1636 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1637 		bo_va->is_xgmi = true;
1638 		/* Power up XGMI if it can be potentially used */
1639 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1640 	}
1641 
1642 	return bo_va;
1643 }
1644 
1645 
1646 /**
1647  * amdgpu_vm_bo_insert_map - insert a new mapping
1648  *
1649  * @adev: amdgpu_device pointer
1650  * @bo_va: bo_va to store the address
1651  * @mapping: the mapping to insert
1652  *
1653  * Insert a new mapping into all structures.
1654  */
1655 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1656 				    struct amdgpu_bo_va *bo_va,
1657 				    struct amdgpu_bo_va_mapping *mapping)
1658 {
1659 	struct amdgpu_vm *vm = bo_va->base.vm;
1660 	struct amdgpu_bo *bo = bo_va->base.bo;
1661 
1662 	mapping->bo_va = bo_va;
1663 	list_add(&mapping->list, &bo_va->invalids);
1664 	amdgpu_vm_it_insert(mapping, &vm->va);
1665 
1666 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1667 		amdgpu_vm_prt_get(adev);
1668 
1669 	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1670 		amdgpu_vm_bo_moved(&bo_va->base);
1671 
1672 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1673 }
1674 
1675 /* Validate operation parameters to prevent potential abuse */
1676 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1677 					  struct amdgpu_bo *bo,
1678 					  uint64_t saddr,
1679 					  uint64_t offset,
1680 					  uint64_t size)
1681 {
1682 	uint64_t tmp, lpfn;
1683 
1684 	if (saddr & AMDGPU_GPU_PAGE_MASK
1685 	    || offset & AMDGPU_GPU_PAGE_MASK
1686 	    || size & AMDGPU_GPU_PAGE_MASK)
1687 		return -EINVAL;
1688 
1689 	if (check_add_overflow(saddr, size, &tmp)
1690 	    || check_add_overflow(offset, size, &tmp)
1691 	    || size == 0 /* which also leads to end < begin */)
1692 		return -EINVAL;
1693 
1694 	/* make sure object fit at this offset */
1695 	if (bo && offset + size > amdgpu_bo_size(bo))
1696 		return -EINVAL;
1697 
1698 	/* Ensure last pfn not exceed max_pfn */
1699 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1700 	if (lpfn >= adev->vm_manager.max_pfn)
1701 		return -EINVAL;
1702 
1703 	return 0;
1704 }
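
/*
 * Example (illustrative): with a 4K GPU page size, saddr = 0x100000,
 * offset = 0 and size = 0x2000 pass the checks above, while an unaligned
 * saddr = 0x100800, a zero size or a mapping reaching beyond
 * amdgpu_bo_size(bo) are all rejected with -EINVAL.
 */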
1705 
1706 /**
1707  * amdgpu_vm_bo_map - map bo inside a vm
1708  *
1709  * @adev: amdgpu_device pointer
1710  * @bo_va: bo_va to store the address
1711  * @saddr: where to map the BO
1712  * @offset: requested offset in the BO
1713  * @size: BO size in bytes
1714  * @flags: attributes of pages (read/write/valid/etc.)
1715  *
1716  * Add a mapping of the BO at the specified addr into the VM.
1717  *
1718  * Returns:
1719  * 0 for success, error for failure.
1720  *
1721  * Object has to be reserved and unreserved outside!
1722  */
1723 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1724 		     struct amdgpu_bo_va *bo_va,
1725 		     uint64_t saddr, uint64_t offset,
1726 		     uint64_t size, uint64_t flags)
1727 {
1728 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1729 	struct amdgpu_bo *bo = bo_va->base.bo;
1730 	struct amdgpu_vm *vm = bo_va->base.vm;
1731 	uint64_t eaddr;
1732 	int r;
1733 
1734 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1735 	if (r)
1736 		return r;
1737 
1738 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1739 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1740 
1741 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1742 	if (tmp) {
1743 		/* bo and tmp overlap, invalid addr */
1744 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1745 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1746 			tmp->start, tmp->last + 1);
1747 		return -EINVAL;
1748 	}
1749 
1750 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1751 	if (!mapping)
1752 		return -ENOMEM;
1753 
1754 	mapping->start = saddr;
1755 	mapping->last = eaddr;
1756 	mapping->offset = offset;
1757 	mapping->flags = flags;
1758 
1759 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1760 
1761 	return 0;
1762 }
1763 
1764 /**
1765  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1766  *
1767  * @adev: amdgpu_device pointer
1768  * @bo_va: bo_va to store the address
1769  * @saddr: where to map the BO
1770  * @offset: requested offset in the BO
1771  * @size: BO size in bytes
1772  * @flags: attributes of pages (read/write/valid/etc.)
1773  *
1774  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1775  * mappings as we do so.
1776  *
1777  * Returns:
1778  * 0 for success, error for failure.
1779  *
1780  * Object has to be reserved and unreserved outside!
1781  */
1782 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1783 			     struct amdgpu_bo_va *bo_va,
1784 			     uint64_t saddr, uint64_t offset,
1785 			     uint64_t size, uint64_t flags)
1786 {
1787 	struct amdgpu_bo_va_mapping *mapping;
1788 	struct amdgpu_bo *bo = bo_va->base.bo;
1789 	uint64_t eaddr;
1790 	int r;
1791 
1792 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1793 	if (r)
1794 		return r;
1795 
1796 	/* Allocate all the needed memory */
1797 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1798 	if (!mapping)
1799 		return -ENOMEM;
1800 
1801 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1802 	if (r) {
1803 		kfree(mapping);
1804 		return r;
1805 	}
1806 
1807 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1808 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1809 
1810 	mapping->start = saddr;
1811 	mapping->last = eaddr;
1812 	mapping->offset = offset;
1813 	mapping->flags = flags;
1814 
1815 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1816 
1817 	return 0;
1818 }
1819 
1820 /**
1821  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1822  *
1823  * @adev: amdgpu_device pointer
1824  * @bo_va: bo_va to remove the address from
1825  * @saddr: where the BO is mapped
1826  *
1827  * Remove a mapping of the BO at the specified addr from the VM.
1828  *
1829  * Returns:
1830  * 0 for success, error for failure.
1831  *
1832  * Object has to be reserved and unreserved outside!
1833  */
1834 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1835 		       struct amdgpu_bo_va *bo_va,
1836 		       uint64_t saddr)
1837 {
1838 	struct amdgpu_bo_va_mapping *mapping;
1839 	struct amdgpu_vm *vm = bo_va->base.vm;
1840 	bool valid = true;
1841 
1842 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1843 
1844 	list_for_each_entry(mapping, &bo_va->valids, list) {
1845 		if (mapping->start == saddr)
1846 			break;
1847 	}
1848 
1849 	if (&mapping->list == &bo_va->valids) {
1850 		valid = false;
1851 
1852 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1853 			if (mapping->start == saddr)
1854 				break;
1855 		}
1856 
1857 		if (&mapping->list == &bo_va->invalids)
1858 			return -ENOENT;
1859 	}
1860 
1861 	list_del(&mapping->list);
1862 	amdgpu_vm_it_remove(mapping, &vm->va);
1863 	mapping->bo_va = NULL;
1864 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1865 
1866 	if (valid)
1867 		list_add(&mapping->list, &vm->freed);
1868 	else
1869 		amdgpu_vm_free_mapping(adev, vm, mapping,
1870 				       bo_va->last_pt_update);
1871 
1872 	return 0;
1873 }
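
/*
 * Usage sketch (hypothetical): "saddr" has to be the exact start address
 * of an existing mapping; an address that merely falls inside a mapping
 * is rejected with -ENOENT:
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, saddr);
 *	if (r == -ENOENT)
 *		DRM_DEBUG("no mapping starts at 0x%llx\n", saddr);
 */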
1874 
1875 /**
1876  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1877  *
1878  * @adev: amdgpu_device pointer
1879  * @vm: VM structure to use
1880  * @saddr: start of the range
1881  * @size: size of the range
1882  *
1883  * Remove all mappings in a range, split them as appropriate.
1884  *
1885  * Returns:
1886  * 0 for success, error for failure.
1887  */
1888 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1889 				struct amdgpu_vm *vm,
1890 				uint64_t saddr, uint64_t size)
1891 {
1892 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1893 	LIST_HEAD(removed);
1894 	uint64_t eaddr;
1895 	int r;
1896 
1897 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1898 	if (r)
1899 		return r;
1900 
1901 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1902 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1903 
1904 	/* Allocate all the needed memory */
1905 	before = kzalloc(sizeof(*before), GFP_KERNEL);
1906 	if (!before)
1907 		return -ENOMEM;
1908 	INIT_LIST_HEAD(&before->list);
1909 
1910 	after = kzalloc(sizeof(*after), GFP_KERNEL);
1911 	if (!after) {
1912 		kfree(before);
1913 		return -ENOMEM;
1914 	}
1915 	INIT_LIST_HEAD(&after->list);
1916 
1917 	/* Now gather all removed mappings */
1918 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1919 	while (tmp) {
1920 		/* Remember mapping split at the start */
1921 		if (tmp->start < saddr) {
1922 			before->start = tmp->start;
1923 			before->last = saddr - 1;
1924 			before->offset = tmp->offset;
1925 			before->flags = tmp->flags;
1926 			before->bo_va = tmp->bo_va;
1927 			list_add(&before->list, &tmp->bo_va->invalids);
1928 		}
1929 
1930 		/* Remember mapping split at the end */
1931 		if (tmp->last > eaddr) {
1932 			after->start = eaddr + 1;
1933 			after->last = tmp->last;
1934 			after->offset = tmp->offset;
1935 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1936 			after->flags = tmp->flags;
1937 			after->bo_va = tmp->bo_va;
1938 			list_add(&after->list, &tmp->bo_va->invalids);
1939 		}
1940 
1941 		list_del(&tmp->list);
1942 		list_add(&tmp->list, &removed);
1943 
1944 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1945 	}
1946 
1947 	/* And free them up */
1948 	list_for_each_entry_safe(tmp, next, &removed, list) {
1949 		amdgpu_vm_it_remove(tmp, &vm->va);
1950 		list_del(&tmp->list);
1951 
1952 		if (tmp->start < saddr)
1953 			tmp->start = saddr;
1954 		if (tmp->last > eaddr)
1955 			tmp->last = eaddr;
1956 
1957 		tmp->bo_va = NULL;
1958 		list_add(&tmp->list, &vm->freed);
1959 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1960 	}
1961 
1962 	/* Insert partial mapping before the range */
1963 	if (!list_empty(&before->list)) {
1964 		struct amdgpu_bo *bo = before->bo_va->base.bo;
1965 
1966 		amdgpu_vm_it_insert(before, &vm->va);
1967 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
1968 			amdgpu_vm_prt_get(adev);
1969 
1970 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1971 		    !before->bo_va->base.moved)
1972 			amdgpu_vm_bo_moved(&before->bo_va->base);
1973 	} else {
1974 		kfree(before);
1975 	}
1976 
1977 	/* Insert partial mapping after the range */
1978 	if (!list_empty(&after->list)) {
1979 		struct amdgpu_bo *bo = after->bo_va->base.bo;
1980 
1981 		amdgpu_vm_it_insert(after, &vm->va);
1982 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
1983 			amdgpu_vm_prt_get(adev);
1984 
1985 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
1986 		    !after->bo_va->base.moved)
1987 			amdgpu_vm_bo_moved(&after->bo_va->base);
1988 	} else {
1989 		kfree(after);
1990 	}
1991 
1992 	return 0;
1993 }
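
/*
 * Illustration (hypothetical byte addresses): punching a hole into the
 * middle of an existing mapping splits it in two. If a mapping covers
 * [A, B] and the cleared range covers [S, E] with A < S and E < B, the
 * "before" node keeps [A, S - 1], the "after" node keeps [E + 1, B],
 * and after->offset is advanced so it stays consistent with the
 * original mapping:
 *
 *	r = amdgpu_vm_bo_clear_mappings(adev, vm, S, E - S + 1);
 */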
1994 
1995 /**
1996  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1997  *
1998  * @vm: the requested VM
1999  * @addr: the address
2000  *
2001  * Find a mapping by its address.
2002  *
2003  * Returns:
2004  * The amdgpu_bo_va_mapping matching addr, or NULL if no mapping is found.
2005  */
2007 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2008 							 uint64_t addr)
2009 {
2010 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2011 }
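
/*
 * Usage sketch (hypothetical): find out which BO backs a given GPU
 * address. The interval tree is indexed in GPU pages, so byte addresses
 * have to be converted first:
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (mapping && mapping->bo_va)
 *		bo = mapping->bo_va->base.bo;
 */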
2012 
2013 /**
2014  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2015  *
2016  * @vm: the requested vm
2017  * @ticket: CS ticket
2018  *
2019  * Trace all mappings of BOs reserved during a command submission.
2020  */
2021 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2022 {
2023 	struct amdgpu_bo_va_mapping *mapping;
2024 
2025 	if (!trace_amdgpu_vm_bo_cs_enabled())
2026 		return;
2027 
2028 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2029 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2030 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2031 			struct amdgpu_bo *bo;
2032 
2033 			bo = mapping->bo_va->base.bo;
2034 			if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket)
2036 				continue;
2037 		}
2038 
2039 		trace_amdgpu_vm_bo_cs(mapping);
2040 	}
2041 }
2042 
2043 /**
2044  * amdgpu_vm_bo_del - remove a bo from a specific vm
2045  *
2046  * @adev: amdgpu_device pointer
2047  * @bo_va: requested bo_va
2048  *
2049  * Remove @bo_va->bo from the requested vm.
2050  *
2051  * Object has to be reserved!
2052  */
2053 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2054 		      struct amdgpu_bo_va *bo_va)
2055 {
2056 	struct amdgpu_bo_va_mapping *mapping, *next;
2057 	struct amdgpu_bo *bo = bo_va->base.bo;
2058 	struct amdgpu_vm *vm = bo_va->base.vm;
2059 	struct amdgpu_vm_bo_base **base;
2060 
2061 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2062 
2063 	if (bo) {
2064 		dma_resv_assert_held(bo->tbo.base.resv);
2065 		if (amdgpu_vm_is_bo_always_valid(vm, bo))
2066 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
2067 
2068 		for (base = &bo_va->base.bo->vm_bo; *base;
2069 		     base = &(*base)->next) {
2070 			if (*base != &bo_va->base)
2071 				continue;
2072 
2073 			*base = bo_va->base.next;
2074 			break;
2075 		}
2076 	}
2077 
2078 	spin_lock(&vm->status_lock);
2079 	list_del(&bo_va->base.vm_status);
2080 	spin_unlock(&vm->status_lock);
2081 
2082 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2083 		list_del(&mapping->list);
2084 		amdgpu_vm_it_remove(mapping, &vm->va);
2085 		mapping->bo_va = NULL;
2086 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2087 		list_add(&mapping->list, &vm->freed);
2088 	}
2089 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2090 		list_del(&mapping->list);
2091 		amdgpu_vm_it_remove(mapping, &vm->va);
2092 		amdgpu_vm_free_mapping(adev, vm, mapping,
2093 				       bo_va->last_pt_update);
2094 	}
2095 
2096 	dma_fence_put(bo_va->last_pt_update);
2097 
2098 	if (bo && bo_va->is_xgmi)
2099 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2100 
2101 	kfree(bo_va);
2102 }
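
/*
 * Usage sketch (hypothetical, loosely following other callers in this
 * driver): both the VM root PD and the BO have to be locked before the
 * bo_va can be deleted, which drm_exec handles including contention:
 *
 *	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = drm_exec_lock_obj(&exec, &bo->tbo.base);
 *		drm_exec_retry_on_contention(&exec);
 *		if (!r)
 *			r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	if (!r)
 *		amdgpu_vm_bo_del(adev, bo_va);
 *	drm_exec_fini(&exec);
 */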
2103 
2104 /**
2105  * amdgpu_vm_evictable - check if we can evict a VM
2106  *
2107  * @bo: A page table of the VM.
2108  *
2109  * Check if it is possible to evict a VM.
2110  */
2111 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2112 {
2113 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2114 
2115 	/* Page tables of a destroyed VM can go away immediately */
2116 	if (!bo_base || !bo_base->vm)
2117 		return true;
2118 
2119 	/* Don't evict VM page tables while they are busy */
2120 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2121 		return false;
2122 
2123 	/* Try to block ongoing updates */
2124 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2125 		return false;
2126 
2127 	/* Don't evict VM page tables while they are updated */
2128 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2129 		amdgpu_vm_eviction_unlock(bo_base->vm);
2130 		return false;
2131 	}
2132 
2133 	bo_base->vm->evicting = true;
2134 	amdgpu_vm_eviction_unlock(bo_base->vm);
2135 	return true;
2136 }
2137 
2138 /**
2139  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2140  *
2141  * @adev: amdgpu_device pointer
2142  * @bo: amdgpu buffer object
2143  * @evicted: is the BO evicted
2144  *
2145  * Mark @bo as invalid.
2146  */
2147 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2148 			     struct amdgpu_bo *bo, bool evicted)
2149 {
2150 	struct amdgpu_vm_bo_base *bo_base;
2151 
2152 	/* shadow bo doesn't have bo base, its validation needs its parent */
2153 	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2154 		bo = bo->parent;
2155 
2156 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2157 		struct amdgpu_vm *vm = bo_base->vm;
2158 
2159 		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2160 			amdgpu_vm_bo_evicted(bo_base);
2161 			continue;
2162 		}
2163 
2164 		if (bo_base->moved)
2165 			continue;
2166 		bo_base->moved = true;
2167 
2168 		if (bo->tbo.type == ttm_bo_type_kernel)
2169 			amdgpu_vm_bo_relocated(bo_base);
2170 		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2171 			amdgpu_vm_bo_moved(bo_base);
2172 		else
2173 			amdgpu_vm_bo_invalidated(bo_base);
2174 	}
2175 }
2176 
2177 /**
2178  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2179  *
2180  * @vm_size: VM size
2181  *
2182  * Returns:
2183  * VM page table size as a power of two
2184  */
2185 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2186 {
2187 	/* Total bits covered by PD + PTs */
2188 	unsigned bits = ilog2(vm_size) + 18;
2189 
2190 	/* Make sure the PD is 4K in size up to 8GB address space.
2191 	 * Above that, split equally between PD and PTs. */
2192 	if (vm_size <= 8)
2193 		return (bits - 9);
2194 	else
2195 		return ((bits + 3) / 2);
2196 }
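
/*
 * Worked example for the formula above: for vm_size = 256 (GB),
 * bits = ilog2(256) + 18 = 26. Since vm_size > 8 the result is
 * (26 + 3) / 2 = 14, i.e. the covered bits are split roughly equally
 * between the page directory and the page tables.
 */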
2197 
2198 /**
2199  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2200  *
2201  * @adev: amdgpu_device pointer
2202  * @min_vm_size: the minimum vm size in GB if it's set to auto
2203  * @fragment_size_default: Default PTE fragment size
2204  * @max_level: max VMPT level
2205  * @max_bits: max address space size in bits
2206  */
2208 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2209 			   uint32_t fragment_size_default, unsigned max_level,
2210 			   unsigned max_bits)
2211 {
2212 	unsigned int max_size = 1 << (max_bits - 30);
2213 	unsigned int vm_size;
2214 	uint64_t tmp;
2215 
2216 	/* adjust vm size first */
2217 	if (amdgpu_vm_size != -1) {
2218 		vm_size = amdgpu_vm_size;
2219 		if (vm_size > max_size) {
2220 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2221 				 amdgpu_vm_size, max_size);
2222 			vm_size = max_size;
2223 		}
2224 	} else {
2225 		struct sysinfo si;
2226 		unsigned int phys_ram_gb;
2227 
2228 		/* Optimal VM size depends on the amount of physical
2229 		 * RAM available. Underlying requirements and
2230 		 * assumptions:
2231 		 *
2232 		 *  - Need to map system memory and VRAM from all GPUs
2233 		 *     - VRAM from other GPUs not known here
2234 		 *     - Assume VRAM <= system memory
2235 		 *  - On GFX8 and older, VM space can be segmented for
2236 		 *    different MTYPEs
2237 		 *  - Need to allow room for fragmentation, guard pages etc.
2238 		 *
2239 		 * This adds up to a rough guess of system memory x3.
2240 		 * Round up to power of two to maximize the available
2241 		 * VM size with the given page table size.
2242 		 */
2243 		si_meminfo(&si);
2244 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2245 			       (1 << 30) - 1) >> 30;
2246 		vm_size = roundup_pow_of_two(
2247 			clamp(phys_ram_gb * 3, min_vm_size, max_size));
2248 	}
2249 
2250 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2251 
2252 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2253 	if (amdgpu_vm_block_size != -1)
2254 		tmp >>= amdgpu_vm_block_size - 9;
2255 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2256 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2257 	switch (adev->vm_manager.num_level) {
2258 	case 3:
2259 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2260 		break;
2261 	case 2:
2262 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2263 		break;
2264 	case 1:
2265 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2266 		break;
2267 	default:
2268 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2269 	}
2270 	/* block size depends on vm size and hw setup */
2271 	if (amdgpu_vm_block_size != -1)
2272 		adev->vm_manager.block_size =
2273 			min((unsigned)amdgpu_vm_block_size, max_bits
2274 			    - AMDGPU_GPU_PAGE_SHIFT
2275 			    - 9 * adev->vm_manager.num_level);
2276 	else if (adev->vm_manager.num_level > 1)
2277 		adev->vm_manager.block_size = 9;
2278 	else
2279 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2280 
2281 	if (amdgpu_vm_fragment_size == -1)
2282 		adev->vm_manager.fragment_size = fragment_size_default;
2283 	else
2284 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2285 
2286 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2287 		 vm_size, adev->vm_manager.num_level + 1,
2288 		 adev->vm_manager.block_size,
2289 		 adev->vm_manager.fragment_size);
2290 }
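
/*
 * Worked example (illustrative numbers, no module parameter overrides):
 * for vm_size = 256 (GB), max_pfn = 256 << 18 = 2^26 GPU pages of 4 KB,
 * i.e. exactly 256 GB of address space. Then fls64(2^26) - 1 = 26, so
 * DIV_ROUND_UP(26, 9) - 1 = 2 and, assuming max_level >= 2, the VM uses
 * num_level = 2 (root_level PDB1) with the default block_size of 9.
 */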
2291 
2292 /**
2293  * amdgpu_vm_wait_idle - wait for the VM to become idle
2294  *
2295  * @vm: VM object to wait for
2296  * @timeout: timeout to wait for VM to become idle
2297  */
2298 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2299 {
2300 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2301 					DMA_RESV_USAGE_BOOKKEEP,
2302 					true, timeout);
2303 	if (timeout <= 0)
2304 		return timeout;
2305 
2306 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2307 }
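
/*
 * Usage sketch (hypothetical): wait up to one second for all page table
 * updates of a VM to complete; a return value of 0 means the timeout
 * expired while the VM was still busy:
 *
 *	long t = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(1000));
 *	if (t == 0)
 *		DRM_DEBUG("VM still busy\n");
 */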
2308 
2309 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2310 {
2311 	struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2312 
2313 	kfree(ti);
2314 }
2315 
2316 static inline struct amdgpu_vm *
2317 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2318 {
2319 	struct amdgpu_vm *vm;
2320 	unsigned long flags;
2321 
2322 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2323 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2324 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2325 
2326 	return vm;
2327 }
2328 
2329 /**
2330  * amdgpu_vm_put_task_info - drop a reference to the vm task_info ptr
2331  *
2332  * @task_info: task_info struct under discussion.
2333  *
2334  * Frees the vm task_info ptr at the last put.
2335  */
2336 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2337 {
2338 	kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2339 }
2340 
2341 /**
2342  * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2343  *
2344  * @vm: VM to get info from
2345  *
2346  * Returns the reference counted task_info structure, which must be
2347  * released again with amdgpu_vm_put_task_info().
2348  */
2349 struct amdgpu_task_info *
2350 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2351 {
2352 	struct amdgpu_task_info *ti = NULL;
2353 
2354 	if (vm) {
2355 		ti = vm->task_info;
2356 		kref_get(&vm->task_info->refcount);
2357 	}
2358 
2359 	return ti;
2360 }
2361 
2362 /**
2363  * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2364  *
2365  * @adev: amdgpu_device pointer
2366  * @pasid: PASID identifier for VM
2367  *
2368  * Returns the reference counted task_info structure, which must be
2369  * referenced down with amdgpu_vm_put_task_info.
2370  * released again with amdgpu_vm_put_task_info().
2371 struct amdgpu_task_info *
2372 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2373 {
2374 	return amdgpu_vm_get_task_info_vm(
2375 			amdgpu_vm_get_vm_from_pasid(adev, pasid));
2376 }
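
/*
 * Usage sketch (hypothetical): the returned task_info is reference
 * counted, so every successful get has to be balanced with a put:
 *
 *	struct amdgpu_task_info *ti;
 *
 *	ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *	if (ti) {
 *		DRM_DEBUG("fault in process %s pid %d\n",
 *			  ti->process_name, ti->pid);
 *		amdgpu_vm_put_task_info(ti);
 *	}
 */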
2377 
2378 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2379 {
2380 	vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2381 	if (!vm->task_info)
2382 		return -ENOMEM;
2383 
2384 	kref_init(&vm->task_info->refcount);
2385 	return 0;
2386 }
2387 
2388 /**
2389  * amdgpu_vm_set_task_info - Sets VMs task info.
2390  *
2391  * @vm: vm for which to set the info
2392  */
2393 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2394 {
2395 	if (!vm->task_info)
2396 		return;
2397 
2398 	if (vm->task_info->pid == current->pid)
2399 		return;
2400 
2401 	vm->task_info->pid = current->pid;
2402 	get_task_comm(vm->task_info->task_name, current);
2403 
2404 	if (current->group_leader->mm != current->mm)
2405 		return;
2406 
2407 	vm->task_info->tgid = current->group_leader->pid;
2408 	get_task_comm(vm->task_info->process_name, current->group_leader);
2409 }
2410 
2411 /**
2412  * amdgpu_vm_init - initialize a vm instance
2413  *
2414  * @adev: amdgpu_device pointer
2415  * @vm: requested vm
2416  * @xcp_id: GPU partition selection id
2417  *
2418  * Init @vm fields.
2419  *
2420  * Returns:
2421  * 0 for success, error for failure.
2422  */
2423 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2424 		   int32_t xcp_id)
2425 {
2426 	struct amdgpu_bo *root_bo;
2427 	struct amdgpu_bo_vm *root;
2428 	int r, i;
2429 
2430 	vm->va = RB_ROOT_CACHED;
2431 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2432 		vm->reserved_vmid[i] = false;
2433 	INIT_LIST_HEAD(&vm->evicted);
2434 	INIT_LIST_HEAD(&vm->evicted_user);
2435 	INIT_LIST_HEAD(&vm->relocated);
2436 	INIT_LIST_HEAD(&vm->moved);
2437 	INIT_LIST_HEAD(&vm->idle);
2438 	INIT_LIST_HEAD(&vm->invalidated);
2439 	spin_lock_init(&vm->status_lock);
2440 	INIT_LIST_HEAD(&vm->freed);
2441 	INIT_LIST_HEAD(&vm->done);
2442 	INIT_LIST_HEAD(&vm->pt_freed);
2443 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2444 	INIT_KFIFO(vm->faults);
2445 
2446 	r = amdgpu_vm_init_entities(adev, vm);
2447 	if (r)
2448 		return r;
2449 
2450 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2451 
2452 	vm->is_compute_context = false;
2453 
2454 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2455 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2456 
2457 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2458 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2459 	WARN_ONCE((vm->use_cpu_for_update &&
2460 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2461 		  "CPU update of VM recommended only for large BAR system\n");
2462 
2463 	if (vm->use_cpu_for_update)
2464 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2465 	else
2466 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2467 
2468 	vm->last_update = dma_fence_get_stub();
2469 	vm->last_unlocked = dma_fence_get_stub();
2470 	vm->last_tlb_flush = dma_fence_get_stub();
2471 	vm->generation = amdgpu_vm_generation(adev, NULL);
2472 
2473 	mutex_init(&vm->eviction_lock);
2474 	vm->evicting = false;
2475 	vm->tlb_fence_context = dma_fence_context_alloc(1);
2476 
2477 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2478 				false, &root, xcp_id);
2479 	if (r)
2480 		goto error_free_delayed;
2481 
2482 	root_bo = amdgpu_bo_ref(&root->bo);
2483 	r = amdgpu_bo_reserve(root_bo, true);
2484 	if (r) {
2485 		amdgpu_bo_unref(&root->shadow);
2486 		amdgpu_bo_unref(&root_bo);
2487 		goto error_free_delayed;
2488 	}
2489 
2490 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2491 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2492 	if (r)
2493 		goto error_free_root;
2494 
2495 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2496 	if (r)
2497 		goto error_free_root;
2498 
2499 	r = amdgpu_vm_create_task_info(vm);
2500 	if (r)
2501 		DRM_DEBUG("Failed to create task info for VM\n");
2502 
2503 	amdgpu_bo_unreserve(vm->root.bo);
2504 	amdgpu_bo_unref(&root_bo);
2505 
2506 	return 0;
2507 
2508 error_free_root:
2509 	amdgpu_vm_pt_free_root(adev, vm);
2510 	amdgpu_bo_unreserve(vm->root.bo);
2511 	amdgpu_bo_unref(&root_bo);
2512 
2513 error_free_delayed:
2514 	dma_fence_put(vm->last_tlb_flush);
2515 	dma_fence_put(vm->last_unlocked);
2516 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2517 	amdgpu_vm_fini_entities(vm);
2518 
2519 	return r;
2520 }
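
/*
 * Usage sketch (hypothetical, error handling elided): the driver creates
 * one VM per file descriptor and tears it down again on close; the
 * xcp_id of 0 is purely illustrative:
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, 0);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */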
2521 
2522 /**
2523  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2524  *
2525  * @adev: amdgpu_device pointer
2526  * @vm: requested vm
2527  *
2528  * This only works on GFX VMs that don't have any BOs added and no
2529  * page tables allocated yet.
2530  *
2531  * Changes the following VM parameters:
2532  * - use_cpu_for_update
2533  *
2534  * Also frees the shadow BO of the root page directory, which is no
2535  * longer needed once the VM is used for compute.
2537  *
2538  * Returns:
2539  * 0 for success, -errno for errors.
2540  */
2541 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2542 {
2543 	int r;
2544 
2545 	r = amdgpu_bo_reserve(vm->root.bo, true);
2546 	if (r)
2547 		return r;
2548 
2549 	/* Update VM state */
2550 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2551 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2552 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2553 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2554 	WARN_ONCE((vm->use_cpu_for_update &&
2555 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2556 		  "CPU update of VM recommended only for large BAR system\n");
2557 
2558 	if (vm->use_cpu_for_update) {
2559 		/* Sync with last SDMA update/clear before switching to CPU */
2560 		r = amdgpu_bo_sync_wait(vm->root.bo,
2561 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2562 		if (r)
2563 			goto unreserve_bo;
2564 
2565 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2566 		r = amdgpu_vm_pt_map_tables(adev, vm);
2567 		if (r)
2568 			goto unreserve_bo;
2569 
2570 	} else {
2571 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2572 	}
2573 
2574 	dma_fence_put(vm->last_update);
2575 	vm->last_update = dma_fence_get_stub();
2576 	vm->is_compute_context = true;
2577 
2578 	/* Free the shadow bo for compute VM */
2579 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2580 
2582 
2583 unreserve_bo:
2584 	amdgpu_bo_unreserve(vm->root.bo);
2585 	return r;
2586 }
2587 
2588 /**
2589  * amdgpu_vm_release_compute - release a compute vm
2590  * @adev: amdgpu_device pointer
2591  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2592  *
2593  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2594  * compute pasid from the vm. Compute should stop using the vm after this call.
2595  */
2596 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2597 {
2598 	amdgpu_vm_set_pasid(adev, vm, 0);
2599 	vm->is_compute_context = false;
2600 }
2601 
2602 /**
2603  * amdgpu_vm_fini - tear down a vm instance
2604  *
2605  * @adev: amdgpu_device pointer
2606  * @vm: requested vm
2607  *
2608  * Tear down @vm.
2609  * Unbind the VM and remove all BOs from the vm bo list.
2610  */
2611 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2612 {
2613 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2614 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2615 	struct amdgpu_bo *root;
2616 	unsigned long flags;
2617 	int i;
2618 
2619 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2620 
2621 	flush_work(&vm->pt_free_work);
2622 
2623 	root = amdgpu_bo_ref(vm->root.bo);
2624 	amdgpu_bo_reserve(root, true);
2625 	amdgpu_vm_put_task_info(vm->task_info);
2626 	amdgpu_vm_set_pasid(adev, vm, 0);
2627 	dma_fence_wait(vm->last_unlocked, false);
2628 	dma_fence_put(vm->last_unlocked);
2629 	dma_fence_wait(vm->last_tlb_flush, false);
2630 	/* Make sure that all fence callbacks have completed */
2631 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2632 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2633 	dma_fence_put(vm->last_tlb_flush);
2634 
2635 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2636 		if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2637 			amdgpu_vm_prt_fini(adev, vm);
2638 			prt_fini_needed = false;
2639 		}
2640 
2641 		list_del(&mapping->list);
2642 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2643 	}
2644 
2645 	amdgpu_vm_pt_free_root(adev, vm);
2646 	amdgpu_bo_unreserve(root);
2647 	amdgpu_bo_unref(&root);
2648 	WARN_ON(vm->root.bo);
2649 
2650 	amdgpu_vm_fini_entities(vm);
2651 
2652 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2653 		dev_err(adev->dev, "still active bo inside vm\n");
2655 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2656 					     &vm->va.rb_root, rb) {
2657 		/* Don't remove the mapping here, we don't want to trigger a
2658 		 * rebalance and the tree is about to be destroyed anyway.
2659 		 */
2660 		list_del(&mapping->list);
2661 		kfree(mapping);
2662 	}
2663 
2664 	dma_fence_put(vm->last_update);
2665 
2666 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2667 		if (vm->reserved_vmid[i]) {
2668 			amdgpu_vmid_free_reserved(adev, i);
2669 			vm->reserved_vmid[i] = false;
2670 		}
2671 	}
2672 
2673 	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2674 }
2675 
2676 /**
2677  * amdgpu_vm_manager_init - init the VM manager
2678  *
2679  * @adev: amdgpu_device pointer
2680  *
2681  * Initialize the VM manager structures
2682  */
2683 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2684 {
2685 	unsigned i;
2686 
2687 	/* Concurrent flushes are only possible starting with Vega10 and
2688 	 * are broken on Navi10 and Navi14.
2689 	 */
2690 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2691 					      adev->asic_type == CHIP_NAVI10 ||
2692 					      adev->asic_type == CHIP_NAVI14);
2693 	amdgpu_vmid_mgr_init(adev);
2694 
2695 	adev->vm_manager.fence_context =
2696 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2697 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2698 		adev->vm_manager.seqno[i] = 0;
2699 
2700 	spin_lock_init(&adev->vm_manager.prt_lock);
2701 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2702 
2703 	/* If not overridden by the user, compute VM tables are updated by
2704 	 * the CPU by default, but only on large BAR systems.
2705 	 */
2706 #ifdef CONFIG_X86_64
2707 	if (amdgpu_vm_update_mode == -1) {
2708 		/* For asic with VF MMIO access protection
2709 		 * avoid using CPU for VM table updates
2710 		 */
2711 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2712 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2713 			adev->vm_manager.vm_update_mode =
2714 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2715 		else
2716 			adev->vm_manager.vm_update_mode = 0;
2717 	} else
2718 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2719 #else
2720 	adev->vm_manager.vm_update_mode = 0;
2721 #endif
2722 
2723 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2724 }
2725 
2726 /**
2727  * amdgpu_vm_manager_fini - cleanup VM manager
2728  *
2729  * @adev: amdgpu_device pointer
2730  *
2731  * Cleanup the VM manager and free resources.
2732  */
2733 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2734 {
2735 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2736 	xa_destroy(&adev->vm_manager.pasids);
2737 
2738 	amdgpu_vmid_mgr_fini(adev);
2739 }
2740 
2741 /**
2742  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2743  *
2744  * @dev: drm device pointer
2745  * @data: drm_amdgpu_vm
2746  * @filp: drm file pointer
2747  *
2748  * Returns:
2749  * 0 for success, -errno for errors.
2750  */
2751 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2752 {
2753 	union drm_amdgpu_vm *args = data;
2754 	struct amdgpu_device *adev = drm_to_adev(dev);
2755 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2756 
2757 	/* No valid flags defined yet */
2758 	if (args->in.flags)
2759 		return -EINVAL;
2760 
2761 	switch (args->in.op) {
2762 	case AMDGPU_VM_OP_RESERVE_VMID:
2763 		/* We only need to reserve VMIDs from the gfxhub */
2764 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2765 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2766 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2767 		}
2768 
2769 		break;
2770 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2771 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2772 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2773 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2774 		}
2775 		break;
2776 	default:
2777 		return -EINVAL;
2778 	}
2779 
2780 	return 0;
2781 }
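
/*
 * Userspace sketch (hypothetical, assuming the libdrm helper
 * drmCommandWriteRead()): reserving and later releasing a VMID through
 * this ioctl; in.flags has to stay 0 since no flags are defined yet:
 *
 *	union drm_amdgpu_vm args = {};
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *	...
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */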
2782 
2783 /**
2784  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2785  * @adev: amdgpu device pointer
2786  * @pasid: PASID of the VM
2787  * @vmid: VMID, only used for GFX 9.4.3.
2788  * @node_id: Node_id received in IH cookie. Only applicable for
2789  *           GFX 9.4.3.
2790  * @addr: Address of the fault
2791  * @ts: Timestamp of the fault
2792  * @write_fault: true if it is a write fault, false if it is a read fault
2793  *
2794  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2795  * shouldn't be reported any more.
2796  */
2797 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2798 			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2799 			    bool write_fault)
2800 {
2801 	bool is_compute_context = false;
2802 	struct amdgpu_bo *root;
2803 	unsigned long irqflags;
2804 	uint64_t value, flags;
2805 	struct amdgpu_vm *vm;
2806 	int r;
2807 
2808 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2809 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2810 	if (vm) {
2811 		root = amdgpu_bo_ref(vm->root.bo);
2812 		is_compute_context = vm->is_compute_context;
2813 	} else {
2814 		root = NULL;
2815 	}
2816 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2817 
2818 	if (!root)
2819 		return false;
2820 
2821 	addr /= AMDGPU_GPU_PAGE_SIZE;
2822 
2823 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2824 	    node_id, addr, ts, write_fault)) {
2825 		amdgpu_bo_unref(&root);
2826 		return true;
2827 	}
2828 
2829 	r = amdgpu_bo_reserve(root, true);
2830 	if (r)
2831 		goto error_unref;
2832 
2833 	/* Double check that the VM still exists */
2834 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2835 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2836 	if (vm && vm->root.bo != root)
2837 		vm = NULL;
2838 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2839 	if (!vm)
2840 		goto error_unlock;
2841 
2842 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2843 		AMDGPU_PTE_SYSTEM;
2844 
2845 	if (is_compute_context) {
2846 		/* Intentionally setting invalid PTE flag
2847 		 * combination to force a no-retry-fault
2848 		 */
2849 		flags = AMDGPU_VM_NORETRY_FLAGS;
2850 		value = 0;
2851 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2852 		/* Redirect the access to the dummy page */
2853 		value = adev->dummy_page_addr;
2854 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2855 			AMDGPU_PTE_WRITEABLE;
2856 
2857 	} else {
2858 		/* Let the hw retry silently on the PTE */
2859 		value = 0;
2860 	}
2861 
2862 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2863 	if (r) {
2864 		pr_debug("failed to reserve fence slot (%d)\n", r);
2865 		goto error_unlock;
2866 	}
2867 
2868 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2869 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2870 	if (r)
2871 		goto error_unlock;
2872 
2873 	r = amdgpu_vm_update_pdes(adev, vm, true);
2874 
2875 error_unlock:
2876 	amdgpu_bo_unreserve(root);
2877 	if (r < 0)
2878 		DRM_ERROR("Can't handle page fault (%d)\n", r);
2879 
2880 error_unref:
2881 	amdgpu_bo_unref(&root);
2882 
2883 	return false;
2884 }
2885 
2886 #if defined(CONFIG_DEBUG_FS)
2887 /**
2888  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2889  *
2890  * @vm: Requested VM for printing BO info
2891  * @m: debugfs file
2892  *
2893  * Print BO information in debugfs file for the VM
2894  */
2895 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2896 {
2897 	struct amdgpu_bo_va *bo_va, *tmp;
2898 	u64 total_idle = 0;
2899 	u64 total_evicted = 0;
2900 	u64 total_relocated = 0;
2901 	u64 total_moved = 0;
2902 	u64 total_invalidated = 0;
2903 	u64 total_done = 0;
2904 	unsigned int total_idle_objs = 0;
2905 	unsigned int total_evicted_objs = 0;
2906 	unsigned int total_relocated_objs = 0;
2907 	unsigned int total_moved_objs = 0;
2908 	unsigned int total_invalidated_objs = 0;
2909 	unsigned int total_done_objs = 0;
2910 	unsigned int id = 0;
2911 
2912 	spin_lock(&vm->status_lock);
2913 	seq_puts(m, "\tIdle BOs:\n");
2914 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2915 		if (!bo_va->base.bo)
2916 			continue;
2917 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2918 	}
2919 	total_idle_objs = id;
2920 	id = 0;
2921 
2922 	seq_puts(m, "\tEvicted BOs:\n");
2923 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2924 		if (!bo_va->base.bo)
2925 			continue;
2926 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2927 	}
2928 	total_evicted_objs = id;
2929 	id = 0;
2930 
2931 	seq_puts(m, "\tRelocated BOs:\n");
2932 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2933 		if (!bo_va->base.bo)
2934 			continue;
2935 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2936 	}
2937 	total_relocated_objs = id;
2938 	id = 0;
2939 
2940 	seq_puts(m, "\tMoved BOs:\n");
2941 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2942 		if (!bo_va->base.bo)
2943 			continue;
2944 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2945 	}
2946 	total_moved_objs = id;
2947 	id = 0;
2948 
2949 	seq_puts(m, "\tInvalidated BOs:\n");
2950 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2951 		if (!bo_va->base.bo)
2952 			continue;
2953 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2954 	}
2955 	total_invalidated_objs = id;
2956 	id = 0;
2957 
2958 	seq_puts(m, "\tDone BOs:\n");
2959 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2960 		if (!bo_va->base.bo)
2961 			continue;
2962 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2963 	}
2964 	spin_unlock(&vm->status_lock);
2965 	total_done_objs = id;
2966 
2967 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2968 		   total_idle_objs);
2969 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2970 		   total_evicted_objs);
2971 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2972 		   total_relocated_objs);
2973 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2974 		   total_moved_objs);
2975 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2976 		   total_invalidated_objs);
2977 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2978 		   total_done_objs);
2979 }
2980 #endif
2981 
2982 /**
2983  * amdgpu_vm_update_fault_cache - update cached fault info.
2984  * @adev: amdgpu device pointer
2985  * @pasid: PASID of the VM
2986  * @addr: Address of the fault
2987  * @status: GPUVM fault status register
2988  * @vmhub: which vmhub got the fault
2989  *
2990  * Cache the fault info for later use by userspace in debugging.
2991  */
2992 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
2993 				  unsigned int pasid,
2994 				  uint64_t addr,
2995 				  uint32_t status,
2996 				  unsigned int vmhub)
2997 {
2998 	struct amdgpu_vm *vm;
2999 	unsigned long flags;
3000 
3001 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3002 
3003 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3004 	/* Don't update the fault cache if status is 0.  In the multiple
3005 	 * fault case, subsequent faults will return a 0 status which is
3006 	 * useless for userspace and replaces the useful fault status, so
3007 	 * only update if status is non-0.
3008 	 */
3009 	if (vm && status) {
3010 		vm->fault_info.addr = addr;
3011 		vm->fault_info.status = status;
3012 		/*
3013 		 * Update the fault information globally for later usage
3014 		 * when vm could be stale or freed.
3015 		 */
3016 		adev->vm_manager.fault_info.addr = addr;
3017 		adev->vm_manager.fault_info.vmhub = vmhub;
3018 		adev->vm_manager.fault_info.status = status;
3019 
3020 		if (AMDGPU_IS_GFXHUB(vmhub)) {
3021 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3022 			vm->fault_info.vmhub |=
3023 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3024 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
3025 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3026 			vm->fault_info.vmhub |=
3027 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3028 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
3029 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3030 			vm->fault_info.vmhub |=
3031 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3032 		} else {
3033 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3034 		}
3035 	}
3036 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3037 }
3038 
3039 /**
3040  * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3041  *
3042  * @vm: VM to test against.
3043  * @bo: BO to be tested.
3044  *
3045  * Returns true if the BO shares the dma_resv object with the root PD and is
3046  * always guaranteed to be valid inside the VM.
3047  */
3048 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3049 {
3050 	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3051 }
3052