xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision e7d759f31ca295d589f7420719c311870bb3166f)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_amdkfd.h"
41 #include "amdgpu_gmc.h"
42 #include "amdgpu_xgmi.h"
43 #include "amdgpu_dma_buf.h"
44 #include "amdgpu_res_cursor.h"
45 #include "kfd_svm.h"
46 
47 /**
48  * DOC: GPUVM
49  *
50  * GPUVM is the MMU functionality provided on the GPU.
51  * GPUVM is similar to the legacy GART on older asics, however
52  * rather than there being a single global GART table
53  * for the entire GPU, there can be multiple GPUVM page tables active
54  * at any given time.  The GPUVM page tables can contain a mix of
55  * VRAM pages and system pages (both memory and MMIO) and system pages
56  * can be mapped as snooped (cached system pages) or unsnooped
57  * (uncached system pages).
58  *
59  * Each active GPUVM has an ID associated with it and there is a page table
60  * linked with each VMID.  When executing a command buffer,
61  * the kernel tells the engine what VMID to use for that command
62  * buffer.  VMIDs are allocated dynamically as commands are submitted.
63  * The userspace drivers maintain their own address space and the kernel
64  * sets up their page tables accordingly when they submit their
65  * command buffers and a VMID is assigned.
66  * The hardware supports up to 16 active GPUVMs at any given time.
67  *
68  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
69  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
70  * as other features such as encryption and caching attributes.
71  *
72  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
73  * addition to an aperture managed by a page table, VMID 0 also has
74  * several other apertures.  There is an aperture for direct access to VRAM
75  * and there is a legacy AGP aperture which just forwards accesses directly
76  * to the matching system physical addresses (or IOVAs when an IOMMU is
77  * present).  These apertures provide direct access to these memories without
78  * incurring the overhead of a page table.  VMID 0 is used by the kernel
79  * driver for tasks like memory management.
80  *
81  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
82  * For user applications, each application can have their own unique GPUVM
83  * address space.  The application manages the address space and the kernel
84  * driver manages the GPUVM page tables for each process.  If a GPU client
85  * accesses an invalid page, it will generate a GPU page fault, similar to
86  * accessing an invalid page on a CPU.
87  */
88 
89 #define START(node) ((node)->start)
90 #define LAST(node) ((node)->last)
91 
92 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
93 		     START, LAST, static, amdgpu_vm_it)
94 
95 #undef START
96 #undef LAST
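
/*
 * Note: the INTERVAL_TREE_DEFINE() invocation above generates the static
 * helpers used throughout this file (amdgpu_vm_it_insert(),
 * amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next()), which index struct amdgpu_bo_va_mapping
 * objects by their inclusive [start, last] range of GPU pages.
 */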
97 
98 /**
99  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
100  */
101 struct amdgpu_prt_cb {
102 
103 	/**
104 	 * @adev: amdgpu device
105 	 */
106 	struct amdgpu_device *adev;
107 
108 	/**
109 	 * @cb: callback
110 	 */
111 	struct dma_fence_cb cb;
112 };
113 
114 /**
115  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
116  */
117 struct amdgpu_vm_tlb_seq_struct {
118 	/**
119 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
120 	 */
121 	struct amdgpu_vm *vm;
122 
123 	/**
124 	 * @cb: callback
125 	 */
126 	struct dma_fence_cb cb;
127 };
128 
129 /**
130  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
131  *
132  * @adev: amdgpu_device pointer
133  * @vm: amdgpu_vm pointer
134  * @pasid: the pasid the VM is using on this GPU
135  *
136  * Set the pasid this VM is using on this GPU, can also be used to remove the
137  * pasid by passing in zero.
138  *
139  */
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
141 			u32 pasid)
142 {
143 	int r;
144 
145 	if (vm->pasid == pasid)
146 		return 0;
147 
148 	if (vm->pasid) {
149 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
150 		if (r < 0)
151 			return r;
152 
153 		vm->pasid = 0;
154 	}
155 
156 	if (pasid) {
157 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
158 					GFP_KERNEL));
159 		if (r < 0)
160 			return r;
161 
162 		vm->pasid = pasid;
163 	}
164 
165 
166 	return 0;
167 }
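
/*
 * Illustrative sketch, not part of this file: with the mapping above in
 * place, a fault handler can translate a PASID reported by the hardware
 * back to its VM roughly like this (variable names are assumptions):
 *
 *	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
 *	vm = xa_load(&adev->vm_manager.pasids, pasid);
 *	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 */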
168 
169 /**
170  * amdgpu_vm_bo_evicted - vm_bo is evicted
171  *
172  * @vm_bo: vm_bo which is evicted
173  *
174  * State for PDs/PTs and per VM BOs which are not at the location they should
175  * be.
176  */
177 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
178 {
179 	struct amdgpu_vm *vm = vm_bo->vm;
180 	struct amdgpu_bo *bo = vm_bo->bo;
181 
182 	vm_bo->moved = true;
183 	spin_lock(&vm_bo->vm->status_lock);
184 	if (bo->tbo.type == ttm_bo_type_kernel)
185 		list_move(&vm_bo->vm_status, &vm->evicted);
186 	else
187 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
188 	spin_unlock(&vm_bo->vm->status_lock);
189 }
190 /**
191  * amdgpu_vm_bo_moved - vm_bo is moved
192  *
193  * @vm_bo: vm_bo which is moved
194  *
195  * State for per VM BOs which are moved, but whose change is not yet reflected
196  * in the page tables.
197  */
198 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
199 {
200 	spin_lock(&vm_bo->vm->status_lock);
201 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
202 	spin_unlock(&vm_bo->vm->status_lock);
203 }
204 
205 /**
206  * amdgpu_vm_bo_idle - vm_bo is idle
207  *
208  * @vm_bo: vm_bo which is now idle
209  *
210  * State for PDs/PTs and per VM BOs which have gone through the state machine
211  * and are now idle.
212  */
213 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
214 {
215 	spin_lock(&vm_bo->vm->status_lock);
216 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
217 	spin_unlock(&vm_bo->vm->status_lock);
218 	vm_bo->moved = false;
219 }
220 
221 /**
222  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
223  *
224  * @vm_bo: vm_bo which is now invalidated
225  *
226  * State for normal BOs which are invalidated and whose change is not yet
227  * reflected in the PTs.
228  */
229 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
230 {
231 	spin_lock(&vm_bo->vm->status_lock);
232 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
233 	spin_unlock(&vm_bo->vm->status_lock);
234 }
235 
236 /**
237  * amdgpu_vm_bo_relocated - vm_bo is relocated
238  *
239  * @vm_bo: vm_bo which is relocated
240  *
241  * State for PDs/PTs which need to update their parent PD.
242  * For the root PD, just move to idle state.
243  */
244 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
245 {
246 	if (vm_bo->bo->parent) {
247 		spin_lock(&vm_bo->vm->status_lock);
248 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
249 		spin_unlock(&vm_bo->vm->status_lock);
250 	} else {
251 		amdgpu_vm_bo_idle(vm_bo);
252 	}
253 }
254 
255 /**
256  * amdgpu_vm_bo_done - vm_bo is done
257  *
258  * @vm_bo: vm_bo which is now done
259  *
260  * State for normal BOs which are invalidated and whose change has already
261  * been applied to the PTs.
262  */
263 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
264 {
265 	spin_lock(&vm_bo->vm->status_lock);
266 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
267 	spin_unlock(&vm_bo->vm->status_lock);
268 }
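
/*
 * Summary of the vm_bo state machine implemented by the helpers above:
 * evicted BOs must be validated again, relocated PDs/PTs need their parent
 * PD updated, moved and invalidated BOs need their page table entries
 * rewritten, while idle and done BOs require no further processing until
 * something changes again.
 */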
269 
270 /**
271  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
272  * @vm: the VM which state machine to reset
273  *
274  * Move all vm_bo objects in the VM into a state where they will be updated
275  * again during validation.
276  */
277 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
278 {
279 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
280 
281 	spin_lock(&vm->status_lock);
282 	list_splice_init(&vm->done, &vm->invalidated);
283 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
284 		vm_bo->moved = true;
285 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
286 		struct amdgpu_bo *bo = vm_bo->bo;
287 
288 		vm_bo->moved = true;
289 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
290 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
291 		else if (bo->parent)
292 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
293 	}
294 	spin_unlock(&vm->status_lock);
295 }
296 
297 /**
298  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
299  *
300  * @base: base structure for tracking BO usage in a VM
301  * @vm: vm to which bo is to be added
302  * @bo: amdgpu buffer object
303  *
304  * Initialize a bo_va_base structure and add it to the appropriate lists
305  *
306  */
307 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
308 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
309 {
310 	base->vm = vm;
311 	base->bo = bo;
312 	base->next = NULL;
313 	INIT_LIST_HEAD(&base->vm_status);
314 
315 	if (!bo)
316 		return;
317 	base->next = bo->vm_bo;
318 	bo->vm_bo = base;
319 
320 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
321 		return;
322 
323 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
324 
325 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
326 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
327 		amdgpu_vm_bo_relocated(base);
328 	else
329 		amdgpu_vm_bo_idle(base);
330 
331 	if (bo->preferred_domains &
332 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
333 		return;
334 
335 	/*
336 	 * We checked all the prerequisites, but it looks like this per VM BO
337 	 * is currently evicted. Add the BO to the evicted list to make sure it
338 	 * is validated on next VM use to avoid a fault.
339 	 */
340 	amdgpu_vm_bo_evicted(base);
341 }
342 
343 /**
344  * amdgpu_vm_lock_pd - lock PD in drm_exec
345  *
346  * @vm: vm providing the BOs
347  * @exec: drm execution context
348  * @num_fences: number of extra fences to reserve
349  *
350  * Lock the VM root PD in the DRM execution context.
351  */
352 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
353 		      unsigned int num_fences)
354 {
355 	/* We need at least two fences for the VM PD/PT updates */
356 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
357 				    2 + num_fences);
358 }
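
/*
 * Illustrative sketch, not part of this file, of how callers typically pair
 * the helper above with drm_exec, retrying until the root PD is locked with
 * room for the extra fences:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 */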
359 
360 /**
361  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
362  *
363  * @adev: amdgpu device pointer
364  * @vm: vm providing the BOs
365  *
366  * Move all BOs to the end of LRU and remember their positions to put them
367  * together.
368  */
369 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
370 				struct amdgpu_vm *vm)
371 {
372 	spin_lock(&adev->mman.bdev.lru_lock);
373 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
374 	spin_unlock(&adev->mman.bdev.lru_lock);
375 }
376 
377 /* Create scheduler entities for page table updates */
378 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
379 				   struct amdgpu_vm *vm)
380 {
381 	int r;
382 
383 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
384 				  adev->vm_manager.vm_pte_scheds,
385 				  adev->vm_manager.vm_pte_num_scheds, NULL);
386 	if (r)
387 		goto error;
388 
389 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
390 				     adev->vm_manager.vm_pte_scheds,
391 				     adev->vm_manager.vm_pte_num_scheds, NULL);
392 
393 error:
394 	drm_sched_entity_destroy(&vm->immediate);
395 	return r;
396 }
397 
398 /* Destroy the entities for page table updates again */
399 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
400 {
401 	drm_sched_entity_destroy(&vm->immediate);
402 	drm_sched_entity_destroy(&vm->delayed);
403 }
404 
405 /**
406  * amdgpu_vm_generation - return the page table re-generation counter
407  * @adev: the amdgpu_device
408  * @vm: optional VM to check, might be NULL
409  *
410  * Returns a page table re-generation token to allow checking if submissions
411  * are still valid to use this VM. The VM parameter might be NULL in which case
412  * just the VRAM lost counter will be used.
413  */
414 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
415 {
416 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
417 
418 	if (!vm)
419 		return result;
420 
421 	result += vm->generation;
422 	/* Add one if the page tables will be re-generated on next CS */
423 	if (drm_sched_entity_error(&vm->delayed))
424 		++result;
425 
426 	return result;
427 }
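
/*
 * Layout of the token returned above: the upper 32 bits hold
 * adev->vram_lost_counter at the time of the call, the lower bits hold
 * vm->generation plus one when the delayed entity carries an error and the
 * page tables will be re-generated. Comparing a cached token against a fresh
 * amdgpu_vm_generation() result therefore tells whether submissions prepared
 * earlier are still valid for this VM.
 */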
428 
429 /**
430  * amdgpu_vm_validate_pt_bos - validate the page table BOs
431  *
432  * @adev: amdgpu device pointer
433  * @vm: vm providing the BOs
434  * @validate: callback to do the validation
435  * @param: parameter for the validation callback
436  *
437  * Validate the page table BOs on command submission if necessary.
438  *
439  * Returns:
440  * Validation result.
441  */
442 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
443 			      int (*validate)(void *p, struct amdgpu_bo *bo),
444 			      void *param)
445 {
446 	struct amdgpu_vm_bo_base *bo_base;
447 	struct amdgpu_bo *shadow;
448 	struct amdgpu_bo *bo;
449 	int r;
450 
451 	if (drm_sched_entity_error(&vm->delayed)) {
452 		++vm->generation;
453 		amdgpu_vm_bo_reset_state_machine(vm);
454 		amdgpu_vm_fini_entities(vm);
455 		r = amdgpu_vm_init_entities(adev, vm);
456 		if (r)
457 			return r;
458 	}
459 
460 	spin_lock(&vm->status_lock);
461 	while (!list_empty(&vm->evicted)) {
462 		bo_base = list_first_entry(&vm->evicted,
463 					   struct amdgpu_vm_bo_base,
464 					   vm_status);
465 		spin_unlock(&vm->status_lock);
466 
467 		bo = bo_base->bo;
468 		shadow = amdgpu_bo_shadowed(bo);
469 
470 		r = validate(param, bo);
471 		if (r)
472 			return r;
473 		if (shadow) {
474 			r = validate(param, shadow);
475 			if (r)
476 				return r;
477 		}
478 
479 		if (bo->tbo.type != ttm_bo_type_kernel) {
480 			amdgpu_vm_bo_moved(bo_base);
481 		} else {
482 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
483 			amdgpu_vm_bo_relocated(bo_base);
484 		}
485 		spin_lock(&vm->status_lock);
486 	}
487 	spin_unlock(&vm->status_lock);
488 
489 	amdgpu_vm_eviction_lock(vm);
490 	vm->evicting = false;
491 	amdgpu_vm_eviction_unlock(vm);
492 
493 	return 0;
494 }
495 
496 /**
497  * amdgpu_vm_ready - check VM is ready for updates
498  *
499  * @vm: VM to check
500  *
501  * Check if all VM PDs/PTs are ready for updates
502  *
503  * Returns:
504  * True if VM is not evicting.
505  */
506 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
507 {
508 	bool empty;
509 	bool ret;
510 
511 	amdgpu_vm_eviction_lock(vm);
512 	ret = !vm->evicting;
513 	amdgpu_vm_eviction_unlock(vm);
514 
515 	spin_lock(&vm->status_lock);
516 	empty = list_empty(&vm->evicted);
517 	spin_unlock(&vm->status_lock);
518 
519 	return ret && empty;
520 }
521 
522 /**
523  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
524  *
525  * @adev: amdgpu_device pointer
526  */
527 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
528 {
529 	const struct amdgpu_ip_block *ip_block;
530 	bool has_compute_vm_bug;
531 	struct amdgpu_ring *ring;
532 	int i;
533 
534 	has_compute_vm_bug = false;
535 
536 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
537 	if (ip_block) {
538 		/* Compute has a VM bug for GFX version < 7.
539 		 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
540 		if (ip_block->version->major <= 7)
541 			has_compute_vm_bug = true;
542 		else if (ip_block->version->major == 8)
543 			if (adev->gfx.mec_fw_version < 673)
544 				has_compute_vm_bug = true;
545 	}
546 
547 	for (i = 0; i < adev->num_rings; i++) {
548 		ring = adev->rings[i];
549 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
550 			/* only compute rings */
551 			ring->has_compute_vm_bug = has_compute_vm_bug;
552 		else
553 			ring->has_compute_vm_bug = false;
554 	}
555 }
556 
557 /**
558  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
559  *
560  * @ring: ring on which the job will be submitted
561  * @job: job to submit
562  *
563  * Returns:
564  * True if sync is needed.
565  */
566 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
567 				  struct amdgpu_job *job)
568 {
569 	struct amdgpu_device *adev = ring->adev;
570 	unsigned vmhub = ring->vm_hub;
571 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
572 
573 	if (job->vmid == 0)
574 		return false;
575 
576 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
577 		return true;
578 
579 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
580 		return true;
581 
582 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
583 		return true;
584 
585 	return false;
586 }
587 
588 /**
589  * amdgpu_vm_flush - hardware flush the vm
590  *
591  * @ring: ring to use for flush
592  * @job:  related job
593  * @need_pipe_sync: is pipe sync needed
594  *
595  * Emit a VM flush when it is necessary.
596  *
597  * Returns:
598  * 0 on success, errno otherwise.
599  */
600 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
601 		    bool need_pipe_sync)
602 {
603 	struct amdgpu_device *adev = ring->adev;
604 	unsigned vmhub = ring->vm_hub;
605 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
606 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
607 	bool spm_update_needed = job->spm_update_needed;
608 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
609 		job->gds_switch_needed;
610 	bool vm_flush_needed = job->vm_needs_flush;
611 	struct dma_fence *fence = NULL;
612 	bool pasid_mapping_needed = false;
613 	unsigned patch_offset = 0;
614 	int r;
615 
616 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
617 		gds_switch_needed = true;
618 		vm_flush_needed = true;
619 		pasid_mapping_needed = true;
620 		spm_update_needed = true;
621 	}
622 
623 	mutex_lock(&id_mgr->lock);
624 	if (id->pasid != job->pasid || !id->pasid_mapping ||
625 	    !dma_fence_is_signaled(id->pasid_mapping))
626 		pasid_mapping_needed = true;
627 	mutex_unlock(&id_mgr->lock);
628 
629 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
630 	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
631 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
632 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
633 		ring->funcs->emit_wreg;
634 
635 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
636 		return 0;
637 
638 	amdgpu_ring_ib_begin(ring);
639 	if (ring->funcs->init_cond_exec)
640 		patch_offset = amdgpu_ring_init_cond_exec(ring);
641 
642 	if (need_pipe_sync)
643 		amdgpu_ring_emit_pipeline_sync(ring);
644 
645 	if (vm_flush_needed) {
646 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
647 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
648 	}
649 
650 	if (pasid_mapping_needed)
651 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
652 
653 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
654 		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
655 
656 	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
657 	    gds_switch_needed) {
658 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
659 					    job->gds_size, job->gws_base,
660 					    job->gws_size, job->oa_base,
661 					    job->oa_size);
662 	}
663 
664 	if (vm_flush_needed || pasid_mapping_needed) {
665 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
666 		if (r)
667 			return r;
668 	}
669 
670 	if (vm_flush_needed) {
671 		mutex_lock(&id_mgr->lock);
672 		dma_fence_put(id->last_flush);
673 		id->last_flush = dma_fence_get(fence);
674 		id->current_gpu_reset_count =
675 			atomic_read(&adev->gpu_reset_counter);
676 		mutex_unlock(&id_mgr->lock);
677 	}
678 
679 	if (pasid_mapping_needed) {
680 		mutex_lock(&id_mgr->lock);
681 		id->pasid = job->pasid;
682 		dma_fence_put(id->pasid_mapping);
683 		id->pasid_mapping = dma_fence_get(fence);
684 		mutex_unlock(&id_mgr->lock);
685 	}
686 	dma_fence_put(fence);
687 
688 	if (ring->funcs->patch_cond_exec)
689 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
690 
691 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
692 	if (ring->funcs->emit_switch_buffer) {
693 		amdgpu_ring_emit_switch_buffer(ring);
694 		amdgpu_ring_emit_switch_buffer(ring);
695 	}
696 	amdgpu_ring_ib_end(ring);
697 	return 0;
698 }
699 
700 /**
701  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
702  *
703  * @vm: requested vm
704  * @bo: requested buffer object
705  *
706  * Find @bo inside the requested vm.
707  * Search inside the @bo's VM list for the requested vm.
708  * Returns the found bo_va or NULL if none is found
709  *
710  * Object has to be reserved!
711  *
712  * Returns:
713  * Found bo_va or NULL.
714  */
715 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
716 				       struct amdgpu_bo *bo)
717 {
718 	struct amdgpu_vm_bo_base *base;
719 
720 	for (base = bo->vm_bo; base; base = base->next) {
721 		if (base->vm != vm)
722 			continue;
723 
724 		return container_of(base, struct amdgpu_bo_va, base);
725 	}
726 	return NULL;
727 }
728 
729 /**
730  * amdgpu_vm_map_gart - Resolve gart mapping of addr
731  *
732  * @pages_addr: optional DMA address to use for lookup
733  * @addr: the unmapped addr
734  *
735  * Look up the physical address of the page that the pte resolves
736  * to.
737  *
738  * Returns:
739  * The pointer for the page table entry.
740  */
741 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
742 {
743 	uint64_t result;
744 
745 	/* page table offset */
746 	result = pages_addr[addr >> PAGE_SHIFT];
747 
748 	/* in case cpu page size != gpu page size */
749 	result |= addr & (~PAGE_MASK);
750 
751 	result &= 0xFFFFFFFFFFFFF000ULL;
752 
753 	return result;
754 }
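
/*
 * Illustrative example, assuming 64K CPU pages and 4K GPU pages: for
 * addr 0x26000 the lookup above returns the DMA address of CPU page 2 plus
 * the 0x6000 offset of the GPU page inside that CPU page; the final mask
 * only strips bits below the 4K GPU page size.
 */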
755 
756 /**
757  * amdgpu_vm_update_pdes - make sure that all directories are valid
758  *
759  * @adev: amdgpu_device pointer
760  * @vm: requested vm
761  * @immediate: submit immediately to the paging queue
762  *
763  * Makes sure all directories are up to date.
764  *
765  * Returns:
766  * 0 for success, error for failure.
767  */
768 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
769 			  struct amdgpu_vm *vm, bool immediate)
770 {
771 	struct amdgpu_vm_update_params params;
772 	struct amdgpu_vm_bo_base *entry;
773 	bool flush_tlb_needed = false;
774 	LIST_HEAD(relocated);
775 	int r, idx;
776 
777 	spin_lock(&vm->status_lock);
778 	list_splice_init(&vm->relocated, &relocated);
779 	spin_unlock(&vm->status_lock);
780 
781 	if (list_empty(&relocated))
782 		return 0;
783 
784 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
785 		return -ENODEV;
786 
787 	memset(&params, 0, sizeof(params));
788 	params.adev = adev;
789 	params.vm = vm;
790 	params.immediate = immediate;
791 
792 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
793 	if (r)
794 		goto error;
795 
796 	list_for_each_entry(entry, &relocated, vm_status) {
797 		/* vm_flush_needed after updating moved PDEs */
798 		flush_tlb_needed |= entry->moved;
799 
800 		r = amdgpu_vm_pde_update(&params, entry);
801 		if (r)
802 			goto error;
803 	}
804 
805 	r = vm->update_funcs->commit(&params, &vm->last_update);
806 	if (r)
807 		goto error;
808 
809 	if (flush_tlb_needed)
810 		atomic64_inc(&vm->tlb_seq);
811 
812 	while (!list_empty(&relocated)) {
813 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
814 					 vm_status);
815 		amdgpu_vm_bo_idle(entry);
816 	}
817 
818 error:
819 	drm_dev_exit(idx);
820 	return r;
821 }
822 
823 /**
824  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
825  * @fence: unused
826  * @cb: the callback structure
827  *
828  * Increments the tlb sequence to make sure that future CS execute a VM flush.
829  */
830 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
831 				 struct dma_fence_cb *cb)
832 {
833 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
834 
835 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
836 	atomic64_inc(&tlb_cb->vm->tlb_seq);
837 	kfree(tlb_cb);
838 }
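
/*
 * Consumers snapshot the sequence with amdgpu_vm_tlb_seq() and compare it
 * against the last value they flushed for in order to decide whether another
 * TLB invalidation is needed; see amdgpu_vm_flush_compute_tlb() below for an
 * example of this pattern.
 */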
839 
840 /**
841  * amdgpu_vm_update_range - update a range in the vm page table
842  *
843  * @adev: amdgpu_device pointer to use for commands
844  * @vm: the VM to update the range
845  * @immediate: immediate submission in a page fault
846  * @unlocked: unlocked invalidation during MM callback
847  * @flush_tlb: trigger tlb invalidation after update completed
848  * @allow_override: change MTYPE for local NUMA nodes
849  * @resv: fences we need to sync to
850  * @start: start of mapped range
851  * @last: last mapped entry
852  * @flags: flags for the entries
853  * @offset: offset into nodes and pages_addr
854  * @vram_base: base for vram mappings
855  * @res: ttm_resource to map
856  * @pages_addr: DMA addresses to use for mapping
857  * @fence: optional resulting fence
858  *
859  * Fill in the page table entries between @start and @last.
860  *
861  * Returns:
862  * 0 for success, negative error code for failure.
863  */
864 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
865 			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
866 			   struct dma_resv *resv, uint64_t start, uint64_t last,
867 			   uint64_t flags, uint64_t offset, uint64_t vram_base,
868 			   struct ttm_resource *res, dma_addr_t *pages_addr,
869 			   struct dma_fence **fence)
870 {
871 	struct amdgpu_vm_update_params params;
872 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
873 	struct amdgpu_res_cursor cursor;
874 	enum amdgpu_sync_mode sync_mode;
875 	int r, idx;
876 
877 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
878 		return -ENODEV;
879 
880 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
881 	if (!tlb_cb) {
882 		r = -ENOMEM;
883 		goto error_unlock;
884 	}
885 
886 	/* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2
887 	 * texture cache, so do a heavy-weight TLB flush unconditionally.
888 	 */
889 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
890 		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
891 
892 	/*
893 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
894 	 */
895 	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
896 
897 	memset(&params, 0, sizeof(params));
898 	params.adev = adev;
899 	params.vm = vm;
900 	params.immediate = immediate;
901 	params.pages_addr = pages_addr;
902 	params.unlocked = unlocked;
903 	params.allow_override = allow_override;
904 
905 	/* Implicitly sync to command submissions in the same VM before
906 	 * unmapping. Sync to moving fences before mapping.
907 	 */
908 	if (!(flags & AMDGPU_PTE_VALID))
909 		sync_mode = AMDGPU_SYNC_EQ_OWNER;
910 	else
911 		sync_mode = AMDGPU_SYNC_EXPLICIT;
912 
913 	amdgpu_vm_eviction_lock(vm);
914 	if (vm->evicting) {
915 		r = -EBUSY;
916 		goto error_free;
917 	}
918 
919 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
920 		struct dma_fence *tmp = dma_fence_get_stub();
921 
922 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
923 		swap(vm->last_unlocked, tmp);
924 		dma_fence_put(tmp);
925 	}
926 
927 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
928 	if (r)
929 		goto error_free;
930 
931 	amdgpu_res_first(pages_addr ? NULL : res, offset,
932 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
933 	while (cursor.remaining) {
934 		uint64_t tmp, num_entries, addr;
935 
936 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
937 		if (pages_addr) {
938 			bool contiguous = true;
939 
940 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
941 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
942 				uint64_t count;
943 
944 				contiguous = pages_addr[pfn + 1] ==
945 					pages_addr[pfn] + PAGE_SIZE;
946 
947 				tmp = num_entries /
948 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
949 				for (count = 2; count < tmp; ++count) {
950 					uint64_t idx = pfn + count;
951 
952 					if (contiguous != (pages_addr[idx] ==
953 					    pages_addr[idx - 1] + PAGE_SIZE))
954 						break;
955 				}
956 				if (!contiguous)
957 					count--;
958 				num_entries = count *
959 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
960 			}
961 
962 			if (!contiguous) {
963 				addr = cursor.start;
964 				params.pages_addr = pages_addr;
965 			} else {
966 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
967 				params.pages_addr = NULL;
968 			}
969 
970 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
971 			addr = vram_base + cursor.start;
972 		} else {
973 			addr = 0;
974 		}
975 
976 		tmp = start + num_entries;
977 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
978 		if (r)
979 			goto error_free;
980 
981 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
982 		start = tmp;
983 	}
984 
985 	r = vm->update_funcs->commit(&params, fence);
986 
987 	if (flush_tlb || params.table_freed) {
988 		tlb_cb->vm = vm;
989 		if (fence && *fence &&
990 		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
991 					   amdgpu_vm_tlb_seq_cb)) {
992 			dma_fence_put(vm->last_tlb_flush);
993 			vm->last_tlb_flush = dma_fence_get(*fence);
994 		} else {
995 			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
996 		}
997 		tlb_cb = NULL;
998 	}
999 
1000 error_free:
1001 	kfree(tlb_cb);
1002 
1003 error_unlock:
1004 	amdgpu_vm_eviction_unlock(vm);
1005 	drm_dev_exit(idx);
1006 	return r;
1007 }
1008 
1009 static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1010 				    struct amdgpu_mem_stats *stats)
1011 {
1012 	struct amdgpu_vm *vm = bo_va->base.vm;
1013 	struct amdgpu_bo *bo = bo_va->base.bo;
1014 
1015 	if (!bo)
1016 		return;
1017 
1018 	/*
1019 	 * For now ignore BOs which are currently locked and potentially
1020 	 * changing their location.
1021 	 */
1022 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
1023 	    !dma_resv_trylock(bo->tbo.base.resv))
1024 		return;
1025 
1026 	amdgpu_bo_get_memory(bo, stats);
1027 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
1028 		dma_resv_unlock(bo->tbo.base.resv);
1029 }
1030 
1031 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1032 			  struct amdgpu_mem_stats *stats)
1033 {
1034 	struct amdgpu_bo_va *bo_va, *tmp;
1035 
1036 	spin_lock(&vm->status_lock);
1037 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1038 		amdgpu_vm_bo_get_memory(bo_va, stats);
1039 
1040 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1041 		amdgpu_vm_bo_get_memory(bo_va, stats);
1042 
1043 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1044 		amdgpu_vm_bo_get_memory(bo_va, stats);
1045 
1046 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1047 		amdgpu_vm_bo_get_memory(bo_va, stats);
1048 
1049 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1050 		amdgpu_vm_bo_get_memory(bo_va, stats);
1051 
1052 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1053 		amdgpu_vm_bo_get_memory(bo_va, stats);
1054 	spin_unlock(&vm->status_lock);
1055 }
1056 
1057 /**
1058  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1059  *
1060  * @adev: amdgpu_device pointer
1061  * @bo_va: requested BO and VM object
1062  * @clear: if true clear the entries
1063  *
1064  * Fill in the page table entries for @bo_va.
1065  *
1066  * Returns:
1067  * 0 for success, -EINVAL for failure.
1068  */
1069 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1070 			bool clear)
1071 {
1072 	struct amdgpu_bo *bo = bo_va->base.bo;
1073 	struct amdgpu_vm *vm = bo_va->base.vm;
1074 	struct amdgpu_bo_va_mapping *mapping;
1075 	dma_addr_t *pages_addr = NULL;
1076 	struct ttm_resource *mem;
1077 	struct dma_fence **last_update;
1078 	bool flush_tlb = clear;
1079 	bool uncached;
1080 	struct dma_resv *resv;
1081 	uint64_t vram_base;
1082 	uint64_t flags;
1083 	int r;
1084 
1085 	if (clear || !bo) {
1086 		mem = NULL;
1087 		resv = vm->root.bo->tbo.base.resv;
1088 	} else {
1089 		struct drm_gem_object *obj = &bo->tbo.base;
1090 
1091 		resv = bo->tbo.base.resv;
1092 		if (obj->import_attach && bo_va->is_xgmi) {
1093 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1094 			struct drm_gem_object *gobj = dma_buf->priv;
1095 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1096 
1097 			if (abo->tbo.resource &&
1098 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1099 				bo = gem_to_amdgpu_bo(gobj);
1100 		}
1101 		mem = bo->tbo.resource;
1102 		if (mem && (mem->mem_type == TTM_PL_TT ||
1103 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1104 			pages_addr = bo->tbo.ttm->dma_address;
1105 	}
1106 
1107 	if (bo) {
1108 		struct amdgpu_device *bo_adev;
1109 
1110 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1111 
1112 		if (amdgpu_bo_encrypted(bo))
1113 			flags |= AMDGPU_PTE_TMZ;
1114 
1115 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1116 		vram_base = bo_adev->vm_manager.vram_base_offset;
1117 		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1118 	} else {
1119 		flags = 0x0;
1120 		vram_base = 0;
1121 		uncached = false;
1122 	}
1123 
1124 	if (clear || (bo && bo->tbo.base.resv ==
1125 		      vm->root.bo->tbo.base.resv))
1126 		last_update = &vm->last_update;
1127 	else
1128 		last_update = &bo_va->last_pt_update;
1129 
1130 	if (!clear && bo_va->base.moved) {
1131 		flush_tlb = true;
1132 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1133 
1134 	} else if (bo_va->cleared != clear) {
1135 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1136 	}
1137 
1138 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1139 		uint64_t update_flags = flags;
1140 
1141 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1142 		 * bits here, but just in case we filter the flags first.
1143 		 */
1144 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1145 			update_flags &= ~AMDGPU_PTE_READABLE;
1146 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1147 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1148 
1149 		/* Apply ASIC specific mapping flags */
1150 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1151 
1152 		trace_amdgpu_vm_bo_update(mapping);
1153 
1154 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1155 					   !uncached, resv, mapping->start, mapping->last,
1156 					   update_flags, mapping->offset,
1157 					   vram_base, mem, pages_addr,
1158 					   last_update);
1159 		if (r)
1160 			return r;
1161 	}
1162 
1163 	/* If the BO is not in its preferred location add it back to
1164 	 * the evicted list so that it gets validated again on the
1165 	 * next command submission.
1166 	 */
1167 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1168 		uint32_t mem_type = bo->tbo.resource->mem_type;
1169 
1170 		if (!(bo->preferred_domains &
1171 		      amdgpu_mem_type_to_domain(mem_type)))
1172 			amdgpu_vm_bo_evicted(&bo_va->base);
1173 		else
1174 			amdgpu_vm_bo_idle(&bo_va->base);
1175 	} else {
1176 		amdgpu_vm_bo_done(&bo_va->base);
1177 	}
1178 
1179 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1180 	bo_va->cleared = clear;
1181 	bo_va->base.moved = false;
1182 
1183 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1184 		list_for_each_entry(mapping, &bo_va->valids, list)
1185 			trace_amdgpu_vm_bo_mapping(mapping);
1186 	}
1187 
1188 	return 0;
1189 }
1190 
1191 /**
1192  * amdgpu_vm_update_prt_state - update the global PRT state
1193  *
1194  * @adev: amdgpu_device pointer
1195  */
1196 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1197 {
1198 	unsigned long flags;
1199 	bool enable;
1200 
1201 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1202 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1203 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1204 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1205 }
1206 
1207 /**
1208  * amdgpu_vm_prt_get - add a PRT user
1209  *
1210  * @adev: amdgpu_device pointer
1211  */
1212 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1213 {
1214 	if (!adev->gmc.gmc_funcs->set_prt)
1215 		return;
1216 
1217 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1218 		amdgpu_vm_update_prt_state(adev);
1219 }
1220 
1221 /**
1222  * amdgpu_vm_prt_put - drop a PRT user
1223  *
1224  * @adev: amdgpu_device pointer
1225  */
1226 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1227 {
1228 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1229 		amdgpu_vm_update_prt_state(adev);
1230 }
1231 
1232 /**
1233  * amdgpu_vm_prt_cb - callback for updating the PRT status
1234  *
1235  * @fence: fence for the callback
1236  * @_cb: the callback function
1237  */
1238 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1239 {
1240 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1241 
1242 	amdgpu_vm_prt_put(cb->adev);
1243 	kfree(cb);
1244 }
1245 
1246 /**
1247  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1248  *
1249  * @adev: amdgpu_device pointer
1250  * @fence: fence for the callback
1251  */
1252 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1253 				 struct dma_fence *fence)
1254 {
1255 	struct amdgpu_prt_cb *cb;
1256 
1257 	if (!adev->gmc.gmc_funcs->set_prt)
1258 		return;
1259 
1260 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1261 	if (!cb) {
1262 		/* Last resort when we are OOM */
1263 		if (fence)
1264 			dma_fence_wait(fence, false);
1265 
1266 		amdgpu_vm_prt_put(adev);
1267 	} else {
1268 		cb->adev = adev;
1269 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1270 						     amdgpu_vm_prt_cb))
1271 			amdgpu_vm_prt_cb(fence, &cb->cb);
1272 	}
1273 }
1274 
1275 /**
1276  * amdgpu_vm_free_mapping - free a mapping
1277  *
1278  * @adev: amdgpu_device pointer
1279  * @vm: requested vm
1280  * @mapping: mapping to be freed
1281  * @fence: fence of the unmap operation
1282  *
1283  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1284  */
1285 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1286 				   struct amdgpu_vm *vm,
1287 				   struct amdgpu_bo_va_mapping *mapping,
1288 				   struct dma_fence *fence)
1289 {
1290 	if (mapping->flags & AMDGPU_PTE_PRT)
1291 		amdgpu_vm_add_prt_cb(adev, fence);
1292 	kfree(mapping);
1293 }
1294 
1295 /**
1296  * amdgpu_vm_prt_fini - finish all prt mappings
1297  *
1298  * @adev: amdgpu_device pointer
1299  * @vm: requested vm
1300  *
1301  * Register a cleanup callback to disable PRT support after VM dies.
1302  */
1303 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1304 {
1305 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1306 	struct dma_resv_iter cursor;
1307 	struct dma_fence *fence;
1308 
1309 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1310 		/* Add a callback for each fence in the reservation object */
1311 		amdgpu_vm_prt_get(adev);
1312 		amdgpu_vm_add_prt_cb(adev, fence);
1313 	}
1314 }
1315 
1316 /**
1317  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1318  *
1319  * @adev: amdgpu_device pointer
1320  * @vm: requested vm
1321  * @fence: optional resulting fence (unchanged if no work needed to be done
1322  * or if an error occurred)
1323  *
1324  * Make sure all freed BOs are cleared in the PT.
1325  * PTs have to be reserved and mutex must be locked!
1326  *
1327  * Returns:
1328  * 0 for success.
1329  *
1330  */
1331 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1332 			  struct amdgpu_vm *vm,
1333 			  struct dma_fence **fence)
1334 {
1335 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1336 	struct amdgpu_bo_va_mapping *mapping;
1337 	uint64_t init_pte_value = 0;
1338 	struct dma_fence *f = NULL;
1339 	int r;
1340 
1341 	while (!list_empty(&vm->freed)) {
1342 		mapping = list_first_entry(&vm->freed,
1343 			struct amdgpu_bo_va_mapping, list);
1344 		list_del(&mapping->list);
1345 
1346 		if (vm->pte_support_ats &&
1347 		    mapping->start < AMDGPU_GMC_HOLE_START)
1348 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1349 
1350 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1351 					   resv, mapping->start, mapping->last,
1352 					   init_pte_value, 0, 0, NULL, NULL,
1353 					   &f);
1354 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1355 		if (r) {
1356 			dma_fence_put(f);
1357 			return r;
1358 		}
1359 	}
1360 
1361 	if (fence && f) {
1362 		dma_fence_put(*fence);
1363 		*fence = f;
1364 	} else {
1365 		dma_fence_put(f);
1366 	}
1367 
1368 	return 0;
1369 
1370 }
1371 
1372 /**
1373  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1374  *
1375  * @adev: amdgpu_device pointer
1376  * @vm: requested vm
1377  * @ticket: optional reservation ticket used to reserve the VM
1378  *
1379  * Make sure all BOs which are moved are updated in the PTs.
1380  *
1381  * Returns:
1382  * 0 for success.
1383  *
1384  * PTs have to be reserved!
1385  */
1386 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1387 			   struct amdgpu_vm *vm,
1388 			   struct ww_acquire_ctx *ticket)
1389 {
1390 	struct amdgpu_bo_va *bo_va;
1391 	struct dma_resv *resv;
1392 	bool clear, unlock;
1393 	int r;
1394 
1395 	spin_lock(&vm->status_lock);
1396 	while (!list_empty(&vm->moved)) {
1397 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1398 					 base.vm_status);
1399 		spin_unlock(&vm->status_lock);
1400 
1401 		/* Per VM BOs never need to be cleared in the page tables */
1402 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1403 		if (r)
1404 			return r;
1405 		spin_lock(&vm->status_lock);
1406 	}
1407 
1408 	while (!list_empty(&vm->invalidated)) {
1409 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1410 					 base.vm_status);
1411 		resv = bo_va->base.bo->tbo.base.resv;
1412 		spin_unlock(&vm->status_lock);
1413 
1414 		/* Try to reserve the BO to avoid clearing its ptes */
1415 		if (!adev->debug_vm && dma_resv_trylock(resv)) {
1416 			clear = false;
1417 			unlock = true;
1418 		/* The caller is already holding the reservation lock */
1419 		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1420 			clear = false;
1421 			unlock = false;
1422 		/* Somebody else is using the BO right now */
1423 		} else {
1424 			clear = true;
1425 			unlock = false;
1426 		}
1427 
1428 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1429 		if (r)
1430 			return r;
1431 
1432 		if (unlock)
1433 			dma_resv_unlock(resv);
1434 		spin_lock(&vm->status_lock);
1435 	}
1436 	spin_unlock(&vm->status_lock);
1437 
1438 	return 0;
1439 }
1440 
1441 /**
1442  * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1443  *
1444  * @adev: amdgpu_device pointer
1445  * @vm: requested vm
1446  * @flush_type: flush type
1447  * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1448  *
1449  * Flush TLB if needed for a compute VM.
1450  *
1451  * Returns:
1452  * 0 for success.
1453  */
1454 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1455 				struct amdgpu_vm *vm,
1456 				uint32_t flush_type,
1457 				uint32_t xcc_mask)
1458 {
1459 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1460 	bool all_hub = false;
1461 	int xcc = 0, r = 0;
1462 
1463 	WARN_ON_ONCE(!vm->is_compute_context);
1464 
1465 	/*
1466 	 * It can be that we race and lose here, but that is extremely unlikely
1467 	 * and the worst thing which could happen is that we flush the changes
1468 	 * into the TLB once more which is harmless.
1469 	 */
1470 	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1471 		return 0;
1472 
1473 	if (adev->family == AMDGPU_FAMILY_AI ||
1474 	    adev->family == AMDGPU_FAMILY_RV)
1475 		all_hub = true;
1476 
1477 	for_each_inst(xcc, xcc_mask) {
1478 		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1479 						   all_hub, xcc);
1480 		if (r)
1481 			break;
1482 	}
1483 	return r;
1484 }
1485 
1486 /**
1487  * amdgpu_vm_bo_add - add a bo to a specific vm
1488  *
1489  * @adev: amdgpu_device pointer
1490  * @vm: requested vm
1491  * @bo: amdgpu buffer object
1492  *
1493  * Add @bo into the requested vm.
1494  * Add @bo to the list of bos associated with the vm
1495  *
1496  * Returns:
1497  * Newly added bo_va or NULL for failure
1498  *
1499  * Object has to be reserved!
1500  */
1501 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1502 				      struct amdgpu_vm *vm,
1503 				      struct amdgpu_bo *bo)
1504 {
1505 	struct amdgpu_bo_va *bo_va;
1506 
1507 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1508 	if (bo_va == NULL) {
1509 		return NULL;
1510 	}
1511 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1512 
1513 	bo_va->ref_count = 1;
1514 	bo_va->last_pt_update = dma_fence_get_stub();
1515 	INIT_LIST_HEAD(&bo_va->valids);
1516 	INIT_LIST_HEAD(&bo_va->invalids);
1517 
1518 	if (!bo)
1519 		return bo_va;
1520 
1521 	dma_resv_assert_held(bo->tbo.base.resv);
1522 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1523 		bo_va->is_xgmi = true;
1524 		/* Power up XGMI if it can be potentially used */
1525 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1526 	}
1527 
1528 	return bo_va;
1529 }
1530 
1531 
1532 /**
1533  * amdgpu_vm_bo_insert_map - insert a new mapping
1534  *
1535  * @adev: amdgpu_device pointer
1536  * @bo_va: bo_va to store the address
1537  * @mapping: the mapping to insert
1538  *
1539  * Insert a new mapping into all structures.
1540  */
1541 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1542 				    struct amdgpu_bo_va *bo_va,
1543 				    struct amdgpu_bo_va_mapping *mapping)
1544 {
1545 	struct amdgpu_vm *vm = bo_va->base.vm;
1546 	struct amdgpu_bo *bo = bo_va->base.bo;
1547 
1548 	mapping->bo_va = bo_va;
1549 	list_add(&mapping->list, &bo_va->invalids);
1550 	amdgpu_vm_it_insert(mapping, &vm->va);
1551 
1552 	if (mapping->flags & AMDGPU_PTE_PRT)
1553 		amdgpu_vm_prt_get(adev);
1554 
1555 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1556 	    !bo_va->base.moved) {
1557 		amdgpu_vm_bo_moved(&bo_va->base);
1558 	}
1559 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1560 }
1561 
1562 /**
1563  * amdgpu_vm_bo_map - map bo inside a vm
1564  *
1565  * @adev: amdgpu_device pointer
1566  * @bo_va: bo_va to store the address
1567  * @saddr: where to map the BO
1568  * @offset: requested offset in the BO
1569  * @size: BO size in bytes
1570  * @flags: attributes of pages (read/write/valid/etc.)
1571  *
1572  * Add a mapping of the BO at the specified addr into the VM.
1573  *
1574  * Returns:
1575  * 0 for success, error for failure.
1576  *
1577  * Object has to be reserved and unreserved outside!
1578  */
1579 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1580 		     struct amdgpu_bo_va *bo_va,
1581 		     uint64_t saddr, uint64_t offset,
1582 		     uint64_t size, uint64_t flags)
1583 {
1584 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1585 	struct amdgpu_bo *bo = bo_va->base.bo;
1586 	struct amdgpu_vm *vm = bo_va->base.vm;
1587 	uint64_t eaddr;
1588 
1589 	/* validate the parameters */
1590 	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
1591 		return -EINVAL;
1592 	if (saddr + size <= saddr || offset + size <= offset)
1593 		return -EINVAL;
1594 
1595 	/* make sure object fit at this offset */
1596 	eaddr = saddr + size - 1;
1597 	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
1598 	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
1599 		return -EINVAL;
1600 
1601 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1602 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1603 
1604 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1605 	if (tmp) {
1606 		/* bo and tmp overlap, invalid addr */
1607 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1608 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1609 			tmp->start, tmp->last + 1);
1610 		return -EINVAL;
1611 	}
1612 
1613 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1614 	if (!mapping)
1615 		return -ENOMEM;
1616 
1617 	mapping->start = saddr;
1618 	mapping->last = eaddr;
1619 	mapping->offset = offset;
1620 	mapping->flags = flags;
1621 
1622 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1623 
1624 	return 0;
1625 }
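
/*
 * Illustrative sketch, not part of this file: mapping the first 1 MiB of a
 * reserved BO (at least that large) read/write at GPU VA 0x100000 could look
 * like:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */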
1626 
1627 /**
1628  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1629  *
1630  * @adev: amdgpu_device pointer
1631  * @bo_va: bo_va to store the address
1632  * @saddr: where to map the BO
1633  * @offset: requested offset in the BO
1634  * @size: BO size in bytes
1635  * @flags: attributes of pages (read/write/valid/etc.)
1636  *
1637  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1638  * mappings as we do so.
1639  *
1640  * Returns:
1641  * 0 for success, error for failure.
1642  *
1643  * Object has to be reserved and unreserved outside!
1644  */
1645 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1646 			     struct amdgpu_bo_va *bo_va,
1647 			     uint64_t saddr, uint64_t offset,
1648 			     uint64_t size, uint64_t flags)
1649 {
1650 	struct amdgpu_bo_va_mapping *mapping;
1651 	struct amdgpu_bo *bo = bo_va->base.bo;
1652 	uint64_t eaddr;
1653 	int r;
1654 
1655 	/* validate the parameters */
1656 	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
1657 		return -EINVAL;
1658 	if (saddr + size <= saddr || offset + size <= offset)
1659 		return -EINVAL;
1660 
1661 	/* make sure object fit at this offset */
1662 	eaddr = saddr + size - 1;
1663 	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
1664 	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
1665 		return -EINVAL;
1666 
1667 	/* Allocate all the needed memory */
1668 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1669 	if (!mapping)
1670 		return -ENOMEM;
1671 
1672 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1673 	if (r) {
1674 		kfree(mapping);
1675 		return r;
1676 	}
1677 
1678 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1679 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1680 
1681 	mapping->start = saddr;
1682 	mapping->last = eaddr;
1683 	mapping->offset = offset;
1684 	mapping->flags = flags;
1685 
1686 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1687 
1688 	return 0;
1689 }
1690 
1691 /**
1692  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1693  *
1694  * @adev: amdgpu_device pointer
1695  * @bo_va: bo_va to remove the address from
1696  * @saddr: where the BO is mapped
1697  *
1698  * Remove a mapping of the BO at the specified addr from the VM.
1699  *
1700  * Returns:
1701  * 0 for success, error for failure.
1702  *
1703  * Object has to be reserved and unreserved outside!
1704  */
1705 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1706 		       struct amdgpu_bo_va *bo_va,
1707 		       uint64_t saddr)
1708 {
1709 	struct amdgpu_bo_va_mapping *mapping;
1710 	struct amdgpu_vm *vm = bo_va->base.vm;
1711 	bool valid = true;
1712 
1713 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1714 
1715 	list_for_each_entry(mapping, &bo_va->valids, list) {
1716 		if (mapping->start == saddr)
1717 			break;
1718 	}
1719 
1720 	if (&mapping->list == &bo_va->valids) {
1721 		valid = false;
1722 
1723 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1724 			if (mapping->start == saddr)
1725 				break;
1726 		}
1727 
1728 		if (&mapping->list == &bo_va->invalids)
1729 			return -ENOENT;
1730 	}
1731 
1732 	list_del(&mapping->list);
1733 	amdgpu_vm_it_remove(mapping, &vm->va);
1734 	mapping->bo_va = NULL;
1735 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1736 
1737 	if (valid)
1738 		list_add(&mapping->list, &vm->freed);
1739 	else
1740 		amdgpu_vm_free_mapping(adev, vm, mapping,
1741 				       bo_va->last_pt_update);
1742 
1743 	return 0;
1744 }
1745 
1746 /**
1747  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1748  *
1749  * @adev: amdgpu_device pointer
1750  * @vm: VM structure to use
1751  * @saddr: start of the range
1752  * @size: size of the range
1753  *
1754  * Remove all mappings in a range, split them as appropriate.
1755  *
1756  * Returns:
1757  * 0 for success, error for failure.
1758  */
1759 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1760 				struct amdgpu_vm *vm,
1761 				uint64_t saddr, uint64_t size)
1762 {
1763 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1764 	LIST_HEAD(removed);
1765 	uint64_t eaddr;
1766 
1767 	eaddr = saddr + size - 1;
1768 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1769 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1770 
1771 	/* Allocate all the needed memory */
1772 	before = kzalloc(sizeof(*before), GFP_KERNEL);
1773 	if (!before)
1774 		return -ENOMEM;
1775 	INIT_LIST_HEAD(&before->list);
1776 
1777 	after = kzalloc(sizeof(*after), GFP_KERNEL);
1778 	if (!after) {
1779 		kfree(before);
1780 		return -ENOMEM;
1781 	}
1782 	INIT_LIST_HEAD(&after->list);
1783 
1784 	/* Now gather all removed mappings */
1785 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1786 	while (tmp) {
1787 		/* Remember mapping split at the start */
1788 		if (tmp->start < saddr) {
1789 			before->start = tmp->start;
1790 			before->last = saddr - 1;
1791 			before->offset = tmp->offset;
1792 			before->flags = tmp->flags;
1793 			before->bo_va = tmp->bo_va;
1794 			list_add(&before->list, &tmp->bo_va->invalids);
1795 		}
1796 
1797 		/* Remember mapping split at the end */
1798 		if (tmp->last > eaddr) {
1799 			after->start = eaddr + 1;
1800 			after->last = tmp->last;
1801 			after->offset = tmp->offset;
1802 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1803 			after->flags = tmp->flags;
1804 			after->bo_va = tmp->bo_va;
1805 			list_add(&after->list, &tmp->bo_va->invalids);
1806 		}
1807 
1808 		list_del(&tmp->list);
1809 		list_add(&tmp->list, &removed);
1810 
1811 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1812 	}
1813 
1814 	/* And free them up */
1815 	list_for_each_entry_safe(tmp, next, &removed, list) {
1816 		amdgpu_vm_it_remove(tmp, &vm->va);
1817 		list_del(&tmp->list);
1818 
1819 		if (tmp->start < saddr)
1820 			tmp->start = saddr;
1821 		if (tmp->last > eaddr)
1822 			tmp->last = eaddr;
1823 
1824 		tmp->bo_va = NULL;
1825 		list_add(&tmp->list, &vm->freed);
1826 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1827 	}
1828 
1829 	/* Insert partial mapping before the range */
1830 	if (!list_empty(&before->list)) {
1831 		struct amdgpu_bo *bo = before->bo_va->base.bo;
1832 
1833 		amdgpu_vm_it_insert(before, &vm->va);
1834 		if (before->flags & AMDGPU_PTE_PRT)
1835 			amdgpu_vm_prt_get(adev);
1836 
1837 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1838 		    !before->bo_va->base.moved)
1839 			amdgpu_vm_bo_moved(&before->bo_va->base);
1840 	} else {
1841 		kfree(before);
1842 	}
1843 
1844 	/* Insert partial mapping after the range */
1845 	if (!list_empty(&after->list)) {
1846 		struct amdgpu_bo *bo = after->bo_va->base.bo;
1847 
1848 		amdgpu_vm_it_insert(after, &vm->va);
1849 		if (after->flags & AMDGPU_PTE_PRT)
1850 			amdgpu_vm_prt_get(adev);
1851 
1852 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1853 		    !after->bo_va->base.moved)
1854 			amdgpu_vm_bo_moved(&after->bo_va->base);
1855 	} else {
1856 		kfree(after);
1857 	}
1858 
1859 	return 0;
1860 }
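
/*
 * Illustrative example for the splitting above: clearing GPU pages
 * 0x140..0x17f out of an existing mapping covering 0x100..0x1ff leaves a
 * "before" remainder for 0x100..0x13f and an "after" remainder for
 * 0x180..0x1ff, with the "after" offset advanced by the distance from the
 * original start so it still points at the right place inside the BO.
 */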
1861 
1862 /**
1863  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1864  *
1865  * @vm: the requested VM
1866  * @addr: the address
1867  *
1868  * Find a mapping by its address.
1869  *
1870  * Returns:
1871  * The amdgpu_bo_va_mapping matching addr, or NULL
1872  *
1873  */
1874 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1875 							 uint64_t addr)
1876 {
1877 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1878 }
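
/*
 * Usage sketch (illustrative only): the interval tree is keyed in GPU page
 * units, so a caller would convert a byte address first, e.g.:
 *
 *	struct amdgpu_bo_va_mapping *mapping;
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (mapping && mapping->bo_va && mapping->bo_va->base.bo)
 *		bo = mapping->bo_va->base.bo;
 */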
1879 
1880 /**
1881  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1882  *
1883  * @vm: the requested vm
1884  * @ticket: CS ticket
1885  *
1886  * Trace all mappings of BOs reserved during a command submission.
1887  */
1888 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1889 {
1890 	struct amdgpu_bo_va_mapping *mapping;
1891 
1892 	if (!trace_amdgpu_vm_bo_cs_enabled())
1893 		return;
1894 
1895 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
1896 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
1897 		if (mapping->bo_va && mapping->bo_va->base.bo) {
1898 			struct amdgpu_bo *bo;
1899 
1900 			bo = mapping->bo_va->base.bo;
1901 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
1902 			    ticket)
1903 				continue;
1904 		}
1905 
1906 		trace_amdgpu_vm_bo_cs(mapping);
1907 	}
1908 }
1909 
1910 /**
1911  * amdgpu_vm_bo_del - remove a bo from a specific vm
1912  *
1913  * @adev: amdgpu_device pointer
1914  * @bo_va: requested bo_va
1915  *
1916  * Remove @bo_va->bo from the requested vm.
1917  *
1918  * The object has to be reserved!
1919  */
1920 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
1921 		      struct amdgpu_bo_va *bo_va)
1922 {
1923 	struct amdgpu_bo_va_mapping *mapping, *next;
1924 	struct amdgpu_bo *bo = bo_va->base.bo;
1925 	struct amdgpu_vm *vm = bo_va->base.vm;
1926 	struct amdgpu_vm_bo_base **base;
1927 
1928 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
1929 
1930 	if (bo) {
1931 		dma_resv_assert_held(bo->tbo.base.resv);
1932 		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1933 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
1934 
1935 		for (base = &bo_va->base.bo->vm_bo; *base;
1936 		     base = &(*base)->next) {
1937 			if (*base != &bo_va->base)
1938 				continue;
1939 
1940 			*base = bo_va->base.next;
1941 			break;
1942 		}
1943 	}
1944 
1945 	spin_lock(&vm->status_lock);
1946 	list_del(&bo_va->base.vm_status);
1947 	spin_unlock(&vm->status_lock);
1948 
1949 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1950 		list_del(&mapping->list);
1951 		amdgpu_vm_it_remove(mapping, &vm->va);
1952 		mapping->bo_va = NULL;
1953 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1954 		list_add(&mapping->list, &vm->freed);
1955 	}
1956 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1957 		list_del(&mapping->list);
1958 		amdgpu_vm_it_remove(mapping, &vm->va);
1959 		amdgpu_vm_free_mapping(adev, vm, mapping,
1960 				       bo_va->last_pt_update);
1961 	}
1962 
1963 	dma_fence_put(bo_va->last_pt_update);
1964 
1965 	if (bo && bo_va->is_xgmi)
1966 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
1967 
1968 	kfree(bo_va);
1969 }
1970 
1971 /**
1972  * amdgpu_vm_evictable - check if we can evict a VM
1973  *
1974  * @bo: A page table of the VM.
1975  *
1976  * Check if it is possible to evict a VM.
1977  */
1978 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
1979 {
1980 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
1981 
1982 	/* Page tables of a destroyed VM can go away immediately */
1983 	if (!bo_base || !bo_base->vm)
1984 		return true;
1985 
1986 	/* Don't evict VM page tables while they are busy */
1987 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
1988 		return false;
1989 
1990 	/* Try to block ongoing updates */
1991 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
1992 		return false;
1993 
1994 	/* Don't evict VM page tables while they are updated */
1995 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
1996 		amdgpu_vm_eviction_unlock(bo_base->vm);
1997 		return false;
1998 	}
1999 
2000 	bo_base->vm->evicting = true;
2001 	amdgpu_vm_eviction_unlock(bo_base->vm);
2002 	return true;
2003 }
2004 
2005 /**
2006  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2007  *
2008  * @adev: amdgpu_device pointer
2009  * @bo: amdgpu buffer object
2010  * @evicted: is the BO evicted
2011  *
2012  * Mark @bo as invalid.
2013  */
2014 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2015 			     struct amdgpu_bo *bo, bool evicted)
2016 {
2017 	struct amdgpu_vm_bo_base *bo_base;
2018 
2019 	/* shadow bo doesn't have bo base, its validation needs its parent */
2020 	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2021 		bo = bo->parent;
2022 
2023 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2024 		struct amdgpu_vm *vm = bo_base->vm;
2025 
2026 		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
2027 			amdgpu_vm_bo_evicted(bo_base);
2028 			continue;
2029 		}
2030 
2031 		if (bo_base->moved)
2032 			continue;
2033 		bo_base->moved = true;
2034 
2035 		if (bo->tbo.type == ttm_bo_type_kernel)
2036 			amdgpu_vm_bo_relocated(bo_base);
2037 		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2038 			amdgpu_vm_bo_moved(bo_base);
2039 		else
2040 			amdgpu_vm_bo_invalidated(bo_base);
2041 	}
2042 }
2043 
2044 /**
2045  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2046  *
2047  * @vm_size: VM size
2048  *
2049  * Returns:
2050  * VM page table as power of two
2051  */
2052 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2053 {
2054 	/* Total bits covered by PD + PTs */
2055 	unsigned bits = ilog2(vm_size) + 18;
2056 
2057 	/* Make sure the PD is 4K in size up to an 8GB address space.
2058 	 * Above that, split equally between PD and PTs. */
2059 	if (vm_size <= 8)
2060 		return (bits - 9);
2061 	else
2062 		return ((bits + 3) / 2);
2063 }
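
/*
 * Worked example (illustrative only): for vm_size = 256 GB the covered bits
 * are ilog2(256) + 18 = 26, so the result is (26 + 3) / 2 = 14 bits per page
 * table block.  For vm_size = 8 GB, bits = 21 and the result is 21 - 9 = 12,
 * which keeps the page directory at 4K (512 entries of 8 bytes).
 */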
2064 
2065 /**
2066  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2067  *
2068  * @adev: amdgpu_device pointer
2069  * @min_vm_size: the minimum vm size in GB if it's set auto
2070  * @fragment_size_default: Default PTE fragment size
2071  * @max_level: max VMPT level
2072  * @max_bits: max address space size in bits
2073  *
2074  */
2075 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2076 			   uint32_t fragment_size_default, unsigned max_level,
2077 			   unsigned max_bits)
2078 {
2079 	unsigned int max_size = 1 << (max_bits - 30);
2080 	unsigned int vm_size;
2081 	uint64_t tmp;
2082 
2083 	/* adjust vm size first */
2084 	if (amdgpu_vm_size != -1) {
2085 		vm_size = amdgpu_vm_size;
2086 		if (vm_size > max_size) {
2087 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2088 				 amdgpu_vm_size, max_size);
2089 			vm_size = max_size;
2090 		}
2091 	} else {
2092 		struct sysinfo si;
2093 		unsigned int phys_ram_gb;
2094 
2095 		/* Optimal VM size depends on the amount of physical
2096 		 * RAM available. Underlying requirements and
2097 		 * assumptions:
2098 		 *
2099 		 *  - Need to map system memory and VRAM from all GPUs
2100 		 *     - VRAM from other GPUs not known here
2101 		 *     - Assume VRAM <= system memory
2102 		 *  - On GFX8 and older, VM space can be segmented for
2103 		 *    different MTYPEs
2104 		 *  - Need to allow room for fragmentation, guard pages etc.
2105 		 *
2106 		 * This adds up to a rough guess of system memory x3.
2107 		 * Round up to power of two to maximize the available
2108 		 * VM size with the given page table size.
2109 		 */
2110 		si_meminfo(&si);
2111 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2112 			       (1 << 30) - 1) >> 30;
2113 		vm_size = roundup_pow_of_two(
2114 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2115 	}
2116 
2117 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2118 
2119 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2120 	if (amdgpu_vm_block_size != -1)
2121 		tmp >>= amdgpu_vm_block_size - 9;
2122 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2123 	adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2124 	switch (adev->vm_manager.num_level) {
2125 	case 3:
2126 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2127 		break;
2128 	case 2:
2129 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2130 		break;
2131 	case 1:
2132 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2133 		break;
2134 	default:
2135 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2136 	}
2137 	/* block size depends on vm size and hw setup */
2138 	if (amdgpu_vm_block_size != -1)
2139 		adev->vm_manager.block_size =
2140 			min((unsigned)amdgpu_vm_block_size, max_bits
2141 			    - AMDGPU_GPU_PAGE_SHIFT
2142 			    - 9 * adev->vm_manager.num_level);
2143 	else if (adev->vm_manager.num_level > 1)
2144 		adev->vm_manager.block_size = 9;
2145 	else
2146 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2147 
2148 	if (amdgpu_vm_fragment_size == -1)
2149 		adev->vm_manager.fragment_size = fragment_size_default;
2150 	else
2151 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2152 
2153 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2154 		 vm_size, adev->vm_manager.num_level + 1,
2155 		 adev->vm_manager.block_size,
2156 		 adev->vm_manager.fragment_size);
2157 }
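
/*
 * Worked example (illustrative only, assuming default module parameters):
 * with 16 GB of system RAM, phys_ram_gb = 16 and vm_size becomes
 * roundup_pow_of_two(16 * 3) = 64 GB (provided min_vm_size and max_size do
 * not constrain it).  max_pfn is then 64 << 18 = 16M GPU pages of 4K each,
 * i.e. a 64 GB virtual address space.
 */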
2158 
2159 /**
2160  * amdgpu_vm_wait_idle - wait for the VM to become idle
2161  *
2162  * @vm: VM object to wait for
2163  * @timeout: timeout to wait for VM to become idle
2164  */
2165 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2166 {
2167 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2168 					DMA_RESV_USAGE_BOOKKEEP,
2169 					true, timeout);
2170 	if (timeout <= 0)
2171 		return timeout;
2172 
2173 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2174 }
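
/*
 * Usage sketch (illustrative only): waiting up to 100 ms, with the timeout
 * given in jiffies as for dma_resv_wait_timeout():
 *
 *	long timeout = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(100));
 *
 *	if (timeout == 0)
 *		return -ETIMEDOUT;
 *	if (timeout < 0)
 *		return timeout;
 */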
2175 
2176 /**
2177  * amdgpu_vm_init - initialize a vm instance
2178  *
2179  * @adev: amdgpu_device pointer
2180  * @vm: requested vm
2181  * @xcp_id: GPU partition selection id
2182  *
2183  * Init @vm fields.
2184  *
2185  * Returns:
2186  * 0 for success, error for failure.
2187  */
2188 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2189 		   int32_t xcp_id)
2190 {
2191 	struct amdgpu_bo *root_bo;
2192 	struct amdgpu_bo_vm *root;
2193 	int r, i;
2194 
2195 	vm->va = RB_ROOT_CACHED;
2196 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2197 		vm->reserved_vmid[i] = NULL;
2198 	INIT_LIST_HEAD(&vm->evicted);
2199 	INIT_LIST_HEAD(&vm->relocated);
2200 	INIT_LIST_HEAD(&vm->moved);
2201 	INIT_LIST_HEAD(&vm->idle);
2202 	INIT_LIST_HEAD(&vm->invalidated);
2203 	spin_lock_init(&vm->status_lock);
2204 	INIT_LIST_HEAD(&vm->freed);
2205 	INIT_LIST_HEAD(&vm->done);
2206 	INIT_LIST_HEAD(&vm->pt_freed);
2207 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2208 	INIT_KFIFO(vm->faults);
2209 
2210 	r = amdgpu_vm_init_entities(adev, vm);
2211 	if (r)
2212 		return r;
2213 
2214 	vm->pte_support_ats = false;
2215 	vm->is_compute_context = false;
2216 
2217 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2218 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2219 
2220 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2221 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2222 	WARN_ONCE((vm->use_cpu_for_update &&
2223 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2224 		  "CPU update of VM recommended only for large BAR system\n");
2225 
2226 	if (vm->use_cpu_for_update)
2227 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2228 	else
2229 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2230 
2231 	vm->last_update = dma_fence_get_stub();
2232 	vm->last_unlocked = dma_fence_get_stub();
2233 	vm->last_tlb_flush = dma_fence_get_stub();
2234 	vm->generation = 0;
2235 
2236 	mutex_init(&vm->eviction_lock);
2237 	vm->evicting = false;
2238 
2239 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2240 				false, &root, xcp_id);
2241 	if (r)
2242 		goto error_free_delayed;
2243 
2244 	root_bo = amdgpu_bo_ref(&root->bo);
2245 	r = amdgpu_bo_reserve(root_bo, true);
2246 	if (r) {
2247 		amdgpu_bo_unref(&root->shadow);
2248 		amdgpu_bo_unref(&root_bo);
2249 		goto error_free_delayed;
2250 	}
2251 
2252 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2253 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2254 	if (r)
2255 		goto error_free_root;
2256 
2257 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2258 	if (r)
2259 		goto error_free_root;
2260 
2261 	amdgpu_bo_unreserve(vm->root.bo);
2262 	amdgpu_bo_unref(&root_bo);
2263 
2264 	return 0;
2265 
2266 error_free_root:
2267 	amdgpu_vm_pt_free_root(adev, vm);
2268 	amdgpu_bo_unreserve(vm->root.bo);
2269 	amdgpu_bo_unref(&root_bo);
2270 
2271 error_free_delayed:
2272 	dma_fence_put(vm->last_tlb_flush);
2273 	dma_fence_put(vm->last_unlocked);
2274 	amdgpu_vm_fini_entities(vm);
2275 
2276 	return r;
2277 }
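
/*
 * Usage sketch (illustrative only): a VM is typically created once per DRM
 * file descriptor and torn down with amdgpu_vm_fini(), roughly:
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */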
2278 
2279 /**
2280  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2281  *
2282  * @adev: amdgpu_device pointer
2283  * @vm: requested vm
2284  *
2285  * This only works on GFX VMs that don't have any BOs added and no
2286  * page tables allocated yet.
2287  *
2288  * Changes the following VM parameters:
2289  * - use_cpu_for_update
2290  * - pte_support_ats
2291  *
2292  * Reinitializes the page directory to reflect the changed ATS
2293  * setting.
2294  *
2295  * Returns:
2296  * 0 for success, -errno for errors.
2297  */
2298 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2299 {
2300 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2301 	int r;
2302 
2303 	r = amdgpu_bo_reserve(vm->root.bo, true);
2304 	if (r)
2305 		return r;
2306 
2307 	/* Check if PD needs to be reinitialized and do it before
2308 	 * changing any other state, in case it fails.
2309 	 */
2310 	if (pte_support_ats != vm->pte_support_ats) {
2311 		/* Sanity checks */
2312 		if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
2313 			r = -EINVAL;
2314 			goto unreserve_bo;
2315 		}
2316 
2317 		vm->pte_support_ats = pte_support_ats;
2318 		r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
2319 				       false);
2320 		if (r)
2321 			goto unreserve_bo;
2322 	}
2323 
2324 	/* Update VM state */
2325 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2326 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2327 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2328 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2329 	WARN_ONCE((vm->use_cpu_for_update &&
2330 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2331 		  "CPU update of VM recommended only for large BAR system\n");
2332 
2333 	if (vm->use_cpu_for_update) {
2334 		/* Sync with last SDMA update/clear before switching to CPU */
2335 		r = amdgpu_bo_sync_wait(vm->root.bo,
2336 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2337 		if (r)
2338 			goto unreserve_bo;
2339 
2340 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2341 		r = amdgpu_vm_pt_map_tables(adev, vm);
2342 		if (r)
2343 			goto unreserve_bo;
2344 
2345 	} else {
2346 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2347 	}
2348 
2349 	dma_fence_put(vm->last_update);
2350 	vm->last_update = dma_fence_get_stub();
2351 	vm->is_compute_context = true;
2352 
2353 	/* Free the shadow bo for compute VM */
2354 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2355 
2356 	goto unreserve_bo;
2357 
2358 unreserve_bo:
2359 	amdgpu_bo_unreserve(vm->root.bo);
2360 	return r;
2361 }
2362 
2363 /**
2364  * amdgpu_vm_release_compute - release a compute vm
2365  * @adev: amdgpu_device pointer
2366  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2367  *
2368  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2369  * compute pasid from the vm. Compute should stop using the vm after this call.
2370  */
2371 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2372 {
2373 	amdgpu_vm_set_pasid(adev, vm, 0);
2374 	vm->is_compute_context = false;
2375 }
2376 
2377 /**
2378  * amdgpu_vm_fini - tear down a vm instance
2379  *
2380  * @adev: amdgpu_device pointer
2381  * @vm: requested vm
2382  *
2383  * Tear down @vm.
2384  * Unbind the VM and remove all bos from the vm bo list
2385  */
2386 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2387 {
2388 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2389 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2390 	struct amdgpu_bo *root;
2391 	unsigned long flags;
2392 	int i;
2393 
2394 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2395 
2396 	flush_work(&vm->pt_free_work);
2397 
2398 	root = amdgpu_bo_ref(vm->root.bo);
2399 	amdgpu_bo_reserve(root, true);
2400 	amdgpu_vm_set_pasid(adev, vm, 0);
2401 	dma_fence_wait(vm->last_unlocked, false);
2402 	dma_fence_put(vm->last_unlocked);
2403 	dma_fence_wait(vm->last_tlb_flush, false);
2404 	/* Make sure that all fence callbacks have completed */
2405 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2406 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2407 	dma_fence_put(vm->last_tlb_flush);
2408 
2409 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2410 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2411 			amdgpu_vm_prt_fini(adev, vm);
2412 			prt_fini_needed = false;
2413 		}
2414 
2415 		list_del(&mapping->list);
2416 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2417 	}
2418 
2419 	amdgpu_vm_pt_free_root(adev, vm);
2420 	amdgpu_bo_unreserve(root);
2421 	amdgpu_bo_unref(&root);
2422 	WARN_ON(vm->root.bo);
2423 
2424 	amdgpu_vm_fini_entities(vm);
2425 
2426 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2427 		dev_err(adev->dev, "still active bo inside vm\n");
2428 	}
2429 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2430 					     &vm->va.rb_root, rb) {
2431 		/* Don't remove the mapping here, we don't want to trigger a
2432 		 * rebalance and the tree is about to be destroyed anyway.
2433 		 */
2434 		list_del(&mapping->list);
2435 		kfree(mapping);
2436 	}
2437 
2438 	dma_fence_put(vm->last_update);
2439 
2440 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2441 		if (vm->reserved_vmid[i]) {
2442 			amdgpu_vmid_free_reserved(adev, i);
2443 			vm->reserved_vmid[i] = false;
2444 		}
2445 	}
2446 
2447 }
2448 
2449 /**
2450  * amdgpu_vm_manager_init - init the VM manager
2451  *
2452  * @adev: amdgpu_device pointer
2453  *
2454  * Initialize the VM manager structures
2455  */
2456 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2457 {
2458 	unsigned i;
2459 
2460 	/* Concurrent flushes are only possible starting with Vega10 and
2461 	 * are broken on Navi10 and Navi14.
2462 	 */
2463 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2464 					      adev->asic_type == CHIP_NAVI10 ||
2465 					      adev->asic_type == CHIP_NAVI14);
2466 	amdgpu_vmid_mgr_init(adev);
2467 
2468 	adev->vm_manager.fence_context =
2469 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2470 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2471 		adev->vm_manager.seqno[i] = 0;
2472 
2473 	spin_lock_init(&adev->vm_manager.prt_lock);
2474 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2475 
2476 	/* Unless overridden by the user, compute VM tables are only updated
2477 	 * by the CPU on large BAR systems by default.
2478 	 */
2479 #ifdef CONFIG_X86_64
2480 	if (amdgpu_vm_update_mode == -1) {
2481 		/* For ASICs with VF MMIO access protection,
2482 		 * avoid using the CPU for VM table updates.
2483 		 */
2484 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2485 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2486 			adev->vm_manager.vm_update_mode =
2487 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2488 		else
2489 			adev->vm_manager.vm_update_mode = 0;
2490 	} else
2491 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2492 #else
2493 	adev->vm_manager.vm_update_mode = 0;
2494 #endif
2495 
2496 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2497 }
2498 
2499 /**
2500  * amdgpu_vm_manager_fini - cleanup VM manager
2501  *
2502  * @adev: amdgpu_device pointer
2503  *
2504  * Cleanup the VM manager and free resources.
2505  */
2506 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2507 {
2508 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2509 	xa_destroy(&adev->vm_manager.pasids);
2510 
2511 	amdgpu_vmid_mgr_fini(adev);
2512 }
2513 
2514 /**
2515  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2516  *
2517  * @dev: drm device pointer
2518  * @data: drm_amdgpu_vm
2519  * @filp: drm file pointer
2520  *
2521  * Returns:
2522  * 0 for success, -errno for errors.
2523  */
2524 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2525 {
2526 	union drm_amdgpu_vm *args = data;
2527 	struct amdgpu_device *adev = drm_to_adev(dev);
2528 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2529 
2530 	/* No valid flags defined yet */
2531 	if (args->in.flags)
2532 		return -EINVAL;
2533 
2534 	switch (args->in.op) {
2535 	case AMDGPU_VM_OP_RESERVE_VMID:
2536 		/* We only need to reserve a VMID from the gfxhub */
2537 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2538 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2539 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2540 		}
2541 
2542 		break;
2543 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2544 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2545 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2546 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2547 		}
2548 		break;
2549 	default:
2550 		return -EINVAL;
2551 	}
2552 
2553 	return 0;
2554 }
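
/*
 * Userspace sketch (illustrative only, assuming the usual libdrm helpers):
 * reserving a VMID through this ioctl could look roughly like:
 *
 *	union drm_amdgpu_vm args = {
 *		.in.op = AMDGPU_VM_OP_RESERVE_VMID,
 *		.in.flags = 0,
 *	};
 *
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */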
2555 
2556 /**
2557  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2558  *
2559  * @adev: amdgpu device pointer
2560  * @pasid: PASID identifier for VM
2561  * @task_info: task_info to fill.
2562  */
2563 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
2564 			 struct amdgpu_task_info *task_info)
2565 {
2566 	struct amdgpu_vm *vm;
2567 	unsigned long flags;
2568 
2569 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2570 
2571 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2572 	if (vm)
2573 		*task_info = vm->task_info;
2574 
2575 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2576 }
2577 
2578 /**
2579  * amdgpu_vm_set_task_info - Sets VMs task info.
2580  *
2581  * @vm: vm for which to set the info
2582  */
2583 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2584 {
2585 	if (vm->task_info.pid)
2586 		return;
2587 
2588 	vm->task_info.pid = current->pid;
2589 	get_task_comm(vm->task_info.task_name, current);
2590 
2591 	if (current->group_leader->mm != current->mm)
2592 		return;
2593 
2594 	vm->task_info.tgid = current->group_leader->pid;
2595 	get_task_comm(vm->task_info.process_name, current->group_leader);
2596 }
2597 
2598 /**
2599  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2600  * @adev: amdgpu device pointer
2601  * @pasid: PASID of the VM
2602  * @vmid: VMID, only used for GFX 9.4.3.
2603  * @node_id: Node_id received in IH cookie. Only applicable for
2604  *           GFX 9.4.3.
2605  * @addr: Address of the fault
2606  * @write_fault: true is write fault, false is read fault
2607  *
2608  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2609  * shouldn't be reported any more.
2610  */
2611 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2612 			    u32 vmid, u32 node_id, uint64_t addr,
2613 			    bool write_fault)
2614 {
2615 	bool is_compute_context = false;
2616 	struct amdgpu_bo *root;
2617 	unsigned long irqflags;
2618 	uint64_t value, flags;
2619 	struct amdgpu_vm *vm;
2620 	int r;
2621 
2622 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2623 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2624 	if (vm) {
2625 		root = amdgpu_bo_ref(vm->root.bo);
2626 		is_compute_context = vm->is_compute_context;
2627 	} else {
2628 		root = NULL;
2629 	}
2630 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2631 
2632 	if (!root)
2633 		return false;
2634 
2635 	addr /= AMDGPU_GPU_PAGE_SIZE;
2636 
2637 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2638 	    node_id, addr, write_fault)) {
2639 		amdgpu_bo_unref(&root);
2640 		return true;
2641 	}
2642 
2643 	r = amdgpu_bo_reserve(root, true);
2644 	if (r)
2645 		goto error_unref;
2646 
2647 	/* Double check that the VM still exists */
2648 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2649 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2650 	if (vm && vm->root.bo != root)
2651 		vm = NULL;
2652 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2653 	if (!vm)
2654 		goto error_unlock;
2655 
2656 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2657 		AMDGPU_PTE_SYSTEM;
2658 
2659 	if (is_compute_context) {
2660 		/* Intentionally setting invalid PTE flag
2661 		 * combination to force a no-retry-fault
2662 		 */
2663 		flags = AMDGPU_VM_NORETRY_FLAGS;
2664 		value = 0;
2665 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2666 		/* Redirect the access to the dummy page */
2667 		value = adev->dummy_page_addr;
2668 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2669 			AMDGPU_PTE_WRITEABLE;
2670 
2671 	} else {
2672 		/* Let the hw retry silently on the PTE */
2673 		value = 0;
2674 	}
2675 
2676 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2677 	if (r) {
2678 		pr_debug("failed %d to reserve fence slot\n", r);
2679 		goto error_unlock;
2680 	}
2681 
2682 	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2683 				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2684 	if (r)
2685 		goto error_unlock;
2686 
2687 	r = amdgpu_vm_update_pdes(adev, vm, true);
2688 
2689 error_unlock:
2690 	amdgpu_bo_unreserve(root);
2691 	if (r < 0)
2692 		DRM_ERROR("Can't handle page fault (%d)\n", r);
2693 
2694 error_unref:
2695 	amdgpu_bo_unref(&root);
2696 
2697 	return false;
2698 }
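
/*
 * Caller sketch (illustrative only): interrupt handlers typically use the
 * return value to decide whether the fault still needs to be reported:
 *
 *	handled = amdgpu_vm_handle_fault(adev, pasid, vmid, node_id, addr,
 *					 write_fault);
 *	if (handled)
 *		return 1;
 */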
2699 
2700 #if defined(CONFIG_DEBUG_FS)
2701 /**
2702  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
2703  *
2704  * @vm: Requested VM for printing BO info
2705  * @m: debugfs file
2706  *
2707  * Print BO information in debugfs file for the VM
2708  */
2709 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2710 {
2711 	struct amdgpu_bo_va *bo_va, *tmp;
2712 	u64 total_idle = 0;
2713 	u64 total_evicted = 0;
2714 	u64 total_relocated = 0;
2715 	u64 total_moved = 0;
2716 	u64 total_invalidated = 0;
2717 	u64 total_done = 0;
2718 	unsigned int total_idle_objs = 0;
2719 	unsigned int total_evicted_objs = 0;
2720 	unsigned int total_relocated_objs = 0;
2721 	unsigned int total_moved_objs = 0;
2722 	unsigned int total_invalidated_objs = 0;
2723 	unsigned int total_done_objs = 0;
2724 	unsigned int id = 0;
2725 
2726 	spin_lock(&vm->status_lock);
2727 	seq_puts(m, "\tIdle BOs:\n");
2728 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2729 		if (!bo_va->base.bo)
2730 			continue;
2731 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2732 	}
2733 	total_idle_objs = id;
2734 	id = 0;
2735 
2736 	seq_puts(m, "\tEvicted BOs:\n");
2737 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2738 		if (!bo_va->base.bo)
2739 			continue;
2740 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2741 	}
2742 	total_evicted_objs = id;
2743 	id = 0;
2744 
2745 	seq_puts(m, "\tRelocated BOs:\n");
2746 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2747 		if (!bo_va->base.bo)
2748 			continue;
2749 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2750 	}
2751 	total_relocated_objs = id;
2752 	id = 0;
2753 
2754 	seq_puts(m, "\tMoved BOs:\n");
2755 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2756 		if (!bo_va->base.bo)
2757 			continue;
2758 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2759 	}
2760 	total_moved_objs = id;
2761 	id = 0;
2762 
2763 	seq_puts(m, "\tInvalidated BOs:\n");
2764 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2765 		if (!bo_va->base.bo)
2766 			continue;
2767 		total_invalidated += amdgpu_bo_print_info(id++,	bo_va->base.bo, m);
2768 	}
2769 	total_invalidated_objs = id;
2770 	id = 0;
2771 
2772 	seq_puts(m, "\tDone BOs:\n");
2773 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2774 		if (!bo_va->base.bo)
2775 			continue;
2776 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2777 	}
2778 	spin_unlock(&vm->status_lock);
2779 	total_done_objs = id;
2780 
2781 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2782 		   total_idle_objs);
2783 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2784 		   total_evicted_objs);
2785 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2786 		   total_relocated_objs);
2787 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2788 		   total_moved_objs);
2789 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2790 		   total_invalidated_objs);
2791 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2792 		   total_done_objs);
2793 }
2794 #endif
2795 
2796 /**
2797  * amdgpu_vm_update_fault_cache - update cached fault info.
2798  * @adev: amdgpu device pointer
2799  * @pasid: PASID of the VM
2800  * @addr: Address of the fault
2801  * @status: GPUVM fault status register
2802  * @vmhub: which vmhub got the fault
2803  *
2804  * Cache the fault info for later use by userspace in debugging.
2805  */
2806 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
2807 				  unsigned int pasid,
2808 				  uint64_t addr,
2809 				  uint32_t status,
2810 				  unsigned int vmhub)
2811 {
2812 	struct amdgpu_vm *vm;
2813 	unsigned long flags;
2814 
2815 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2816 
2817 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2818 	/* Don't update the fault cache if status is 0.  In the multiple
2819 	 * fault case, subsequent faults will return a 0 status which is
2820 	 * useless for userspace and replaces the useful fault status, so
2821 	 * only update if status is non-0.
2822 	 */
2823 	if (vm && status) {
2824 		vm->fault_info.addr = addr;
2825 		vm->fault_info.status = status;
2826 		if (AMDGPU_IS_GFXHUB(vmhub)) {
2827 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
2828 			vm->fault_info.vmhub |=
2829 				(vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
2830 		} else if (AMDGPU_IS_MMHUB0(vmhub)) {
2831 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
2832 			vm->fault_info.vmhub |=
2833 				(vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
2834 		} else if (AMDGPU_IS_MMHUB1(vmhub)) {
2835 			vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
2836 			vm->fault_info.vmhub |=
2837 				(vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
2838 		} else {
2839 			WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
2840 		}
2841 	}
2842 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2843 }
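
/*
 * Encoding example (illustrative only): for a fault reported by MM hub 1
 * instance 3, i.e. vmhub == AMDGPU_MMHUB1(3), the cached value becomes
 * AMDGPU_VMHUB_TYPE_MM1 | (3 << AMDGPU_VMHUB_IDX_SHIFT), so both the hub
 * type and the hub instance can be recovered from fault_info.vmhub.
 */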
2844 
2845