1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47
48 /**
49 * DOC: GPUVM
50 *
51 * GPUVM is the MMU functionality provided on the GPU.
52 * GPUVM is similar to the legacy GART on older asics, however
53 * rather than there being a single global GART table
54 * for the entire GPU, there can be multiple GPUVM page tables active
55 * at any given time. The GPUVM page tables can contain a mix of
56 * VRAM pages and system pages (both memory and MMIO), and system pages
57 * can be mapped as snooped (cached system pages) or unsnooped
58 * (uncached system pages).
59 *
60 * Each active GPUVM has an ID associated with it and there is a page table
61 * linked with each VMID. When executing a command buffer,
62 * the kernel tells the engine what VMID to use for that command
63 * buffer. VMIDs are allocated dynamically as commands are submitted.
64 * The userspace drivers maintain their own address space and the kernel
65 * sets up their page tables accordingly when they submit their
66 * command buffers and a VMID is assigned.
67 * The hardware supports up to 16 active GPUVMs at any given time.
68 *
69 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70 * on the ASIC family. GPUVM supports RWX attributes on each page as well
71 * as other features such as encryption and caching attributes.
72 *
73 * VMID 0 is special. It is the GPUVM used for the kernel driver. In
74 * addition to an aperture managed by a page table, VMID 0 also has
75 * several other apertures. There is an aperture for direct access to VRAM
76 * and there is a legacy AGP aperture which just forwards accesses directly
77 * to the matching system physical addresses (or IOVAs when an IOMMU is
78 * present). These apertures provide direct access to these memories without
79 * incurring the overhead of a page table. VMID 0 is used by the kernel
80 * driver for tasks like memory management.
81 *
82 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83 * For user applications, each application can have their own unique GPUVM
84 * address space. The application manages the address space and the kernel
85 * driver manages the GPUVM page tables for each process. If a GPU client
86 * accesses an invalid page, it will generate a GPU page fault, similar to
87 * accessing an invalid page on a CPU.
88 */
89
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 START, LAST, static, amdgpu_vm_it)
95
96 #undef START
97 #undef LAST
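/*
 * The INTERVAL_TREE_DEFINE() above generates the static helpers
 * amdgpu_vm_it_insert(), amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first()
 * and amdgpu_vm_it_iter_next(), which manage the per-VM interval tree of
 * struct amdgpu_bo_va_mapping objects keyed by GPU page number.
 *
 * A minimal sketch (illustrative only, not driver code) of visiting every
 * mapping that overlaps the GPU page range [saddr, eaddr]:
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
 *	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr))
 *		handle_mapping(m);
 *
 * where handle_mapping() stands in for whatever the caller wants to do.
 */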
98
99 /**
100 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
101 */
102 struct amdgpu_prt_cb {
103
104 /**
105 * @adev: amdgpu device
106 */
107 struct amdgpu_device *adev;
108
109 /**
110 * @cb: callback
111 */
112 struct dma_fence_cb cb;
113 };
114
115 /**
116 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117 */
118 struct amdgpu_vm_tlb_seq_struct {
119 /**
120 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 */
122 struct amdgpu_vm *vm;
123
124 /**
125 * @cb: callback
126 */
127 struct dma_fence_cb cb;
128 };
129
130 /**
131 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
132 *
133 * @adev: amdgpu_device pointer
134 * @vm: amdgpu_vm pointer
135 * @pasid: the pasid the VM is using on this GPU
136 *
137 * Set the pasid this VM is using on this GPU, can also be used to remove the
138 * pasid by passing in zero.
139 *
140 */
141 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
142 u32 pasid)
143 {
144 int r;
145
146 if (vm->pasid == pasid)
147 return 0;
148
149 if (vm->pasid) {
150 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
151 if (r < 0)
152 return r;
153
154 vm->pasid = 0;
155 }
156
157 if (pasid) {
158 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
159 GFP_KERNEL));
160 if (r < 0)
161 return r;
162
163 vm->pasid = pasid;
164 }
165
166
167 return 0;
168 }
169
170 /**
171 * amdgpu_vm_bo_evicted - vm_bo is evicted
172 *
173 * @vm_bo: vm_bo which is evicted
174 *
175 * State for PDs/PTs and per VM BOs which are not at the location they should
176 * be.
177 */
178 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
179 {
180 struct amdgpu_vm *vm = vm_bo->vm;
181 struct amdgpu_bo *bo = vm_bo->bo;
182
183 vm_bo->moved = true;
184 spin_lock(&vm_bo->vm->status_lock);
185 if (bo->tbo.type == ttm_bo_type_kernel)
186 list_move(&vm_bo->vm_status, &vm->evicted);
187 else
188 list_move_tail(&vm_bo->vm_status, &vm->evicted);
189 spin_unlock(&vm_bo->vm->status_lock);
190 }
191 /**
192 * amdgpu_vm_bo_moved - vm_bo is moved
193 *
194 * @vm_bo: vm_bo which is moved
195 *
196 * State for per VM BOs which are moved, but that change is not yet reflected
197 * in the page tables.
198 */
199 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
200 {
201 spin_lock(&vm_bo->vm->status_lock);
202 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
203 spin_unlock(&vm_bo->vm->status_lock);
204 }
205
206 /**
207 * amdgpu_vm_bo_idle - vm_bo is idle
208 *
209 * @vm_bo: vm_bo which is now idle
210 *
211 * State for PDs/PTs and per VM BOs which have gone through the state machine
212 * and are now idle.
213 */
214 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
215 {
216 spin_lock(&vm_bo->vm->status_lock);
217 list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
218 spin_unlock(&vm_bo->vm->status_lock);
219 vm_bo->moved = false;
220 }
221
222 /**
223 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
224 *
225 * @vm_bo: vm_bo which is now invalidated
226 *
227 * State for normal BOs which are invalidated and that change is not yet
228 * reflected in the PTs.
228 * in the PTs.
229 */
230 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
231 {
232 spin_lock(&vm_bo->vm->status_lock);
233 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
234 spin_unlock(&vm_bo->vm->status_lock);
235 }
236
237 /**
238 * amdgpu_vm_bo_evicted_user - vm_bo is evicted
239 *
240 * @vm_bo: vm_bo which is evicted
241 *
242 * State for BOs used by user mode queues which are not at the location they
243 * should be.
244 */
245 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
246 {
247 vm_bo->moved = true;
248 spin_lock(&vm_bo->vm->status_lock);
249 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
250 spin_unlock(&vm_bo->vm->status_lock);
251 }
252
253 /**
254 * amdgpu_vm_bo_relocated - vm_bo is relocated
255 *
256 * @vm_bo: vm_bo which is relocated
257 *
258 * State for PDs/PTs which need to update their parent PD.
259 * For the root PD, just move to idle state.
260 */
261 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
262 {
263 if (vm_bo->bo->parent) {
264 spin_lock(&vm_bo->vm->status_lock);
265 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
266 spin_unlock(&vm_bo->vm->status_lock);
267 } else {
268 amdgpu_vm_bo_idle(vm_bo);
269 }
270 }
271
272 /**
273 * amdgpu_vm_bo_done - vm_bo is done
274 *
275 * @vm_bo: vm_bo which is now done
276 *
277 * State for normal BOs which are invalidated and that change has been updated
278 * in the PTs.
279 */
280 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
281 {
282 spin_lock(&vm_bo->vm->status_lock);
283 list_move(&vm_bo->vm_status, &vm_bo->vm->done);
284 spin_unlock(&vm_bo->vm->status_lock);
285 }
286
287 /**
288 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
289 * @vm: the VM which state machine to reset
290 *
291 * Move all vm_bo objects in the VM into a state where they will be updated
292 * again during validation.
293 */
294 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
295 {
296 struct amdgpu_vm_bo_base *vm_bo, *tmp;
297
298 spin_lock(&vm->status_lock);
299 list_splice_init(&vm->done, &vm->invalidated);
300 list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
301 vm_bo->moved = true;
302 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
303 struct amdgpu_bo *bo = vm_bo->bo;
304
305 vm_bo->moved = true;
306 if (!bo || bo->tbo.type != ttm_bo_type_kernel)
307 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
308 else if (bo->parent)
309 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
310 }
311 spin_unlock(&vm->status_lock);
312 }
313
314 /**
315 * amdgpu_vm_update_shared - helper to update shared memory stat
316 * @base: base structure for tracking BO usage in a VM
317 *
318 * Takes the vm status_lock and updates the shared memory stat. If the basic
319 * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called
320 * as well.
321 */
322 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
323 {
324 struct amdgpu_vm *vm = base->vm;
325 struct amdgpu_bo *bo = base->bo;
326 uint64_t size = amdgpu_bo_size(bo);
327 uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
328 bool shared;
329
330 spin_lock(&vm->status_lock);
331 shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
332 if (base->shared != shared) {
333 base->shared = shared;
334 if (shared) {
335 vm->stats[bo_memtype].drm.shared += size;
336 vm->stats[bo_memtype].drm.private -= size;
337 } else {
338 vm->stats[bo_memtype].drm.shared -= size;
339 vm->stats[bo_memtype].drm.private += size;
340 }
341 }
342 spin_unlock(&vm->status_lock);
343 }
344
345 /**
346 * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
347 * @bo: amdgpu buffer object
348 *
349 * Update the per-VM stats for every VM the BO belongs to when it changes
350 * from private to shared or vice versa.
351 */
352 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
353 {
354 struct amdgpu_vm_bo_base *base;
355
356 for (base = bo->vm_bo; base; base = base->next)
357 amdgpu_vm_update_shared(base);
358 }
359
360 /**
361 * amdgpu_vm_update_stats_locked - helper to update normal memory stat
362 * @base: base structure for tracking BO usage in a VM
363 * @res: the ttm_resource to use for the purpose of accounting, may or may not
364 * be bo->tbo.resource
365 * @sign: if we should add (+1) or subtract (-1) from the stat
366 *
367 * Caller needs to hold the vm status_lock. Useful when multiple updates
368 * need to happen at the same time.
369 */
370 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
371 struct ttm_resource *res, int sign)
372 {
373 struct amdgpu_vm *vm = base->vm;
374 struct amdgpu_bo *bo = base->bo;
375 int64_t size = sign * amdgpu_bo_size(bo);
376 uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
377
378 	/* For drm-total- and drm-shared-, BOs are accounted by their preferred
379 * placement, see also amdgpu_bo_mem_stats_placement.
380 */
381 if (base->shared)
382 vm->stats[bo_memtype].drm.shared += size;
383 else
384 vm->stats[bo_memtype].drm.private += size;
385
386 if (res && res->mem_type < __AMDGPU_PL_NUM) {
387 uint32_t res_memtype = res->mem_type;
388
389 vm->stats[res_memtype].drm.resident += size;
390 		/* A BO only counts as purgeable if it is resident,
391 * since otherwise there's nothing to purge.
392 */
393 if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
394 vm->stats[res_memtype].drm.purgeable += size;
395 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
396 vm->stats[bo_memtype].evicted += size;
397 }
398 }
399
400 /**
401 * amdgpu_vm_update_stats - helper to update normal memory stat
402 * @base: base structure for tracking BO usage in a VM
403 * @res: the ttm_resource to use for the purpose of accounting, may or may not
404 * be bo->tbo.resource
405 * @sign: if we should add (+1) or subtract (-1) from the stat
406 *
407 * Updates the basic memory stat when bo is added/deleted/moved.
408 */
409 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
410 struct ttm_resource *res, int sign)
411 {
412 struct amdgpu_vm *vm = base->vm;
413
414 spin_lock(&vm->status_lock);
415 amdgpu_vm_update_stats_locked(base, res, sign);
416 spin_unlock(&vm->status_lock);
417 }
418
419 /**
420 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
421 *
422 * @base: base structure for tracking BO usage in a VM
423 * @vm: vm to which bo is to be added
424 * @bo: amdgpu buffer object
425 *
426 * Initialize a bo_va_base structure and add it to the appropriate lists
427 *
428 */
429 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
430 struct amdgpu_vm *vm, struct amdgpu_bo *bo)
431 {
432 base->vm = vm;
433 base->bo = bo;
434 base->next = NULL;
435 INIT_LIST_HEAD(&base->vm_status);
436
437 if (!bo)
438 return;
439 base->next = bo->vm_bo;
440 bo->vm_bo = base;
441
442 spin_lock(&vm->status_lock);
443 base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
444 amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
445 spin_unlock(&vm->status_lock);
446
447 if (!amdgpu_vm_is_bo_always_valid(vm, bo))
448 return;
449
450 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
451
452 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
453 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
454 amdgpu_vm_bo_relocated(base);
455 else
456 amdgpu_vm_bo_idle(base);
457
458 if (bo->preferred_domains &
459 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
460 return;
461
462 /*
463 	 * We checked all the prerequisites, but it looks like this per-VM BO
464 	 * is currently evicted. Add the BO to the evicted list to make sure it
465 	 * is validated on next VM use to avoid a fault.
466 	 */
467 amdgpu_vm_bo_evicted(base);
468 }
469
470 /**
471 * amdgpu_vm_lock_pd - lock PD in drm_exec
472 *
473 * @vm: vm providing the BOs
474 * @exec: drm execution context
475 * @num_fences: number of extra fences to reserve
476 *
477 * Lock the VM root PD in the DRM execution context.
478 */
479 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
480 unsigned int num_fences)
481 {
482 /* We need at least two fences for the VM PD/PT updates */
483 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
484 2 + num_fences);
485 }
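/*
 * A minimal caller sketch for amdgpu_vm_lock_pd() (illustrative only; the
 * exact drm_exec_init() arguments depend on the kernel version in use):
 *
 *	struct drm_exec exec;
 *	int r;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			break;
 *	}
 *	... do the page table updates while the root PD is locked ...
 *	drm_exec_fini(&exec);
 */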
486
487 /**
488 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
489 *
490 * @adev: amdgpu device pointer
491 * @vm: vm providing the BOs
492 *
493 * Move all BOs to the end of LRU and remember their positions to put them
494 * together.
495 */
496 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
497 struct amdgpu_vm *vm)
498 {
499 spin_lock(&adev->mman.bdev.lru_lock);
500 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
501 spin_unlock(&adev->mman.bdev.lru_lock);
502 }
503
504 /* Create scheduler entities for page table updates */
505 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
506 struct amdgpu_vm *vm)
507 {
508 int r;
509
510 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
511 adev->vm_manager.vm_pte_scheds,
512 adev->vm_manager.vm_pte_num_scheds, NULL);
513 if (r)
514 goto error;
515
516 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
517 adev->vm_manager.vm_pte_scheds,
518 adev->vm_manager.vm_pte_num_scheds, NULL);
519
520 error:
521 drm_sched_entity_destroy(&vm->immediate);
522 return r;
523 }
524
525 /* Destroy the entities for page table updates again */
526 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
527 {
528 drm_sched_entity_destroy(&vm->immediate);
529 drm_sched_entity_destroy(&vm->delayed);
530 }
531
532 /**
533 * amdgpu_vm_generation - return the page table re-generation counter
534 * @adev: the amdgpu_device
535 * @vm: optional VM to check, might be NULL
536 *
537 * Returns a page table re-generation token to allow checking if submissions
538 * are still valid to use this VM. The VM parameter might be NULL in which case
539 * just the VRAM lost counter will be used.
540 */
541 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
542 {
543 uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
544
545 if (!vm)
546 return result;
547
548 result += lower_32_bits(vm->generation);
549 /* Add one if the page tables will be re-generated on next CS */
550 if (drm_sched_entity_error(&vm->delayed))
551 ++result;
552
553 return result;
554 }
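/*
 * Layout of the generation token returned above (illustrative):
 *
 *	token = ((u64)vram_lost_counter << 32) | lower_32_bits(vm->generation)
 *	        + 1 if the delayed entity carries an error, i.e. the page
 *	            tables will be re-generated on the next CS
 *
 * A token cached by a submission therefore becomes stale when either VRAM
 * contents were lost or the per-VM page tables had to be rebuilt.
 */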
555
556 /**
557 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
558 *
559 * @adev: amdgpu device pointer
560 * @vm: vm providing the BOs
561 * @ticket: optional reservation ticket used to reserve the VM
562 * @validate: callback to do the validation
563 * @param: parameter for the validation callback
564 *
565 * Validate the page table BOs and per-VM BOs on command submission if
566 * necessary. If a ticket is given, also try to validate evicted user queue
567 * BOs. They must already be reserved with the given ticket.
568 *
569 * Returns:
570 * Validation result.
571 */
572 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
573 struct ww_acquire_ctx *ticket,
574 int (*validate)(void *p, struct amdgpu_bo *bo),
575 void *param)
576 {
577 uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
578 struct amdgpu_vm_bo_base *bo_base;
579 struct amdgpu_bo *bo;
580 int r;
581
582 if (vm->generation != new_vm_generation) {
583 vm->generation = new_vm_generation;
584 amdgpu_vm_bo_reset_state_machine(vm);
585 amdgpu_vm_fini_entities(vm);
586 r = amdgpu_vm_init_entities(adev, vm);
587 if (r)
588 return r;
589 }
590
591 spin_lock(&vm->status_lock);
592 while (!list_empty(&vm->evicted)) {
593 bo_base = list_first_entry(&vm->evicted,
594 struct amdgpu_vm_bo_base,
595 vm_status);
596 spin_unlock(&vm->status_lock);
597
598 bo = bo_base->bo;
599
600 r = validate(param, bo);
601 if (r)
602 return r;
603
604 if (bo->tbo.type != ttm_bo_type_kernel) {
605 amdgpu_vm_bo_moved(bo_base);
606 } else {
607 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
608 amdgpu_vm_bo_relocated(bo_base);
609 }
610 spin_lock(&vm->status_lock);
611 }
612 while (ticket && !list_empty(&vm->evicted_user)) {
613 bo_base = list_first_entry(&vm->evicted_user,
614 struct amdgpu_vm_bo_base,
615 vm_status);
616 spin_unlock(&vm->status_lock);
617
618 bo = bo_base->bo;
619
620 if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
621 struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
622
623 pr_warn_ratelimited("Evicted user BO is not reserved\n");
624 if (ti) {
625 pr_warn_ratelimited("pid %d\n", ti->pid);
626 amdgpu_vm_put_task_info(ti);
627 }
628
629 return -EINVAL;
630 }
631
632 r = validate(param, bo);
633 if (r)
634 return r;
635
636 amdgpu_vm_bo_invalidated(bo_base);
637
638 spin_lock(&vm->status_lock);
639 }
640 spin_unlock(&vm->status_lock);
641
642 amdgpu_vm_eviction_lock(vm);
643 vm->evicting = false;
644 amdgpu_vm_eviction_unlock(vm);
645
646 return 0;
647 }
648
649 /**
650 * amdgpu_vm_ready - check VM is ready for updates
651 *
652 * @vm: VM to check
653 *
654 * Check if all VM PDs/PTs are ready for updates
655 *
656 * Returns:
657 * True if VM is not evicting.
658 */
659 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
660 {
661 bool empty;
662 bool ret;
663
664 amdgpu_vm_eviction_lock(vm);
665 ret = !vm->evicting;
666 amdgpu_vm_eviction_unlock(vm);
667
668 spin_lock(&vm->status_lock);
669 empty = list_empty(&vm->evicted);
670 spin_unlock(&vm->status_lock);
671
672 return ret && empty;
673 }
674
675 /**
676 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
677 *
678 * @adev: amdgpu_device pointer
679 */
680 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
681 {
682 const struct amdgpu_ip_block *ip_block;
683 bool has_compute_vm_bug;
684 struct amdgpu_ring *ring;
685 int i;
686
687 has_compute_vm_bug = false;
688
689 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
690 if (ip_block) {
691 /* Compute has a VM bug for GFX version < 7.
692 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
693 if (ip_block->version->major <= 7)
694 has_compute_vm_bug = true;
695 else if (ip_block->version->major == 8)
696 if (adev->gfx.mec_fw_version < 673)
697 has_compute_vm_bug = true;
698 }
699
700 for (i = 0; i < adev->num_rings; i++) {
701 ring = adev->rings[i];
702 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
703 /* only compute rings */
704 ring->has_compute_vm_bug = has_compute_vm_bug;
705 else
706 ring->has_compute_vm_bug = false;
707 }
708 }
709
710 /**
711 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
712 *
713 * @ring: ring on which the job will be submitted
714 * @job: job to submit
715 *
716 * Returns:
717 * True if sync is needed.
718 */
719 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
720 struct amdgpu_job *job)
721 {
722 struct amdgpu_device *adev = ring->adev;
723 unsigned vmhub = ring->vm_hub;
724 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
725
726 if (job->vmid == 0)
727 return false;
728
729 if (job->vm_needs_flush || ring->has_compute_vm_bug)
730 return true;
731
732 if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
733 return true;
734
735 if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
736 return true;
737
738 return false;
739 }
740
741 /**
742 * amdgpu_vm_flush - hardware flush the vm
743 *
744 * @ring: ring to use for flush
745 * @job: related job
746 * @need_pipe_sync: is pipe sync needed
747 *
748 * Emit a VM flush when it is necessary.
749 *
750 * Returns:
751 * 0 on success, errno otherwise.
752 */
753 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
754 bool need_pipe_sync)
755 {
756 struct amdgpu_device *adev = ring->adev;
757 unsigned vmhub = ring->vm_hub;
758 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
759 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
760 bool spm_update_needed = job->spm_update_needed;
761 bool gds_switch_needed = ring->funcs->emit_gds_switch &&
762 job->gds_switch_needed;
763 bool vm_flush_needed = job->vm_needs_flush;
764 struct dma_fence *fence = NULL;
765 bool pasid_mapping_needed = false;
766 unsigned int patch;
767 int r;
768
769 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
770 gds_switch_needed = true;
771 vm_flush_needed = true;
772 pasid_mapping_needed = true;
773 spm_update_needed = true;
774 }
775
776 mutex_lock(&id_mgr->lock);
777 if (id->pasid != job->pasid || !id->pasid_mapping ||
778 !dma_fence_is_signaled(id->pasid_mapping))
779 pasid_mapping_needed = true;
780 mutex_unlock(&id_mgr->lock);
781
782 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
783 vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
784 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
785 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
786 ring->funcs->emit_wreg;
787
788 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
789 !(job->enforce_isolation && !job->vmid))
790 return 0;
791
792 amdgpu_ring_ib_begin(ring);
793 if (ring->funcs->init_cond_exec)
794 patch = amdgpu_ring_init_cond_exec(ring,
795 ring->cond_exe_gpu_addr);
796
797 if (need_pipe_sync)
798 amdgpu_ring_emit_pipeline_sync(ring);
799
800 if (adev->gfx.enable_cleaner_shader &&
801 ring->funcs->emit_cleaner_shader &&
802 job->enforce_isolation)
803 ring->funcs->emit_cleaner_shader(ring);
804
805 if (vm_flush_needed) {
806 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
807 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
808 }
809
810 if (pasid_mapping_needed)
811 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
812
813 if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
814 adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
815
816 if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
817 gds_switch_needed) {
818 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
819 job->gds_size, job->gws_base,
820 job->gws_size, job->oa_base,
821 job->oa_size);
822 }
823
824 if (vm_flush_needed || pasid_mapping_needed) {
825 r = amdgpu_fence_emit(ring, &fence, NULL, 0);
826 if (r)
827 return r;
828 }
829
830 if (vm_flush_needed) {
831 mutex_lock(&id_mgr->lock);
832 dma_fence_put(id->last_flush);
833 id->last_flush = dma_fence_get(fence);
834 id->current_gpu_reset_count =
835 atomic_read(&adev->gpu_reset_counter);
836 mutex_unlock(&id_mgr->lock);
837 }
838
839 if (pasid_mapping_needed) {
840 mutex_lock(&id_mgr->lock);
841 id->pasid = job->pasid;
842 dma_fence_put(id->pasid_mapping);
843 id->pasid_mapping = dma_fence_get(fence);
844 mutex_unlock(&id_mgr->lock);
845 }
846 dma_fence_put(fence);
847
848 amdgpu_ring_patch_cond_exec(ring, patch);
849
850 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
851 if (ring->funcs->emit_switch_buffer) {
852 amdgpu_ring_emit_switch_buffer(ring);
853 amdgpu_ring_emit_switch_buffer(ring);
854 }
855
856 amdgpu_ring_ib_end(ring);
857 return 0;
858 }
859
860 /**
861 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
862 *
863 * @vm: requested vm
864 * @bo: requested buffer object
865 *
866 * Find @bo inside the requested vm.
867 * Search inside the @bos vm list for the requested vm
868 * Returns the found bo_va or NULL if none is found
869 *
870 * Object has to be reserved!
871 *
872 * Returns:
873 * Found bo_va or NULL.
874 */
875 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
876 struct amdgpu_bo *bo)
877 {
878 struct amdgpu_vm_bo_base *base;
879
880 for (base = bo->vm_bo; base; base = base->next) {
881 if (base->vm != vm)
882 continue;
883
884 return container_of(base, struct amdgpu_bo_va, base);
885 }
886 return NULL;
887 }
888
889 /**
890 * amdgpu_vm_map_gart - Resolve gart mapping of addr
891 *
892 * @pages_addr: optional DMA address to use for lookup
893 * @addr: the unmapped addr
894 *
895 * Look up the physical address of the page that the pte resolves
896 * to.
897 *
898 * Returns:
899 * The pointer for the page table entry.
900 */
901 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
902 {
903 uint64_t result;
904
905 /* page table offset */
906 result = pages_addr[addr >> PAGE_SHIFT];
907
908 /* in case cpu page size != gpu page size*/
909 result |= addr & (~PAGE_MASK);
910
911 result &= 0xFFFFFFFFFFFFF000ULL;
912
913 return result;
914 }
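/*
 * Worked example (illustrative): with 64 KiB CPU pages (PAGE_SHIFT == 16)
 * and 4 KiB GPU pages, addr == 0x13000 resolves to
 *
 *	pages_addr[0x13000 >> 16] | (0x13000 & 0xFFFF) == pages_addr[1] | 0x3000
 *
 * i.e. the DMA address of CPU page 1 plus the offset of the GPU page inside
 * it; the final mask only clears the low 12 bits, so that sub-CPU-page
 * offset survives. With 4 KiB CPU pages the OR is effectively a no-op.
 */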
915
916 /**
917 * amdgpu_vm_update_pdes - make sure that all directories are valid
918 *
919 * @adev: amdgpu_device pointer
920 * @vm: requested vm
921 * @immediate: submit immediately to the paging queue
922 *
923 * Makes sure all directories are up to date.
924 *
925 * Returns:
926 * 0 for success, error for failure.
927 */
928 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
929 struct amdgpu_vm *vm, bool immediate)
930 {
931 struct amdgpu_vm_update_params params;
932 struct amdgpu_vm_bo_base *entry;
933 bool flush_tlb_needed = false;
934 LIST_HEAD(relocated);
935 int r, idx;
936
937 spin_lock(&vm->status_lock);
938 list_splice_init(&vm->relocated, &relocated);
939 spin_unlock(&vm->status_lock);
940
941 if (list_empty(&relocated))
942 return 0;
943
944 if (!drm_dev_enter(adev_to_drm(adev), &idx))
945 return -ENODEV;
946
947 	memset(&params, 0, sizeof(params));
948 params.adev = adev;
949 params.vm = vm;
950 params.immediate = immediate;
951
952 	r = vm->update_funcs->prepare(&params, NULL);
953 if (r)
954 goto error;
955
956 list_for_each_entry(entry, &relocated, vm_status) {
957 /* vm_flush_needed after updating moved PDEs */
958 flush_tlb_needed |= entry->moved;
959
960 		r = amdgpu_vm_pde_update(&params, entry);
961 if (r)
962 goto error;
963 }
964
965 	r = vm->update_funcs->commit(&params, &vm->last_update);
966 if (r)
967 goto error;
968
969 if (flush_tlb_needed)
970 atomic64_inc(&vm->tlb_seq);
971
972 while (!list_empty(&relocated)) {
973 entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
974 vm_status);
975 amdgpu_vm_bo_idle(entry);
976 }
977
978 error:
979 drm_dev_exit(idx);
980 return r;
981 }
982
983 /**
984 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
985 * @fence: unused
986 * @cb: the callback structure
987 *
988 * Increments the tlb sequence to make sure that future CS execute a VM flush.
989 */
990 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
991 struct dma_fence_cb *cb)
992 {
993 struct amdgpu_vm_tlb_seq_struct *tlb_cb;
994
995 tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
996 atomic64_inc(&tlb_cb->vm->tlb_seq);
997 kfree(tlb_cb);
998 }
999
1000 /**
1001 * amdgpu_vm_tlb_flush - prepare TLB flush
1002 *
1003 * @params: parameters for update
1004 * @fence: input fence to sync TLB flush with
1005 * @tlb_cb: the callback structure
1006 *
1007 * Increments the tlb sequence to make sure that future CS execute a VM flush.
1008 */
1009 static void
1010 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1011 struct dma_fence **fence,
1012 struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1013 {
1014 struct amdgpu_vm *vm = params->vm;
1015
1016 tlb_cb->vm = vm;
1017 if (!fence || !*fence) {
1018 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1019 return;
1020 }
1021
1022 if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1023 amdgpu_vm_tlb_seq_cb)) {
1024 dma_fence_put(vm->last_tlb_flush);
1025 vm->last_tlb_flush = dma_fence_get(*fence);
1026 } else {
1027 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1028 }
1029
1030 /* Prepare a TLB flush fence to be attached to PTs */
1031 if (!params->unlocked && vm->is_compute_context) {
1032 amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1033
1034 /* Makes sure no PD/PT is freed before the flush */
1035 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1036 DMA_RESV_USAGE_BOOKKEEP);
1037 }
1038 }
1039
1040 /**
1041 * amdgpu_vm_update_range - update a range in the vm page table
1042 *
1043 * @adev: amdgpu_device pointer to use for commands
1044 * @vm: the VM to update the range
1045 * @immediate: immediate submission in a page fault
1046 * @unlocked: unlocked invalidation during MM callback
1047 * @flush_tlb: trigger tlb invalidation after update completed
1048 * @allow_override: change MTYPE for local NUMA nodes
1049 * @sync: fences we need to sync to
1050 * @start: start of mapped range
1051 * @last: last mapped entry
1052 * @flags: flags for the entries
1053 * @offset: offset into nodes and pages_addr
1054 * @vram_base: base for vram mappings
1055 * @res: ttm_resource to map
1056 * @pages_addr: DMA addresses to use for mapping
1057 * @fence: optional resulting fence
1058 *
1059 * Fill in the page table entries between @start and @last.
1060 *
1061 * Returns:
1062 * 0 for success, negative error code for failure.
1063 */
1064 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1065 bool immediate, bool unlocked, bool flush_tlb,
1066 bool allow_override, struct amdgpu_sync *sync,
1067 uint64_t start, uint64_t last, uint64_t flags,
1068 uint64_t offset, uint64_t vram_base,
1069 struct ttm_resource *res, dma_addr_t *pages_addr,
1070 struct dma_fence **fence)
1071 {
1072 struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1073 struct amdgpu_vm_update_params params;
1074 struct amdgpu_res_cursor cursor;
1075 int r, idx;
1076
1077 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1078 return -ENODEV;
1079
1080 tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
1081 if (!tlb_cb) {
1082 drm_dev_exit(idx);
1083 return -ENOMEM;
1084 }
1085
1086 	/* On Vega20 with XGMI, PTEs get inadvertently cached in the L2 texture
1087 	 * cache, so do a heavy-weight TLB flush unconditionally.
1088 */
1089 flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1090 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1091
1092 /*
1093 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1094 */
1095 flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1096
1097 	memset(&params, 0, sizeof(params));
1098 params.adev = adev;
1099 params.vm = vm;
1100 params.immediate = immediate;
1101 params.pages_addr = pages_addr;
1102 params.unlocked = unlocked;
1103 params.needs_flush = flush_tlb;
1104 params.allow_override = allow_override;
1105 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1106
1107 amdgpu_vm_eviction_lock(vm);
1108 if (vm->evicting) {
1109 r = -EBUSY;
1110 goto error_free;
1111 }
1112
1113 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1114 struct dma_fence *tmp = dma_fence_get_stub();
1115
1116 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1117 swap(vm->last_unlocked, tmp);
1118 dma_fence_put(tmp);
1119 }
1120
1121 	r = vm->update_funcs->prepare(&params, sync);
1122 if (r)
1123 goto error_free;
1124
1125 amdgpu_res_first(pages_addr ? NULL : res, offset,
1126 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1127 while (cursor.remaining) {
1128 uint64_t tmp, num_entries, addr;
1129
1130 num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1131 if (pages_addr) {
1132 bool contiguous = true;
1133
1134 if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1135 uint64_t pfn = cursor.start >> PAGE_SHIFT;
1136 uint64_t count;
1137
1138 contiguous = pages_addr[pfn + 1] ==
1139 pages_addr[pfn] + PAGE_SIZE;
1140
1141 tmp = num_entries /
1142 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1143 for (count = 2; count < tmp; ++count) {
1144 uint64_t idx = pfn + count;
1145
1146 if (contiguous != (pages_addr[idx] ==
1147 pages_addr[idx - 1] + PAGE_SIZE))
1148 break;
1149 }
1150 if (!contiguous)
1151 count--;
1152 num_entries = count *
1153 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1154 }
1155
1156 if (!contiguous) {
1157 addr = cursor.start;
1158 params.pages_addr = pages_addr;
1159 } else {
1160 addr = pages_addr[cursor.start >> PAGE_SHIFT];
1161 params.pages_addr = NULL;
1162 }
1163
1164 } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1165 addr = vram_base + cursor.start;
1166 } else {
1167 addr = 0;
1168 }
1169
1170 tmp = start + num_entries;
1171 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1172 if (r)
1173 goto error_free;
1174
1175 amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1176 start = tmp;
1177 }
1178
1179 	r = vm->update_funcs->commit(&params, fence);
1180 if (r)
1181 goto error_free;
1182
1183 if (params.needs_flush) {
1184 		amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1185 tlb_cb = NULL;
1186 }
1187
1188 	amdgpu_vm_pt_free_list(adev, &params);
1189
1190 error_free:
1191 kfree(tlb_cb);
1192 amdgpu_vm_eviction_unlock(vm);
1193 drm_dev_exit(idx);
1194 return r;
1195 }
1196
1197 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1198 struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1199 {
1200 spin_lock(&vm->status_lock);
1201 memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1202 spin_unlock(&vm->status_lock);
1203 }
1204
1205 /**
1206 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1207 *
1208 * @adev: amdgpu_device pointer
1209 * @bo_va: requested BO and VM object
1210 * @clear: if true clear the entries
1211 *
1212 * Fill in the page table entries for @bo_va.
1213 *
1214 * Returns:
1215 * 0 for success, -EINVAL for failure.
1216 */
1217 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1218 bool clear)
1219 {
1220 struct amdgpu_bo *bo = bo_va->base.bo;
1221 struct amdgpu_vm *vm = bo_va->base.vm;
1222 struct amdgpu_bo_va_mapping *mapping;
1223 struct dma_fence **last_update;
1224 dma_addr_t *pages_addr = NULL;
1225 struct ttm_resource *mem;
1226 struct amdgpu_sync sync;
1227 bool flush_tlb = clear;
1228 uint64_t vram_base;
1229 uint64_t flags;
1230 bool uncached;
1231 int r;
1232
1233 amdgpu_sync_create(&sync);
1234 if (clear) {
1235 mem = NULL;
1236
1237 /* Implicitly sync to command submissions in the same VM before
1238 * unmapping.
1239 */
1240 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1241 AMDGPU_SYNC_EQ_OWNER, vm);
1242 if (r)
1243 goto error_free;
1244 if (bo) {
1245 r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1246 if (r)
1247 goto error_free;
1248 }
1249 } else if (!bo) {
1250 mem = NULL;
1251
1252 /* PRT map operations don't need to sync to anything. */
1253
1254 } else {
1255 struct drm_gem_object *obj = &bo->tbo.base;
1256
1257 if (obj->import_attach && bo_va->is_xgmi) {
1258 struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1259 struct drm_gem_object *gobj = dma_buf->priv;
1260 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1261
1262 if (abo->tbo.resource &&
1263 abo->tbo.resource->mem_type == TTM_PL_VRAM)
1264 bo = gem_to_amdgpu_bo(gobj);
1265 }
1266 mem = bo->tbo.resource;
1267 if (mem && (mem->mem_type == TTM_PL_TT ||
1268 mem->mem_type == AMDGPU_PL_PREEMPT))
1269 pages_addr = bo->tbo.ttm->dma_address;
1270
1271 /* Implicitly sync to moving fences before mapping anything */
1272 r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1273 AMDGPU_SYNC_EXPLICIT, vm);
1274 if (r)
1275 goto error_free;
1276 }
1277
1278 if (bo) {
1279 struct amdgpu_device *bo_adev;
1280
1281 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1282
1283 if (amdgpu_bo_encrypted(bo))
1284 flags |= AMDGPU_PTE_TMZ;
1285
1286 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1287 vram_base = bo_adev->vm_manager.vram_base_offset;
1288 uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1289 } else {
1290 flags = 0x0;
1291 vram_base = 0;
1292 uncached = false;
1293 }
1294
1295 if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1296 last_update = &vm->last_update;
1297 else
1298 last_update = &bo_va->last_pt_update;
1299
1300 if (!clear && bo_va->base.moved) {
1301 flush_tlb = true;
1302 list_splice_init(&bo_va->valids, &bo_va->invalids);
1303
1304 } else if (bo_va->cleared != clear) {
1305 list_splice_init(&bo_va->valids, &bo_va->invalids);
1306 }
1307
1308 list_for_each_entry(mapping, &bo_va->invalids, list) {
1309 uint64_t update_flags = flags;
1310
1311 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1312 		 * bits here, but just in case we filter the flags again anyway.
1313 */
1314 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1315 update_flags &= ~AMDGPU_PTE_READABLE;
1316 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1317 update_flags &= ~AMDGPU_PTE_WRITEABLE;
1318
1319 /* Apply ASIC specific mapping flags */
1320 amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1321
1322 trace_amdgpu_vm_bo_update(mapping);
1323
1324 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1325 !uncached, &sync, mapping->start,
1326 mapping->last, update_flags,
1327 mapping->offset, vram_base, mem,
1328 pages_addr, last_update);
1329 if (r)
1330 goto error_free;
1331 }
1332
1333 /* If the BO is not in its preferred location add it back to
1334 * the evicted list so that it gets validated again on the
1335 * next command submission.
1336 */
1337 if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1338 if (bo->tbo.resource &&
1339 !(bo->preferred_domains &
1340 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1341 amdgpu_vm_bo_evicted(&bo_va->base);
1342 else
1343 amdgpu_vm_bo_idle(&bo_va->base);
1344 } else {
1345 amdgpu_vm_bo_done(&bo_va->base);
1346 }
1347
1348 list_splice_init(&bo_va->invalids, &bo_va->valids);
1349 bo_va->cleared = clear;
1350 bo_va->base.moved = false;
1351
1352 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1353 list_for_each_entry(mapping, &bo_va->valids, list)
1354 trace_amdgpu_vm_bo_mapping(mapping);
1355 }
1356
1357 error_free:
1358 amdgpu_sync_free(&sync);
1359 return r;
1360 }
1361
1362 /**
1363 * amdgpu_vm_update_prt_state - update the global PRT state
1364 *
1365 * @adev: amdgpu_device pointer
1366 */
1367 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1368 {
1369 unsigned long flags;
1370 bool enable;
1371
1372 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1373 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1374 adev->gmc.gmc_funcs->set_prt(adev, enable);
1375 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1376 }
1377
1378 /**
1379 * amdgpu_vm_prt_get - add a PRT user
1380 *
1381 * @adev: amdgpu_device pointer
1382 */
1383 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1384 {
1385 if (!adev->gmc.gmc_funcs->set_prt)
1386 return;
1387
1388 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1389 amdgpu_vm_update_prt_state(adev);
1390 }
1391
1392 /**
1393 * amdgpu_vm_prt_put - drop a PRT user
1394 *
1395 * @adev: amdgpu_device pointer
1396 */
1397 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1398 {
1399 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1400 amdgpu_vm_update_prt_state(adev);
1401 }
1402
1403 /**
1404 * amdgpu_vm_prt_cb - callback for updating the PRT status
1405 *
1406 * @fence: fence for the callback
1407 * @_cb: the callback function
1408 */
1409 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1410 {
1411 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1412
1413 amdgpu_vm_prt_put(cb->adev);
1414 kfree(cb);
1415 }
1416
1417 /**
1418 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1419 *
1420 * @adev: amdgpu_device pointer
1421 * @fence: fence for the callback
1422 */
1423 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1424 struct dma_fence *fence)
1425 {
1426 struct amdgpu_prt_cb *cb;
1427
1428 if (!adev->gmc.gmc_funcs->set_prt)
1429 return;
1430
1431 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1432 if (!cb) {
1433 /* Last resort when we are OOM */
1434 if (fence)
1435 dma_fence_wait(fence, false);
1436
1437 amdgpu_vm_prt_put(adev);
1438 } else {
1439 cb->adev = adev;
1440 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1441 amdgpu_vm_prt_cb))
1442 amdgpu_vm_prt_cb(fence, &cb->cb);
1443 }
1444 }
1445
1446 /**
1447 * amdgpu_vm_free_mapping - free a mapping
1448 *
1449 * @adev: amdgpu_device pointer
1450 * @vm: requested vm
1451 * @mapping: mapping to be freed
1452 * @fence: fence of the unmap operation
1453 *
1454 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1455 */
1456 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1457 struct amdgpu_vm *vm,
1458 struct amdgpu_bo_va_mapping *mapping,
1459 struct dma_fence *fence)
1460 {
1461 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1462 amdgpu_vm_add_prt_cb(adev, fence);
1463 kfree(mapping);
1464 }
1465
1466 /**
1467 * amdgpu_vm_prt_fini - finish all prt mappings
1468 *
1469 * @adev: amdgpu_device pointer
1470 * @vm: requested vm
1471 *
1472 * Register a cleanup callback to disable PRT support after VM dies.
1473 */
1474 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1475 {
1476 struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1477 struct dma_resv_iter cursor;
1478 struct dma_fence *fence;
1479
1480 dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1481 /* Add a callback for each fence in the reservation object */
1482 amdgpu_vm_prt_get(adev);
1483 amdgpu_vm_add_prt_cb(adev, fence);
1484 }
1485 }
1486
1487 /**
1488 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1489 *
1490 * @adev: amdgpu_device pointer
1491 * @vm: requested vm
1492 * @fence: optional resulting fence (unchanged if no work needed to be done
1493 * or if an error occurred)
1494 *
1495 * Make sure all freed BOs are cleared in the PT.
1496 * PTs have to be reserved and mutex must be locked!
1497 *
1498 * Returns:
1499 * 0 for success.
1500 *
1501 */
1502 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1503 struct amdgpu_vm *vm,
1504 struct dma_fence **fence)
1505 {
1506 struct amdgpu_bo_va_mapping *mapping;
1507 struct dma_fence *f = NULL;
1508 struct amdgpu_sync sync;
1509 int r;
1510
1511
1512 /*
1513 * Implicitly sync to command submissions in the same VM before
1514 * unmapping.
1515 */
1516 amdgpu_sync_create(&sync);
1517 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1518 AMDGPU_SYNC_EQ_OWNER, vm);
1519 if (r)
1520 goto error_free;
1521
1522 while (!list_empty(&vm->freed)) {
1523 mapping = list_first_entry(&vm->freed,
1524 struct amdgpu_bo_va_mapping, list);
1525 list_del(&mapping->list);
1526
1527 r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1528 &sync, mapping->start, mapping->last,
1529 0, 0, 0, NULL, NULL, &f);
1530 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1531 if (r) {
1532 dma_fence_put(f);
1533 goto error_free;
1534 }
1535 }
1536
1537 if (fence && f) {
1538 dma_fence_put(*fence);
1539 *fence = f;
1540 } else {
1541 dma_fence_put(f);
1542 }
1543
1544 error_free:
1545 amdgpu_sync_free(&sync);
1546 return r;
1547
1548 }
1549
1550 /**
1551 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1552 *
1553 * @adev: amdgpu_device pointer
1554 * @vm: requested vm
1555 * @ticket: optional reservation ticket used to reserve the VM
1556 *
1557 * Make sure all BOs which are moved are updated in the PTs.
1558 *
1559 * Returns:
1560 * 0 for success.
1561 *
1562 * PTs have to be reserved!
1563 */
1564 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1565 struct amdgpu_vm *vm,
1566 struct ww_acquire_ctx *ticket)
1567 {
1568 struct amdgpu_bo_va *bo_va;
1569 struct dma_resv *resv;
1570 bool clear, unlock;
1571 int r;
1572
1573 spin_lock(&vm->status_lock);
1574 while (!list_empty(&vm->moved)) {
1575 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1576 base.vm_status);
1577 spin_unlock(&vm->status_lock);
1578
1579 		/* Per VM BOs never need to be cleared in the page tables */
1580 r = amdgpu_vm_bo_update(adev, bo_va, false);
1581 if (r)
1582 return r;
1583 spin_lock(&vm->status_lock);
1584 }
1585
1586 while (!list_empty(&vm->invalidated)) {
1587 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1588 base.vm_status);
1589 resv = bo_va->base.bo->tbo.base.resv;
1590 spin_unlock(&vm->status_lock);
1591
1592 /* Try to reserve the BO to avoid clearing its ptes */
1593 if (!adev->debug_vm && dma_resv_trylock(resv)) {
1594 clear = false;
1595 unlock = true;
1596 /* The caller is already holding the reservation lock */
1597 } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1598 clear = false;
1599 unlock = false;
1600 /* Somebody else is using the BO right now */
1601 } else {
1602 clear = true;
1603 unlock = false;
1604 }
1605
1606 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1607
1608 if (unlock)
1609 dma_resv_unlock(resv);
1610 if (r)
1611 return r;
1612
1613 /* Remember evicted DMABuf imports in compute VMs for later
1614 * validation
1615 */
1616 if (vm->is_compute_context &&
1617 bo_va->base.bo->tbo.base.import_attach &&
1618 (!bo_va->base.bo->tbo.resource ||
1619 bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1620 amdgpu_vm_bo_evicted_user(&bo_va->base);
1621
1622 spin_lock(&vm->status_lock);
1623 }
1624 spin_unlock(&vm->status_lock);
1625
1626 return 0;
1627 }
1628
1629 /**
1630 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1631 *
1632 * @adev: amdgpu_device pointer
1633 * @vm: requested vm
1634 * @flush_type: flush type
1635 * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1636 *
1637 * Flush TLB if needed for a compute VM.
1638 *
1639 * Returns:
1640 * 0 for success.
1641 */
1642 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1643 struct amdgpu_vm *vm,
1644 uint32_t flush_type,
1645 uint32_t xcc_mask)
1646 {
1647 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1648 bool all_hub = false;
1649 int xcc = 0, r = 0;
1650
1651 WARN_ON_ONCE(!vm->is_compute_context);
1652
1653 /*
1654 * It can be that we race and lose here, but that is extremely unlikely
1655 * and the worst thing which could happen is that we flush the changes
1656 * into the TLB once more which is harmless.
1657 */
1658 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1659 return 0;
1660
1661 if (adev->family == AMDGPU_FAMILY_AI ||
1662 adev->family == AMDGPU_FAMILY_RV)
1663 all_hub = true;
1664
1665 for_each_inst(xcc, xcc_mask) {
1666 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1667 all_hub, xcc);
1668 if (r)
1669 break;
1670 }
1671 return r;
1672 }
1673
1674 /**
1675 * amdgpu_vm_bo_add - add a bo to a specific vm
1676 *
1677 * @adev: amdgpu_device pointer
1678 * @vm: requested vm
1679 * @bo: amdgpu buffer object
1680 *
1681 * Add @bo into the requested vm.
1682 * Add @bo to the list of bos associated with the vm
1683 *
1684 * Returns:
1685 * Newly added bo_va or NULL for failure
1686 *
1687 * Object has to be reserved!
1688 */
1689 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1690 struct amdgpu_vm *vm,
1691 struct amdgpu_bo *bo)
1692 {
1693 struct amdgpu_bo_va *bo_va;
1694
1695 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1696 if (bo_va == NULL) {
1697 return NULL;
1698 }
1699 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1700
1701 bo_va->ref_count = 1;
1702 bo_va->last_pt_update = dma_fence_get_stub();
1703 INIT_LIST_HEAD(&bo_va->valids);
1704 INIT_LIST_HEAD(&bo_va->invalids);
1705
1706 if (!bo)
1707 return bo_va;
1708
1709 dma_resv_assert_held(bo->tbo.base.resv);
1710 if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1711 bo_va->is_xgmi = true;
1712 /* Power up XGMI if it can be potentially used */
1713 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1714 }
1715
1716 return bo_va;
1717 }
1718
1719
1720 /**
1721 * amdgpu_vm_bo_insert_map - insert a new mapping
1722 *
1723 * @adev: amdgpu_device pointer
1724 * @bo_va: bo_va to store the address
1725 * @mapping: the mapping to insert
1726 *
1727 * Insert a new mapping into all structures.
1728 */
1729 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1730 struct amdgpu_bo_va *bo_va,
1731 struct amdgpu_bo_va_mapping *mapping)
1732 {
1733 struct amdgpu_vm *vm = bo_va->base.vm;
1734 struct amdgpu_bo *bo = bo_va->base.bo;
1735
1736 mapping->bo_va = bo_va;
1737 list_add(&mapping->list, &bo_va->invalids);
1738 amdgpu_vm_it_insert(mapping, &vm->va);
1739
1740 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1741 amdgpu_vm_prt_get(adev);
1742
1743 if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1744 amdgpu_vm_bo_moved(&bo_va->base);
1745
1746 trace_amdgpu_vm_bo_map(bo_va, mapping);
1747 }
1748
1749 /* Validate operation parameters to prevent potential abuse */
1750 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1751 struct amdgpu_bo *bo,
1752 uint64_t saddr,
1753 uint64_t offset,
1754 uint64_t size)
1755 {
1756 uint64_t tmp, lpfn;
1757
1758 if (saddr & AMDGPU_GPU_PAGE_MASK
1759 || offset & AMDGPU_GPU_PAGE_MASK
1760 || size & AMDGPU_GPU_PAGE_MASK)
1761 return -EINVAL;
1762
1763 if (check_add_overflow(saddr, size, &tmp)
1764 || check_add_overflow(offset, size, &tmp)
1765 || size == 0 /* which also leads to end < begin */)
1766 return -EINVAL;
1767
1768 /* make sure object fit at this offset */
1769 if (bo && offset + size > amdgpu_bo_size(bo))
1770 return -EINVAL;
1771
1772 /* Ensure last pfn not exceed max_pfn */
1773 lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1774 if (lpfn >= adev->vm_manager.max_pfn)
1775 return -EINVAL;
1776
1777 return 0;
1778 }
1779
1780 /**
1781 * amdgpu_vm_bo_map - map bo inside a vm
1782 *
1783 * @adev: amdgpu_device pointer
1784 * @bo_va: bo_va to store the address
1785 * @saddr: where to map the BO
1786 * @offset: requested offset in the BO
1787 * @size: BO size in bytes
1788 * @flags: attributes of pages (read/write/valid/etc.)
1789 *
1790 * Add a mapping of the BO at the specified addr into the VM.
1791 *
1792 * Returns:
1793 * 0 for success, error for failure.
1794 *
1795 * Object has to be reserved and unreserved outside!
1796 */
1797 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1798 struct amdgpu_bo_va *bo_va,
1799 uint64_t saddr, uint64_t offset,
1800 uint64_t size, uint64_t flags)
1801 {
1802 struct amdgpu_bo_va_mapping *mapping, *tmp;
1803 struct amdgpu_bo *bo = bo_va->base.bo;
1804 struct amdgpu_vm *vm = bo_va->base.vm;
1805 uint64_t eaddr;
1806 int r;
1807
1808 r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1809 if (r)
1810 return r;
1811
1812 saddr /= AMDGPU_GPU_PAGE_SIZE;
1813 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1814
1815 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1816 if (tmp) {
1817 /* bo and tmp overlap, invalid addr */
1818 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1819 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1820 tmp->start, tmp->last + 1);
1821 return -EINVAL;
1822 }
1823
1824 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1825 if (!mapping)
1826 return -ENOMEM;
1827
1828 mapping->start = saddr;
1829 mapping->last = eaddr;
1830 mapping->offset = offset;
1831 mapping->flags = flags;
1832
1833 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1834
1835 return 0;
1836 }
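/*
 * Illustrative usage sketch (hypothetical values, not from this file): map
 * the first 1 MiB of a reserved BO at a GPU-page-aligned virtual address.
 *
 *   r = amdgpu_vm_bo_map(adev, bo_va, 0x800000000ULL, 0, 0x100000,
 *                        AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *                        AMDGPU_PTE_EXECUTABLE);
 *   if (r)
 *       ... the range was invalid or overlapped an existing mapping ...
 */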
1837
1838 /**
1839 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1840 *
1841 * @adev: amdgpu_device pointer
1842 * @bo_va: bo_va to store the address
1843 * @saddr: where to map the BO
1844 * @offset: requested offset in the BO
1845 * @size: size of the mapping in bytes
1846 * @flags: attributes of pages (read/write/valid/etc.)
1847 *
1848 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1849 * mappings as we do so.
1850 *
1851 * Returns:
1852 * 0 for success, error for failure.
1853 *
1854 * Object has to be reserved and unreserved outside!
1855 */
1856 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1857 struct amdgpu_bo_va *bo_va,
1858 uint64_t saddr, uint64_t offset,
1859 uint64_t size, uint64_t flags)
1860 {
1861 struct amdgpu_bo_va_mapping *mapping;
1862 struct amdgpu_bo *bo = bo_va->base.bo;
1863 uint64_t eaddr;
1864 int r;
1865
1866 r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1867 if (r)
1868 return r;
1869
1870 /* Allocate all the needed memory */
1871 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1872 if (!mapping)
1873 return -ENOMEM;
1874
1875 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1876 if (r) {
1877 kfree(mapping);
1878 return r;
1879 }
1880
1881 saddr /= AMDGPU_GPU_PAGE_SIZE;
1882 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1883
1884 mapping->start = saddr;
1885 mapping->last = eaddr;
1886 mapping->offset = offset;
1887 mapping->flags = flags;
1888
1889 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1890
1891 return 0;
1892 }
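/*
 * Illustrative contrast with amdgpu_vm_bo_map() (hypothetical values):
 * instead of failing on an overlap, this variant first clears whatever is
 * currently mapped in [saddr, saddr + size) and then inserts the new
 * mapping, so a caller can rebind a VA range in one call.
 *
 *   r = amdgpu_vm_bo_replace_map(adev, bo_va, va, new_offset, size, flags);
 *
 * The same reservation rules as for amdgpu_vm_bo_map() apply.
 */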
1893
1894 /**
1895 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1896 *
1897 * @adev: amdgpu_device pointer
1898 * @bo_va: bo_va to remove the address from
1899 * @saddr: where the BO is mapped
1900 *
1901 * Remove a mapping of the BO at the specified addr from the VM.
1902 *
1903 * Returns:
1904 * 0 for success, error for failure.
1905 *
1906 * Object has to be reserved and unreserved outside!
1907 */
1908 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1909 struct amdgpu_bo_va *bo_va,
1910 uint64_t saddr)
1911 {
1912 struct amdgpu_bo_va_mapping *mapping;
1913 struct amdgpu_vm *vm = bo_va->base.vm;
1914 bool valid = true;
1915
1916 saddr /= AMDGPU_GPU_PAGE_SIZE;
1917
1918 list_for_each_entry(mapping, &bo_va->valids, list) {
1919 if (mapping->start == saddr)
1920 break;
1921 }
1922
1923 if (&mapping->list == &bo_va->valids) {
1924 valid = false;
1925
1926 list_for_each_entry(mapping, &bo_va->invalids, list) {
1927 if (mapping->start == saddr)
1928 break;
1929 }
1930
1931 if (&mapping->list == &bo_va->invalids)
1932 return -ENOENT;
1933 }
1934
1935 list_del(&mapping->list);
1936 amdgpu_vm_it_remove(mapping, &vm->va);
1937 mapping->bo_va = NULL;
1938 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1939
1940 if (valid)
1941 list_add(&mapping->list, &vm->freed);
1942 else
1943 amdgpu_vm_free_mapping(adev, vm, mapping,
1944 bo_va->last_pt_update);
1945
1946 return 0;
1947 }
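/*
 * Illustrative teardown sketch (hypothetical values): unmapping uses the
 * same GPU VA that was passed to amdgpu_vm_bo_map(); -ENOENT means no
 * mapping of this bo_va starts at that address.
 *
 *   r = amdgpu_vm_bo_unmap(adev, bo_va, 0x800000000ULL);
 *   if (r == -ENOENT)
 *       ... nothing was mapped at this VA for this bo_va ...
 */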
1948
1949 /**
1950 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1951 *
1952 * @adev: amdgpu_device pointer
1953 * @vm: VM structure to use
1954 * @saddr: start of the range
1955 * @size: size of the range
1956 *
1957 * Remove all mappings in a range, split them as appropriate.
1958 *
1959 * Returns:
1960 * 0 for success, error for failure.
1961 */
1962 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1963 struct amdgpu_vm *vm,
1964 uint64_t saddr, uint64_t size)
1965 {
1966 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1967 LIST_HEAD(removed);
1968 uint64_t eaddr;
1969 int r;
1970
1971 r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1972 if (r)
1973 return r;
1974
1975 saddr /= AMDGPU_GPU_PAGE_SIZE;
1976 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1977
1978 /* Allocate all the needed memory */
1979 before = kzalloc(sizeof(*before), GFP_KERNEL);
1980 if (!before)
1981 return -ENOMEM;
1982 INIT_LIST_HEAD(&before->list);
1983
1984 after = kzalloc(sizeof(*after), GFP_KERNEL);
1985 if (!after) {
1986 kfree(before);
1987 return -ENOMEM;
1988 }
1989 INIT_LIST_HEAD(&after->list);
1990
1991 /* Now gather all removed mappings */
1992 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1993 while (tmp) {
1994 /* Remember mapping split at the start */
1995 if (tmp->start < saddr) {
1996 before->start = tmp->start;
1997 before->last = saddr - 1;
1998 before->offset = tmp->offset;
1999 before->flags = tmp->flags;
2000 before->bo_va = tmp->bo_va;
2001 list_add(&before->list, &tmp->bo_va->invalids);
2002 }
2003
2004 /* Remember mapping split at the end */
2005 if (tmp->last > eaddr) {
2006 after->start = eaddr + 1;
2007 after->last = tmp->last;
2008 after->offset = tmp->offset;
2009 after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2010 after->flags = tmp->flags;
2011 after->bo_va = tmp->bo_va;
2012 list_add(&after->list, &tmp->bo_va->invalids);
2013 }
2014
2015 list_del(&tmp->list);
2016 list_add(&tmp->list, &removed);
2017
2018 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2019 }
2020
2021 /* And free them up */
2022 list_for_each_entry_safe(tmp, next, &removed, list) {
2023 amdgpu_vm_it_remove(tmp, &vm->va);
2024 list_del(&tmp->list);
2025
2026 if (tmp->start < saddr)
2027 tmp->start = saddr;
2028 if (tmp->last > eaddr)
2029 tmp->last = eaddr;
2030
2031 tmp->bo_va = NULL;
2032 list_add(&tmp->list, &vm->freed);
2033 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2034 }
2035
2036 /* Insert partial mapping before the range */
2037 if (!list_empty(&before->list)) {
2038 struct amdgpu_bo *bo = before->bo_va->base.bo;
2039
2040 amdgpu_vm_it_insert(before, &vm->va);
2041 if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2042 amdgpu_vm_prt_get(adev);
2043
2044 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2045 !before->bo_va->base.moved)
2046 amdgpu_vm_bo_moved(&before->bo_va->base);
2047 } else {
2048 kfree(before);
2049 }
2050
2051 /* Insert partial mapping after the range */
2052 if (!list_empty(&after->list)) {
2053 struct amdgpu_bo *bo = after->bo_va->base.bo;
2054
2055 amdgpu_vm_it_insert(after, &vm->va);
2056 if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2057 amdgpu_vm_prt_get(adev);
2058
2059 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2060 !after->bo_va->base.moved)
2061 amdgpu_vm_bo_moved(&after->bo_va->base);
2062 } else {
2063 kfree(after);
2064 }
2065
2066 return 0;
2067 }
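/*
 * Worked example of the splitting above (pfn values, illustrative): with an
 * existing mapping covering pfns [0x100, 0x1ff], clearing the range
 * [0x140, 0x17f] leaves a "before" mapping [0x100, 0x13f] and an "after"
 * mapping [0x180, 0x1ff] whose offset is advanced by (0x180 - 0x100) pages,
 * while the middle part is clamped to the cleared range and queued on
 * vm->freed for the next page table update.
 */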
2068
2069 /**
2070 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2071 *
2072 * @vm: the requested VM
2073 * @addr: the address
2074 *
2075 * Find a mapping by its address.
2076 *
2077 * Returns:
2078 * The amdgpu_bo_va_mapping matching for addr or NULL
2079 *
2080 */
2081 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2082 uint64_t addr)
2083 {
2084 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2085 }
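/*
 * Illustrative usage (hypothetical caller): the interval tree stores
 * mappings in units of GPU pages, so a byte address is shifted down first.
 *
 *   mapping = amdgpu_vm_bo_lookup_mapping(vm, addr >> AMDGPU_GPU_PAGE_SHIFT);
 *   if (mapping && mapping->bo_va)
 *       bo = mapping->bo_va->base.bo;
 */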
2086
2087 /**
2088 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2089 *
2090 * @vm: the requested vm
2091 * @ticket: CS ticket
2092 *
2093 * Trace all mappings of BOs reserved during a command submission.
2094 */
2095 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2096 {
2097 struct amdgpu_bo_va_mapping *mapping;
2098
2099 if (!trace_amdgpu_vm_bo_cs_enabled())
2100 return;
2101
2102 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2103 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2104 if (mapping->bo_va && mapping->bo_va->base.bo) {
2105 struct amdgpu_bo *bo;
2106
2107 bo = mapping->bo_va->base.bo;
2108 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2109 ticket)
2110 continue;
2111 }
2112
2113 trace_amdgpu_vm_bo_cs(mapping);
2114 }
2115 }
2116
2117 /**
2118 * amdgpu_vm_bo_del - remove a bo from a specific vm
2119 *
2120 * @adev: amdgpu_device pointer
2121 * @bo_va: requested bo_va
2122 *
2123 * Remove @bo_va->bo from the requested vm.
2124 *
2125 * Object has to be reserved!
2126 */
2127 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2128 struct amdgpu_bo_va *bo_va)
2129 {
2130 struct amdgpu_bo_va_mapping *mapping, *next;
2131 struct amdgpu_bo *bo = bo_va->base.bo;
2132 struct amdgpu_vm *vm = bo_va->base.vm;
2133 struct amdgpu_vm_bo_base **base;
2134
2135 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2136
2137 if (bo) {
2138 dma_resv_assert_held(bo->tbo.base.resv);
2139 if (amdgpu_vm_is_bo_always_valid(vm, bo))
2140 ttm_bo_set_bulk_move(&bo->tbo, NULL);
2141
2142 for (base = &bo_va->base.bo->vm_bo; *base;
2143 base = &(*base)->next) {
2144 if (*base != &bo_va->base)
2145 continue;
2146
2147 amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2148 *base = bo_va->base.next;
2149 break;
2150 }
2151 }
2152
2153 spin_lock(&vm->status_lock);
2154 list_del(&bo_va->base.vm_status);
2155 spin_unlock(&vm->status_lock);
2156
2157 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2158 list_del(&mapping->list);
2159 amdgpu_vm_it_remove(mapping, &vm->va);
2160 mapping->bo_va = NULL;
2161 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2162 list_add(&mapping->list, &vm->freed);
2163 }
2164 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2165 list_del(&mapping->list);
2166 amdgpu_vm_it_remove(mapping, &vm->va);
2167 amdgpu_vm_free_mapping(adev, vm, mapping,
2168 bo_va->last_pt_update);
2169 }
2170
2171 dma_fence_put(bo_va->last_pt_update);
2172
2173 if (bo && bo_va->is_xgmi)
2174 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2175
2176 kfree(bo_va);
2177 }
2178
2179 /**
2180 * amdgpu_vm_evictable - check if we can evict a VM
2181 *
2182 * @bo: A page table of the VM.
2183 *
2184 * Check if it is possible to evict a VM.
2185 */
2186 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2187 {
2188 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2189
2190 /* Page tables of a destroyed VM can go away immediately */
2191 if (!bo_base || !bo_base->vm)
2192 return true;
2193
2194 /* Don't evict VM page tables while they are busy */
2195 if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2196 return false;
2197
2198 /* Try to block ongoing updates */
2199 if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2200 return false;
2201
2202 /* Don't evict VM page tables while they are updated */
2203 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2204 amdgpu_vm_eviction_unlock(bo_base->vm);
2205 return false;
2206 }
2207
2208 bo_base->vm->evicting = true;
2209 amdgpu_vm_eviction_unlock(bo_base->vm);
2210 return true;
2211 }
2212
2213 /**
2214 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2215 *
2216 * @bo: amdgpu buffer object
2217 * @evicted: is the BO evicted
2218 *
2219 * Mark @bo as invalid.
2220 */
2221 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2222 {
2223 struct amdgpu_vm_bo_base *bo_base;
2224
2225 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2226 struct amdgpu_vm *vm = bo_base->vm;
2227
2228 if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2229 amdgpu_vm_bo_evicted(bo_base);
2230 continue;
2231 }
2232
2233 if (bo_base->moved)
2234 continue;
2235 bo_base->moved = true;
2236
2237 if (bo->tbo.type == ttm_bo_type_kernel)
2238 amdgpu_vm_bo_relocated(bo_base);
2239 else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2240 amdgpu_vm_bo_moved(bo_base);
2241 else
2242 amdgpu_vm_bo_invalidated(bo_base);
2243 }
2244 }
2245
2246 /**
2247 * amdgpu_vm_bo_move - handle BO move
2248 *
2249 * @bo: amdgpu buffer object
2250 * @new_mem: the new placement of the BO move
2251 * @evicted: is the BO evicted
2252 *
2253 * Update the memory stats for the new placement and mark @bo as invalid.
2254 */
2255 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2256 bool evicted)
2257 {
2258 struct amdgpu_vm_bo_base *bo_base;
2259
2260 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2261 struct amdgpu_vm *vm = bo_base->vm;
2262
2263 spin_lock(&vm->status_lock);
2264 amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2265 amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2266 spin_unlock(&vm->status_lock);
2267 }
2268
2269 amdgpu_vm_bo_invalidate(bo, evicted);
2270 }
2271
2272 /**
2273 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2274 *
2275 * @vm_size: VM size
2276 *
2277 * Returns:
2278 * VM page table size as a power of two
2279 */
2280 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2281 {
2282 /* Total bits covered by PD + PTs */
2283 unsigned bits = ilog2(vm_size) + 18;
2284
2285 	/* Make sure the PD is 4K in size up to an 8GB address space.
2286 	 * Above that, split equally between PD and PTs. */
2287 if (vm_size <= 8)
2288 return (bits - 9);
2289 else
2290 return ((bits + 3) / 2);
2291 }
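/*
 * Worked example: for vm_size = 256 (GB), bits = ilog2(256) + 18 = 26,
 * which is above the 8 GB threshold, so the block size becomes
 * (26 + 3) / 2 = 14 bits.  For vm_size = 8 the result is
 * ilog2(8) + 18 - 9 = 12 bits, i.e. a 4K page directory.
 */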
2292
2293 /**
2294 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2295 *
2296 * @adev: amdgpu_device pointer
2297 * @min_vm_size: the minimum vm size in GB if it's set auto
2298 * @fragment_size_default: Default PTE fragment size
2299 * @max_level: max VMPT level
2300 * @max_bits: max address space size in bits
2301 *
2302 */
2303 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2304 uint32_t fragment_size_default, unsigned max_level,
2305 unsigned max_bits)
2306 {
2307 unsigned int max_size = 1 << (max_bits - 30);
2308 unsigned int vm_size;
2309 uint64_t tmp;
2310
2311 /* adjust vm size first */
2312 if (amdgpu_vm_size != -1) {
2313 vm_size = amdgpu_vm_size;
2314 if (vm_size > max_size) {
2315 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2316 amdgpu_vm_size, max_size);
2317 vm_size = max_size;
2318 }
2319 } else {
2320 struct sysinfo si;
2321 unsigned int phys_ram_gb;
2322
2323 /* Optimal VM size depends on the amount of physical
2324 * RAM available. Underlying requirements and
2325 * assumptions:
2326 *
2327 * - Need to map system memory and VRAM from all GPUs
2328 * - VRAM from other GPUs not known here
2329 * - Assume VRAM <= system memory
2330 * - On GFX8 and older, VM space can be segmented for
2331 * different MTYPEs
2332 * - Need to allow room for fragmentation, guard pages etc.
2333 *
2334 * This adds up to a rough guess of system memory x3.
2335 * Round up to power of two to maximize the available
2336 * VM size with the given page table size.
2337 */
2338 si_meminfo(&si);
2339 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2340 (1 << 30) - 1) >> 30;
2341 vm_size = roundup_pow_of_two(
2342 clamp(phys_ram_gb * 3, min_vm_size, max_size));
2343 }
2344
2345 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2346
2347 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2348 if (amdgpu_vm_block_size != -1)
2349 tmp >>= amdgpu_vm_block_size - 9;
2350 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2351 adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2352 switch (adev->vm_manager.num_level) {
2353 case 3:
2354 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2355 break;
2356 case 2:
2357 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2358 break;
2359 case 1:
2360 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2361 break;
2362 default:
2363 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2364 }
2365 	/* block size depends on vm size and hw setup */
2366 if (amdgpu_vm_block_size != -1)
2367 adev->vm_manager.block_size =
2368 min((unsigned)amdgpu_vm_block_size, max_bits
2369 - AMDGPU_GPU_PAGE_SHIFT
2370 - 9 * adev->vm_manager.num_level);
2371 else if (adev->vm_manager.num_level > 1)
2372 adev->vm_manager.block_size = 9;
2373 else
2374 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2375
2376 if (amdgpu_vm_fragment_size == -1)
2377 adev->vm_manager.fragment_size = fragment_size_default;
2378 else
2379 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2380
2381 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2382 vm_size, adev->vm_manager.num_level + 1,
2383 adev->vm_manager.block_size,
2384 adev->vm_manager.fragment_size);
2385 }
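/*
 * Worked example (illustrative, with amdgpu_vm_block_size and
 * amdgpu_vm_fragment_size left at their -1 defaults): a 256 GB VM size
 * gives max_pfn = 256 << 18 = 2^26 pages.  fls64(2^26) - 1 = 26 and
 * DIV_ROUND_UP(26, 9) - 1 = 2, so with max_level >= 2 this results in
 * num_level = 2 (a 3 level translation), root_level = AMDGPU_VM_PDB1,
 * a 9 bit block size and the default fragment size.
 */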
2386
2387 /**
2388 * amdgpu_vm_wait_idle - wait for the VM to become idle
2389 *
2390 * @vm: VM object to wait for
2391 * @timeout: timeout to wait for VM to become idle
2392 */
2393 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2394 {
2395 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2396 DMA_RESV_USAGE_BOOKKEEP,
2397 true, timeout);
2398 if (timeout <= 0)
2399 return timeout;
2400
2401 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2402 }
2403
2404 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2405 {
2406 struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2407
2408 kfree(ti);
2409 }
2410
2411 static inline struct amdgpu_vm *
2412 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2413 {
2414 struct amdgpu_vm *vm;
2415 unsigned long flags;
2416
2417 xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2418 vm = xa_load(&adev->vm_manager.pasids, pasid);
2419 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2420
2421 return vm;
2422 }
2423
2424 /**
2425 * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2426 *
2427 * @task_info: task_info struct under discussion.
2428 *
2429 * frees the vm task_info ptr at the last put
2430 */
2431 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2432 {
2433 kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2434 }
2435
2436 /**
2437 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2438 *
2439 * @vm: VM to get info from
2440 *
2441 * Returns the reference counted task_info structure, which must be
2442 * referenced down with amdgpu_vm_put_task_info.
2443 */
2444 struct amdgpu_task_info *
2445 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2446 {
2447 struct amdgpu_task_info *ti = NULL;
2448
2449 if (vm) {
2450 ti = vm->task_info;
2451 kref_get(&vm->task_info->refcount);
2452 }
2453
2454 return ti;
2455 }
2456
2457 /**
2458 * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2459 *
2460 * @adev: drm device pointer
2461 * @pasid: PASID identifier for VM
2462 *
2463 * Returns the reference counted task_info structure, which must be
2464 * referenced down with amdgpu_vm_put_task_info.
2465 */
2466 struct amdgpu_task_info *
2467 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2468 {
2469 return amdgpu_vm_get_task_info_vm(
2470 amdgpu_vm_get_vm_from_pasid(adev, pasid));
2471 }
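/*
 * Illustrative get/put pairing (hypothetical caller): the returned pointer
 * is reference counted and must be released again once the caller is done.
 *
 *   struct amdgpu_task_info *ti;
 *
 *   ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *   if (ti) {
 *       dev_info(adev->dev, "fault from process %s\n", ti->process_name);
 *       amdgpu_vm_put_task_info(ti);
 *   }
 */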
2472
2473 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2474 {
2475 vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2476 if (!vm->task_info)
2477 return -ENOMEM;
2478
2479 kref_init(&vm->task_info->refcount);
2480 return 0;
2481 }
2482
2483 /**
2484 * amdgpu_vm_set_task_info - Sets VMs task info.
2485 *
2486 * @vm: vm for which to set the info
2487 */
2488 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2489 {
2490 if (!vm->task_info)
2491 return;
2492
2493 if (vm->task_info->pid == current->pid)
2494 return;
2495
2496 vm->task_info->pid = current->pid;
2497 get_task_comm(vm->task_info->task_name, current);
2498
2499 if (current->group_leader->mm != current->mm)
2500 return;
2501
2502 vm->task_info->tgid = current->group_leader->pid;
2503 get_task_comm(vm->task_info->process_name, current->group_leader);
2504 }
2505
2506 /**
2507 * amdgpu_vm_init - initialize a vm instance
2508 *
2509 * @adev: amdgpu_device pointer
2510 * @vm: requested vm
2511 * @xcp_id: GPU partition selection id
2512 *
2513 * Init @vm fields.
2514 *
2515 * Returns:
2516 * 0 for success, error for failure.
2517 */
2518 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2519 int32_t xcp_id)
2520 {
2521 struct amdgpu_bo *root_bo;
2522 struct amdgpu_bo_vm *root;
2523 int r, i;
2524
2525 vm->va = RB_ROOT_CACHED;
2526 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2527 vm->reserved_vmid[i] = NULL;
2528 INIT_LIST_HEAD(&vm->evicted);
2529 INIT_LIST_HEAD(&vm->evicted_user);
2530 INIT_LIST_HEAD(&vm->relocated);
2531 INIT_LIST_HEAD(&vm->moved);
2532 INIT_LIST_HEAD(&vm->idle);
2533 INIT_LIST_HEAD(&vm->invalidated);
2534 spin_lock_init(&vm->status_lock);
2535 INIT_LIST_HEAD(&vm->freed);
2536 INIT_LIST_HEAD(&vm->done);
2537 INIT_LIST_HEAD(&vm->pt_freed);
2538 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2539 INIT_KFIFO(vm->faults);
2540
2541 r = amdgpu_vm_init_entities(adev, vm);
2542 if (r)
2543 return r;
2544
2545 ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2546
2547 vm->is_compute_context = false;
2548
2549 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2550 AMDGPU_VM_USE_CPU_FOR_GFX);
2551
2552 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2553 vm->use_cpu_for_update ? "CPU" : "SDMA");
2554 WARN_ONCE((vm->use_cpu_for_update &&
2555 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2556 "CPU update of VM recommended only for large BAR system\n");
2557
2558 if (vm->use_cpu_for_update)
2559 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2560 else
2561 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2562
2563 vm->last_update = dma_fence_get_stub();
2564 vm->last_unlocked = dma_fence_get_stub();
2565 vm->last_tlb_flush = dma_fence_get_stub();
2566 vm->generation = amdgpu_vm_generation(adev, NULL);
2567
2568 mutex_init(&vm->eviction_lock);
2569 vm->evicting = false;
2570 vm->tlb_fence_context = dma_fence_context_alloc(1);
2571
2572 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2573 false, &root, xcp_id);
2574 if (r)
2575 goto error_free_delayed;
2576
2577 root_bo = amdgpu_bo_ref(&root->bo);
2578 r = amdgpu_bo_reserve(root_bo, true);
2579 if (r) {
2580 amdgpu_bo_unref(&root_bo);
2581 goto error_free_delayed;
2582 }
2583
2584 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2585 r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2586 if (r)
2587 goto error_free_root;
2588
2589 r = amdgpu_vm_pt_clear(adev, vm, root, false);
2590 if (r)
2591 goto error_free_root;
2592
2593 r = amdgpu_vm_create_task_info(vm);
2594 if (r)
2595 DRM_DEBUG("Failed to create task info for VM\n");
2596
2597 amdgpu_bo_unreserve(vm->root.bo);
2598 amdgpu_bo_unref(&root_bo);
2599
2600 return 0;
2601
2602 error_free_root:
2603 amdgpu_vm_pt_free_root(adev, vm);
2604 amdgpu_bo_unreserve(vm->root.bo);
2605 amdgpu_bo_unref(&root_bo);
2606
2607 error_free_delayed:
2608 dma_fence_put(vm->last_tlb_flush);
2609 dma_fence_put(vm->last_unlocked);
2610 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2611 amdgpu_vm_fini_entities(vm);
2612
2613 return r;
2614 }
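/*
 * Illustrative life cycle sketch (hypothetical caller, e.g. a file-open
 * path): amdgpu_vm_init() and amdgpu_vm_fini() pair up, with an optional
 * amdgpu_vm_make_compute() in between for compute/KFD style usage.
 *
 *   r = amdgpu_vm_init(adev, &fpriv->vm, xcp_id);
 *   if (r)
 *       return r;
 *   ...
 *   amdgpu_vm_fini(adev, &fpriv->vm);
 */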
2615
2616 /**
2617 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2618 *
2619 * @adev: amdgpu_device pointer
2620 * @vm: requested vm
2621 *
2622 * This only works on GFX VMs that don't have any BOs added and no
2623 * page tables allocated yet.
2624 *
2625 * Changes the following VM parameters:
2626 * - use_cpu_for_update
2627 * - pte_supports_ats
2628 *
2629 * Reinitializes the page directory to reflect the changed ATS
2630 * setting.
2631 *
2632 * Returns:
2633 * 0 for success, -errno for errors.
2634 */
2635 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2636 {
2637 int r;
2638
2639 r = amdgpu_bo_reserve(vm->root.bo, true);
2640 if (r)
2641 return r;
2642
2643 /* Update VM state */
2644 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2645 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2646 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2647 vm->use_cpu_for_update ? "CPU" : "SDMA");
2648 WARN_ONCE((vm->use_cpu_for_update &&
2649 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2650 "CPU update of VM recommended only for large BAR system\n");
2651
2652 if (vm->use_cpu_for_update) {
2653 /* Sync with last SDMA update/clear before switching to CPU */
2654 r = amdgpu_bo_sync_wait(vm->root.bo,
2655 AMDGPU_FENCE_OWNER_UNDEFINED, true);
2656 if (r)
2657 goto unreserve_bo;
2658
2659 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2660 r = amdgpu_vm_pt_map_tables(adev, vm);
2661 if (r)
2662 goto unreserve_bo;
2663
2664 } else {
2665 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2666 }
2667
2668 dma_fence_put(vm->last_update);
2669 vm->last_update = dma_fence_get_stub();
2670 vm->is_compute_context = true;
2671
2672 unreserve_bo:
2673 amdgpu_bo_unreserve(vm->root.bo);
2674 return r;
2675 }
2676
2677 /**
2678 * amdgpu_vm_release_compute - release a compute vm
2679 * @adev: amdgpu_device pointer
2680 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2681 *
2682 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
2683 * pasid from the vm. Compute should stop using the vm after this call.
2684 */
2685 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2686 {
2687 amdgpu_vm_set_pasid(adev, vm, 0);
2688 vm->is_compute_context = false;
2689 }
2690
2691 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2692 {
2693 for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2694 if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2695 vm->stats[i].evicted == 0))
2696 return false;
2697 }
2698 return true;
2699 }
2700
2701 /**
2702 * amdgpu_vm_fini - tear down a vm instance
2703 *
2704 * @adev: amdgpu_device pointer
2705 * @vm: requested vm
2706 *
2707 * Tear down @vm.
2708 * Unbind the VM and remove all bos from the vm bo list
2709 */
2710 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2711 {
2712 struct amdgpu_bo_va_mapping *mapping, *tmp;
2713 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2714 struct amdgpu_bo *root;
2715 unsigned long flags;
2716 int i;
2717
2718 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2719
2720 flush_work(&vm->pt_free_work);
2721
2722 root = amdgpu_bo_ref(vm->root.bo);
2723 amdgpu_bo_reserve(root, true);
2724 amdgpu_vm_set_pasid(adev, vm, 0);
2725 dma_fence_wait(vm->last_unlocked, false);
2726 dma_fence_put(vm->last_unlocked);
2727 dma_fence_wait(vm->last_tlb_flush, false);
2728 /* Make sure that all fence callbacks have completed */
2729 spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2730 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2731 dma_fence_put(vm->last_tlb_flush);
2732
2733 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2734 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2735 amdgpu_vm_prt_fini(adev, vm);
2736 prt_fini_needed = false;
2737 }
2738
2739 list_del(&mapping->list);
2740 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2741 }
2742
2743 amdgpu_vm_pt_free_root(adev, vm);
2744 amdgpu_bo_unreserve(root);
2745 amdgpu_bo_unref(&root);
2746 WARN_ON(vm->root.bo);
2747
2748 amdgpu_vm_fini_entities(vm);
2749
2750 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2751 dev_err(adev->dev, "still active bo inside vm\n");
2752 }
2753 rbtree_postorder_for_each_entry_safe(mapping, tmp,
2754 &vm->va.rb_root, rb) {
2755 /* Don't remove the mapping here, we don't want to trigger a
2756 * rebalance and the tree is about to be destroyed anyway.
2757 */
2758 list_del(&mapping->list);
2759 kfree(mapping);
2760 }
2761
2762 dma_fence_put(vm->last_update);
2763
2764 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2765 if (vm->reserved_vmid[i]) {
2766 amdgpu_vmid_free_reserved(adev, i);
2767 vm->reserved_vmid[i] = false;
2768 }
2769 }
2770
2771 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2772
2773 if (!amdgpu_vm_stats_is_zero(vm)) {
2774 struct amdgpu_task_info *ti = vm->task_info;
2775
2776 dev_warn(adev->dev,
2777 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2778 ti->process_name, ti->pid, ti->task_name, ti->tgid);
2779 }
2780
2781 amdgpu_vm_put_task_info(vm->task_info);
2782 }
2783
2784 /**
2785 * amdgpu_vm_manager_init - init the VM manager
2786 *
2787 * @adev: amdgpu_device pointer
2788 *
2789 * Initialize the VM manager structures
2790 */
2791 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2792 {
2793 unsigned i;
2794
2795 /* Concurrent flushes are only possible starting with Vega10 and
2796 * are broken on Navi10 and Navi14.
2797 */
2798 adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2799 adev->asic_type == CHIP_NAVI10 ||
2800 adev->asic_type == CHIP_NAVI14);
2801 amdgpu_vmid_mgr_init(adev);
2802
2803 adev->vm_manager.fence_context =
2804 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2805 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2806 adev->vm_manager.seqno[i] = 0;
2807
2808 spin_lock_init(&adev->vm_manager.prt_lock);
2809 atomic_set(&adev->vm_manager.num_prt_users, 0);
2810
2811 	/* If not overridden by the user, compute VM page tables are updated
2812 	 * by the CPU only on large BAR systems
2813 */
2814 #ifdef CONFIG_X86_64
2815 if (amdgpu_vm_update_mode == -1) {
2816 /* For asic with VF MMIO access protection
2817 * avoid using CPU for VM table updates
2818 */
2819 if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2820 !amdgpu_sriov_vf_mmio_access_protection(adev))
2821 adev->vm_manager.vm_update_mode =
2822 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2823 else
2824 adev->vm_manager.vm_update_mode = 0;
2825 } else
2826 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2827 #else
2828 adev->vm_manager.vm_update_mode = 0;
2829 #endif
2830
2831 xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2832 }
2833
2834 /**
2835 * amdgpu_vm_manager_fini - cleanup VM manager
2836 *
2837 * @adev: amdgpu_device pointer
2838 *
2839 * Cleanup the VM manager and free resources.
2840 */
2841 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2842 {
2843 WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2844 xa_destroy(&adev->vm_manager.pasids);
2845
2846 amdgpu_vmid_mgr_fini(adev);
2847 }
2848
2849 /**
2850 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2851 *
2852 * @dev: drm device pointer
2853 * @data: drm_amdgpu_vm
2854 * @filp: drm file pointer
2855 *
2856 * Returns:
2857 * 0 for success, -errno for errors.
2858 */
2859 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2860 {
2861 union drm_amdgpu_vm *args = data;
2862 struct amdgpu_device *adev = drm_to_adev(dev);
2863 struct amdgpu_fpriv *fpriv = filp->driver_priv;
2864
2865 /* No valid flags defined yet */
2866 if (args->in.flags)
2867 return -EINVAL;
2868
2869 switch (args->in.op) {
2870 case AMDGPU_VM_OP_RESERVE_VMID:
2871 		/* We only need to reserve a vmid from the gfxhub */
2872 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2873 amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2874 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2875 }
2876
2877 break;
2878 case AMDGPU_VM_OP_UNRESERVE_VMID:
2879 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2880 amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2881 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2882 }
2883 break;
2884 default:
2885 return -EINVAL;
2886 }
2887
2888 return 0;
2889 }
2890
2891 /**
2892 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2893 * @adev: amdgpu device pointer
2894 * @pasid: PASID of the VM
2895 * @ts: Timestamp of the fault
2896 * @vmid: VMID, only used for GFX 9.4.3.
2897 * @node_id: Node_id received in IH cookie. Only applicable for
2898 * GFX 9.4.3.
2899 * @addr: Address of the fault
2900 * @write_fault: true if write fault, false if read fault
2901 *
2902 * Try to gracefully handle a VM fault. Return true if the fault was handled and
2903 * shouldn't be reported any more.
2904 */
2905 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2906 u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
2907 bool write_fault)
2908 {
2909 bool is_compute_context = false;
2910 struct amdgpu_bo *root;
2911 unsigned long irqflags;
2912 uint64_t value, flags;
2913 struct amdgpu_vm *vm;
2914 int r;
2915
2916 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2917 vm = xa_load(&adev->vm_manager.pasids, pasid);
2918 if (vm) {
2919 root = amdgpu_bo_ref(vm->root.bo);
2920 is_compute_context = vm->is_compute_context;
2921 } else {
2922 root = NULL;
2923 }
2924 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2925
2926 if (!root)
2927 return false;
2928
2929 addr /= AMDGPU_GPU_PAGE_SIZE;
2930
2931 if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2932 node_id, addr, ts, write_fault)) {
2933 amdgpu_bo_unref(&root);
2934 return true;
2935 }
2936
2937 r = amdgpu_bo_reserve(root, true);
2938 if (r)
2939 goto error_unref;
2940
2941 /* Double check that the VM still exists */
2942 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2943 vm = xa_load(&adev->vm_manager.pasids, pasid);
2944 if (vm && vm->root.bo != root)
2945 vm = NULL;
2946 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2947 if (!vm)
2948 goto error_unlock;
2949
2950 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2951 AMDGPU_PTE_SYSTEM;
2952
2953 if (is_compute_context) {
2954 /* Intentionally setting invalid PTE flag
2955 * combination to force a no-retry-fault
2956 */
2957 flags = AMDGPU_VM_NORETRY_FLAGS;
2958 value = 0;
2959 } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2960 /* Redirect the access to the dummy page */
2961 value = adev->dummy_page_addr;
2962 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2963 AMDGPU_PTE_WRITEABLE;
2964
2965 } else {
2966 /* Let the hw retry silently on the PTE */
2967 value = 0;
2968 }
2969
2970 r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2971 if (r) {
2972 pr_debug("failed %d to reserve fence slot\n", r);
2973 goto error_unlock;
2974 }
2975
2976 r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2977 NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
2978 if (r)
2979 goto error_unlock;
2980
2981 r = amdgpu_vm_update_pdes(adev, vm, true);
2982
2983 error_unlock:
2984 amdgpu_bo_unreserve(root);
2985 if (r < 0)
2986 DRM_ERROR("Can't handle page fault (%d)\n", r);
2987
2988 error_unref:
2989 amdgpu_bo_unref(&root);
2990
2991 return false;
2992 }
2993
2994 #if defined(CONFIG_DEBUG_FS)
2995 /**
2996 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2997 *
2998 * @vm: Requested VM for printing BO info
2999 * @m: debugfs file
3000 *
3001 * Print BO information in debugfs file for the VM
3002 */
3003 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3004 {
3005 struct amdgpu_bo_va *bo_va, *tmp;
3006 u64 total_idle = 0;
3007 u64 total_evicted = 0;
3008 u64 total_relocated = 0;
3009 u64 total_moved = 0;
3010 u64 total_invalidated = 0;
3011 u64 total_done = 0;
3012 unsigned int total_idle_objs = 0;
3013 unsigned int total_evicted_objs = 0;
3014 unsigned int total_relocated_objs = 0;
3015 unsigned int total_moved_objs = 0;
3016 unsigned int total_invalidated_objs = 0;
3017 unsigned int total_done_objs = 0;
3018 unsigned int id = 0;
3019
3020 spin_lock(&vm->status_lock);
3021 seq_puts(m, "\tIdle BOs:\n");
3022 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3023 if (!bo_va->base.bo)
3024 continue;
3025 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3026 }
3027 total_idle_objs = id;
3028 id = 0;
3029
3030 seq_puts(m, "\tEvicted BOs:\n");
3031 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3032 if (!bo_va->base.bo)
3033 continue;
3034 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3035 }
3036 total_evicted_objs = id;
3037 id = 0;
3038
3039 seq_puts(m, "\tRelocated BOs:\n");
3040 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3041 if (!bo_va->base.bo)
3042 continue;
3043 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3044 }
3045 total_relocated_objs = id;
3046 id = 0;
3047
3048 seq_puts(m, "\tMoved BOs:\n");
3049 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3050 if (!bo_va->base.bo)
3051 continue;
3052 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3053 }
3054 total_moved_objs = id;
3055 id = 0;
3056
3057 seq_puts(m, "\tInvalidated BOs:\n");
3058 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3059 if (!bo_va->base.bo)
3060 continue;
3061 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3062 }
3063 total_invalidated_objs = id;
3064 id = 0;
3065
3066 seq_puts(m, "\tDone BOs:\n");
3067 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3068 if (!bo_va->base.bo)
3069 continue;
3070 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3071 }
3072 spin_unlock(&vm->status_lock);
3073 total_done_objs = id;
3074
3075 seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
3076 total_idle_objs);
3077 seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted,
3078 total_evicted_objs);
3079 seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated,
3080 total_relocated_objs);
3081 seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved,
3082 total_moved_objs);
3083 seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3084 total_invalidated_objs);
3085 seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done,
3086 total_done_objs);
3087 }
3088 #endif
3089
3090 /**
3091 * amdgpu_vm_update_fault_cache - update cached fault info.
3092 * @adev: amdgpu device pointer
3093 * @pasid: PASID of the VM
3094 * @addr: Address of the fault
3095 * @status: GPUVM fault status register
3096 * @vmhub: which vmhub got the fault
3097 *
3098 * Cache the fault info for later use by userspace in debugging.
3099 */
3100 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3101 unsigned int pasid,
3102 uint64_t addr,
3103 uint32_t status,
3104 unsigned int vmhub)
3105 {
3106 struct amdgpu_vm *vm;
3107 unsigned long flags;
3108
3109 xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3110
3111 vm = xa_load(&adev->vm_manager.pasids, pasid);
3112 /* Don't update the fault cache if status is 0. In the multiple
3113 * fault case, subsequent faults will return a 0 status which is
3114 * useless for userspace and replaces the useful fault status, so
3115 * only update if status is non-0.
3116 */
3117 if (vm && status) {
3118 vm->fault_info.addr = addr;
3119 vm->fault_info.status = status;
3120 /*
3121 * Update the fault information globally for later usage
3122 * when vm could be stale or freed.
3123 */
3124 adev->vm_manager.fault_info.addr = addr;
3125 adev->vm_manager.fault_info.vmhub = vmhub;
3126 adev->vm_manager.fault_info.status = status;
3127
3128 if (AMDGPU_IS_GFXHUB(vmhub)) {
3129 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3130 vm->fault_info.vmhub |=
3131 (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3132 } else if (AMDGPU_IS_MMHUB0(vmhub)) {
3133 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3134 vm->fault_info.vmhub |=
3135 (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3136 } else if (AMDGPU_IS_MMHUB1(vmhub)) {
3137 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3138 vm->fault_info.vmhub |=
3139 (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3140 } else {
3141 WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3142 }
3143 }
3144 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3145 }
3146
3147 /**
3148 * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3149 *
3150 * @vm: VM to test against.
3151 * @bo: BO to be tested.
3152 *
3153 * Returns true if the BO shares the dma_resv object with the root PD and is
3154 * always guaranteed to be valid inside the VM.
3155 */
3156 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
3157 {
3158 return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
3159 }
3160