1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_vm.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_gmc.h"
43 #include "amdgpu_xgmi.h"
44 #include "amdgpu_dma_buf.h"
45 #include "amdgpu_res_cursor.h"
46 #include "kfd_svm.h"
47
48 /**
49 * DOC: GPUVM
50 *
51 * GPUVM is the MMU functionality provided on the GPU.
52 * GPUVM is similar to the legacy GART on older asics, however
53 * rather than there being a single global GART table
54 * for the entire GPU, there can be multiple GPUVM page tables active
55 * at any given time. The GPUVM page tables can contain a mix of
56 * VRAM pages and system pages (both memory and MMIO) and system pages
57 * can be mapped as snooped (cached system pages) or unsnooped
58 * (uncached system pages).
59 *
60 * Each active GPUVM has an ID associated with it and there is a page table
61 * linked with each VMID. When executing a command buffer,
62 * the kernel tells the engine what VMID to use for that command
63 * buffer. VMIDs are allocated dynamically as commands are submitted.
64 * The userspace drivers maintain their own address space and the kernel
65 * sets up their page tables accordingly when they submit their
66 * command buffers and a VMID is assigned.
67 * The hardware supports up to 16 active GPUVMs at any given time.
68 *
69 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
70 * on the ASIC family. GPUVM supports RWX attributes on each page as well
71 * as other features such as encryption and caching attributes.
72 *
73 * VMID 0 is special. It is the GPUVM used for the kernel driver. In
74 * addition to an aperture managed by a page table, VMID 0 also has
75 * several other apertures. There is an aperture for direct access to VRAM
76 * and there is a legacy AGP aperture which just forwards accesses directly
77 * to the matching system physical addresses (or IOVAs when an IOMMU is
78 * present). These apertures provide direct access to these memories without
79 * incurring the overhead of a page table. VMID 0 is used by the kernel
80 * driver for tasks like memory management.
81 *
82 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
83 * For user applications, each application can have its own unique GPUVM
84 * address space. The application manages the address space and the kernel
85 * driver manages the GPUVM page tables for each process. If a GPU client
86 * accesses an invalid page, it will generate a GPU page fault, similar to
87 * accessing an invalid page on a CPU.
88 */
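/*
 * Illustrative only: a rough sketch (not taken verbatim from any caller) of
 * how the helpers below are typically combined at command submission time.
 * Locking, error handling and the validation callback are simplified
 * placeholders:
 *
 *	r = amdgpu_vm_validate(adev, vm, NULL, validate_cb, param);
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm, NULL);
 *	if (!r)
 *		r = amdgpu_vm_update_pdes(adev, vm, false);
 *
 * The resulting job then requests a VM flush as needed and amdgpu_vm_flush()
 * emits it on the ring when the job runs.
 */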
89
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
92
93 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
94 START, LAST, static, amdgpu_vm_it)
95
96 #undef START
97 #undef LAST
98
99 /**
100 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
101 */
102 struct amdgpu_prt_cb {
103
104 /**
105 * @adev: amdgpu device
106 */
107 struct amdgpu_device *adev;
108
109 /**
110 * @cb: callback
111 */
112 struct dma_fence_cb cb;
113 };
114
115 /**
116 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
117 */
118 struct amdgpu_vm_tlb_seq_struct {
119 /**
120 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 */
122 struct amdgpu_vm *vm;
123
124 /**
125 * @cb: callback
126 */
127 struct dma_fence_cb cb;
128 };
129
130 /**
131 * amdgpu_vm_assert_locked - check if VM is correctly locked
132 * @vm: the VM which should be tested
133 *
134 * Asserts that the VM root PD is locked.
135 */
136 static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
137 {
138 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
139 }
140
141 /**
142 * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
143 *
144 * @vm: VM to test against.
145 * @bo: BO to be tested.
146 *
147 * Returns true if the BO shares the dma_resv object with the root PD and is
148 * always guaranteed to be valid inside the VM.
149 */
150 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
151 {
152 return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
153 }
154
155 /**
156 * amdgpu_vm_bo_evicted - vm_bo is evicted
157 *
158 * @vm_bo: vm_bo which is evicted
159 *
160 * State for PDs/PTs and per VM BOs which are not at the location they should
161 * be.
162 */
163 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
164 {
165 struct amdgpu_vm *vm = vm_bo->vm;
166 struct amdgpu_bo *bo = vm_bo->bo;
167
168 vm_bo->moved = true;
169 amdgpu_vm_assert_locked(vm);
170 spin_lock(&vm_bo->vm->status_lock);
171 if (bo->tbo.type == ttm_bo_type_kernel)
172 list_move(&vm_bo->vm_status, &vm->evicted);
173 else
174 list_move_tail(&vm_bo->vm_status, &vm->evicted);
175 spin_unlock(&vm_bo->vm->status_lock);
176 }
177 /**
178 * amdgpu_vm_bo_moved - vm_bo is moved
179 *
180 * @vm_bo: vm_bo which is moved
181 *
182 * State for per VM BOs which are moved, but that change is not yet reflected
183 * in the page tables.
184 */
185 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
186 {
187 amdgpu_vm_assert_locked(vm_bo->vm);
188 spin_lock(&vm_bo->vm->status_lock);
189 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
190 spin_unlock(&vm_bo->vm->status_lock);
191 }
192
193 /**
194 * amdgpu_vm_bo_idle - vm_bo is idle
195 *
196 * @vm_bo: vm_bo which is now idle
197 *
198 * State for PDs/PTs and per VM BOs which have gone through the state machine
199 * and are now idle.
200 */
201 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
202 {
203 amdgpu_vm_assert_locked(vm_bo->vm);
204 spin_lock(&vm_bo->vm->status_lock);
205 list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
206 spin_unlock(&vm_bo->vm->status_lock);
207 vm_bo->moved = false;
208 }
209
210 /**
211 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
212 *
213 * @vm_bo: vm_bo which is now invalidated
214 *
215 * State for normal BOs which are invalidated and that change is not yet
216 * reflected in the PTs.
217 */
218 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
219 {
220 spin_lock(&vm_bo->vm->status_lock);
221 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
222 spin_unlock(&vm_bo->vm->status_lock);
223 }
224
225 /**
226 * amdgpu_vm_bo_evicted_user - vm_bo is evicted
227 *
228 * @vm_bo: vm_bo which is evicted
229 *
230 * State for BOs used by user mode queues which are not at the location they
231 * should be.
232 */
233 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
234 {
235 vm_bo->moved = true;
236 spin_lock(&vm_bo->vm->status_lock);
237 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
238 spin_unlock(&vm_bo->vm->status_lock);
239 }
240
241 /**
242 * amdgpu_vm_bo_relocated - vm_bo is relocated
243 *
244 * @vm_bo: vm_bo which is relocated
245 *
246 * State for PDs/PTs which need to update their parent PD.
247 * For the root PD, just move to idle state.
248 */
249 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
250 {
251 amdgpu_vm_assert_locked(vm_bo->vm);
252 if (vm_bo->bo->parent) {
253 spin_lock(&vm_bo->vm->status_lock);
254 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
255 spin_unlock(&vm_bo->vm->status_lock);
256 } else {
257 amdgpu_vm_bo_idle(vm_bo);
258 }
259 }
260
261 /**
262 * amdgpu_vm_bo_done - vm_bo is done
263 *
264 * @vm_bo: vm_bo which is now done
265 *
266 * State for normal BOs which are invalidated and that change has been updated
267 * in the PTs.
268 */
269 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
270 {
271 amdgpu_vm_assert_locked(vm_bo->vm);
272 spin_lock(&vm_bo->vm->status_lock);
273 list_move(&vm_bo->vm_status, &vm_bo->vm->done);
274 spin_unlock(&vm_bo->vm->status_lock);
275 }
276
277 /**
278 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
279 * @vm: the VM which state machine to reset
280 *
281 * Move all vm_bo objects in the VM into a state where they will be updated
282 * again during validation.
283 */
284 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
285 {
286 struct amdgpu_vm_bo_base *vm_bo, *tmp;
287
288 amdgpu_vm_assert_locked(vm);
289
290 spin_lock(&vm->status_lock);
291 list_splice_init(&vm->done, &vm->invalidated);
292 list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
293 vm_bo->moved = true;
294
295 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
296 struct amdgpu_bo *bo = vm_bo->bo;
297
298 vm_bo->moved = true;
299 if (!bo || bo->tbo.type != ttm_bo_type_kernel)
300 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
301 else if (bo->parent)
302 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
303 }
304 spin_unlock(&vm->status_lock);
305 }
306
307 /**
308 * amdgpu_vm_update_shared - helper to update shared memory stat
309 * @base: base structure for tracking BO usage in a VM
310 *
311 * Takes the vm status_lock and updates the shared memory stat. If the basic
312 * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be
313 * called as well.
314 */
315 static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
316 {
317 struct amdgpu_vm *vm = base->vm;
318 struct amdgpu_bo *bo = base->bo;
319 uint64_t size = amdgpu_bo_size(bo);
320 uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
321 bool shared;
322
323 dma_resv_assert_held(bo->tbo.base.resv);
324 spin_lock(&vm->status_lock);
325 shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
326 if (base->shared != shared) {
327 base->shared = shared;
328 if (shared) {
329 vm->stats[bo_memtype].drm.shared += size;
330 vm->stats[bo_memtype].drm.private -= size;
331 } else {
332 vm->stats[bo_memtype].drm.shared -= size;
333 vm->stats[bo_memtype].drm.private += size;
334 }
335 }
336 spin_unlock(&vm->status_lock);
337 }
338
339 /**
340 * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
341 * @bo: amdgpu buffer object
342 *
343 * Update the per-VM stats for all VMs this BO belongs to when it changes
344 * from private to shared or vice versa.
345 */
346 void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
347 {
348 struct amdgpu_vm_bo_base *base;
349
350 for (base = bo->vm_bo; base; base = base->next)
351 amdgpu_vm_update_shared(base);
352 }
353
354 /**
355 * amdgpu_vm_update_stats_locked - helper to update normal memory stat
356 * @base: base structure for tracking BO usage in a VM
357 * @res: the ttm_resource to use for the purpose of accounting, may or may not
358 * be bo->tbo.resource
359 * @sign: if we should add (+1) or subtract (-1) from the stat
360 *
361 * Caller needs to hold the vm status_lock. Useful when multiple updates
362 * need to happen at the same time.
363 */
364 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
365 struct ttm_resource *res, int sign)
366 {
367 struct amdgpu_vm *vm = base->vm;
368 struct amdgpu_bo *bo = base->bo;
369 int64_t size = sign * amdgpu_bo_size(bo);
370 uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
371
372 /* For drm-total- and drm-shared-, BOs are accounted by their preferred
373 * placement, see also amdgpu_bo_mem_stats_placement.
374 */
375 if (base->shared)
376 vm->stats[bo_memtype].drm.shared += size;
377 else
378 vm->stats[bo_memtype].drm.private += size;
379
380 if (res && res->mem_type < __AMDGPU_PL_NUM) {
381 uint32_t res_memtype = res->mem_type;
382
383 vm->stats[res_memtype].drm.resident += size;
384 /* A BO only counts as purgeable if it is resident,
385 * since otherwise there's nothing to purge.
386 */
387 if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
388 vm->stats[res_memtype].drm.purgeable += size;
389 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
390 vm->stats[bo_memtype].evicted += size;
391 }
392 }
393
394 /**
395 * amdgpu_vm_update_stats - helper to update normal memory stat
396 * @base: base structure for tracking BO usage in a VM
397 * @res: the ttm_resource to use for the purpose of accounting, may or may not
398 * be bo->tbo.resource
399 * @sign: if we should add (+1) or subtract (-1) from the stat
400 *
401 * Updates the basic memory stat when bo is added/deleted/moved.
402 */
403 void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
404 struct ttm_resource *res, int sign)
405 {
406 struct amdgpu_vm *vm = base->vm;
407
408 spin_lock(&vm->status_lock);
409 amdgpu_vm_update_stats_locked(base, res, sign);
410 spin_unlock(&vm->status_lock);
411 }
412
413 /**
414 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
415 *
416 * @base: base structure for tracking BO usage in a VM
417 * @vm: vm to which bo is to be added
418 * @bo: amdgpu buffer object
419 *
420 * Initialize a bo_va_base structure and add it to the appropriate lists
421 *
422 */
423 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
424 struct amdgpu_vm *vm, struct amdgpu_bo *bo)
425 {
426 base->vm = vm;
427 base->bo = bo;
428 base->next = NULL;
429 INIT_LIST_HEAD(&base->vm_status);
430
431 if (!bo)
432 return;
433 base->next = bo->vm_bo;
434 bo->vm_bo = base;
435
436 spin_lock(&vm->status_lock);
437 base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
438 amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
439 spin_unlock(&vm->status_lock);
440
441 if (!amdgpu_vm_is_bo_always_valid(vm, bo))
442 return;
443
444 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
445
446 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
447 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
448 amdgpu_vm_bo_relocated(base);
449 else
450 amdgpu_vm_bo_idle(base);
451
452 if (bo->preferred_domains &
453 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
454 return;
455
456 /*
457 * We checked all the prerequisites, but it looks like this per VM BO
458 * is currently evicted. Add the BO to the evicted list to make sure it
459 * is validated on next VM use to avoid a fault.
460 */
461 amdgpu_vm_bo_evicted(base);
462 }
463
464 /**
465 * amdgpu_vm_lock_pd - lock PD in drm_exec
466 *
467 * @vm: vm providing the BOs
468 * @exec: drm execution context
469 * @num_fences: number of extra fences to reserve
470 *
471 * Lock the VM root PD in the DRM execution context.
472 */
473 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
474 unsigned int num_fences)
475 {
476 /* We need at least two fences for the VM PD/PT updates */
477 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
478 2 + num_fences);
479 }
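/*
 * Minimal usage sketch for amdgpu_vm_lock_pd(), assuming the usual drm_exec
 * retry loop; the surrounding code is illustrative, not a verbatim caller:
 *
 *	struct drm_exec exec;
 *	int r;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 */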
480
481 /**
482 * amdgpu_vm_lock_done_list - lock all BOs on the done list
483 * @vm: vm providing the BOs
484 * @exec: drm execution context
485 * @num_fences: number of extra fences to reserve
486 *
487 * Lock the BOs on the done list in the DRM execution context.
488 */
489 int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
490 unsigned int num_fences)
491 {
492 struct list_head *prev = &vm->done;
493 struct amdgpu_bo_va *bo_va;
494 struct amdgpu_bo *bo;
495 int ret;
496
497 /* We can only trust prev->next while holding the lock */
498 spin_lock(&vm->status_lock);
499 while (!list_is_head(prev->next, &vm->done)) {
500 bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
501
502 bo = bo_va->base.bo;
503 if (bo) {
504 amdgpu_bo_ref(bo);
505 spin_unlock(&vm->status_lock);
506
507 ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
508 amdgpu_bo_unref(&bo);
509 if (unlikely(ret))
510 return ret;
511
512 spin_lock(&vm->status_lock);
513 }
514 prev = prev->next;
515 }
516 spin_unlock(&vm->status_lock);
517
518 return 0;
519 }
520
521 /**
522 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
523 *
524 * @adev: amdgpu device pointer
525 * @vm: vm providing the BOs
526 *
527 * Move all BOs to the end of LRU and remember their positions to put them
528 * together.
529 */
530 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
531 struct amdgpu_vm *vm)
532 {
533 spin_lock(&adev->mman.bdev.lru_lock);
534 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
535 spin_unlock(&adev->mman.bdev.lru_lock);
536 }
537
538 /* Create scheduler entities for page table updates */
539 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
540 struct amdgpu_vm *vm)
541 {
542 int r;
543
544 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
545 adev->vm_manager.vm_pte_scheds,
546 adev->vm_manager.vm_pte_num_scheds, NULL);
547 if (r)
548 goto error;
549
550 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
551 adev->vm_manager.vm_pte_scheds,
552 adev->vm_manager.vm_pte_num_scheds, NULL);
553
554 error:
555 drm_sched_entity_destroy(&vm->immediate);
556 return r;
557 }
558
559 /* Destroy the entities for page table updates again */
560 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
561 {
562 drm_sched_entity_destroy(&vm->immediate);
563 drm_sched_entity_destroy(&vm->delayed);
564 }
565
566 /**
567 * amdgpu_vm_generation - return the page table re-generation counter
568 * @adev: the amdgpu_device
569 * @vm: optional VM to check, might be NULL
570 *
571 * Returns a page table re-generation token to allow checking if submissions
572 * are still valid to use this VM. The VM parameter might be NULL in which case
573 * just the VRAM lost counter will be used.
574 */
575 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
576 {
577 uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
578
579 if (!vm)
580 return result;
581
582 result += lower_32_bits(vm->generation);
583 /* Add one if the page tables will be re-generated on next CS */
584 if (drm_sched_entity_error(&vm->delayed))
585 ++result;
586
587 return result;
588 }
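/*
 * Illustrative check against the token returned above (hypothetical caller):
 * a submission records the generation when it is prepared and bails out if
 * the page tables have been re-generated in the meantime:
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *	...
 *	if (amdgpu_vm_generation(adev, vm) != gen)
 *		return -ECANCELED;
 */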
589
590 /**
591 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
592 *
593 * @adev: amdgpu device pointer
594 * @vm: vm providing the BOs
595 * @ticket: optional reservation ticket used to reserve the VM
596 * @validate: callback to do the validation
597 * @param: parameter for the validation callback
598 *
599 * Validate the page table BOs and per-VM BOs on command submission if
600 * necessary. If a ticket is given, also try to validate evicted user queue
601 * BOs. They must already be reserved with the given ticket.
602 *
603 * Returns:
604 * Validation result.
605 */
606 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
607 struct ww_acquire_ctx *ticket,
608 int (*validate)(void *p, struct amdgpu_bo *bo),
609 void *param)
610 {
611 uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
612 struct amdgpu_vm_bo_base *bo_base;
613 struct amdgpu_bo *bo;
614 int r;
615
616 if (vm->generation != new_vm_generation) {
617 vm->generation = new_vm_generation;
618 amdgpu_vm_bo_reset_state_machine(vm);
619 amdgpu_vm_fini_entities(vm);
620 r = amdgpu_vm_init_entities(adev, vm);
621 if (r)
622 return r;
623 }
624
625 spin_lock(&vm->status_lock);
626 while (!list_empty(&vm->evicted)) {
627 bo_base = list_first_entry(&vm->evicted,
628 struct amdgpu_vm_bo_base,
629 vm_status);
630 spin_unlock(&vm->status_lock);
631
632 bo = bo_base->bo;
633
634 r = validate(param, bo);
635 if (r)
636 return r;
637
638 if (bo->tbo.type != ttm_bo_type_kernel) {
639 amdgpu_vm_bo_moved(bo_base);
640 } else {
641 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
642 amdgpu_vm_bo_relocated(bo_base);
643 }
644 spin_lock(&vm->status_lock);
645 }
646 while (ticket && !list_empty(&vm->evicted_user)) {
647 bo_base = list_first_entry(&vm->evicted_user,
648 struct amdgpu_vm_bo_base,
649 vm_status);
650 spin_unlock(&vm->status_lock);
651
652 bo = bo_base->bo;
653 dma_resv_assert_held(bo->tbo.base.resv);
654
655 r = validate(param, bo);
656 if (r)
657 return r;
658
659 amdgpu_vm_bo_invalidated(bo_base);
660
661 spin_lock(&vm->status_lock);
662 }
663 spin_unlock(&vm->status_lock);
664
665 amdgpu_vm_eviction_lock(vm);
666 vm->evicting = false;
667 amdgpu_vm_eviction_unlock(vm);
668
669 return 0;
670 }
671
672 /**
673 * amdgpu_vm_ready - check VM is ready for updates
674 *
675 * @vm: VM to check
676 *
677 * Check if all VM PDs/PTs are ready for updates
678 *
679 * Returns:
680 * True if VM is not evicting and all VM entities are not stopped
681 */
682 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
683 {
684 bool ret;
685
686 amdgpu_vm_assert_locked(vm);
687
688 amdgpu_vm_eviction_lock(vm);
689 ret = !vm->evicting;
690 amdgpu_vm_eviction_unlock(vm);
691
692 spin_lock(&vm->status_lock);
693 ret &= list_empty(&vm->evicted);
694 spin_unlock(&vm->status_lock);
695
696 spin_lock(&vm->immediate.lock);
697 ret &= !vm->immediate.stopped;
698 spin_unlock(&vm->immediate.lock);
699
700 spin_lock(&vm->delayed.lock);
701 ret &= !vm->delayed.stopped;
702 spin_unlock(&vm->delayed.lock);
703
704 return ret;
705 }
706
707 /**
708 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
709 *
710 * @adev: amdgpu_device pointer
711 */
712 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
713 {
714 const struct amdgpu_ip_block *ip_block;
715 bool has_compute_vm_bug;
716 struct amdgpu_ring *ring;
717 int i;
718
719 has_compute_vm_bug = false;
720
721 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
722 if (ip_block) {
723 /* Compute has a VM bug for GFX version < 7.
724 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
725 if (ip_block->version->major <= 7)
726 has_compute_vm_bug = true;
727 else if (ip_block->version->major == 8)
728 if (adev->gfx.mec_fw_version < 673)
729 has_compute_vm_bug = true;
730 }
731
732 for (i = 0; i < adev->num_rings; i++) {
733 ring = adev->rings[i];
734 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
735 /* only compute rings */
736 ring->has_compute_vm_bug = has_compute_vm_bug;
737 else
738 ring->has_compute_vm_bug = false;
739 }
740 }
741
742 /**
743 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
744 *
745 * @ring: ring on which the job will be submitted
746 * @job: job to submit
747 *
748 * Returns:
749 * True if sync is needed.
750 */
751 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
752 struct amdgpu_job *job)
753 {
754 struct amdgpu_device *adev = ring->adev;
755 unsigned vmhub = ring->vm_hub;
756 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
757
758 if (job->vmid == 0)
759 return false;
760
761 if (job->vm_needs_flush || ring->has_compute_vm_bug)
762 return true;
763
764 if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
765 return true;
766
767 if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
768 return true;
769
770 return false;
771 }
772
773 /**
774 * amdgpu_vm_flush - hardware flush the vm
775 *
776 * @ring: ring to use for flush
777 * @job: related job
778 * @need_pipe_sync: is pipe sync needed
779 *
780 * Emit a VM flush when it is necessary.
781 */
782 void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
783 bool need_pipe_sync)
784 {
785 struct amdgpu_device *adev = ring->adev;
786 struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
787 unsigned vmhub = ring->vm_hub;
788 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
789 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
790 bool spm_update_needed = job->spm_update_needed;
791 bool gds_switch_needed = ring->funcs->emit_gds_switch &&
792 job->gds_switch_needed;
793 bool vm_flush_needed = job->vm_needs_flush;
794 bool cleaner_shader_needed = false;
795 bool pasid_mapping_needed = false;
796 struct dma_fence *fence = NULL;
797 unsigned int patch = 0;
798
799 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
800 gds_switch_needed = true;
801 vm_flush_needed = true;
802 pasid_mapping_needed = true;
803 spm_update_needed = true;
804 }
805
806 mutex_lock(&id_mgr->lock);
807 if (id->pasid != job->pasid || !id->pasid_mapping ||
808 !dma_fence_is_signaled(id->pasid_mapping))
809 pasid_mapping_needed = true;
810 mutex_unlock(&id_mgr->lock);
811
812 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
813 vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
814 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
815 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
816 ring->funcs->emit_wreg;
817
818 cleaner_shader_needed = job->run_cleaner_shader &&
819 adev->gfx.enable_cleaner_shader &&
820 ring->funcs->emit_cleaner_shader && job->base.s_fence &&
821 &job->base.s_fence->scheduled == isolation->spearhead;
822
823 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
824 !cleaner_shader_needed)
825 return;
826
827 amdgpu_ring_ib_begin(ring);
828
829 /* There is no matching insert_end for this on purpose for the vm flush.
830 * The IB portion of the submission has both. Having multiple
831 * insert_start sequences is ok, but you can only have one insert_end
832 * per submission based on the way VCN FW works. For JPEG
833 * you can have as many insert_start and insert_end sequences as you like as
834 * long as the rest of the packets come between start and end sequences.
835 */
836 if (ring->funcs->insert_start)
837 ring->funcs->insert_start(ring);
838
839 if (ring->funcs->init_cond_exec)
840 patch = amdgpu_ring_init_cond_exec(ring,
841 ring->cond_exe_gpu_addr);
842
843 if (need_pipe_sync)
844 amdgpu_ring_emit_pipeline_sync(ring);
845
846 if (cleaner_shader_needed)
847 ring->funcs->emit_cleaner_shader(ring);
848
849 if (vm_flush_needed) {
850 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
851 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
852 }
853
854 if (pasid_mapping_needed)
855 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
856
857 if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
858 adev->gfx.rlc.funcs->update_spm_vmid(adev, ring->xcc_id, ring, job->vmid);
859
860 if (ring->funcs->emit_gds_switch &&
861 gds_switch_needed) {
862 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
863 job->gds_size, job->gws_base,
864 job->gws_size, job->oa_base,
865 job->oa_size);
866 }
867
868 if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
869 amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
870 fence = &job->hw_vm_fence->base;
871 /* get a ref for the job */
872 dma_fence_get(fence);
873 }
874
875 if (vm_flush_needed) {
876 mutex_lock(&id_mgr->lock);
877 dma_fence_put(id->last_flush);
878 id->last_flush = dma_fence_get(fence);
879 id->current_gpu_reset_count =
880 atomic_read(&adev->gpu_reset_counter);
881 mutex_unlock(&id_mgr->lock);
882 }
883
884 if (pasid_mapping_needed) {
885 mutex_lock(&id_mgr->lock);
886 id->pasid = job->pasid;
887 dma_fence_put(id->pasid_mapping);
888 id->pasid_mapping = dma_fence_get(fence);
889 mutex_unlock(&id_mgr->lock);
890 }
891
892 /*
893 * Make sure that all other submissions wait for the cleaner shader to
894 * finish before we push them to the HW.
895 */
896 if (cleaner_shader_needed) {
897 trace_amdgpu_cleaner_shader(ring, fence);
898 mutex_lock(&adev->enforce_isolation_mutex);
899 dma_fence_put(isolation->spearhead);
900 isolation->spearhead = dma_fence_get(fence);
901 mutex_unlock(&adev->enforce_isolation_mutex);
902 }
903 dma_fence_put(fence);
904
905 amdgpu_ring_patch_cond_exec(ring, patch);
906
907 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
908 if (ring->funcs->emit_switch_buffer) {
909 amdgpu_ring_emit_switch_buffer(ring);
910 amdgpu_ring_emit_switch_buffer(ring);
911 }
912
913 amdgpu_ring_ib_end(ring);
914 }
915
916 /**
917 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
918 *
919 * @vm: requested vm
920 * @bo: requested buffer object
921 *
922 * Find @bo inside the requested vm.
923 * Search inside the @bo's vm list for the requested vm
924 * Returns the found bo_va or NULL if none is found
925 *
926 * Object has to be reserved!
927 *
928 * Returns:
929 * Found bo_va or NULL.
930 */
931 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
932 struct amdgpu_bo *bo)
933 {
934 struct amdgpu_vm_bo_base *base;
935
936 for (base = bo->vm_bo; base; base = base->next) {
937 if (base->vm != vm)
938 continue;
939
940 return container_of(base, struct amdgpu_bo_va, base);
941 }
942 return NULL;
943 }
944
945 /**
946 * amdgpu_vm_map_gart - Resolve gart mapping of addr
947 *
948 * @pages_addr: optional DMA address to use for lookup
949 * @addr: the unmapped addr
950 *
951 * Look up the physical address of the page that the pte resolves
952 * to.
953 *
954 * Returns:
955 * The pointer for the page table entry.
956 */
957 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
958 {
959 uint64_t result;
960
961 /* page table offset */
962 result = pages_addr[addr >> PAGE_SHIFT];
963
964 /* in case cpu page size != gpu page size*/
965 result |= addr & (~PAGE_MASK);
966
967 result &= 0xFFFFFFFFFFFFF000ULL;
968
969 return result;
970 }
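/*
 * Worked example for the lookup above, assuming 64KiB CPU pages and 4KiB GPU
 * pages (the numbers are made up): with addr = 0x123456 the CPU page index is
 * addr >> PAGE_SHIFT = 0x12. If pages_addr[0x12] = 0x800000000, then
 * result = 0x800000000 | (0x123456 & 0xffff) = 0x800003456, and the final
 * mask aligns it to the 4KiB GPU page: 0x800003000.
 */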
971
972 /**
973 * amdgpu_vm_update_pdes - make sure that all directories are valid
974 *
975 * @adev: amdgpu_device pointer
976 * @vm: requested vm
977 * @immediate: submit immediately to the paging queue
978 *
979 * Makes sure all directories are up to date.
980 *
981 * Returns:
982 * 0 for success, error for failure.
983 */
984 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
985 struct amdgpu_vm *vm, bool immediate)
986 {
987 struct amdgpu_vm_update_params params;
988 struct amdgpu_vm_bo_base *entry;
989 bool flush_tlb_needed = false;
990 LIST_HEAD(relocated);
991 int r, idx;
992
993 amdgpu_vm_assert_locked(vm);
994
995 spin_lock(&vm->status_lock);
996 list_splice_init(&vm->relocated, &relocated);
997 spin_unlock(&vm->status_lock);
998
999 if (list_empty(&relocated))
1000 return 0;
1001
1002 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1003 return -ENODEV;
1004
1005 memset(&params, 0, sizeof(params));
1006 params.adev = adev;
1007 params.vm = vm;
1008 params.immediate = immediate;
1009
1010 r = vm->update_funcs->prepare(&params, NULL,
1011 AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
1012 if (r)
1013 goto error;
1014
1015 list_for_each_entry(entry, &relocated, vm_status) {
1016 /* vm_flush_needed after updating moved PDEs */
1017 flush_tlb_needed |= entry->moved;
1018
1019 r = amdgpu_vm_pde_update(&params, entry);
1020 if (r)
1021 goto error;
1022 }
1023
1024 r = vm->update_funcs->commit(&params, &vm->last_update);
1025 if (r)
1026 goto error;
1027
1028 if (flush_tlb_needed)
1029 atomic64_inc(&vm->tlb_seq);
1030
1031 while (!list_empty(&relocated)) {
1032 entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
1033 vm_status);
1034 amdgpu_vm_bo_idle(entry);
1035 }
1036
1037 error:
1038 drm_dev_exit(idx);
1039 return r;
1040 }
1041
1042 /**
1043 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1044 * @fence: unused
1045 * @cb: the callback structure
1046 *
1047 * Increments the tlb sequence to make sure that future CS execute a VM flush.
1048 */
1049 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
1050 struct dma_fence_cb *cb)
1051 {
1052 struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1053
1054 tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1055 atomic64_inc(&tlb_cb->vm->tlb_seq);
1056 kfree(tlb_cb);
1057 }
1058
1059 /**
1060 * amdgpu_vm_tlb_flush - prepare TLB flush
1061 *
1062 * @params: parameters for update
1063 * @fence: input fence to sync TLB flush with
1064 * @tlb_cb: the callback structure
1065 *
1066 * Increments the tlb sequence to make sure that future CS execute a VM flush.
1067 */
1068 static void
1069 amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
1070 struct dma_fence **fence,
1071 struct amdgpu_vm_tlb_seq_struct *tlb_cb)
1072 {
1073 struct amdgpu_vm *vm = params->vm;
1074
1075 tlb_cb->vm = vm;
1076 if (!fence || !*fence) {
1077 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1078 return;
1079 }
1080
1081 if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1082 amdgpu_vm_tlb_seq_cb)) {
1083 dma_fence_put(vm->last_tlb_flush);
1084 vm->last_tlb_flush = dma_fence_get(*fence);
1085 } else {
1086 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1087 }
1088
1089 /* Prepare a TLB flush fence to be attached to PTs */
1090 /* The check for need_tlb_fence should be dropped once we
1091 * sort out the issues with KIQ/MES TLB invalidation timeouts.
1092 */
1093 if (!params->unlocked && vm->need_tlb_fence) {
1094 amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1095
1096 /* Makes sure no PD/PT is freed before the flush */
1097 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1098 DMA_RESV_USAGE_BOOKKEEP);
1099 }
1100 }
1101
1102 /**
1103 * amdgpu_vm_update_range - update a range in the vm page table
1104 *
1105 * @adev: amdgpu_device pointer to use for commands
1106 * @vm: the VM to update the range
1107 * @immediate: immediate submission in a page fault
1108 * @unlocked: unlocked invalidation during MM callback
1109 * @flush_tlb: trigger tlb invalidation after update completed
1110 * @allow_override: change MTYPE for local NUMA nodes
1111 * @sync: fences we need to sync to
1112 * @start: start of mapped range
1113 * @last: last mapped entry
1114 * @flags: flags for the entries
1115 * @offset: offset into nodes and pages_addr
1116 * @vram_base: base for vram mappings
1117 * @res: ttm_resource to map
1118 * @pages_addr: DMA addresses to use for mapping
1119 * @fence: optional resulting fence
1120 *
1121 * Fill in the page table entries between @start and @last.
1122 *
1123 * Returns:
1124 * 0 for success, negative error code for failure.
1125 */
1126 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1127 bool immediate, bool unlocked, bool flush_tlb,
1128 bool allow_override, struct amdgpu_sync *sync,
1129 uint64_t start, uint64_t last, uint64_t flags,
1130 uint64_t offset, uint64_t vram_base,
1131 struct ttm_resource *res, dma_addr_t *pages_addr,
1132 struct dma_fence **fence)
1133 {
1134 struct amdgpu_vm_tlb_seq_struct *tlb_cb;
1135 struct amdgpu_vm_update_params params;
1136 struct amdgpu_res_cursor cursor;
1137 int r, idx;
1138
1139 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1140 return -ENODEV;
1141
1142 tlb_cb = kmalloc_obj(*tlb_cb);
1143 if (!tlb_cb) {
1144 drm_dev_exit(idx);
1145 return -ENOMEM;
1146 }
1147
1148 /* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2 texture
1149 * cache, so do a heavy-weight TLB flush unconditionally.
1150 */
1151 flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1152 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
1153
1154 /*
1155 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
1156 */
1157 flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
1158
1159 memset(&params, 0, sizeof(params));
1160 params.adev = adev;
1161 params.vm = vm;
1162 params.immediate = immediate;
1163 params.pages_addr = pages_addr;
1164 params.unlocked = unlocked;
1165 params.needs_flush = flush_tlb;
1166 params.allow_override = allow_override;
1167 INIT_LIST_HEAD(&params.tlb_flush_waitlist);
1168
1169 amdgpu_vm_eviction_lock(vm);
1170 if (vm->evicting) {
1171 r = -EBUSY;
1172 goto error_free;
1173 }
1174
1175 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1176 struct dma_fence *tmp = dma_fence_get_stub();
1177
1178 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1179 swap(vm->last_unlocked, tmp);
1180 dma_fence_put(tmp);
1181 }
1182
1183 r = vm->update_funcs->prepare(&params, sync,
1184 AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
1185 if (r)
1186 goto error_free;
1187
1188 amdgpu_res_first(pages_addr ? NULL : res, offset,
1189 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1190 while (cursor.remaining) {
1191 uint64_t tmp, num_entries, addr;
1192
1193 num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
1194 if (pages_addr) {
1195 bool contiguous = true;
1196
1197 if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1198 uint64_t pfn = cursor.start >> PAGE_SHIFT;
1199 uint64_t count;
1200
1201 contiguous = pages_addr[pfn + 1] ==
1202 pages_addr[pfn] + PAGE_SIZE;
1203
1204 tmp = num_entries /
1205 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1206 for (count = 2; count < tmp; ++count) {
1207 uint64_t idx = pfn + count;
1208
1209 if (contiguous != (pages_addr[idx] ==
1210 pages_addr[idx - 1] + PAGE_SIZE))
1211 break;
1212 }
1213 if (!contiguous)
1214 count--;
1215 num_entries = count *
1216 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1217 }
1218
1219 if (!contiguous) {
1220 addr = cursor.start;
1221 params.pages_addr = pages_addr;
1222 } else {
1223 addr = pages_addr[cursor.start >> PAGE_SHIFT];
1224 params.pages_addr = NULL;
1225 }
1226
1227 } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
1228 addr = vram_base + cursor.start;
1229 } else {
1230 addr = 0;
1231 }
1232
1233 tmp = start + num_entries;
1234 r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
1235 if (r)
1236 goto error_free;
1237
1238 amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1239 start = tmp;
1240 }
1241
1242 r = vm->update_funcs->commit(&params, fence);
1243 if (r)
1244 goto error_free;
1245
1246 if (params.needs_flush) {
1247 amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
1248 tlb_cb = NULL;
1249 }
1250
1251 amdgpu_vm_pt_free_list(adev, &params);
1252
1253 error_free:
1254 kfree(tlb_cb);
1255 amdgpu_vm_eviction_unlock(vm);
1256 drm_dev_exit(idx);
1257 return r;
1258 }
1259
1260 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1261 struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
1262 {
1263 spin_lock(&vm->status_lock);
1264 memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1265 spin_unlock(&vm->status_lock);
1266 }
1267
1268 /**
1269 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1270 *
1271 * @adev: amdgpu_device pointer
1272 * @bo_va: requested BO and VM object
1273 * @clear: if true clear the entries
1274 *
1275 * Fill in the page table entries for @bo_va.
1276 *
1277 * Returns:
1278 * 0 for success, -EINVAL for failure.
1279 */
1280 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1281 bool clear)
1282 {
1283 struct amdgpu_bo *bo = bo_va->base.bo;
1284 struct amdgpu_vm *vm = bo_va->base.vm;
1285 struct amdgpu_bo_va_mapping *mapping;
1286 struct dma_fence **last_update;
1287 dma_addr_t *pages_addr = NULL;
1288 struct ttm_resource *mem;
1289 struct amdgpu_sync sync;
1290 bool flush_tlb = clear;
1291 uint64_t vram_base;
1292 uint64_t flags;
1293 bool uncached;
1294 int r;
1295
1296 amdgpu_sync_create(&sync);
1297 if (clear) {
1298 mem = NULL;
1299
1300 /* Implicitly sync to command submissions in the same VM before
1301 * unmapping.
1302 */
1303 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1304 AMDGPU_SYNC_EQ_OWNER, vm);
1305 if (r)
1306 goto error_free;
1307 if (bo) {
1308 r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1309 if (r)
1310 goto error_free;
1311 }
1312 } else if (!bo) {
1313 mem = NULL;
1314
1315 /* PRT map operations don't need to sync to anything. */
1316
1317 } else {
1318 struct drm_gem_object *obj = &bo->tbo.base;
1319
1320 if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
1321 struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1322 struct drm_gem_object *gobj = dma_buf->priv;
1323 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1324
1325 if (abo->tbo.resource &&
1326 abo->tbo.resource->mem_type == TTM_PL_VRAM)
1327 bo = gem_to_amdgpu_bo(gobj);
1328 }
1329 mem = bo->tbo.resource;
1330 if (mem && (mem->mem_type == TTM_PL_TT ||
1331 mem->mem_type == AMDGPU_PL_PREEMPT))
1332 pages_addr = bo->tbo.ttm->dma_address;
1333
1334 /* Implicitly sync to moving fences before mapping anything */
1335 r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1336 AMDGPU_SYNC_EXPLICIT, vm);
1337 if (r)
1338 goto error_free;
1339 }
1340
1341 if (bo) {
1342 struct amdgpu_device *bo_adev;
1343
1344 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1345
1346 if (amdgpu_bo_encrypted(bo))
1347 flags |= AMDGPU_PTE_TMZ;
1348
1349 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1350 vram_base = bo_adev->vm_manager.vram_base_offset;
1351 uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1352 } else {
1353 flags = 0x0;
1354 vram_base = 0;
1355 uncached = false;
1356 }
1357
1358 if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
1359 last_update = &vm->last_update;
1360 else
1361 last_update = &bo_va->last_pt_update;
1362
1363 if (!clear && bo_va->base.moved) {
1364 flush_tlb = true;
1365 list_splice_init(&bo_va->valids, &bo_va->invalids);
1366
1367 } else if (bo_va->cleared != clear) {
1368 list_splice_init(&bo_va->valids, &bo_va->invalids);
1369 }
1370
1371 list_for_each_entry(mapping, &bo_va->invalids, list) {
1372 uint64_t update_flags = flags;
1373
1374 /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
1375 * but filter the flags here anyway just in case.
1376 */
1377 if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
1378 update_flags &= ~AMDGPU_PTE_READABLE;
1379 if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
1380 update_flags &= ~AMDGPU_PTE_WRITEABLE;
1381
1382 /* Apply ASIC specific mapping flags */
1383 amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
1384 &update_flags);
1385
1386 trace_amdgpu_vm_bo_update(mapping);
1387
1388 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1389 !uncached, &sync, mapping->start,
1390 mapping->last, update_flags,
1391 mapping->offset, vram_base, mem,
1392 pages_addr, last_update);
1393 if (r)
1394 goto error_free;
1395 }
1396
1397 /* If the BO is not in its preferred location add it back to
1398 * the evicted list so that it gets validated again on the
1399 * next command submission.
1400 */
1401 if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
1402 if (bo->tbo.resource &&
1403 !(bo->preferred_domains &
1404 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1405 amdgpu_vm_bo_evicted(&bo_va->base);
1406 else
1407 amdgpu_vm_bo_idle(&bo_va->base);
1408 } else {
1409 amdgpu_vm_bo_done(&bo_va->base);
1410 }
1411
1412 list_splice_init(&bo_va->invalids, &bo_va->valids);
1413 bo_va->cleared = clear;
1414 bo_va->base.moved = false;
1415
1416 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1417 list_for_each_entry(mapping, &bo_va->valids, list)
1418 trace_amdgpu_vm_bo_mapping(mapping);
1419 }
1420
1421 error_free:
1422 amdgpu_sync_free(&sync);
1423 return r;
1424 }
1425
1426 /**
1427 * amdgpu_vm_update_prt_state - update the global PRT state
1428 *
1429 * @adev: amdgpu_device pointer
1430 */
1431 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1432 {
1433 unsigned long flags;
1434 bool enable;
1435
1436 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1437 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1438 adev->gmc.gmc_funcs->set_prt(adev, enable);
1439 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1440 }
1441
1442 /**
1443 * amdgpu_vm_prt_get - add a PRT user
1444 *
1445 * @adev: amdgpu_device pointer
1446 */
1447 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1448 {
1449 if (!adev->gmc.gmc_funcs->set_prt)
1450 return;
1451
1452 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1453 amdgpu_vm_update_prt_state(adev);
1454 }
1455
1456 /**
1457 * amdgpu_vm_prt_put - drop a PRT user
1458 *
1459 * @adev: amdgpu_device pointer
1460 */
1461 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1462 {
1463 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1464 amdgpu_vm_update_prt_state(adev);
1465 }
1466
1467 /**
1468 * amdgpu_vm_prt_cb - callback for updating the PRT status
1469 *
1470 * @fence: fence for the callback
1471 * @_cb: the callback function
1472 */
1473 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1474 {
1475 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1476
1477 amdgpu_vm_prt_put(cb->adev);
1478 kfree(cb);
1479 }
1480
1481 /**
1482 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1483 *
1484 * @adev: amdgpu_device pointer
1485 * @fence: fence for the callback
1486 */
1487 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1488 struct dma_fence *fence)
1489 {
1490 struct amdgpu_prt_cb *cb;
1491
1492 if (!adev->gmc.gmc_funcs->set_prt)
1493 return;
1494
1495 cb = kmalloc_obj(struct amdgpu_prt_cb);
1496 if (!cb) {
1497 /* Last resort when we are OOM */
1498 if (fence)
1499 dma_fence_wait(fence, false);
1500
1501 amdgpu_vm_prt_put(adev);
1502 } else {
1503 cb->adev = adev;
1504 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1505 amdgpu_vm_prt_cb))
1506 amdgpu_vm_prt_cb(fence, &cb->cb);
1507 }
1508 }
1509
1510 /**
1511 * amdgpu_vm_free_mapping - free a mapping
1512 *
1513 * @adev: amdgpu_device pointer
1514 * @vm: requested vm
1515 * @mapping: mapping to be freed
1516 * @fence: fence of the unmap operation
1517 *
1518 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1519 */
1520 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1521 struct amdgpu_vm *vm,
1522 struct amdgpu_bo_va_mapping *mapping,
1523 struct dma_fence *fence)
1524 {
1525 if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1526 amdgpu_vm_add_prt_cb(adev, fence);
1527 kfree(mapping);
1528 }
1529
1530 /**
1531 * amdgpu_vm_prt_fini - finish all prt mappings
1532 *
1533 * @adev: amdgpu_device pointer
1534 * @vm: requested vm
1535 *
1536 * Register a cleanup callback to disable PRT support after VM dies.
1537 */
1538 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1539 {
1540 struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1541 struct dma_resv_iter cursor;
1542 struct dma_fence *fence;
1543
1544 dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1545 /* Add a callback for each fence in the reservation object */
1546 amdgpu_vm_prt_get(adev);
1547 amdgpu_vm_add_prt_cb(adev, fence);
1548 }
1549 }
1550
1551 /**
1552 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1553 *
1554 * @adev: amdgpu_device pointer
1555 * @vm: requested vm
1556 * @fence: optional resulting fence (unchanged if no work needed to be done
1557 * or if an error occurred)
1558 *
1559 * Make sure all freed BOs are cleared in the PT.
1560 * PTs have to be reserved and mutex must be locked!
1561 *
1562 * Returns:
1563 * 0 for success.
1564 *
1565 */
1566 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1567 struct amdgpu_vm *vm,
1568 struct dma_fence **fence)
1569 {
1570 struct amdgpu_bo_va_mapping *mapping;
1571 struct dma_fence *f = NULL;
1572 struct amdgpu_sync sync;
1573 int r;
1574
1575
1576 /*
1577 * Implicitly sync to command submissions in the same VM before
1578 * unmapping.
1579 */
1580 amdgpu_sync_create(&sync);
1581 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1582 AMDGPU_SYNC_EQ_OWNER, vm);
1583 if (r)
1584 goto error_free;
1585
1586 while (!list_empty(&vm->freed)) {
1587 mapping = list_first_entry(&vm->freed,
1588 struct amdgpu_bo_va_mapping, list);
1589 list_del(&mapping->list);
1590
1591 r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1592 &sync, mapping->start, mapping->last,
1593 0, 0, 0, NULL, NULL, &f);
1594 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1595 if (r) {
1596 dma_fence_put(f);
1597 goto error_free;
1598 }
1599 }
1600
1601 if (fence && f) {
1602 dma_fence_put(*fence);
1603 *fence = f;
1604 } else {
1605 dma_fence_put(f);
1606 }
1607
1608 error_free:
1609 amdgpu_sync_free(&sync);
1610 return r;
1611
1612 }
1613
1614 /**
1615 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1616 *
1617 * @adev: amdgpu_device pointer
1618 * @vm: requested vm
1619 * @ticket: optional reservation ticket used to reserve the VM
1620 *
1621 * Make sure all BOs which are moved are updated in the PTs.
1622 *
1623 * Returns:
1624 * 0 for success.
1625 *
1626 * PTs have to be reserved!
1627 */
1628 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1629 struct amdgpu_vm *vm,
1630 struct ww_acquire_ctx *ticket)
1631 {
1632 struct amdgpu_bo_va *bo_va;
1633 struct dma_resv *resv;
1634 bool clear, unlock;
1635 int r;
1636
1637 spin_lock(&vm->status_lock);
1638 while (!list_empty(&vm->moved)) {
1639 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1640 base.vm_status);
1641 spin_unlock(&vm->status_lock);
1642
1643 /* Per VM BOs never need to be cleared in the page tables */
1644 r = amdgpu_vm_bo_update(adev, bo_va, false);
1645 if (r)
1646 return r;
1647 spin_lock(&vm->status_lock);
1648 }
1649
1650 while (!list_empty(&vm->invalidated)) {
1651 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1652 base.vm_status);
1653 resv = bo_va->base.bo->tbo.base.resv;
1654 spin_unlock(&vm->status_lock);
1655
1656 /* Try to reserve the BO to avoid clearing its ptes */
1657 if (!adev->debug_vm && dma_resv_trylock(resv)) {
1658 clear = false;
1659 unlock = true;
1660 /* The caller is already holding the reservation lock */
1661 } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
1662 clear = false;
1663 unlock = false;
1664 /* Somebody else is using the BO right now */
1665 } else {
1666 clear = true;
1667 unlock = false;
1668 }
1669
1670 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1671
1672 if (unlock)
1673 dma_resv_unlock(resv);
1674 if (r)
1675 return r;
1676
1677 /* Remember evicted DMABuf imports in compute VMs for later
1678 * validation
1679 */
1680 if (vm->is_compute_context &&
1681 drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
1682 (!bo_va->base.bo->tbo.resource ||
1683 bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1684 amdgpu_vm_bo_evicted_user(&bo_va->base);
1685
1686 spin_lock(&vm->status_lock);
1687 }
1688 spin_unlock(&vm->status_lock);
1689
1690 return 0;
1691 }
1692
1693 /**
1694 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1695 *
1696 * @adev: amdgpu_device pointer
1697 * @vm: requested vm
1698 * @flush_type: flush type
1699 * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1700 *
1701 * Flush TLB if needed for a compute VM.
1702 *
1703 * Returns:
1704 * 0 for success.
1705 */
1706 int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
1707 struct amdgpu_vm *vm,
1708 uint32_t flush_type,
1709 uint32_t xcc_mask)
1710 {
1711 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1712 bool all_hub = false;
1713 int xcc = 0, r = 0;
1714
1715 WARN_ON_ONCE(!vm->is_compute_context);
1716
1717 /*
1718 * It can be that we race and lose here, but that is extremely unlikely
1719 * and the worst thing which could happen is that we flush the changes
1720 * into the TLB once more which is harmless.
1721 */
1722 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1723 return 0;
1724
1725 if (adev->family == AMDGPU_FAMILY_AI ||
1726 adev->family == AMDGPU_FAMILY_RV)
1727 all_hub = true;
1728
1729 for_each_inst(xcc, xcc_mask) {
1730 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1731 all_hub, xcc);
1732 if (r)
1733 break;
1734 }
1735 return r;
1736 }
1737
1738 /**
1739 * amdgpu_vm_bo_add - add a bo to a specific vm
1740 *
1741 * @adev: amdgpu_device pointer
1742 * @vm: requested vm
1743 * @bo: amdgpu buffer object
1744 *
1745 * Add @bo into the requested vm.
1746 * Add @bo to the list of bos associated with the vm
1747 *
1748 * Returns:
1749 * Newly added bo_va or NULL for failure
1750 *
1751 * Object has to be reserved!
1752 */
1753 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1754 struct amdgpu_vm *vm,
1755 struct amdgpu_bo *bo)
1756 {
1757 struct amdgpu_bo_va *bo_va;
1758
1759 amdgpu_vm_assert_locked(vm);
1760
1761 bo_va = kzalloc_obj(struct amdgpu_bo_va);
1762 if (bo_va == NULL) {
1763 return NULL;
1764 }
1765 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1766
1767 bo_va->ref_count = 1;
1768 bo_va->last_pt_update = dma_fence_get_stub();
1769 INIT_LIST_HEAD(&bo_va->valids);
1770 INIT_LIST_HEAD(&bo_va->invalids);
1771
1772 if (!bo)
1773 return bo_va;
1774
1775 dma_resv_assert_held(bo->tbo.base.resv);
1776 if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1777 bo_va->is_xgmi = true;
1778 /* Power up XGMI if it can be potentially used */
1779 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1780 }
1781
1782 return bo_va;
1783 }
1784
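/*
 * Illustrative usage sketch (assumed caller context, not in-tree code):
 * the VM is expected to be locked by the caller and the BO has to be
 * reserved around the call, as the kernel-doc above requires.
 *
 *      struct amdgpu_bo_va *bo_va;
 *      int r;
 *
 *      r = amdgpu_bo_reserve(bo, true);
 *      if (r)
 *              return r;
 *      bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *      amdgpu_bo_unreserve(bo);
 *      if (!bo_va)
 *              return -ENOMEM;
 */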
1785
1786 /**
1787 * amdgpu_vm_bo_insert_map - insert a new mapping
1788 *
1789 * @adev: amdgpu_device pointer
1790 * @bo_va: bo_va to store the address
1791 * @mapping: the mapping to insert
1792 *
1793 * Insert a new mapping into all structures.
1794 */
1795 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1796 struct amdgpu_bo_va *bo_va,
1797 struct amdgpu_bo_va_mapping *mapping)
1798 {
1799 struct amdgpu_vm *vm = bo_va->base.vm;
1800 struct amdgpu_bo *bo = bo_va->base.bo;
1801
1802 mapping->bo_va = bo_va;
1803 list_add(&mapping->list, &bo_va->invalids);
1804 amdgpu_vm_it_insert(mapping, &vm->va);
1805
1806 if (mapping->flags & AMDGPU_VM_PAGE_PRT)
1807 amdgpu_vm_prt_get(adev);
1808
1809 if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1810 amdgpu_vm_bo_moved(&bo_va->base);
1811
1812 trace_amdgpu_vm_bo_map(bo_va, mapping);
1813 }
1814
1815 /* Validate operation parameters to prevent potential abuse */
1816 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1817 struct amdgpu_bo *bo,
1818 uint64_t saddr,
1819 uint64_t offset,
1820 uint64_t size)
1821 {
1822 uint64_t tmp, lpfn;
1823
1824 if (saddr & AMDGPU_GPU_PAGE_MASK
1825 || offset & AMDGPU_GPU_PAGE_MASK
1826 || size & AMDGPU_GPU_PAGE_MASK)
1827 return -EINVAL;
1828
1829 if (check_add_overflow(saddr, size, &tmp)
1830 || check_add_overflow(offset, size, &tmp)
1831 || size == 0 /* which also leads to end < begin */)
1832 return -EINVAL;
1833
1834 /* make sure the object fits at this offset */
1835 if (bo && offset + size > amdgpu_bo_size(bo))
1836 return -EINVAL;
1837
1838 /* Ensure the last pfn does not exceed max_pfn */
1839 lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1840 if (lpfn >= adev->vm_manager.max_pfn)
1841 return -EINVAL;
1842
1843 return 0;
1844 }
1845
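/*
 * Worked example of the checks above (illustrative numbers): with 4 KiB GPU
 * pages, saddr = 0x400000, offset = 0 and size = 0x200000 are page aligned,
 * do not overflow and keep the last pfn below max_pfn, so the call returns 0
 * (assuming the BO is at least 2 MiB). An unaligned saddr such as 0x400001
 * fails the AMDGPU_GPU_PAGE_MASK test and returns -EINVAL.
 */
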
1846 /**
1847 * amdgpu_vm_bo_map - map bo inside a vm
1848 *
1849 * @adev: amdgpu_device pointer
1850 * @bo_va: bo_va to store the address
1851 * @saddr: where to map the BO
1852 * @offset: requested offset in the BO
1853 * @size: BO size in bytes
1854 * @flags: attributes of pages (read/write/valid/etc.)
1855 *
1856 * Add a mapping of the BO at the specified addr into the VM.
1857 *
1858 * Returns:
1859 * 0 for success, error for failure.
1860 *
1861 * Object has to be reserved and unreserved outside!
1862 */
1863 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1864 struct amdgpu_bo_va *bo_va,
1865 uint64_t saddr, uint64_t offset,
1866 uint64_t size, uint32_t flags)
1867 {
1868 struct amdgpu_bo_va_mapping *mapping, *tmp;
1869 struct amdgpu_bo *bo = bo_va->base.bo;
1870 struct amdgpu_vm *vm = bo_va->base.vm;
1871 uint64_t eaddr;
1872 int r;
1873
1874 r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1875 if (r)
1876 return r;
1877
1878 saddr /= AMDGPU_GPU_PAGE_SIZE;
1879 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1880
1881 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1882 if (tmp) {
1883 /* bo and tmp overlap, invalid addr */
1884 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1885 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1886 tmp->start, tmp->last + 1);
1887 return -EINVAL;
1888 }
1889
1890 mapping = kmalloc_obj(*mapping);
1891 if (!mapping)
1892 return -ENOMEM;
1893
1894 mapping->start = saddr;
1895 mapping->last = eaddr;
1896 mapping->offset = offset;
1897 mapping->flags = flags;
1898
1899 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1900
1901 return 0;
1902 }
1903
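/*
 * Illustrative usage sketch (addresses and flag choice are assumptions, not
 * from an in-tree caller): map the first 2 MiB of the BO read/writeable at
 * GPU VA 0x800000. Real callers derive the flags from the GEM VA IOCTL.
 *
 *      int r;
 *
 *      r = amdgpu_vm_bo_map(adev, bo_va, 0x800000, 0, 0x200000,
 *                           AMDGPU_VM_PAGE_READABLE |
 *                           AMDGPU_VM_PAGE_WRITEABLE);
 *      if (r)
 *              return r;
 */
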
1904 /**
1905 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1906 *
1907 * @adev: amdgpu_device pointer
1908 * @bo_va: bo_va to store the address
1909 * @saddr: where to map the BO
1910 * @offset: requested offset in the BO
1911 * @size: BO size in bytes
1912 * @flags: attributes of pages (read/write/valid/etc.)
1913 *
1914 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1915 * mappings as we do so.
1916 *
1917 * Returns:
1918 * 0 for success, error for failure.
1919 *
1920 * Object has to be reserved and unreserved outside!
1921 */
1922 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1923 struct amdgpu_bo_va *bo_va,
1924 uint64_t saddr, uint64_t offset,
1925 uint64_t size, uint32_t flags)
1926 {
1927 struct amdgpu_bo_va_mapping *mapping;
1928 struct amdgpu_bo *bo = bo_va->base.bo;
1929 uint64_t eaddr;
1930 int r;
1931
1932 r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1933 if (r)
1934 return r;
1935
1936 /* Allocate all the needed memory */
1937 mapping = kmalloc_obj(*mapping);
1938 if (!mapping)
1939 return -ENOMEM;
1940
1941 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1942 if (r) {
1943 kfree(mapping);
1944 return r;
1945 }
1946
1947 saddr /= AMDGPU_GPU_PAGE_SIZE;
1948 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1949
1950 mapping->start = saddr;
1951 mapping->last = eaddr;
1952 mapping->offset = offset;
1953 mapping->flags = flags;
1954
1955 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1956
1957 return 0;
1958 }
1959
1960 /**
1961 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1962 *
1963 * @adev: amdgpu_device pointer
1964 * @bo_va: bo_va to remove the address from
1965 * @saddr: where the BO is mapped
1966 *
1967 * Remove a mapping of the BO at the specified addr from the VM.
1968 *
1969 * Returns:
1970 * 0 for success, error for failure.
1971 *
1972 * Object has to be reserved and unreserved outside!
1973 */
1974 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1975 struct amdgpu_bo_va *bo_va,
1976 uint64_t saddr)
1977 {
1978 struct amdgpu_bo_va_mapping *mapping;
1979 struct amdgpu_vm *vm = bo_va->base.vm;
1980 bool valid = true;
1981
1982 saddr /= AMDGPU_GPU_PAGE_SIZE;
1983
1984 list_for_each_entry(mapping, &bo_va->valids, list) {
1985 if (mapping->start == saddr)
1986 break;
1987 }
1988
1989 if (&mapping->list == &bo_va->valids) {
1990 valid = false;
1991
1992 list_for_each_entry(mapping, &bo_va->invalids, list) {
1993 if (mapping->start == saddr)
1994 break;
1995 }
1996
1997 if (&mapping->list == &bo_va->invalids)
1998 return -ENOENT;
1999 }
2000
2001 /* It is unlikely that the userq mapping has not been idled by the time
2002 * userspace issues the GEM unmap IOCTL, except when the unmap is forced
2003 * from user space.
2004 */
2005 if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0))
2006 amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
2007
2008 list_del(&mapping->list);
2009 amdgpu_vm_it_remove(mapping, &vm->va);
2010 mapping->bo_va = NULL;
2011 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2012
2013 if (valid)
2014 list_add(&mapping->list, &vm->freed);
2015 else
2016 amdgpu_vm_free_mapping(adev, vm, mapping,
2017 bo_va->last_pt_update);
2018
2019 return 0;
2020 }
2021
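/*
 * Illustrative sketch, continuing the assumed example above: the address
 * passed to amdgpu_vm_bo_unmap() must be the exact start address of an
 * existing mapping, otherwise -ENOENT is returned.
 *
 *      r = amdgpu_vm_bo_unmap(adev, bo_va, 0x800000);
 */
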
2022 /**
2023 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2024 *
2025 * @adev: amdgpu_device pointer
2026 * @vm: VM structure to use
2027 * @saddr: start of the range
2028 * @size: size of the range
2029 *
2030 * Remove all mappings in a range, splitting them as appropriate.
2031 *
2032 * Returns:
2033 * 0 for success, error for failure.
2034 */
2035 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2036 struct amdgpu_vm *vm,
2037 uint64_t saddr, uint64_t size)
2038 {
2039 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2040 LIST_HEAD(removed);
2041 uint64_t eaddr;
2042 int r;
2043
2044 r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
2045 if (r)
2046 return r;
2047
2048 saddr /= AMDGPU_GPU_PAGE_SIZE;
2049 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
2050
2051 /* Allocate all the needed memory */
2052 before = kzalloc_obj(*before);
2053 if (!before)
2054 return -ENOMEM;
2055 INIT_LIST_HEAD(&before->list);
2056
2057 after = kzalloc_obj(*after);
2058 if (!after) {
2059 kfree(before);
2060 return -ENOMEM;
2061 }
2062 INIT_LIST_HEAD(&after->list);
2063
2064 /* Now gather all removed mappings */
2065 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2066 while (tmp) {
2067 /* Remember mapping split at the start */
2068 if (tmp->start < saddr) {
2069 before->start = tmp->start;
2070 before->last = saddr - 1;
2071 before->offset = tmp->offset;
2072 before->flags = tmp->flags;
2073 before->bo_va = tmp->bo_va;
2074 list_add(&before->list, &tmp->bo_va->invalids);
2075 }
2076
2077 /* Remember mapping split at the end */
2078 if (tmp->last > eaddr) {
2079 after->start = eaddr + 1;
2080 after->last = tmp->last;
2081 after->offset = tmp->offset;
2082 after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2083 after->flags = tmp->flags;
2084 after->bo_va = tmp->bo_va;
2085 list_add(&after->list, &tmp->bo_va->invalids);
2086 }
2087
2088 list_del(&tmp->list);
2089 list_add(&tmp->list, &removed);
2090
2091 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2092 }
2093
2094 /* And free them up */
2095 list_for_each_entry_safe(tmp, next, &removed, list) {
2096 amdgpu_vm_it_remove(tmp, &vm->va);
2097 list_del(&tmp->list);
2098
2099 if (tmp->start < saddr)
2100 tmp->start = saddr;
2101 if (tmp->last > eaddr)
2102 tmp->last = eaddr;
2103
2104 tmp->bo_va = NULL;
2105 list_add(&tmp->list, &vm->freed);
2106 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2107 }
2108
2109 /* Insert partial mapping before the range */
2110 if (!list_empty(&before->list)) {
2111 struct amdgpu_bo *bo = before->bo_va->base.bo;
2112
2113 amdgpu_vm_it_insert(before, &vm->va);
2114 if (before->flags & AMDGPU_VM_PAGE_PRT)
2115 amdgpu_vm_prt_get(adev);
2116
2117 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2118 !before->bo_va->base.moved)
2119 amdgpu_vm_bo_moved(&before->bo_va->base);
2120 } else {
2121 kfree(before);
2122 }
2123
2124 /* Insert partial mapping after the range */
2125 if (!list_empty(&after->list)) {
2126 struct amdgpu_bo *bo = after->bo_va->base.bo;
2127
2128 amdgpu_vm_it_insert(after, &vm->va);
2129 if (after->flags & AMDGPU_VM_PAGE_PRT)
2130 amdgpu_vm_prt_get(adev);
2131
2132 if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
2133 !after->bo_va->base.moved)
2134 amdgpu_vm_bo_moved(&after->bo_va->base);
2135 } else {
2136 kfree(after);
2137 }
2138
2139 return 0;
2140 }
2141
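/*
 * Worked example (illustrative page numbers): clearing pages 0x100-0x1ff
 * from a single mapping covering 0x000-0x2ff re-inserts a "before" remainder
 * for 0x000-0x0ff and an "after" remainder for 0x200-0x2ff, while the middle
 * part is clipped to the cleared range and moved to vm->freed.
 */
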
2142 /**
2143 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2144 *
2145 * @vm: the requested VM
2146 * @addr: the address
2147 *
2148 * Find a mapping by its address.
2149 *
2150 * Returns:
2151 * The amdgpu_bo_va_mapping matching addr, or NULL
2152 *
2153 */
2154 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2155 uint64_t addr)
2156 {
2157 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2158 }
2159
2160 /**
2161 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2162 *
2163 * @vm: the requested vm
2164 * @ticket: CS ticket
2165 *
2166 * Trace all mappings of BOs reserved during a command submission.
2167 */
2168 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2169 {
2170 struct amdgpu_bo_va_mapping *mapping;
2171
2172 if (!trace_amdgpu_vm_bo_cs_enabled())
2173 return;
2174
2175 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2176 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2177 if (mapping->bo_va && mapping->bo_va->base.bo) {
2178 struct amdgpu_bo *bo;
2179
2180 bo = mapping->bo_va->base.bo;
2181 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2182 ticket)
2183 continue;
2184 }
2185
2186 trace_amdgpu_vm_bo_cs(mapping);
2187 }
2188 }
2189
2190 /**
2191 * amdgpu_vm_bo_del - remove a bo from a specific vm
2192 *
2193 * @adev: amdgpu_device pointer
2194 * @bo_va: requested bo_va
2195 *
2196 * Remove @bo_va->bo from the requested vm.
2197 *
2198 * Object has to be reserved!
2199 */
2200 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
2201 struct amdgpu_bo_va *bo_va)
2202 {
2203 struct amdgpu_bo_va_mapping *mapping, *next;
2204 struct amdgpu_bo *bo = bo_va->base.bo;
2205 struct amdgpu_vm *vm = bo_va->base.vm;
2206 struct amdgpu_vm_bo_base **base;
2207
2208 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2209
2210 if (bo) {
2211 dma_resv_assert_held(bo->tbo.base.resv);
2212 if (amdgpu_vm_is_bo_always_valid(vm, bo))
2213 ttm_bo_set_bulk_move(&bo->tbo, NULL);
2214
2215 for (base = &bo_va->base.bo->vm_bo; *base;
2216 base = &(*base)->next) {
2217 if (*base != &bo_va->base)
2218 continue;
2219
2220 amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2221 *base = bo_va->base.next;
2222 break;
2223 }
2224 }
2225
2226 spin_lock(&vm->status_lock);
2227 list_del(&bo_va->base.vm_status);
2228 spin_unlock(&vm->status_lock);
2229
2230 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2231 list_del(&mapping->list);
2232 amdgpu_vm_it_remove(mapping, &vm->va);
2233 mapping->bo_va = NULL;
2234 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2235 list_add(&mapping->list, &vm->freed);
2236 }
2237 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2238 list_del(&mapping->list);
2239 amdgpu_vm_it_remove(mapping, &vm->va);
2240 amdgpu_vm_free_mapping(adev, vm, mapping,
2241 bo_va->last_pt_update);
2242 }
2243
2244 dma_fence_put(bo_va->last_pt_update);
2245
2246 if (bo && bo_va->is_xgmi)
2247 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2248
2249 kfree(bo_va);
2250 }
2251
2252 /**
2253 * amdgpu_vm_evictable - check if we can evict a VM
2254 *
2255 * @bo: A page table of the VM.
2256 *
2257 * Check if it is possible to evict a VM.
2258 */
2259 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2260 {
2261 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2262
2263 /* Page tables of a destroyed VM can go away immediately */
2264 if (!bo_base || !bo_base->vm)
2265 return true;
2266
2267 /* Don't evict VM page tables while they are busy */
2268 if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2269 return false;
2270
2271 /* Try to block ongoing updates */
2272 if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2273 return false;
2274
2275 /* Don't evict VM page tables while they are updated */
2276 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2277 amdgpu_vm_eviction_unlock(bo_base->vm);
2278 return false;
2279 }
2280
2281 bo_base->vm->evicting = true;
2282 amdgpu_vm_eviction_unlock(bo_base->vm);
2283 return true;
2284 }
2285
2286 /**
2287 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2288 *
2289 * @bo: amdgpu buffer object
2290 * @evicted: is the BO evicted
2291 *
2292 * Mark @bo as invalid.
2293 */
2294 void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
2295 {
2296 struct amdgpu_vm_bo_base *bo_base;
2297
2298 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2299 struct amdgpu_vm *vm = bo_base->vm;
2300
2301 if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
2302 amdgpu_vm_bo_evicted(bo_base);
2303 continue;
2304 }
2305
2306 if (bo_base->moved)
2307 continue;
2308 bo_base->moved = true;
2309
2310 if (bo->tbo.type == ttm_bo_type_kernel)
2311 amdgpu_vm_bo_relocated(bo_base);
2312 else if (amdgpu_vm_is_bo_always_valid(vm, bo))
2313 amdgpu_vm_bo_moved(bo_base);
2314 else
2315 amdgpu_vm_bo_invalidated(bo_base);
2316 }
2317 }
2318
2319 /**
2320 * amdgpu_vm_bo_move - handle BO move
2321 *
2322 * @bo: amdgpu buffer object
2323 * @new_mem: the new placement of the BO move
2324 * @evicted: is the BO evicted
2325 *
2326 * Update the memory stats for the new placement and mark @bo as invalid.
2327 */
2328 void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
2329 bool evicted)
2330 {
2331 struct amdgpu_vm_bo_base *bo_base;
2332
2333 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2334 struct amdgpu_vm *vm = bo_base->vm;
2335
2336 spin_lock(&vm->status_lock);
2337 amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2338 amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
2339 spin_unlock(&vm->status_lock);
2340 }
2341
2342 amdgpu_vm_bo_invalidate(bo, evicted);
2343 }
2344
2345 /**
2346 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2347 *
2348 * @vm_size: VM size
2349 *
2350 * Returns:
2351 * VM page table size as a power of two
2352 */
2353 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2354 {
2355 /* Total bits covered by PD + PTs */
2356 unsigned bits = ilog2(vm_size) + 18;
2357
2358 /* Make sure the PD is 4K in size up to 8GB address space.
2359 Above that, split equally between PD and PTs */
2360 if (vm_size <= 8)
2361 return (bits - 9);
2362 else
2363 return ((bits + 3) / 2);
2364 }
2365
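/*
 * Worked example: for an 8 GB VM, bits = ilog2(8) + 18 = 21 and the 4K PD
 * rule applies, giving a block size of 21 - 9 = 12; for a 256 GB VM,
 * bits = 26 and the even split gives (26 + 3) / 2 = 14.
 */
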
2366 /**
2367 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2368 *
2369 * @adev: amdgpu_device pointer
2370 * @min_vm_size: the minimum vm size in GB if it's set to auto
2371 * @fragment_size_default: Default PTE fragment size
2372 * @max_level: max VMPT level
2373 * @max_bits: max address space size in bits
2374 *
2375 */
2376 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2377 uint32_t fragment_size_default, unsigned max_level,
2378 unsigned max_bits)
2379 {
2380 unsigned int max_size = 1 << (max_bits - 30);
2381 unsigned int vm_size;
2382 uint64_t tmp;
2383
2384 /* adjust vm size first */
2385 if (amdgpu_vm_size != -1) {
2386 vm_size = amdgpu_vm_size;
2387 if (vm_size > max_size) {
2388 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2389 amdgpu_vm_size, max_size);
2390 vm_size = max_size;
2391 }
2392 } else {
2393 struct sysinfo si;
2394 unsigned int phys_ram_gb;
2395
2396 /* Optimal VM size depends on the amount of physical
2397 * RAM available. Underlying requirements and
2398 * assumptions:
2399 *
2400 * - Need to map system memory and VRAM from all GPUs
2401 * - VRAM from other GPUs not known here
2402 * - Assume VRAM <= system memory
2403 * - On GFX8 and older, VM space can be segmented for
2404 * different MTYPEs
2405 * - Need to allow room for fragmentation, guard pages etc.
2406 *
2407 * This adds up to a rough guess of system memory x3.
2408 * Round up to power of two to maximize the available
2409 * VM size with the given page table size.
2410 */
2411 si_meminfo(&si);
2412 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2413 (1 << 30) - 1) >> 30;
2414 vm_size = roundup_pow_of_two(
2415 clamp(phys_ram_gb * 3, min_vm_size, max_size));
2416 }
2417
2418 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2419 adev->vm_manager.max_level = max_level;
2420
2421 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2422 if (amdgpu_vm_block_size != -1)
2423 tmp >>= amdgpu_vm_block_size - 9;
2424 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2425 adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2426 switch (adev->vm_manager.num_level) {
2427 case 4:
2428 adev->vm_manager.root_level = AMDGPU_VM_PDB3;
2429 break;
2430 case 3:
2431 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2432 break;
2433 case 2:
2434 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2435 break;
2436 case 1:
2437 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2438 break;
2439 default:
2440 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2441 }
2442 /* block size depends on vm size and hw setup */
2443 if (amdgpu_vm_block_size != -1)
2444 adev->vm_manager.block_size =
2445 min((unsigned)amdgpu_vm_block_size, max_bits
2446 - AMDGPU_GPU_PAGE_SHIFT
2447 - 9 * adev->vm_manager.num_level);
2448 else if (adev->vm_manager.num_level > 1)
2449 adev->vm_manager.block_size = 9;
2450 else
2451 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2452
2453 if (amdgpu_vm_fragment_size == -1)
2454 adev->vm_manager.fragment_size = fragment_size_default;
2455 else
2456 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2457
2458 dev_info(
2459 adev->dev,
2460 "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2461 vm_size, adev->vm_manager.num_level + 1,
2462 adev->vm_manager.block_size, adev->vm_manager.fragment_size);
2463 }
2464
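/*
 * Worked example (illustrative): with 16 GB of system RAM and no module
 * parameter overrides, phys_ram_gb * 3 = 48 is clamped and rounded up to a
 * 64 GB VM, so max_pfn = 64 << 18 in 4 KiB GPU pages and, on a multi-level
 * configuration, the default 9-bit block size is used.
 */
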
2465 /**
2466 * amdgpu_vm_wait_idle - wait for the VM to become idle
2467 *
2468 * @vm: VM object to wait for
2469 * @timeout: timeout to wait for VM to become idle
2470 */
2471 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2472 {
2473 timeout = drm_sched_entity_flush(&vm->immediate, timeout);
2474 if (timeout <= 0)
2475 return timeout;
2476
2477 return drm_sched_entity_flush(&vm->delayed, timeout);
2478 }
2479
2480 static void amdgpu_vm_destroy_task_info(struct kref *kref)
2481 {
2482 struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
2483
2484 kfree(ti);
2485 }
2486
2487 static inline struct amdgpu_vm *
2488 amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
2489 {
2490 struct amdgpu_vm *vm;
2491 unsigned long flags;
2492
2493 xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2494 vm = xa_load(&adev->vm_manager.pasids, pasid);
2495 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2496
2497 return vm;
2498 }
2499
2500 /**
2501 * amdgpu_vm_put_task_info - drop a reference to the vm task_info ptr
2502 *
2503 * @task_info: task_info struct under discussion.
2504 *
2505 * Frees the vm task_info at the last put.
2506 */
2507 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
2508 {
2509 if (task_info)
2510 kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2511 }
2512
2513 /**
2514 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2515 *
2516 * @vm: VM to get info from
2517 *
2518 * Returns the reference counted task_info structure, which must be
2519 * released again with amdgpu_vm_put_task_info().
2520 */
2521 struct amdgpu_task_info *
2522 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2523 {
2524 struct amdgpu_task_info *ti = NULL;
2525
2526 if (vm) {
2527 ti = vm->task_info;
2528 kref_get(&vm->task_info->refcount);
2529 }
2530
2531 return ti;
2532 }
2533
2534 /**
2535 * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2536 *
2537 * @adev: drm device pointer
2538 * @pasid: PASID identifier for VM
2539 *
2540 * Returns the reference counted task_info structure, which must be
2541 * released again with amdgpu_vm_put_task_info().
2542 */
2543 struct amdgpu_task_info *
2544 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
2545 {
2546 return amdgpu_vm_get_task_info_vm(
2547 amdgpu_vm_get_vm_from_pasid(adev, pasid));
2548 }
2549
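/*
 * Illustrative sketch of the get/put pattern (assumed fault-handler style
 * caller):
 *
 *      struct amdgpu_task_info *ti;
 *
 *      ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *      if (ti) {
 *              amdgpu_vm_print_task_info(adev, ti);
 *              amdgpu_vm_put_task_info(ti);
 *      }
 */
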
2550 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2551 {
2552 vm->task_info = kzalloc_obj(struct amdgpu_task_info);
2553 if (!vm->task_info)
2554 return -ENOMEM;
2555
2556 kref_init(&vm->task_info->refcount);
2557 return 0;
2558 }
2559
2560 /**
2561 * amdgpu_vm_set_task_info - Sets VMs task info.
2562 *
2563 * @vm: vm for which to set the info
2564 */
2565 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2566 {
2567 if (!vm->task_info)
2568 return;
2569
2570 if (vm->task_info->task.pid == current->pid)
2571 return;
2572
2573 vm->task_info->task.pid = current->pid;
2574 get_task_comm(vm->task_info->task.comm, current);
2575
2576 vm->task_info->tgid = current->tgid;
2577 get_task_comm(vm->task_info->process_name, current->group_leader);
2578 }
2579
2580 /**
2581 * amdgpu_vm_init - initialize a vm instance
2582 *
2583 * @adev: amdgpu_device pointer
2584 * @vm: requested vm
2585 * @xcp_id: GPU partition selection id
2586 * @pasid: the pasid the VM is using on this GPU
2587 *
2588 * Init @vm fields.
2589 *
2590 * Returns:
2591 * 0 for success, error for failure.
2592 */
2593 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2594 int32_t xcp_id, uint32_t pasid)
2595 {
2596 struct amdgpu_bo *root_bo;
2597 struct amdgpu_bo_vm *root;
2598 int r, i;
2599
2600 vm->va = RB_ROOT_CACHED;
2601 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2602 vm->reserved_vmid[i] = NULL;
2603 INIT_LIST_HEAD(&vm->evicted);
2604 INIT_LIST_HEAD(&vm->evicted_user);
2605 INIT_LIST_HEAD(&vm->relocated);
2606 INIT_LIST_HEAD(&vm->moved);
2607 INIT_LIST_HEAD(&vm->idle);
2608 INIT_LIST_HEAD(&vm->invalidated);
2609 spin_lock_init(&vm->status_lock);
2610 INIT_LIST_HEAD(&vm->freed);
2611 INIT_LIST_HEAD(&vm->done);
2612 INIT_KFIFO(vm->faults);
2613
2614 r = amdgpu_vm_init_entities(adev, vm);
2615 if (r)
2616 return r;
2617
2618 ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2619
2620 vm->is_compute_context = false;
2621 vm->need_tlb_fence = amdgpu_userq_enabled(&adev->ddev);
2622
2623 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2624 AMDGPU_VM_USE_CPU_FOR_GFX);
2625
2626 dev_dbg(adev->dev, "VM update mode is %s\n",
2627 vm->use_cpu_for_update ? "CPU" : "SDMA");
2628 WARN_ONCE((vm->use_cpu_for_update &&
2629 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2630 "CPU update of VM recommended only for large BAR system\n");
2631
2632 if (vm->use_cpu_for_update)
2633 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2634 else
2635 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2636
2637 vm->last_update = dma_fence_get_stub();
2638 vm->last_unlocked = dma_fence_get_stub();
2639 vm->last_tlb_flush = dma_fence_get_stub();
2640 vm->generation = amdgpu_vm_generation(adev, NULL);
2641
2642 mutex_init(&vm->eviction_lock);
2643 vm->evicting = false;
2644 vm->tlb_fence_context = dma_fence_context_alloc(1);
2645
2646 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2647 false, &root, xcp_id);
2648 if (r)
2649 goto error_free_delayed;
2650
2651 root_bo = amdgpu_bo_ref(&root->bo);
2652 r = amdgpu_bo_reserve(root_bo, true);
2653 if (r) {
2654 amdgpu_bo_unref(&root_bo);
2655 goto error_free_delayed;
2656 }
2657
2658 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2659 r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2660 if (r)
2661 goto error_free_root;
2662
2663 r = amdgpu_vm_pt_clear(adev, vm, root, false);
2664 if (r)
2665 goto error_free_root;
2666
2667 r = amdgpu_vm_create_task_info(vm);
2668 if (r)
2669 dev_dbg(adev->dev, "Failed to create task info for VM\n");
2670
2671 /* Store new PASID in XArray (if non-zero) */
2672 if (pasid != 0) {
2673 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
2674 if (r < 0)
2675 goto error_free_root;
2676
2677 vm->pasid = pasid;
2678 }
2679
2680 amdgpu_bo_unreserve(vm->root.bo);
2681 amdgpu_bo_unref(&root_bo);
2682
2683 return 0;
2684
2685 error_free_root:
2686 /* If PASID was partially set, erase it from XArray before failing */
2687 if (vm->pasid != 0) {
2688 xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2689 vm->pasid = 0;
2690 }
2691 amdgpu_vm_pt_free_root(adev, vm);
2692 amdgpu_bo_unreserve(vm->root.bo);
2693 amdgpu_bo_unref(&root_bo);
2694
2695 error_free_delayed:
2696 dma_fence_put(vm->last_tlb_flush);
2697 dma_fence_put(vm->last_unlocked);
2698 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2699 amdgpu_vm_fini_entities(vm);
2700
2701 return r;
2702 }
2703
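/*
 * Illustrative usage sketch (assumed driver-open style caller; xcp_id and
 * pasid come from the caller's context): one VM per file private data,
 * torn down again with amdgpu_vm_fini().
 *
 *      r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
 *      if (r)
 *              return r;
 *      ...
 *      amdgpu_vm_fini(adev, &fpriv->vm);
 */
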
2704 /**
2705 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2706 *
2707 * @adev: amdgpu_device pointer
2708 * @vm: requested vm
2709 *
2710 * This only works on GFX VMs that don't have any BOs added and no
2711 * page tables allocated yet.
2712 *
2713 * Changes the following VM parameters:
2714 * - use_cpu_for_update
2715 * - is_compute_context
2719 *
2720 * Returns:
2721 * 0 for success, -errno for errors.
2722 */
2723 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2724 {
2725 int r;
2726
2727 r = amdgpu_bo_reserve(vm->root.bo, true);
2728 if (r)
2729 return r;
2730
2731 /* Update VM state */
2732 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2733 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2734 dev_dbg(adev->dev, "VM update mode is %s\n",
2735 vm->use_cpu_for_update ? "CPU" : "SDMA");
2736 WARN_ONCE((vm->use_cpu_for_update &&
2737 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2738 "CPU update of VM recommended only for large BAR system\n");
2739
2740 if (vm->use_cpu_for_update) {
2741 /* Sync with last SDMA update/clear before switching to CPU */
2742 r = amdgpu_bo_sync_wait(vm->root.bo,
2743 AMDGPU_FENCE_OWNER_UNDEFINED, true);
2744 if (r)
2745 goto unreserve_bo;
2746
2747 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2748 r = amdgpu_vm_pt_map_tables(adev, vm);
2749 if (r)
2750 goto unreserve_bo;
2751
2752 } else {
2753 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2754 }
2755
2756 dma_fence_put(vm->last_update);
2757 vm->last_update = dma_fence_get_stub();
2758 vm->is_compute_context = true;
2759 vm->need_tlb_fence = true;
2760
2761 unreserve_bo:
2762 amdgpu_bo_unreserve(vm->root.bo);
2763 return r;
2764 }
2765
2766 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
2767 {
2768 for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
2769 if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2770 vm->stats[i].evicted == 0))
2771 return false;
2772 }
2773 return true;
2774 }
2775
2776 /**
2777 * amdgpu_vm_fini - tear down a vm instance
2778 *
2779 * @adev: amdgpu_device pointer
2780 * @vm: requested vm
2781 *
2782 * Tear down @vm.
2783 * Unbind the VM and remove all BOs from the vm bo list.
2784 */
2785 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2786 {
2787 struct amdgpu_bo_va_mapping *mapping, *tmp;
2788 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2789 struct amdgpu_bo *root;
2790 unsigned long flags;
2791 int i;
2792
2793 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2794
2795 root = amdgpu_bo_ref(vm->root.bo);
2796 amdgpu_bo_reserve(root, true);
2797 /* Remove PASID mapping before destroying VM */
2798 if (vm->pasid != 0) {
2799 xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
2800 vm->pasid = 0;
2801 }
2802 dma_fence_wait(vm->last_unlocked, false);
2803 dma_fence_put(vm->last_unlocked);
2804 dma_fence_wait(vm->last_tlb_flush, false);
2805 /* Make sure that all fence callbacks have completed */
2806 dma_fence_lock_irqsave(vm->last_tlb_flush, flags);
2807 dma_fence_unlock_irqrestore(vm->last_tlb_flush, flags);
2808 dma_fence_put(vm->last_tlb_flush);
2809
2810 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2811 if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
2812 amdgpu_vm_prt_fini(adev, vm);
2813 prt_fini_needed = false;
2814 }
2815
2816 list_del(&mapping->list);
2817 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2818 }
2819
2820 amdgpu_vm_pt_free_root(adev, vm);
2821 amdgpu_bo_unreserve(root);
2822 amdgpu_bo_unref(&root);
2823 WARN_ON(vm->root.bo);
2824
2825 amdgpu_vm_fini_entities(vm);
2826
2827 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2828 dev_err(adev->dev, "still active bo inside vm\n");
2829 }
2830 rbtree_postorder_for_each_entry_safe(mapping, tmp,
2831 &vm->va.rb_root, rb) {
2832 /* Don't remove the mapping here, we don't want to trigger a
2833 * rebalance and the tree is about to be destroyed anyway.
2834 */
2835 list_del(&mapping->list);
2836 kfree(mapping);
2837 }
2838
2839 dma_fence_put(vm->last_update);
2840
2841 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2842 amdgpu_vmid_free_reserved(adev, vm, i);
2843 }
2844
2845 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2846
2847 if (!amdgpu_vm_stats_is_zero(vm)) {
2848 struct amdgpu_task_info *ti = vm->task_info;
2849
2850 dev_warn(adev->dev,
2851 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2852 ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
2853 }
2854
2855 amdgpu_vm_put_task_info(vm->task_info);
2856 }
2857
2858 /**
2859 * amdgpu_vm_manager_init - init the VM manager
2860 *
2861 * @adev: amdgpu_device pointer
2862 *
2863 * Initialize the VM manager structures
2864 */
2865 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2866 {
2867 /* Concurrent flushes are only possible starting with Vega10 and
2868 * are broken on Navi10 and Navi14.
2869 */
2870 adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2871 adev->asic_type == CHIP_NAVI10 ||
2872 adev->asic_type == CHIP_NAVI14);
2873 amdgpu_vmid_mgr_init(adev);
2874
2875 spin_lock_init(&adev->vm_manager.prt_lock);
2876 atomic_set(&adev->vm_manager.num_prt_users, 0);
2877
2878 /* Unless overridden by the user, compute VM tables are only updated by
2879 * the CPU on large BAR systems.
2880 */
2881 #ifdef CONFIG_X86_64
2882 if (amdgpu_vm_update_mode == -1) {
2883 /* For ASICs with VF MMIO access protection,
2884 * avoid using the CPU for VM table updates.
2885 */
2886 if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2887 !amdgpu_sriov_vf_mmio_access_protection(adev))
2888 adev->vm_manager.vm_update_mode =
2889 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2890 else
2891 adev->vm_manager.vm_update_mode = 0;
2892 } else
2893 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2894 #else
2895 adev->vm_manager.vm_update_mode = 0;
2896 #endif
2897
2898 xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2899 }
2900
2901 /**
2902 * amdgpu_vm_manager_fini - cleanup VM manager
2903 *
2904 * @adev: amdgpu_device pointer
2905 *
2906 * Cleanup the VM manager and free resources.
2907 */
2908 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2909 {
2910 WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2911 xa_destroy(&adev->vm_manager.pasids);
2912
2913 amdgpu_vmid_mgr_fini(adev);
2914 amdgpu_pasid_mgr_cleanup();
2915 }
2916
2917 /**
2918 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2919 *
2920 * @dev: drm device pointer
2921 * @data: drm_amdgpu_vm
2922 * @filp: drm file pointer
2923 *
2924 * Returns:
2925 * 0 for success, -errno for errors.
2926 */
2927 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2928 {
2929 union drm_amdgpu_vm *args = data;
2930 struct amdgpu_device *adev = drm_to_adev(dev);
2931 struct amdgpu_fpriv *fpriv = filp->driver_priv;
2932 struct amdgpu_vm *vm = &fpriv->vm;
2933
2934 /* No valid flags defined yet */
2935 if (args->in.flags)
2936 return -EINVAL;
2937
2938 switch (args->in.op) {
2939 case AMDGPU_VM_OP_RESERVE_VMID:
2940 /* We only need to reserve a VMID from the gfxhub */
2941 return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
2942 case AMDGPU_VM_OP_UNRESERVE_VMID:
2943 amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
2944 break;
2945 default:
2946 return -EINVAL;
2947 }
2948
2949 return 0;
2950 }
2951
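/*
 * Illustrative userspace sketch (assumes a libdrm-style device fd and the
 * drm_amdgpu_vm layout from amdgpu_drm.h):
 *
 *      union drm_amdgpu_vm args = {
 *              .in.op = AMDGPU_VM_OP_RESERVE_VMID,
 *              .in.flags = 0,
 *      };
 *
 *      if (drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args)))
 *              perror("AMDGPU_VM_OP_RESERVE_VMID");
 */
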
2952 /**
2953 * amdgpu_vm_lock_by_pasid - return an amdgpu_vm and its root bo from a pasid, if possible.
2954 * @adev: amdgpu device pointer
2955 * @root: root BO of the VM
2956 * @pasid: PASID of the VM
2957 * The caller needs to unreserve and unref the root bo on success.
2958 */
2959 struct amdgpu_vm *amdgpu_vm_lock_by_pasid(struct amdgpu_device *adev,
2960 struct amdgpu_bo **root, u32 pasid)
2961 {
2962 unsigned long irqflags;
2963 struct amdgpu_vm *vm;
2964 int r;
2965
2966 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2967 vm = xa_load(&adev->vm_manager.pasids, pasid);
2968 *root = vm ? amdgpu_bo_ref(vm->root.bo) : NULL;
2969 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2970
2971 if (!*root)
2972 return NULL;
2973
2974 r = amdgpu_bo_reserve(*root, true);
2975 if (r)
2976 goto error_unref;
2977
2978 /* Double check that the VM still exists */
2979 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2980 vm = xa_load(&adev->vm_manager.pasids, pasid);
2981 if (vm && vm->root.bo != *root)
2982 vm = NULL;
2983 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2984 if (!vm)
2985 goto error_unlock;
2986
2987 return vm;
2988 error_unlock:
2989 amdgpu_bo_unreserve(*root);
2990
2991 error_unref:
2992 amdgpu_bo_unref(root);
2993 return NULL;
2994 }
2995
2996 /**
2997 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2998 * @adev: amdgpu device pointer
2999 * @pasid: PASID of the VM
3000 * @ts: Timestamp of the fault
3001 * @vmid: VMID, only used for GFX 9.4.3.
3002 * @node_id: Node_id received in IH cookie. Only applicable for
3003 * GFX 9.4.3.
3004 * @addr: Address of the fault
3005 * @write_fault: true is write fault, false is read fault
3006 *
3007 * Try to gracefully handle a VM fault. Return true if the fault was handled and
3008 * shouldn't be reported any more.
3009 */
3010 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
3011 u32 vmid, u32 node_id, uint64_t addr,
3012 uint64_t ts, bool write_fault)
3013 {
3014 bool is_compute_context = false;
3015 struct amdgpu_bo *root;
3016 uint64_t value, flags;
3017 struct amdgpu_vm *vm;
3018 int r;
3019
3020 vm = amdgpu_vm_lock_by_pasid(adev, &root, pasid);
3021 if (!vm)
3022 return false;
3023
3024 is_compute_context = vm->is_compute_context;
3025
3026 if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
3027 node_id, addr >> PAGE_SHIFT, ts, write_fault)) {
3028 amdgpu_bo_unreserve(root);
3029 amdgpu_bo_unref(&root);
3030 return true;
3031 }
3032
3033 addr /= AMDGPU_GPU_PAGE_SIZE;
3034 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
3035 AMDGPU_PTE_SYSTEM;
3036
3037 if (is_compute_context) {
3038 /* Intentionally setting invalid PTE flag
3039 * combination to force a no-retry-fault
3040 */
3041 flags = AMDGPU_VM_NORETRY_FLAGS;
3042 value = 0;
3043 } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3044 /* Redirect the access to the dummy page */
3045 value = adev->dummy_page_addr;
3046 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3047 AMDGPU_PTE_WRITEABLE;
3048
3049 } else {
3050 /* Let the hw retry silently on the PTE */
3051 value = 0;
3052 }
3053
3054 r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
3055 if (r) {
3056 pr_debug("failed %d to reserve fence slot\n", r);
3057 goto error_unlock;
3058 }
3059
3060 r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
3061 NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
3062 if (r)
3063 goto error_unlock;
3064
3065 r = amdgpu_vm_update_pdes(adev, vm, true);
3066
3067 error_unlock:
3068 amdgpu_bo_unreserve(root);
3069 if (r < 0)
3070 dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
3071
3072 amdgpu_bo_unref(&root);
3073
3074 return false;
3075 }
3076
3077 #if defined(CONFIG_DEBUG_FS)
3078 /**
3079 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3080 *
3081 * @vm: Requested VM for printing BO info
3082 * @m: debugfs file
3083 *
3084 * Print BO information in debugfs file for the VM
3085 */
3086 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3087 {
3088 struct amdgpu_bo_va *bo_va, *tmp;
3089 u64 total_idle = 0;
3090 u64 total_evicted = 0;
3091 u64 total_relocated = 0;
3092 u64 total_moved = 0;
3093 u64 total_invalidated = 0;
3094 u64 total_done = 0;
3095 unsigned int total_idle_objs = 0;
3096 unsigned int total_evicted_objs = 0;
3097 unsigned int total_relocated_objs = 0;
3098 unsigned int total_moved_objs = 0;
3099 unsigned int total_invalidated_objs = 0;
3100 unsigned int total_done_objs = 0;
3101 unsigned int id = 0;
3102
3103 amdgpu_vm_assert_locked(vm);
3104
3105 spin_lock(&vm->status_lock);
3106 seq_puts(m, "\tIdle BOs:\n");
3107 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3108 if (!bo_va->base.bo)
3109 continue;
3110 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3111 }
3112 total_idle_objs = id;
3113 id = 0;
3114
3115 seq_puts(m, "\tEvicted BOs:\n");
3116 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3117 if (!bo_va->base.bo)
3118 continue;
3119 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3120 }
3121 total_evicted_objs = id;
3122 id = 0;
3123
3124 seq_puts(m, "\tRelocated BOs:\n");
3125 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3126 if (!bo_va->base.bo)
3127 continue;
3128 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3129 }
3130 total_relocated_objs = id;
3131 id = 0;
3132
3133 seq_puts(m, "\tMoved BOs:\n");
3134 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3135 if (!bo_va->base.bo)
3136 continue;
3137 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3138 }
3139 total_moved_objs = id;
3140 id = 0;
3141
3142 seq_puts(m, "\tInvalidated BOs:\n");
3143 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3144 if (!bo_va->base.bo)
3145 continue;
3146 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3147 }
3148 total_invalidated_objs = id;
3149 id = 0;
3150
3151 seq_puts(m, "\tDone BOs:\n");
3152 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3153 if (!bo_va->base.bo)
3154 continue;
3155 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3156 }
3157 spin_unlock(&vm->status_lock);
3158 total_done_objs = id;
3159
3160 seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
3161 total_idle_objs);
3162 seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted,
3163 total_evicted_objs);
3164 seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated,
3165 total_relocated_objs);
3166 seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved,
3167 total_moved_objs);
3168 seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3169 total_invalidated_objs);
3170 seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done,
3171 total_done_objs);
3172 }
3173 #endif
3174
3175 /**
3176 * amdgpu_vm_update_fault_cache - update cached fault info.
3177 * @adev: amdgpu device pointer
3178 * @pasid: PASID of the VM
3179 * @addr: Address of the fault
3180 * @status: GPUVM fault status register
3181 * @vmhub: which vmhub got the fault
3182 *
3183 * Cache the fault info for later use by userspace in debugging.
3184 */
3185 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
3186 unsigned int pasid,
3187 uint64_t addr,
3188 uint32_t status,
3189 unsigned int vmhub)
3190 {
3191 struct amdgpu_vm *vm;
3192 unsigned long flags;
3193
3194 xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3195
3196 vm = xa_load(&adev->vm_manager.pasids, pasid);
3197 /* Don't update the fault cache if status is 0. In the multiple
3198 * fault case, subsequent faults will return a 0 status which is
3199 * useless for userspace and replaces the useful fault status, so
3200 * only update if status is non-0.
3201 */
3202 if (vm && status) {
3203 vm->fault_info.addr = addr;
3204 vm->fault_info.status = status;
3205 /*
3206 * Update the fault information globally for later usage
3207 * when vm could be stale or freed.
3208 */
3209 adev->vm_manager.fault_info.addr = addr;
3210 adev->vm_manager.fault_info.vmhub = vmhub;
3211 adev->vm_manager.fault_info.status = status;
3212
3213 if (AMDGPU_IS_GFXHUB(vmhub)) {
3214 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3215 vm->fault_info.vmhub |=
3216 (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3217 } else if (AMDGPU_IS_MMHUB0(vmhub)) {
3218 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3219 vm->fault_info.vmhub |=
3220 (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3221 } else if (AMDGPU_IS_MMHUB1(vmhub)) {
3222 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3223 vm->fault_info.vmhub |=
3224 (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3225 } else {
3226 WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
3227 }
3228 }
3229 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3230 }
3231
3232 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
3233 struct amdgpu_task_info *task_info)
3234 {
3235 dev_err(adev->dev,
3236 " Process %s pid %d thread %s pid %d\n",
3237 task_info->process_name, task_info->tgid,
3238 task_info->task.comm, task_info->task.pid);
3239 }
3240
3241 void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
3242 const struct amdgpu_vm_pte_funcs *vm_pte_funcs)
3243 {
3244 struct drm_gpu_scheduler *sched;
3245 int i;
3246
3247 for (i = 0; i < adev->sdma.num_instances; i++) {
3248 if (adev->sdma.has_page_queue)
3249 sched = &adev->sdma.instance[i].page.sched;
3250 else
3251 sched = &adev->sdma.instance[i].ring.sched;
3252 adev->vm_manager.vm_pte_scheds[i] = sched;
3253 }
3254 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
3255 adev->vm_manager.vm_pte_funcs = vm_pte_funcs;
3256 }
3257