// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"

/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_bo_base *parent;
	struct amdgpu_vm_bo_base *entry;
	unsigned int level;
};

/**
 * amdgpu_vm_pt_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
					     unsigned int level)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
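		/*
		 * Each directory level above the PTB selects 9 bits of the
		 * pfn; PDB0 starts block_size bits up because the page
		 * tables below it cover block_size bits each.
		 */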
		return 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0;
	}
}

/**
 * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
					     unsigned int level)
{
	unsigned int shift;

	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
			>> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;

	/* For the page tables on the leaves */
	return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
					  unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_pt_size - returns the size of the page table in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
				      unsigned int level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_bo_base *
amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
	struct amdgpu_bo *parent = pt->bo->parent;

	if (!parent)
		return NULL;

	return parent->vm_bo;
}

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int mask, shift, idx;

	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
	    !cursor->entry->bo)
		return false;

	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);

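	/* Descend into the child entry that covers the current pfn */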
	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int shift, num_entries;
	struct amdgpu_bo_vm *parent;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parent and see if we got a sibling */
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
	parent = to_amdgpu_bo_vm(cursor->parent->bo);

	if (cursor->entry == &parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);

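	/* Walk down to the deepest allocated entry so that children are
	 * visited before their parents.
	 */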
	while (amdgpu_vm_pt_descendant(adev, cursor))
		;
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_bo_base *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor))
			;
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 *
 * "Safe" because the cursor is advanced to the next node before the current
 * entry is handed to the loop body, so the entry may be freed while iterating.
 */
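/*
 * Example usage (this is how amdgpu_vm_pt_free_root() below uses it):
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
 *		if (entry)
 *			amdgpu_vm_pt_free(entry);
 *	}
 */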
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))

/**
 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate)
{
	unsigned int level = adev->vm_manager.root_level;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = &vmbo->bo;
	unsigned int entries;
	struct amdgpu_bo *bo = &vmbo->bo;
	uint64_t addr;
	int r, idx;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

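	/* Each PDE/PTE is eight bytes, so the BO size determines the number
	 * of entries to clear.
	 */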
	entries = amdgpu_bo_size(bo) / 8;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	r = vm->update_funcs->map_table(vmbo);
	if (r)
		goto exit;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL);
	if (r)
		goto exit;

	addr = 0;

	uint64_t value = 0, flags = 0;
	if (adev->asic_type >= CHIP_VEGA10) {
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE_FLAG(adev);
			amdgpu_gmc_get_vm_pde(adev, level,
					      &value, &flags);
		} else {
			/* Workaround for fault priority problem on GMC9 */
			flags = AMDGPU_PTE_EXECUTABLE;
		}
	}

	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
				     value, flags);
	if (r)
		goto exit;

	r = vm->update_funcs->commit(&params, NULL);
exit:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_vm_pt_create - create bo for PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @immediate: use an immediate update
 * @vmbo: pointer to the buffer object pointer
 * @xcp_id: GPU partition id
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id)
{
	struct amdgpu_bo_param bp;
	unsigned int num_entries;

	memset(&bp, 0, sizeof(bp));

	bp.size = amdgpu_vm_pt_size(adev, level);
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;

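	/* Prefer VRAM for page tables; APP APUs have no dedicated VRAM and
	 * use GTT instead.
	 */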
	if (!adev->gmc.is_app_apu)
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	else
		bp.domain = AMDGPU_GEM_DOMAIN_GTT;

	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_pt_num_entries(adev, level);
	else
		num_entries = 0;

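	/* Only directories need the trailing array of child entries;
	 * leaf page tables have none.
	 */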
	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);

	if (vm->use_cpu_for_update)
		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = immediate;
	bp.xcp_id_plus1 = xcp_id + 1;

	if (vm->root.bo)
		bp.resv = vm->root.bo->tbo.base.resv;

	return amdgpu_bo_create_vm(adev, &bp, vmbo);
}

/**
 * amdgpu_vm_pt_alloc - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 0 if the page table was already allocated or has been newly allocated,
 * negative errno if an error occurred.
 */
static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_vm_pt_cursor *cursor,
			      bool immediate)
{
	struct amdgpu_vm_bo_base *entry = cursor->entry;
	struct amdgpu_bo *pt_bo;
	struct amdgpu_bo_vm *pt;
	int r;

	if (entry->bo)
		return 0;

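	/* Creating the BO may sleep and trigger evictions, so drop the
	 * eviction lock while the new PD/PT is allocated.
	 */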
	amdgpu_vm_eviction_unlock(vm);
	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
				vm->root.bo->xcp_id);
	amdgpu_vm_eviction_lock(vm);
	if (r)
		return r;

	/* Keep a reference to the parent directory to avoid
	 * freeing them up in the wrong order.
	 */
	pt_bo = &pt->bo;
	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
	if (r)
		goto error_free_pt;

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt_bo);
	return r;
}

/**
 * amdgpu_vm_pt_free - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
{
	if (!entry->bo)
		return;

	amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
	entry->bo->vm_bo = NULL;
	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);

	spin_lock(&entry->vm->status_lock);
	list_del(&entry->vm_status);
	spin_unlock(&entry->vm->status_lock);
	amdgpu_bo_unref(&entry->bo);
}

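/**
 * amdgpu_vm_pt_free_work - deferred freeing of PDs/PTs
 *
 * @work: the pt_free_work embedded in struct amdgpu_vm
 *
 * Frees the page tables queued on vm->pt_freed by unlocked updates while
 * holding the root PD reservation.
 */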
void amdgpu_vm_pt_free_work(struct work_struct *work)
{
	struct amdgpu_vm_bo_base *entry, *next;
	struct amdgpu_vm *vm;
	LIST_HEAD(pt_freed);

	vm = container_of(work, struct amdgpu_vm, pt_free_work);

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->pt_freed, &pt_freed);
	spin_unlock(&vm->status_lock);

	/* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
	amdgpu_bo_reserve(vm->root.bo, true);

	list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
		amdgpu_vm_pt_free(entry);

	amdgpu_bo_unreserve(vm->root.bo);
}

/**
 * amdgpu_vm_pt_free_list - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @params: see amdgpu_vm_update_params definition
 *
 * Free the page directory objects saved in the flush list
 */
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
			    struct amdgpu_vm_update_params *params)
{
	struct amdgpu_vm_bo_base *entry, *next;
	struct amdgpu_vm *vm = params->vm;
	bool unlocked = params->unlocked;

	if (list_empty(&params->tlb_flush_waitlist))
		return;

	if (unlocked) {
		spin_lock(&vm->status_lock);
		list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
		spin_unlock(&vm->status_lock);
		schedule_work(&vm->pt_free_work);
		return;
	}

	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
		amdgpu_vm_pt_free(entry);
}

/**
 * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
 *
 * @params: parameters for the update
 * @cursor: first PT entry to start the DFS from, must not be NULL
 *
 * The PDs/PTs found are moved to the flush list and freed after the TLB flush.
 */
static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	struct amdgpu_vm_pt_cursor seek;
	struct amdgpu_vm_bo_base *entry;

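	/* Move everything below the start entry onto the TLB flush wait
	 * list; the entries are freed once the flush has completed.
	 */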
	spin_lock(&params->vm->status_lock);
	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
		if (entry && entry->bo)
			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
	}

	/* enter start node now */
	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
	spin_unlock(&params->vm->status_lock);
}

/**
 * amdgpu_vm_pt_free_root - free root PD
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the root page directory and everything below it.
 */
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		if (entry)
			amdgpu_vm_pt_free(entry);
	}
}

/**
 * amdgpu_vm_pde_update - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 *
 * Returns:
 * 0 on success, negative errno on failure.
 */
int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
	struct amdgpu_bo *bo, *pbo;
	struct amdgpu_vm *vm = params->vm;
	uint64_t pde, pt, flags;
	unsigned int level;

	if (WARN_ON(!parent))
		return -EINVAL;

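	/* Determine the parent's level by counting how far it is away from
	 * the root.
	 */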
	bo = parent->bo;
	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
					1, 0, flags);
}

/**
 * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
 *
 * @adev: amdgpu_device pointer
 * @flags: pointer to PTE flags
 *
 * Update PTE no-retry flags when TF is enabled.
 */
static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
					       uint64_t *flags)
{
	/*
	 * Update no-retry flags with the corresponding TF
	 * no-retry combination.
	 */
	if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
		*flags &= ~AMDGPU_VM_NORETRY_FLAGS;
		*flags |= adev->gmc.noretry_flags;
	}
}

/*
 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
				       struct amdgpu_bo_vm *pt,
				       unsigned int level,
				       uint64_t pe, uint64_t addr,
				       unsigned int count, uint32_t incr,
				       uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;

	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);

	} else if (adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	/*
	 * Update no-retry flags to use the no-retry flag combination
	 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
	 * does not work when TF is enabled. So, replace them with
	 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
	 * all cases.
	 */
	if (level == AMDGPU_VM_PTB)
		amdgpu_vm_pte_update_noretry_flags(adev, &flags);

	/* APUs mapping system memory may need different MTYPEs on different
	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
	 * to be on the same NUMA node.
	 */
	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);

	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_pte_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
				   uint64_t start, uint64_t end, uint64_t flags,
				   unsigned int *frag, uint64_t *frag_end)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning the virtual base address
	 * and the allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
	unsigned int max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* System pages are not contiguous, so fragments don't help here */
	if (params->pages_addr) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
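	/* The fragment is the largest power-of-two block that both starts
	 * aligned at @start and still fits into the range.
	 */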
	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}

/**
 * amdgpu_vm_ptes_update - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, advanced to the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;
	int r;

	/* figure out the initial fragment */
	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
			       &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		unsigned int shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;
		struct amdgpu_bo *pt;

		if (!params->unlocked) {
			/* make sure that the page tables covering the
			 * address range are actually allocated
			 */
			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
					       &cursor, params->immediate);
			if (r)
				return r;
		}

		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
		if (params->unlocked) {
			/* Unlocked updates are only allowed on the leaves */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (adev->asic_type < CHIP_VEGA10 &&
			   (flags & AMDGPU_PTE_VALID)) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;
			continue;
		}

		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)
				return -ENOENT;

			/* but unmapping something can happen at a higher
			 * level.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
				       1ULL << shift));
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;
		entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

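		/* Write the PTEs of this entry fragment by fragment,
		 * recomputing the fragment whenever the current one is
		 * used up.
		 */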
		do {
			struct amdgpu_vm *vm = params->vm;
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned int nptes = (upd_end - frag_start) >> shift;
			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);

			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
						    min(nptes, 32u), dst, incr,
						    upd_flags,
						    vm->task_info ? vm->task_info->tgid : 0,
						    vm->immediate.fence_context);
			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
						   cursor.level, pe_start, dst,
						   nptes, incr, upd_flags);

			pe_start += nptes * 8;
			dst += nptes * incr;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_pte_fragment(params, frag_start, end,
						       flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries.
			 * Update the tables with the flags and addresses and free up subsequent
			 * tables in the case of huge pages or freed up areas.
			 * This is the maximum you can free, because all other page tables are not
			 * completely covered by the range and so potentially still in use.
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->bo) {
					params->needs_flush = true;
					amdgpu_vm_pt_add_list(params, &cursor);
				}
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}

/**
 * amdgpu_vm_pt_map_tables - make the PD/PT BOs CPU accessible
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Make the root page directory and everything below it CPU accessible.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		struct amdgpu_bo_vm *bo;
		int r;

		if (entry->bo) {
			bo = to_amdgpu_bo_vm(entry->bo);
			r = vm->update_funcs->map_table(bo);
			if (r)
				return r;
		}
	}

	return 0;
}