// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"
#include "amdgpu_job.h"

/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_bo_base *parent;
	struct amdgpu_vm_bo_base *entry;
	unsigned int level;
};

/**
 * amdgpu_vm_pt_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
					     unsigned int level)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		return 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0;
	}
}

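/*
 * Illustrative shift values (not driver logic), assuming the common
 * block_size of 9: AMDGPU_VM_PTB -> 0, AMDGPU_VM_PDB0 -> 9,
 * AMDGPU_VM_PDB1 -> 18 and AMDGPU_VM_PDB2 -> 27 bits of pfn shift.
 */
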
/**
 * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
					     unsigned int level)
{
	unsigned int shift;

	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
			>> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;

	/* For the page tables on the leaves */
	return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
					  unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_pt_size - returns the size of the page table in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
				      unsigned int level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
}
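
/*
 * Illustrative example (not driver logic): with root_level == AMDGPU_VM_PDB2
 * and the common block_size of 9, a pfn is decomposed as
 *	PDB2 index = pfn >> 27		(root, sized from max_pfn)
 *	PDB1 index = (pfn >> 18) & 0x1ff
 *	PDB0 index = (pfn >> 9)  & 0x1ff
 *	PTB  index = pfn & 0x1ff
 * so every non-root PD/PT holds 512 entries of 8 bytes, i.e. a 4KiB BO.
 */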

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_bo_base *
amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
	struct amdgpu_bo *parent = pt->bo->parent;

	if (!parent)
		return NULL;

	return parent->vm_bo;
}

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int mask, shift, idx;

	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
	    !cursor->entry->bo)
		return false;

	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
	return true;
}
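
/*
 * For example (illustrative, assuming block_size == 9): descending from
 * AMDGPU_VM_PDB0 uses shift == 9 and mask == 0x1ff, so pfn 0x12345 selects
 * entry (0x12345 >> 9) & 0x1ff == 0x91 in the current page directory.
 */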

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{

	unsigned int shift, num_entries;
	struct amdgpu_bo_vm *parent;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
	parent = to_amdgpu_bo_vm(cursor->parent->bo);

	if (cursor->entry == &parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);

	while (amdgpu_vm_pt_descendant(adev, cursor))
		;
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_bo_base *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor))
			;
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))

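/*
 * Typical use (see amdgpu_vm_pt_free_root() below): passing start == NULL
 * walks the whole tree, and the cursor is advanced before the loop body
 * runs, so the current entry may be freed safely:
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
 *		if (entry)
 *			amdgpu_vm_pt_free(entry);
 *	}
 */
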
/**
 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate)
{
	unsigned int level = adev->vm_manager.root_level;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = &vmbo->bo;
	unsigned int entries;
	struct amdgpu_bo *bo = &vmbo->bo;
	uint64_t addr;
	int r, idx;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	r = vm->update_funcs->map_table(vmbo);
	if (r)
		goto exit;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL,
				      AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
	if (r)
		goto exit;

	addr = 0;

	uint64_t value = 0, flags = 0;
	if (adev->asic_type >= CHIP_VEGA10) {
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE_FLAG(adev);
			amdgpu_gmc_get_vm_pde(adev, level,
					      &value, &flags);
		} else {
			/* Workaround for fault priority problem on GMC9 */
			flags = AMDGPU_PTE_EXECUTABLE;
		}
	}

	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
				     value, flags);
	if (r)
		goto exit;

	r = vm->update_funcs->commit(&params, NULL);
exit:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_vm_pt_create - create bo for PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @immediate: use an immediate update
 * @vmbo: pointer to the buffer object pointer
 * @xcp_id: GPU partition id
 */
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id)
{
	struct amdgpu_bo_param bp;
	unsigned int num_entries;

	memset(&bp, 0, sizeof(bp));

	bp.size = amdgpu_vm_pt_size(adev, level);
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;

	if (!adev->gmc.is_app_apu)
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	else
		bp.domain = AMDGPU_GEM_DOMAIN_GTT;

	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_pt_num_entries(adev, level);
	else
		num_entries = 0;

	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);

	if (vm->use_cpu_for_update)
		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = immediate;
	bp.xcp_id_plus1 = xcp_id + 1;

	if (vm->root.bo)
		bp.resv = vm->root.bo->tbo.base.resv;

	return amdgpu_bo_create_vm(adev, &bp, vmbo);
}

/**
 * amdgpu_vm_pt_alloc - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 0 if the page table was allocated or was already present, negative errno
 * if an error occurred.
 */
static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_vm_pt_cursor *cursor,
			      bool immediate)
{
	struct amdgpu_vm_bo_base *entry = cursor->entry;
	struct amdgpu_bo *pt_bo;
	struct amdgpu_bo_vm *pt;
	int r;

	if (entry->bo)
		return 0;

	amdgpu_vm_eviction_unlock(vm);
	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
				vm->root.bo->xcp_id);
	amdgpu_vm_eviction_lock(vm);
	if (r)
		return r;

	/* Keep a reference to the root directory to avoid
	 * freeing them up in the wrong order.
	 */
	pt_bo = &pt->bo;
	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
	if (r)
		goto error_free_pt;

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt_bo);
	return r;
}

/**
 * amdgpu_vm_pt_free - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
{
	if (!entry->bo)
		return;

	amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
	entry->bo->vm_bo = NULL;
	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);

	spin_lock(&entry->vm->status_lock);
	list_del(&entry->vm_status);
	spin_unlock(&entry->vm->status_lock);
	amdgpu_bo_unref(&entry->bo);
}

/**
 * amdgpu_vm_pt_free_list - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @params: see amdgpu_vm_update_params definition
 *
 * Free the page directory objects saved in the flush list
 */
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
			    struct amdgpu_vm_update_params *params)
{
	struct amdgpu_vm_bo_base *entry, *next;
	bool unlocked = params->unlocked;

	if (list_empty(&params->tlb_flush_waitlist))
		return;

	/*
	 * Unlocked unmaps only clear page table leaves; warn if one asks us
	 * to free a page table entry here.
	 */
	WARN_ON(unlocked);

	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
		amdgpu_vm_pt_free(entry);
}

/**
 * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
 *
 * @params: parameters for the update
 * @cursor: first PT entry to start the depth-first search from, non NULL
 *
 * This list will be freed after TLB flush.
 */
static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	struct amdgpu_vm_pt_cursor seek;
	struct amdgpu_vm_bo_base *entry;

	spin_lock(&params->vm->status_lock);
	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
		if (entry && entry->bo)
			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
	}

	/* enter start node now */
	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
	spin_unlock(&params->vm->status_lock);
}

/**
 * amdgpu_vm_pt_free_root - free root PD
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the root page directory and everything below it.
 */
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		if (entry)
			amdgpu_vm_pt_free(entry);
	}
}

/**
 * amdgpu_vm_pde_update - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
	struct amdgpu_bo *bo, *pbo;
	struct amdgpu_vm *vm = params->vm;
	uint64_t pde, pt, flags;
	unsigned int level;

	if (WARN_ON(!parent))
		return -EINVAL;

	bo = parent->bo;
	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
					1, 0, flags);
}
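
/*
 * Note (illustrative): the pde argument above is the byte offset of the
 * entry inside the parent PD BO, e.g. the entry at index 3 of its parent
 * is written at offset 3 * 8 == 24 bytes.
 */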

/**
 * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
 *
 * @adev: amdgpu_device pointer
 * @flags: pointer to PTE flags
 *
 * Update PTE no-retry flags when TF is enabled.
 */
static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
					       uint64_t *flags)
{
	/*
	 * Update no-retry flags with the corresponding TF
	 * no-retry combination.
	 */
	if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
		*flags &= ~AMDGPU_VM_NORETRY_FLAGS;
		*flags |= adev->gmc.noretry_flags;
	}
}

/*
 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
				       struct amdgpu_bo_vm *pt,
				       unsigned int level,
				       uint64_t pe, uint64_t addr,
				       unsigned int count, uint32_t incr,
				       uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;

	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);

	} else if (adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	/*
	 * Update no-retry flags to use the no-retry flag combination
	 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
	 * does not work when TF is enabled. So, replace them with
	 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
	 * all cases.
	 */
	if (level == AMDGPU_VM_PTB)
		amdgpu_vm_pte_update_noretry_flags(adev, &flags);

	/* APUs mapping system memory may need different MTYPEs on different
	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
	 * to be on the same NUMA node.
	 */
	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);

	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_pte_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
				   uint64_t start, uint64_t end, uint64_t flags,
				   unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
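	/*
	 * Worked example (illustrative, GMC >= Vega10, i.e. max_frag == 31):
	 * for start == 0x200 and end == 0x600, ffs(0x200) - 1 == 9 and
	 * fls64(0x400) - 1 == 10, so *frag == 9 and *frag_end == 0x400;
	 * the PTEs covering 0x200..0x3ff are then written with a 2MB
	 * fragment (512 contiguous 4KB pages).
	 */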
	unsigned int max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* system pages are not necessarily contiguous */
	if (params->pages_addr) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}

/**
 * amdgpu_vm_ptes_update - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, negative errno for failure.
 */
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;
	int r;

	/* figure out the initial fragment */
	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
			       &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		unsigned int shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;
		struct amdgpu_bo *pt;

		if (!params->unlocked) {
			/* make sure that the page tables covering the
			 * address range are actually allocated
			 */
			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
					       &cursor, params->immediate);
			if (r)
				return r;
		}

		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
		if (params->unlocked) {
			/* Unlocked updates are only allowed on the leaves */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (adev->asic_type < CHIP_VEGA10 &&
			   (flags & AMDGPU_PTE_VALID)) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;
			continue;
		}

		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)
				return -ENOENT;

			/* but unmapping something can happen at a higher
			 * level.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
						       1ULL << shift));
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;

		if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
			/*
			 * An unlocked unmap from the MMU notifier can hit a
			 * huge page whose leaf is a PDE entry; only clear that
			 * single entry and search again for the next PDE or
			 * PTE leaf.
			 */
			entry_end = 1ULL << shift;
		else
			entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			struct amdgpu_vm *vm = params->vm;
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned int nptes = (upd_end - frag_start) >> shift;
			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);

			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
						    min(nptes, 32u), dst, incr,
						    upd_flags,
						    vm->task_info ? vm->task_info->tgid : 0,
						    vm->immediate.fence_context);
			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
						   cursor.level, pe_start, dst,
						   nptes, incr, upd_flags);

			pe_start += nptes * 8;
			dst += nptes * incr;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_pte_fragment(params, frag_start, end,
						       flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries.
			 * Update the tables with the flags and addresses and free up subsequent
			 * tables in the case of huge pages or freed up areas.
			 * This is the maximum you can free, because all other page tables are not
			 * completely covered by the range and so potentially still in use.
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->bo) {
					params->needs_flush = true;
					amdgpu_vm_pt_add_list(params, &cursor);
				}
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}

/**
 * amdgpu_vm_pt_map_tables - make the BOs of the root PD CPU accessible
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Make the root page directory and everything below it CPU accessible.
 */
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {

		struct amdgpu_bo_vm *bo;
		int r;

		if (entry->bo) {
			bo = to_amdgpu_bo_vm(entry->bo);
			r = vm->update_funcs->map_table(bo);
			if (r)
				return r;
		}
	}

	return 0;
}