xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_drv.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_trace.h"
28 #include "amdgpu_vm.h"
29 #include "amdgpu_job.h"
30 
31 /*
32  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
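 *
 * @pfn: page frame number the cursor currently points to
 * @parent: parent entry of @entry, NULL when at the root
 * @entry: page directory/table entry the cursor points to
 * @level: VMPT level of @entry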
33  */
34 struct amdgpu_vm_pt_cursor {
35 	uint64_t pfn;
36 	struct amdgpu_vm_bo_base *parent;
37 	struct amdgpu_vm_bo_base *entry;
38 	unsigned int level;
39 };
40 
41 /**
42  * amdgpu_vm_pt_level_shift - return the addr shift for each level
43  *
44  * @adev: amdgpu_device pointer
45  * @level: VMPT level
46  *
47  * Returns:
48  * The number of bits the pfn needs to be right shifted for a level.
49  */
50 static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
51 					     unsigned int level)
52 {
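	/* The shift of a level is the number of address bits covered by the
	 * levels below it: a page table (PTB) covers block_size bits and
	 * every intermediate page directory covers 9 bits (512 entries).
	 */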
53 	switch (level) {
54 	case AMDGPU_VM_PDB2:
55 	case AMDGPU_VM_PDB1:
56 	case AMDGPU_VM_PDB0:
57 		return 9 * (AMDGPU_VM_PDB0 - level) +
58 			adev->vm_manager.block_size;
59 	case AMDGPU_VM_PTB:
60 		return 0;
61 	default:
62 		return ~0;
63 	}
64 }
65 
66 /**
67  * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
68  *
69  * @adev: amdgpu_device pointer
70  * @level: VMPT level
71  *
72  * Returns:
73  * The number of entries in a page directory or page table.
74  */
75 static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
76 					     unsigned int level)
77 {
78 	unsigned int shift;
79 
80 	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
81 	if (level == adev->vm_manager.root_level)
82 		/* For the root directory */
83 		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
84 			>> shift;
85 	else if (level != AMDGPU_VM_PTB)
86 		/* Everything in between */
87 		return 512;
88 
89 	/* For the page tables on the leaves */
90 	return AMDGPU_VM_PTE_COUNT(adev);
91 }
92 
93 /**
94  * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
95  *
96  * @adev: amdgpu_device pointer
97  * @level: VMPT level
98  *
99  * Returns:
100  * The mask to extract the entry number of a PD/PT from an address.
101  */
102 static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
103 					  unsigned int level)
104 {
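	/* The root directory index is not masked, it covers the entire
	 * remaining address space.
	 */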
105 	if (level <= adev->vm_manager.root_level)
106 		return 0xffffffff;
107 	else if (level != AMDGPU_VM_PTB)
108 		return 0x1ff;
109 	else
110 		return AMDGPU_VM_PTE_COUNT(adev) - 1;
111 }
112 
113 /**
114  * amdgpu_vm_pt_size - returns the size of the page table in bytes
115  *
116  * @adev: amdgpu_device pointer
117  * @level: VMPT level
118  *
119  * Returns:
120  * The size of the BO for a page directory or page table in bytes.
121  */
122 static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
123 				      unsigned int level)
124 {
125 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
126 }
127 
128 /**
129  * amdgpu_vm_pt_parent - get the parent page directory
130  *
131  * @pt: child page table
132  *
133  * Helper to get the parent entry for the child page table. NULL if we are at
134  * the root page directory.
135  */
136 static struct amdgpu_vm_bo_base *
137 amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
138 {
139 	struct amdgpu_bo *parent = pt->bo->parent;
140 
141 	if (!parent)
142 		return NULL;
143 
144 	return parent->vm_bo;
145 }
146 
147 /**
148  * amdgpu_vm_pt_start - start PD/PT walk
149  *
150  * @adev: amdgpu_device pointer
151  * @vm: amdgpu_vm structure
152  * @start: start address of the walk
153  * @cursor: state to initialize
154  *
155  * Initialize an amdgpu_vm_pt_cursor to start a walk.
156  */
157 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
158 			       struct amdgpu_vm *vm, uint64_t start,
159 			       struct amdgpu_vm_pt_cursor *cursor)
160 {
161 	cursor->pfn = start;
162 	cursor->parent = NULL;
163 	cursor->entry = &vm->root;
164 	cursor->level = adev->vm_manager.root_level;
165 }
166 
167 /**
168  * amdgpu_vm_pt_descendant - go to child node
169  *
170  * @adev: amdgpu_device pointer
171  * @cursor: current state
172  *
173  * Walk to the child node of the current node.
174  * Returns:
175  * True if the walk was possible, false otherwise.
176  */
177 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
178 				    struct amdgpu_vm_pt_cursor *cursor)
179 {
180 	unsigned int mask, shift, idx;
181 
182 	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
183 	    !cursor->entry->bo)
184 		return false;
185 
186 	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
187 	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);
188 
189 	++cursor->level;
190 	idx = (cursor->pfn >> shift) & mask;
191 	cursor->parent = cursor->entry;
192 	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
193 	return true;
194 }
195 
196 /**
197  * amdgpu_vm_pt_sibling - go to sibling node
198  *
199  * @adev: amdgpu_device pointer
200  * @cursor: current state
201  *
202  * Walk to the sibling node of the current node.
203  * Returns:
204  * True if the walk was possible, false otherwise.
205  */
206 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
207 				 struct amdgpu_vm_pt_cursor *cursor)
208 {
209 
210 	unsigned int shift, num_entries;
211 	struct amdgpu_bo_vm *parent;
212 
213 	/* Root doesn't have a sibling */
214 	if (!cursor->parent)
215 		return false;
216 
217 	/* Go to our parent and see if we have a sibling */
218 	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
219 	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
220 	parent = to_amdgpu_bo_vm(cursor->parent->bo);
221 
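	/* Already the last entry of our parent, there is no sibling to the right */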
222 	if (cursor->entry == &parent->entries[num_entries - 1])
223 		return false;
224 
225 	cursor->pfn += 1ULL << shift;
226 	cursor->pfn &= ~((1ULL << shift) - 1);
227 	++cursor->entry;
228 	return true;
229 }
230 
231 /**
232  * amdgpu_vm_pt_ancestor - go to parent node
233  *
234  * @cursor: current state
235  *
236  * Walk to the parent node of the current node.
237  * Returns:
238  * True if the walk was possible, false otherwise.
239  */
240 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
241 {
242 	if (!cursor->parent)
243 		return false;
244 
245 	--cursor->level;
246 	cursor->entry = cursor->parent;
247 	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
248 	return true;
249 }
250 
251 /**
252  * amdgpu_vm_pt_next - get the next PD/PT in the hierarchy
253  *
254  * @adev: amdgpu_device pointer
255  * @cursor: current state
256  *
257  * Walk the PD/PT tree to the next node.
258  */
259 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
260 			      struct amdgpu_vm_pt_cursor *cursor)
261 {
262 	/* First try a newborn child */
263 	if (amdgpu_vm_pt_descendant(adev, cursor))
264 		return;
265 
266 	/* If that didn't work, try to find a sibling */
267 	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
268 		/* No sibling, go to our parents and grandparents */
269 		if (!amdgpu_vm_pt_ancestor(cursor)) {
270 			cursor->pfn = ~0ll;
271 			return;
272 		}
273 	}
274 }
275 
276 /**
277  * amdgpu_vm_pt_first_dfs - start a depth-first search
278  *
279  * @adev: amdgpu_device structure
280  * @vm: amdgpu_vm structure
281  * @start: optional cursor to start with
282  * @cursor: state to initialize
283  *
284  * Starts a depth-first traversal of the PD/PT tree.
285  */
286 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
287 				   struct amdgpu_vm *vm,
288 				   struct amdgpu_vm_pt_cursor *start,
289 				   struct amdgpu_vm_pt_cursor *cursor)
290 {
291 	if (start)
292 		*cursor = *start;
293 	else
294 		amdgpu_vm_pt_start(adev, vm, 0, cursor);
295 
296 	while (amdgpu_vm_pt_descendant(adev, cursor))
297 		;
298 }
299 
300 /**
301  * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
302  *
303  * @start: starting point for the search
304  * @entry: current entry
305  *
306  * Returns:
307  * True when the search should continue, false otherwise.
308  */
309 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
310 				      struct amdgpu_vm_bo_base *entry)
311 {
312 	return entry && (!start || entry != start->entry);
313 }
314 
315 /**
316  * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
317  *
318  * @adev: amdgpu_device structure
319  * @cursor: current state
320  *
321  * Move the cursor to the next node in a depth-first search.
322  */
323 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
324 				  struct amdgpu_vm_pt_cursor *cursor)
325 {
326 	if (!cursor->entry)
327 		return;
328 
329 	if (!cursor->parent)
330 		cursor->entry = NULL;
331 	else if (amdgpu_vm_pt_sibling(adev, cursor))
332 		while (amdgpu_vm_pt_descendant(adev, cursor))
333 			;
334 	else
335 		amdgpu_vm_pt_ancestor(cursor);
336 }
337 
338 /*
339  * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
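 *
 * The walk is safe against freeing the current entry because the cursor is
 * advanced to the next node before the loop body runs.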
340  */
341 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
342 	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
343 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
344 	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
345 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
346 
347 /**
348  * amdgpu_vm_pt_clear - initially clear the PDs/PTs
349  *
350  * @adev: amdgpu_device pointer
351  * @vm: VM to clear BO from
352  * @vmbo: BO to clear
353  * @immediate: use an immediate update
354  *
355  * Root PD needs to be reserved when calling this.
356  *
357  * Returns:
358  * 0 on success, errno otherwise.
359  */
360 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
361 		       struct amdgpu_bo_vm *vmbo, bool immediate)
362 {
363 	unsigned int level = adev->vm_manager.root_level;
364 	struct ttm_operation_ctx ctx = { true, false };
365 	struct amdgpu_vm_update_params params;
366 	struct amdgpu_bo *ancestor = &vmbo->bo;
367 	unsigned int entries;
368 	struct amdgpu_bo *bo = &vmbo->bo;
369 	uint64_t addr;
370 	int r, idx;
371 
372 	/* Figure out our place in the hierarchy */
373 	if (ancestor->parent) {
374 		++level;
375 		while (ancestor->parent->parent) {
376 			++level;
377 			ancestor = ancestor->parent;
378 		}
379 	}
380 
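	/* one PDE/PTE is 8 bytes */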
381 	entries = amdgpu_bo_size(bo) / 8;
382 
383 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
384 	if (r)
385 		return r;
386 
387 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
388 		return -ENODEV;
389 
390 	r = vm->update_funcs->map_table(vmbo);
391 	if (r)
392 		goto exit;
393 
394 	memset(&params, 0, sizeof(params));
395 	params.adev = adev;
396 	params.vm = vm;
397 	params.immediate = immediate;
398 
399 	r = vm->update_funcs->prepare(&params, NULL,
400 				      AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
401 	if (r)
402 		goto exit;
403 
404 	addr = 0;
405 
406 	uint64_t value = 0, flags = 0;
407 	if (adev->asic_type >= CHIP_VEGA10) {
408 		if (level != AMDGPU_VM_PTB) {
409 			/* Handle leaf PDEs as PTEs */
410 			flags |= AMDGPU_PDE_PTE_FLAG(adev);
411 			amdgpu_gmc_get_vm_pde(adev, level,
412 					      &value, &flags);
413 		} else {
414 			/* Workaround for fault priority problem on GMC9 */
415 			flags = AMDGPU_PTE_EXECUTABLE;
416 		}
417 	}
418 
419 	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
420 				     value, flags);
421 	if (r)
422 		goto exit;
423 
424 	r = vm->update_funcs->commit(&params, NULL);
425 exit:
426 	drm_dev_exit(idx);
427 	return r;
428 }
429 
430 /**
431  * amdgpu_vm_pt_create - create bo for PD/PT
432  *
433  * @adev: amdgpu_device pointer
434  * @vm: requesting vm
435  * @level: the page table level
436  * @immediate: use an immediate update
437  * @vmbo: pointer to the buffer object pointer
438  * @xcp_id: GPU partition id
439  */
440 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
441 			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
442 			int32_t xcp_id)
443 {
444 	struct amdgpu_bo_param bp;
445 	unsigned int num_entries;
446 
447 	memset(&bp, 0, sizeof(bp));
448 
449 	bp.size = amdgpu_vm_pt_size(adev, level);
450 	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
451 
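	/* APP APUs have no dedicated VRAM, their page tables live in GTT */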
452 	if (!adev->gmc.is_app_apu)
453 		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
454 	else
455 		bp.domain = AMDGPU_GEM_DOMAIN_GTT;
456 
457 	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
458 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
459 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
460 
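	/* Only page directories track child entries, page tables (PTB) have none */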
461 	if (level < AMDGPU_VM_PTB)
462 		num_entries = amdgpu_vm_pt_num_entries(adev, level);
463 	else
464 		num_entries = 0;
465 
466 	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
467 
468 	if (vm->use_cpu_for_update)
469 		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
470 
471 	bp.type = ttm_bo_type_kernel;
472 	bp.no_wait_gpu = immediate;
473 	bp.xcp_id_plus1 = xcp_id + 1;
474 
475 	if (vm->root.bo)
476 		bp.resv = vm->root.bo->tbo.base.resv;
477 
478 	return amdgpu_bo_create_vm(adev, &bp, vmbo);
479 }
480 
481 /**
482  * amdgpu_vm_pt_alloc - Allocate a specific page table
483  *
484  * @adev: amdgpu_device pointer
485  * @vm: VM to allocate page tables for
486  * @cursor: Which page table to allocate
487  * @immediate: use an immediate update
488  *
489  * Make sure a specific page table or directory is allocated.
490  *
491  * Returns:
492  * 0 if the page table was successfully allocated or was already present,
493  * negative errno if an error occurred.
494  */
495 static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
496 			      struct amdgpu_vm *vm,
497 			      struct amdgpu_vm_pt_cursor *cursor,
498 			      bool immediate)
499 {
500 	struct amdgpu_vm_bo_base *entry = cursor->entry;
501 	struct amdgpu_bo *pt_bo;
502 	struct amdgpu_bo_vm *pt;
503 	int r;
504 
505 	if (entry->bo)
506 		return 0;
507 
508 	amdgpu_vm_eviction_unlock(vm);
509 	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
510 				vm->root.bo->xcp_id);
511 	amdgpu_vm_eviction_lock(vm);
512 	if (r)
513 		return r;
514 
515 	/* Keep a reference to the parent directory to avoid
516 	 * freeing the page tables in the wrong order.
517 	 */
518 	pt_bo = &pt->bo;
519 	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
520 	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
521 	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
522 	if (r)
523 		goto error_free_pt;
524 
525 	return 0;
526 
527 error_free_pt:
528 	amdgpu_bo_unref(&pt_bo);
529 	return r;
530 }
531 
532 /**
533  * amdgpu_vm_pt_free - free one PD/PT
534  *
535  * @entry: PDE to free
536  */
537 static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
538 {
539 	if (!entry->bo)
540 		return;
541 
542 	amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
543 	entry->bo->vm_bo = NULL;
544 	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
545 
546 	list_del(&entry->vm_status);
547 	amdgpu_bo_unref(&entry->bo);
548 }
549 
550 /**
551  * amdgpu_vm_pt_free_list - free PD/PT levels
552  *
553  * @adev: amdgpu device structure
554  * @params: see amdgpu_vm_update_params definition
555  *
556  * Free the page directory objects saved in the flush list
557  */
558 void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
559 			    struct amdgpu_vm_update_params *params)
560 {
561 	struct amdgpu_vm_bo_base *entry, *next;
562 	bool unlocked = params->unlocked;
563 
564 	if (list_empty(&params->tlb_flush_waitlist))
565 		return;
566 
567 	/*
568 	 * Unlocked unmaps only clear page table leaves; warn if one tries to free a page table entry here.
569 	 */
570 	WARN_ON(unlocked);
571 
572 	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
573 		amdgpu_vm_pt_free(entry);
574 }
575 
576 /**
577  * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
578  *
579  * @params: parameters for the update
580  * @cursor: first PT entry to start the DFS from, must not be NULL
581  *
582  * The entries are moved to the flush list and freed after the TLB flush.
583  */
584 static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
585 				  struct amdgpu_vm_pt_cursor *cursor)
586 {
587 	struct amdgpu_vm_pt_cursor seek;
588 	struct amdgpu_vm_bo_base *entry;
589 
590 	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
591 		if (entry && entry->bo)
592 			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
593 	}
594 
595 	/* finally add the start node itself */
596 	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
597 }
598 
599 /**
600  * amdgpu_vm_pt_free_root - free root PD
601  * @adev: amdgpu device structure
602  * @vm: amdgpu vm structure
603  *
604  * Free the root page directory and everything below it.
605  */
606 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
607 {
608 	struct amdgpu_vm_pt_cursor cursor;
609 	struct amdgpu_vm_bo_base *entry;
610 
611 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
612 		if (entry)
613 			amdgpu_vm_pt_free(entry);
614 	}
615 }
616 
617 /**
618  * amdgpu_vm_pde_update - update a single level in the hierarchy
619  *
620  * @params: parameters for the update
621  * @entry: entry to update
622  *
623  * Makes sure the requested entry in parent is up to date.
624  */
625 int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
626 			 struct amdgpu_vm_bo_base *entry)
627 {
628 	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
629 	struct amdgpu_bo *bo, *pbo;
630 	struct amdgpu_vm *vm = params->vm;
631 	uint64_t pde, pt, flags;
632 	unsigned int level;
633 
634 	if (WARN_ON(!parent))
635 		return -EINVAL;
636 
637 	bo = parent->bo;
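	/* Derive the parent's VMPT level from its depth in the BO hierarchy */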
638 	for (level = 0, pbo = bo->parent; pbo; ++level)
639 		pbo = pbo->parent;
640 
641 	level += params->adev->vm_manager.root_level;
642 	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
643 	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
644 	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
645 					1, 0, flags);
646 }
647 
648 /**
649  * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
650  *
651  * @adev: amdgpu_device pointer
652  * @flags: pointer to PTE flags
653  *
654  * Update PTE no-retry flags when TF is enabled.
655  */
656 static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
657 						uint64_t *flags)
658 {
659 	/*
660 	 * Update no-retry flags with the corresponding TF
661 	 * no-retry combination.
662 	 */
663 	if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
664 		*flags &= ~AMDGPU_VM_NORETRY_FLAGS;
665 		*flags |= adev->gmc.noretry_flags;
666 	}
667 }
668 
669 /*
670  * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
671  *
672  * Make sure to set the right flags for the PTEs at the desired level.
673  */
674 static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
675 				       struct amdgpu_bo_vm *pt,
676 				       unsigned int level,
677 				       uint64_t pe, uint64_t addr,
678 				       unsigned int count, uint32_t incr,
679 				       uint64_t flags)
680 {
681 	struct amdgpu_device *adev = params->adev;
682 
683 	if (level != AMDGPU_VM_PTB) {
684 		flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
685 		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);
686 
687 	} else if (adev->asic_type >= CHIP_VEGA10 &&
688 		   !(flags & AMDGPU_PTE_VALID) &&
689 		   !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {
690 
691 		/* Workaround for fault priority problem on GMC9 */
692 		flags |= AMDGPU_PTE_EXECUTABLE;
693 	}
694 
695 	/*
696 	 * Update no-retry flags to use the no-retry flag combination
697 	 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
698 	 * does not work when TF is enabled. So, replace them with
699 	 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
700 	 * all cases.
701 	 */
702 	if (level == AMDGPU_VM_PTB)
703 		amdgpu_vm_pte_update_noretry_flags(adev, &flags);
704 
705 	/* APUs mapping system memory may need different MTYPEs on different
706 	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
707 	 * to be on the same NUMA node.
708 	 */
709 	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
710 	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
711 	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
712 		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
713 
714 	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
715 					 flags);
716 }
717 
718 /**
719  * amdgpu_vm_pte_fragment - get fragment for PTEs
720  *
721  * @params: see amdgpu_vm_update_params definition
722  * @start: first PTE to handle
723  * @end: last PTE to handle
724  * @flags: hw mapping flags
725  * @frag: resulting fragment size
726  * @frag_end: end of this fragment
727  *
728  * Returns the first possible fragment for the start and end address.
729  */
730 static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
731 				   uint64_t start, uint64_t end, uint64_t flags,
732 				   unsigned int *frag, uint64_t *frag_end)
733 {
734 	/**
735 	 * The MC L1 TLB supports variable sized pages, based on a fragment
736 	 * field in the PTE. When this field is set to a non-zero value, page
737 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
738 	 * flags are considered valid for all PTEs within the fragment range
739 	 * and corresponding mappings are assumed to be physically contiguous.
740 	 *
741 	 * The L1 TLB can store a single PTE for the whole fragment,
742 	 * significantly increasing the space available for translation
743 	 * caching. This leads to large improvements in throughput when the
744 	 * TLB is under pressure.
745 	 *
746 	 * The L2 TLB distributes small and large fragments into two
747 	 * asymmetric partitions. The large fragment cache is significantly
748 	 * larger. Thus, we try to use large fragments wherever possible.
749 	 * Userspace can support this by aligning virtual base address and
750 	 * allocation size to the fragment size.
751 	 *
752 	 * Starting with Vega10 the fragment size only controls the L1. The L2
753 	 * is now directly fed with small/huge/giant pages from the walker.
754 	 */
755 	unsigned int max_frag;
756 
757 	if (params->adev->asic_type < CHIP_VEGA10)
758 		max_frag = params->adev->vm_manager.fragment_size;
759 	else
760 		max_frag = 31;
761 
762 	/* system pages are not physically contiguous */
763 	if (params->pages_addr) {
764 		*frag = 0;
765 		*frag_end = end;
766 		return;
767 	}
768 
769 	/* This intentionally wraps around if no bit is set */
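	/* Example: a start pfn of 0x400 with end - start == 0x400 pages gives
	 * frag = min(10, 10) = 10, i.e. one 4MB fragment (1 << (12 + 10)).
	 */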
770 	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
771 	if (*frag >= max_frag) {
772 		*frag = max_frag;
773 		*frag_end = end & ~((1ULL << max_frag) - 1);
774 	} else {
775 		*frag_end = start + (1 << *frag);
776 	}
777 }
778 
779 /**
780  * amdgpu_vm_ptes_update - make sure that page tables are valid
781  *
782  * @params: see amdgpu_vm_update_params definition
783  * @start: start of GPU address range
784  * @end: end of GPU address range
785  * @dst: destination address to map to, advanced internally as the range is walked
786  * @flags: mapping flags
787  *
788  * Update the page tables in the range @start - @end.
789  *
790  * Returns:
791  * 0 for success, negative errno for failure.
792  */
793 int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
794 			  uint64_t start, uint64_t end,
795 			  uint64_t dst, uint64_t flags)
796 {
797 	struct amdgpu_device *adev = params->adev;
798 	struct amdgpu_vm_pt_cursor cursor;
799 	uint64_t frag_start = start, frag_end;
800 	unsigned int frag;
801 	int r;
802 
803 	/* figure out the initial fragment */
804 	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
805 			       &frag_end);
806 
807 	/* walk over the address space and update the PTs */
808 	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
809 	while (cursor.pfn < end) {
810 		unsigned int shift, parent_shift, mask;
811 		uint64_t incr, entry_end, pe_start;
812 		struct amdgpu_bo *pt;
813 
814 		if (!params->unlocked) {
815 			/* make sure that the page tables covering the
816 			 * address range are actually allocated
817 			 */
818 			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
819 					       &cursor, params->immediate);
820 			if (r)
821 				return r;
822 		}
823 
824 		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
825 		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
826 		if (params->unlocked) {
827 			/* Unlocked updates are only allowed on the leaves */
828 			if (amdgpu_vm_pt_descendant(adev, &cursor))
829 				continue;
830 		} else if (adev->asic_type < CHIP_VEGA10 &&
831 			   (flags & AMDGPU_PTE_VALID)) {
832 			/* No huge page support before GMC v9 */
833 			if (cursor.level != AMDGPU_VM_PTB) {
834 				if (!amdgpu_vm_pt_descendant(adev, &cursor))
835 					return -ENOENT;
836 				continue;
837 			}
838 		} else if (frag < shift) {
839 			/* We can't use this level when the fragment size is
840 			 * smaller than the address shift. Go to the next
841 			 * child entry and try again.
842 			 */
843 			if (amdgpu_vm_pt_descendant(adev, &cursor))
844 				continue;
845 		} else if (frag >= parent_shift) {
846 			/* If the fragment size is even larger than the parent
847 			 * shift we should go up one level and check it again.
848 			 */
849 			if (!amdgpu_vm_pt_ancestor(&cursor))
850 				return -EINVAL;
851 			continue;
852 		}
853 
854 		pt = cursor.entry->bo;
855 		if (!pt) {
856 			/* We need all PDs and PTs for mapping something, */
857 			if (flags & AMDGPU_PTE_VALID)
858 				return -ENOENT;
859 
860 			/* but unmapping something can happen at a higher
861 			 * level.
862 			 */
863 			if (!amdgpu_vm_pt_ancestor(&cursor))
864 				return -EINVAL;
865 
866 			pt = cursor.entry->bo;
867 			shift = parent_shift;
868 			frag_end = max(frag_end, ALIGN(frag_start + 1,
869 				   1ULL << shift));
870 		}
871 
872 		/* Looks good so far, calculate parameters for the update */
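		/* incr is the address range covered by one entry at this level,
		 * pe_start the byte offset of the first entry to write (8 bytes
		 * per entry).
		 */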
873 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
874 		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
875 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
876 
877 		if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
878 			/*
879 			 * An unlocked unmap from the MMU notifier callback can hit a huge page whose
880 			 * leaf is a PDE; clear only that one entry, then search again for the next PDE or PTE leaf.
881 			 */
882 			entry_end = 1ULL << shift;
883 		else
884 			entry_end = ((uint64_t)mask + 1) << shift;
885 		entry_end += cursor.pfn & ~(entry_end - 1);
886 		entry_end = min(entry_end, end);
887 
888 		do {
889 			struct amdgpu_vm *vm = params->vm;
890 			uint64_t upd_end = min(entry_end, frag_end);
891 			unsigned int nptes = (upd_end - frag_start) >> shift;
892 			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
893 
894 			/* This can happen when we set higher level PDs to
895 			 * silent to stop fault floods.
896 			 */
897 			nptes = max(nptes, 1u);
898 
899 			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
900 						    min(nptes, 32u), dst, incr,
901 						    upd_flags,
902 						    vm->task_info ? vm->task_info->tgid : 0,
903 						    vm->immediate.fence_context);
904 			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
905 						   cursor.level, pe_start, dst,
906 						   nptes, incr, upd_flags);
907 
908 			pe_start += nptes * 8;
909 			dst += nptes * incr;
910 
911 			frag_start = upd_end;
912 			if (frag_start >= frag_end) {
913 				/* figure out the next fragment */
914 				amdgpu_vm_pte_fragment(params, frag_start, end,
915 						       flags, &frag, &frag_end);
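				/* The new fragment is smaller than this level's
				 * shift, stop here and continue the walk at a
				 * finer granularity.
				 */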
916 				if (frag < shift)
917 					break;
918 			}
919 		} while (frag_start < entry_end);
920 
921 		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
922 			/* Free all child entries.
923 			 * Update the tables with the flags and addresses and free up subsequent
924 			 * tables in the case of huge pages or freed up areas.
925 			 * This is the maximum you can free, because all other page tables are not
926 			 * completely covered by the range and so potentially still in use.
927 			 */
928 			while (cursor.pfn < frag_start) {
929 				/* Make sure previous mapping is freed */
930 				if (cursor.entry->bo) {
931 					params->needs_flush = true;
932 					amdgpu_vm_pt_add_list(params, &cursor);
933 				}
934 				amdgpu_vm_pt_next(adev, &cursor);
935 			}
936 
937 		} else if (frag >= shift) {
938 			/* or just move on to the next on the same level. */
939 			amdgpu_vm_pt_next(adev, &cursor);
940 		}
941 	}
942 
943 	return 0;
944 }
945 
946 /**
947  * amdgpu_vm_pt_map_tables - make the page table BOs CPU accessible
948  * @adev: amdgpu device structure
949  * @vm: amdgpu vm structure
950  *
951  * Make the root page directory and everything below it CPU accessible.
952  */
953 int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
954 {
955 	struct amdgpu_vm_pt_cursor cursor;
956 	struct amdgpu_vm_bo_base *entry;
957 
958 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
959 
960 		struct amdgpu_bo_vm *bo;
961 		int r;
962 
963 		if (entry->bo) {
964 			bo = to_amdgpu_bo_vm(entry->bo);
965 			r = vm->update_funcs->map_table(bo);
966 			if (r)
967 				return r;
968 		}
969 	}
970 
971 	return 0;
972 }
973