xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c (revision 92c4c9fdc838d3b41a996bb700ea64b9e78fc7ea)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_drv.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_trace.h"
28 #include "amdgpu_vm.h"
29 #include "amdgpu_job.h"
30 
/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 *
 * Tracks the current position while walking the page directory/page table
 * tree of a VM.
 */
34 struct amdgpu_vm_pt_cursor {
35 	uint64_t pfn;	/* page frame number the walk currently points at */
36 	struct amdgpu_vm_bo_base *parent;	/* parent of @entry, NULL at the root */
37 	struct amdgpu_vm_bo_base *entry;	/* PD/PT entry currently visited */
38 	unsigned int level;	/* VMPT level of @entry */
39 };
40 
41 /**
42  * amdgpu_vm_pt_level_shift - return the addr shift for each level
43  *
44  * @adev: amdgpu_device pointer
45  * @level: VMPT level
46  *
47  * Returns:
48  * The number of bits the pfn needs to be right shifted for a level.
49  */
amdgpu_vm_pt_level_shift(struct amdgpu_device * adev,unsigned int level)50 static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
51 					     unsigned int level)
52 {
53 	switch (level) {
54 	case AMDGPU_VM_PDB3:
55 	case AMDGPU_VM_PDB2:
56 	case AMDGPU_VM_PDB1:
57 	case AMDGPU_VM_PDB0:
58 		return 9 * (AMDGPU_VM_PDB0 - level) +
59 			adev->vm_manager.block_size;
60 	case AMDGPU_VM_PTB:
61 		return 0;
62 	default:
63 		return ~0;
64 	}
65 }
66 
67 /**
68  * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
69  *
70  * @adev: amdgpu_device pointer
71  * @level: VMPT level
72  *
73  * Returns:
74  * The number of entries in a page directory or page table.
75  */
amdgpu_vm_pt_num_entries(struct amdgpu_device * adev,unsigned int level)76 static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
77 					     unsigned int level)
78 {
79 	unsigned int shift;
80 
81 	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
82 	if (level == adev->vm_manager.root_level)
83 		/* For the root directory */
84 		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
85 			>> shift;
86 	else if (level != AMDGPU_VM_PTB)
87 		/* Everything in between */
88 		return 512;
89 
90 	/* For the page tables on the leaves */
91 	return AMDGPU_VM_PTE_COUNT(adev);
92 }
93 
94 /**
95  * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
96  *
97  * @adev: amdgpu_device pointer
98  * @level: VMPT level
99  *
100  * Returns:
101  * The mask to extract the entry number of a PD/PT from an address.
102  */
amdgpu_vm_pt_entries_mask(struct amdgpu_device * adev,unsigned int level)103 static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
104 					  unsigned int level)
105 {
106 	if (level <= adev->vm_manager.root_level)
107 		return 0xffffffff;
108 	else if (level != AMDGPU_VM_PTB)
109 		return 0x1ff;
110 	else
111 		return AMDGPU_VM_PTE_COUNT(adev) - 1;
112 }
113 
/**
 * amdgpu_vm_pt_size - returns the size of the page table in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
				      unsigned int level)
{
	unsigned int num_entries = amdgpu_vm_pt_num_entries(adev, level);

	/* Eight bytes per PDE/PTE, rounded up to the GPU page size */
	return AMDGPU_GPU_PAGE_ALIGN(num_entries * 8);
}
128 
129 /**
130  * amdgpu_vm_pt_parent - get the parent page directory
131  *
132  * @pt: child page table
133  *
134  * Helper to get the parent entry for the child page table. NULL if we are at
135  * the root page directory.
136  */
137 static struct amdgpu_vm_bo_base *
amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base * pt)138 amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
139 {
140 	struct amdgpu_bo *parent = pt->bo->parent;
141 
142 	if (!parent)
143 		return NULL;
144 
145 	return parent->vm_bo;
146 }
147 
148 /**
149  * amdgpu_vm_pt_start - start PD/PT walk
150  *
151  * @adev: amdgpu_device pointer
152  * @vm: amdgpu_vm structure
153  * @start: start address of the walk
154  * @cursor: state to initialize
155  *
156  * Initialize a amdgpu_vm_pt_cursor to start a walk.
157  */
amdgpu_vm_pt_start(struct amdgpu_device * adev,struct amdgpu_vm * vm,uint64_t start,struct amdgpu_vm_pt_cursor * cursor)158 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
159 			       struct amdgpu_vm *vm, uint64_t start,
160 			       struct amdgpu_vm_pt_cursor *cursor)
161 {
162 	cursor->pfn = start;
163 	cursor->parent = NULL;
164 	cursor->entry = &vm->root;
165 	cursor->level = adev->vm_manager.root_level;
166 }
167 
168 /**
169  * amdgpu_vm_pt_descendant - go to child node
170  *
171  * @adev: amdgpu_device pointer
172  * @cursor: current state
173  *
174  * Walk to the child node of the current node.
175  * Returns:
176  * True if the walk was possible, false otherwise.
177  */
amdgpu_vm_pt_descendant(struct amdgpu_device * adev,struct amdgpu_vm_pt_cursor * cursor)178 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
179 				    struct amdgpu_vm_pt_cursor *cursor)
180 {
181 	unsigned int mask, shift, idx;
182 
183 	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
184 	    !cursor->entry->bo)
185 		return false;
186 
187 	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
188 	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);
189 
190 	++cursor->level;
191 	idx = (cursor->pfn >> shift) & mask;
192 	cursor->parent = cursor->entry;
193 	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
194 	return true;
195 }
196 
197 /**
198  * amdgpu_vm_pt_sibling - go to sibling node
199  *
200  * @adev: amdgpu_device pointer
201  * @cursor: current state
202  *
203  * Walk to the sibling node of the current node.
204  * Returns:
205  * True if the walk was possible, false otherwise.
206  */
amdgpu_vm_pt_sibling(struct amdgpu_device * adev,struct amdgpu_vm_pt_cursor * cursor)207 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
208 				 struct amdgpu_vm_pt_cursor *cursor)
209 {
210 
211 	unsigned int shift, num_entries;
212 	struct amdgpu_bo_vm *parent;
213 
214 	/* Root doesn't have a sibling */
215 	if (!cursor->parent)
216 		return false;
217 
218 	/* Go to our parents and see if we got a sibling */
219 	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
220 	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
221 	parent = to_amdgpu_bo_vm(cursor->parent->bo);
222 
223 	if (cursor->entry == &parent->entries[num_entries - 1])
224 		return false;
225 
226 	cursor->pfn += 1ULL << shift;
227 	cursor->pfn &= ~((1ULL << shift) - 1);
228 	++cursor->entry;
229 	return true;
230 }
231 
232 /**
233  * amdgpu_vm_pt_ancestor - go to parent node
234  *
235  * @cursor: current state
236  *
237  * Walk to the parent node of the current node.
238  * Returns:
239  * True if the walk was possible, false otherwise.
240  */
amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor * cursor)241 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
242 {
243 	if (!cursor->parent)
244 		return false;
245 
246 	--cursor->level;
247 	cursor->entry = cursor->parent;
248 	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
249 	return true;
250 }
251 
252 /**
253  * amdgpu_vm_pt_next - get next PD/PT in hieratchy
254  *
255  * @adev: amdgpu_device pointer
256  * @cursor: current state
257  *
258  * Walk the PD/PT tree to the next node.
259  */
amdgpu_vm_pt_next(struct amdgpu_device * adev,struct amdgpu_vm_pt_cursor * cursor)260 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
261 			      struct amdgpu_vm_pt_cursor *cursor)
262 {
263 	/* First try a newborn child */
264 	if (amdgpu_vm_pt_descendant(adev, cursor))
265 		return;
266 
267 	/* If that didn't worked try to find a sibling */
268 	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
269 		/* No sibling, go to our parents and grandparents */
270 		if (!amdgpu_vm_pt_ancestor(cursor)) {
271 			cursor->pfn = ~0ll;
272 			return;
273 		}
274 	}
275 }
276 
277 /**
278  * amdgpu_vm_pt_first_dfs - start a deep first search
279  *
280  * @adev: amdgpu_device structure
281  * @vm: amdgpu_vm structure
282  * @start: optional cursor to start with
283  * @cursor: state to initialize
284  *
285  * Starts a deep first traversal of the PD/PT tree.
286  */
amdgpu_vm_pt_first_dfs(struct amdgpu_device * adev,struct amdgpu_vm * vm,struct amdgpu_vm_pt_cursor * start,struct amdgpu_vm_pt_cursor * cursor)287 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
288 				   struct amdgpu_vm *vm,
289 				   struct amdgpu_vm_pt_cursor *start,
290 				   struct amdgpu_vm_pt_cursor *cursor)
291 {
292 	if (start)
293 		*cursor = *start;
294 	else
295 		amdgpu_vm_pt_start(adev, vm, 0, cursor);
296 
297 	while (amdgpu_vm_pt_descendant(adev, cursor))
298 		;
299 }
300 
301 /**
302  * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue
303  *
304  * @start: starting point for the search
305  * @entry: current entry
306  *
307  * Returns:
308  * True when the search should continue, false otherwise.
309  */
amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor * start,struct amdgpu_vm_bo_base * entry)310 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
311 				      struct amdgpu_vm_bo_base *entry)
312 {
313 	return entry && (!start || entry != start->entry);
314 }
315 
316 /**
317  * amdgpu_vm_pt_next_dfs - get the next node for a deep first search
318  *
319  * @adev: amdgpu_device structure
320  * @cursor: current state
321  *
322  * Move the cursor to the next node in a deep first search.
323  */
amdgpu_vm_pt_next_dfs(struct amdgpu_device * adev,struct amdgpu_vm_pt_cursor * cursor)324 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
325 				  struct amdgpu_vm_pt_cursor *cursor)
326 {
327 	if (!cursor->entry)
328 		return;
329 
330 	if (!cursor->parent)
331 		cursor->entry = NULL;
332 	else if (amdgpu_vm_pt_sibling(adev, cursor))
333 		while (amdgpu_vm_pt_descendant(adev, cursor))
334 			;
335 	else
336 		amdgpu_vm_pt_ancestor(cursor);
337 }
338 
/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth first search of all PDs/PTs
 *
 * "Safe" because @cursor is advanced to the next node before the loop body
 * runs, so the body may free the current @entry without breaking the walk.
 */
342 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
343 	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
344 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
345 	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
346 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
347 
/**
 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
amdgpu_vm_pt_clear(struct amdgpu_device * adev,struct amdgpu_vm * vm,struct amdgpu_bo_vm * vmbo,bool immediate)361 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
362 		       struct amdgpu_bo_vm *vmbo, bool immediate)
363 {
364 	unsigned int level = adev->vm_manager.root_level;
365 	struct ttm_operation_ctx ctx = { true, false };
366 	struct amdgpu_vm_update_params params;
367 	struct amdgpu_bo *ancestor = &vmbo->bo;
368 	unsigned int entries;
369 	struct amdgpu_bo *bo = &vmbo->bo;
370 	uint64_t value = 0, flags = 0;
371 	uint64_t addr;
372 	int r, idx;
373 
	/* Figure out our place in the hierarchy: count BO parents to find
	 * this BO's level relative to the root.
	 */
374 	/* Figure out our place in the hierarchy */
375 	if (ancestor->parent) {
376 		++level;
377 		while (ancestor->parent->parent) {
378 			++level;
379 			ancestor = ancestor->parent;
380 		}
381 	}
382 
	/* Eight bytes per PDE/PTE */
383 	entries = amdgpu_bo_size(bo) / 8;
384 
	/* Make sure the BO has backing storage before mapping/updating it */
385 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
386 	if (r)
387 		return r;
388 
	/* Guard against concurrent device removal */
389 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
390 		return -ENODEV;
391 
392 	r = vm->update_funcs->map_table(vmbo);
393 	if (r)
394 		goto exit;
395 
396 	memset(&params, 0, sizeof(params));
397 	params.adev = adev;
398 	params.vm = vm;
399 	params.immediate = immediate;
400 
401 	r = vm->update_funcs->prepare(&params, NULL,
402 				      AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
403 	if (r)
404 		goto exit;
405 
406 	addr = 0;
407 
408 	if (adev->asic_type >= CHIP_VEGA10) {
409 		if (level != AMDGPU_VM_PTB) {
410 			/* Handle leaf PDEs as PTEs */
411 			flags |= AMDGPU_PDE_PTE_FLAG(adev);
412 			amdgpu_gmc_get_vm_pde(adev, level,
413 					      &value, &flags);
414 		} else {
415 			/* Workaround for fault priority problem on GMC9 */
416 			flags = AMDGPU_PTE_EXECUTABLE | adev->gmc.init_pte_flags;
417 		}
418 	}
419 
	/* Write all entries with the chosen value/flags in one go */
420 	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
421 				     value, flags);
422 	if (r)
423 		goto exit;
424 
425 	r = vm->update_funcs->commit(&params, NULL);
426 exit:
427 	drm_dev_exit(idx);
428 	return r;
429 }
430 
431 /**
432  * amdgpu_vm_pt_create - create bo for PD/PT
433  *
434  * @adev: amdgpu_device pointer
435  * @vm: requesting vm
436  * @level: the page table level
437  * @immediate: use a immediate update
438  * @vmbo: pointer to the buffer object pointer
439  * @xcp_id: GPU partition id
440  */
amdgpu_vm_pt_create(struct amdgpu_device * adev,struct amdgpu_vm * vm,int level,bool immediate,struct amdgpu_bo_vm ** vmbo,int32_t xcp_id)441 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
442 			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
443 			int32_t xcp_id)
444 {
445 	struct amdgpu_bo_param bp;
446 	unsigned int num_entries;
447 
448 	memset(&bp, 0, sizeof(bp));
449 
450 	bp.size = amdgpu_vm_pt_size(adev, level);
451 	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
452 
453 	if (!adev->gmc.is_app_apu)
454 		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
455 	else
456 		bp.domain = AMDGPU_GEM_DOMAIN_GTT;
457 
458 	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
459 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
460 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
461 
462 	if (level < AMDGPU_VM_PTB)
463 		num_entries = amdgpu_vm_pt_num_entries(adev, level);
464 	else
465 		num_entries = 0;
466 
467 	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
468 
469 	if (vm->use_cpu_for_update)
470 		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
471 
472 	bp.type = ttm_bo_type_kernel;
473 	bp.no_wait_gpu = immediate;
474 	bp.xcp_id_plus1 = xcp_id + 1;
475 
476 	if (vm->root.bo)
477 		bp.resv = vm->root.bo->tbo.base.resv;
478 
479 	return amdgpu_bo_create_vm(adev, &bp, vmbo);
480 }
481 
/**
 * amdgpu_vm_pt_alloc - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 0 if the page table was already present or was successfully created and
 * cleared, negative errno if an error occurred.
 * NOTE(review): the previous doc claimed a return of 1 for freshly allocated
 * tables; the code below only ever returns 0 or a negative errno.
 */
amdgpu_vm_pt_alloc(struct amdgpu_device * adev,struct amdgpu_vm * vm,struct amdgpu_vm_pt_cursor * cursor,bool immediate)496 static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
497 			      struct amdgpu_vm *vm,
498 			      struct amdgpu_vm_pt_cursor *cursor,
499 			      bool immediate)
500 {
501 	struct amdgpu_vm_bo_base *entry = cursor->entry;
502 	struct amdgpu_bo *pt_bo;
503 	struct amdgpu_bo_vm *pt;
504 	int r;
505 
	/* Already allocated, nothing to do */
506 	if (entry->bo)
507 		return 0;
508 
	/* Drop the eviction lock around BO creation; re-taken before the
	 * result is linked into the tree.
	 */
509 	amdgpu_vm_eviction_unlock(vm);
510 	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
511 				vm->root.bo->xcp_id);
512 	amdgpu_vm_eviction_lock(vm);
513 	if (r)
514 		return r;
515 
516 	/* Keep a reference to the root directory to avoid
517 	 * freeing them up in the wrong order.
518 	 */
519 	pt_bo = &pt->bo;
520 	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
521 	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
522 	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
523 	if (r)
524 		goto error_free_pt;
525 
526 	return 0;
527 
528 error_free_pt:
529 	amdgpu_bo_unref(&pt_bo);
530 	return r;
531 }
532 
/**
 * amdgpu_vm_pt_free - free one PD/PT
 *
 * @entry: PDE to free
 *
 * Unlinks the entry's BO from the VM and drops the reference. No-op when the
 * entry has no BO.
 */
amdgpu_vm_pt_free(struct amdgpu_vm_bo_base * entry)538 static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
539 {
540 	if (!entry->bo)
541 		return;

	/* Remove this BO from the memory statistics before it goes away */
543 	amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
544 	entry->bo->vm_bo = NULL;
545 	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);

	/* Unlink from the VM status list under the lock, then drop the ref */
547 	spin_lock(&entry->vm->status_lock);
548 	list_del(&entry->vm_status);
549 	spin_unlock(&entry->vm->status_lock);
550 	amdgpu_bo_unref(&entry->bo);
551 }
552 
553 /**
554  * amdgpu_vm_pt_free_list - free PD/PT levels
555  *
556  * @adev: amdgpu device structure
557  * @params: see amdgpu_vm_update_params definition
558  *
559  * Free the page directory objects saved in the flush list
560  */
amdgpu_vm_pt_free_list(struct amdgpu_device * adev,struct amdgpu_vm_update_params * params)561 void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
562 			    struct amdgpu_vm_update_params *params)
563 {
564 	struct amdgpu_vm_bo_base *entry, *next;
565 	bool unlocked = params->unlocked;
566 
567 	if (list_empty(&params->tlb_flush_waitlist))
568 		return;
569 
570 	/*
571 	 * unlocked unmap clear page table leaves, warning to free the page entry.
572 	 */
573 	WARN_ON(unlocked);
574 
575 	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
576 		amdgpu_vm_pt_free(entry);
577 }
578 
/**
 * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
 *
 * @params: parameters for the update
 * @cursor: first PT entry to start DF search from, non NULL
 *
 * Moves the subtree below @cursor onto the TLB flush wait list.
 * This list will be freed after TLB flush.
 */
amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params * params,struct amdgpu_vm_pt_cursor * cursor)587 static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
588 				  struct amdgpu_vm_pt_cursor *cursor)
589 {
590 	struct amdgpu_vm_pt_cursor seek;
591 	struct amdgpu_vm_bo_base *entry;

	/* Move every allocated descendant under the status lock */
593 	spin_lock(&params->vm->status_lock);
594 	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
595 		if (entry && entry->bo)
596 			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
597 	}

	/* The DFS above stops before the start node, move it explicitly */
599 	/* enter start node now */
600 	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
601 	spin_unlock(&params->vm->status_lock);
602 }
603 
604 /**
605  * amdgpu_vm_pt_free_root - free root PD
606  * @adev: amdgpu device structure
607  * @vm: amdgpu vm structure
608  *
609  * Free the root page directory and everything below it.
610  */
amdgpu_vm_pt_free_root(struct amdgpu_device * adev,struct amdgpu_vm * vm)611 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
612 {
613 	struct amdgpu_vm_pt_cursor cursor;
614 	struct amdgpu_vm_bo_base *entry;
615 
616 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
617 		if (entry)
618 			amdgpu_vm_pt_free(entry);
619 	}
620 }
621 
/**
 * amdgpu_vm_pde_update - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 *
 * Returns:
 * 0 on success, -EINVAL when @entry has no parent, or the error from the
 * update callback.
 */
amdgpu_vm_pde_update(struct amdgpu_vm_update_params * params,struct amdgpu_vm_bo_base * entry)630 int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
631 			 struct amdgpu_vm_bo_base *entry)
632 {
633 	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
634 	struct amdgpu_bo *bo, *pbo;
635 	struct amdgpu_vm *vm = params->vm;
636 	uint64_t pde, pt, flags;
637 	unsigned int level;

	/* Only the root has no parent, and the root has no PDE to update */
639 	if (WARN_ON(!parent))
640 		return -EINVAL;

	/* Determine the parent's level by counting BO parents up to the root */
642 	bo = parent->bo;
643 	for (level = 0, pbo = bo->parent; pbo; ++level)
644 		pbo = pbo->parent;

646 	level += params->adev->vm_manager.root_level;
647 	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	/* Byte offset of this entry inside the parent: 8 bytes per PDE */
648 	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
649 	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
650 					1, 0, flags);
651 }
652 
653 /**
654  * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
655  *
656  * @adev: amdgpu_device pointer
657  * @flags: pointer to PTE flags
658  *
659  * Update PTE no-retry flags when TF is enabled.
660  */
amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device * adev,uint64_t * flags)661 static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
662 						uint64_t *flags)
663 {
664 	/*
665 	 * Update no-retry flags with the corresponding TF
666 	 * no-retry combination.
667 	 */
668 	if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
669 		*flags &= ~AMDGPU_VM_NORETRY_FLAGS;
670 		*flags |= adev->gmc.noretry_flags;
671 	}
672 }
673 
/*
 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
 *
 * @params: parameters for the update
 * @pt: PD/PT BO being written
 * @level: VMPT level of @pt
 * @pe: byte offset of the first entry to write
 * @addr: destination address
 * @count: number of entries
 * @incr: address increment per entry
 * @flags: hw mapping flags
 *
 * Make sure to set the right flags for the PTEs at the desired level, then
 * forward the write to the VM's update backend.
 */
amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params * params,struct amdgpu_bo_vm * pt,unsigned int level,uint64_t pe,uint64_t addr,unsigned int count,uint32_t incr,uint64_t flags)679 static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
680 				       struct amdgpu_bo_vm *pt,
681 				       unsigned int level,
682 				       uint64_t pe, uint64_t addr,
683 				       unsigned int count, uint32_t incr,
684 				       uint64_t flags)
685 {
686 	struct amdgpu_device *adev = params->adev;

	/* Huge pages: directory entries above PTB are written as PTEs */
688 	if (level != AMDGPU_VM_PTB) {
689 		flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
690 		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);

692 	} else if (adev->asic_type >= CHIP_VEGA10 &&
693 		   !(flags & AMDGPU_PTE_VALID) &&
694 		   !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {

696 		/* Workaround for fault priority problem on GMC9 and GFX12,
697 		 * EXECUTABLE for GMC9 fault priority and init_pte_flags
698 		 * (e.g. AMDGPU_PTE_IS_PTE on GFX12)
699 		 */
700 		flags |= AMDGPU_PTE_EXECUTABLE | adev->gmc.init_pte_flags;
701 	}

703 	/*
704 	 * Update no-retry flags to use the no-retry flag combination
705 	 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
706 	 * does not work when TF is enabled. So, replace them with
707 	 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
708 	 * all cases.
709 	 */
710 	if (level == AMDGPU_VM_PTB)
711 		amdgpu_vm_pte_update_noretry_flags(adev, &flags);

713 	/* APUs mapping system memory may need different MTYPEs on different
714 	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
715 	 * to be on the same NUMA node.
716 	 */
717 	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
718 	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
719 	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
720 		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);

722 	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
723 					 flags);
724 }
725 
/**
 * amdgpu_vm_pte_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params * params,uint64_t start,uint64_t end,uint64_t flags,unsigned int * frag,uint64_t * frag_end)738 static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
739 				   uint64_t start, uint64_t end, uint64_t flags,
740 				   unsigned int *frag, uint64_t *frag_end)
741 {
742 	/**
743 	 * The MC L1 TLB supports variable sized pages, based on a fragment
744 	 * field in the PTE. When this field is set to a non-zero value, page
745 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
746 	 * flags are considered valid for all PTEs within the fragment range
747 	 * and corresponding mappings are assumed to be physically contiguous.
748 	 *
749 	 * The L1 TLB can store a single PTE for the whole fragment,
750 	 * significantly increasing the space available for translation
751 	 * caching. This leads to large improvements in throughput when the
752 	 * TLB is under pressure.
753 	 *
754 	 * The L2 TLB distributes small and large fragments into two
755 	 * asymmetric partitions. The large fragment cache is significantly
756 	 * larger. Thus, we try to use large fragments wherever possible.
757 	 * Userspace can support this by aligning virtual base address and
758 	 * allocation size to the fragment size.
759 	 *
760 	 * Starting with Vega10 the fragment size only controls the L1. The L2
761 	 * is now directly fed with small/huge/giant pages from the walker.
762 	 */
763 	unsigned int max_frag;

765 	if (params->adev->asic_type < CHIP_VEGA10)
766 		max_frag = params->adev->vm_manager.fragment_size;
767 	else
768 		max_frag = 31;

	/* System pages are not physically contiguous, no fragment possible */
771 	if (params->pages_addr) {
772 		*frag = 0;
773 		*frag_end = end;
774 		return;
775 	}

	/* Fragment is limited by both the alignment of start (ffs) and the
	 * remaining size of the range (fls64).
	 */
777 	/* This intentionally wraps around if no bit is set */
778 	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
779 	if (*frag >= max_frag) {
780 		*frag = max_frag;
781 		*frag_end = end & ~((1ULL << max_frag) - 1);
782 	} else {
783 		*frag_end = start + (1 << *frag);
784 	}
785 }
786 
/**
 * amdgpu_vm_ptes_update - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
amdgpu_vm_ptes_update(struct amdgpu_vm_update_params * params,uint64_t start,uint64_t end,uint64_t dst,uint64_t flags)801 int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
802 			  uint64_t start, uint64_t end,
803 			  uint64_t dst, uint64_t flags)
804 {
805 	struct amdgpu_device *adev = params->adev;
806 	struct amdgpu_vm_pt_cursor cursor;
807 	uint64_t frag_start = start, frag_end;
808 	unsigned int frag;
809 	int r;

811 	/* figure out the initial fragment */
812 	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
813 			       &frag_end);

815 	/* walk over the address space and update the PTs */
816 	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
817 	while (cursor.pfn < end) {
818 		unsigned int shift, parent_shift, mask;
819 		uint64_t incr, entry_end, pe_start;
820 		struct amdgpu_bo *pt;

822 		if (!params->unlocked) {
823 			/* make sure that the page tables covering the
824 			 * address range are actually allocated
825 			 */
826 			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
827 					       &cursor, params->immediate);
828 			if (r)
829 				return r;
830 		}

		/* Pick the level whose entry granularity matches the fragment */
832 		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
833 		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
834 		if (params->unlocked) {
835 			/* Unlocked updates are only allowed on the leaves */
836 			if (amdgpu_vm_pt_descendant(adev, &cursor))
837 				continue;
838 		} else if (adev->asic_type < CHIP_VEGA10 &&
839 			   (flags & AMDGPU_PTE_VALID)) {
840 			/* No huge page support before GMC v9 */
841 			if (cursor.level != AMDGPU_VM_PTB) {
842 				if (!amdgpu_vm_pt_descendant(adev, &cursor))
843 					return -ENOENT;
844 				continue;
845 			}
846 		} else if (frag < shift) {
847 			/* We can't use this level when the fragment size is
848 			 * smaller than the address shift. Go to the next
849 			 * child entry and try again.
850 			 */
851 			if (amdgpu_vm_pt_descendant(adev, &cursor))
852 				continue;
853 		} else if (frag >= parent_shift) {
854 			/* If the fragment size is even larger than the parent
855 			 * shift we should go up one level and check it again.
856 			 */
857 			if (!amdgpu_vm_pt_ancestor(&cursor))
858 				return -EINVAL;
859 			continue;
860 		}

862 		pt = cursor.entry->bo;
863 		if (!pt) {
864 			/* We need all PDs and PTs for mapping something, */
865 			if (flags & AMDGPU_PTE_VALID)
866 				return -ENOENT;

868 			/* but unmapping something can happen at a higher
869 			 * level.
870 			 */
871 			if (!amdgpu_vm_pt_ancestor(&cursor))
872 				return -EINVAL;

874 			pt = cursor.entry->bo;
875 			shift = parent_shift;
876 			frag_end = max(frag_end, ALIGN(frag_start + 1,
877 				   1ULL << shift));
878 		}

880 		/* Looks good so far, calculate parameters for the update */
881 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
882 		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
		/* Byte offset of the first entry to write, 8 bytes per entry */
883 		pe_start = ((cursor.pfn >> shift) & mask) * 8;

885 		if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
886 			/*
887 			 * MMU notifier callback unlocked unmap huge page, leave is PDE entry,
888 			 * only clear one entry. Next entry search again for PDE or PTE leave.
889 			 */
890 			entry_end = 1ULL << shift;
891 		else
892 			entry_end = ((uint64_t)mask + 1) << shift;
893 		entry_end += cursor.pfn & ~(entry_end - 1);
894 		entry_end = min(entry_end, end);

		/* Write the range fragment by fragment within this PD/PT */
896 		do {
897 			struct amdgpu_vm *vm = params->vm;
898 			uint64_t upd_end = min(entry_end, frag_end);
899 			unsigned int nptes = (upd_end - frag_start) >> shift;
900 			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

902 			/* This can happen when we set higher level PDs to
903 			 * silent to stop fault floods.
904 			 */
905 			nptes = max(nptes, 1u);

907 			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
908 						    min(nptes, 32u), dst, incr,
909 						    upd_flags,
910 						    vm->task_info ? vm->task_info->tgid : 0,
911 						    vm->immediate.fence_context);
912 			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
913 						   cursor.level, pe_start, dst,
914 						   nptes, incr, upd_flags);

916 			pe_start += nptes * 8;
917 			dst += nptes * incr;

919 			frag_start = upd_end;
920 			if (frag_start >= frag_end) {
921 				/* figure out the next fragment */
922 				amdgpu_vm_pte_fragment(params, frag_start, end,
923 						       flags, &frag, &frag_end);
924 				if (frag < shift)
925 					break;
926 			}
927 		} while (frag_start < entry_end);

929 		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
930 			/* Free all child entries.
931 			 * Update the tables with the flags and addresses and free up subsequent
932 			 * tables in the case of huge pages or freed up areas.
933 			 * This is the maximum you can free, because all other page tables are not
934 			 * completely covered by the range and so potentially still in use.
935 			 */
936 			while (cursor.pfn < frag_start) {
937 				/* Make sure previous mapping is freed */
938 				if (cursor.entry->bo) {
939 					params->needs_flush = true;
940 					amdgpu_vm_pt_add_list(params, &cursor);
941 				}
942 				amdgpu_vm_pt_next(adev, &cursor);
943 			}

945 		} else if (frag >= shift) {
946 			/* or just move on to the next on the same level. */
947 			amdgpu_vm_pt_next(adev, &cursor);
948 		}
949 	}

951 	return 0;
952 }
953 
954 /**
955  * amdgpu_vm_pt_map_tables - have bo of root PD cpu accessible
956  * @adev: amdgpu device structure
957  * @vm: amdgpu vm structure
958  *
959  * make root page directory and everything below it cpu accessible.
960  */
amdgpu_vm_pt_map_tables(struct amdgpu_device * adev,struct amdgpu_vm * vm)961 int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
962 {
963 	struct amdgpu_vm_pt_cursor cursor;
964 	struct amdgpu_vm_bo_base *entry;
965 
966 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
967 
968 		struct amdgpu_bo_vm *bo;
969 		int r;
970 
971 		if (entry->bo) {
972 			bo = to_amdgpu_bo_vm(entry->bo);
973 			r = vm->update_funcs->map_table(bo);
974 			if (r)
975 				return r;
976 		}
977 	}
978 
979 	return 0;
980 }
981