// SPDX-License-Identifier: MIT
/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/fdtable.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_exec.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
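/* 3UL << 29 == 1.5 GiB held back from the KFD system memory limit */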
#define AMDGPU_RESERVE_MEM_LIMIT			(3UL << 29)

/*
 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
 * BO chunk
 */
#define VRAM_AVAILABLITY_ALIGN (1 << 21)

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
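/* Example: AMDGPU_GEM_DOMAIN_VRAM is bit 0x4, so ffs() returns 3 and
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) yields "VRAM".
 */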

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/**
 * reuse_dmamap() - Check whether adev can share the original
 * userptr BO
 *
 * If both adev and bo_adev are in direct mapping or
 * in the same iommu group, they can share the original BO.
 *
 * @adev: Device that wants to share the original BO
 * @bo_adev: Device the allocated BO belongs to
 *
 * Return: returns true if adev can share the original userptr BO,
 * false otherwise.
 */
static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
{
	return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
			(adev->dev->iommu_group == bo_adev->dev->iommu_group);
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 63/64 of system RAM, minus the
 *  1.5 GiB AMDGPU_RESERVE_MEM_LIMIT (halved instead on small systems)
 *  TTM memory - ttm_tt_pages_limit(), typically half of system RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	if (kfd_mem_limit.max_system_mem_limit)
		return;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
	if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
		kfd_mem_limit.max_system_mem_limit >>= 1;
	else
		kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;

	kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
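
/* Worked example: with 32 GiB of system RAM, max_system_mem_limit =
 * 32 GiB - (32 GiB >> 6) - 1.5 GiB = 30 GiB, and max_ttm_mem_limit
 * follows ttm_tt_pages_limit() (16 GiB if TTM is left at its default
 * half-of-RAM limit).
 */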

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */

#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
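/* Worked example: for 64 GiB of managed memory, ESTIMATE_PT_SIZE()
 * reserves 64 GiB >> 14 = 4 MiB for page tables (clamped to at least
 * AMDGPU_VM_RESERVED_VRAM).
 */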

/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
 * of buffer.
 *
 * @adev: Device to which the allocated BO belongs
 * @size: Size of buffer, in bytes, encapsulated by BO. This should be
 * equivalent to amdgpu_bo_size(BO)
 * @alloc_flag: Flag used in allocating a BO as noted above
 * @xcp_id: xcp from the xcp manager; one xcp is managed as one compute
 * node in the driver for the application
 *
 * Return:
 *	returns -ENOMEM in case of error, ZERO otherwise
 */
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;
	uint64_t vram_size = 0;

	system_mem_needed = 0;
	ttm_mem_needed = 0;
	vram_needed = 0;
	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		system_mem_needed = size;
		ttm_mem_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		/*
		 * Conservatively round up the allocation requirement to 2 MB
		 * to avoid fragmentation caused by 4K allocations in the tail
		 * 2M BO chunk.
		 */
		vram_needed = size;
		/*
		 * For GFX 9.4.3, get the VRAM size from XCP structs
		 */
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			return -EINVAL;

		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
			system_mem_needed = size;
			ttm_mem_needed = size;
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		system_mem_needed = size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		return -ENOMEM;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
	     vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
		ret = -ENOMEM;
		goto release;
	}

	/* Update memory accounting by decreasing available system
	 * memory, TTM memory and GPU memory as computed above
	 */
	WARN_ONCE(vram_needed && !adev,
		  "adev reference can't be null when vram is used");
	if (adev && xcp_id >= 0) {
		adev->kfd.vram_used[xcp_id] += vram_needed;
		adev->kfd.vram_used_aligned[xcp_id] +=
				(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
				vram_needed :
				ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
	}
	kfd_mem_limit.system_mem_used += system_mem_needed;
	kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= size;
		kfd_mem_limit.ttm_mem_used -= size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		WARN_ONCE(!adev,
			  "adev reference can't be null when alloc mem flags vram is set");
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			goto release;

		if (adev) {
			adev->kfd.vram_used[xcp_id] -= size;
			if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
				adev->kfd.vram_used_aligned[xcp_id] -= size;
				kfd_mem_limit.system_mem_used -= size;
				kfd_mem_limit.ttm_mem_used -= size;
			} else {
				adev->kfd.vram_used_aligned[xcp_id] -=
					ALIGN(size, VRAM_AVAILABLITY_ALIGN);
			}
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		kfd_mem_limit.system_mem_used -= size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		goto release;
	}
	WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
		  "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "KFD TTM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "KFD system memory accounting unbalanced");

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 alloc_flags = bo->kfd_bo->alloc_flags;
	u64 size = amdgpu_bo_size(bo);

	amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
					  bo->xcp_id);

	kfree(bo->kfd_bo);
}

/**
 * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
 * about USERPTR or DOORBELL or MMIO BO.
 *
 * @adev: Device for which dmamap BO is being created
 * @mem: BO of peer device that is being DMA mapped. Provides parameters
 *	 in building the dmamap BO
 * @bo_out: Output parameter updated with handle of dmamap BO
 */
static int
create_dmamap_sg_bo(struct amdgpu_device *adev,
		 struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
	struct drm_gem_object *gem_obj;
	int ret;
	uint64_t flags = 0;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
		flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
					AMDGPU_GEM_CREATE_UNCACHED);

	ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
			AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);

	amdgpu_bo_unreserve(mem->bo);

	if (ret) {
		pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
		return -EINVAL;
	}

	*bo_out = gem_to_amdgpu_bo(gem_obj);
	(*bo_out)->parent = amdgpu_bo_ref(mem->bo);
	return ret;
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.base.resv locked.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_fence *replacement;

	if (!ef)
		return -EINVAL;

	/* TODO: Instead of blocking here, we should use the fence of the
	 * page table update and TLB flush directly.
	 */
	replacement = dma_fence_get_stub();
	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
				replacement, DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(replacement);
	return 0;
}

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* We can always get vm_bo from the root PD bo. */
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	/* The BO has already been pinned, no need to validate it */
	if (bo->tbo.pin_count)
		return 0;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	int ret = amdgpu_bo_reserve(bo, false);

	if (ret)
		return ret;

	ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
	if (ret)
		goto unreserve_out;

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		goto unreserve_out;

	dma_resv_add_fence(bo->tbo.base.resv, fence,
			   DMA_RESV_USAGE_BOOKKEEP);

unreserve_out:
	amdgpu_bo_unreserve(bo);

	return ret;
}

static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
				 struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_validate(adev, vm, ticket,
				 amdgpu_amdkfd_validate_vm_bo, NULL);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
				 AMDGPU_VM_MTYPE_DEFAULT;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
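
/* Example: a writable, executable allocation maps with
 * AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 * AMDGPU_VM_PAGE_EXECUTABLE before the ASIC-specific translation done
 * by amdgpu_gem_va_map_flags().
 */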

/**
 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
 * @addr: The starting address to point to
 * @size: Size of memory area in bytes being pointed to
 *
 * Allocates an instance of sg_table and initializes it to point to memory
 * area specified by input parameters. The address used to build is assumed
 * to be DMA mapped, if needed.
 *
 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
 * because they are physically contiguous.
 *
 * Return: Initialized instance of SG Table or NULL
 */
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg_dma_address(sg->sgl) = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
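
/* Illustrative use, as in the DOORBELL/MMIO allocation path later in
 * this file: wrap one already DMA-mapped range in a single-entry
 * table, e.g. sg = create_sg_table(dma_addr, PAGE_SIZE).
 */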

static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}

static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	int ret;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		return ret;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * An access request from the device that owns DOORBELL does not require DMA mapping.
 * This is because the request doesn't go through PCIe root complex i.e. it instead
 * loops back. The need to DMA map arises only when accessing a peer device's DOORBELL.
 *
 * In contrast, all access requests for MMIO need to be DMA mapped without regard to
 * device ownership. This is because access requests for MMIO go through PCIe root
 * complex.
 *
 * This is accomplished in two steps:
 *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
 *         in updating requesting device's page table
 *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
 *         accessible. This allows an update of requesting device's page table
 *         with entries associated with DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Mapping of DOORBELL or MMIO BO of same or peer device
 *   - Validating an evicted DOORBELL or MMIO BO on device seeking access
 *
 * Return: ZERO if successful, NON-ZERO otherwise
 */
static int
kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
		     struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	bool mmio;
	int ret;

	/* Expect SG Table of dmamap BO to be NULL */
	mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
	if (unlikely(ttm->sg)) {
		pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
		return -EINVAL;
	}

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
			DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr = mem->bo->tbo.sg->sgl->dma_address;
	pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
	pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
	dma_addr = dma_map_resource(adev->dev, dma_addr,
			mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(adev->dev, dma_addr);
	if (unlikely(ret))
		return ret;
	pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);

	ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
	if (unlikely(!ttm->sg)) {
		ret = -ENOMEM;
		goto unmap_sg;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(ret))
		goto free_sg;

	return ret;

free_sg:
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
unmap_sg:
	dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	case KFD_MEM_ATT_SG:
		return kfd_mem_dmamap_sg_bo(mem, attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}

static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	/* This is a no-op. We don't want to trigger eviction fences when
	 * unmapping DMABufs. Therefore the invalidation (moving to system
	 * domain) is done in kfd_mem_dmamap_dmabuf.
	 */
}

/**
 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * The method performs the following steps:
 *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
 *   - Free SG Table that is used to encapsulate DMA mapped memory of
 *          peer device's DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *     Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 *     Eviction of DOORBELL or MMIO BO on device having access to its memory
 *
 * Return: void
 */
static void
kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;

	if (unlikely(!ttm->sg)) {
		pr_debug("SG Table of BO is NULL");
		return;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
			ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
	bo->tbo.sg = NULL;
}

static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	case KFD_MEM_ATT_SG:
		kfd_mem_dmaunmap_sg_bo(mem, attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
	if (!mem->dmabuf) {
		struct amdgpu_device *bo_adev;
		struct dma_buf *dmabuf;
		int r, fd;

		bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
		r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
					       mem->gem_handle,
			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
					       DRM_RDWR : 0, &fd);
		if (r)
			return r;
		dmabuf = dma_buf_get(fd);
		close_fd(fd);
		if (WARN_ON_ONCE(IS_ERR(dmabuf)))
			return PTR_ERR(dmabuf);
		mem->dmabuf = dmabuf;
	}

	return 0;
}

static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	ret = kfd_mem_export_dmabuf(mem);
	if (ret)
		return ret;

	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;

	return 0;
}

/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a.  Validate new page tables and directories
 */
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	struct amdgpu_bo_va *bo_va;
	bool same_hive = false;
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
	 *
	 * The access path of MMIO and DOORBELL BOs is always over PCIe.
	 * In contrast, the access path of VRAM BOs depends upon the type of
	 * link that connects the peer device. Access over PCIe is allowed
	 * if peer device has large BAR. In contrast, access over xGMI is
	 * allowed for both small and large BAR configurations of peer device
	 */
	if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
			same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
		if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
			return -EINVAL;
	}

	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
		    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
		    (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
		    same_hive) {
			/* Mappings on the local GPU, VRAM mappings in the
			 * local hive, and userptr or GTT mappings that can
			 * reuse the DMA-map address space all share the
			 * original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
		} else if (mem->bo->tbo.type == ttm_bo_type_sg) {
			WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
				    mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
			attachment[i]->type = KFD_MEM_ATT_SG;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Enable access to GTT and VRAM BOs of peer devices */
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
			   mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
			pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
		} else {
			WARN_ONCE(true, "Handling invalid ATTACH request");
			ret = -EINVAL;
			goto unwind;
		}

		/* Add BO to VM internal data structures */
		ret = amdgpu_bo_reserve(bo[i], false);
		if (ret) {
			pr_debug("Unable to reserve BO during memory attach");
			goto unwind;
		}
		bo_va = amdgpu_vm_bo_find(vm, bo[i]);
		if (!bo_va)
			bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		else
			++bo_va->ref_count;
		attachment[i]->bo_va = bo_va;
		amdgpu_bo_unreserve(bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}
		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			amdgpu_bo_reserve(bo[i], true);
			if (--attachment[i]->bo_va->ref_count == 0)
				amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
			amdgpu_bo_unreserve(bo[i]);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}

static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	pr_debug("\t remove VA 0x%llx in entry %p\n",
			attachment->va, attachment);
	if (--attachment->bo_va->ref_count == 0)
		amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
	drm_gem_object_put(&bo->tbo.base);
	list_del(&attachment->list);
	kfree(attachment);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&mem->validate_list,
			      &process_info->userptr_valid_list);
	else
		list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	mutex_lock(&process_info->lock);
	list_del(&mem->validate_list);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
			   bool criu_resume)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	struct hmm_range *range;
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_hmm_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	if (criu_resume) {
		/*
		 * During a CRIU restore operation, the userptr buffer objects
		 * will be validated in the restore_userptr_work worker at a
		 * later stage when it is scheduled by another ioctl called by
		 * CRIU master process for the target pid for restore.
		 */
		mutex_lock(&process_info->notifier_lock);
		mem->invalid++;
		mutex_unlock(&process_info->notifier_lock);
		mutex_unlock(&process_info->lock);
		return 0;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
	if (ret) {
		if (ret == -EAGAIN)
			pr_debug("Failed to get user pages, try again\n");
		else
			pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
	if (ret)
		amdgpu_hmm_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	/* DRM execution context for the reservation */
	struct drm_exec exec;
	/* Number of VMs reserved */
	unsigned int n_vms;
	/* Pointer to sync object */
	struct amdgpu_sync *sync;
};
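
/* Typical usage (illustration; see the reserve/unreserve helpers below):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, avm, &ctx);
 *	if (!ret) {
 *		... update mappings ...
 *		unreserve_bo_and_vms(&ctx, true, false);
 *	}
 */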

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->n_vms = 1;
	ctx->sync = &mem->sync;
	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&ctx->exec) {
		ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;

		ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;
	}
	return 0;

error:
	pr_err("Failed to reserve buffers in ttm.\n");
	drm_exec_fini(&ctx->exec);
	return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 * are used. Otherwise, only the given VM is reserved.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct kfd_mem_attachment *entry;
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	ctx->sync = &mem->sync;
	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&ctx->exec) {
		ctx->n_vms = 0;
		list_for_each_entry(entry, &mem->attachments, list) {
			if ((vm && vm != entry->bo_va->base.vm) ||
				(entry->is_mapped != map_type
				&& map_type != BO_VM_ALL))
				continue;

			ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
						&ctx->exec, 2);
			drm_exec_retry_on_contention(&ctx->exec);
			if (unlikely(ret))
				goto error;
			++ctx->n_vms;
		}

		ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;
	}
	return 0;

error:
	pr_err("Failed to reserve buffers in ttm.\n");
	drm_exec_fini(&ctx->exec);
	return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	drm_exec_fini(&ctx->exec);
	ctx->sync = NULL;
	return ret;
}

static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int update_gpuvm_pte(struct kgd_mem *mem,
			    struct kfd_mem_attachment *entry,
			    struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct kgd_mem *mem,
			   struct kfd_mem_attachment *entry,
			   struct amdgpu_sync *sync,
			   bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(mem, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	kfd_mem_dmaunmap_attachment(mem, entry);
	return ret;
}

static int process_validate_vms(struct amdkfd_process_info *process_info,
				struct ww_acquire_ctx *ticket)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm, ticket);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		mutex_init(&info->notifier_lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm,
						   NULL);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm, NULL);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	dma_resv_add_fence(vm->root.bo->tbo.base.resv,
			   &vm->process_info->eviction_fence->base,
			   DMA_RESV_USAGE_BOOKKEEP);
	amdgpu_bo_unreserve(vm->root.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;

	*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		dma_fence_put(&info->eviction_fence->base);
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		mutex_destroy(&info->notifier_lock);
		kfree(info);
	}
	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
 * @bo: Handle of buffer object being pinned
 * @domain: Domain into which BO should be pinned
 *
 *   - USERPTR BOs are UNPINNABLE and will return an error
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count incremented. It is valid to PIN a BO multiple times
 *
 * Return: ZERO if successful in pinning, Non-Zero in case of error.
 */
static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return ret;

	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
		/*
		 * If the BO is not contiguous in VRAM, move it to system
		 * memory first to ensure we can get contiguous VRAM space
		 * after evicting other BOs.
		 */
		if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
			struct ttm_operation_ctx ctx = { true, false };

			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (unlikely(ret)) {
				pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret);
				goto out;
			}
		}
	}

	ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
	if (ret)
		pr_err("Error in Pinning BO to domain: %d\n", domain);

	amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
out:
	amdgpu_bo_unreserve(bo);
	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
 * @bo: Handle of buffer object being unpinned
 *
 *   - Is an illegal request for USERPTR BOs and is ignored
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count decremented. Calls to UNPIN must balance calls to PIN
 */
static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return;

	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct amdgpu_vm *avm, u32 pasid)
{
	int ret;

	/* Free the original amdgpu-allocated pasid;
	 * it will be replaced with a kfd-allocated pasid.
	 */
	if (avm->pasid) {
		amdgpu_pasid_free(avm->pasid);
		amdgpu_vm_set_pasid(adev, avm, 0);
	}

	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
	if (ret)
		return ret;

	return 0;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct amdgpu_vm *avm,
					   void **process_info,
					   struct dma_fence **ef)
{
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;

	if (!process_info)
		return;

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		mutex_destroy(&process_info->notifier_lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					    void *drm_priv)
{
	struct amdgpu_vm *avm;

	if (WARN_ON(!adev || !drm_priv))
		return;

	avm = drm_priv_to_vm(drm_priv);

	pr_debug("Releasing process vm %p\n", avm);

	/* The original pasid of the amdgpu vm has already been
	 * released when the amdgpu vm was converted to a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set the amdgpu pasid
	 * to 0 to avoid a duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *pd = avm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

void amdgpu_amdkfd_block_mmu_notifications(void *p)
{
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	WRITE_ONCE(pinfo->block_mmu_notifications, true);
	mutex_unlock(&pinfo->lock);
}

int amdgpu_amdkfd_criu_resume(void *p)
{
	int ret = 0;
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	pr_debug("scheduling work\n");
	mutex_lock(&pinfo->notifier_lock);
	pinfo->evicted_bos++;
	mutex_unlock(&pinfo->notifier_lock);
	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	WRITE_ONCE(pinfo->block_mmu_notifications, false);
	queue_delayed_work(system_freezable_wq,
			   &pinfo->restore_userptr_work, 0);

out_unlock:
	mutex_unlock(&pinfo->lock);
	return ret;
}

size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					  uint8_t xcp_id)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	ssize_t available;
	uint64_t vram_available, system_mem_available, ttm_mem_available;

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
		- adev->kfd.vram_used_aligned[xcp_id]
		- atomic64_read(&adev->vram_pin_size)
		- reserved_for_pt;

	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
		system_mem_available = no_system_mem_limit ?
					kfd_mem_limit.max_system_mem_limit :
					kfd_mem_limit.max_system_mem_limit -
					kfd_mem_limit.system_mem_used;

		ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
				kfd_mem_limit.ttm_mem_used;

		available = min3(system_mem_available, ttm_mem_available,
				 vram_available);
		available = ALIGN_DOWN(available, PAGE_SIZE);
	} else {
		available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);

	if (available < 0)
		available = 0;

	return available;
}
1708 
1709 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1710 		struct amdgpu_device *adev, uint64_t va, uint64_t size,
1711 		void *drm_priv, struct kgd_mem **mem,
1712 		uint64_t *offset, uint32_t flags, bool criu_resume)
1713 {
1714 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1715 	struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
1716 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1717 	struct sg_table *sg = NULL;
1718 	uint64_t user_addr = 0;
1719 	struct amdgpu_bo *bo;
1720 	struct drm_gem_object *gobj = NULL;
1721 	u32 domain, alloc_domain;
1722 	uint64_t aligned_size;
1723 	int8_t xcp_id = -1;
1724 	u64 alloc_flags;
1725 	int ret;
1726 
1727 	/*
1728 	 * Check on which domain to allocate BO
1729 	 */
1730 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1731 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1732 
1733 		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
1734 			domain = AMDGPU_GEM_DOMAIN_GTT;
1735 			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1736 			alloc_flags = 0;
1737 		} else {
1738 			alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1739 			alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1740 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1741 
1742 			/* For contiguous VRAM allocation */
1743 			if (flags & KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS)
1744 				alloc_flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1745 		}
1746 		xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
1747 					0 : fpriv->xcp_id;
1748 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1749 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1750 		alloc_flags = 0;
1751 	} else {
1752 		domain = AMDGPU_GEM_DOMAIN_GTT;
1753 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1754 		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1755 
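		/* Userptr, doorbell and MMIO BOs are backed by host memory
		 * and only GART-mapped; create them in the CPU domain and
		 * let validation migrate them.
		 */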
1756 		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1757 			if (!offset || !*offset)
1758 				return -EINVAL;
1759 			user_addr = untagged_addr(*offset);
1760 		} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1761 				    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1762 			bo_type = ttm_bo_type_sg;
1763 			if (size > UINT_MAX)
1764 				return -EINVAL;
1765 			sg = create_sg_table(*offset, size);
1766 			if (!sg)
1767 				return -ENOMEM;
1768 		} else {
1769 			return -EINVAL;
1770 		}
1771 	}
1772 
1773 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
1774 		alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
1775 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
1776 		alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
1777 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
1778 		alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
1779 
1780 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1781 	if (!*mem) {
1782 		ret = -ENOMEM;
1783 		goto err;
1784 	}
1785 	INIT_LIST_HEAD(&(*mem)->attachments);
1786 	mutex_init(&(*mem)->lock);
1787 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1788 
1789 	/* Workaround for AQL queue wraparound bug. Map the same
1790 	 * memory twice. That means we only actually allocate half
1791 	 * the memory.
1792 	 */
1793 	if ((*mem)->aql_queue)
1794 		size >>= 1;
1795 	aligned_size = PAGE_ALIGN(size);
1796 
1797 	(*mem)->alloc_flags = flags;
1798 
1799 	amdgpu_sync_create(&(*mem)->sync);
1800 
1801 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
1802 					      xcp_id);
1803 	if (ret) {
1804 		pr_debug("Insufficient memory\n");
1805 		goto err_reserve_limit;
1806 	}
1807 
1808 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
1809 		 va, (*mem)->aql_queue ? size << 1 : size,
1810 		 domain_string(alloc_domain), xcp_id);
1811 
1812 	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
1813 				       bo_type, NULL, &gobj, xcp_id + 1);
1814 	if (ret) {
1815 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1816 			 domain_string(alloc_domain), ret);
1817 		goto err_bo_create;
1818 	}
1819 	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1820 	if (ret) {
1821 		pr_debug("Failed to allow vma node access. ret %d\n", ret);
1822 		goto err_node_allow;
1823 	}
1824 	ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
1825 	if (ret)
1826 		goto err_gem_handle_create;
1827 	bo = gem_to_amdgpu_bo(gobj);
1828 	if (bo_type == ttm_bo_type_sg) {
1829 		bo->tbo.sg = sg;
1830 		bo->tbo.ttm->sg = sg;
1831 	}
1832 	bo->kfd_bo = *mem;
1833 	(*mem)->bo = bo;
1834 	if (user_addr)
1835 		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1836 
1837 	(*mem)->va = va;
1838 	(*mem)->domain = domain;
1839 	(*mem)->mapped_to_gpu_memory = 0;
1840 	(*mem)->process_info = avm->process_info;
1841 
1842 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1843 
1844 	if (user_addr) {
1845 		pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
1846 		ret = init_user_pages(*mem, user_addr, criu_resume);
1847 		if (ret)
1848 			goto allocate_init_user_pages_failed;
1849 	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1850 				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1851 		ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1852 		if (ret) {
1853 			pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1854 			goto err_pin_bo;
1855 		}
1856 		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1857 		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
1858 	} else {
1859 		mutex_lock(&avm->process_info->lock);
1860 		if (avm->process_info->eviction_fence &&
1861 		    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
1862 			ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
1863 				&avm->process_info->eviction_fence->base);
1864 		mutex_unlock(&avm->process_info->lock);
1865 		if (ret)
1866 			goto err_validate_bo;
1867 	}
1868 
1869 	if (offset)
1870 		*offset = amdgpu_bo_mmap_offset(bo);
1871 
1872 	return 0;
1873 
1874 allocate_init_user_pages_failed:
1875 err_pin_bo:
1876 err_validate_bo:
1877 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1878 	drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
1879 err_gem_handle_create:
1880 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1881 err_node_allow:
1882 	/* Don't unreserve system mem limit twice */
1883 	goto err_reserve_limit;
1884 err_bo_create:
1885 	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
1886 err_reserve_limit:
1887 	amdgpu_sync_free(&(*mem)->sync);
1888 	mutex_destroy(&(*mem)->lock);
1889 	if (gobj)
1890 		drm_gem_object_put(gobj);
1891 	else
1892 		kfree(*mem);
1893 err:
1894 	if (sg) {
1895 		sg_free_table(sg);
1896 		kfree(sg);
1897 	}
1898 	return ret;
1899 }
1900 
1901 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1902 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1903 		uint64_t *size)
1904 {
1905 	struct amdkfd_process_info *process_info = mem->process_info;
1906 	unsigned long bo_size = mem->bo->tbo.base.size;
1907 	bool use_release_notifier = (mem->bo->kfd_bo == mem);
1908 	struct kfd_mem_attachment *entry, *tmp;
1909 	struct bo_vm_reservation_context ctx;
1910 	unsigned int mapped_to_gpu_memory;
1911 	int ret;
1912 	bool is_imported = false;
1913 
1914 	mutex_lock(&mem->lock);
1915 
1916 	/* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
1917 	if (mem->alloc_flags &
1918 	    (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1919 	     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1920 		amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1921 	}
1922 
1923 	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1924 	is_imported = mem->is_imported;
1925 	mutex_unlock(&mem->lock);
1926 	/* lock is not needed after this, since mem is unused and will
1927 	 * be freed anyway
1928 	 */
1929 
1930 	if (mapped_to_gpu_memory > 0) {
1931 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1932 				mem->va, bo_size);
1933 		return -EBUSY;
1934 	}
1935 
1936 	/* Make sure restore workers don't access the BO any more */
1937 	mutex_lock(&process_info->lock);
1938 	list_del(&mem->validate_list);
1939 	mutex_unlock(&process_info->lock);
1940 
1941 	/* Cleanup user pages and MMU notifiers */
1942 	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
1943 		amdgpu_hmm_unregister(mem->bo);
1944 		mutex_lock(&process_info->notifier_lock);
1945 		amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
1946 		mutex_unlock(&process_info->notifier_lock);
1947 	}
1948 
1949 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1950 	if (unlikely(ret))
1951 		return ret;
1952 
1953 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1954 					process_info->eviction_fence);
1955 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1956 		mem->va + bo_size * (1 + mem->aql_queue));
1957 
1958 	/* Remove from VM internal data structures */
1959 	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
1960 		kfd_mem_dmaunmap_attachment(mem, entry);
1961 		kfd_mem_detach(entry);
1962 	}
1963 
1964 	ret = unreserve_bo_and_vms(&ctx, false, false);
1965 
1966 	/* Free the sync object */
1967 	amdgpu_sync_free(&mem->sync);
1968 
1969 	/* If the SG is not NULL, it's one we created for a doorbell or MMIO
1970 	 * remap BO. We need to free it.
1971 	 */
1972 	if (mem->bo->tbo.sg) {
1973 		sg_free_table(mem->bo->tbo.sg);
1974 		kfree(mem->bo->tbo.sg);
1975 	}
1976 
1977 	/* Update the size of the BO being freed if it was allocated from
1978 	 * VRAM and is not imported. On APP APUs, VRAM allocations are done
1979 	 * in the GTT domain
1980 	 */
1981 	if (size) {
1982 		if (!is_imported &&
1983 		   (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
1984 		   ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
1985 		    mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
1986 			*size = bo_size;
1987 		else
1988 			*size = 0;
1989 	}
1990 
1991 	/* Free the BO */
1992 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1993 	drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
1994 	if (mem->dmabuf) {
1995 		dma_buf_put(mem->dmabuf);
1996 		mem->dmabuf = NULL;
1997 	}
1998 	mutex_destroy(&mem->lock);
1999 
2000 	/* If this releases the last reference, it will end up calling
2001 	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
2002 	 * this needs to be the last call here.
2003 	 */
2004 	drm_gem_object_put(&mem->bo->tbo.base);
2005 
2006 	/*
2007 	 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
2008 	 * explicitly free it here.
2009 	 */
2010 	if (!use_release_notifier)
2011 		kfree(mem);
2012 
2013 	return ret;
2014 }
2015 
2016 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
2017 		struct amdgpu_device *adev, struct kgd_mem *mem,
2018 		void *drm_priv)
2019 {
2020 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2021 	int ret;
2022 	struct amdgpu_bo *bo;
2023 	uint32_t domain;
2024 	struct kfd_mem_attachment *entry;
2025 	struct bo_vm_reservation_context ctx;
2026 	unsigned long bo_size;
2027 	bool is_invalid_userptr = false;
2028 
2029 	bo = mem->bo;
2030 	if (!bo) {
2031 		pr_err("Invalid BO when mapping memory to GPU\n");
2032 		return -EINVAL;
2033 	}
2034 
2035 	/* Make sure restore is not running concurrently. Since we
2036 	 * don't map invalid userptr BOs, we rely on the next restore
2037 	 * worker to do the mapping
2038 	 */
2039 	mutex_lock(&mem->process_info->lock);
2040 
2041 	/* Lock notifier lock. If we find an invalid userptr BO, we can be
2042 	 * sure that the MMU notifier is no longer running
2043 	 * concurrently and the queues are actually stopped
2044 	 */
2045 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2046 		mutex_lock(&mem->process_info->notifier_lock);
2047 		is_invalid_userptr = !!mem->invalid;
2048 		mutex_unlock(&mem->process_info->notifier_lock);
2049 	}
2050 
2051 	mutex_lock(&mem->lock);
2052 
2053 	domain = mem->domain;
2054 	bo_size = bo->tbo.base.size;
2055 
2056 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
2057 			mem->va,
2058 			mem->va + bo_size * (1 + mem->aql_queue),
2059 			avm, domain_string(domain));
2060 
2061 	if (!kfd_mem_is_attached(avm, mem)) {
2062 		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
2063 		if (ret)
2064 			goto out;
2065 	}
2066 
2067 	ret = reserve_bo_and_vm(mem, avm, &ctx);
2068 	if (unlikely(ret))
2069 		goto out;
2070 
2071 	/* Userptr can be marked as "not invalid", but not actually be
2072 	 * validated yet (still in the system domain). In that case
2073 	 * the queues are still stopped and we can leave mapping for
2074 	 * the next restore worker
2075 	 */
2076 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
2077 	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
2078 		is_invalid_userptr = true;
2079 
2080 	ret = vm_validate_pt_pd_bos(avm, NULL);
2081 	if (unlikely(ret))
2082 		goto out_unreserve;
2083 
2084 	list_for_each_entry(entry, &mem->attachments, list) {
2085 		if (entry->bo_va->base.vm != avm || entry->is_mapped)
2086 			continue;
2087 
2088 		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
2089 			 entry->va, entry->va + bo_size, entry);
2090 
2091 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
2092 				      is_invalid_userptr);
2093 		if (ret) {
2094 			pr_err("Failed to map bo to gpuvm\n");
2095 			goto out_unreserve;
2096 		}
2097 
2098 		ret = vm_update_pds(avm, ctx.sync);
2099 		if (ret) {
2100 			pr_err("Failed to update page directories\n");
2101 			goto out_unreserve;
2102 		}
2103 
2104 		entry->is_mapped = true;
2105 		mem->mapped_to_gpu_memory++;
2106 		pr_debug("\t INC mapping count %d\n",
2107 			 mem->mapped_to_gpu_memory);
2108 	}
2109 
2110 	ret = unreserve_bo_and_vms(&ctx, false, false);
2111 
2112 	goto out;
2113 
2114 out_unreserve:
2115 	unreserve_bo_and_vms(&ctx, false, false);
2116 out:
2117 	mutex_unlock(&mem->process_info->lock);
2118 	mutex_unlock(&mem->lock);
2119 	return ret;
2120 }
2121 
2122 int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
2123 {
2124 	struct kfd_mem_attachment *entry;
2125 	struct amdgpu_vm *vm;
2126 	int ret;
2127 
2128 	vm = drm_priv_to_vm(drm_priv);
2129 
2130 	mutex_lock(&mem->lock);
2131 
2132 	ret = amdgpu_bo_reserve(mem->bo, true);
2133 	if (ret)
2134 		goto out;
2135 
2136 	list_for_each_entry(entry, &mem->attachments, list) {
2137 		if (entry->bo_va->base.vm != vm)
2138 			continue;
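		/* Skip attachments whose TTM has no SG table, i.e. that
		 * were never DMA-mapped for this device.
		 */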
2139 		if (entry->bo_va->base.bo->tbo.ttm &&
2140 		    !entry->bo_va->base.bo->tbo.ttm->sg)
2141 			continue;
2142 
2143 		kfd_mem_dmaunmap_attachment(mem, entry);
2144 	}
2145 
2146 	amdgpu_bo_unreserve(mem->bo);
2147 out:
2148 	mutex_unlock(&mem->lock);
2149 
2150 	return ret;
2151 }
2152 
2153 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
2154 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
2155 {
2156 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2157 	unsigned long bo_size = mem->bo->tbo.base.size;
2158 	struct kfd_mem_attachment *entry;
2159 	struct bo_vm_reservation_context ctx;
2160 	int ret;
2161 
2162 	mutex_lock(&mem->lock);
2163 
2164 	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2165 	if (unlikely(ret))
2166 		goto out;
2167 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
2168 	if (ctx.n_vms == 0) {
2169 		ret = -EINVAL;
2170 		goto unreserve_out;
2171 	}
2172 
2173 	ret = vm_validate_pt_pd_bos(avm, NULL);
2174 	if (unlikely(ret))
2175 		goto unreserve_out;
2176 
2177 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
2178 		mem->va,
2179 		mem->va + bo_size * (1 + mem->aql_queue),
2180 		avm);
2181 
2182 	list_for_each_entry(entry, &mem->attachments, list) {
2183 		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
2184 			continue;
2185 
2186 		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
2187 			 entry->va, entry->va + bo_size, entry);
2188 
2189 		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
2190 		entry->is_mapped = false;
2191 
2192 		mem->mapped_to_gpu_memory--;
2193 		pr_debug("\t DEC mapping count %d\n",
2194 			 mem->mapped_to_gpu_memory);
2195 	}
2196 
2197 unreserve_out:
2198 	unreserve_bo_and_vms(&ctx, false, false);
2199 out:
2200 	mutex_unlock(&mem->lock);
2201 	return ret;
2202 }
2203 
2204 int amdgpu_amdkfd_gpuvm_sync_memory(
2205 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
2206 {
2207 	struct amdgpu_sync sync;
2208 	int ret;
2209 
2210 	amdgpu_sync_create(&sync);
2211 
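	/* Clone the fences under the lock and wait on the clone, so new
	 * fences can still be added to mem->sync concurrently.
	 */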
2212 	mutex_lock(&mem->lock);
2213 	amdgpu_sync_clone(&mem->sync, &sync);
2214 	mutex_unlock(&mem->lock);
2215 
2216 	ret = amdgpu_sync_wait(&sync, intr);
2217 	amdgpu_sync_free(&sync);
2218 	return ret;
2219 }
2220 
2221 /**
2222  * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
2223  * @bo: Buffer object to be mapped
2224  *
2225  * Before returning, the BO's reference count is incremented. To release the
2226  * reference and unpin/unmap the BO, call amdgpu_amdkfd_free_gtt_mem().
2227  */
2228 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
2229 {
2230 	int ret;
2231 
2232 	ret = amdgpu_bo_reserve(bo, true);
2233 	if (ret) {
2234 		pr_err("Failed to reserve bo. ret %d\n", ret);
2235 		goto err_reserve_bo_failed;
2236 	}
2237 
2238 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2239 	if (ret) {
2240 		pr_err("Failed to pin bo. ret %d\n", ret);
2241 		goto err_pin_bo_failed;
2242 	}
2243 
2244 	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2245 	if (ret) {
2246 		pr_err("Failed to bind bo to GART. ret %d\n", ret);
2247 		goto err_map_bo_gart_failed;
2248 	}
2249 
2250 	amdgpu_amdkfd_remove_eviction_fence(
2251 		bo, bo->vm_bo->vm->process_info->eviction_fence);
2252 
2253 	amdgpu_bo_unreserve(bo);
2254 
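	/* Hold an extra reference for the caller; it is dropped together
	 * with the pin in amdgpu_amdkfd_free_gtt_mem().
	 */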
2255 	bo = amdgpu_bo_ref(bo);
2256 
2257 	return 0;
2258 
2259 err_map_bo_gart_failed:
2260 	amdgpu_bo_unpin(bo);
2261 err_pin_bo_failed:
2262 	amdgpu_bo_unreserve(bo);
2263 err_reserve_bo_failed:
2264 
2265 	return ret;
2266 }
2267 
2268 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
2269  *
2270  * @mem: Buffer object to be mapped for CPU access
2271  * @kptr: [out] pointer in kernel CPU address space
2272  * @size: [out] size of the buffer
2273  *
2274  * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
2275  * from the BO, since pinned BOs cannot be evicted. The bo must remain on the
2276  * validate_list, so the GPU mapping can be restored after a page table was
2277  * evicted.
2278  *
2279  * Return: 0 on success, error code on failure
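 *
 * A typical caller (sketch) pairs this with the unmap helper below:
 *
 *	void *kptr;
 *	uint64_t size;
 *
 *	if (!amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kptr, &size)) {
 *		... access the buffer through kptr ...
 *		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
 *	}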
2280  */
2281 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
2282 					     void **kptr, uint64_t *size)
2283 {
2284 	int ret;
2285 	struct amdgpu_bo *bo = mem->bo;
2286 
2287 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2288 		pr_err("userptr can't be mapped to kernel\n");
2289 		return -EINVAL;
2290 	}
2291 
2292 	mutex_lock(&mem->process_info->lock);
2293 
2294 	ret = amdgpu_bo_reserve(bo, true);
2295 	if (ret) {
2296 		pr_err("Failed to reserve bo. ret %d\n", ret);
2297 		goto bo_reserve_failed;
2298 	}
2299 
2300 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2301 	if (ret) {
2302 		pr_err("Failed to pin bo. ret %d\n", ret);
2303 		goto pin_failed;
2304 	}
2305 
2306 	ret = amdgpu_bo_kmap(bo, kptr);
2307 	if (ret) {
2308 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
2309 		goto kmap_failed;
2310 	}
2311 
2312 	amdgpu_amdkfd_remove_eviction_fence(
2313 		bo, mem->process_info->eviction_fence);
2314 
2315 	if (size)
2316 		*size = amdgpu_bo_size(bo);
2317 
2318 	amdgpu_bo_unreserve(bo);
2319 
2320 	mutex_unlock(&mem->process_info->lock);
2321 	return 0;
2322 
2323 kmap_failed:
2324 	amdgpu_bo_unpin(bo);
2325 pin_failed:
2326 	amdgpu_bo_unreserve(bo);
2327 bo_reserve_failed:
2328 	mutex_unlock(&mem->process_info->lock);
2329 
2330 	return ret;
2331 }
2332 
2333 /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
2334  *
2335  * @mem: Buffer object to be unmapped for CPU access
2336  *
2337  * Removes the kernel CPU mapping and unpins the BO. It does not restore the
2338  * eviction fence, so this function should only be used for cleanup before the
2339  * BO is destroyed.
2340  */
2341 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
2342 {
2343 	struct amdgpu_bo *bo = mem->bo;
2344 
2345 	amdgpu_bo_reserve(bo, true);
2346 	amdgpu_bo_kunmap(bo);
2347 	amdgpu_bo_unpin(bo);
2348 	amdgpu_bo_unreserve(bo);
2349 }
2350 
2351 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
2352 					  struct kfd_vm_fault_info *mem)
2353 {
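	/* The GMC interrupt handler latches the fault record and sets the
	 * updated flag; consume the record and clear the flag so the next
	 * fault can be captured.
	 */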
2354 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
2355 		*mem = *adev->gmc.vm_fault_info;
2356 		mb(); /* make sure read happened */
2357 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
2358 	}
2359 	return 0;
2360 }
2361 
2362 static int import_obj_create(struct amdgpu_device *adev,
2363 			     struct dma_buf *dma_buf,
2364 			     struct drm_gem_object *obj,
2365 			     uint64_t va, void *drm_priv,
2366 			     struct kgd_mem **mem, uint64_t *size,
2367 			     uint64_t *mmap_offset)
2368 {
2369 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2370 	struct amdgpu_bo *bo;
2371 	int ret;
2372 
2373 	bo = gem_to_amdgpu_bo(obj);
2374 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
2375 				    AMDGPU_GEM_DOMAIN_GTT)))
2376 		/* Only VRAM and GTT BOs are supported */
2377 		return -EINVAL;
2378 
2379 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2380 	if (!*mem)
2381 		return -ENOMEM;
2382 
2383 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2384 	if (ret)
2385 		goto err_free_mem;
2386 
2387 	if (size)
2388 		*size = amdgpu_bo_size(bo);
2389 
2390 	if (mmap_offset)
2391 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
2392 
2393 	INIT_LIST_HEAD(&(*mem)->attachments);
2394 	mutex_init(&(*mem)->lock);
2395 
2396 	(*mem)->alloc_flags =
2397 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2398 		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
2399 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
2400 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
2401 
2402 	get_dma_buf(dma_buf);
2403 	(*mem)->dmabuf = dma_buf;
2404 	(*mem)->bo = bo;
2405 	(*mem)->va = va;
2406 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
2407 			 !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
2408 			 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2409 
2410 	(*mem)->mapped_to_gpu_memory = 0;
2411 	(*mem)->process_info = avm->process_info;
2412 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
2413 	amdgpu_sync_create(&(*mem)->sync);
2414 	(*mem)->is_imported = true;
2415 
2416 	mutex_lock(&avm->process_info->lock);
2417 	if (avm->process_info->eviction_fence &&
2418 	    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
2419 		ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
2420 				&avm->process_info->eviction_fence->base);
2421 	mutex_unlock(&avm->process_info->lock);
2422 	if (ret)
2423 		goto err_remove_mem;
2424 
2425 	return 0;
2426 
2427 err_remove_mem:
2428 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
2429 	drm_vma_node_revoke(&obj->vma_node, drm_priv);
2430 err_free_mem:
2431 	kfree(*mem);
2432 	return ret;
2433 }
2434 
2435 int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
2436 					 uint64_t va, void *drm_priv,
2437 					 struct kgd_mem **mem, uint64_t *size,
2438 					 uint64_t *mmap_offset)
2439 {
2440 	struct drm_gem_object *obj;
2441 	uint32_t handle;
2442 	int ret;
2443 
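	/* Import the DMABuf as a GEM handle in the KFD DRM client; the
	 * handle keeps the object alive for the lifetime of the kgd_mem.
	 */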
2444 	ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
2445 					 &handle);
2446 	if (ret)
2447 		return ret;
2448 	obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
2449 	if (!obj) {
2450 		ret = -EINVAL;
2451 		goto err_release_handle;
2452 	}
2453 
2454 	ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
2455 				mmap_offset);
2456 	if (ret)
2457 		goto err_put_obj;
2458 
2459 	(*mem)->gem_handle = handle;
2460 
2461 	return 0;
2462 
2463 err_put_obj:
2464 	drm_gem_object_put(obj);
2465 err_release_handle:
2466 	drm_gem_handle_delete(adev->kfd.client.file, handle);
2467 	return ret;
2468 }
2469 
2470 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
2471 				      struct dma_buf **dma_buf)
2472 {
2473 	int ret;
2474 
2475 	mutex_lock(&mem->lock);
2476 	ret = kfd_mem_export_dmabuf(mem);
2477 	if (ret)
2478 		goto out;
2479 
2480 	get_dma_buf(mem->dmabuf);
2481 	*dma_buf = mem->dmabuf;
2482 out:
2483 	mutex_unlock(&mem->lock);
2484 	return ret;
2485 }
2486 
2487 /* Evict a userptr BO by stopping the queues if necessary
2488  *
2489  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
2490  * cannot do any memory allocations, and cannot take any locks that
2491  * are held elsewhere while allocating memory.
2492  *
2493  * It doesn't do anything to the BO itself. The real work happens in
2494  * restore, where we get updated page addresses. This function only
2495  * ensures that GPU access to the BO is stopped.
2496  */
2497 int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
2498 				unsigned long cur_seq, struct kgd_mem *mem)
2499 {
2500 	struct amdkfd_process_info *process_info = mem->process_info;
2501 	int r = 0;
2502 
2503 	/* Do not process MMU notifications during CRIU restore until
2504 	 * KFD_CRIU_OP_RESUME IOCTL is received
2505 	 */
2506 	if (READ_ONCE(process_info->block_mmu_notifications))
2507 		return 0;
2508 
2509 	mutex_lock(&process_info->notifier_lock);
2510 	mmu_interval_set_seq(mni, cur_seq);
2511 
2512 	mem->invalid++;
2513 	if (++process_info->evicted_bos == 1) {
2514 		/* First eviction, stop the queues */
2515 		r = kgd2kfd_quiesce_mm(mni->mm,
2516 				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
2517 		if (r)
2518 			pr_err("Failed to quiesce KFD\n");
2519 		queue_delayed_work(system_freezable_wq,
2520 			&process_info->restore_userptr_work,
2521 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2522 	}
2523 	mutex_unlock(&process_info->notifier_lock);
2524 
2525 	return r;
2526 }
2527 
2528 /* Update invalid userptr BOs
2529  *
2530  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2531  * userptr_inval_list and updates user pages for all BOs that have
2532  * been invalidated since their last update.
2533  */
2534 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2535 				     struct mm_struct *mm)
2536 {
2537 	struct kgd_mem *mem, *tmp_mem;
2538 	struct amdgpu_bo *bo;
2539 	struct ttm_operation_ctx ctx = { false, false };
2540 	uint32_t invalid;
2541 	int ret = 0;
2542 
2543 	mutex_lock(&process_info->notifier_lock);
2544 
2545 	/* Move all invalidated BOs to the userptr_inval_list */
2546 	list_for_each_entry_safe(mem, tmp_mem,
2547 				 &process_info->userptr_valid_list,
2548 				 validate_list)
2549 		if (mem->invalid)
2550 			list_move_tail(&mem->validate_list,
2551 				       &process_info->userptr_inval_list);
2552 
2553 	/* Go through userptr_inval_list and update any invalid user_pages */
2554 	list_for_each_entry(mem, &process_info->userptr_inval_list,
2555 			    validate_list) {
2556 		invalid = mem->invalid;
2557 		if (!invalid)
2558 			/* BO hasn't been invalidated since the last
2559 			 * revalidation attempt. Keep its page list.
2560 			 */
2561 			continue;
2562 
2563 		bo = mem->bo;
2564 
2565 		amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
2566 		mem->range = NULL;
2567 
2568 		/* BO reservations and getting user pages (hmm_range_fault)
2569 		 * must happen outside the notifier lock
2570 		 */
2571 		mutex_unlock(&process_info->notifier_lock);
2572 
2573 		/* Move the BO to system (CPU) domain if necessary to unmap
2574 		 * and free the SG table
2575 		 */
2576 		if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
2577 			if (amdgpu_bo_reserve(bo, true))
2578 				return -EAGAIN;
2579 			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2580 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2581 			amdgpu_bo_unreserve(bo);
2582 			if (ret) {
2583 				pr_err("%s: Failed to invalidate userptr BO\n",
2584 				       __func__);
2585 				return -EAGAIN;
2586 			}
2587 		}
2588 
2589 		/* Get updated user pages */
2590 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
2591 						   &mem->range);
2592 		if (ret) {
2593 			pr_debug("Failed %d to get user pages\n", ret);
2594 
2595 			/* Treat -EFAULT (bad address) as success. It will
2596 			 * fail later with a VM fault if the GPU tries to access
2597 			 * it, which is better than hanging indefinitely with
2598 			 * stalled user mode queues.
2599 			 *
2600 			 * Return other errors (-EBUSY, -ENOMEM) to retry the restore
2601 			 */
2602 			if (ret != -EFAULT)
2603 				return ret;
2604 
2605 			ret = 0;
2606 		}
2607 
2608 		mutex_lock(&process_info->notifier_lock);
2609 
2610 		/* Mark the BO as valid unless it was invalidated
2611 		 * again concurrently.
2612 		 */
2613 		if (mem->invalid != invalid) {
2614 			ret = -EAGAIN;
2615 			goto unlock_out;
2616 		}
2617 		/* Set mem valid if it has an hmm range associated */
2618 		if (mem->range)
2619 			mem->invalid = 0;
2620 	}
2621 
2622 unlock_out:
2623 	mutex_unlock(&process_info->notifier_lock);
2624 
2625 	return ret;
2626 }
2627 
2628 /* Validate invalid userptr BOs
2629  *
2630  * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
2631  * with new page addresses and waits for the page table updates to complete.
2632  */
2633 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2634 {
2635 	struct ttm_operation_ctx ctx = { false, false };
2636 	struct amdgpu_sync sync;
2637 	struct drm_exec exec;
2638 
2639 	struct amdgpu_vm *peer_vm;
2640 	struct kgd_mem *mem, *tmp_mem;
2641 	struct amdgpu_bo *bo;
2642 	int ret;
2643 
2644 	amdgpu_sync_create(&sync);
2645 
2646 	drm_exec_init(&exec, 0, 0);
2647 	/* Reserve all BOs and page tables for validation */
2648 	drm_exec_until_all_locked(&exec) {
2649 		/* Reserve all the page directories */
2650 		list_for_each_entry(peer_vm, &process_info->vm_list_head,
2651 				    vm_list_node) {
2652 			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2653 			drm_exec_retry_on_contention(&exec);
2654 			if (unlikely(ret))
2655 				goto unreserve_out;
2656 		}
2657 
2658 		/* Reserve the userptr_inval_list entries to resv_list */
2659 		list_for_each_entry(mem, &process_info->userptr_inval_list,
2660 				    validate_list) {
2661 			struct drm_gem_object *gobj;
2662 
2663 			gobj = &mem->bo->tbo.base;
2664 			ret = drm_exec_prepare_obj(&exec, gobj, 1);
2665 			drm_exec_retry_on_contention(&exec);
2666 			if (unlikely(ret))
2667 				goto unreserve_out;
2668 		}
2669 	}
2670 
2671 	ret = process_validate_vms(process_info, NULL);
2672 	if (ret)
2673 		goto unreserve_out;
2674 
2675 	/* Validate BOs and update GPUVM page tables */
2676 	list_for_each_entry_safe(mem, tmp_mem,
2677 				 &process_info->userptr_inval_list,
2678 				 validate_list) {
2679 		struct kfd_mem_attachment *attachment;
2680 
2681 		bo = mem->bo;
2682 
2683 		/* Validate the BO if we got user pages */
2684 		if (bo->tbo.ttm->pages[0]) {
2685 			amdgpu_bo_placement_from_domain(bo, mem->domain);
2686 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2687 			if (ret) {
2688 				pr_err("%s: failed to validate BO\n", __func__);
2689 				goto unreserve_out;
2690 			}
2691 		}
2692 
2693 		/* Update mapping. If the BO was not validated
2694 		 * (because we couldn't get user pages), this will
2695 		 * clear the page table entries, which will result in
2696 		 * VM faults if the GPU tries to access the invalid
2697 		 * memory.
2698 		 */
2699 		list_for_each_entry(attachment, &mem->attachments, list) {
2700 			if (!attachment->is_mapped)
2701 				continue;
2702 
2703 			kfd_mem_dmaunmap_attachment(mem, attachment);
2704 			ret = update_gpuvm_pte(mem, attachment, &sync);
2705 			if (ret) {
2706 				pr_err("%s: update PTE failed\n", __func__);
2707 				/* make sure this gets validated again */
2708 				mutex_lock(&process_info->notifier_lock);
2709 				mem->invalid++;
2710 				mutex_unlock(&process_info->notifier_lock);
2711 				goto unreserve_out;
2712 			}
2713 		}
2714 	}
2715 
2716 	/* Update page directories */
2717 	ret = process_update_pds(process_info, &sync);
2718 
2719 unreserve_out:
2720 	drm_exec_fini(&exec);
2721 	amdgpu_sync_wait(&sync, false);
2722 	amdgpu_sync_free(&sync);
2723 
2724 	return ret;
2725 }
2726 
2727 /* Confirm that all user pages are valid while holding the notifier lock
2728  *
2729  * Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
2730  */
2731 static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
2732 {
2733 	struct kgd_mem *mem, *tmp_mem;
2734 	int ret = 0;
2735 
2736 	list_for_each_entry_safe(mem, tmp_mem,
2737 				 &process_info->userptr_inval_list,
2738 				 validate_list) {
2739 		bool valid;
2740 
2741 		/* Keep mems without an hmm range on the userptr_inval_list */
2742 		if (!mem->range)
2743 			continue;
2744 
2745 		/* Only check mem with hmm range associated */
2746 		valid = amdgpu_ttm_tt_get_user_pages_done(
2747 					mem->bo->tbo.ttm, mem->range);
2748 
2749 		mem->range = NULL;
2750 		if (!valid) {
2751 			WARN(!mem->invalid, "Invalid BO not marked invalid");
2752 			ret = -EAGAIN;
2753 			continue;
2754 		}
2755 
2756 		if (mem->invalid) {
2757 			WARN(1, "Valid BO is marked invalid");
2758 			ret = -EAGAIN;
2759 			continue;
2760 		}
2761 
2762 		list_move_tail(&mem->validate_list,
2763 			       &process_info->userptr_valid_list);
2764 	}
2765 
2766 	return ret;
2767 }
2768 
2769 /* Worker callback to restore evicted userptr BOs
2770  *
2771  * Tries to update and validate all userptr BOs. If successful and no
2772  * concurrent evictions happened, the queues are restarted. Otherwise,
2773  * reschedule for another attempt later.
2774  */
2775 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2776 {
2777 	struct delayed_work *dwork = to_delayed_work(work);
2778 	struct amdkfd_process_info *process_info =
2779 		container_of(dwork, struct amdkfd_process_info,
2780 			     restore_userptr_work);
2781 	struct task_struct *usertask;
2782 	struct mm_struct *mm;
2783 	uint32_t evicted_bos;
2784 
2785 	mutex_lock(&process_info->notifier_lock);
2786 	evicted_bos = process_info->evicted_bos;
2787 	mutex_unlock(&process_info->notifier_lock);
2788 	if (!evicted_bos)
2789 		return;
2790 
2791 	/* Reference task and mm in case of concurrent process termination */
2792 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2793 	if (!usertask)
2794 		return;
2795 	mm = get_task_mm(usertask);
2796 	if (!mm) {
2797 		put_task_struct(usertask);
2798 		return;
2799 	}
2800 
2801 	mutex_lock(&process_info->lock);
2802 
2803 	if (update_invalid_user_pages(process_info, mm))
2804 		goto unlock_out;
2805 	/* userptr_inval_list can be empty if all evicted userptr BOs
2806 	 * have been freed. In that case there is nothing to validate
2807 	 * and we can just restart the queues.
2808 	 */
2809 	if (!list_empty(&process_info->userptr_inval_list)) {
2810 		if (validate_invalid_user_pages(process_info))
2811 			goto unlock_out;
2812 	}
2813 	/* Final check for concurrent eviction and atomic update. If
2814 	 * another eviction happens after successful update, it will
2815 	 * be a first eviction that calls quiesce_mm. The eviction
2816 	 * reference counting inside KFD will handle this case.
2817 	 */
2818 	mutex_lock(&process_info->notifier_lock);
2819 	if (process_info->evicted_bos != evicted_bos)
2820 		goto unlock_notifier_out;
2821 
2822 	if (confirm_valid_user_pages_locked(process_info)) {
2823 		WARN(1, "User pages unexpectedly invalid");
2824 		goto unlock_notifier_out;
2825 	}
2826 
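	/* All userptr BOs are valid again. Clear the counter and the local
	 * copy so the rescheduling check at the end sees success.
	 */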
2827 	process_info->evicted_bos = evicted_bos = 0;
2828 
2829 	if (kgd2kfd_resume_mm(mm)) {
2830 		pr_err("%s: Failed to resume KFD\n", __func__);
2831 		/* No recovery from this failure. Probably the CP is
2832 		 * hanging. No point trying again.
2833 		 */
2834 	}
2835 
2836 unlock_notifier_out:
2837 	mutex_unlock(&process_info->notifier_lock);
2838 unlock_out:
2839 	mutex_unlock(&process_info->lock);
2840 
2841 	/* If validation failed, reschedule another attempt */
2842 	if (evicted_bos) {
2843 		queue_delayed_work(system_freezable_wq,
2844 			&process_info->restore_userptr_work,
2845 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2846 
2847 		kfd_smi_event_queue_restore_rescheduled(mm);
2848 	}
2849 	mmput(mm);
2850 	put_task_struct(usertask);
2851 }
2852 
2853 static void replace_eviction_fence(struct dma_fence __rcu **ef,
2854 				   struct dma_fence *new_ef)
2855 {
2856 	struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
2857 		/* protected by process_info->lock */);
2858 
2859 	/* If we're replacing an unsignaled eviction fence, that fence will
2860 	 * never be signaled, and if anyone is still waiting on that fence,
2861 	 * they will hang forever. This should never happen. We should only
2862 	 * replace the fence in restore_work that only gets scheduled after
2863 	 * eviction work signaled the fence.
2864 	 */
2865 	WARN_ONCE(!dma_fence_is_signaled(old_ef),
2866 		  "Replacing unsignaled eviction fence");
2867 	dma_fence_put(old_ef);
2868 }
2869 
2870 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2871  *   KFD process identified by process_info
2872  *
2873  * @process_info: amdkfd_process_info of the KFD process
2874  *
2875  * After memory eviction, the restore thread calls this function. The function
2876  * should be called while the process is still valid. BO restore involves:
2877  *
2878  * 1.  Release the old eviction fence and create a new one
2879  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2880  * 3.  Use the second PD list and kfd_bo_list to create a list of BOs that
2881  *     need to be reserved.
2882  * 4.  Reserve all the BOs
2883  * 5.  Validate the PD and PT BOs.
2884  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
2885  * 7.  Add the fence to all PD and PT BOs.
2886  * 8.  Unreserve all BOs
2887  */
2888 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
2889 {
2890 	struct amdkfd_process_info *process_info = info;
2891 	struct amdgpu_vm *peer_vm;
2892 	struct kgd_mem *mem;
2893 	struct list_head duplicate_save;
2894 	struct amdgpu_sync sync_obj;
2895 	unsigned long failed_size = 0;
2896 	unsigned long total_size = 0;
2897 	struct drm_exec exec;
2898 	int ret;
2899 
2900 	INIT_LIST_HEAD(&duplicate_save);
2901 
2902 	mutex_lock(&process_info->lock);
2903 
2904 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
2905 	drm_exec_until_all_locked(&exec) {
2906 		list_for_each_entry(peer_vm, &process_info->vm_list_head,
2907 				    vm_list_node) {
2908 			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2909 			drm_exec_retry_on_contention(&exec);
2910 			if (unlikely(ret)) {
2911 				pr_err("Locking VM PD failed, ret: %d\n", ret);
2912 				goto ttm_reserve_fail;
2913 			}
2914 		}
2915 
2916 		/* Reserve all BOs and page tables/directories. Add all BOs
2917 		 * from kfd_bo_list to the drm_exec reservation
2918 		 */
2919 		list_for_each_entry(mem, &process_info->kfd_bo_list,
2920 				    validate_list) {
2921 			struct drm_gem_object *gobj;
2922 
2923 			gobj = &mem->bo->tbo.base;
2924 			ret = drm_exec_prepare_obj(&exec, gobj, 1);
2925 			drm_exec_retry_on_contention(&exec);
2926 			if (unlikely(ret)) {
2927 				pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
2928 				goto ttm_reserve_fail;
2929 			}
2930 		}
2931 	}
2932 
2933 	amdgpu_sync_create(&sync_obj);
2934 
2935 	/* Validate BOs managed by KFD */
2936 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2937 			    validate_list) {
2938 
2939 		struct amdgpu_bo *bo = mem->bo;
2940 		uint32_t domain = mem->domain;
2941 		struct dma_resv_iter cursor;
2942 		struct dma_fence *fence;
2943 
2944 		total_size += amdgpu_bo_size(bo);
2945 
2946 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2947 		if (ret) {
2948 			pr_debug("Memory eviction: Validate BOs failed\n");
2949 			failed_size += amdgpu_bo_size(bo);
2950 			ret = amdgpu_amdkfd_bo_validate(bo,
2951 						AMDGPU_GEM_DOMAIN_GTT, false);
2952 			if (ret) {
2953 				pr_debug("Memory eviction: Try again\n");
2954 				goto validate_map_fail;
2955 			}
2956 		}
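		/* Collect the kernel (move/clear) fences of the BO so we
		 * wait for its migration to finish before updating PTEs.
		 */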
2957 		dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
2958 					DMA_RESV_USAGE_KERNEL, fence) {
2959 			ret = amdgpu_sync_fence(&sync_obj, fence);
2960 			if (ret) {
2961 				pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2962 				goto validate_map_fail;
2963 			}
2964 		}
2965 	}
2966 
2967 	if (failed_size)
2968 		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2969 
2970 	/* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
2971 	 * validations above would invalidate DMABuf imports again.
2972 	 */
2973 	ret = process_validate_vms(process_info, &exec.ticket);
2974 	if (ret) {
2975 		pr_debug("Validating VMs failed, ret: %d\n", ret);
2976 		goto validate_map_fail;
2977 	}
2978 
2979 	/* Update mappings managed by KFD. */
2980 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2981 			    validate_list) {
2982 		struct kfd_mem_attachment *attachment;
2983 
2984 		list_for_each_entry(attachment, &mem->attachments, list) {
2985 			if (!attachment->is_mapped)
2986 				continue;
2987 
2988 			kfd_mem_dmaunmap_attachment(mem, attachment);
2989 			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2990 			if (ret) {
2991 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2992 				goto validate_map_fail;
2993 			}
2994 		}
2995 	}
2996 
2997 	/* Update mappings not managed by KFD */
2998 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2999 			vm_list_node) {
3000 		struct amdgpu_device *adev = amdgpu_ttm_adev(
3001 			peer_vm->root.bo->tbo.bdev);
3002 
3003 		ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
3004 		if (ret) {
3005 			pr_debug("Memory eviction: handle moved failed. Try again\n");
3006 			goto validate_map_fail;
3007 		}
3008 	}
3009 
3010 	/* Update page directories */
3011 	ret = process_update_pds(process_info, &sync_obj);
3012 	if (ret) {
3013 		pr_debug("Memory eviction: update PDs failed. Try again\n");
3014 		goto validate_map_fail;
3015 	}
3016 
3017 	/* Sync with fences on all the page tables. They implicitly depend on any
3018 	 * move fences from amdgpu_vm_handle_moved above.
3019 	 */
3020 	ret = process_sync_pds_resv(process_info, &sync_obj);
3021 	if (ret) {
3022 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
3023 		goto validate_map_fail;
3024 	}
3025 
3026 	/* Wait for validate and PT updates to finish */
3027 	amdgpu_sync_wait(&sync_obj, false);
3028 
3029 	/* The old eviction fence may be unsignaled if restore happens
3030 	 * after a GPU reset or suspend/resume. Keep the old fence in that
3031 	 * case. Otherwise release the old eviction fence and create new
3032 	 * one, because fence only goes from unsignaled to signaled once
3033 	 * and cannot be reused. Use context and mm from the old fence.
3034 	 *
3035 	 * If an old eviction fence signals after this check, that's OK.
3036 	 * Anyone signaling an eviction fence must stop the queues first
3037 	 * and schedule another restore worker.
3038 	 */
3039 	if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
3040 		struct amdgpu_amdkfd_fence *new_fence =
3041 			amdgpu_amdkfd_fence_create(
3042 				process_info->eviction_fence->base.context,
3043 				process_info->eviction_fence->mm,
3044 				NULL);
3045 
3046 		if (!new_fence) {
3047 			pr_err("Failed to create eviction fence\n");
3048 			ret = -ENOMEM;
3049 			goto validate_map_fail;
3050 		}
3051 		dma_fence_put(&process_info->eviction_fence->base);
3052 		process_info->eviction_fence = new_fence;
3053 		replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
3054 	} else {
3055 		WARN_ONCE(*ef != &process_info->eviction_fence->base,
3056 			  "KFD eviction fence doesn't match KGD process_info");
3057 	}
3058 
3059 	/* Attach new eviction fence to all BOs except pinned ones */
3060 	list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
3061 		if (mem->bo->tbo.pin_count)
3062 			continue;
3063 
3064 		dma_resv_add_fence(mem->bo->tbo.base.resv,
3065 				   &process_info->eviction_fence->base,
3066 				   DMA_RESV_USAGE_BOOKKEEP);
3067 	}
3068 	/* Attach eviction fence to PD / PT BOs and DMABuf imports */
3069 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
3070 			    vm_list_node) {
3071 		struct amdgpu_bo *bo = peer_vm->root.bo;
3072 
3073 		dma_resv_add_fence(bo->tbo.base.resv,
3074 				   &process_info->eviction_fence->base,
3075 				   DMA_RESV_USAGE_BOOKKEEP);
3076 	}
3077 
3078 validate_map_fail:
3079 	amdgpu_sync_free(&sync_obj);
3080 ttm_reserve_fail:
3081 	drm_exec_fini(&exec);
3082 	mutex_unlock(&process_info->lock);
3083 	return ret;
3084 }
3085 
3086 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
3087 {
3088 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
3089 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
3090 	int ret;
3091 
3092 	if (!info || !gws)
3093 		return -EINVAL;
3094 
3095 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
3096 	if (!*mem)
3097 		return -ENOMEM;
3098 
3099 	mutex_init(&(*mem)->lock);
3100 	INIT_LIST_HEAD(&(*mem)->attachments);
3101 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
3102 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
3103 	(*mem)->process_info = process_info;
3104 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
3105 	amdgpu_sync_create(&(*mem)->sync);
3106 
3108 	/* Validate the GWS BO the first time it is added to the process */
3109 	mutex_lock(&(*mem)->process_info->lock);
3110 	ret = amdgpu_bo_reserve(gws_bo, false);
3111 	if (unlikely(ret)) {
3112 		pr_err("Reserve gws bo failed %d\n", ret);
3113 		goto bo_reservation_failure;
3114 	}
3115 
3116 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
3117 	if (ret) {
3118 		pr_err("GWS BO validate failed %d\n", ret);
3119 		goto bo_validation_failure;
3120 	}
3121 	/* The GWS resource is shared between amdgpu and amdkfd.
3122 	 * Add the process eviction fence to the BO so they can
3123 	 * evict each other.
3124 	 */
3125 	ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
3126 	if (ret)
3127 		goto reserve_shared_fail;
3128 	dma_resv_add_fence(gws_bo->tbo.base.resv,
3129 			   &process_info->eviction_fence->base,
3130 			   DMA_RESV_USAGE_BOOKKEEP);
3131 	amdgpu_bo_unreserve(gws_bo);
3132 	mutex_unlock(&(*mem)->process_info->lock);
3133 
3134 	return ret;
3135 
3136 reserve_shared_fail:
3137 bo_validation_failure:
3138 	amdgpu_bo_unreserve(gws_bo);
3139 bo_reservation_failure:
3140 	mutex_unlock(&(*mem)->process_info->lock);
3141 	amdgpu_sync_free(&(*mem)->sync);
3142 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
3143 	amdgpu_bo_unref(&gws_bo);
3144 	mutex_destroy(&(*mem)->lock);
3145 	kfree(*mem);
3146 	*mem = NULL;
3147 	return ret;
3148 }
3149 
3150 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
3151 {
3152 	int ret;
3153 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
3154 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
3155 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
3156 
3157 	/* Remove BO from process's validate list so restore worker won't touch
3158 	 * it anymore
3159 	 */
3160 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
3161 
3162 	ret = amdgpu_bo_reserve(gws_bo, false);
3163 	if (unlikely(ret)) {
3164 		pr_err("Reserve gws bo failed %d\n", ret);
3165 		/* TODO: add the BO back to the validate_list? */
3166 		return ret;
3167 	}
3168 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
3169 			process_info->eviction_fence);
3170 	amdgpu_bo_unreserve(gws_bo);
3171 	amdgpu_sync_free(&kgd_mem->sync);
3172 	amdgpu_bo_unref(&gws_bo);
3173 	mutex_destroy(&kgd_mem->lock);
3174 	kfree(mem);
3175 	return 0;
3176 }
3177 
3178 /* Returns GPU-specific tiling mode information */
3179 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
3180 				struct tile_config *config)
3181 {
3182 	config->gb_addr_config = adev->gfx.config.gb_addr_config;
3183 	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
3184 	config->num_tile_configs =
3185 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
3186 	config->macro_tile_config_ptr =
3187 			adev->gfx.config.macrotile_mode_array;
3188 	config->num_macro_tile_configs =
3189 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
3190 
3191 	/* Those values are not set from GFX9 onwards */
3192 	config->num_banks = adev->gfx.config.num_banks;
3193 	config->num_ranks = adev->gfx.config.num_ranks;
3194 
3195 	return 0;
3196 }
3197 
3198 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
3199 {
3200 	struct kfd_mem_attachment *entry;
3201 
3202 	list_for_each_entry(entry, &mem->attachments, list) {
3203 		if (entry->is_mapped && entry->adev == adev)
3204 			return true;
3205 	}
3206 	return false;
3207 }
3208 
3209 #if defined(CONFIG_DEBUG_FS)
3210 
3211 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
3212 {
3214 	spin_lock(&kfd_mem_limit.mem_limit_lock);
3215 	seq_printf(m, "System mem used %lldM out of %lluM\n",
3216 		  (kfd_mem_limit.system_mem_used >> 20),
3217 		  (kfd_mem_limit.max_system_mem_limit >> 20));
3218 	seq_printf(m, "TTM mem used %lldM out of %lluM\n",
3219 		  (kfd_mem_limit.ttm_mem_used >> 20),
3220 		  (kfd_mem_limit.max_ttm_mem_limit >> 20));
3221 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
3222 
3223 	return 0;
3224 }
3225 
3226 #endif
3227