// SPDX-License-Identifier: MIT
/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_exec.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
#define AMDGPU_RESERVE_MEM_LIMIT	(3UL << 29)

/*
 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K
 * allocations in the tail 2MB BO chunk
 */
#define VRAM_AVAILABLITY_ALIGN (1 << 21)
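
/*
 * Illustrative example (not part of the driver logic): with this
 * alignment, a single 4 KiB VRAM allocation is accounted as
 * ALIGN(4096, 1 << 21) = 2 MiB in vram_used_aligned below, so the
 * reported VRAM availability stays 2 MiB-granular.
 */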

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
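
/*
 * Illustrative example: AMDGPU_GEM_DOMAIN_VRAM is bit 2 (0x4), so
 * ffs(0x4) - 1 == 2 and domain_string(AMDGPU_GEM_DOMAIN_VRAM) yields
 * "VRAM". The macro assumes exactly one domain bit is set; with several
 * bits set, only the lowest one is reported.
 */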

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
				struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/**
 * reuse_dmamap() - Check whether adev can share the original
 * userptr BO
 *
 * If both adev and bo_adev use direct mapping or are in the same iommu
 * group, they can share the original BO: in either case the two devices
 * observe the same DMA addresses, so bo_adev's mapping is valid on adev
 * as well.
 *
 * @adev: Device that wants to share the original BO
 * @bo_adev: Device to which the allocated BO belongs
 *
 * Return: true if adev can share the original userptr BO,
 * false otherwise.
 */
static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
{
	return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
			(adev->dev->iommu_group == bo_adev->dev->iommu_group);
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 63/64 of system RAM, minus the
 *  AMDGPU_RESERVE_MEM_LIMIT reserve (halved instead on small systems)
 *  TTM memory - ttm_tt_pages_limit()
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	if (kfd_mem_limit.max_system_mem_limit)
		return;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
	if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
		kfd_mem_limit.max_system_mem_limit >>= 1;
	else
		kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;

	kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
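
/*
 * Worked example (illustrative): with 64 GiB of low system RAM,
 * mem >> 6 = 1 GiB, so the system limit starts at 63 GiB;
 * AMDGPU_RESERVE_MEM_LIMIT (3UL << 29 = 1.5 GiB) is then subtracted,
 * leaving 61.5 GiB. Only when the initial limit is below
 * 2 * 1.5 GiB = 3 GiB is it halved instead.
 */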

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */

#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
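
/*
 * Worked example (illustrative): mapping 64 GiB of memory reserves
 * 64 GiB >> 14 = 4 MiB for page tables, subject to the
 * AMDGPU_VM_RESERVED_VRAM lower bound.
 */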

/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by the size
 * of a buffer.
 *
 * @adev: Device to which the allocated BO belongs
 * @size: Size of the buffer, in bytes, encapsulated by the BO. This should
 * be equivalent to amdgpu_bo_size(BO)
 * @alloc_flag: Flags used when allocating the BO
 * @xcp_id: ID used to look up the XCP in the XCP manager; one XCP is
 * managed as one compute node in the driver for the application
 *
 * Return: 0 on success, -ENOMEM on error
 */
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
	size_t system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;
	uint64_t vram_size = 0;

	system_mem_needed = 0;
	ttm_mem_needed = 0;
	vram_needed = 0;
	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		system_mem_needed = size;
		ttm_mem_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		/*
		 * Conservatively round up the allocation requirement to 2 MB
		 * to avoid fragmentation caused by 4K allocations in the tail
		 * 2M BO chunk.
		 */
		vram_needed = size;
		/*
		 * For GFX 9.4.3, get the VRAM size from XCP structs
		 */
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			return -EINVAL;

		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
		if (adev->apu_prefer_gtt) {
			system_mem_needed = size;
			ttm_mem_needed = size;
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		system_mem_needed = size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		return -ENOMEM;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit) {
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
		if (!no_system_mem_limit) {
			ret = -ENOMEM;
			goto release;
		}
	}

	if (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	    kfd_mem_limit.max_ttm_mem_limit) {
		ret = -ENOMEM;
		goto release;
	}

	/* If is_app_apu is false and apu_prefer_gtt is true, this is an APU
	 * with carveout < GTT. In that case, VRAM allocations go to the GTT
	 * domain; skip the VRAM check since the ttm_mem_limit check already
	 * covers this allocation.
	 */
	if (adev && xcp_id >= 0 && (!adev->apu_prefer_gtt || adev->gmc.is_app_apu)) {
		uint64_t vram_available =
			vram_size - reserved_for_pt - reserved_for_ras -
			atomic64_read(&adev->vram_pin_size);
		if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) {
			ret = -ENOMEM;
			goto release;
		}
	}

	/* Update memory accounting by decreasing available system
	 * memory, TTM memory and GPU memory as computed above
	 */
	WARN_ONCE(vram_needed && !adev,
		  "adev reference can't be null when vram is used");
	if (adev && xcp_id >= 0) {
		adev->kfd.vram_used[xcp_id] += vram_needed;
		adev->kfd.vram_used_aligned[xcp_id] +=
				adev->apu_prefer_gtt ?
				vram_needed :
				ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
	}
	kfd_mem_limit.system_mem_used += system_mem_needed;
	kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= size;
		kfd_mem_limit.ttm_mem_used -= size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		WARN_ONCE(!adev,
			  "adev reference can't be null when alloc mem flags vram is set");
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			goto release;

		if (adev) {
			adev->kfd.vram_used[xcp_id] -= size;
			if (adev->apu_prefer_gtt) {
				adev->kfd.vram_used_aligned[xcp_id] -= size;
				kfd_mem_limit.system_mem_used -= size;
				kfd_mem_limit.ttm_mem_used -= size;
			} else {
				adev->kfd.vram_used_aligned[xcp_id] -=
					ALIGN(size, VRAM_AVAILABLITY_ALIGN);
			}
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		kfd_mem_limit.system_mem_used -= size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		goto release;
	}
	WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
		  "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "KFD TTM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "KFD system memory accounting unbalanced");

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 alloc_flags = bo->kfd_bo->alloc_flags;
	u64 size = amdgpu_bo_size(bo);

	amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
					  bo->xcp_id);

	kfree(bo->kfd_bo);
}

/**
 * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
 * about a USERPTR, DOORBELL or MMIO BO.
 *
 * @adev: Device for which the dmamap BO is being created
 * @mem: BO of the peer device that is being DMA mapped. Provides parameters
 *	 for building the dmamap BO
 * @bo_out: Output parameter updated with handle of dmamap BO
 */
static int
create_dmamap_sg_bo(struct amdgpu_device *adev,
		    struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
	struct drm_gem_object *gem_obj;
	int ret;
	uint64_t flags = 0;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
		flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
					AMDGPU_GEM_CREATE_UNCACHED);

	ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
			AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);

	amdgpu_bo_unreserve(mem->bo);

	if (ret) {
		pr_err("Error creating DMA mappable SG BO: %d\n", ret);
		return -EINVAL;
	}

	*bo_out = gem_to_amdgpu_bo(gem_obj);
	(*bo_out)->parent = amdgpu_bo_ref(mem->bo);
	return ret;
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.base.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_fence *replacement;

	if (!ef)
		return -EINVAL;

	/* TODO: Instead of blocking here, we should use the fence of the
	 * page table update and TLB flush directly.
	 */
	replacement = dma_fence_get_stub();
	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
				replacement, DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(replacement);
	return 0;
}

/**
 * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
 * @bo: the BO to remove the eviction fences from.
 *
 * This function should only be used on release, when all references to the
 * BO are already dropped. We remove the eviction fence from the private copy
 * of the dma_resv object here since that is what is used during release to
 * determine whether the BO is idle or not.
 */
void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
	struct dma_resv *resv = &bo->tbo.base._resv;
	struct dma_fence *fence, *stub;
	struct dma_resv_iter cursor;

	dma_resv_assert_held(resv);

	stub = dma_fence_get_stub();
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (!to_amdgpu_amdkfd_fence(fence))
			continue;

		dma_resv_replace_fences(resv, fence->context, stub,
					DMA_RESV_USAGE_BOOKKEEP);
	}
	dma_fence_put(stub);
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	/* BO has been pinned, no need to validate it */
	if (bo->tbo.pin_count)
		return 0;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	int ret = amdgpu_bo_reserve(bo, false);

	if (ret)
		return ret;

	ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
	if (ret)
		goto unreserve_out;

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		goto unreserve_out;

	dma_resv_add_fence(bo->tbo.base.resv, fence,
			   DMA_RESV_USAGE_BOOKKEEP);

unreserve_out:
	amdgpu_bo_unreserve(bo);

	return ret;
}

static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
				 struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_validate(adev, vm, ticket,
				 amdgpu_amdkfd_validate_vm_bo, NULL);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      struct kgd_mem *mem)
{
	uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
				 AMDGPU_VM_MTYPE_DEFAULT;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	return mapping_flags;
}

/**
 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
 * @addr: The starting address to point to
 * @size: Size of memory area in bytes being pointed to
 *
 * Allocates an instance of sg_table and initializes it to point to the
 * memory area specified by the input parameters. The address passed in is
 * assumed to be already DMA mapped, if needed.
 *
 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
 * because they are physically contiguous.
 *
 * Return: Initialized instance of SG Table or NULL
 */
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg_dma_address(sg->sgl) = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
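
/*
 * Usage sketch (illustrative only): a DOORBELL or MMIO BO wraps a single
 * physically contiguous, already DMA-mapped range, e.g.
 *
 *	sg = create_sg_table(dma_addr, PAGE_SIZE);
 *	if (!sg)
 *		return -ENOMEM;
 *
 * The resulting one-node table is then attached to the SG BO's ttm_tt,
 * as done in kfd_mem_dmamap_sg_bo() below.
 */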

static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}

static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * An access request from the device that owns the DOORBELL does not require
 * DMA mapping. This is because the request doesn't go through the PCIe root
 * complex; it loops back within the device instead. The need to DMA map
 * arises only when accessing a peer device's DOORBELL.
 *
 * In contrast, all access requests for MMIO need to be DMA mapped without
 * regard to device ownership. This is because access requests for MMIO go
 * through the PCIe root complex.
 *
 * This is accomplished in two steps:
 *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
 *     in updating requesting device's page table
 *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
 *     accessible. This allows an update of requesting device's page table
 *     with entries associated with DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Mapping of DOORBELL or MMIO BO of same or peer device
 *   - Validating an evicted DOORBELL or MMIO BO on device seeking access
 *
 * Return: ZERO if successful, NON-ZERO otherwise
 */
static int
kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
		     struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	bool mmio;
	int ret;

	/* Expect SG Table of dmamap BO to be NULL */
	mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
	if (unlikely(ttm->sg)) {
		pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
		return -EINVAL;
	}

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
			DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr = mem->bo->tbo.sg->sgl->dma_address;
	pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
	pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
	dma_addr = dma_map_resource(adev->dev, dma_addr,
			mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(adev->dev, dma_addr);
	if (unlikely(ret))
		return ret;
	pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);

	ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
	if (unlikely(!ttm->sg)) {
		ret = -ENOMEM;
		goto unmap_sg;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(ret))
		goto free_sg;

	return ret;

free_sg:
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
unmap_sg:
	dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	case KFD_MEM_ATT_SG:
		return kfd_mem_dmamap_sg_bo(mem, attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}

static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	(void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	/* This is a no-op. We don't want to trigger eviction fences when
	 * unmapping DMABufs. Therefore the invalidation (moving to system
	 * domain) is done in kfd_mem_dmamap_dmabuf.
	 */
}

/**
 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * The method performs the following steps:
 *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
 *   - Free SG Table that is used to encapsulate DMA mapped memory of
 *     peer device's DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 *   - Eviction of DOORBELL or MMIO BO on device having access to its memory
 *
 * Return: void
 */
static void
kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;

	if (unlikely(!ttm->sg)) {
		pr_debug("SG Table of BO is NULL");
		return;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	(void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
			   ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
	bo->tbo.sg = NULL;
}

static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	case KFD_MEM_ATT_SG:
		kfd_mem_dmaunmap_sg_bo(mem, attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
	if (!mem->dmabuf) {
		struct amdgpu_device *bo_adev;
		struct dma_buf *dmabuf;

		bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
		dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
					       mem->gem_handle,
			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
					       DRM_RDWR : 0);
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);
		mem->dmabuf = dmabuf;
	}

	return 0;
}

static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	ret = kfd_mem_export_dmabuf(mem);
	if (ret)
		return ret;

	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;

	return 0;
}

/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	struct amdgpu_bo_va *bo_va;
	bool same_hive = false;
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
	 *
	 * The access path of MMIO and DOORBELL BOs is always over PCIe.
	 * In contrast, the access path of VRAM BOs depends upon the type of
	 * link that connects the peer device. Access over PCIe is allowed
	 * if the peer device has a large BAR. In contrast, access over xGMI
	 * is allowed for both small and large BAR configurations of the
	 * peer device.
	 */
	if ((adev != bo_adev && !adev->apu_prefer_gtt) &&
	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
			same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
		if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
			return -EINVAL;
	}

	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
		    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
		    (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
		    same_hive) {
			/* Mappings on the local GPU, VRAM mappings in the
			 * local hive, and userptr or GTT mappings that can
			 * reuse the DMA map address space all share the
			 * original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
		} else if (mem->bo->tbo.type == ttm_bo_type_sg) {
			WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
				    mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
			attachment[i]->type = KFD_MEM_ATT_SG;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Enable access to GTT and VRAM BOs of peer devices */
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
			   mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
			pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
		} else {
			WARN_ONCE(true, "Handling invalid ATTACH request");
			ret = -EINVAL;
			goto unwind;
		}

		/* Add BO to VM internal data structures */
		ret = amdgpu_bo_reserve(bo[i], false);
		if (ret) {
			pr_debug("Unable to reserve BO during memory attach");
			goto unwind;
		}
		bo_va = amdgpu_vm_bo_find(vm, bo[i]);
		if (!bo_va)
			bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		else
			++bo_va->ref_count;
		attachment[i]->bo_va = bo_va;
		amdgpu_bo_unreserve(bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}
		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			(void)amdgpu_bo_reserve(bo[i], true);
			if (--attachment[i]->bo_va->ref_count == 0)
				amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
			amdgpu_bo_unreserve(bo[i]);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}

static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	pr_debug("\t remove VA 0x%llx in entry %p\n",
		 attachment->va, attachment);
	if (--attachment->bo_va->ref_count == 0)
		amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
	drm_gem_object_put(&bo->tbo.base);
	list_del(&attachment->list);
	kfree(attachment);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&mem->validate_list,
			      &process_info->userptr_valid_list);
	else
		list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	mutex_lock(&process_info->lock);
	list_del(&mem->validate_list);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
			   bool criu_resume)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	struct hmm_range *range;
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_hmm_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	if (criu_resume) {
		/*
		 * During a CRIU restore operation, the userptr buffer objects
		 * will be validated in the restore_userptr_work worker at a
		 * later stage when it is scheduled by another ioctl called by
		 * CRIU master process for the target pid for restore.
		 */
		mutex_lock(&process_info->notifier_lock);
		mem->invalid++;
		mutex_unlock(&process_info->notifier_lock);
		mutex_unlock(&process_info->lock);
		return 0;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
	if (ret) {
		if (ret == -EAGAIN)
			pr_debug("Failed to get user pages, try again\n");
		else
			pr_err("%s: Failed to get user pages: %d\n", __func__,
			       ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}

	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);

	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
	if (ret)
		amdgpu_hmm_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	/* DRM execution context for the reservation */
	struct drm_exec exec;
	/* Number of VMs reserved */
	unsigned int n_vms;
	/* Pointer to sync object */
	struct amdgpu_sync *sync;
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped */
	BO_VM_ALL,		/* Match all VMs a BO was added to */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			     struct amdgpu_vm *vm,
			     struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->n_vms = 1;
	ctx->sync = &mem->sync;
	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&ctx->exec) {
		ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;

		ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;
	}
	return 0;

error:
	pr_err("Failed to reserve buffers in ttm.\n");
	drm_exec_fini(&ctx->exec);
	return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
 * used. Otherwise, only the single given VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				   struct amdgpu_vm *vm, enum bo_vm_match map_type,
				   struct bo_vm_reservation_context *ctx)
{
	struct kfd_mem_attachment *entry;
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	ctx->sync = &mem->sync;
	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&ctx->exec) {
		ctx->n_vms = 0;
		list_for_each_entry(entry, &mem->attachments, list) {
			if ((vm && vm != entry->bo_va->base.vm) ||
			    (entry->is_mapped != map_type
			     && map_type != BO_VM_ALL))
				continue;

			ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
						&ctx->exec, 2);
			drm_exec_retry_on_contention(&ctx->exec);
			if (unlikely(ret))
				goto error;
			++ctx->n_vms;
		}

		ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;
	}
	return 0;

error:
	pr_err("Failed to reserve buffers in ttm.\n");
	drm_exec_fini(&ctx->exec);
	return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	drm_exec_fini(&ctx->exec);
	ctx->sync = NULL;
	return ret;
}

static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	struct amdgpu_vm *vm = bo_va->base.vm;

	if (bo_va->queue_refcount) {
		pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
		return -EBUSY;
	}

	(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);

	return 0;
}

static int update_gpuvm_pte(struct kgd_mem *mem,
			    struct kfd_mem_attachment *entry,
			    struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
}

static int map_bo_to_gpuvm(struct kgd_mem *mem,
			   struct kfd_mem_attachment *entry,
			   struct amdgpu_sync *sync,
			   bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
		       entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(mem, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	kfd_mem_dmaunmap_attachment(mem, entry);
	return ret;
}

static int process_validate_vms(struct amdkfd_process_info *process_info,
				struct ww_acquire_ctx *ticket)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm, ticket);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		mutex_init(&info->notifier_lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm,
						   NULL);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm, NULL);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	dma_resv_add_fence(vm->root.bo->tbo.base.resv,
			   &vm->process_info->eviction_fence->base,
			   DMA_RESV_USAGE_BOOKKEEP);
	amdgpu_bo_unreserve(vm->root.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
		      &(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	if (ef)
		*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		dma_fence_put(&info->eviction_fence->base);
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		mutex_destroy(&info->notifier_lock);
		kfree(info);
	}
	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
 * @bo: Handle of buffer object being pinned
 * @domain: Domain into which BO should be pinned
 *
 *   - USERPTR BOs are UNPINNABLE and will return an error
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count incremented. It is valid to PIN a BO multiple times
 *
 * Return: ZERO if successful in pinning, Non-Zero in case of error.
 */
static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return ret;

	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
		/*
		 * If the BO is not contiguous in VRAM, move it to system
		 * memory first to ensure we can get contiguous VRAM space
		 * after evicting other BOs.
		 */
		if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
			struct ttm_operation_ctx ctx = { true, false };

			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (unlikely(ret)) {
				pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret);
				goto out;
			}
		}
	}

	ret = amdgpu_bo_pin(bo, domain);
	if (ret)
		pr_err("Error in Pinning BO to domain: %d\n", domain);

	amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
out:
	amdgpu_bo_unreserve(bo);
	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
 * @bo: Handle of buffer object being unpinned
 *
 *   - Is an illegal request for USERPTR BOs and is ignored
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count decremented. Calls to UNPIN must balance calls to PIN
 */
static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return;

	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct amdgpu_vm *avm,
					   void **process_info,
					   struct dma_fence **ef)
{
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;

	if (!process_info)
		return;

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		mutex_destroy(&process_info->notifier_lock);
		kfree(process_info);
	}
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *pd = avm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

void amdgpu_amdkfd_block_mmu_notifications(void *p)
{
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	WRITE_ONCE(pinfo->block_mmu_notifications, true);
	mutex_unlock(&pinfo->lock);
}

int amdgpu_amdkfd_criu_resume(void *p)
{
	int ret = 0;
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	pr_debug("scheduling work\n");
	mutex_lock(&pinfo->notifier_lock);
	pinfo->evicted_bos++;
	mutex_unlock(&pinfo->notifier_lock);
	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	WRITE_ONCE(pinfo->block_mmu_notifications, false);
	queue_delayed_work(system_freezable_wq,
			   &pinfo->restore_userptr_work, 0);

out_unlock:
	mutex_unlock(&pinfo->lock);
	return ret;
}

size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					  uint8_t xcp_id)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
	ssize_t available;
	uint64_t vram_available, system_mem_available, ttm_mem_available;

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (adev->apu_prefer_gtt && !adev->gmc.is_app_apu)
		vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
			- adev->kfd.vram_used_aligned[xcp_id];
	else
		vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
			- adev->kfd.vram_used_aligned[xcp_id]
			- atomic64_read(&adev->vram_pin_size)
			- reserved_for_pt
			- reserved_for_ras;

	if (adev->apu_prefer_gtt) {
		system_mem_available = no_system_mem_limit ?
					kfd_mem_limit.max_system_mem_limit :
					kfd_mem_limit.max_system_mem_limit -
					kfd_mem_limit.system_mem_used;

		ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
				kfd_mem_limit.ttm_mem_used;

		available = min3(system_mem_available, ttm_mem_available,
				 vram_available);
		available = ALIGN_DOWN(available, PAGE_SIZE);
	} else {
		available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);

	if (available < 0)
		available = 0;

	return available;
}
1682
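/**
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu - Allocate memory for a KFD process
 * @adev: device the allocation is made on
 * @va: virtual address the BO will be mapped at
 * @size: requested allocation size in bytes
 * @drm_priv: DRM private data of the process VM
 * @mem: returns the new KFD BO structure
 * @offset: in: userptr address or doorbell/MMIO offset, out: CPU mmap offset
 * @flags: KFD_IOC_ALLOC_MEM_FLAGS_* allocation flags
 * @criu_resume: whether this is called during CRIU restore
 *
 * Picks the allocation domain from @flags (VRAM, GTT, userptr, doorbell or
 * MMIO remap), creates the GEM object and registers it with the process info.
 *
 * Return: 0 on success, negative errno on failure.
 */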
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj = NULL;
	u32 domain, alloc_domain;
	uint64_t aligned_size;
	int8_t xcp_id = -1;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (adev->apu_prefer_gtt) {
			domain = AMDGPU_GEM_DOMAIN_GTT;
			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
			alloc_flags = 0;
		} else {
			alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
			alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;

			/* For contiguous VRAM allocation */
			if (flags & KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS)
				alloc_flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		}
		xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
					0 : fpriv->xcp_id;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;

		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
			if (!offset || !*offset)
				return -EINVAL;
			user_addr = untagged_addr(*offset);
		} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
			bo_type = ttm_bo_type_sg;
			if (size > UINT_MAX)
				return -EINVAL;
			sg = create_sg_table(*offset, size);
			if (!sg)
				return -ENOMEM;
		} else {
			return -EINVAL;
		}
	}

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
		alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
		alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
		alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size >>= 1;
	aligned_size = PAGE_ALIGN(size);

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
					      xcp_id);
	if (ret) {
		pr_debug("Insufficient memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
		 va, (*mem)->aql_queue ? size << 1 : size,
		 domain_string(alloc_domain), xcp_id);

	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
				       bo_type, NULL, &gobj, xcp_id + 1);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
			 domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
	if (ret) {
		pr_debug("Failed to allow vma node access. ret %d\n", ret);
		goto err_node_allow;
	}
	ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
	if (ret)
		goto err_gem_handle_create;
	bo = gem_to_amdgpu_bo(gobj);
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;

	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
		ret = init_user_pages(*mem, user_addr, criu_resume);
		if (ret)
			goto allocate_init_user_pages_failed;
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
		if (ret) {
			pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
			goto err_pin_bo;
		}
		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	} else {
		mutex_lock(&avm->process_info->lock);
		if (avm->process_info->eviction_fence &&
		    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
			ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
				&avm->process_info->eviction_fence->base);
		mutex_unlock(&avm->process_info->lock);
		if (ret)
			goto err_validate_bo;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
err_pin_bo:
err_validate_bo:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
err_gem_handle_create:
	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
	amdgpu_sync_free(&(*mem)->sync);
	mutex_destroy(&(*mem)->lock);
	if (gobj)
		drm_gem_object_put(gobj);
	else
		kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

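/**
 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu - Free memory allocated for a KFD process
 * @adev: device the BO was allocated on
 * @mem: KFD BO structure to be freed
 * @drm_priv: DRM private data of the process VM
 * @size: returns the size of the freed allocation for memory accounting
 *
 * Fails with -EBUSY while the BO is still mapped on any GPU. Otherwise
 * detaches the BO from all VMs, drops the DMA-buf reference if one was taken
 * and releases the last GEM reference.
 */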
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	bool use_release_notifier = (mem->bo->kfd_bo == mem);
	struct kfd_mem_attachment *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = false;

	mutex_lock(&mem->lock);

	/* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
	if (mem->alloc_flags &
	    (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
	     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
	}

	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
			 mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	mutex_lock(&process_info->lock);
	list_del(&mem->validate_list);
	mutex_unlock(&process_info->lock);

	/* Cleanup user pages and MMU notifiers */
	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
		amdgpu_hmm_unregister(mem->bo);
		mutex_lock(&process_info->notifier_lock);
		amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
		mutex_unlock(&process_info->notifier_lock);
	}

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					    process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		 mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
		kfd_mem_dmaunmap_attachment(mem, entry);
		kfd_mem_detach(entry);
	}

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported. For APUs that prefer GTT, VRAM
	 * allocations are done in the GTT domain.
	 */
	if (size) {
		if (!is_imported &&
		   (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
		   (adev->apu_prefer_gtt &&
		    mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO */
	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
	drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
	if (mem->dmabuf) {
		dma_buf_put(mem->dmabuf);
		mem->dmabuf = NULL;
	}
	mutex_destroy(&mem->lock);

	/* If this releases the last reference, it will end up calling
	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
	 * this needs to be the last call here.
	 */
	drm_gem_object_put(&mem->bo->tbo.base);

	/*
	 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
	 * explicitly free it here.
	 */
	if (!use_release_notifier)
		kfree(mem);

	return ret;
}

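/**
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu - Map a KFD BO into a GPUVM
 * @adev: device the VM belongs to
 * @mem: KFD BO structure to be mapped
 * @drm_priv: DRM private data of the process VM
 *
 * Attaches the BO to the VM on first use, validates it in its intended
 * domain and updates the page tables. Invalid userptr BOs are left for the
 * next restore worker to map once their pages have been revalidated.
 */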
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem,
		void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock notifier lock. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mutex_lock(&mem->process_info->notifier_lock);
		is_invalid_userptr = !!mem->invalid;
		mutex_unlock(&mem->process_info->notifier_lock);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.base.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
		 mem->va,
		 mem->va + bo_size * (1 + mem->aql_queue),
		 avm, domain_string(domain));

	if (!kfd_mem_is_attached(avm, mem)) {
		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
		if (ret)
			goto out;
	}

	ret = reserve_bo_and_vm(mem, avm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	ret = vm_validate_pt_pd_bos(avm, NULL);
	if (unlikely(ret))
		goto out_unreserve;

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || entry->is_mapped)
			continue;

		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
				      is_invalid_userptr);
		if (ret) {
			pr_err("Failed to map bo to gpuvm\n");
			goto out_unreserve;
		}

		ret = vm_update_pds(avm, ctx.sync);
		if (ret) {
			pr_err("Failed to update page directories\n");
			goto out_unreserve;
		}

		entry->is_mapped = true;
		mem->mapped_to_gpu_memory++;
		pr_debug("\t INC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

out_unreserve:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

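/**
 * amdgpu_amdkfd_gpuvm_dmaunmap_mem - Tear down DMA mappings of a KFD BO for one VM
 * @mem: KFD BO structure
 * @drm_priv: DRM private data of the process VM the mappings belong to
 */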
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
	struct kfd_mem_attachment *entry;
	struct amdgpu_vm *vm;
	int ret;

	vm = drm_priv_to_vm(drm_priv);

	mutex_lock(&mem->lock);

	ret = amdgpu_bo_reserve(mem->bo, true);
	if (ret)
		goto out;

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != vm)
			continue;
		if (entry->bo_va->base.bo->tbo.ttm &&
		    !entry->bo_va->base.bo->tbo.ttm->sg)
			continue;

		kfd_mem_dmaunmap_attachment(mem, entry);
	}

	amdgpu_bo_unreserve(mem->bo);
out:
	mutex_unlock(&mem->lock);

	return ret;
}

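/**
 * amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu - Remove a KFD BO mapping from a GPUVM
 * @adev: device the VM belongs to
 * @mem: KFD BO structure to be unmapped
 * @drm_priv: DRM private data of the process VM
 *
 * Return: 0 on success, -EINVAL if the BO was not mapped in the given VM.
 */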
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_mem_attachment *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos(avm, NULL);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		 mem->va,
		 mem->va + bo_size * (1 + mem->aql_queue),
		 avm);

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
			continue;

		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
			 entry->va, entry->va + bo_size, entry);

		ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync);
		if (ret)
			goto unreserve_out;

		entry->is_mapped = false;

		mem->mapped_to_gpu_memory--;
		pr_debug("\t DEC mapping count %d\n",
			 mem->mapped_to_gpu_memory);
	}

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

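/**
 * amdgpu_amdkfd_gpuvm_sync_memory - Wait for pending work on a KFD BO
 * @adev: unused
 * @mem: KFD BO structure
 * @intr: whether the wait is interruptible
 *
 * Clones the BO's sync object under the lock and waits on the copy, so the
 * wait itself runs without holding the lock.
 */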
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

/**
 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
 * @bo: Buffer object to be mapped
 * @bo_gart: Returns a reference to the GART-mapped BO
 *
 * Before return, bo reference count is incremented. To release the reference and unpin/
 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
 */
int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart)
{
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto err_reserve_bo_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto err_pin_bo_failed;
	}

	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret) {
		pr_err("Failed to bind bo to GART. ret %d\n", ret);
		goto err_map_bo_gart_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, bo->vm_bo->vm->process_info->eviction_fence);

	amdgpu_bo_unreserve(bo);

	*bo_gart = amdgpu_bo_ref(bo);

	return 0;

err_map_bo_gart_failed:
	amdgpu_bo_unpin(bo);
err_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:

	return ret;
}

/** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
 *
 * @mem: Buffer object to be mapped for CPU access
 * @kptr[out]: pointer in kernel CPU address space
 * @size[out]: size of the buffer
 *
 * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
 * from the BO, since pinned BOs cannot be evicted. The bo must remain on the
 * validate_list, so the GPU mapping can be restored after a page table was
 * evicted.
 *
 * Return: 0 on success, error code on failure
 */
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

/** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
 *
 * @mem: Buffer object to be unmapped for CPU access
 *
 * Removes the kernel CPU mapping and unpins the BO. It does not restore the
 * eviction fence, so this function should only be used for cleanup before the
 * BO is destroyed.
 */
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
	struct amdgpu_bo *bo = mem->bo;

	(void)amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}

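/**
 * amdgpu_amdkfd_gpuvm_get_vm_fault_info - Read and clear the last VM fault info
 * @adev: device that took the fault
 * @mem: the fault information is copied here
 *
 * Only copies the data if a new fault was recorded since the last read.
 */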
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *mem)
{
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb(); /* make sure read happened */
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

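/* Create a kgd_mem wrapping an imported DMA-buf. Only VRAM and GTT BOs are
 * supported. The kgd_mem takes a reference on the DMA-buf, is added to the
 * process BO list and fenced with the process eviction fence.
 */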
static int import_obj_create(struct amdgpu_device *adev,
			     struct dma_buf *dma_buf,
			     struct drm_gem_object *obj,
			     uint64_t va, void *drm_priv,
			     struct kgd_mem **mem, uint64_t *size,
			     uint64_t *mmap_offset)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *bo;
	int ret;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
	if (ret)
		goto err_free_mem;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	get_dma_buf(dma_buf);
	(*mem)->dmabuf = dma_buf;
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
			 !adev->apu_prefer_gtt ?
			 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;

	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	mutex_lock(&avm->process_info->lock);
	if (avm->process_info->eviction_fence &&
	    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
		ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
				&avm->process_info->eviction_fence->base);
	mutex_unlock(&avm->process_info->lock);
	if (ret)
		goto err_remove_mem;

	return 0;

err_remove_mem:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	drm_vma_node_revoke(&obj->vma_node, drm_priv);
err_free_mem:
	kfree(*mem);
	return ret;
}

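/**
 * amdgpu_amdkfd_gpuvm_import_dmabuf_fd - Import a DMA-buf by file descriptor
 * @adev: device to import the BO into
 * @fd: DMA-buf file descriptor
 * @va: virtual address the BO will be mapped at
 * @drm_priv: DRM private data of the process VM
 * @mem: returns the new KFD BO structure
 * @size: returns the size of the imported BO
 * @mmap_offset: returns the CPU mmap offset of the BO
 *
 * Converts @fd to a GEM handle on the KFD DRM client and wraps the object in
 * a kgd_mem via import_obj_create().
 */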
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
					 uint64_t va, void *drm_priv,
					 struct kgd_mem **mem, uint64_t *size,
					 uint64_t *mmap_offset)
{
	struct drm_gem_object *obj;
	uint32_t handle;
	int ret;

	ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
					 &handle);
	if (ret)
		return ret;
	obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
	if (!obj) {
		ret = -EINVAL;
		goto err_release_handle;
	}

	ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
				mmap_offset);
	if (ret)
		goto err_put_obj;

	(*mem)->gem_handle = handle;

	return 0;

err_put_obj:
	drm_gem_object_put(obj);
err_release_handle:
	drm_gem_handle_delete(adev->kfd.client.file, handle);
	return ret;
}

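/**
 * amdgpu_amdkfd_gpuvm_export_dmabuf - Export a KFD BO as a DMA-buf
 * @mem: KFD BO structure
 * @dma_buf: returns a new reference to the exported DMA-buf
 */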
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dma_buf)
{
	int ret;

	mutex_lock(&mem->lock);
	ret = kfd_mem_export_dmabuf(mem);
	if (ret)
		goto out;

	get_dma_buf(mem->dmabuf);
	*dma_buf = mem->dmabuf;
out:
	mutex_unlock(&mem->lock);
	return ret;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int r = 0;

	/* Do not process MMU notifications during CRIU restore until
	 * KFD_CRIU_OP_RESUME IOCTL is received
	 */
	if (READ_ONCE(process_info->block_mmu_notifications))
		return 0;

	mutex_lock(&process_info->notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	mem->invalid++;
	if (++process_info->evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mni->mm,
				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);

		if (r && r != -ESRCH)
			pr_err("Failed to quiesce KFD\n");

		if (r != -ESRCH)
			queue_delayed_work(system_freezable_wq,
				&process_info->restore_userptr_work,
				msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}
	mutex_unlock(&process_info->notifier_lock);

	return r;
}

/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t invalid;
	int ret = 0;

	mutex_lock(&process_info->notifier_lock);

	/* Move all invalidated BOs to the userptr_inval_list */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list)
		if (mem->invalid)
			list_move_tail(&mem->validate_list,
				       &process_info->userptr_inval_list);

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list) {
		invalid = mem->invalid;
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its page list.
			 */
			continue;

		bo = mem->bo;

		amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
		mem->range = NULL;

		/* BO reservations and getting user pages (hmm_range_fault)
		 * must happen outside the notifier lock
		 */
		mutex_unlock(&process_info->notifier_lock);

		/* Move the BO to system (CPU) domain if necessary to unmap
		 * and free the SG table
		 */
		if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
			if (amdgpu_bo_reserve(bo, true))
				return -EAGAIN;
			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			amdgpu_bo_unreserve(bo);
			if (ret) {
				pr_err("%s: Failed to invalidate userptr BO\n",
				       __func__);
				return -EAGAIN;
			}
		}

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
		if (ret) {
			pr_debug("Failed %d to get user pages\n", ret);

			/* Return -EFAULT bad address error as success. It will
			 * fail later with a VM fault if the GPU tries to access
			 * it. Better than hanging indefinitely with stalled
			 * user mode queues.
			 *
			 * Return other error -EBUSY or -ENOMEM to retry restore
			 */
			if (ret != -EFAULT)
				return ret;

			/* If applications unmap memory before destroying the userptr
			 * from the KFD, trigger a segmentation fault in VM debug mode.
			 */
			if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
				pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
				       pid_nr(process_info->pid), mem->va);

				/* Send GPU VM fault to user space */
				kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
								       mem->va);
			}

			ret = 0;
		}

		amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);

		mutex_lock(&process_info->notifier_lock);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (mem->invalid != invalid) {
			ret = -EAGAIN;
			goto unlock_out;
		}
		/* set mem valid if mem has hmm range associated */
		if (mem->range)
			mem->invalid = 0;
	}

unlock_out:
	mutex_unlock(&process_info->notifier_lock);

	return ret;
}

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
 * with new page addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_sync sync;
	struct drm_exec exec;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	int ret;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	/* Reserve all BOs and page tables for validation */
	drm_exec_until_all_locked(&exec) {
		/* Reserve all the page directories */
		list_for_each_entry(peer_vm, &process_info->vm_list_head,
				    vm_list_node) {
			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(ret))
				goto unreserve_out;
		}

		/* Reserve the userptr_inval_list entries to resv_list */
		list_for_each_entry(mem, &process_info->userptr_inval_list,
				    validate_list) {
			struct drm_gem_object *gobj;

			gobj = &mem->bo->tbo.base;
			ret = drm_exec_prepare_obj(&exec, gobj, 1);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(ret))
				goto unreserve_out;
		}
	}

	ret = process_validate_vms(process_info, NULL);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list) {
		struct kfd_mem_attachment *attachment;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				mutex_lock(&process_info->notifier_lock);
				mem->invalid++;
				mutex_unlock(&process_info->notifier_lock);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	drm_exec_fini(&exec);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);

	return ret;
}

/* Confirm that all user pages are valid while holding the notifier lock
 *
 * Moves valid BOs from the userptr_inval_list back to userptr_valid_list.
 */
static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
{
	struct kgd_mem *mem, *tmp_mem;
	int ret = 0;

	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list) {
		bool valid;

		/* keep mem without hmm range at userptr_inval_list */
		if (!mem->range)
			continue;

		/* Only check mem with hmm range associated */
		valid = amdgpu_ttm_tt_get_user_pages_done(
					mem->bo->tbo.ttm, mem->range);

		mem->range = NULL;
		if (!valid) {
			WARN(!mem->invalid, "Invalid BO not marked invalid");
			ret = -EAGAIN;
			continue;
		}

		if (mem->invalid) {
			WARN(1, "Valid BO is marked invalid");
			ret = -EAGAIN;
			continue;
		}

		list_move_tail(&mem->validate_list,
			       &process_info->userptr_valid_list);
	}

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	uint32_t evicted_bos;

	mutex_lock(&process_info->notifier_lock);
	evicted_bos = process_info->evicted_bos;
	mutex_unlock(&process_info->notifier_lock);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	mutex_lock(&process_info->notifier_lock);
	if (process_info->evicted_bos != evicted_bos)
		goto unlock_notifier_out;

	if (confirm_valid_user_pages_locked(process_info)) {
		WARN(1, "User pages unexpectedly invalid");
		goto unlock_notifier_out;
	}

	process_info->evicted_bos = evicted_bos = 0;

	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_notifier_out:
	mutex_unlock(&process_info->notifier_lock);
unlock_out:
	mutex_unlock(&process_info->lock);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos) {
		queue_delayed_work(system_freezable_wq,
			&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));

		kfd_smi_event_queue_restore_rescheduled(mm);
	}
	mmput(mm);
	put_task_struct(usertask);
}

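/* Replace the eviction fence that KFD holds for this process. Must be
 * called with process_info->lock held (see the rcu_replace_pointer
 * annotation below).
 */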
static void replace_eviction_fence(struct dma_fence __rcu **ef,
				   struct dma_fence *new_ef)
{
	struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
		/* protected by process_info->lock */);

	/* If we're replacing an unsignaled eviction fence, that fence will
	 * never be signaled, and if anyone is still waiting on that fence,
	 * they will hang forever. This should never happen. We should only
	 * replace the fence in restore_work that only gets scheduled after
	 * eviction work signaled the fence.
	 */
	WARN_ONCE(!dma_fence_is_signaled(old_ef),
		  "Replacing unsignaled eviction fence");
	dma_fence_put(old_ef);
}

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 * @ef: eviction fence shared with KFD, updated if a new fence is created
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1. Release old eviction fence and create new one
 * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *    BOs that need to be reserved.
 * 4. Reserve all the BOs
 * 5. Validate PD and PT BOs.
 * 6. Validate all KFD BOs using kfd_bo_list, map them and add new fence
 * 7. Add fence to all PD and PT BOs.
 * 8. Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
{
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;
	struct drm_exec exec;
	int ret;

	INIT_LIST_HEAD(&duplicate_save);

	mutex_lock(&process_info->lock);

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		list_for_each_entry(peer_vm, &process_info->vm_list_head,
				    vm_list_node) {
			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(ret)) {
				pr_err("Locking VM PD failed, ret: %d\n", ret);
				goto ttm_reserve_fail;
			}
		}

		/* Reserve all BOs and page tables/directory. Add all BOs from
		 * kfd_bo_list to ctx.list
		 */
		list_for_each_entry(mem, &process_info->kfd_bo_list,
				    validate_list) {
			struct drm_gem_object *gobj;

			gobj = &mem->bo->tbo.base;
			ret = drm_exec_prepare_obj(&exec, gobj, 1);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(ret)) {
				pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
				goto ttm_reserve_fail;
			}
		}
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate BOs managed by KFD */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			ret = amdgpu_amdkfd_bo_validate(bo,
						AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
					DMA_RESV_USAGE_KERNEL, fence) {
			ret = amdgpu_sync_fence(&sync_obj, fence, GFP_KERNEL);
			if (ret) {
				pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	if (failed_size)
		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);

	/* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
	 * validations above would invalidate DMABuf imports again.
	 */
	ret = process_validate_vms(process_info, &exec.ticket);
	if (ret) {
		pr_debug("Validating VMs failed, ret: %d\n", ret);
		goto validate_map_fail;
	}

	/* Update mappings managed by KFD. */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list) {
		struct kfd_mem_attachment *attachment;

		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
				continue;

			kfd_mem_dmaunmap_attachment(mem, attachment);
			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update mappings not managed by KFD */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_device *adev = amdgpu_ttm_adev(
			peer_vm->root.bo->tbo.bdev);

		struct amdgpu_fpriv *fpriv =
			container_of(peer_vm, struct amdgpu_fpriv, vm);

		ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
		if (ret) {
			dev_dbg(adev->dev,
				"Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
				pid_nr(process_info->pid));
			goto validate_map_fail;
		}

		ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
		if (ret) {
			dev_dbg(adev->dev,
				"Memory eviction: handle moved failed, pid %8d. Try again.\n",
				pid_nr(process_info->pid));
			goto validate_map_fail;
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Sync with fences on all the page tables. They implicitly depend on any
	 * move fences from amdgpu_vm_handle_moved above.
	 */
	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* The old eviction fence may be unsignaled if restore happens
	 * after a GPU reset or suspend/resume. Keep the old fence in that
	 * case. Otherwise release the old eviction fence and create new
	 * one, because fence only goes from unsignaled to signaled once
	 * and cannot be reused. Use context and mm from the old fence.
	 *
	 * If an old eviction fence signals after this check, that's OK.
	 * Anyone signaling an eviction fence must stop the queues first
	 * and schedule another restore worker.
	 */
	if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
		struct amdgpu_amdkfd_fence *new_fence =
			amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm,
				NULL);

		if (!new_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto validate_map_fail;
		}
		dma_fence_put(&process_info->eviction_fence->base);
		process_info->eviction_fence = new_fence;
		replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
	} else {
		WARN_ONCE(*ef != &process_info->eviction_fence->base,
			  "KFD eviction fence doesn't match KGD process_info");
	}

	/* Attach new eviction fence to all BOs except pinned ones */
	list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
		if (mem->bo->tbo.pin_count)
			continue;

		dma_resv_add_fence(mem->bo->tbo.base.resv,
				   &process_info->eviction_fence->base,
				   DMA_RESV_USAGE_BOOKKEEP);
	}
	/* Attach eviction fence to PD / PT BOs and DMABuf imports */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.bo;

		dma_resv_add_fence(bo->tbo.base.resv,
				   &process_info->eviction_fence->base,
				   DMA_RESV_USAGE_BOOKKEEP);
	}

validate_map_fail:
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	drm_exec_fini(&exec);
	mutex_unlock(&process_info->lock);
	return ret;
}

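/**
 * amdgpu_amdkfd_add_gws_to_process - Add the GWS BO to a KFD process
 * @info: amdkfd_process_info of the process
 * @gws: the amdgpu GWS BO, shared between amdgpu and amdkfd
 * @mem: returns the new KFD BO structure wrapping the GWS BO
 *
 * Validates the GWS BO in the GWS domain on first use and attaches the
 * process eviction fence so amdgpu and amdkfd can evict each other.
 */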
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->attachments);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared b/t amdgpu and amdkfd
	 * Add process eviction fence to bo so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	dma_resv_add_fence(gws_bo->tbo.base.resv,
			   &process_info->eviction_fence->base,
			   DMA_RESV_USAGE_BOOKKEEP);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

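/**
 * amdgpu_amdkfd_remove_gws_from_process - Remove the GWS BO from a KFD process
 * @info: amdkfd_process_info of the process
 * @mem: KFD BO structure returned by amdgpu_amdkfd_add_gws_to_process()
 *
 * Removes the eviction fence, drops the GWS BO reference and frees the
 * kgd_mem wrapper.
 */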
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		/* TODO: add BO back to validate_list? */
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
					    process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				  struct tile_config *config)
{
	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
		ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
		adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
		ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}

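/* Check whether the given kgd_mem is mapped in the VM of the given process */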
bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list) {
		if (entry->is_mapped && entry->bo_va->base.vm == vm)
			return true;
	}
	return false;
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);
	seq_printf(m, "System mem used %lldM out of %lluM\n",
		   (kfd_mem_limit.system_mem_used >> 20),
		   (kfd_mem_limit.max_system_mem_limit >> 20));
	seq_printf(m, "TTM mem used %lldM out of %lluM\n",
		   (kfd_mem_limit.ttm_mem_used >> 20),
		   (kfd_mem_limit.max_ttm_mem_limit >> 20));
	spin_unlock(&kfd_mem_limit.mem_limit_lock);

	return 0;
}

#endif