1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2014-2018 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 #include <linux/dma-buf.h> 24 #include <linux/list.h> 25 #include <linux/pagemap.h> 26 #include <linux/sched/mm.h> 27 #include <linux/sched/task.h> 28 #include <linux/fdtable.h> 29 #include <drm/ttm/ttm_tt.h> 30 31 #include <drm/drm_exec.h> 32 33 #include "amdgpu_object.h" 34 #include "amdgpu_gem.h" 35 #include "amdgpu_vm.h" 36 #include "amdgpu_hmm.h" 37 #include "amdgpu_amdkfd.h" 38 #include "amdgpu_dma_buf.h" 39 #include <uapi/linux/kfd_ioctl.h> 40 #include "amdgpu_xgmi.h" 41 #include "kfd_priv.h" 42 #include "kfd_smi_events.h" 43 44 /* Userptr restore delay, just long enough to allow consecutive VM 45 * changes to accumulate 46 */ 47 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1 48 #define AMDGPU_RESERVE_MEM_LIMIT (3UL << 29) 49 50 /* 51 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB 52 * BO chunk 53 */ 54 #define VRAM_AVAILABLITY_ALIGN (1 << 21) 55 56 /* Impose limit on how much memory KFD can use */ 57 static struct { 58 uint64_t max_system_mem_limit; 59 uint64_t max_ttm_mem_limit; 60 int64_t system_mem_used; 61 int64_t ttm_mem_used; 62 spinlock_t mem_limit_lock; 63 } kfd_mem_limit; 64 65 static const char * const domain_bit_to_string[] = { 66 "CPU", 67 "GTT", 68 "VRAM", 69 "GDS", 70 "GWS", 71 "OA" 72 }; 73 74 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1] 75 76 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); 77 78 static bool kfd_mem_is_attached(struct amdgpu_vm *avm, 79 struct kgd_mem *mem) 80 { 81 struct kfd_mem_attachment *entry; 82 83 list_for_each_entry(entry, &mem->attachments, list) 84 if (entry->bo_va->base.vm == avm) 85 return true; 86 87 return false; 88 } 89 90 /** 91 * reuse_dmamap() - Check whether adev can share the original 92 * userptr BO 93 * 94 * If both adev and bo_adev are in direct mapping or 95 * in the same iommu group, they can share the original BO. 96 * 97 * @adev: Device to which can or cannot share the original BO 98 * @bo_adev: Device to which allocated BO belongs to 99 * 100 * Return: returns true if adev can share original userptr BO, 101 * false otherwise. 
102 */ 103 static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) 104 { 105 return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) || 106 (adev->dev->iommu_group == bo_adev->dev->iommu_group); 107 } 108 109 /* Set memory usage limits. Current, limits are 110 * System (TTM + userptr) memory - 15/16th System RAM 111 * TTM memory - 3/8th System RAM 112 */ 113 void amdgpu_amdkfd_gpuvm_init_mem_limits(void) 114 { 115 struct sysinfo si; 116 uint64_t mem; 117 118 if (kfd_mem_limit.max_system_mem_limit) 119 return; 120 121 si_meminfo(&si); 122 mem = si.totalram - si.totalhigh; 123 mem *= si.mem_unit; 124 125 spin_lock_init(&kfd_mem_limit.mem_limit_lock); 126 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6); 127 if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT) 128 kfd_mem_limit.max_system_mem_limit >>= 1; 129 else 130 kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT; 131 132 kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT; 133 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", 134 (kfd_mem_limit.max_system_mem_limit >> 20), 135 (kfd_mem_limit.max_ttm_mem_limit >> 20)); 136 } 137 138 void amdgpu_amdkfd_reserve_system_mem(uint64_t size) 139 { 140 kfd_mem_limit.system_mem_used += size; 141 } 142 143 /* Estimate page table size needed to represent a given memory size 144 * 145 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory 146 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB 147 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize 148 * for 2MB pages for TLB efficiency. However, small allocations and 149 * fragmented system memory still need some 4KB pages. We choose a 150 * compromise that should work in most cases without reserving too 151 * much memory for page tables unnecessarily (factor 16K, >> 14). 152 */ 153 154 #define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM) 155 156 /** 157 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size 158 * of buffer. 159 * 160 * @adev: Device to which allocated BO belongs to 161 * @size: Size of buffer, in bytes, encapsulated by B0. This should be 162 * equivalent to amdgpu_bo_size(BO) 163 * @alloc_flag: Flag used in allocating a BO as noted above 164 * @xcp_id: xcp_id is used to get xcp from xcp manager, one xcp is 165 * managed as one compute node in driver for app 166 * 167 * Return: 168 * returns -ENOMEM in case of error, ZERO otherwise 169 */ 170 int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, 171 uint64_t size, u32 alloc_flag, int8_t xcp_id) 172 { 173 uint64_t reserved_for_pt = 174 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); 175 size_t system_mem_needed, ttm_mem_needed, vram_needed; 176 int ret = 0; 177 uint64_t vram_size = 0; 178 179 system_mem_needed = 0; 180 ttm_mem_needed = 0; 181 vram_needed = 0; 182 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 183 system_mem_needed = size; 184 ttm_mem_needed = size; 185 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 186 /* 187 * Conservatively round up the allocation requirement to 2 MB 188 * to avoid fragmentation caused by 4K allocations in the tail 189 * 2M BO chunk. 
190 */ 191 vram_needed = size; 192 /* 193 * For GFX 9.4.3, get the VRAM size from XCP structs 194 */ 195 if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) 196 return -EINVAL; 197 198 vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id); 199 if (adev->gmc.is_app_apu) { 200 system_mem_needed = size; 201 ttm_mem_needed = size; 202 } 203 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 204 system_mem_needed = size; 205 } else if (!(alloc_flag & 206 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 207 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { 208 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); 209 return -ENOMEM; 210 } 211 212 spin_lock(&kfd_mem_limit.mem_limit_lock); 213 214 if (kfd_mem_limit.system_mem_used + system_mem_needed > 215 kfd_mem_limit.max_system_mem_limit) 216 pr_debug("Set no_system_mem_limit=1 if using shared memory\n"); 217 218 if ((kfd_mem_limit.system_mem_used + system_mem_needed > 219 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || 220 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > 221 kfd_mem_limit.max_ttm_mem_limit) || 222 (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > 223 vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) { 224 ret = -ENOMEM; 225 goto release; 226 } 227 228 /* Update memory accounting by decreasing available system 229 * memory, TTM memory and GPU memory as computed above 230 */ 231 WARN_ONCE(vram_needed && !adev, 232 "adev reference can't be null when vram is used"); 233 if (adev && xcp_id >= 0) { 234 adev->kfd.vram_used[xcp_id] += vram_needed; 235 adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ? 236 vram_needed : 237 ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); 238 } 239 kfd_mem_limit.system_mem_used += system_mem_needed; 240 kfd_mem_limit.ttm_mem_used += ttm_mem_needed; 241 242 release: 243 spin_unlock(&kfd_mem_limit.mem_limit_lock); 244 return ret; 245 } 246 247 void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, 248 uint64_t size, u32 alloc_flag, int8_t xcp_id) 249 { 250 spin_lock(&kfd_mem_limit.mem_limit_lock); 251 252 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 253 kfd_mem_limit.system_mem_used -= size; 254 kfd_mem_limit.ttm_mem_used -= size; 255 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 256 WARN_ONCE(!adev, 257 "adev reference can't be null when alloc mem flags vram is set"); 258 if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) 259 goto release; 260 261 if (adev) { 262 adev->kfd.vram_used[xcp_id] -= size; 263 if (adev->gmc.is_app_apu) { 264 adev->kfd.vram_used_aligned[xcp_id] -= size; 265 kfd_mem_limit.system_mem_used -= size; 266 kfd_mem_limit.ttm_mem_used -= size; 267 } else { 268 adev->kfd.vram_used_aligned[xcp_id] -= 269 ALIGN(size, VRAM_AVAILABLITY_ALIGN); 270 } 271 } 272 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 273 kfd_mem_limit.system_mem_used -= size; 274 } else if (!(alloc_flag & 275 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 276 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { 277 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); 278 goto release; 279 } 280 WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0, 281 "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id); 282 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, 283 "KFD TTM memory accounting unbalanced"); 284 WARN_ONCE(kfd_mem_limit.system_mem_used < 0, 285 "KFD system memory accounting unbalanced"); 286 287 release: 288 spin_unlock(&kfd_mem_limit.mem_limit_lock); 289 } 290 291 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) 292 { 293 
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 294 u32 alloc_flags = bo->kfd_bo->alloc_flags; 295 u64 size = amdgpu_bo_size(bo); 296 297 amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags, 298 bo->xcp_id); 299 300 kfree(bo->kfd_bo); 301 } 302 303 /** 304 * create_dmamap_sg_bo() - Creates a amdgpu_bo object to reflect information 305 * about USERPTR or DOOREBELL or MMIO BO. 306 * 307 * @adev: Device for which dmamap BO is being created 308 * @mem: BO of peer device that is being DMA mapped. Provides parameters 309 * in building the dmamap BO 310 * @bo_out: Output parameter updated with handle of dmamap BO 311 */ 312 static int 313 create_dmamap_sg_bo(struct amdgpu_device *adev, 314 struct kgd_mem *mem, struct amdgpu_bo **bo_out) 315 { 316 struct drm_gem_object *gem_obj; 317 int ret; 318 uint64_t flags = 0; 319 320 ret = amdgpu_bo_reserve(mem->bo, false); 321 if (ret) 322 return ret; 323 324 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) 325 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT | 326 AMDGPU_GEM_CREATE_UNCACHED); 327 328 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1, 329 AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags, 330 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0); 331 332 amdgpu_bo_unreserve(mem->bo); 333 334 if (ret) { 335 pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret); 336 return -EINVAL; 337 } 338 339 *bo_out = gem_to_amdgpu_bo(gem_obj); 340 (*bo_out)->parent = amdgpu_bo_ref(mem->bo); 341 return ret; 342 } 343 344 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's 345 * reservation object. 346 * 347 * @bo: [IN] Remove eviction fence(s) from this BO 348 * @ef: [IN] This eviction fence is removed if it 349 * is present in the shared list. 350 * 351 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held. 352 */ 353 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, 354 struct amdgpu_amdkfd_fence *ef) 355 { 356 struct dma_fence *replacement; 357 358 if (!ef) 359 return -EINVAL; 360 361 /* TODO: Instead of block before we should use the fence of the page 362 * table update and TLB flush here directly. 
363 */ 364 replacement = dma_fence_get_stub(); 365 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, 366 replacement, DMA_RESV_USAGE_BOOKKEEP); 367 dma_fence_put(replacement); 368 return 0; 369 } 370 371 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) 372 { 373 struct amdgpu_bo *root = bo; 374 struct amdgpu_vm_bo_base *vm_bo; 375 struct amdgpu_vm *vm; 376 struct amdkfd_process_info *info; 377 struct amdgpu_amdkfd_fence *ef; 378 int ret; 379 380 /* we can always get vm_bo from root PD bo.*/ 381 while (root->parent) 382 root = root->parent; 383 384 vm_bo = root->vm_bo; 385 if (!vm_bo) 386 return 0; 387 388 vm = vm_bo->vm; 389 if (!vm) 390 return 0; 391 392 info = vm->process_info; 393 if (!info || !info->eviction_fence) 394 return 0; 395 396 ef = container_of(dma_fence_get(&info->eviction_fence->base), 397 struct amdgpu_amdkfd_fence, base); 398 399 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv)); 400 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef); 401 dma_resv_unlock(bo->tbo.base.resv); 402 403 dma_fence_put(&ef->base); 404 return ret; 405 } 406 407 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, 408 bool wait) 409 { 410 struct ttm_operation_ctx ctx = { false, false }; 411 int ret; 412 413 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), 414 "Called with userptr BO")) 415 return -EINVAL; 416 417 amdgpu_bo_placement_from_domain(bo, domain); 418 419 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 420 if (ret) 421 goto validate_fail; 422 if (wait) 423 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 424 425 validate_fail: 426 return ret; 427 } 428 429 int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, 430 uint32_t domain, 431 struct dma_fence *fence) 432 { 433 int ret = amdgpu_bo_reserve(bo, false); 434 435 if (ret) 436 return ret; 437 438 ret = amdgpu_amdkfd_bo_validate(bo, domain, true); 439 if (ret) 440 goto unreserve_out; 441 442 ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1); 443 if (ret) 444 goto unreserve_out; 445 446 dma_resv_add_fence(bo->tbo.base.resv, fence, 447 DMA_RESV_USAGE_BOOKKEEP); 448 449 unreserve_out: 450 amdgpu_bo_unreserve(bo); 451 452 return ret; 453 } 454 455 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) 456 { 457 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); 458 } 459 460 /* vm_validate_pt_pd_bos - Validate page table and directory BOs 461 * 462 * Page directories are not updated here because huge page handling 463 * during page table updates can invalidate page directory entries 464 * again. Page directories are only updated after updating page 465 * tables. 
466 */ 467 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm, 468 struct ww_acquire_ctx *ticket) 469 { 470 struct amdgpu_bo *pd = vm->root.bo; 471 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 472 int ret; 473 474 ret = amdgpu_vm_validate(adev, vm, ticket, 475 amdgpu_amdkfd_validate_vm_bo, NULL); 476 if (ret) { 477 pr_err("failed to validate PT BOs\n"); 478 return ret; 479 } 480 481 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); 482 483 return 0; 484 } 485 486 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) 487 { 488 struct amdgpu_bo *pd = vm->root.bo; 489 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 490 int ret; 491 492 ret = amdgpu_vm_update_pdes(adev, vm, false); 493 if (ret) 494 return ret; 495 496 return amdgpu_sync_fence(sync, vm->last_update); 497 } 498 499 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) 500 { 501 uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE | 502 AMDGPU_VM_MTYPE_DEFAULT; 503 504 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) 505 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; 506 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) 507 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; 508 509 return amdgpu_gem_va_map_flags(adev, mapping_flags); 510 } 511 512 /** 513 * create_sg_table() - Create an sg_table for a contiguous DMA addr range 514 * @addr: The starting address to point to 515 * @size: Size of memory area in bytes being pointed to 516 * 517 * Allocates an instance of sg_table and initializes it to point to memory 518 * area specified by input parameters. The address used to build is assumed 519 * to be DMA mapped, if needed. 520 * 521 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table 522 * because they are physically contiguous. 523 * 524 * Return: Initialized instance of SG Table or NULL 525 */ 526 static struct sg_table *create_sg_table(uint64_t addr, uint32_t size) 527 { 528 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL); 529 530 if (!sg) 531 return NULL; 532 if (sg_alloc_table(sg, 1, GFP_KERNEL)) { 533 kfree(sg); 534 return NULL; 535 } 536 sg_dma_address(sg->sgl) = addr; 537 sg->sgl->length = size; 538 #ifdef CONFIG_NEED_SG_DMA_LENGTH 539 sg->sgl->dma_length = size; 540 #endif 541 return sg; 542 } 543 544 static int 545 kfd_mem_dmamap_userptr(struct kgd_mem *mem, 546 struct kfd_mem_attachment *attachment) 547 { 548 enum dma_data_direction direction = 549 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
550 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 551 struct ttm_operation_ctx ctx = {.interruptible = true}; 552 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 553 struct amdgpu_device *adev = attachment->adev; 554 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; 555 struct ttm_tt *ttm = bo->tbo.ttm; 556 int ret; 557 558 if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) 559 return -EINVAL; 560 561 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); 562 if (unlikely(!ttm->sg)) 563 return -ENOMEM; 564 565 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ 566 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, 567 ttm->num_pages, 0, 568 (u64)ttm->num_pages << PAGE_SHIFT, 569 GFP_KERNEL); 570 if (unlikely(ret)) 571 goto free_sg; 572 573 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); 574 if (unlikely(ret)) 575 goto release_sg; 576 577 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 578 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 579 if (ret) 580 goto unmap_sg; 581 582 return 0; 583 584 unmap_sg: 585 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); 586 release_sg: 587 pr_err("DMA map userptr failed: %d\n", ret); 588 sg_free_table(ttm->sg); 589 free_sg: 590 kfree(ttm->sg); 591 ttm->sg = NULL; 592 return ret; 593 } 594 595 static int 596 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) 597 { 598 struct ttm_operation_ctx ctx = {.interruptible = true}; 599 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 600 int ret; 601 602 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 603 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 604 if (ret) 605 return ret; 606 607 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 608 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 609 } 610 611 /** 612 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO 613 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device 614 * @attachment: Virtual address attachment of the BO on accessing device 615 * 616 * An access request from the device that owns DOORBELL does not require DMA mapping. 617 * This is because the request doesn't go through PCIe root complex i.e. it instead 618 * loops back. The need to DMA map arises only when accessing peer device's DOORBELL 619 * 620 * In contrast, all access requests for MMIO need to be DMA mapped without regard to 621 * device ownership. This is because access requests for MMIO go through PCIe root 622 * complex. 623 * 624 * This is accomplished in two steps: 625 * - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used 626 * in updating requesting device's page table 627 * - Signal TTM to mark memory pointed to by requesting device's BO as GPU 628 * accessible. 
This allows an update of requesting device's page table 629 * with entries associated with DOOREBELL or MMIO memory 630 * 631 * This method is invoked in the following contexts: 632 * - Mapping of DOORBELL or MMIO BO of same or peer device 633 * - Validating an evicted DOOREBELL or MMIO BO on device seeking access 634 * 635 * Return: ZERO if successful, NON-ZERO otherwise 636 */ 637 static int 638 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem, 639 struct kfd_mem_attachment *attachment) 640 { 641 struct ttm_operation_ctx ctx = {.interruptible = true}; 642 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 643 struct amdgpu_device *adev = attachment->adev; 644 struct ttm_tt *ttm = bo->tbo.ttm; 645 enum dma_data_direction dir; 646 dma_addr_t dma_addr; 647 bool mmio; 648 int ret; 649 650 /* Expect SG Table of dmapmap BO to be NULL */ 651 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP); 652 if (unlikely(ttm->sg)) { 653 pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio); 654 return -EINVAL; 655 } 656 657 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 658 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 659 dma_addr = mem->bo->tbo.sg->sgl->dma_address; 660 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length); 661 pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr); 662 dma_addr = dma_map_resource(adev->dev, dma_addr, 663 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); 664 ret = dma_mapping_error(adev->dev, dma_addr); 665 if (unlikely(ret)) 666 return ret; 667 pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr); 668 669 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length); 670 if (unlikely(!ttm->sg)) { 671 ret = -ENOMEM; 672 goto unmap_sg; 673 } 674 675 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 676 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 677 if (unlikely(ret)) 678 goto free_sg; 679 680 return ret; 681 682 free_sg: 683 sg_free_table(ttm->sg); 684 kfree(ttm->sg); 685 ttm->sg = NULL; 686 unmap_sg: 687 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length, 688 dir, DMA_ATTR_SKIP_CPU_SYNC); 689 return ret; 690 } 691 692 static int 693 kfd_mem_dmamap_attachment(struct kgd_mem *mem, 694 struct kfd_mem_attachment *attachment) 695 { 696 switch (attachment->type) { 697 case KFD_MEM_ATT_SHARED: 698 return 0; 699 case KFD_MEM_ATT_USERPTR: 700 return kfd_mem_dmamap_userptr(mem, attachment); 701 case KFD_MEM_ATT_DMABUF: 702 return kfd_mem_dmamap_dmabuf(attachment); 703 case KFD_MEM_ATT_SG: 704 return kfd_mem_dmamap_sg_bo(mem, attachment); 705 default: 706 WARN_ON_ONCE(1); 707 } 708 return -EINVAL; 709 } 710 711 static void 712 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, 713 struct kfd_mem_attachment *attachment) 714 { 715 enum dma_data_direction direction = 716 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
717 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 718 struct ttm_operation_ctx ctx = {.interruptible = false}; 719 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 720 struct amdgpu_device *adev = attachment->adev; 721 struct ttm_tt *ttm = bo->tbo.ttm; 722 723 if (unlikely(!ttm->sg)) 724 return; 725 726 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 727 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 728 729 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); 730 sg_free_table(ttm->sg); 731 kfree(ttm->sg); 732 ttm->sg = NULL; 733 } 734 735 static void 736 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment) 737 { 738 /* This is a no-op. We don't want to trigger eviction fences when 739 * unmapping DMABufs. Therefore the invalidation (moving to system 740 * domain) is done in kfd_mem_dmamap_dmabuf. 741 */ 742 } 743 744 /** 745 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO 746 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device 747 * @attachment: Virtual address attachment of the BO on accessing device 748 * 749 * The method performs following steps: 750 * - Signal TTM to mark memory pointed to by BO as GPU inaccessible 751 * - Free SG Table that is used to encapsulate DMA mapped memory of 752 * peer device's DOORBELL or MMIO memory 753 * 754 * This method is invoked in the following contexts: 755 * UNMapping of DOORBELL or MMIO BO on a device having access to its memory 756 * Eviction of DOOREBELL or MMIO BO on device having access to its memory 757 * 758 * Return: void 759 */ 760 static void 761 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem, 762 struct kfd_mem_attachment *attachment) 763 { 764 struct ttm_operation_ctx ctx = {.interruptible = true}; 765 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 766 struct amdgpu_device *adev = attachment->adev; 767 struct ttm_tt *ttm = bo->tbo.ttm; 768 enum dma_data_direction dir; 769 770 if (unlikely(!ttm->sg)) { 771 pr_debug("SG Table of BO is NULL"); 772 return; 773 } 774 775 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 776 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 777 778 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 779 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 780 dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address, 781 ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); 782 sg_free_table(ttm->sg); 783 kfree(ttm->sg); 784 ttm->sg = NULL; 785 bo->tbo.sg = NULL; 786 } 787 788 static void 789 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, 790 struct kfd_mem_attachment *attachment) 791 { 792 switch (attachment->type) { 793 case KFD_MEM_ATT_SHARED: 794 break; 795 case KFD_MEM_ATT_USERPTR: 796 kfd_mem_dmaunmap_userptr(mem, attachment); 797 break; 798 case KFD_MEM_ATT_DMABUF: 799 kfd_mem_dmaunmap_dmabuf(attachment); 800 break; 801 case KFD_MEM_ATT_SG: 802 kfd_mem_dmaunmap_sg_bo(mem, attachment); 803 break; 804 default: 805 WARN_ON_ONCE(1); 806 } 807 } 808 809 static int kfd_mem_export_dmabuf(struct kgd_mem *mem) 810 { 811 if (!mem->dmabuf) { 812 struct amdgpu_device *bo_adev; 813 struct dma_buf *dmabuf; 814 int r, fd; 815 816 bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 817 r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file, 818 mem->gem_handle, 819 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
820 DRM_RDWR : 0, &fd); 821 if (r) 822 return r; 823 dmabuf = dma_buf_get(fd); 824 close_fd(fd); 825 if (WARN_ON_ONCE(IS_ERR(dmabuf))) 826 return PTR_ERR(dmabuf); 827 mem->dmabuf = dmabuf; 828 } 829 830 return 0; 831 } 832 833 static int 834 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, 835 struct amdgpu_bo **bo) 836 { 837 struct drm_gem_object *gobj; 838 int ret; 839 840 ret = kfd_mem_export_dmabuf(mem); 841 if (ret) 842 return ret; 843 844 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); 845 if (IS_ERR(gobj)) 846 return PTR_ERR(gobj); 847 848 *bo = gem_to_amdgpu_bo(gobj); 849 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; 850 851 return 0; 852 } 853 854 /* kfd_mem_attach - Add a BO to a VM 855 * 856 * Everything that needs to bo done only once when a BO is first added 857 * to a VM. It can later be mapped and unmapped many times without 858 * repeating these steps. 859 * 860 * 0. Create BO for DMA mapping, if needed 861 * 1. Allocate and initialize BO VA entry data structure 862 * 2. Add BO to the VM 863 * 3. Determine ASIC-specific PTE flags 864 * 4. Alloc page tables and directories if needed 865 * 4a. Validate new page tables and directories 866 */ 867 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, 868 struct amdgpu_vm *vm, bool is_aql) 869 { 870 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 871 unsigned long bo_size = mem->bo->tbo.base.size; 872 uint64_t va = mem->va; 873 struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; 874 struct amdgpu_bo *bo[2] = {NULL, NULL}; 875 struct amdgpu_bo_va *bo_va; 876 bool same_hive = false; 877 int i, ret; 878 879 if (!va) { 880 pr_err("Invalid VA when adding BO to VM\n"); 881 return -EINVAL; 882 } 883 884 /* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices 885 * 886 * The access path of MMIO and DOORBELL BOs of is always over PCIe. 887 * In contrast the access path of VRAM BOs depens upon the type of 888 * link that connects the peer device. Access over PCIe is allowed 889 * if peer device has large BAR. 
In contrast, access over xGMI is 890 * allowed for both small and large BAR configurations of peer device 891 */ 892 if ((adev != bo_adev && !adev->gmc.is_app_apu) && 893 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || 894 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || 895 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { 896 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM) 897 same_hive = amdgpu_xgmi_same_hive(adev, bo_adev); 898 if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev)) 899 return -EINVAL; 900 } 901 902 for (i = 0; i <= is_aql; i++) { 903 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL); 904 if (unlikely(!attachment[i])) { 905 ret = -ENOMEM; 906 goto unwind; 907 } 908 909 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, 910 va + bo_size, vm); 911 912 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || 913 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) || 914 (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) || 915 same_hive) { 916 /* Mappings on the local GPU, or VRAM mappings in the 917 * local hive, or userptr, or GTT mapping can reuse dma map 918 * address space share the original BO 919 */ 920 attachment[i]->type = KFD_MEM_ATT_SHARED; 921 bo[i] = mem->bo; 922 drm_gem_object_get(&bo[i]->tbo.base); 923 } else if (i > 0) { 924 /* Multiple mappings on the same GPU share the BO */ 925 attachment[i]->type = KFD_MEM_ATT_SHARED; 926 bo[i] = bo[0]; 927 drm_gem_object_get(&bo[i]->tbo.base); 928 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { 929 /* Create an SG BO to DMA-map userptrs on other GPUs */ 930 attachment[i]->type = KFD_MEM_ATT_USERPTR; 931 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); 932 if (ret) 933 goto unwind; 934 /* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */ 935 } else if (mem->bo->tbo.type == ttm_bo_type_sg) { 936 WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL || 937 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP), 938 "Handing invalid SG BO in ATTACH request"); 939 attachment[i]->type = KFD_MEM_ATT_SG; 940 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); 941 if (ret) 942 goto unwind; 943 /* Enable acces to GTT and VRAM BOs of peer devices */ 944 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT || 945 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) { 946 attachment[i]->type = KFD_MEM_ATT_DMABUF; 947 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); 948 if (ret) 949 goto unwind; 950 pr_debug("Employ DMABUF mechanism to enable peer GPU access\n"); 951 } else { 952 WARN_ONCE(true, "Handling invalid ATTACH request"); 953 ret = -EINVAL; 954 goto unwind; 955 } 956 957 /* Add BO to VM internal data structures */ 958 ret = amdgpu_bo_reserve(bo[i], false); 959 if (ret) { 960 pr_debug("Unable to reserve BO during memory attach"); 961 goto unwind; 962 } 963 bo_va = amdgpu_vm_bo_find(vm, bo[i]); 964 if (!bo_va) 965 bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); 966 else 967 ++bo_va->ref_count; 968 attachment[i]->bo_va = bo_va; 969 amdgpu_bo_unreserve(bo[i]); 970 if (unlikely(!attachment[i]->bo_va)) { 971 ret = -ENOMEM; 972 pr_err("Failed to add BO object to VM. 
ret == %d\n", 973 ret); 974 goto unwind; 975 } 976 attachment[i]->va = va; 977 attachment[i]->pte_flags = get_pte_flags(adev, mem); 978 attachment[i]->adev = adev; 979 list_add(&attachment[i]->list, &mem->attachments); 980 981 va += bo_size; 982 } 983 984 return 0; 985 986 unwind: 987 for (; i >= 0; i--) { 988 if (!attachment[i]) 989 continue; 990 if (attachment[i]->bo_va) { 991 amdgpu_bo_reserve(bo[i], true); 992 if (--attachment[i]->bo_va->ref_count == 0) 993 amdgpu_vm_bo_del(adev, attachment[i]->bo_va); 994 amdgpu_bo_unreserve(bo[i]); 995 list_del(&attachment[i]->list); 996 } 997 if (bo[i]) 998 drm_gem_object_put(&bo[i]->tbo.base); 999 kfree(attachment[i]); 1000 } 1001 return ret; 1002 } 1003 1004 static void kfd_mem_detach(struct kfd_mem_attachment *attachment) 1005 { 1006 struct amdgpu_bo *bo = attachment->bo_va->base.bo; 1007 1008 pr_debug("\t remove VA 0x%llx in entry %p\n", 1009 attachment->va, attachment); 1010 if (--attachment->bo_va->ref_count == 0) 1011 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va); 1012 drm_gem_object_put(&bo->tbo.base); 1013 list_del(&attachment->list); 1014 kfree(attachment); 1015 } 1016 1017 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, 1018 struct amdkfd_process_info *process_info, 1019 bool userptr) 1020 { 1021 mutex_lock(&process_info->lock); 1022 if (userptr) 1023 list_add_tail(&mem->validate_list, 1024 &process_info->userptr_valid_list); 1025 else 1026 list_add_tail(&mem->validate_list, &process_info->kfd_bo_list); 1027 mutex_unlock(&process_info->lock); 1028 } 1029 1030 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, 1031 struct amdkfd_process_info *process_info) 1032 { 1033 mutex_lock(&process_info->lock); 1034 list_del(&mem->validate_list); 1035 mutex_unlock(&process_info->lock); 1036 } 1037 1038 /* Initializes user pages. It registers the MMU notifier and validates 1039 * the userptr BO in the GTT domain. 1040 * 1041 * The BO must already be on the userptr_valid_list. Otherwise an 1042 * eviction and restore may happen that leaves the new BO unmapped 1043 * with the user mode queues running. 1044 * 1045 * Takes the process_info->lock to protect against concurrent restore 1046 * workers. 1047 * 1048 * Returns 0 for success, negative errno for errors. 1049 */ 1050 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, 1051 bool criu_resume) 1052 { 1053 struct amdkfd_process_info *process_info = mem->process_info; 1054 struct amdgpu_bo *bo = mem->bo; 1055 struct ttm_operation_ctx ctx = { true, false }; 1056 struct hmm_range *range; 1057 int ret = 0; 1058 1059 mutex_lock(&process_info->lock); 1060 1061 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0); 1062 if (ret) { 1063 pr_err("%s: Failed to set userptr: %d\n", __func__, ret); 1064 goto out; 1065 } 1066 1067 ret = amdgpu_hmm_register(bo, user_addr); 1068 if (ret) { 1069 pr_err("%s: Failed to register MMU notifier: %d\n", 1070 __func__, ret); 1071 goto out; 1072 } 1073 1074 if (criu_resume) { 1075 /* 1076 * During a CRIU restore operation, the userptr buffer objects 1077 * will be validated in the restore_userptr_work worker at a 1078 * later stage when it is scheduled by another ioctl called by 1079 * CRIU master process for the target pid for restore. 
1080 */ 1081 mutex_lock(&process_info->notifier_lock); 1082 mem->invalid++; 1083 mutex_unlock(&process_info->notifier_lock); 1084 mutex_unlock(&process_info->lock); 1085 return 0; 1086 } 1087 1088 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range); 1089 if (ret) { 1090 pr_err("%s: Failed to get user pages: %d\n", __func__, ret); 1091 goto unregister_out; 1092 } 1093 1094 ret = amdgpu_bo_reserve(bo, true); 1095 if (ret) { 1096 pr_err("%s: Failed to reserve BO\n", __func__); 1097 goto release_out; 1098 } 1099 amdgpu_bo_placement_from_domain(bo, mem->domain); 1100 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1101 if (ret) 1102 pr_err("%s: failed to validate BO\n", __func__); 1103 amdgpu_bo_unreserve(bo); 1104 1105 release_out: 1106 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); 1107 unregister_out: 1108 if (ret) 1109 amdgpu_hmm_unregister(bo); 1110 out: 1111 mutex_unlock(&process_info->lock); 1112 return ret; 1113 } 1114 1115 /* Reserving a BO and its page table BOs must happen atomically to 1116 * avoid deadlocks. Some operations update multiple VMs at once. Track 1117 * all the reservation info in a context structure. Optionally a sync 1118 * object can track VM updates. 1119 */ 1120 struct bo_vm_reservation_context { 1121 /* DRM execution context for the reservation */ 1122 struct drm_exec exec; 1123 /* Number of VMs reserved */ 1124 unsigned int n_vms; 1125 /* Pointer to sync object */ 1126 struct amdgpu_sync *sync; 1127 }; 1128 1129 enum bo_vm_match { 1130 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */ 1131 BO_VM_MAPPED, /* Match VMs where a BO is mapped */ 1132 BO_VM_ALL, /* Match all VMs a BO was added to */ 1133 }; 1134 1135 /** 1136 * reserve_bo_and_vm - reserve a BO and a VM unconditionally. 1137 * @mem: KFD BO structure. 1138 * @vm: the VM to reserve. 1139 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 1140 */ 1141 static int reserve_bo_and_vm(struct kgd_mem *mem, 1142 struct amdgpu_vm *vm, 1143 struct bo_vm_reservation_context *ctx) 1144 { 1145 struct amdgpu_bo *bo = mem->bo; 1146 int ret; 1147 1148 WARN_ON(!vm); 1149 1150 ctx->n_vms = 1; 1151 ctx->sync = &mem->sync; 1152 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 1153 drm_exec_until_all_locked(&ctx->exec) { 1154 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); 1155 drm_exec_retry_on_contention(&ctx->exec); 1156 if (unlikely(ret)) 1157 goto error; 1158 1159 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); 1160 drm_exec_retry_on_contention(&ctx->exec); 1161 if (unlikely(ret)) 1162 goto error; 1163 } 1164 return 0; 1165 1166 error: 1167 pr_err("Failed to reserve buffers in ttm.\n"); 1168 drm_exec_fini(&ctx->exec); 1169 return ret; 1170 } 1171 1172 /** 1173 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally 1174 * @mem: KFD BO structure. 1175 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO 1176 * is used. Otherwise, a single VM associated with the BO. 1177 * @map_type: the mapping status that will be used to filter the VMs. 1178 * @ctx: the struct that will be used in unreserve_bo_and_vms(). 1179 * 1180 * Returns 0 for success, negative for failure. 
1181 */ 1182 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, 1183 struct amdgpu_vm *vm, enum bo_vm_match map_type, 1184 struct bo_vm_reservation_context *ctx) 1185 { 1186 struct kfd_mem_attachment *entry; 1187 struct amdgpu_bo *bo = mem->bo; 1188 int ret; 1189 1190 ctx->sync = &mem->sync; 1191 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 1192 drm_exec_until_all_locked(&ctx->exec) { 1193 ctx->n_vms = 0; 1194 list_for_each_entry(entry, &mem->attachments, list) { 1195 if ((vm && vm != entry->bo_va->base.vm) || 1196 (entry->is_mapped != map_type 1197 && map_type != BO_VM_ALL)) 1198 continue; 1199 1200 ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm, 1201 &ctx->exec, 2); 1202 drm_exec_retry_on_contention(&ctx->exec); 1203 if (unlikely(ret)) 1204 goto error; 1205 ++ctx->n_vms; 1206 } 1207 1208 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); 1209 drm_exec_retry_on_contention(&ctx->exec); 1210 if (unlikely(ret)) 1211 goto error; 1212 } 1213 return 0; 1214 1215 error: 1216 pr_err("Failed to reserve buffers in ttm.\n"); 1217 drm_exec_fini(&ctx->exec); 1218 return ret; 1219 } 1220 1221 /** 1222 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context 1223 * @ctx: Reservation context to unreserve 1224 * @wait: Optionally wait for a sync object representing pending VM updates 1225 * @intr: Whether the wait is interruptible 1226 * 1227 * Also frees any resources allocated in 1228 * reserve_bo_and_(cond_)vm(s). Returns the status from 1229 * amdgpu_sync_wait. 1230 */ 1231 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, 1232 bool wait, bool intr) 1233 { 1234 int ret = 0; 1235 1236 if (wait) 1237 ret = amdgpu_sync_wait(ctx->sync, intr); 1238 1239 drm_exec_fini(&ctx->exec); 1240 ctx->sync = NULL; 1241 return ret; 1242 } 1243 1244 static void unmap_bo_from_gpuvm(struct kgd_mem *mem, 1245 struct kfd_mem_attachment *entry, 1246 struct amdgpu_sync *sync) 1247 { 1248 struct amdgpu_bo_va *bo_va = entry->bo_va; 1249 struct amdgpu_device *adev = entry->adev; 1250 struct amdgpu_vm *vm = bo_va->base.vm; 1251 1252 amdgpu_vm_bo_unmap(adev, bo_va, entry->va); 1253 1254 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); 1255 1256 amdgpu_sync_fence(sync, bo_va->last_pt_update); 1257 } 1258 1259 static int update_gpuvm_pte(struct kgd_mem *mem, 1260 struct kfd_mem_attachment *entry, 1261 struct amdgpu_sync *sync) 1262 { 1263 struct amdgpu_bo_va *bo_va = entry->bo_va; 1264 struct amdgpu_device *adev = entry->adev; 1265 int ret; 1266 1267 ret = kfd_mem_dmamap_attachment(mem, entry); 1268 if (ret) 1269 return ret; 1270 1271 /* Update the page tables */ 1272 ret = amdgpu_vm_bo_update(adev, bo_va, false); 1273 if (ret) { 1274 pr_err("amdgpu_vm_bo_update failed\n"); 1275 return ret; 1276 } 1277 1278 return amdgpu_sync_fence(sync, bo_va->last_pt_update); 1279 } 1280 1281 static int map_bo_to_gpuvm(struct kgd_mem *mem, 1282 struct kfd_mem_attachment *entry, 1283 struct amdgpu_sync *sync, 1284 bool no_update_pte) 1285 { 1286 int ret; 1287 1288 /* Set virtual address for the allocation */ 1289 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, 1290 amdgpu_bo_size(entry->bo_va->base.bo), 1291 entry->pte_flags); 1292 if (ret) { 1293 pr_err("Failed to map VA 0x%llx in vm. 
ret %d\n", 1294 entry->va, ret); 1295 return ret; 1296 } 1297 1298 if (no_update_pte) 1299 return 0; 1300 1301 ret = update_gpuvm_pte(mem, entry, sync); 1302 if (ret) { 1303 pr_err("update_gpuvm_pte() failed\n"); 1304 goto update_gpuvm_pte_failed; 1305 } 1306 1307 return 0; 1308 1309 update_gpuvm_pte_failed: 1310 unmap_bo_from_gpuvm(mem, entry, sync); 1311 kfd_mem_dmaunmap_attachment(mem, entry); 1312 return ret; 1313 } 1314 1315 static int process_validate_vms(struct amdkfd_process_info *process_info, 1316 struct ww_acquire_ctx *ticket) 1317 { 1318 struct amdgpu_vm *peer_vm; 1319 int ret; 1320 1321 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1322 vm_list_node) { 1323 ret = vm_validate_pt_pd_bos(peer_vm, ticket); 1324 if (ret) 1325 return ret; 1326 } 1327 1328 return 0; 1329 } 1330 1331 static int process_sync_pds_resv(struct amdkfd_process_info *process_info, 1332 struct amdgpu_sync *sync) 1333 { 1334 struct amdgpu_vm *peer_vm; 1335 int ret; 1336 1337 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1338 vm_list_node) { 1339 struct amdgpu_bo *pd = peer_vm->root.bo; 1340 1341 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, 1342 AMDGPU_SYNC_NE_OWNER, 1343 AMDGPU_FENCE_OWNER_KFD); 1344 if (ret) 1345 return ret; 1346 } 1347 1348 return 0; 1349 } 1350 1351 static int process_update_pds(struct amdkfd_process_info *process_info, 1352 struct amdgpu_sync *sync) 1353 { 1354 struct amdgpu_vm *peer_vm; 1355 int ret; 1356 1357 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1358 vm_list_node) { 1359 ret = vm_update_pds(peer_vm, sync); 1360 if (ret) 1361 return ret; 1362 } 1363 1364 return 0; 1365 } 1366 1367 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, 1368 struct dma_fence **ef) 1369 { 1370 struct amdkfd_process_info *info = NULL; 1371 int ret; 1372 1373 if (!*process_info) { 1374 info = kzalloc(sizeof(*info), GFP_KERNEL); 1375 if (!info) 1376 return -ENOMEM; 1377 1378 mutex_init(&info->lock); 1379 mutex_init(&info->notifier_lock); 1380 INIT_LIST_HEAD(&info->vm_list_head); 1381 INIT_LIST_HEAD(&info->kfd_bo_list); 1382 INIT_LIST_HEAD(&info->userptr_valid_list); 1383 INIT_LIST_HEAD(&info->userptr_inval_list); 1384 1385 info->eviction_fence = 1386 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), 1387 current->mm, 1388 NULL); 1389 if (!info->eviction_fence) { 1390 pr_err("Failed to create eviction fence\n"); 1391 ret = -ENOMEM; 1392 goto create_evict_fence_fail; 1393 } 1394 1395 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); 1396 INIT_DELAYED_WORK(&info->restore_userptr_work, 1397 amdgpu_amdkfd_restore_userptr_worker); 1398 1399 *process_info = info; 1400 } 1401 1402 vm->process_info = *process_info; 1403 1404 /* Validate page directory and attach eviction fence */ 1405 ret = amdgpu_bo_reserve(vm->root.bo, true); 1406 if (ret) 1407 goto reserve_pd_fail; 1408 ret = vm_validate_pt_pd_bos(vm, NULL); 1409 if (ret) { 1410 pr_err("validate_pt_pd_bos() failed\n"); 1411 goto validate_pd_fail; 1412 } 1413 ret = amdgpu_bo_sync_wait(vm->root.bo, 1414 AMDGPU_FENCE_OWNER_KFD, false); 1415 if (ret) 1416 goto wait_pd_fail; 1417 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1); 1418 if (ret) 1419 goto reserve_shared_fail; 1420 dma_resv_add_fence(vm->root.bo->tbo.base.resv, 1421 &vm->process_info->eviction_fence->base, 1422 DMA_RESV_USAGE_BOOKKEEP); 1423 amdgpu_bo_unreserve(vm->root.bo); 1424 1425 /* Update process info */ 1426 mutex_lock(&vm->process_info->lock); 1427 list_add_tail(&vm->vm_list_node, 1428 
&(vm->process_info->vm_list_head)); 1429 vm->process_info->n_vms++; 1430 1431 *ef = dma_fence_get(&vm->process_info->eviction_fence->base); 1432 mutex_unlock(&vm->process_info->lock); 1433 1434 return 0; 1435 1436 reserve_shared_fail: 1437 wait_pd_fail: 1438 validate_pd_fail: 1439 amdgpu_bo_unreserve(vm->root.bo); 1440 reserve_pd_fail: 1441 vm->process_info = NULL; 1442 if (info) { 1443 dma_fence_put(&info->eviction_fence->base); 1444 *process_info = NULL; 1445 put_pid(info->pid); 1446 create_evict_fence_fail: 1447 mutex_destroy(&info->lock); 1448 mutex_destroy(&info->notifier_lock); 1449 kfree(info); 1450 } 1451 return ret; 1452 } 1453 1454 /** 1455 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using following criteria 1456 * @bo: Handle of buffer object being pinned 1457 * @domain: Domain into which BO should be pinned 1458 * 1459 * - USERPTR BOs are UNPINNABLE and will return error 1460 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their 1461 * PIN count incremented. It is valid to PIN a BO multiple times 1462 * 1463 * Return: ZERO if successful in pinning, Non-Zero in case of error. 1464 */ 1465 static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) 1466 { 1467 int ret = 0; 1468 1469 ret = amdgpu_bo_reserve(bo, false); 1470 if (unlikely(ret)) 1471 return ret; 1472 1473 ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0); 1474 if (ret) 1475 pr_err("Error in Pinning BO to domain: %d\n", domain); 1476 1477 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); 1478 amdgpu_bo_unreserve(bo); 1479 1480 return ret; 1481 } 1482 1483 /** 1484 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins BO using following criteria 1485 * @bo: Handle of buffer object being unpinned 1486 * 1487 * - Is a illegal request for USERPTR BOs and is ignored 1488 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their 1489 * PIN count decremented. Calls to UNPIN must balance calls to PIN 1490 */ 1491 static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) 1492 { 1493 int ret = 0; 1494 1495 ret = amdgpu_bo_reserve(bo, false); 1496 if (unlikely(ret)) 1497 return; 1498 1499 amdgpu_bo_unpin(bo); 1500 amdgpu_bo_unreserve(bo); 1501 } 1502 1503 int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev, 1504 struct amdgpu_vm *avm, u32 pasid) 1505 1506 { 1507 int ret; 1508 1509 /* Free the original amdgpu allocated pasid, 1510 * will be replaced with kfd allocated pasid. 1511 */ 1512 if (avm->pasid) { 1513 amdgpu_pasid_free(avm->pasid); 1514 amdgpu_vm_set_pasid(adev, avm, 0); 1515 } 1516 1517 ret = amdgpu_vm_set_pasid(adev, avm, pasid); 1518 if (ret) 1519 return ret; 1520 1521 return 0; 1522 } 1523 1524 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, 1525 struct amdgpu_vm *avm, 1526 void **process_info, 1527 struct dma_fence **ef) 1528 { 1529 int ret; 1530 1531 /* Already a compute VM? 
*/ 1532 if (avm->process_info) 1533 return -EINVAL; 1534 1535 /* Convert VM into a compute VM */ 1536 ret = amdgpu_vm_make_compute(adev, avm); 1537 if (ret) 1538 return ret; 1539 1540 /* Initialize KFD part of the VM and process info */ 1541 ret = init_kfd_vm(avm, process_info, ef); 1542 if (ret) 1543 return ret; 1544 1545 amdgpu_vm_set_task_info(avm); 1546 1547 return 0; 1548 } 1549 1550 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, 1551 struct amdgpu_vm *vm) 1552 { 1553 struct amdkfd_process_info *process_info = vm->process_info; 1554 1555 if (!process_info) 1556 return; 1557 1558 /* Update process info */ 1559 mutex_lock(&process_info->lock); 1560 process_info->n_vms--; 1561 list_del(&vm->vm_list_node); 1562 mutex_unlock(&process_info->lock); 1563 1564 vm->process_info = NULL; 1565 1566 /* Release per-process resources when last compute VM is destroyed */ 1567 if (!process_info->n_vms) { 1568 WARN_ON(!list_empty(&process_info->kfd_bo_list)); 1569 WARN_ON(!list_empty(&process_info->userptr_valid_list)); 1570 WARN_ON(!list_empty(&process_info->userptr_inval_list)); 1571 1572 dma_fence_put(&process_info->eviction_fence->base); 1573 cancel_delayed_work_sync(&process_info->restore_userptr_work); 1574 put_pid(process_info->pid); 1575 mutex_destroy(&process_info->lock); 1576 mutex_destroy(&process_info->notifier_lock); 1577 kfree(process_info); 1578 } 1579 } 1580 1581 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, 1582 void *drm_priv) 1583 { 1584 struct amdgpu_vm *avm; 1585 1586 if (WARN_ON(!adev || !drm_priv)) 1587 return; 1588 1589 avm = drm_priv_to_vm(drm_priv); 1590 1591 pr_debug("Releasing process vm %p\n", avm); 1592 1593 /* The original pasid of amdgpu vm has already been 1594 * released during making a amdgpu vm to a compute vm 1595 * The current pasid is managed by kfd and will be 1596 * released on kfd process destroy. Set amdgpu pasid 1597 * to 0 to avoid duplicate release. 
1598 */ 1599 amdgpu_vm_release_compute(adev, avm); 1600 } 1601 1602 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) 1603 { 1604 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1605 struct amdgpu_bo *pd = avm->root.bo; 1606 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 1607 1608 if (adev->asic_type < CHIP_VEGA10) 1609 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT; 1610 return avm->pd_phys_addr; 1611 } 1612 1613 void amdgpu_amdkfd_block_mmu_notifications(void *p) 1614 { 1615 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p; 1616 1617 mutex_lock(&pinfo->lock); 1618 WRITE_ONCE(pinfo->block_mmu_notifications, true); 1619 mutex_unlock(&pinfo->lock); 1620 } 1621 1622 int amdgpu_amdkfd_criu_resume(void *p) 1623 { 1624 int ret = 0; 1625 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p; 1626 1627 mutex_lock(&pinfo->lock); 1628 pr_debug("scheduling work\n"); 1629 mutex_lock(&pinfo->notifier_lock); 1630 pinfo->evicted_bos++; 1631 mutex_unlock(&pinfo->notifier_lock); 1632 if (!READ_ONCE(pinfo->block_mmu_notifications)) { 1633 ret = -EINVAL; 1634 goto out_unlock; 1635 } 1636 WRITE_ONCE(pinfo->block_mmu_notifications, false); 1637 queue_delayed_work(system_freezable_wq, 1638 &pinfo->restore_userptr_work, 0); 1639 1640 out_unlock: 1641 mutex_unlock(&pinfo->lock); 1642 return ret; 1643 } 1644 1645 size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, 1646 uint8_t xcp_id) 1647 { 1648 uint64_t reserved_for_pt = 1649 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); 1650 ssize_t available; 1651 uint64_t vram_available, system_mem_available, ttm_mem_available; 1652 1653 spin_lock(&kfd_mem_limit.mem_limit_lock); 1654 vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id) 1655 - adev->kfd.vram_used_aligned[xcp_id] 1656 - atomic64_read(&adev->vram_pin_size) 1657 - reserved_for_pt; 1658 1659 if (adev->gmc.is_app_apu) { 1660 system_mem_available = no_system_mem_limit ? 
1661 kfd_mem_limit.max_system_mem_limit : 1662 kfd_mem_limit.max_system_mem_limit - 1663 kfd_mem_limit.system_mem_used; 1664 1665 ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit - 1666 kfd_mem_limit.ttm_mem_used; 1667 1668 available = min3(system_mem_available, ttm_mem_available, 1669 vram_available); 1670 available = ALIGN_DOWN(available, PAGE_SIZE); 1671 } else { 1672 available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN); 1673 } 1674 1675 spin_unlock(&kfd_mem_limit.mem_limit_lock); 1676 1677 if (available < 0) 1678 available = 0; 1679 1680 return available; 1681 } 1682 1683 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( 1684 struct amdgpu_device *adev, uint64_t va, uint64_t size, 1685 void *drm_priv, struct kgd_mem **mem, 1686 uint64_t *offset, uint32_t flags, bool criu_resume) 1687 { 1688 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1689 struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm); 1690 enum ttm_bo_type bo_type = ttm_bo_type_device; 1691 struct sg_table *sg = NULL; 1692 uint64_t user_addr = 0; 1693 struct amdgpu_bo *bo; 1694 struct drm_gem_object *gobj = NULL; 1695 u32 domain, alloc_domain; 1696 uint64_t aligned_size; 1697 int8_t xcp_id = -1; 1698 u64 alloc_flags; 1699 int ret; 1700 1701 /* 1702 * Check on which domain to allocate BO 1703 */ 1704 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 1705 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; 1706 1707 if (adev->gmc.is_app_apu) { 1708 domain = AMDGPU_GEM_DOMAIN_GTT; 1709 alloc_domain = AMDGPU_GEM_DOMAIN_GTT; 1710 alloc_flags = 0; 1711 } else { 1712 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; 1713 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? 1714 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; 1715 1716 /* For contiguous VRAM allocation */ 1717 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS) 1718 alloc_flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1719 } 1720 xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? 1721 0 : fpriv->xcp_id; 1722 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 1723 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; 1724 alloc_flags = 0; 1725 } else { 1726 domain = AMDGPU_GEM_DOMAIN_GTT; 1727 alloc_domain = AMDGPU_GEM_DOMAIN_CPU; 1728 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE; 1729 1730 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 1731 if (!offset || !*offset) 1732 return -EINVAL; 1733 user_addr = untagged_addr(*offset); 1734 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1735 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1736 bo_type = ttm_bo_type_sg; 1737 if (size > UINT_MAX) 1738 return -EINVAL; 1739 sg = create_sg_table(*offset, size); 1740 if (!sg) 1741 return -ENOMEM; 1742 } else { 1743 return -EINVAL; 1744 } 1745 } 1746 1747 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT) 1748 alloc_flags |= AMDGPU_GEM_CREATE_COHERENT; 1749 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT) 1750 alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT; 1751 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED) 1752 alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED; 1753 1754 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 1755 if (!*mem) { 1756 ret = -ENOMEM; 1757 goto err; 1758 } 1759 INIT_LIST_HEAD(&(*mem)->attachments); 1760 mutex_init(&(*mem)->lock); 1761 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); 1762 1763 /* Workaround for AQL queue wraparound bug. Map the same 1764 * memory twice. That means we only actually allocate half 1765 * the memory. 
1766 */ 1767 if ((*mem)->aql_queue) 1768 size >>= 1; 1769 aligned_size = PAGE_ALIGN(size); 1770 1771 (*mem)->alloc_flags = flags; 1772 1773 amdgpu_sync_create(&(*mem)->sync); 1774 1775 ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags, 1776 xcp_id); 1777 if (ret) { 1778 pr_debug("Insufficient memory\n"); 1779 goto err_reserve_limit; 1780 } 1781 1782 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n", 1783 va, (*mem)->aql_queue ? size << 1 : size, 1784 domain_string(alloc_domain), xcp_id); 1785 1786 ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags, 1787 bo_type, NULL, &gobj, xcp_id + 1); 1788 if (ret) { 1789 pr_debug("Failed to create BO on domain %s. ret %d\n", 1790 domain_string(alloc_domain), ret); 1791 goto err_bo_create; 1792 } 1793 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); 1794 if (ret) { 1795 pr_debug("Failed to allow vma node access. ret %d\n", ret); 1796 goto err_node_allow; 1797 } 1798 ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle); 1799 if (ret) 1800 goto err_gem_handle_create; 1801 bo = gem_to_amdgpu_bo(gobj); 1802 if (bo_type == ttm_bo_type_sg) { 1803 bo->tbo.sg = sg; 1804 bo->tbo.ttm->sg = sg; 1805 } 1806 bo->kfd_bo = *mem; 1807 (*mem)->bo = bo; 1808 if (user_addr) 1809 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; 1810 1811 (*mem)->va = va; 1812 (*mem)->domain = domain; 1813 (*mem)->mapped_to_gpu_memory = 0; 1814 (*mem)->process_info = avm->process_info; 1815 1816 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); 1817 1818 if (user_addr) { 1819 pr_debug("creating userptr BO for user_addr = %llx\n", user_addr); 1820 ret = init_user_pages(*mem, user_addr, criu_resume); 1821 if (ret) 1822 goto allocate_init_user_pages_failed; 1823 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1824 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1825 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT); 1826 if (ret) { 1827 pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n"); 1828 goto err_pin_bo; 1829 } 1830 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 1831 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; 1832 } else { 1833 mutex_lock(&avm->process_info->lock); 1834 if (avm->process_info->eviction_fence && 1835 !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) 1836 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain, 1837 &avm->process_info->eviction_fence->base); 1838 mutex_unlock(&avm->process_info->lock); 1839 if (ret) 1840 goto err_validate_bo; 1841 } 1842 1843 if (offset) 1844 *offset = amdgpu_bo_mmap_offset(bo); 1845 1846 return 0; 1847 1848 allocate_init_user_pages_failed: 1849 err_pin_bo: 1850 err_validate_bo: 1851 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 1852 drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle); 1853 err_gem_handle_create: 1854 drm_vma_node_revoke(&gobj->vma_node, drm_priv); 1855 err_node_allow: 1856 /* Don't unreserve system mem limit twice */ 1857 goto err_reserve_limit; 1858 err_bo_create: 1859 amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); 1860 err_reserve_limit: 1861 amdgpu_sync_free(&(*mem)->sync); 1862 mutex_destroy(&(*mem)->lock); 1863 if (gobj) 1864 drm_gem_object_put(gobj); 1865 else 1866 kfree(*mem); 1867 err: 1868 if (sg) { 1869 sg_free_table(sg); 1870 kfree(sg); 1871 } 1872 return ret; 1873 } 1874 1875 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( 1876 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, 1877 uint64_t *size) 1878 { 1879 struct 
amdkfd_process_info *process_info = mem->process_info; 1880 unsigned long bo_size = mem->bo->tbo.base.size; 1881 bool use_release_notifier = (mem->bo->kfd_bo == mem); 1882 struct kfd_mem_attachment *entry, *tmp; 1883 struct bo_vm_reservation_context ctx; 1884 unsigned int mapped_to_gpu_memory; 1885 int ret; 1886 bool is_imported = false; 1887 1888 mutex_lock(&mem->lock); 1889 1890 /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */ 1891 if (mem->alloc_flags & 1892 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1893 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1894 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); 1895 } 1896 1897 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; 1898 is_imported = mem->is_imported; 1899 mutex_unlock(&mem->lock); 1900 /* lock is not needed after this, since mem is unused and will 1901 * be freed anyway 1902 */ 1903 1904 if (mapped_to_gpu_memory > 0) { 1905 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", 1906 mem->va, bo_size); 1907 return -EBUSY; 1908 } 1909 1910 /* Make sure restore workers don't access the BO any more */ 1911 mutex_lock(&process_info->lock); 1912 list_del(&mem->validate_list); 1913 mutex_unlock(&process_info->lock); 1914 1915 /* Cleanup user pages and MMU notifiers */ 1916 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { 1917 amdgpu_hmm_unregister(mem->bo); 1918 mutex_lock(&process_info->notifier_lock); 1919 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); 1920 mutex_unlock(&process_info->notifier_lock); 1921 } 1922 1923 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); 1924 if (unlikely(ret)) 1925 return ret; 1926 1927 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1928 process_info->eviction_fence); 1929 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, 1930 mem->va + bo_size * (1 + mem->aql_queue)); 1931 1932 /* Remove from VM internal data structures */ 1933 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) { 1934 kfd_mem_dmaunmap_attachment(mem, entry); 1935 kfd_mem_detach(entry); 1936 } 1937 1938 ret = unreserve_bo_and_vms(&ctx, false, false); 1939 1940 /* Free the sync object */ 1941 amdgpu_sync_free(&mem->sync); 1942 1943 /* If the SG is not NULL, it's one we created for a doorbell or mmio 1944 * remap BO. We need to free it. 1945 */ 1946 if (mem->bo->tbo.sg) { 1947 sg_free_table(mem->bo->tbo.sg); 1948 kfree(mem->bo->tbo.sg); 1949 } 1950 1951 /* Update the size of the BO being freed if it was allocated from 1952 * VRAM and is not imported. For APP APU VRAM allocations are done 1953 * in GTT domain 1954 */ 1955 if (size) { 1956 if (!is_imported && 1957 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || 1958 (adev->gmc.is_app_apu && 1959 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) 1960 *size = bo_size; 1961 else 1962 *size = 0; 1963 } 1964 1965 /* Free the BO*/ 1966 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); 1967 drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); 1968 if (mem->dmabuf) { 1969 dma_buf_put(mem->dmabuf); 1970 mem->dmabuf = NULL; 1971 } 1972 mutex_destroy(&mem->lock); 1973 1974 /* If this releases the last reference, it will end up calling 1975 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why 1976 * this needs to be the last call here. 1977 */ 1978 drm_gem_object_put(&mem->bo->tbo.base); 1979 1980 /* 1981 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(), 1982 * explicitly free it here. 
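 * Such an imported kgd_mem is not pointed to by bo->kfd_bo, so amdgpu_amdkfd_release_notify() will never free it when the last BO reference is dropped.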
1983 */ 1984 if (!use_release_notifier) 1985 kfree(mem); 1986 1987 return ret; 1988 } 1989 1990 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 1991 struct amdgpu_device *adev, struct kgd_mem *mem, 1992 void *drm_priv) 1993 { 1994 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1995 int ret; 1996 struct amdgpu_bo *bo; 1997 uint32_t domain; 1998 struct kfd_mem_attachment *entry; 1999 struct bo_vm_reservation_context ctx; 2000 unsigned long bo_size; 2001 bool is_invalid_userptr = false; 2002 2003 bo = mem->bo; 2004 if (!bo) { 2005 pr_err("Invalid BO when mapping memory to GPU\n"); 2006 return -EINVAL; 2007 } 2008 2009 /* Make sure restore is not running concurrently. Since we 2010 * don't map invalid userptr BOs, we rely on the next restore 2011 * worker to do the mapping 2012 */ 2013 mutex_lock(&mem->process_info->lock); 2014 2015 /* Lock notifier lock. If we find an invalid userptr BO, we can be 2016 * sure that the MMU notifier is no longer running 2017 * concurrently and the queues are actually stopped 2018 */ 2019 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 2020 mutex_lock(&mem->process_info->notifier_lock); 2021 is_invalid_userptr = !!mem->invalid; 2022 mutex_unlock(&mem->process_info->notifier_lock); 2023 } 2024 2025 mutex_lock(&mem->lock); 2026 2027 domain = mem->domain; 2028 bo_size = bo->tbo.base.size; 2029 2030 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", 2031 mem->va, 2032 mem->va + bo_size * (1 + mem->aql_queue), 2033 avm, domain_string(domain)); 2034 2035 if (!kfd_mem_is_attached(avm, mem)) { 2036 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); 2037 if (ret) 2038 goto out; 2039 } 2040 2041 ret = reserve_bo_and_vm(mem, avm, &ctx); 2042 if (unlikely(ret)) 2043 goto out; 2044 2045 /* Userptr can be marked as "not invalid", but not actually be 2046 * validated yet (still in the system domain). 
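 * (i.e. mem->invalid was already cleared, but the BO has not been validated into the GTT domain yet).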
In that case 2047 * the queues are still stopped and we can leave mapping for 2048 * the next restore worker 2049 */ 2050 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && 2051 bo->tbo.resource->mem_type == TTM_PL_SYSTEM) 2052 is_invalid_userptr = true; 2053 2054 ret = vm_validate_pt_pd_bos(avm, NULL); 2055 if (unlikely(ret)) 2056 goto out_unreserve; 2057 2058 list_for_each_entry(entry, &mem->attachments, list) { 2059 if (entry->bo_va->base.vm != avm || entry->is_mapped) 2060 continue; 2061 2062 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", 2063 entry->va, entry->va + bo_size, entry); 2064 2065 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, 2066 is_invalid_userptr); 2067 if (ret) { 2068 pr_err("Failed to map bo to gpuvm\n"); 2069 goto out_unreserve; 2070 } 2071 2072 ret = vm_update_pds(avm, ctx.sync); 2073 if (ret) { 2074 pr_err("Failed to update page directories\n"); 2075 goto out_unreserve; 2076 } 2077 2078 entry->is_mapped = true; 2079 mem->mapped_to_gpu_memory++; 2080 pr_debug("\t INC mapping count %d\n", 2081 mem->mapped_to_gpu_memory); 2082 } 2083 2084 ret = unreserve_bo_and_vms(&ctx, false, false); 2085 2086 goto out; 2087 2088 out_unreserve: 2089 unreserve_bo_and_vms(&ctx, false, false); 2090 out: 2091 mutex_unlock(&mem->process_info->lock); 2092 mutex_unlock(&mem->lock); 2093 return ret; 2094 } 2095 2096 int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) 2097 { 2098 struct kfd_mem_attachment *entry; 2099 struct amdgpu_vm *vm; 2100 int ret; 2101 2102 vm = drm_priv_to_vm(drm_priv); 2103 2104 mutex_lock(&mem->lock); 2105 2106 ret = amdgpu_bo_reserve(mem->bo, true); 2107 if (ret) 2108 goto out; 2109 2110 list_for_each_entry(entry, &mem->attachments, list) { 2111 if (entry->bo_va->base.vm != vm) 2112 continue; 2113 if (entry->bo_va->base.bo->tbo.ttm && 2114 !entry->bo_va->base.bo->tbo.ttm->sg) 2115 continue; 2116 2117 kfd_mem_dmaunmap_attachment(mem, entry); 2118 } 2119 2120 amdgpu_bo_unreserve(mem->bo); 2121 out: 2122 mutex_unlock(&mem->lock); 2123 2124 return ret; 2125 } 2126 2127 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 2128 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) 2129 { 2130 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 2131 unsigned long bo_size = mem->bo->tbo.base.size; 2132 struct kfd_mem_attachment *entry; 2133 struct bo_vm_reservation_context ctx; 2134 int ret; 2135 2136 mutex_lock(&mem->lock); 2137 2138 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); 2139 if (unlikely(ret)) 2140 goto out; 2141 /* If no VMs were reserved, it means the BO wasn't actually mapped */ 2142 if (ctx.n_vms == 0) { 2143 ret = -EINVAL; 2144 goto unreserve_out; 2145 } 2146 2147 ret = vm_validate_pt_pd_bos(avm, NULL); 2148 if (unlikely(ret)) 2149 goto unreserve_out; 2150 2151 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", 2152 mem->va, 2153 mem->va + bo_size * (1 + mem->aql_queue), 2154 avm); 2155 2156 list_for_each_entry(entry, &mem->attachments, list) { 2157 if (entry->bo_va->base.vm != avm || !entry->is_mapped) 2158 continue; 2159 2160 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", 2161 entry->va, entry->va + bo_size, entry); 2162 2163 unmap_bo_from_gpuvm(mem, entry, ctx.sync); 2164 entry->is_mapped = false; 2165 2166 mem->mapped_to_gpu_memory--; 2167 pr_debug("\t DEC mapping count %d\n", 2168 mem->mapped_to_gpu_memory); 2169 } 2170 2171 unreserve_out: 2172 unreserve_bo_and_vms(&ctx, false, false); 2173 out: 2174 mutex_unlock(&mem->lock); 2175 return ret; 2176 } 2177 2178 int amdgpu_amdkfd_gpuvm_sync_memory( 2179 struct 
amdgpu_device *adev, struct kgd_mem *mem, bool intr) 2180 { 2181 struct amdgpu_sync sync; 2182 int ret; 2183 2184 amdgpu_sync_create(&sync); 2185 2186 mutex_lock(&mem->lock); 2187 amdgpu_sync_clone(&mem->sync, &sync); 2188 mutex_unlock(&mem->lock); 2189 2190 ret = amdgpu_sync_wait(&sync, intr); 2191 amdgpu_sync_free(&sync); 2192 return ret; 2193 } 2194 2195 /** 2196 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count 2197 * @bo: Buffer object to be mapped 2198 * 2199 * Before return, bo reference count is incremented. To release the reference and unpin/ 2200 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem. 2201 */ 2202 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo) 2203 { 2204 int ret; 2205 2206 ret = amdgpu_bo_reserve(bo, true); 2207 if (ret) { 2208 pr_err("Failed to reserve bo. ret %d\n", ret); 2209 goto err_reserve_bo_failed; 2210 } 2211 2212 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 2213 if (ret) { 2214 pr_err("Failed to pin bo. ret %d\n", ret); 2215 goto err_pin_bo_failed; 2216 } 2217 2218 ret = amdgpu_ttm_alloc_gart(&bo->tbo); 2219 if (ret) { 2220 pr_err("Failed to bind bo to GART. ret %d\n", ret); 2221 goto err_map_bo_gart_failed; 2222 } 2223 2224 amdgpu_amdkfd_remove_eviction_fence( 2225 bo, bo->vm_bo->vm->process_info->eviction_fence); 2226 2227 amdgpu_bo_unreserve(bo); 2228 2229 bo = amdgpu_bo_ref(bo); 2230 2231 return 0; 2232 2233 err_map_bo_gart_failed: 2234 amdgpu_bo_unpin(bo); 2235 err_pin_bo_failed: 2236 amdgpu_bo_unreserve(bo); 2237 err_reserve_bo_failed: 2238 2239 return ret; 2240 } 2241 2242 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access 2243 * 2244 * @mem: Buffer object to be mapped for CPU access 2245 * @kptr[out]: pointer in kernel CPU address space 2246 * @size[out]: size of the buffer 2247 * 2248 * Pins the BO and maps it for kernel CPU access. The eviction fence is removed 2249 * from the BO, since pinned BOs cannot be evicted. The bo must remain on the 2250 * validate_list, so the GPU mapping can be restored after a page table was 2251 * evicted. 2252 * 2253 * Return: 0 on success, error code on failure 2254 */ 2255 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, 2256 void **kptr, uint64_t *size) 2257 { 2258 int ret; 2259 struct amdgpu_bo *bo = mem->bo; 2260 2261 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 2262 pr_err("userptr can't be mapped to kernel\n"); 2263 return -EINVAL; 2264 } 2265 2266 mutex_lock(&mem->process_info->lock); 2267 2268 ret = amdgpu_bo_reserve(bo, true); 2269 if (ret) { 2270 pr_err("Failed to reserve bo. ret %d\n", ret); 2271 goto bo_reserve_failed; 2272 } 2273 2274 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 2275 if (ret) { 2276 pr_err("Failed to pin bo. ret %d\n", ret); 2277 goto pin_failed; 2278 } 2279 2280 ret = amdgpu_bo_kmap(bo, kptr); 2281 if (ret) { 2282 pr_err("Failed to map bo to kernel. 
ret %d\n", ret); 2283 goto kmap_failed; 2284 } 2285 2286 amdgpu_amdkfd_remove_eviction_fence( 2287 bo, mem->process_info->eviction_fence); 2288 2289 if (size) 2290 *size = amdgpu_bo_size(bo); 2291 2292 amdgpu_bo_unreserve(bo); 2293 2294 mutex_unlock(&mem->process_info->lock); 2295 return 0; 2296 2297 kmap_failed: 2298 amdgpu_bo_unpin(bo); 2299 pin_failed: 2300 amdgpu_bo_unreserve(bo); 2301 bo_reserve_failed: 2302 mutex_unlock(&mem->process_info->lock); 2303 2304 return ret; 2305 } 2306 2307 /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access 2308 * 2309 * @mem: Buffer object to be unmapped for CPU access 2310 * 2311 * Removes the kernel CPU mapping and unpins the BO. It does not restore the 2312 * eviction fence, so this function should only be used for cleanup before the 2313 * BO is destroyed. 2314 */ 2315 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) 2316 { 2317 struct amdgpu_bo *bo = mem->bo; 2318 2319 amdgpu_bo_reserve(bo, true); 2320 amdgpu_bo_kunmap(bo); 2321 amdgpu_bo_unpin(bo); 2322 amdgpu_bo_unreserve(bo); 2323 } 2324 2325 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, 2326 struct kfd_vm_fault_info *mem) 2327 { 2328 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { 2329 *mem = *adev->gmc.vm_fault_info; 2330 mb(); /* make sure read happened */ 2331 atomic_set(&adev->gmc.vm_fault_info_updated, 0); 2332 } 2333 return 0; 2334 } 2335 2336 static int import_obj_create(struct amdgpu_device *adev, 2337 struct dma_buf *dma_buf, 2338 struct drm_gem_object *obj, 2339 uint64_t va, void *drm_priv, 2340 struct kgd_mem **mem, uint64_t *size, 2341 uint64_t *mmap_offset) 2342 { 2343 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 2344 struct amdgpu_bo *bo; 2345 int ret; 2346 2347 bo = gem_to_amdgpu_bo(obj); 2348 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | 2349 AMDGPU_GEM_DOMAIN_GTT))) 2350 /* Only VRAM and GTT BOs are supported */ 2351 return -EINVAL; 2352 2353 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 2354 if (!*mem) 2355 return -ENOMEM; 2356 2357 ret = drm_vma_node_allow(&obj->vma_node, drm_priv); 2358 if (ret) 2359 goto err_free_mem; 2360 2361 if (size) 2362 *size = amdgpu_bo_size(bo); 2363 2364 if (mmap_offset) 2365 *mmap_offset = amdgpu_bo_mmap_offset(bo); 2366 2367 INIT_LIST_HEAD(&(*mem)->attachments); 2368 mutex_init(&(*mem)->lock); 2369 2370 (*mem)->alloc_flags = 2371 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 2372 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT) 2373 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE 2374 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; 2375 2376 get_dma_buf(dma_buf); 2377 (*mem)->dmabuf = dma_buf; 2378 (*mem)->bo = bo; 2379 (*mem)->va = va; 2380 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
2381 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; 2382 2383 (*mem)->mapped_to_gpu_memory = 0; 2384 (*mem)->process_info = avm->process_info; 2385 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); 2386 amdgpu_sync_create(&(*mem)->sync); 2387 (*mem)->is_imported = true; 2388 2389 mutex_lock(&avm->process_info->lock); 2390 if (avm->process_info->eviction_fence && 2391 !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) 2392 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, 2393 &avm->process_info->eviction_fence->base); 2394 mutex_unlock(&avm->process_info->lock); 2395 if (ret) 2396 goto err_remove_mem; 2397 2398 return 0; 2399 2400 err_remove_mem: 2401 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 2402 drm_vma_node_revoke(&obj->vma_node, drm_priv); 2403 err_free_mem: 2404 kfree(*mem); 2405 return ret; 2406 } 2407 2408 int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd, 2409 uint64_t va, void *drm_priv, 2410 struct kgd_mem **mem, uint64_t *size, 2411 uint64_t *mmap_offset) 2412 { 2413 struct drm_gem_object *obj; 2414 uint32_t handle; 2415 int ret; 2416 2417 ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd, 2418 &handle); 2419 if (ret) 2420 return ret; 2421 obj = drm_gem_object_lookup(adev->kfd.client.file, handle); 2422 if (!obj) { 2423 ret = -EINVAL; 2424 goto err_release_handle; 2425 } 2426 2427 ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size, 2428 mmap_offset); 2429 if (ret) 2430 goto err_put_obj; 2431 2432 (*mem)->gem_handle = handle; 2433 2434 return 0; 2435 2436 err_put_obj: 2437 drm_gem_object_put(obj); 2438 err_release_handle: 2439 drm_gem_handle_delete(adev->kfd.client.file, handle); 2440 return ret; 2441 } 2442 2443 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, 2444 struct dma_buf **dma_buf) 2445 { 2446 int ret; 2447 2448 mutex_lock(&mem->lock); 2449 ret = kfd_mem_export_dmabuf(mem); 2450 if (ret) 2451 goto out; 2452 2453 get_dma_buf(mem->dmabuf); 2454 *dma_buf = mem->dmabuf; 2455 out: 2456 mutex_unlock(&mem->lock); 2457 return ret; 2458 } 2459 2460 /* Evict a userptr BO by stopping the queues if necessary 2461 * 2462 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it 2463 * cannot do any memory allocations, and cannot take any locks that 2464 * are held elsewhere while allocating memory. 2465 * 2466 * It doesn't do anything to the BO itself. The real work happens in 2467 * restore, where we get updated page addresses. This function only 2468 * ensures that GPU access to the BO is stopped. 
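 * Only the first eviction in a process quiesces the user mode queues and schedules the delayed restore worker; further evictions while already quiesced only increment the invalid/eviction counters.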
2469 */ 2470 int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, 2471 unsigned long cur_seq, struct kgd_mem *mem) 2472 { 2473 struct amdkfd_process_info *process_info = mem->process_info; 2474 int r = 0; 2475 2476 /* Do not process MMU notifications during CRIU restore until 2477 * KFD_CRIU_OP_RESUME IOCTL is received 2478 */ 2479 if (READ_ONCE(process_info->block_mmu_notifications)) 2480 return 0; 2481 2482 mutex_lock(&process_info->notifier_lock); 2483 mmu_interval_set_seq(mni, cur_seq); 2484 2485 mem->invalid++; 2486 if (++process_info->evicted_bos == 1) { 2487 /* First eviction, stop the queues */ 2488 r = kgd2kfd_quiesce_mm(mni->mm, 2489 KFD_QUEUE_EVICTION_TRIGGER_USERPTR); 2490 if (r) 2491 pr_err("Failed to quiesce KFD\n"); 2492 queue_delayed_work(system_freezable_wq, 2493 &process_info->restore_userptr_work, 2494 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); 2495 } 2496 mutex_unlock(&process_info->notifier_lock); 2497 2498 return r; 2499 } 2500 2501 /* Update invalid userptr BOs 2502 * 2503 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to 2504 * userptr_inval_list and updates user pages for all BOs that have 2505 * been invalidated since their last update. 2506 */ 2507 static int update_invalid_user_pages(struct amdkfd_process_info *process_info, 2508 struct mm_struct *mm) 2509 { 2510 struct kgd_mem *mem, *tmp_mem; 2511 struct amdgpu_bo *bo; 2512 struct ttm_operation_ctx ctx = { false, false }; 2513 uint32_t invalid; 2514 int ret = 0; 2515 2516 mutex_lock(&process_info->notifier_lock); 2517 2518 /* Move all invalidated BOs to the userptr_inval_list */ 2519 list_for_each_entry_safe(mem, tmp_mem, 2520 &process_info->userptr_valid_list, 2521 validate_list) 2522 if (mem->invalid) 2523 list_move_tail(&mem->validate_list, 2524 &process_info->userptr_inval_list); 2525 2526 /* Go through userptr_inval_list and update any invalid user_pages */ 2527 list_for_each_entry(mem, &process_info->userptr_inval_list, 2528 validate_list) { 2529 invalid = mem->invalid; 2530 if (!invalid) 2531 /* BO hasn't been invalidated since the last 2532 * revalidation attempt. Keep its page list. 2533 */ 2534 continue; 2535 2536 bo = mem->bo; 2537 2538 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); 2539 mem->range = NULL; 2540 2541 /* BO reservations and getting user pages (hmm_range_fault) 2542 * must happen outside the notifier lock 2543 */ 2544 mutex_unlock(&process_info->notifier_lock); 2545 2546 /* Move the BO to system (CPU) domain if necessary to unmap 2547 * and free the SG table 2548 */ 2549 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) { 2550 if (amdgpu_bo_reserve(bo, true)) 2551 return -EAGAIN; 2552 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 2553 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 2554 amdgpu_bo_unreserve(bo); 2555 if (ret) { 2556 pr_err("%s: Failed to invalidate userptr BO\n", 2557 __func__); 2558 return -EAGAIN; 2559 } 2560 } 2561 2562 /* Get updated user pages */ 2563 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, 2564 &mem->range); 2565 if (ret) { 2566 pr_debug("Failed %d to get user pages\n", ret); 2567 2568 /* Return -EFAULT bad address error as success. It will 2569 * fail later with a VM fault if the GPU tries to access 2570 * it. Better than hanging indefinitely with stalled 2571 * user mode queues. 
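 * The stale GPU mappings of such a BO are cleared during the next validation pass (see validate_invalid_user_pages), so the GPU faults instead of touching freed pages.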
2572 * 2573 * Return other error -EBUSY or -ENOMEM to retry restore 2574 */ 2575 if (ret != -EFAULT) 2576 return ret; 2577 2578 ret = 0; 2579 } 2580 2581 mutex_lock(&process_info->notifier_lock); 2582 2583 /* Mark the BO as valid unless it was invalidated 2584 * again concurrently. 2585 */ 2586 if (mem->invalid != invalid) { 2587 ret = -EAGAIN; 2588 goto unlock_out; 2589 } 2590 /* set mem valid if mem has hmm range associated */ 2591 if (mem->range) 2592 mem->invalid = 0; 2593 } 2594 2595 unlock_out: 2596 mutex_unlock(&process_info->notifier_lock); 2597 2598 return ret; 2599 } 2600 2601 /* Validate invalid userptr BOs 2602 * 2603 * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables 2604 * with new page addresses and waits for the page table updates to complete. 2605 */ 2606 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) 2607 { 2608 struct ttm_operation_ctx ctx = { false, false }; 2609 struct amdgpu_sync sync; 2610 struct drm_exec exec; 2611 2612 struct amdgpu_vm *peer_vm; 2613 struct kgd_mem *mem, *tmp_mem; 2614 struct amdgpu_bo *bo; 2615 int ret; 2616 2617 amdgpu_sync_create(&sync); 2618 2619 drm_exec_init(&exec, 0, 0); 2620 /* Reserve all BOs and page tables for validation */ 2621 drm_exec_until_all_locked(&exec) { 2622 /* Reserve all the page directories */ 2623 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2624 vm_list_node) { 2625 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); 2626 drm_exec_retry_on_contention(&exec); 2627 if (unlikely(ret)) 2628 goto unreserve_out; 2629 } 2630 2631 /* Reserve the userptr_inval_list entries to resv_list */ 2632 list_for_each_entry(mem, &process_info->userptr_inval_list, 2633 validate_list) { 2634 struct drm_gem_object *gobj; 2635 2636 gobj = &mem->bo->tbo.base; 2637 ret = drm_exec_prepare_obj(&exec, gobj, 1); 2638 drm_exec_retry_on_contention(&exec); 2639 if (unlikely(ret)) 2640 goto unreserve_out; 2641 } 2642 } 2643 2644 ret = process_validate_vms(process_info, NULL); 2645 if (ret) 2646 goto unreserve_out; 2647 2648 /* Validate BOs and update GPUVM page tables */ 2649 list_for_each_entry_safe(mem, tmp_mem, 2650 &process_info->userptr_inval_list, 2651 validate_list) { 2652 struct kfd_mem_attachment *attachment; 2653 2654 bo = mem->bo; 2655 2656 /* Validate the BO if we got user pages */ 2657 if (bo->tbo.ttm->pages[0]) { 2658 amdgpu_bo_placement_from_domain(bo, mem->domain); 2659 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 2660 if (ret) { 2661 pr_err("%s: failed to validate BO\n", __func__); 2662 goto unreserve_out; 2663 } 2664 } 2665 2666 /* Update mapping. If the BO was not validated 2667 * (because we couldn't get user pages), this will 2668 * clear the page table entries, which will result in 2669 * VM faults if the GPU tries to access the invalid 2670 * memory. 
2671 */ 2672 list_for_each_entry(attachment, &mem->attachments, list) { 2673 if (!attachment->is_mapped) 2674 continue; 2675 2676 kfd_mem_dmaunmap_attachment(mem, attachment); 2677 ret = update_gpuvm_pte(mem, attachment, &sync); 2678 if (ret) { 2679 pr_err("%s: update PTE failed\n", __func__); 2680 /* make sure this gets validated again */ 2681 mutex_lock(&process_info->notifier_lock); 2682 mem->invalid++; 2683 mutex_unlock(&process_info->notifier_lock); 2684 goto unreserve_out; 2685 } 2686 } 2687 } 2688 2689 /* Update page directories */ 2690 ret = process_update_pds(process_info, &sync); 2691 2692 unreserve_out: 2693 drm_exec_fini(&exec); 2694 amdgpu_sync_wait(&sync, false); 2695 amdgpu_sync_free(&sync); 2696 2697 return ret; 2698 } 2699 2700 /* Confirm that all user pages are valid while holding the notifier lock 2701 * 2702 * Moves valid BOs from the userptr_inval_list back to userptr_val_list. 2703 */ 2704 static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info) 2705 { 2706 struct kgd_mem *mem, *tmp_mem; 2707 int ret = 0; 2708 2709 list_for_each_entry_safe(mem, tmp_mem, 2710 &process_info->userptr_inval_list, 2711 validate_list) { 2712 bool valid; 2713 2714 /* keep mem without hmm range at userptr_inval_list */ 2715 if (!mem->range) 2716 continue; 2717 2718 /* Only check mem with hmm range associated */ 2719 valid = amdgpu_ttm_tt_get_user_pages_done( 2720 mem->bo->tbo.ttm, mem->range); 2721 2722 mem->range = NULL; 2723 if (!valid) { 2724 WARN(!mem->invalid, "Invalid BO not marked invalid"); 2725 ret = -EAGAIN; 2726 continue; 2727 } 2728 2729 if (mem->invalid) { 2730 WARN(1, "Valid BO is marked invalid"); 2731 ret = -EAGAIN; 2732 continue; 2733 } 2734 2735 list_move_tail(&mem->validate_list, 2736 &process_info->userptr_valid_list); 2737 } 2738 2739 return ret; 2740 } 2741 2742 /* Worker callback to restore evicted userptr BOs 2743 * 2744 * Tries to update and validate all userptr BOs. If successful and no 2745 * concurrent evictions happened, the queues are restarted. Otherwise, 2746 * reschedule for another attempt later. 2747 */ 2748 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) 2749 { 2750 struct delayed_work *dwork = to_delayed_work(work); 2751 struct amdkfd_process_info *process_info = 2752 container_of(dwork, struct amdkfd_process_info, 2753 restore_userptr_work); 2754 struct task_struct *usertask; 2755 struct mm_struct *mm; 2756 uint32_t evicted_bos; 2757 2758 mutex_lock(&process_info->notifier_lock); 2759 evicted_bos = process_info->evicted_bos; 2760 mutex_unlock(&process_info->notifier_lock); 2761 if (!evicted_bos) 2762 return; 2763 2764 /* Reference task and mm in case of concurrent process termination */ 2765 usertask = get_pid_task(process_info->pid, PIDTYPE_PID); 2766 if (!usertask) 2767 return; 2768 mm = get_task_mm(usertask); 2769 if (!mm) { 2770 put_task_struct(usertask); 2771 return; 2772 } 2773 2774 mutex_lock(&process_info->lock); 2775 2776 if (update_invalid_user_pages(process_info, mm)) 2777 goto unlock_out; 2778 /* userptr_inval_list can be empty if all evicted userptr BOs 2779 * have been freed. In that case there is nothing to validate 2780 * and we can just restart the queues. 2781 */ 2782 if (!list_empty(&process_info->userptr_inval_list)) { 2783 if (validate_invalid_user_pages(process_info)) 2784 goto unlock_out; 2785 } 2786 /* Final check for concurrent evicton and atomic update. If 2787 * another eviction happens after successful update, it will 2788 * be a first eviction that calls quiesce_mm. 
The eviction 2789 * reference counting inside KFD will handle this case. 2790 */ 2791 mutex_lock(&process_info->notifier_lock); 2792 if (process_info->evicted_bos != evicted_bos) 2793 goto unlock_notifier_out; 2794 2795 if (confirm_valid_user_pages_locked(process_info)) { 2796 WARN(1, "User pages unexpectedly invalid"); 2797 goto unlock_notifier_out; 2798 } 2799 2800 process_info->evicted_bos = evicted_bos = 0; 2801 2802 if (kgd2kfd_resume_mm(mm)) { 2803 pr_err("%s: Failed to resume KFD\n", __func__); 2804 /* No recovery from this failure. Probably the CP is 2805 * hanging. No point trying again. 2806 */ 2807 } 2808 2809 unlock_notifier_out: 2810 mutex_unlock(&process_info->notifier_lock); 2811 unlock_out: 2812 mutex_unlock(&process_info->lock); 2813 2814 /* If validation failed, reschedule another attempt */ 2815 if (evicted_bos) { 2816 queue_delayed_work(system_freezable_wq, 2817 &process_info->restore_userptr_work, 2818 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); 2819 2820 kfd_smi_event_queue_restore_rescheduled(mm); 2821 } 2822 mmput(mm); 2823 put_task_struct(usertask); 2824 } 2825 2826 static void replace_eviction_fence(struct dma_fence __rcu **ef, 2827 struct dma_fence *new_ef) 2828 { 2829 struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true 2830 /* protected by process_info->lock */); 2831 2832 /* If we're replacing an unsignaled eviction fence, that fence will 2833 * never be signaled, and if anyone is still waiting on that fence, 2834 * they will hang forever. This should never happen. We should only 2835 * replace the fence from the restore worker, which only gets scheduled after 2836 * the eviction work has signaled the fence. 2837 */ 2838 WARN_ONCE(!dma_fence_is_signaled(old_ef), 2839 "Replacing unsignaled eviction fence"); 2840 dma_fence_put(old_ef); 2841 } 2842 2843 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given 2844 * KFD process identified by process_info 2845 * 2846 * @process_info: amdkfd_process_info of the KFD process * @ef: RCU-protected pointer to the process eviction fence; replaced if a new fence is created 2847 * 2848 * After memory eviction, the restore thread calls this function. It must 2849 * be called while the process is still valid. BO restore involves: 2850 * 2851 * 1. Reserve all KFD BOs and the page directories/tables of all VMs with drm_exec 2852 * 2. Validate the BOs managed by KFD, falling back to GTT if needed 2853 * 3. Validate PDs, PTs and evicted DMABuf imports 2854 * 4. Update all GPUVM mappings (KFD and non-KFD) and the page directories 2855 * 5. Wait for the validations and page table updates to finish 2856 * 6. Replace the old eviction fence with a new one if the old one has signaled 2857 * 7. Attach the new eviction fence to all KFD BOs, PDs, PTs and DMABuf imports 2858 * 8.
Unreserve all BOs 2860 */ 2861 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef) 2862 { 2863 struct amdkfd_process_info *process_info = info; 2864 struct amdgpu_vm *peer_vm; 2865 struct kgd_mem *mem; 2866 struct list_head duplicate_save; 2867 struct amdgpu_sync sync_obj; 2868 unsigned long failed_size = 0; 2869 unsigned long total_size = 0; 2870 struct drm_exec exec; 2871 int ret; 2872 2873 INIT_LIST_HEAD(&duplicate_save); 2874 2875 mutex_lock(&process_info->lock); 2876 2877 drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); 2878 drm_exec_until_all_locked(&exec) { 2879 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2880 vm_list_node) { 2881 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); 2882 drm_exec_retry_on_contention(&exec); 2883 if (unlikely(ret)) { 2884 pr_err("Locking VM PD failed, ret: %d\n", ret); 2885 goto ttm_reserve_fail; 2886 } 2887 } 2888 2889 /* Reserve all BOs and page tables/directory. Add all BOs from 2890 * kfd_bo_list to ctx.list 2891 */ 2892 list_for_each_entry(mem, &process_info->kfd_bo_list, 2893 validate_list) { 2894 struct drm_gem_object *gobj; 2895 2896 gobj = &mem->bo->tbo.base; 2897 ret = drm_exec_prepare_obj(&exec, gobj, 1); 2898 drm_exec_retry_on_contention(&exec); 2899 if (unlikely(ret)) { 2900 pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret); 2901 goto ttm_reserve_fail; 2902 } 2903 } 2904 } 2905 2906 amdgpu_sync_create(&sync_obj); 2907 2908 /* Validate BOs managed by KFD */ 2909 list_for_each_entry(mem, &process_info->kfd_bo_list, 2910 validate_list) { 2911 2912 struct amdgpu_bo *bo = mem->bo; 2913 uint32_t domain = mem->domain; 2914 struct dma_resv_iter cursor; 2915 struct dma_fence *fence; 2916 2917 total_size += amdgpu_bo_size(bo); 2918 2919 ret = amdgpu_amdkfd_bo_validate(bo, domain, false); 2920 if (ret) { 2921 pr_debug("Memory eviction: Validate BOs failed\n"); 2922 failed_size += amdgpu_bo_size(bo); 2923 ret = amdgpu_amdkfd_bo_validate(bo, 2924 AMDGPU_GEM_DOMAIN_GTT, false); 2925 if (ret) { 2926 pr_debug("Memory eviction: Try again\n"); 2927 goto validate_map_fail; 2928 } 2929 } 2930 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, 2931 DMA_RESV_USAGE_KERNEL, fence) { 2932 ret = amdgpu_sync_fence(&sync_obj, fence); 2933 if (ret) { 2934 pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); 2935 goto validate_map_fail; 2936 } 2937 } 2938 } 2939 2940 if (failed_size) 2941 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); 2942 2943 /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO 2944 * validations above would invalidate DMABuf imports again. 2945 */ 2946 ret = process_validate_vms(process_info, &exec.ticket); 2947 if (ret) { 2948 pr_debug("Validating VMs failed, ret: %d\n", ret); 2949 goto validate_map_fail; 2950 } 2951 2952 /* Update mappings managed by KFD. */ 2953 list_for_each_entry(mem, &process_info->kfd_bo_list, 2954 validate_list) { 2955 struct kfd_mem_attachment *attachment; 2956 2957 list_for_each_entry(attachment, &mem->attachments, list) { 2958 if (!attachment->is_mapped) 2959 continue; 2960 2961 if (attachment->bo_va->base.bo->tbo.pin_count) 2962 continue; 2963 2964 kfd_mem_dmaunmap_attachment(mem, attachment); 2965 ret = update_gpuvm_pte(mem, attachment, &sync_obj); 2966 if (ret) { 2967 pr_debug("Memory eviction: update PTE failed. 
Try again\n"); 2968 goto validate_map_fail; 2969 } 2970 } 2971 } 2972 2973 /* Update mappings not managed by KFD */ 2974 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2975 vm_list_node) { 2976 struct amdgpu_device *adev = amdgpu_ttm_adev( 2977 peer_vm->root.bo->tbo.bdev); 2978 2979 ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket); 2980 if (ret) { 2981 pr_debug("Memory eviction: handle moved failed. Try again\n"); 2982 goto validate_map_fail; 2983 } 2984 } 2985 2986 /* Update page directories */ 2987 ret = process_update_pds(process_info, &sync_obj); 2988 if (ret) { 2989 pr_debug("Memory eviction: update PDs failed. Try again\n"); 2990 goto validate_map_fail; 2991 } 2992 2993 /* Sync with fences on all the page tables. They implicitly depend on any 2994 * move fences from amdgpu_vm_handle_moved above. 2995 */ 2996 ret = process_sync_pds_resv(process_info, &sync_obj); 2997 if (ret) { 2998 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n"); 2999 goto validate_map_fail; 3000 } 3001 3002 /* Wait for validate and PT updates to finish */ 3003 amdgpu_sync_wait(&sync_obj, false); 3004 3005 /* The old eviction fence may be unsignaled if restore happens 3006 * after a GPU reset or suspend/resume. Keep the old fence in that 3007 * case. Otherwise release the old eviction fence and create new 3008 * one, because fence only goes from unsignaled to signaled once 3009 * and cannot be reused. Use context and mm from the old fence. 3010 * 3011 * If an old eviction fence signals after this check, that's OK. 3012 * Anyone signaling an eviction fence must stop the queues first 3013 * and schedule another restore worker. 3014 */ 3015 if (dma_fence_is_signaled(&process_info->eviction_fence->base)) { 3016 struct amdgpu_amdkfd_fence *new_fence = 3017 amdgpu_amdkfd_fence_create( 3018 process_info->eviction_fence->base.context, 3019 process_info->eviction_fence->mm, 3020 NULL); 3021 3022 if (!new_fence) { 3023 pr_err("Failed to create eviction fence\n"); 3024 ret = -ENOMEM; 3025 goto validate_map_fail; 3026 } 3027 dma_fence_put(&process_info->eviction_fence->base); 3028 process_info->eviction_fence = new_fence; 3029 replace_eviction_fence(ef, dma_fence_get(&new_fence->base)); 3030 } else { 3031 WARN_ONCE(*ef != &process_info->eviction_fence->base, 3032 "KFD eviction fence doesn't match KGD process_info"); 3033 } 3034 3035 /* Attach new eviction fence to all BOs except pinned ones */ 3036 list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { 3037 if (mem->bo->tbo.pin_count) 3038 continue; 3039 3040 dma_resv_add_fence(mem->bo->tbo.base.resv, 3041 &process_info->eviction_fence->base, 3042 DMA_RESV_USAGE_BOOKKEEP); 3043 } 3044 /* Attach eviction fence to PD / PT BOs and DMABuf imports */ 3045 list_for_each_entry(peer_vm, &process_info->vm_list_head, 3046 vm_list_node) { 3047 struct amdgpu_bo *bo = peer_vm->root.bo; 3048 3049 dma_resv_add_fence(bo->tbo.base.resv, 3050 &process_info->eviction_fence->base, 3051 DMA_RESV_USAGE_BOOKKEEP); 3052 } 3053 3054 validate_map_fail: 3055 amdgpu_sync_free(&sync_obj); 3056 ttm_reserve_fail: 3057 drm_exec_fini(&exec); 3058 mutex_unlock(&process_info->lock); 3059 return ret; 3060 } 3061 3062 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) 3063 { 3064 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info; 3065 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws; 3066 int ret; 3067 3068 if (!info || !gws) 3069 return -EINVAL; 3070 3071 *mem = kzalloc(sizeof(struct 
kgd_mem), GFP_KERNEL); 3072 if (!*mem) 3073 return -ENOMEM; 3074 3075 mutex_init(&(*mem)->lock); 3076 INIT_LIST_HEAD(&(*mem)->attachments); 3077 (*mem)->bo = amdgpu_bo_ref(gws_bo); 3078 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; 3079 (*mem)->process_info = process_info; 3080 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); 3081 amdgpu_sync_create(&(*mem)->sync); 3082 3083 3084 /* Validate gws bo the first time it is added to process */ 3085 mutex_lock(&(*mem)->process_info->lock); 3086 ret = amdgpu_bo_reserve(gws_bo, false); 3087 if (unlikely(ret)) { 3088 pr_err("Reserve gws bo failed %d\n", ret); 3089 goto bo_reservation_failure; 3090 } 3091 3092 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true); 3093 if (ret) { 3094 pr_err("GWS BO validate failed %d\n", ret); 3095 goto bo_validation_failure; 3096 } 3097 /* GWS resource is shared b/t amdgpu and amdkfd 3098 * Add process eviction fence to bo so they can 3099 * evict each other. 3100 */ 3101 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1); 3102 if (ret) 3103 goto reserve_shared_fail; 3104 dma_resv_add_fence(gws_bo->tbo.base.resv, 3105 &process_info->eviction_fence->base, 3106 DMA_RESV_USAGE_BOOKKEEP); 3107 amdgpu_bo_unreserve(gws_bo); 3108 mutex_unlock(&(*mem)->process_info->lock); 3109 3110 return ret; 3111 3112 reserve_shared_fail: 3113 bo_validation_failure: 3114 amdgpu_bo_unreserve(gws_bo); 3115 bo_reservation_failure: 3116 mutex_unlock(&(*mem)->process_info->lock); 3117 amdgpu_sync_free(&(*mem)->sync); 3118 remove_kgd_mem_from_kfd_bo_list(*mem, process_info); 3119 amdgpu_bo_unref(&gws_bo); 3120 mutex_destroy(&(*mem)->lock); 3121 kfree(*mem); 3122 *mem = NULL; 3123 return ret; 3124 } 3125 3126 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) 3127 { 3128 int ret; 3129 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info; 3130 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; 3131 struct amdgpu_bo *gws_bo = kgd_mem->bo; 3132 3133 /* Remove BO from process's validate list so restore worker won't touch 3134 * it anymore 3135 */ 3136 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info); 3137 3138 ret = amdgpu_bo_reserve(gws_bo, false); 3139 if (unlikely(ret)) { 3140 pr_err("Reserve gws bo failed %d\n", ret); 3141 //TODO add BO back to validate_list? 
3142 return ret; 3143 } 3144 amdgpu_amdkfd_remove_eviction_fence(gws_bo, 3145 process_info->eviction_fence); 3146 amdgpu_bo_unreserve(gws_bo); 3147 amdgpu_sync_free(&kgd_mem->sync); 3148 amdgpu_bo_unref(&gws_bo); 3149 mutex_destroy(&kgd_mem->lock); 3150 kfree(mem); 3151 return 0; 3152 } 3153 3154 /* Returns GPU-specific tiling mode information */ 3155 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, 3156 struct tile_config *config) 3157 { 3158 config->gb_addr_config = adev->gfx.config.gb_addr_config; 3159 config->tile_config_ptr = adev->gfx.config.tile_mode_array; 3160 config->num_tile_configs = 3161 ARRAY_SIZE(adev->gfx.config.tile_mode_array); 3162 config->macro_tile_config_ptr = 3163 adev->gfx.config.macrotile_mode_array; 3164 config->num_macro_tile_configs = 3165 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); 3166 3167 /* Those values are not set from GFX9 onwards */ 3168 config->num_banks = adev->gfx.config.num_banks; 3169 config->num_ranks = adev->gfx.config.num_ranks; 3170 3171 return 0; 3172 } 3173 3174 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem) 3175 { 3176 struct kfd_mem_attachment *entry; 3177 3178 list_for_each_entry(entry, &mem->attachments, list) { 3179 if (entry->is_mapped && entry->adev == adev) 3180 return true; 3181 } 3182 return false; 3183 } 3184 3185 #if defined(CONFIG_DEBUG_FS) 3186 3187 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data) 3188 { 3189 3190 spin_lock(&kfd_mem_limit.mem_limit_lock); 3191 seq_printf(m, "System mem used %lldM out of %lluM\n", 3192 (kfd_mem_limit.system_mem_used >> 20), 3193 (kfd_mem_limit.max_system_mem_limit >> 20)); 3194 seq_printf(m, "TTM mem used %lldM out of %lluM\n", 3195 (kfd_mem_limit.ttm_mem_used >> 20), 3196 (kfd_mem_limit.max_ttm_mem_limit >> 20)); 3197 spin_unlock(&kfd_mem_limit.mem_limit_lock); 3198 3199 return 0; 3200 } 3201 3202 #endif 3203
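/*
 * Illustrative sketch only, not part of the driver: the typical call
 * sequence a KFD client follows through this file for a GTT allocation.
 * adev, va, size and drm_priv are assumed to come from an already
 * attached KFD process; error handling is omitted.
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset = 0;
 *	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
 *			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size, drm_priv,
 *						&mem, &offset, flags, false);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
 *	... use the mapping ...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(adev, mem, drm_priv, NULL);
 */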