// SPDX-License-Identifier: MIT
/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_exec.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS	1
#define AMDGPU_RESERVE_MEM_LIMIT	(3UL << 29)

/*
 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
 * BO chunk
 */
#define VRAM_AVAILABLITY_ALIGN	(1 << 21)

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
				struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/**
 * reuse_dmamap() - Check whether adev can share the original
 * userptr BO
 *
 * If both adev and bo_adev are in direct mapping or
 * in the same iommu group, they can share the original BO.
 *
 * @adev: Device that may or may not share the original BO
 * @bo_adev: Device to which the allocated BO belongs
 *
 * Return: returns true if adev can share original userptr BO,
 * false otherwise.
 */
static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
{
	return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
			(adev->dev->iommu_group == bo_adev->dev->iommu_group);
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 63/64 of system RAM, minus a reserve
 *  TTM memory - the limit reported by ttm_tt_pages_limit()
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	if (kfd_mem_limit.max_system_mem_limit)
		return;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
	if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
		kfd_mem_limit.max_system_mem_limit >>= 1;
	else
		kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;

	kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */

#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
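/* Worked example (illustrative only): managing 64 GB of memory with the
 * compromise factor above reserves 64 GB >> 14 = 4 MB for page tables,
 * i.e. roughly 0.006% of the managed size, subject to the
 * AMDGPU_VM_RESERVED_VRAM floor.
 */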

/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
 * of buffer.
 *
 * @adev: Device to which allocated BO belongs to
 * @size: Size of buffer, in bytes, encapsulated by BO. This should be
 * equivalent to amdgpu_bo_size(BO)
 * @alloc_flag: Flag used in allocating a BO as noted above
 * @xcp_id: xcp_id is used to get xcp from xcp manager, one xcp is
 * managed as one compute node in driver for app
 *
 * Return:
 *	returns -ENOMEM in case of error, ZERO otherwise
 */
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
	size_t system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;
	uint64_t vram_size = 0;

	system_mem_needed = 0;
	ttm_mem_needed = 0;
	vram_needed = 0;
	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		system_mem_needed = size;
		ttm_mem_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		/*
		 * Conservatively round up the allocation requirement to 2 MB
		 * to avoid fragmentation caused by 4K allocations in the tail
		 * 2M BO chunk.
		 */
		vram_needed = size;
		/*
		 * For GFX 9.4.3, get the VRAM size from XCP structs
		 */
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			return -EINVAL;

		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
		if (adev->apu_prefer_gtt) {
			system_mem_needed = size;
			ttm_mem_needed = size;
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		system_mem_needed = size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		return -ENOMEM;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
	     vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) {
		ret = -ENOMEM;
		goto release;
	}

	/* Update memory accounting by decreasing available system
	 * memory, TTM memory and GPU memory as computed above
	 */
	WARN_ONCE(vram_needed && !adev,
		  "adev reference can't be null when vram is used");
	if (adev && xcp_id >= 0) {
		adev->kfd.vram_used[xcp_id] += vram_needed;
		adev->kfd.vram_used_aligned[xcp_id] +=
				adev->apu_prefer_gtt ?
				vram_needed :
				ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
	}
	kfd_mem_limit.system_mem_used += system_mem_needed;
	kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= size;
		kfd_mem_limit.ttm_mem_used -= size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		WARN_ONCE(!adev,
			  "adev reference can't be null when alloc mem flags vram is set");
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			goto release;

		if (adev) {
			adev->kfd.vram_used[xcp_id] -= size;
			if (adev->apu_prefer_gtt) {
				adev->kfd.vram_used_aligned[xcp_id] -= size;
				kfd_mem_limit.system_mem_used -= size;
				kfd_mem_limit.ttm_mem_used -= size;
			} else {
				adev->kfd.vram_used_aligned[xcp_id] -=
					ALIGN(size, VRAM_AVAILABLITY_ALIGN);
			}
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		kfd_mem_limit.system_mem_used -= size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		goto release;
	}
	WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
		  "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "KFD TTM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "KFD system memory accounting unbalanced");

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
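/* The two accounting helpers above must stay balanced: every successful
 * amdgpu_amdkfd_reserve_mem_limit() call needs a matching unreserve once
 * the buffer is released or its allocation fails. A hypothetical caller
 * (sketch only, not an actual code path):
 *
 *	if (amdgpu_amdkfd_reserve_mem_limit(adev, size, flags, xcp_id))
 *		return -ENOMEM;
 *	ret = do_allocation();
 *	if (ret)
 *		amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags, xcp_id);
 */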

void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 alloc_flags = bo->kfd_bo->alloc_flags;
	u64 size = amdgpu_bo_size(bo);

	amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
					  bo->xcp_id);

	kfree(bo->kfd_bo);
}

/**
 * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
 * about a USERPTR or DOORBELL or MMIO BO.
 *
 * @adev: Device for which dmamap BO is being created
 * @mem: BO of peer device that is being DMA mapped. Provides parameters
 *	 in building the dmamap BO
 * @bo_out: Output parameter updated with handle of dmamap BO
 */
static int
create_dmamap_sg_bo(struct amdgpu_device *adev,
		 struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
	struct drm_gem_object *gem_obj;
	int ret;
	uint64_t flags = 0;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
		flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
					AMDGPU_GEM_CREATE_UNCACHED);

	ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
			AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);

	amdgpu_bo_unreserve(mem->bo);

	if (ret) {
		pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
		return -EINVAL;
	}

	*bo_out = gem_to_amdgpu_bo(gem_obj);
	(*bo_out)->parent = amdgpu_bo_ref(mem->bo);
	return ret;
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_fence *replacement;

	if (!ef)
		return -EINVAL;

	/* TODO: Instead of blocking before, we should use the fence of the
	 * page table update and TLB flush here directly.
	 */
	replacement = dma_fence_get_stub();
	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
				replacement, DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(replacement);
	return 0;
}

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* we can always get vm_bo from root PD bo.*/
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	/* bo has been pinned, no need to validate it */
	if (bo->tbo.pin_count)
		return 0;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	int ret = amdgpu_bo_reserve(bo, false);

	if (ret)
		return ret;

	ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
	if (ret)
		goto unreserve_out;

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		goto unreserve_out;

	dma_resv_add_fence(bo->tbo.base.resv, fence,
			   DMA_RESV_USAGE_BOOKKEEP);

unreserve_out:
	amdgpu_bo_unreserve(bo);

	return ret;
}

static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
				 struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_validate(adev, vm, ticket,
				 amdgpu_amdkfd_validate_vm_bo, NULL);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
				 AMDGPU_VM_MTYPE_DEFAULT;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}

/**
 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
 * @addr: The starting address to point to
 * @size: Size of memory area in bytes being pointed to
 *
 * Allocates an instance of sg_table and initializes it to point to memory
 * area specified by input parameters. The address used to build is assumed
 * to be DMA mapped, if needed.
 *
 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
 * because they are physically contiguous.
 *
 * Return: Initialized instance of SG Table or NULL
 */
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg_dma_address(sg->sgl) = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
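/* Example use of create_sg_table() (hypothetical, for illustration only):
 * wrapping a single DMA-mapped doorbell page in a one-entry sg_table.
 *
 *	struct sg_table *sg = create_sg_table(doorbell_dma_addr, PAGE_SIZE);
 *
 *	if (!sg)
 *		return -ENOMEM;
 *	// sg->sgl is the single, physically contiguous entry
 */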

static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}
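/* Validating the imported BO to the CPU domain first invalidates any stale
 * DMA mapping of the dmabuf attachment; the move to GTT that follows then
 * builds a fresh mapping for this device. The invalidation is done here
 * rather than on unmap to avoid triggering eviction fences (see the
 * comment in kfd_mem_dmaunmap_dmabuf()).
 */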
static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	int ret;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		return ret;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * An access request from the device that owns DOORBELL does not require DMA mapping.
 * This is because the request doesn't go through PCIe root complex i.e. it instead
 * loops back. The need to DMA map arises only when accessing peer device's DOORBELL
 *
 * In contrast, all access requests for MMIO need to be DMA mapped without regard to
 * device ownership. This is because access requests for MMIO go through PCIe root
 * complex.
 *
 * This is accomplished in two steps:
 *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
 *     in updating requesting device's page table
 *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
 *     accessible. This allows an update of requesting device's page table
 *     with entries associated with DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Mapping of DOORBELL or MMIO BO of same or peer device
 *   - Validating an evicted DOORBELL or MMIO BO on device seeking access
 *
 * Return: ZERO if successful, NON-ZERO otherwise
 */
static int
kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
		     struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	bool mmio;
	int ret;

	/* Expect SG Table of dmamap BO to be NULL */
	mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
	if (unlikely(ttm->sg)) {
		pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
		return -EINVAL;
	}

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
			DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr = mem->bo->tbo.sg->sgl->dma_address;
	pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
	pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
	dma_addr = dma_map_resource(adev->dev, dma_addr,
			mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(adev->dev, dma_addr);
	if (unlikely(ret))
		return ret;
	pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);

	ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
	if (unlikely(!ttm->sg)) {
		ret = -ENOMEM;
		goto unmap_sg;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(ret))
		goto free_sg;

	return ret;

free_sg:
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
unmap_sg:
	dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	case KFD_MEM_ATT_SG:
		return kfd_mem_dmamap_sg_bo(mem, attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}

static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	(void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	/* This is a no-op. We don't want to trigger eviction fences when
	 * unmapping DMABufs. Therefore the invalidation (moving to system
	 * domain) is done in kfd_mem_dmamap_dmabuf.
	 */
}

/**
 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * The method performs following steps:
 *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
 *   - Free SG Table that is used to encapsulate DMA mapped memory of
 *     peer device's DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *     Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 *     Eviction of DOORBELL or MMIO BO on device having access to its memory
 *
 * Return: void
 */
static void
kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;

	if (unlikely(!ttm->sg)) {
		pr_debug("SG Table of BO is NULL");
		return;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	(void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
			ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
	bo->tbo.sg = NULL;
}

static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	case KFD_MEM_ATT_SG:
		kfd_mem_dmaunmap_sg_bo(mem, attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
	if (!mem->dmabuf) {
		struct amdgpu_device *bo_adev;
		struct dma_buf *dmabuf;

		bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
		dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
					       mem->gem_handle,
			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DRM_RDWR : 0);
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);
		mem->dmabuf = dmabuf;
	}

	return 0;
}

static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	ret = kfd_mem_export_dmabuf(mem);
	if (ret)
		return ret;

	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;

	return 0;
}

/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
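/* Attachment type selection, summarized for reference (the checks in the
 * function body below are the authoritative logic):
 *   - KFD_MEM_ATT_SHARED:  local (non-MMIO) BOs, VRAM within the same XGMI
 *                          hive, userptr/GTT BOs whose DMA mapping can be
 *                          reused, and the second AQL mapping on a GPU
 *   - KFD_MEM_ATT_USERPTR: userptr BOs DMA-mapped for another GPU
 *   - KFD_MEM_ATT_SG:      DOORBELL and MMIO BOs
 *   - KFD_MEM_ATT_DMABUF:  peer GTT/VRAM BOs accessed over PCIe
 */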
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	struct amdgpu_bo_va *bo_va;
	bool same_hive = false;
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
	 *
	 * The access path of MMIO and DOORBELL BOs is always over PCIe.
	 * In contrast, the access path of VRAM BOs depends upon the type of
	 * link that connects the peer device. Access over PCIe is allowed
	 * if the peer device has a large BAR. In contrast, access over xGMI
	 * is allowed for both small and large BAR configurations of the peer
	 * device
	 */
	if ((adev != bo_adev && !adev->apu_prefer_gtt) &&
	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
			same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
		if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
			return -EINVAL;
	}

	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
		    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
		    (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
		    same_hive) {
			/* Mappings on the local GPU, or VRAM mappings in the
			 * local hive, or userptr or GTT mappings that can
			 * reuse the DMA mapping address space, share the
			 * original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
		} else if (mem->bo->tbo.type == ttm_bo_type_sg) {
			WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
				    mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
			attachment[i]->type = KFD_MEM_ATT_SG;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Enable access to GTT and VRAM BOs of peer devices */
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
			   mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
			pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
		} else {
			WARN_ONCE(true, "Handling invalid ATTACH request");
			ret = -EINVAL;
			goto unwind;
		}

		/* Add BO to VM internal data structures */
		ret = amdgpu_bo_reserve(bo[i], false);
		if (ret) {
			pr_debug("Unable to reserve BO during memory attach");
			goto unwind;
		}
		bo_va = amdgpu_vm_bo_find(vm, bo[i]);
		if (!bo_va)
			bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		else
			++bo_va->ref_count;
		attachment[i]->bo_va = bo_va;
		amdgpu_bo_unreserve(bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}
		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			(void)amdgpu_bo_reserve(bo[i], true);
			if (--attachment[i]->bo_va->ref_count == 0)
				amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
			amdgpu_bo_unreserve(bo[i]);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}

static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	pr_debug("\t remove VA 0x%llx in entry %p\n",
			attachment->va, attachment);
	if (--attachment->bo_va->ref_count == 0)
		amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
	drm_gem_object_put(&bo->tbo.base);
	list_del(&attachment->list);
	kfree(attachment);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&mem->validate_list,
			      &process_info->userptr_valid_list);
	else
		list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	mutex_lock(&process_info->lock);
	list_del(&mem->validate_list);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
			   bool criu_resume)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	struct hmm_range *range;
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_hmm_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	if (criu_resume) {
		/*
		 * During a CRIU restore operation, the userptr buffer objects
		 * will be validated in the restore_userptr_work worker at a
		 * later stage when it is scheduled by another ioctl called by
		 * CRIU master process for the target pid for restore.
		 */
		mutex_lock(&process_info->notifier_lock);
		mem->invalid++;
		mutex_unlock(&process_info->notifier_lock);
		mutex_unlock(&process_info->lock);
		return 0;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
	if (ret) {
		if (ret == -EAGAIN)
			pr_debug("Failed to get user pages, try again\n");
		else
			pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
	if (ret)
		amdgpu_hmm_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	/* DRM execution context for the reservation */
	struct drm_exec exec;
	/* Number of VMs reserved */
	unsigned int n_vms;
	/* Pointer to sync object */
	struct amdgpu_sync *sync;
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped */
	BO_VM_ALL,		/* Match all VMs a BO was added to */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			     struct amdgpu_vm *vm,
			     struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->n_vms = 1;
	ctx->sync = &mem->sync;
	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&ctx->exec) {
		ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;

		ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;
	}
	return 0;

error:
	pr_err("Failed to reserve buffers in ttm.\n");
	drm_exec_fini(&ctx->exec);
	return ret;
}
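/* Typical reservation pattern (illustrative sketch only, not a real call
 * site): reserve, operate while the BO and page table BOs are locked, then
 * unreserve and optionally wait for pending VM updates.
 *
 *	struct bo_vm_reservation_context ctx;
 *	int ret;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (ret)
 *		return ret;
 *	// ... update mappings, validate BOs ...
 *	ret = unreserve_bo_and_vms(&ctx, true, false);
 */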

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct kfd_mem_attachment *entry;
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	ctx->sync = &mem->sync;
	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&ctx->exec) {
		ctx->n_vms = 0;
		list_for_each_entry(entry, &mem->attachments, list) {
			if ((vm && vm != entry->bo_va->base.vm) ||
				(entry->is_mapped != map_type
				&& map_type != BO_VM_ALL))
				continue;

			ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
						&ctx->exec, 2);
			drm_exec_retry_on_contention(&ctx->exec);
			if (unlikely(ret))
				goto error;
			++ctx->n_vms;
		}

		ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&ctx->exec);
		if (unlikely(ret))
			goto error;
	}
	return 0;

error:
	pr_err("Failed to reserve buffers in ttm.\n");
	drm_exec_fini(&ctx->exec);
	return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	drm_exec_fini(&ctx->exec);
	ctx->sync = NULL;
	return ret;
}

static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	struct amdgpu_vm *vm = bo_va->base.vm;

	if (bo_va->queue_refcount) {
		pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
		return -EBUSY;
	}

	(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	(void)amdgpu_sync_fence(sync, bo_va->last_pt_update);

	return 0;
}

static int update_gpuvm_pte(struct kgd_mem *mem,
			    struct kfd_mem_attachment *entry,
			    struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);
	if (ret)
		return ret;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct kgd_mem *mem,
			   struct kfd_mem_attachment *entry,
			   struct amdgpu_sync *sync,
			   bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(mem, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	kfd_mem_dmaunmap_attachment(mem, entry);
	return ret;
}

static int process_validate_vms(struct amdkfd_process_info *process_info,
				struct ww_acquire_ctx *ticket)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm, ticket);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		mutex_init(&info->notifier_lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm,
						   NULL);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm, NULL);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	dma_resv_add_fence(vm->root.bo->tbo.base.resv,
			   &vm->process_info->eviction_fence->base,
			   DMA_RESV_USAGE_BOOKKEEP);
	amdgpu_bo_unreserve(vm->root.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	if (ef)
		*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		dma_fence_put(&info->eviction_fence->base);
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		mutex_destroy(&info->notifier_lock);
		kfree(info);
	}
	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
 * @bo: Handle of buffer object being pinned
 * @domain: Domain into which BO should be pinned
 *
 *   - USERPTR BOs are UNPINNABLE and will return error
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count incremented. It is valid to PIN a BO multiple times
 *
 * Return: ZERO if successful in pinning, Non-Zero in case of error.
 */
static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return ret;

	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
		/*
		 * If bo is not contiguous on VRAM, move to system memory first to ensure
		 * we can get contiguous VRAM space after evicting other BOs.
		 */
		if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
			struct ttm_operation_ctx ctx = { true, false };

			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (unlikely(ret)) {
				pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret);
				goto out;
			}
		}
	}

	ret = amdgpu_bo_pin(bo, domain);
	if (ret)
		pr_err("Error in Pinning BO to domain: %d\n", domain);

	amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
out:
	amdgpu_bo_unreserve(bo);
	return ret;
}

/**
 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins BO using the following criteria
 * @bo: Handle of buffer object being unpinned
 *
 *   - Is an illegal request for USERPTR BOs and is ignored
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count decremented. Calls to UNPIN must balance calls to PIN
 */
static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
{
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret))
		return;

	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct amdgpu_vm *avm,
					   void **process_info,
					   struct dma_fence **ef)
{
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	amdgpu_vm_set_task_info(avm);

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;

	if (!process_info)
		return;

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		mutex_destroy(&process_info->notifier_lock);
		kfree(process_info);
	}
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_bo *pd = avm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

void amdgpu_amdkfd_block_mmu_notifications(void *p)
{
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	WRITE_ONCE(pinfo->block_mmu_notifications, true);
	mutex_unlock(&pinfo->lock);
}

int amdgpu_amdkfd_criu_resume(void *p)
{
	int ret = 0;
	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;

	mutex_lock(&pinfo->lock);
	pr_debug("scheduling work\n");
	mutex_lock(&pinfo->notifier_lock);
	pinfo->evicted_bos++;
	mutex_unlock(&pinfo->notifier_lock);
	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	WRITE_ONCE(pinfo->block_mmu_notifications, false);
	queue_delayed_work(system_freezable_wq,
			   &pinfo->restore_userptr_work, 0);

out_unlock:
	mutex_unlock(&pinfo->lock);
	return ret;
}
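/* Report how much memory a KFD allocation can still claim on this
 * partition. The figure below is derived from the partition's VRAM size
 * minus what is already used, pinned, or reserved for page tables and
 * RAS, aligned down to the 2 MB availability granularity (or clamped by
 * the system/TTM limits when an APU prefers GTT).
 */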
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					  uint8_t xcp_id)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
	ssize_t available;
	uint64_t vram_available, system_mem_available, ttm_mem_available;

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
		- adev->kfd.vram_used_aligned[xcp_id]
		- atomic64_read(&adev->vram_pin_size)
		- reserved_for_pt
		- reserved_for_ras;

	if (adev->apu_prefer_gtt) {
		system_mem_available = no_system_mem_limit ?
					kfd_mem_limit.max_system_mem_limit :
					kfd_mem_limit.max_system_mem_limit -
					kfd_mem_limit.system_mem_used;

		ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
				kfd_mem_limit.ttm_mem_used;

		available = min3(system_mem_available, ttm_mem_available,
				 vram_available);
		available = ALIGN_DOWN(available, PAGE_SIZE);
	} else {
		available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);

	if (available < 0)
		available = 0;

	return available;
}

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume)
{
	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
	struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj = NULL;
	u32 domain, alloc_domain;
	uint64_t aligned_size;
	int8_t xcp_id = -1;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (adev->apu_prefer_gtt) {
			domain = AMDGPU_GEM_DOMAIN_GTT;
			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
			alloc_flags = 0;
		} else {
			alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
			alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;

			/* For contiguous VRAM allocation */
			if (flags & KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS)
				alloc_flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		}
		xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
					0 : fpriv->xcp_id;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;

		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
			if (!offset || !*offset)
				return -EINVAL;
			user_addr = untagged_addr(*offset);
		} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
			bo_type = ttm_bo_type_sg;
			if (size > UINT_MAX)
				return -EINVAL;
			sg = create_sg_table(*offset, size);
			if (!sg)
				return -ENOMEM;
		} else {
			return -EINVAL;
		}
	}

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
		alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
		alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
		alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->attachments);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
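	/* e.g. (illustrative only): an 8 MB AQL queue request results in a
	 * 4 MB BO that kfd_mem_attach maps at both va and va + 4 MB.
	 */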
	if ((*mem)->aql_queue)
		size >>= 1;
	aligned_size = PAGE_ALIGN(size);

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
					      xcp_id);
	if (ret) {
		pr_debug("Insufficient memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
		 va, (*mem)->aql_queue ? size << 1 : size,
		 domain_string(alloc_domain), xcp_id);

	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
				       bo_type, NULL, &gobj, xcp_id + 1);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
			 domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
	if (ret) {
		pr_debug("Failed to allow vma node access. ret %d\n", ret);
		goto err_node_allow;
	}
	ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
	if (ret)
		goto err_gem_handle_create;
	bo = gem_to_amdgpu_bo(gobj);
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;

	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
		ret = init_user_pages(*mem, user_addr, criu_resume);
		if (ret)
			goto allocate_init_user_pages_failed;
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
		if (ret) {
			pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
			goto err_pin_bo;
		}
		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	} else {
		mutex_lock(&avm->process_info->lock);
		if (avm->process_info->eviction_fence &&
		    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
			ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
				&avm->process_info->eviction_fence->base);
		mutex_unlock(&avm->process_info->lock);
		if (ret)
			goto err_validate_bo;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
err_pin_bo:
err_validate_bo:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
err_gem_handle_create:
	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
	amdgpu_sync_free(&(*mem)->sync);
	mutex_destroy(&(*mem)->lock);
	if (gobj)
		drm_gem_object_put(gobj);
	else
		kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size)
{
amdkfd_process_info *process_info = mem->process_info; 1870 unsigned long bo_size = mem->bo->tbo.base.size; 1871 bool use_release_notifier = (mem->bo->kfd_bo == mem); 1872 struct kfd_mem_attachment *entry, *tmp; 1873 struct bo_vm_reservation_context ctx; 1874 unsigned int mapped_to_gpu_memory; 1875 int ret; 1876 bool is_imported = false; 1877 1878 mutex_lock(&mem->lock); 1879 1880 /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */ 1881 if (mem->alloc_flags & 1882 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | 1883 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { 1884 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); 1885 } 1886 1887 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; 1888 is_imported = mem->is_imported; 1889 mutex_unlock(&mem->lock); 1890 /* lock is not needed after this, since mem is unused and will 1891 * be freed anyway 1892 */ 1893 1894 if (mapped_to_gpu_memory > 0) { 1895 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", 1896 mem->va, bo_size); 1897 return -EBUSY; 1898 } 1899 1900 /* Make sure restore workers don't access the BO any more */ 1901 mutex_lock(&process_info->lock); 1902 list_del(&mem->validate_list); 1903 mutex_unlock(&process_info->lock); 1904 1905 /* Cleanup user pages and MMU notifiers */ 1906 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { 1907 amdgpu_hmm_unregister(mem->bo); 1908 mutex_lock(&process_info->notifier_lock); 1909 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); 1910 mutex_unlock(&process_info->notifier_lock); 1911 } 1912 1913 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); 1914 if (unlikely(ret)) 1915 return ret; 1916 1917 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1918 process_info->eviction_fence); 1919 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, 1920 mem->va + bo_size * (1 + mem->aql_queue)); 1921 1922 /* Remove from VM internal data structures */ 1923 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) { 1924 kfd_mem_dmaunmap_attachment(mem, entry); 1925 kfd_mem_detach(entry); 1926 } 1927 1928 ret = unreserve_bo_and_vms(&ctx, false, false); 1929 1930 /* Free the sync object */ 1931 amdgpu_sync_free(&mem->sync); 1932 1933 /* If the SG is not NULL, it's one we created for a doorbell or mmio 1934 * remap BO. We need to free it. 1935 */ 1936 if (mem->bo->tbo.sg) { 1937 sg_free_table(mem->bo->tbo.sg); 1938 kfree(mem->bo->tbo.sg); 1939 } 1940 1941 /* Update the size of the BO being freed if it was allocated from 1942 * VRAM and is not imported. For APP APU VRAM allocations are done 1943 * in GTT domain 1944 */ 1945 if (size) { 1946 if (!is_imported && 1947 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || 1948 (adev->apu_prefer_gtt && 1949 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) 1950 *size = bo_size; 1951 else 1952 *size = 0; 1953 } 1954 1955 /* Free the BO*/ 1956 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); 1957 drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); 1958 if (mem->dmabuf) { 1959 dma_buf_put(mem->dmabuf); 1960 mem->dmabuf = NULL; 1961 } 1962 mutex_destroy(&mem->lock); 1963 1964 /* If this releases the last reference, it will end up calling 1965 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why 1966 * this needs to be the last call here. 1967 */ 1968 drm_gem_object_put(&mem->bo->tbo.base); 1969 1970 /* 1971 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(), 1972 * explicitly free it here. 
1973 */ 1974 if (!use_release_notifier) 1975 kfree(mem); 1976 1977 return ret; 1978 } 1979 1980 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 1981 struct amdgpu_device *adev, struct kgd_mem *mem, 1982 void *drm_priv) 1983 { 1984 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1985 int ret; 1986 struct amdgpu_bo *bo; 1987 uint32_t domain; 1988 struct kfd_mem_attachment *entry; 1989 struct bo_vm_reservation_context ctx; 1990 unsigned long bo_size; 1991 bool is_invalid_userptr = false; 1992 1993 bo = mem->bo; 1994 if (!bo) { 1995 pr_err("Invalid BO when mapping memory to GPU\n"); 1996 return -EINVAL; 1997 } 1998 1999 /* Make sure restore is not running concurrently. Since we 2000 * don't map invalid userptr BOs, we rely on the next restore 2001 * worker to do the mapping 2002 */ 2003 mutex_lock(&mem->process_info->lock); 2004 2005 /* Lock notifier lock. If we find an invalid userptr BO, we can be 2006 * sure that the MMU notifier is no longer running 2007 * concurrently and the queues are actually stopped 2008 */ 2009 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 2010 mutex_lock(&mem->process_info->notifier_lock); 2011 is_invalid_userptr = !!mem->invalid; 2012 mutex_unlock(&mem->process_info->notifier_lock); 2013 } 2014 2015 mutex_lock(&mem->lock); 2016 2017 domain = mem->domain; 2018 bo_size = bo->tbo.base.size; 2019 2020 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", 2021 mem->va, 2022 mem->va + bo_size * (1 + mem->aql_queue), 2023 avm, domain_string(domain)); 2024 2025 if (!kfd_mem_is_attached(avm, mem)) { 2026 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); 2027 if (ret) 2028 goto out; 2029 } 2030 2031 ret = reserve_bo_and_vm(mem, avm, &ctx); 2032 if (unlikely(ret)) 2033 goto out; 2034 2035 /* Userptr can be marked as "not invalid", but not actually be 2036 * validated yet (still in the system domain). 
In that case 2037 * the queues are still stopped and we can leave mapping for 2038 * the next restore worker 2039 */ 2040 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && 2041 bo->tbo.resource->mem_type == TTM_PL_SYSTEM) 2042 is_invalid_userptr = true; 2043 2044 ret = vm_validate_pt_pd_bos(avm, NULL); 2045 if (unlikely(ret)) 2046 goto out_unreserve; 2047 2048 list_for_each_entry(entry, &mem->attachments, list) { 2049 if (entry->bo_va->base.vm != avm || entry->is_mapped) 2050 continue; 2051 2052 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", 2053 entry->va, entry->va + bo_size, entry); 2054 2055 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, 2056 is_invalid_userptr); 2057 if (ret) { 2058 pr_err("Failed to map bo to gpuvm\n"); 2059 goto out_unreserve; 2060 } 2061 2062 ret = vm_update_pds(avm, ctx.sync); 2063 if (ret) { 2064 pr_err("Failed to update page directories\n"); 2065 goto out_unreserve; 2066 } 2067 2068 entry->is_mapped = true; 2069 mem->mapped_to_gpu_memory++; 2070 pr_debug("\t INC mapping count %d\n", 2071 mem->mapped_to_gpu_memory); 2072 } 2073 2074 ret = unreserve_bo_and_vms(&ctx, false, false); 2075 2076 goto out; 2077 2078 out_unreserve: 2079 unreserve_bo_and_vms(&ctx, false, false); 2080 out: 2081 mutex_unlock(&mem->process_info->lock); 2082 mutex_unlock(&mem->lock); 2083 return ret; 2084 } 2085 2086 int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) 2087 { 2088 struct kfd_mem_attachment *entry; 2089 struct amdgpu_vm *vm; 2090 int ret; 2091 2092 vm = drm_priv_to_vm(drm_priv); 2093 2094 mutex_lock(&mem->lock); 2095 2096 ret = amdgpu_bo_reserve(mem->bo, true); 2097 if (ret) 2098 goto out; 2099 2100 list_for_each_entry(entry, &mem->attachments, list) { 2101 if (entry->bo_va->base.vm != vm) 2102 continue; 2103 if (entry->bo_va->base.bo->tbo.ttm && 2104 !entry->bo_va->base.bo->tbo.ttm->sg) 2105 continue; 2106 2107 kfd_mem_dmaunmap_attachment(mem, entry); 2108 } 2109 2110 amdgpu_bo_unreserve(mem->bo); 2111 out: 2112 mutex_unlock(&mem->lock); 2113 2114 return ret; 2115 } 2116 2117 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 2118 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) 2119 { 2120 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 2121 unsigned long bo_size = mem->bo->tbo.base.size; 2122 struct kfd_mem_attachment *entry; 2123 struct bo_vm_reservation_context ctx; 2124 int ret; 2125 2126 mutex_lock(&mem->lock); 2127 2128 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); 2129 if (unlikely(ret)) 2130 goto out; 2131 /* If no VMs were reserved, it means the BO wasn't actually mapped */ 2132 if (ctx.n_vms == 0) { 2133 ret = -EINVAL; 2134 goto unreserve_out; 2135 } 2136 2137 ret = vm_validate_pt_pd_bos(avm, NULL); 2138 if (unlikely(ret)) 2139 goto unreserve_out; 2140 2141 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", 2142 mem->va, 2143 mem->va + bo_size * (1 + mem->aql_queue), 2144 avm); 2145 2146 list_for_each_entry(entry, &mem->attachments, list) { 2147 if (entry->bo_va->base.vm != avm || !entry->is_mapped) 2148 continue; 2149 2150 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", 2151 entry->va, entry->va + bo_size, entry); 2152 2153 ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync); 2154 if (ret) 2155 goto unreserve_out; 2156 2157 entry->is_mapped = false; 2158 2159 mem->mapped_to_gpu_memory--; 2160 pr_debug("\t DEC mapping count %d\n", 2161 mem->mapped_to_gpu_memory); 2162 } 2163 2164 unreserve_out: 2165 unreserve_bo_and_vms(&ctx, false, false); 2166 out: 2167 mutex_unlock(&mem->lock); 2168 return ret; 2169 } 2170 2171 
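/** amdgpu_amdkfd_gpuvm_sync_memory() - Wait for fences accumulated in a KFD BO's sync object
 *
 * @adev: Device the BO belongs to (not dereferenced by this function)
 * @mem: KFD BO whose sync object is waited on
 * @intr: Whether the wait may be interrupted by a signal
 *
 * Clones mem->sync into a private copy under mem->lock and waits on the
 * copy outside the lock, so the wait does not block concurrent updates to
 * the BO's sync object.
 */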
int amdgpu_amdkfd_gpuvm_sync_memory( 2172 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) 2173 { 2174 struct amdgpu_sync sync; 2175 int ret; 2176 2177 amdgpu_sync_create(&sync); 2178 2179 mutex_lock(&mem->lock); 2180 amdgpu_sync_clone(&mem->sync, &sync); 2181 mutex_unlock(&mem->lock); 2182 2183 ret = amdgpu_sync_wait(&sync, intr); 2184 amdgpu_sync_free(&sync); 2185 return ret; 2186 } 2187 2188 /** 2189 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count 2190 * @bo: Buffer object to be mapped 2191 * @bo_gart: Return bo reference 2192 * 2193 * Before return, bo reference count is incremented. To release the reference and unpin/ 2194 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem. 2195 */ 2196 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart) 2197 { 2198 int ret; 2199 2200 ret = amdgpu_bo_reserve(bo, true); 2201 if (ret) { 2202 pr_err("Failed to reserve bo. ret %d\n", ret); 2203 goto err_reserve_bo_failed; 2204 } 2205 2206 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 2207 if (ret) { 2208 pr_err("Failed to pin bo. ret %d\n", ret); 2209 goto err_pin_bo_failed; 2210 } 2211 2212 ret = amdgpu_ttm_alloc_gart(&bo->tbo); 2213 if (ret) { 2214 pr_err("Failed to bind bo to GART. ret %d\n", ret); 2215 goto err_map_bo_gart_failed; 2216 } 2217 2218 amdgpu_amdkfd_remove_eviction_fence( 2219 bo, bo->vm_bo->vm->process_info->eviction_fence); 2220 2221 amdgpu_bo_unreserve(bo); 2222 2223 *bo_gart = amdgpu_bo_ref(bo); 2224 2225 return 0; 2226 2227 err_map_bo_gart_failed: 2228 amdgpu_bo_unpin(bo); 2229 err_pin_bo_failed: 2230 amdgpu_bo_unreserve(bo); 2231 err_reserve_bo_failed: 2232 2233 return ret; 2234 } 2235 2236 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access 2237 * 2238 * @mem: Buffer object to be mapped for CPU access 2239 * @kptr[out]: pointer in kernel CPU address space 2240 * @size[out]: size of the buffer 2241 * 2242 * Pins the BO and maps it for kernel CPU access. The eviction fence is removed 2243 * from the BO, since pinned BOs cannot be evicted. The bo must remain on the 2244 * validate_list, so the GPU mapping can be restored after a page table was 2245 * evicted. 2246 * 2247 * Return: 0 on success, error code on failure 2248 */ 2249 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, 2250 void **kptr, uint64_t *size) 2251 { 2252 int ret; 2253 struct amdgpu_bo *bo = mem->bo; 2254 2255 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 2256 pr_err("userptr can't be mapped to kernel\n"); 2257 return -EINVAL; 2258 } 2259 2260 mutex_lock(&mem->process_info->lock); 2261 2262 ret = amdgpu_bo_reserve(bo, true); 2263 if (ret) { 2264 pr_err("Failed to reserve bo. ret %d\n", ret); 2265 goto bo_reserve_failed; 2266 } 2267 2268 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); 2269 if (ret) { 2270 pr_err("Failed to pin bo. ret %d\n", ret); 2271 goto pin_failed; 2272 } 2273 2274 ret = amdgpu_bo_kmap(bo, kptr); 2275 if (ret) { 2276 pr_err("Failed to map bo to kernel. 
ret %d\n", ret); 2277 goto kmap_failed; 2278 } 2279 2280 amdgpu_amdkfd_remove_eviction_fence( 2281 bo, mem->process_info->eviction_fence); 2282 2283 if (size) 2284 *size = amdgpu_bo_size(bo); 2285 2286 amdgpu_bo_unreserve(bo); 2287 2288 mutex_unlock(&mem->process_info->lock); 2289 return 0; 2290 2291 kmap_failed: 2292 amdgpu_bo_unpin(bo); 2293 pin_failed: 2294 amdgpu_bo_unreserve(bo); 2295 bo_reserve_failed: 2296 mutex_unlock(&mem->process_info->lock); 2297 2298 return ret; 2299 } 2300 2301 /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access 2302 * 2303 * @mem: Buffer object to be unmapped for CPU access 2304 * 2305 * Removes the kernel CPU mapping and unpins the BO. It does not restore the 2306 * eviction fence, so this function should only be used for cleanup before the 2307 * BO is destroyed. 2308 */ 2309 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) 2310 { 2311 struct amdgpu_bo *bo = mem->bo; 2312 2313 (void)amdgpu_bo_reserve(bo, true); 2314 amdgpu_bo_kunmap(bo); 2315 amdgpu_bo_unpin(bo); 2316 amdgpu_bo_unreserve(bo); 2317 } 2318 2319 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, 2320 struct kfd_vm_fault_info *mem) 2321 { 2322 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { 2323 *mem = *adev->gmc.vm_fault_info; 2324 mb(); /* make sure read happened */ 2325 atomic_set(&adev->gmc.vm_fault_info_updated, 0); 2326 } 2327 return 0; 2328 } 2329 2330 static int import_obj_create(struct amdgpu_device *adev, 2331 struct dma_buf *dma_buf, 2332 struct drm_gem_object *obj, 2333 uint64_t va, void *drm_priv, 2334 struct kgd_mem **mem, uint64_t *size, 2335 uint64_t *mmap_offset) 2336 { 2337 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 2338 struct amdgpu_bo *bo; 2339 int ret; 2340 2341 bo = gem_to_amdgpu_bo(obj); 2342 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | 2343 AMDGPU_GEM_DOMAIN_GTT))) 2344 /* Only VRAM and GTT BOs are supported */ 2345 return -EINVAL; 2346 2347 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); 2348 if (!*mem) 2349 return -ENOMEM; 2350 2351 ret = drm_vma_node_allow(&obj->vma_node, drm_priv); 2352 if (ret) 2353 goto err_free_mem; 2354 2355 if (size) 2356 *size = amdgpu_bo_size(bo); 2357 2358 if (mmap_offset) 2359 *mmap_offset = amdgpu_bo_mmap_offset(bo); 2360 2361 INIT_LIST_HEAD(&(*mem)->attachments); 2362 mutex_init(&(*mem)->lock); 2363 2364 (*mem)->alloc_flags = 2365 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 2366 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT) 2367 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE 2368 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; 2369 2370 get_dma_buf(dma_buf); 2371 (*mem)->dmabuf = dma_buf; 2372 (*mem)->bo = bo; 2373 (*mem)->va = va; 2374 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && 2375 !adev->apu_prefer_gtt ?
2376 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; 2377 2378 (*mem)->mapped_to_gpu_memory = 0; 2379 (*mem)->process_info = avm->process_info; 2380 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); 2381 amdgpu_sync_create(&(*mem)->sync); 2382 (*mem)->is_imported = true; 2383 2384 mutex_lock(&avm->process_info->lock); 2385 if (avm->process_info->eviction_fence && 2386 !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) 2387 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, 2388 &avm->process_info->eviction_fence->base); 2389 mutex_unlock(&avm->process_info->lock); 2390 if (ret) 2391 goto err_remove_mem; 2392 2393 return 0; 2394 2395 err_remove_mem: 2396 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 2397 drm_vma_node_revoke(&obj->vma_node, drm_priv); 2398 err_free_mem: 2399 kfree(*mem); 2400 return ret; 2401 } 2402 2403 int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd, 2404 uint64_t va, void *drm_priv, 2405 struct kgd_mem **mem, uint64_t *size, 2406 uint64_t *mmap_offset) 2407 { 2408 struct drm_gem_object *obj; 2409 uint32_t handle; 2410 int ret; 2411 2412 ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd, 2413 &handle); 2414 if (ret) 2415 return ret; 2416 obj = drm_gem_object_lookup(adev->kfd.client.file, handle); 2417 if (!obj) { 2418 ret = -EINVAL; 2419 goto err_release_handle; 2420 } 2421 2422 ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size, 2423 mmap_offset); 2424 if (ret) 2425 goto err_put_obj; 2426 2427 (*mem)->gem_handle = handle; 2428 2429 return 0; 2430 2431 err_put_obj: 2432 drm_gem_object_put(obj); 2433 err_release_handle: 2434 drm_gem_handle_delete(adev->kfd.client.file, handle); 2435 return ret; 2436 } 2437 2438 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, 2439 struct dma_buf **dma_buf) 2440 { 2441 int ret; 2442 2443 mutex_lock(&mem->lock); 2444 ret = kfd_mem_export_dmabuf(mem); 2445 if (ret) 2446 goto out; 2447 2448 get_dma_buf(mem->dmabuf); 2449 *dma_buf = mem->dmabuf; 2450 out: 2451 mutex_unlock(&mem->lock); 2452 return ret; 2453 } 2454 2455 /* Evict a userptr BO by stopping the queues if necessary 2456 * 2457 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it 2458 * cannot do any memory allocations, and cannot take any locks that 2459 * are held elsewhere while allocating memory. 2460 * 2461 * It doesn't do anything to the BO itself. The real work happens in 2462 * restore, where we get updated page addresses. This function only 2463 * ensures that GPU access to the BO is stopped. 
2464 */ 2465 int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, 2466 unsigned long cur_seq, struct kgd_mem *mem) 2467 { 2468 struct amdkfd_process_info *process_info = mem->process_info; 2469 int r = 0; 2470 2471 /* Do not process MMU notifications during CRIU restore until 2472 * KFD_CRIU_OP_RESUME IOCTL is received 2473 */ 2474 if (READ_ONCE(process_info->block_mmu_notifications)) 2475 return 0; 2476 2477 mutex_lock(&process_info->notifier_lock); 2478 mmu_interval_set_seq(mni, cur_seq); 2479 2480 mem->invalid++; 2481 if (++process_info->evicted_bos == 1) { 2482 /* First eviction, stop the queues */ 2483 r = kgd2kfd_quiesce_mm(mni->mm, 2484 KFD_QUEUE_EVICTION_TRIGGER_USERPTR); 2485 2486 if (r && r != -ESRCH) 2487 pr_err("Failed to quiesce KFD\n"); 2488 2489 if (r != -ESRCH) 2490 queue_delayed_work(system_freezable_wq, 2491 &process_info->restore_userptr_work, 2492 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); 2493 } 2494 mutex_unlock(&process_info->notifier_lock); 2495 2496 return r; 2497 } 2498 2499 /* Update invalid userptr BOs 2500 * 2501 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to 2502 * userptr_inval_list and updates user pages for all BOs that have 2503 * been invalidated since their last update. 2504 */ 2505 static int update_invalid_user_pages(struct amdkfd_process_info *process_info, 2506 struct mm_struct *mm) 2507 { 2508 struct kgd_mem *mem, *tmp_mem; 2509 struct amdgpu_bo *bo; 2510 struct ttm_operation_ctx ctx = { false, false }; 2511 uint32_t invalid; 2512 int ret = 0; 2513 2514 mutex_lock(&process_info->notifier_lock); 2515 2516 /* Move all invalidated BOs to the userptr_inval_list */ 2517 list_for_each_entry_safe(mem, tmp_mem, 2518 &process_info->userptr_valid_list, 2519 validate_list) 2520 if (mem->invalid) 2521 list_move_tail(&mem->validate_list, 2522 &process_info->userptr_inval_list); 2523 2524 /* Go through userptr_inval_list and update any invalid user_pages */ 2525 list_for_each_entry(mem, &process_info->userptr_inval_list, 2526 validate_list) { 2527 invalid = mem->invalid; 2528 if (!invalid) 2529 /* BO hasn't been invalidated since the last 2530 * revalidation attempt. Keep its page list. 2531 */ 2532 continue; 2533 2534 bo = mem->bo; 2535 2536 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); 2537 mem->range = NULL; 2538 2539 /* BO reservations and getting user pages (hmm_range_fault) 2540 * must happen outside the notifier lock 2541 */ 2542 mutex_unlock(&process_info->notifier_lock); 2543 2544 /* Move the BO to system (CPU) domain if necessary to unmap 2545 * and free the SG table 2546 */ 2547 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) { 2548 if (amdgpu_bo_reserve(bo, true)) 2549 return -EAGAIN; 2550 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 2551 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 2552 amdgpu_bo_unreserve(bo); 2553 if (ret) { 2554 pr_err("%s: Failed to invalidate userptr BO\n", 2555 __func__); 2556 return -EAGAIN; 2557 } 2558 } 2559 2560 /* Get updated user pages */ 2561 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, 2562 &mem->range); 2563 if (ret) { 2564 pr_debug("Failed %d to get user pages\n", ret); 2565 2566 /* Return -EFAULT bad address error as success. It will 2567 * fail later with a VM fault if the GPU tries to access 2568 * it. Better than hanging indefinitely with stalled 2569 * user mode queues. 
2570 * 2571 * Return other error -EBUSY or -ENOMEM to retry restore 2572 */ 2573 if (ret != -EFAULT) 2574 return ret; 2575 2576 ret = 0; 2577 } 2578 2579 mutex_lock(&process_info->notifier_lock); 2580 2581 /* Mark the BO as valid unless it was invalidated 2582 * again concurrently. 2583 */ 2584 if (mem->invalid != invalid) { 2585 ret = -EAGAIN; 2586 goto unlock_out; 2587 } 2588 /* set mem valid if mem has hmm range associated */ 2589 if (mem->range) 2590 mem->invalid = 0; 2591 } 2592 2593 unlock_out: 2594 mutex_unlock(&process_info->notifier_lock); 2595 2596 return ret; 2597 } 2598 2599 /* Validate invalid userptr BOs 2600 * 2601 * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables 2602 * with new page addresses and waits for the page table updates to complete. 2603 */ 2604 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) 2605 { 2606 struct ttm_operation_ctx ctx = { false, false }; 2607 struct amdgpu_sync sync; 2608 struct drm_exec exec; 2609 2610 struct amdgpu_vm *peer_vm; 2611 struct kgd_mem *mem, *tmp_mem; 2612 struct amdgpu_bo *bo; 2613 int ret; 2614 2615 amdgpu_sync_create(&sync); 2616 2617 drm_exec_init(&exec, 0, 0); 2618 /* Reserve all BOs and page tables for validation */ 2619 drm_exec_until_all_locked(&exec) { 2620 /* Reserve all the page directories */ 2621 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2622 vm_list_node) { 2623 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); 2624 drm_exec_retry_on_contention(&exec); 2625 if (unlikely(ret)) 2626 goto unreserve_out; 2627 } 2628 2629 /* Reserve the userptr_inval_list entries to resv_list */ 2630 list_for_each_entry(mem, &process_info->userptr_inval_list, 2631 validate_list) { 2632 struct drm_gem_object *gobj; 2633 2634 gobj = &mem->bo->tbo.base; 2635 ret = drm_exec_prepare_obj(&exec, gobj, 1); 2636 drm_exec_retry_on_contention(&exec); 2637 if (unlikely(ret)) 2638 goto unreserve_out; 2639 } 2640 } 2641 2642 ret = process_validate_vms(process_info, NULL); 2643 if (ret) 2644 goto unreserve_out; 2645 2646 /* Validate BOs and update GPUVM page tables */ 2647 list_for_each_entry_safe(mem, tmp_mem, 2648 &process_info->userptr_inval_list, 2649 validate_list) { 2650 struct kfd_mem_attachment *attachment; 2651 2652 bo = mem->bo; 2653 2654 /* Validate the BO if we got user pages */ 2655 if (bo->tbo.ttm->pages[0]) { 2656 amdgpu_bo_placement_from_domain(bo, mem->domain); 2657 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 2658 if (ret) { 2659 pr_err("%s: failed to validate BO\n", __func__); 2660 goto unreserve_out; 2661 } 2662 } 2663 2664 /* Update mapping. If the BO was not validated 2665 * (because we couldn't get user pages), this will 2666 * clear the page table entries, which will result in 2667 * VM faults if the GPU tries to access the invalid 2668 * memory. 
2669 */ 2670 list_for_each_entry(attachment, &mem->attachments, list) { 2671 if (!attachment->is_mapped) 2672 continue; 2673 2674 kfd_mem_dmaunmap_attachment(mem, attachment); 2675 ret = update_gpuvm_pte(mem, attachment, &sync); 2676 if (ret) { 2677 pr_err("%s: update PTE failed\n", __func__); 2678 /* make sure this gets validated again */ 2679 mutex_lock(&process_info->notifier_lock); 2680 mem->invalid++; 2681 mutex_unlock(&process_info->notifier_lock); 2682 goto unreserve_out; 2683 } 2684 } 2685 } 2686 2687 /* Update page directories */ 2688 ret = process_update_pds(process_info, &sync); 2689 2690 unreserve_out: 2691 drm_exec_fini(&exec); 2692 amdgpu_sync_wait(&sync, false); 2693 amdgpu_sync_free(&sync); 2694 2695 return ret; 2696 } 2697 2698 /* Confirm that all user pages are valid while holding the notifier lock 2699 * 2700 * Moves valid BOs from the userptr_inval_list back to userptr_valid_list. 2701 */ 2702 static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info) 2703 { 2704 struct kgd_mem *mem, *tmp_mem; 2705 int ret = 0; 2706 2707 list_for_each_entry_safe(mem, tmp_mem, 2708 &process_info->userptr_inval_list, 2709 validate_list) { 2710 bool valid; 2711 2712 /* keep mem without hmm range at userptr_inval_list */ 2713 if (!mem->range) 2714 continue; 2715 2716 /* Only check mem with hmm range associated */ 2717 valid = amdgpu_ttm_tt_get_user_pages_done( 2718 mem->bo->tbo.ttm, mem->range); 2719 2720 mem->range = NULL; 2721 if (!valid) { 2722 WARN(!mem->invalid, "Invalid BO not marked invalid"); 2723 ret = -EAGAIN; 2724 continue; 2725 } 2726 2727 if (mem->invalid) { 2728 WARN(1, "Valid BO is marked invalid"); 2729 ret = -EAGAIN; 2730 continue; 2731 } 2732 2733 list_move_tail(&mem->validate_list, 2734 &process_info->userptr_valid_list); 2735 } 2736 2737 return ret; 2738 } 2739 2740 /* Worker callback to restore evicted userptr BOs 2741 * 2742 * Tries to update and validate all userptr BOs. If successful and no 2743 * concurrent evictions happened, the queues are restarted. Otherwise, 2744 * reschedule for another attempt later. 2745 */ 2746 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) 2747 { 2748 struct delayed_work *dwork = to_delayed_work(work); 2749 struct amdkfd_process_info *process_info = 2750 container_of(dwork, struct amdkfd_process_info, 2751 restore_userptr_work); 2752 struct task_struct *usertask; 2753 struct mm_struct *mm; 2754 uint32_t evicted_bos; 2755 2756 mutex_lock(&process_info->notifier_lock); 2757 evicted_bos = process_info->evicted_bos; 2758 mutex_unlock(&process_info->notifier_lock); 2759 if (!evicted_bos) 2760 return; 2761 2762 /* Reference task and mm in case of concurrent process termination */ 2763 usertask = get_pid_task(process_info->pid, PIDTYPE_PID); 2764 if (!usertask) 2765 return; 2766 mm = get_task_mm(usertask); 2767 if (!mm) { 2768 put_task_struct(usertask); 2769 return; 2770 } 2771 2772 mutex_lock(&process_info->lock); 2773 2774 if (update_invalid_user_pages(process_info, mm)) 2775 goto unlock_out; 2776 /* userptr_inval_list can be empty if all evicted userptr BOs 2777 * have been freed. In that case there is nothing to validate 2778 * and we can just restart the queues. 2779 */ 2780 if (!list_empty(&process_info->userptr_inval_list)) { 2781 if (validate_invalid_user_pages(process_info)) 2782 goto unlock_out; 2783 } 2784 /* Final check for concurrent eviction and atomic update. If 2785 * another eviction happens after successful update, it will 2786 * be a first eviction that calls quiesce_mm.
The eviction 2787 * reference counting inside KFD will handle this case. 2788 */ 2789 mutex_lock(&process_info->notifier_lock); 2790 if (process_info->evicted_bos != evicted_bos) 2791 goto unlock_notifier_out; 2792 2793 if (confirm_valid_user_pages_locked(process_info)) { 2794 WARN(1, "User pages unexpectedly invalid"); 2795 goto unlock_notifier_out; 2796 } 2797 2798 process_info->evicted_bos = evicted_bos = 0; 2799 2800 if (kgd2kfd_resume_mm(mm)) { 2801 pr_err("%s: Failed to resume KFD\n", __func__); 2802 /* No recovery from this failure. Probably the CP is 2803 * hanging. No point trying again. 2804 */ 2805 } 2806 2807 unlock_notifier_out: 2808 mutex_unlock(&process_info->notifier_lock); 2809 unlock_out: 2810 mutex_unlock(&process_info->lock); 2811 2812 /* If validation failed, reschedule another attempt */ 2813 if (evicted_bos) { 2814 queue_delayed_work(system_freezable_wq, 2815 &process_info->restore_userptr_work, 2816 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); 2817 2818 kfd_smi_event_queue_restore_rescheduled(mm); 2819 } 2820 mmput(mm); 2821 put_task_struct(usertask); 2822 } 2823 2824 static void replace_eviction_fence(struct dma_fence __rcu **ef, 2825 struct dma_fence *new_ef) 2826 { 2827 struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true 2828 /* protected by process_info->lock */); 2829 2830 /* If we're replacing an unsignaled eviction fence, that fence will 2831 * never be signaled, and if anyone is still waiting on that fence, 2832 * they will hang forever. This should never happen. We should only 2833 * replace the fence in restore_work that only gets scheduled after 2834 * eviction work signaled the fence. 2835 */ 2836 WARN_ONCE(!dma_fence_is_signaled(old_ef), 2837 "Replacing unsignaled eviction fence"); 2838 dma_fence_put(old_ef); 2839 } 2840 2841 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given 2842 * KFD process identified by process_info 2843 * 2844 * @process_info: amdkfd_process_info of the KFD process 2845 * 2846 * After memory eviction, restore thread calls this function. The function 2847 * should be called when the Process is still valid. BO restore involves - 2848 * 2849 * 1. Release old eviction fence and create new one 2850 * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list. 2851 * 3 Use the second PD list and kfd_bo_list to create a list (ctx.list) of 2852 * BOs that need to be reserved. 2853 * 4. Reserve all the BOs 2854 * 5. Validate of PD and PT BOs. 2855 * 6. Validate all KFD BOs using kfd_bo_list and Map them and add new fence 2856 * 7. Add fence to all PD and PT BOs. 2857 * 8. 
Unreserve all BOs 2858 */ 2859 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef) 2860 { 2861 struct amdkfd_process_info *process_info = info; 2862 struct amdgpu_vm *peer_vm; 2863 struct kgd_mem *mem; 2864 struct list_head duplicate_save; 2865 struct amdgpu_sync sync_obj; 2866 unsigned long failed_size = 0; 2867 unsigned long total_size = 0; 2868 struct drm_exec exec; 2869 int ret; 2870 2871 INIT_LIST_HEAD(&duplicate_save); 2872 2873 mutex_lock(&process_info->lock); 2874 2875 drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); 2876 drm_exec_until_all_locked(&exec) { 2877 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2878 vm_list_node) { 2879 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); 2880 drm_exec_retry_on_contention(&exec); 2881 if (unlikely(ret)) { 2882 pr_err("Locking VM PD failed, ret: %d\n", ret); 2883 goto ttm_reserve_fail; 2884 } 2885 } 2886 2887 /* Reserve all BOs and page tables/directory. Add all BOs from 2888 * kfd_bo_list to ctx.list 2889 */ 2890 list_for_each_entry(mem, &process_info->kfd_bo_list, 2891 validate_list) { 2892 struct drm_gem_object *gobj; 2893 2894 gobj = &mem->bo->tbo.base; 2895 ret = drm_exec_prepare_obj(&exec, gobj, 1); 2896 drm_exec_retry_on_contention(&exec); 2897 if (unlikely(ret)) { 2898 pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret); 2899 goto ttm_reserve_fail; 2900 } 2901 } 2902 } 2903 2904 amdgpu_sync_create(&sync_obj); 2905 2906 /* Validate BOs managed by KFD */ 2907 list_for_each_entry(mem, &process_info->kfd_bo_list, 2908 validate_list) { 2909 2910 struct amdgpu_bo *bo = mem->bo; 2911 uint32_t domain = mem->domain; 2912 struct dma_resv_iter cursor; 2913 struct dma_fence *fence; 2914 2915 total_size += amdgpu_bo_size(bo); 2916 2917 ret = amdgpu_amdkfd_bo_validate(bo, domain, false); 2918 if (ret) { 2919 pr_debug("Memory eviction: Validate BOs failed\n"); 2920 failed_size += amdgpu_bo_size(bo); 2921 ret = amdgpu_amdkfd_bo_validate(bo, 2922 AMDGPU_GEM_DOMAIN_GTT, false); 2923 if (ret) { 2924 pr_debug("Memory eviction: Try again\n"); 2925 goto validate_map_fail; 2926 } 2927 } 2928 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, 2929 DMA_RESV_USAGE_KERNEL, fence) { 2930 ret = amdgpu_sync_fence(&sync_obj, fence); 2931 if (ret) { 2932 pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); 2933 goto validate_map_fail; 2934 } 2935 } 2936 } 2937 2938 if (failed_size) 2939 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); 2940 2941 /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO 2942 * validations above would invalidate DMABuf imports again. 2943 */ 2944 ret = process_validate_vms(process_info, &exec.ticket); 2945 if (ret) { 2946 pr_debug("Validating VMs failed, ret: %d\n", ret); 2947 goto validate_map_fail; 2948 } 2949 2950 /* Update mappings managed by KFD. */ 2951 list_for_each_entry(mem, &process_info->kfd_bo_list, 2952 validate_list) { 2953 struct kfd_mem_attachment *attachment; 2954 2955 list_for_each_entry(attachment, &mem->attachments, list) { 2956 if (!attachment->is_mapped) 2957 continue; 2958 2959 kfd_mem_dmaunmap_attachment(mem, attachment); 2960 ret = update_gpuvm_pte(mem, attachment, &sync_obj); 2961 if (ret) { 2962 pr_debug("Memory eviction: update PTE failed. 
Try again\n"); 2963 goto validate_map_fail; 2964 } 2965 } 2966 } 2967 2968 /* Update mappings not managed by KFD */ 2969 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2970 vm_list_node) { 2971 struct amdgpu_device *adev = amdgpu_ttm_adev( 2972 peer_vm->root.bo->tbo.bdev); 2973 2974 ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket); 2975 if (ret) { 2976 pr_debug("Memory eviction: handle moved failed. Try again\n"); 2977 goto validate_map_fail; 2978 } 2979 } 2980 2981 /* Update page directories */ 2982 ret = process_update_pds(process_info, &sync_obj); 2983 if (ret) { 2984 pr_debug("Memory eviction: update PDs failed. Try again\n"); 2985 goto validate_map_fail; 2986 } 2987 2988 /* Sync with fences on all the page tables. They implicitly depend on any 2989 * move fences from amdgpu_vm_handle_moved above. 2990 */ 2991 ret = process_sync_pds_resv(process_info, &sync_obj); 2992 if (ret) { 2993 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n"); 2994 goto validate_map_fail; 2995 } 2996 2997 /* Wait for validate and PT updates to finish */ 2998 amdgpu_sync_wait(&sync_obj, false); 2999 3000 /* The old eviction fence may be unsignaled if restore happens 3001 * after a GPU reset or suspend/resume. Keep the old fence in that 3002 * case. Otherwise release the old eviction fence and create new 3003 * one, because fence only goes from unsignaled to signaled once 3004 * and cannot be reused. Use context and mm from the old fence. 3005 * 3006 * If an old eviction fence signals after this check, that's OK. 3007 * Anyone signaling an eviction fence must stop the queues first 3008 * and schedule another restore worker. 3009 */ 3010 if (dma_fence_is_signaled(&process_info->eviction_fence->base)) { 3011 struct amdgpu_amdkfd_fence *new_fence = 3012 amdgpu_amdkfd_fence_create( 3013 process_info->eviction_fence->base.context, 3014 process_info->eviction_fence->mm, 3015 NULL); 3016 3017 if (!new_fence) { 3018 pr_err("Failed to create eviction fence\n"); 3019 ret = -ENOMEM; 3020 goto validate_map_fail; 3021 } 3022 dma_fence_put(&process_info->eviction_fence->base); 3023 process_info->eviction_fence = new_fence; 3024 replace_eviction_fence(ef, dma_fence_get(&new_fence->base)); 3025 } else { 3026 WARN_ONCE(*ef != &process_info->eviction_fence->base, 3027 "KFD eviction fence doesn't match KGD process_info"); 3028 } 3029 3030 /* Attach new eviction fence to all BOs except pinned ones */ 3031 list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { 3032 if (mem->bo->tbo.pin_count) 3033 continue; 3034 3035 dma_resv_add_fence(mem->bo->tbo.base.resv, 3036 &process_info->eviction_fence->base, 3037 DMA_RESV_USAGE_BOOKKEEP); 3038 } 3039 /* Attach eviction fence to PD / PT BOs and DMABuf imports */ 3040 list_for_each_entry(peer_vm, &process_info->vm_list_head, 3041 vm_list_node) { 3042 struct amdgpu_bo *bo = peer_vm->root.bo; 3043 3044 dma_resv_add_fence(bo->tbo.base.resv, 3045 &process_info->eviction_fence->base, 3046 DMA_RESV_USAGE_BOOKKEEP); 3047 } 3048 3049 validate_map_fail: 3050 amdgpu_sync_free(&sync_obj); 3051 ttm_reserve_fail: 3052 drm_exec_fini(&exec); 3053 mutex_unlock(&process_info->lock); 3054 return ret; 3055 } 3056 3057 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) 3058 { 3059 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info; 3060 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws; 3061 int ret; 3062 3063 if (!info || !gws) 3064 return -EINVAL; 3065 3066 *mem = kzalloc(sizeof(struct 
kgd_mem), GFP_KERNEL); 3067 if (!*mem) 3068 return -ENOMEM; 3069 3070 mutex_init(&(*mem)->lock); 3071 INIT_LIST_HEAD(&(*mem)->attachments); 3072 (*mem)->bo = amdgpu_bo_ref(gws_bo); 3073 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; 3074 (*mem)->process_info = process_info; 3075 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); 3076 amdgpu_sync_create(&(*mem)->sync); 3077 3078 3079 /* Validate gws bo the first time it is added to process */ 3080 mutex_lock(&(*mem)->process_info->lock); 3081 ret = amdgpu_bo_reserve(gws_bo, false); 3082 if (unlikely(ret)) { 3083 pr_err("Reserve gws bo failed %d\n", ret); 3084 goto bo_reservation_failure; 3085 } 3086 3087 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true); 3088 if (ret) { 3089 pr_err("GWS BO validate failed %d\n", ret); 3090 goto bo_validation_failure; 3091 } 3092 /* GWS resource is shared b/t amdgpu and amdkfd 3093 * Add process eviction fence to bo so they can 3094 * evict each other. 3095 */ 3096 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1); 3097 if (ret) 3098 goto reserve_shared_fail; 3099 dma_resv_add_fence(gws_bo->tbo.base.resv, 3100 &process_info->eviction_fence->base, 3101 DMA_RESV_USAGE_BOOKKEEP); 3102 amdgpu_bo_unreserve(gws_bo); 3103 mutex_unlock(&(*mem)->process_info->lock); 3104 3105 return ret; 3106 3107 reserve_shared_fail: 3108 bo_validation_failure: 3109 amdgpu_bo_unreserve(gws_bo); 3110 bo_reservation_failure: 3111 mutex_unlock(&(*mem)->process_info->lock); 3112 amdgpu_sync_free(&(*mem)->sync); 3113 remove_kgd_mem_from_kfd_bo_list(*mem, process_info); 3114 amdgpu_bo_unref(&gws_bo); 3115 mutex_destroy(&(*mem)->lock); 3116 kfree(*mem); 3117 *mem = NULL; 3118 return ret; 3119 } 3120 3121 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) 3122 { 3123 int ret; 3124 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info; 3125 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; 3126 struct amdgpu_bo *gws_bo = kgd_mem->bo; 3127 3128 /* Remove BO from process's validate list so restore worker won't touch 3129 * it anymore 3130 */ 3131 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info); 3132 3133 ret = amdgpu_bo_reserve(gws_bo, false); 3134 if (unlikely(ret)) { 3135 pr_err("Reserve gws bo failed %d\n", ret); 3136 //TODO add BO back to validate_list? 
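/* The BO was already removed from the validate_list above, so the restore
 * worker will no longer track it even though teardown stops here.
 */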
3137 return ret; 3138 } 3139 amdgpu_amdkfd_remove_eviction_fence(gws_bo, 3140 process_info->eviction_fence); 3141 amdgpu_bo_unreserve(gws_bo); 3142 amdgpu_sync_free(&kgd_mem->sync); 3143 amdgpu_bo_unref(&gws_bo); 3144 mutex_destroy(&kgd_mem->lock); 3145 kfree(mem); 3146 return 0; 3147 } 3148 3149 /* Returns GPU-specific tiling mode information */ 3150 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, 3151 struct tile_config *config) 3152 { 3153 config->gb_addr_config = adev->gfx.config.gb_addr_config; 3154 config->tile_config_ptr = adev->gfx.config.tile_mode_array; 3155 config->num_tile_configs = 3156 ARRAY_SIZE(adev->gfx.config.tile_mode_array); 3157 config->macro_tile_config_ptr = 3158 adev->gfx.config.macrotile_mode_array; 3159 config->num_macro_tile_configs = 3160 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); 3161 3162 /* Those values are not set from GFX9 onwards */ 3163 config->num_banks = adev->gfx.config.num_banks; 3164 config->num_ranks = adev->gfx.config.num_ranks; 3165 3166 return 0; 3167 } 3168 3169 bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem) 3170 { 3171 struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv); 3172 struct kfd_mem_attachment *entry; 3173 3174 list_for_each_entry(entry, &mem->attachments, list) { 3175 if (entry->is_mapped && entry->bo_va->base.vm == vm) 3176 return true; 3177 } 3178 return false; 3179 } 3180 3181 #if defined(CONFIG_DEBUG_FS) 3182 3183 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data) 3184 { 3185 3186 spin_lock(&kfd_mem_limit.mem_limit_lock); 3187 seq_printf(m, "System mem used %lldM out of %lluM\n", 3188 (kfd_mem_limit.system_mem_used >> 20), 3189 (kfd_mem_limit.max_system_mem_limit >> 20)); 3190 seq_printf(m, "TTM mem used %lldM out of %lluM\n", 3191 (kfd_mem_limit.ttm_mem_used >> 20), 3192 (kfd_mem_limit.max_ttm_mem_limit >> 20)); 3193 spin_unlock(&kfd_mem_limit.mem_limit_lock); 3194 3195 return 0; 3196 } 3197 3198 #endif 3199
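/*
 * Illustrative sketch (not driver code): the expected calling sequence for
 * the allocate/map/sync/unmap/free interface defined in this file. The
 * example_vram_bo_lifecycle() name, the flag combination and the teardown
 * order are illustrative assumptions; only the function signatures are
 * taken from the definitions above.
 */
#if 0
static int example_vram_bo_lifecycle(struct amdgpu_device *adev,
				     void *drm_priv, uint64_t va,
				     uint64_t size)
{
	struct kgd_mem *mem;
	uint64_t freed_size;
	int r;

	/* Allocate a KFD-managed VRAM BO at GPUVM address va */
	r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size, drm_priv,
			&mem, NULL,
			KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
			false);
	if (r)
		return r;

	/* Map it into this process's GPUVM and wait for the PT updates */
	r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
	if (r)
		goto free_bo;
	r = amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
	if (r)
		goto unmap_bo;

	/* ... GPU work using the mapping would go here ... */

unmap_bo:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(adev, mem, drm_priv);
free_bo:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(adev, mem, drm_priv, &freed_size);
	return r;
}
#endif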