/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/export.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
#include "amdgpu_vm.h"
#include "amdgpu_dma_buf.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

        amdgpu_bo_kunmap(bo);

        if (drm_gem_is_imported(&bo->tbo.base))
                drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
        drm_gem_object_release(&bo->tbo.base);
        amdgpu_bo_unref(&bo->parent);
        kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
        struct amdgpu_bo_user *ubo;

        ubo = to_amdgpu_bo_user(bo);
        kfree(ubo->metadata);
        amdgpu_bo_destroy(tbo);
}
/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &amdgpu_bo_destroy ||
            bo->destroy == &amdgpu_bo_user_destroy)
                return true;

        return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct ttm_placement *placement = &abo->placement;
        struct ttm_place *places = abo->placements;
        u64 flags = abo->flags;
        u32 c = 0;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
                int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);

                if (adev->gmc.mem_partitions && mem_id >= 0) {
                        places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
                        /*
                         * memory partition range lpfn is inclusive start + size - 1
                         * TTM place lpfn is exclusive start + size
                         */
                        places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
                } else {
                        places[c].fpfn = 0;
                        places[c].lpfn = 0;
                }
                places[c].mem_type = TTM_PL_VRAM;
                places[c].flags = 0;

                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;

                if (abo->tbo.type == ttm_bo_type_kernel &&
                    flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
                        places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;

                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_DOORBELL;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type =
                        abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
                        AMDGPU_PL_PREEMPT : TTM_PL_TT;
                places[c].flags = 0;
                /*
                 * When GTT is just an alternative to VRAM make sure that we
                 * only use it as fallback and still try to fill up VRAM first.
                 */
                if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
                    domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
                        places[c].flags |= TTM_PL_FLAG_FALLBACK;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_SYSTEM;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_GDS;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_GWS;
                places[c].flags = 0;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = AMDGPU_PL_OA;
                places[c].flags = 0;
                c++;
        }

        if (!c) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].mem_type = TTM_PL_SYSTEM;
                places[c].flags = 0;
                c++;
        }

        BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

        placement->num_placement = c;
        placement->placement = places;
}
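/*
 * Illustrative sketch (not part of the driver): how a caller typically pairs
 * amdgpu_bo_placement_from_domain() with TTM validation. With VRAM|GTT
 * requested on a dGPU that already has a backing resource, the GTT place is
 * flagged TTM_PL_FLAG_FALLBACK above so TTM fills VRAM first. "r" and "ctx"
 * stand in for the caller's return value and ttm_operation_ctx.
 *
 *        amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *                                             AMDGPU_GEM_DOMAIN_GTT);
 *        r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */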
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr)
{
        struct amdgpu_bo_param bp;
        bool free = false;
        int r;

        if (!size) {
                amdgpu_bo_unref(bo_ptr);
                return 0;
        }

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = align;
        bp.domain = domain;
        bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
                : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
        bp.bo_ptr_size = sizeof(struct amdgpu_bo);

        if (!*bo_ptr) {
                r = amdgpu_bo_create(adev, &bp, bo_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
                                r);
                        return r;
                }
                free = true;
        }

        r = amdgpu_bo_reserve(*bo_ptr, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
                goto error_free;
        }

        r = amdgpu_bo_pin(*bo_ptr, domain);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }

        r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
        if (r) {
                dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
                goto error_unpin;
        }

        if (gpu_addr)
                *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
                        goto error_unpin;
                }
        }

        return 0;

error_unpin:
        amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);

error_free:
        if (free)
                amdgpu_bo_unref(bo_ptr);

        return r;
}
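/*
 * Usage sketch (illustrative only): allocate one page of pinned, CPU-visible
 * VRAM that stays reserved so the caller can finish setting it up before
 * unreserving. Error handling is trimmed to the essentials.
 *
 *        struct amdgpu_bo *bo = NULL;
 *        u64 gpu_addr;
 *        void *cpu_ptr;
 *        int r;
 *
 *        r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
 *                                      AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *                                      &gpu_addr, &cpu_ptr);
 *        if (!r) {
 *                memset(cpu_ptr, 0, PAGE_SIZE);
 *                amdgpu_bo_unreserve(bo);
 *        }
 */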
/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * This function is exported to allow the V4L2 isp device
 * external to drm device to create and access the kernel BO.
 *
 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr)
{
        int r;

        r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
                                      gpu_addr, cpu_addr);

        if (r)
                return r;

        if (*bo_ptr)
                amdgpu_bo_unreserve(*bo_ptr);

        return 0;
}

/**
 * amdgpu_bo_create_isp_user - create user BO for isp
 *
 * @adev: amdgpu device object
 * @dma_buf: DMABUF handle for isp buffer
 * @domain: where to place it
 * @bo: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 *
 * Imports the isp DMABUF to allocate and pin a user BO for isp internal use.
 * It performs a GART allocation to generate a gpu_addr for the BO, making it
 * accessible through the GART aperture for the ISP HW.
 *
 * This function is exported to allow the V4L2 isp device external to drm
 * device to create and access the isp user BO.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
                              struct dma_buf *dma_buf, u32 domain,
                              struct amdgpu_bo **bo, u64 *gpu_addr)
{
        struct drm_gem_object *gem_obj;
        int r;

        gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
        *bo = gem_to_amdgpu_bo(gem_obj);
        if (!(*bo)) {
                dev_err(adev->dev, "failed to get valid isp user bo\n");
                return -EINVAL;
        }

        r = amdgpu_bo_reserve(*bo, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
                return r;
        }

        r = amdgpu_bo_pin(*bo, domain);
        if (r) {
                dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
                goto error_unreserve;
        }

        r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
        if (r) {
                dev_err(adev->dev, "%p bind failed\n", *bo);
                goto error_unpin;
        }

        if (!WARN_ON(!gpu_addr))
                *gpu_addr = amdgpu_bo_gpu_offset(*bo);

        amdgpu_bo_unreserve(*bo);

        return 0;

error_unpin:
        amdgpu_bo_unpin(*bo);
error_unreserve:
        amdgpu_bo_unreserve(*bo);
        amdgpu_bo_unref(bo);

        return r;
}
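/*
 * Usage sketch (illustrative only): amdgpu_bo_create_kernel() above is the
 * common helper for kernel-internal allocations; unlike the _reserved
 * variant it returns the BO already unreserved. "fw_size" is a placeholder
 * for the caller's buffer size.
 *
 *        struct amdgpu_bo *fw_bo = NULL;
 *        u64 fw_gpu_addr;
 *        void *fw_cpu_ptr;
 *
 *        r = amdgpu_bo_create_kernel(adev, fw_size, PAGE_SIZE,
 *                                    AMDGPU_GEM_DOMAIN_GTT, &fw_bo,
 *                                    &fw_gpu_addr, &fw_cpu_ptr);
 */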
/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
                               uint64_t offset, uint64_t size,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
        struct ttm_operation_ctx ctx = { false, false };
        unsigned int i;
        int r;

        offset &= PAGE_MASK;
        size = ALIGN(size, PAGE_SIZE);

        r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
                                      cpu_addr);
        if (r)
                return r;

        if ((*bo_ptr) == NULL)
                return 0;

        /*
         * Remove the original mem node and create a new one at the request
         * position.
         */
        if (cpu_addr)
                amdgpu_bo_kunmap(*bo_ptr);

        ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

        for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
                (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
                (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
        }
        r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
                             &(*bo_ptr)->tbo.resource, &ctx);
        if (r)
                goto error;

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r)
                        goto error;
        }

        amdgpu_bo_unreserve(*bo_ptr);
        return 0;

error:
        amdgpu_bo_unreserve(*bo_ptr);
        amdgpu_bo_unref(bo_ptr);
        return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 *
 * This function is exported to allow the V4L2 isp device
 * external to drm device to free the kernel BO.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr)
{
        if (*bo == NULL)
                return;

        WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

        if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);

                amdgpu_bo_unpin(*bo);
                amdgpu_bo_unreserve(*bo);
        }
        amdgpu_bo_unref(bo);

        if (gpu_addr)
                *gpu_addr = 0;

        if (cpu_addr)
                *cpu_addr = NULL;
}
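/*
 * Usage sketch (illustrative only): amdgpu_bo_create_kernel_at() claims a
 * fixed VRAM range, e.g. memory already in use by the BIOS/firmware, and
 * amdgpu_bo_free_kernel() later unmaps, unpins and drops the BO.
 * "stolen_offset" and "stolen_size" are placeholders.
 *
 *        struct amdgpu_bo *stolen = NULL;
 *
 *        r = amdgpu_bo_create_kernel_at(adev, stolen_offset, stolen_size,
 *                                       &stolen, NULL);
 *        ...
 *        amdgpu_bo_free_kernel(&stolen, NULL, NULL);
 */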
/**
 * amdgpu_bo_free_isp_user - free BO for isp use
 *
 * @bo: amdgpu isp user BO to free
 *
 * Unpins and unrefs a BO for isp internal use.
 *
 * This function is exported to allow the V4L2 isp device
 * external to drm device to free the isp user BO.
 */
void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return;

        if (amdgpu_bo_reserve(bo, true) == 0) {
                amdgpu_bo_unpin(bo);
                amdgpu_bo_unreserve(bo);
        }
        amdgpu_bo_unref(&bo);
}

/* Validate that the BO size fits within the total memory of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
                                    unsigned long size, u32 domain)
{
        struct ttm_resource_manager *man = NULL;

        /*
         * If GTT is part of requested domains the check must succeed to
         * allow fall back to GTT.
         */
        if (domain & AMDGPU_GEM_DOMAIN_GTT)
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
        else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        else
                return true;

        if (!man) {
                if (domain & AMDGPU_GEM_DOMAIN_GTT)
                        WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
                return false;
        }

        /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
        if (size < man->size)
                return true;

        DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
        return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{
#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        return false;
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                return false;

        return true;
#endif
}
/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = (bp->type != ttm_bo_type_kernel),
                .no_wait_gpu = bp->no_wait_gpu,
                /* We opt to avoid OOM on system pages allocations */
                .gfp_retry_mayfail = true,
                .allow_res_evict = bp->type != ttm_bo_type_kernel,
                .resv = bp->resv
        };
        struct amdgpu_bo *bo;
        unsigned long page_align, size = bp->size;
        int r;

        /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
        if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                /* GWS and OA don't need any alignment. */
                page_align = bp->byte_align;
                size <<= PAGE_SHIFT;

        } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
                /* Both size and alignment must be a multiple of 4. */
                page_align = ALIGN(bp->byte_align, 4);
                size = ALIGN(size, 4) << PAGE_SHIFT;
        } else {
                /* Memory should be aligned at least to a page size. */
                page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
                size = ALIGN(size, PAGE_SIZE);
        }

        if (!amdgpu_bo_validate_size(adev, size, bp->domain))
                return -ENOMEM;

        BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

        *bo_ptr = NULL;
        bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
        bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
        bo->vm_bo = NULL;
        bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
                bp->domain;
        bo->allowed_domains = bo->preferred_domains;
        if (bp->type != ttm_bo_type_kernel &&
            !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
            bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        bo->flags = bp->flags;

        if (adev->gmc.mem_partitions)
                /* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
                bo->xcp_id = bp->xcp_id_plus1 - 1;
        else
                /* For GPUs without spatial partitioning */
                bo->xcp_id = 0;

        if (!amdgpu_bo_support_uswc(bo->flags))
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

        bo->tbo.bdev = &adev->mman.bdev;
        if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
                          AMDGPU_GEM_DOMAIN_GDS))
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
        else
                amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
                bo->tbo.priority = 2;
        else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
                bo->tbo.priority = 1;

        if (!bp->destroy)
                bp->destroy = &amdgpu_bo_destroy;

        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
                                 &bo->placement, page_align, &ctx, NULL,
                                 bp->resv, bp->destroy);
        if (unlikely(r != 0))
                return r;

        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
                                             ctx.bytes_moved);
        else
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

        if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                struct dma_fence *fence;

                r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;

                dma_resv_add_fence(bo->tbo.base.resv, fence,
                                   DMA_RESV_USAGE_KERNEL);
                dma_fence_put(fence);
        }
        if (!bp->resv)
                amdgpu_bo_unreserve(bo);
        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
        if (bp->type == ttm_bo_type_device)
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        return 0;

fail_unreserve:
        if (!bp->resv)
                dma_resv_unlock(bo->tbo.base.resv);
        amdgpu_bo_unref(&bo);
        return r;
}
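/*
 * Usage sketch (illustrative only): the lower-level entry point takes an
 * amdgpu_bo_param describing the allocation; the fields below are the same
 * ones amdgpu_bo_create_reserved() fills in above. "bo_size" is a
 * placeholder value.
 *
 *        struct amdgpu_bo_param bp;
 *        struct amdgpu_bo *bo;
 *
 *        memset(&bp, 0, sizeof(bp));
 *        bp.size = bo_size;
 *        bp.byte_align = PAGE_SIZE;
 *        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *        bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
 *        bp.type = ttm_bo_type_device;
 *        bp.resv = NULL;
 *        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *        r = amdgpu_bo_create(adev, &bp, &bo);
 */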
/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_user(struct amdgpu_device *adev,
                          struct amdgpu_bo_param *bp,
                          struct amdgpu_bo_user **ubo_ptr)
{
        struct amdgpu_bo *bo_ptr;
        int r;

        bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
        bp->destroy = &amdgpu_bo_user_destroy;
        r = amdgpu_bo_create(adev, bp, &bo_ptr);
        if (r)
                return r;

        *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
        return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
                        struct amdgpu_bo_param *bp,
                        struct amdgpu_bo_vm **vmbo_ptr)
{
        struct amdgpu_bo *bo_ptr;
        int r;

        /* bo_ptr_size will be determined by the caller and it depends on
         * num of amdgpu_vm_pt entries.
         */
        BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
        r = amdgpu_bo_create(adev, bp, &bo_ptr);
        if (r)
                return r;

        *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
        return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        void *kptr;
        long r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
                                  false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        kptr = amdgpu_bo_kptr(bo);
        if (kptr) {
                if (ptr)
                        *ptr = kptr;
                return 0;
        }

        r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
        if (r)
                return r;

        if (ptr)
                *ptr = amdgpu_bo_kptr(bo);

        return 0;
}
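/*
 * Usage sketch (illustrative only): CPU access to a kernel BO brackets the
 * access with amdgpu_bo_kmap()/amdgpu_bo_kunmap(); the BO must be reserved
 * by the caller. "data" and "size" are placeholders.
 *
 *        void *ptr;
 *
 *        r = amdgpu_bo_kmap(bo, &ptr);
 *        if (!r) {
 *                memcpy(ptr, data, size);
 *                amdgpu_bo_kunmap(bo);
 *        }
 */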
/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
        bool is_iomem;

        return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kmap.bo)
                ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        drm_gem_object_get(&bo->tbo.base);
        return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        if ((*bo) == NULL)
                return;

        drm_gem_object_put(&(*bo)->tbo.base);
        *bo = NULL;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * Pins the buffer object according to requested domain. If the memory is
 * unbound gart memory, binds the pages into gart table. Adjusts pin_count and
 * pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
        int r, i;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;

        /* Check domain to be pinned to against preferred domains */
        if (bo->preferred_domains & domain)
                domain = bo->preferred_domains & domain;

        /* A shared bo cannot be migrated to VRAM */
        if (drm_gem_is_imported(&bo->tbo.base)) {
                if (domain & AMDGPU_GEM_DOMAIN_GTT)
                        domain = AMDGPU_GEM_DOMAIN_GTT;
                else
                        return -EINVAL;
        }

        if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.resource->mem_type;
                uint32_t mem_flags = bo->tbo.resource->placement;

                if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
                        return -EINVAL;

                if ((mem_type == TTM_PL_VRAM) &&
                    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
                    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
                        return -EINVAL;

                ttm_bo_pin(&bo->tbo);
                return 0;
        }

        /* This assumes only APU display buffers are pinned with (VRAM|GTT).
         * See function amdgpu_display_supported_domains()
         */
        domain = amdgpu_bo_get_preferred_domain(adev, domain);

        if (drm_gem_is_imported(&bo->tbo.base))
                dma_buf_pin(bo->tbo.base.import_attach);

        /* force to pin into visible video ram */
        if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
                bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        amdgpu_bo_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
                    bo->placements[i].mem_type == TTM_PL_VRAM)
                        bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
        }

        ttm_bo_pin(&bo->tbo);

        if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
                             &adev->visible_pin_size);
        } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
                atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }

error:
        return r;
}
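/*
 * Usage sketch (illustrative only): pinning a scanout buffer so the display
 * engine sees a fixed address. Both pinning and unpinning require the BO to
 * be reserved by the caller.
 *
 *        r = amdgpu_bo_reserve(bo, false);
 *        if (!r) {
 *                r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *                amdgpu_bo_unreserve(bo);
 *        }
 *
 * and later, with the BO reserved again:
 *
 *        amdgpu_bo_unpin(bo);
 */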
/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        ttm_bo_unpin(&bo->tbo);
        if (bo->tbo.pin_count)
                return;

        if (drm_gem_is_imported(&bo->tbo.base))
                dma_buf_unpin(bo->tbo.base.import_attach);

        if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
                             &adev->visible_pin_size);
        } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
                atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }
}

static const char * const amdgpu_vram_names[] = {
        "UNKNOWN",
        "GDDR1",
        "DDR2",
        "GDDR3",
        "GDDR4",
        "GDDR5",
        "HBM",
        "DDR3",
        "DDR4",
        "GDDR6",
        "DDR5",
        "LPDDR4",
        "LPDDR5",
        "HBM3E"
};
/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* On A+A platform, VRAM can be mapped as WB */
        if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
                /* reserve PAT memory space to WC for VRAM */
                int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
                                                   adev->gmc.aper_size);

                if (r) {
                        DRM_ERROR("Unable to set WC memtype for the aperture base\n");
                        return r;
                }

                /* Add an MTRR for the VRAM */
                adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
                                                       adev->gmc.aper_size);
        }

        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 adev->gmc.mc_vram_size >> 20,
                 (unsigned long long)adev->gmc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
                 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
        return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        int idx;

        amdgpu_ttm_fini(adev);

        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
                if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
                        arch_phys_wc_del(adev->gmc.vram_mtrr);
                        arch_io_free_memtype_wc(adev->gmc.aper_base,
                                                adev->gmc.aper_size);
                }
                drm_dev_exit(idx);
        }
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_bo_user *ubo;

        BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        if (adev->family <= AMDGPU_FAMILY_CZ &&
            AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        ubo = to_amdgpu_bo_user(bo);
        ubo->tiling_flags = tiling_flags;
        return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        struct amdgpu_bo_user *ubo;

        BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        dma_resv_assert_held(bo->tbo.base.resv);
        ubo = to_amdgpu_bo_user(bo);

        if (tiling_flags)
                *tiling_flags = ubo->tiling_flags;
}
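/*
 * Usage sketch (illustrative only): tiling flags are bit-packed values;
 * AMDGPU_TILING_GET() is used above, and its AMDGPU_TILING_SET() uapi
 * counterpart packs fields the same way ("swizzle" is a placeholder, and
 * the SWIZZLE_MODE field is assumed from the amdgpu uapi header).
 *
 *        u64 tiling_flags = AMDGPU_TILING_SET(SWIZZLE_MODE, swizzle);
 *
 *        r = amdgpu_bo_set_tiling_flags(bo, tiling_flags);
 *        ...
 *        amdgpu_bo_get_tiling_flags(bo, &tiling_flags);
 */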
/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           u32 metadata_size, uint64_t flags)
{
        struct amdgpu_bo_user *ubo;
        void *buffer;

        BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        ubo = to_amdgpu_bo_user(bo);
        if (!metadata_size) {
                if (ubo->metadata_size) {
                        kfree(ubo->metadata);
                        ubo->metadata = NULL;
                        ubo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(ubo->metadata);
        ubo->metadata_flags = flags;
        ubo->metadata = buffer;
        ubo->metadata_size = metadata_size;

        return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        struct amdgpu_bo_user *ubo;

        if (!buffer && !metadata_size)
                return -EINVAL;

        BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        ubo = to_amdgpu_bo_user(bo);
        if (metadata_size)
                *metadata_size = ubo->metadata_size;

        if (buffer) {
                if (buffer_size < ubo->metadata_size)
                        return -EINVAL;

                if (ubo->metadata_size)
                        memcpy(buffer, ubo->metadata, ubo->metadata_size);
        }

        if (flags)
                *flags = ubo->metadata_flags;

        return 0;
}
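/*
 * Usage sketch (illustrative only): metadata is copied into a kmemdup()'d
 * buffer on set, so the caller keeps ownership of its input; a get with a
 * too-small buffer fails with -EINVAL. "blob", "blob_size" and "out" are
 * placeholders.
 *
 *        u32 size;
 *        u64 flags;
 *
 *        r = amdgpu_bo_set_metadata(bo, blob, blob_size, 0);
 *        ...
 *        r = amdgpu_bo_get_metadata(bo, out, sizeof(out), &size, &flags);
 */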
/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new resource for backing the BO
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_resource *new_mem)
{
        struct ttm_resource *old_mem = bo->resource;
        struct amdgpu_bo *abo;

        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;

        abo = ttm_to_amdgpu_bo(bo);
        amdgpu_vm_bo_move(abo, new_mem, evict);

        amdgpu_bo_kunmap(abo);

        if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
            old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
                dma_buf_move_notify(abo->tbo.base.dma_buf);

        /* move_notify is called before move happens */
        trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
                             old_mem ? old_mem->mem_type : -1);
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *abo;
        int r;

        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;

        abo = ttm_to_amdgpu_bo(bo);

        WARN_ON(abo->vm_bo);

        if (abo->kfd_bo)
                amdgpu_amdkfd_release_notify(abo);

        /*
         * We lock the private dma_resv object here and since the BO is about to
         * be released nobody else should have a pointer to it.
         * So when this locking here fails something is wrong with the reference
         * counting.
         */
        if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
                return;

        amdgpu_amdkfd_remove_all_eviction_fences(abo);

        if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
            !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
            adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
                goto out;

        r = dma_resv_reserve_fences(&bo->base._resv, 1);
        if (r)
                goto out;

        r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
        if (WARN_ON(r))
                goto out;

        amdgpu_vram_mgr_set_cleared(bo->resource);
        dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
        dma_fence_put(fence);

out:
        dma_resv_unlock(&bo->base._resv);
}
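/*
 * Illustrative note: buffers whose contents must not leak to another client
 * request the wipe at creation time, e.g.:
 *
 *        bp.flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 *
 * which makes amdgpu_bo_release_notify() above clear the VRAM backing store
 * before it can be handed out again.
 */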
/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a VM_FAULT_ code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        int r;

        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        if (amdgpu_res_cpu_visible(adev, bo->resource))
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (abo->tbo.pin_count > 0)
                return VM_FAULT_SIGBUS;

        /* hurrah the memory is not visible ! */
        atomic64_inc(&adev->num_vram_cpu_page_faults);
        amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
                                        AMDGPU_GEM_DOMAIN_GTT);

        /* Avoid costly evictions; only set GTT as a busy placement */
        abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;

        r = ttm_bo_validate(bo, &abo->placement, &ctx);
        if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
                return VM_FAULT_NOPAGE;
        else if (unlikely(r))
                return VM_FAULT_SIGBUS;

        /* this should never happen */
        if (bo->resource->mem_type == TTM_PL_VRAM &&
            !amdgpu_res_cpu_visible(adev, bo->resource))
                return VM_FAULT_SIGBUS;

        ttm_bo_move_to_lru_tail_unlocked(bo);
        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
{
        struct dma_resv *resv = bo->tbo.base.resv;
        int r;

        r = dma_resv_reserve_fences(resv, 1);
        if (r) {
                /* As last resort on OOM we block for the fence */
                dma_fence_wait(fence, false);
                return;
        }

        dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
                           DMA_RESV_USAGE_WRITE);
}

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
                             enum amdgpu_sync_mode sync_mode, void *owner,
                             bool intr)
{
        struct amdgpu_sync sync;
        int r;

        amdgpu_sync_create(&sync);
        amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
        r = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
        return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
                                        AMDGPU_SYNC_NE_OWNER, owner, intr);
}
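/*
 * Usage sketch (illustrative only): attach a fence to a BO as a shared
 * (read) fence, then later wait for the BO's fences.
 * AMDGPU_FENCE_OWNER_UNDEFINED is the owner token commonly passed by driver
 * code when no specific owner should be matched.
 *
 *        amdgpu_bo_fence(bo, fence, true);
 *        ...
 *        r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, false);
 */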
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
                     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
        WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

        return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
 * @bo: amdgpu VRAM buffer object for which we query the offset
 *
 * Returns:
 * current FB aperture GPU offset of the object.
 */
u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        uint64_t offset, fb_base;

        WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);

        fb_base = adev->gmc.fb_start;
        fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
        return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        uint64_t offset = AMDGPU_BO_INVALID_OFFSET;

        if (bo->tbo.resource->mem_type == TTM_PL_TT)
                offset = amdgpu_gmc_agp_addr(&bo->tbo);

        if (offset == AMDGPU_BO_INVALID_OFFSET)
                offset = (bo->tbo.resource->start << PAGE_SHIFT) +
                        amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

        return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
 * @bo: the buffer object we should look at
 *
 * BO can have multiple preferred placements, to avoid double counting we want
 * to file it under a single placement for memory stats.
 * Luckily, if we take the highest set bit in preferred_domains the result is
 * quite sensible.
 *
 * Returns:
 * Which of the placements should the BO be accounted under.
 */
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
{
        uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;

        if (!domain)
                return TTM_PL_SYSTEM;

        switch (rounddown_pow_of_two(domain)) {
        case AMDGPU_GEM_DOMAIN_CPU:
                return TTM_PL_SYSTEM;
        case AMDGPU_GEM_DOMAIN_GTT:
                return TTM_PL_TT;
        case AMDGPU_GEM_DOMAIN_VRAM:
                return TTM_PL_VRAM;
        case AMDGPU_GEM_DOMAIN_GDS:
                return AMDGPU_PL_GDS;
        case AMDGPU_GEM_DOMAIN_GWS:
                return AMDGPU_PL_GWS;
        case AMDGPU_GEM_DOMAIN_OA:
                return AMDGPU_PL_OA;
        case AMDGPU_GEM_DOMAIN_DOORBELL:
                return AMDGPU_PL_DOORBELL;
        default:
                return TTM_PL_SYSTEM;
        }
}
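/*
 * Worked example: with preferred_domains = VRAM | GTT (0x4 | 0x2),
 * rounddown_pow_of_two() keeps only the highest set bit (0x4), so
 * amdgpu_bo_mem_stats_placement() files the BO under TTM_PL_VRAM and the
 * allocation is not double counted in the GTT stats.
 */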
/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                        uint32_t domain)
{
        if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
            ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
                        domain = AMDGPU_GEM_DOMAIN_GTT;
        }
        return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)                       \
        do {                                                    \
                if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
                        seq_printf((m), " " #flag);             \
                }                                               \
        } while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        const char *placement;
        unsigned int pin_count;
        u64 size;

        if (dma_resv_trylock(bo->tbo.base.resv)) {
                if (!bo->tbo.resource) {
                        placement = "NONE";
                } else {
                        switch (bo->tbo.resource->mem_type) {
                        case TTM_PL_VRAM:
                                if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                                        placement = "VRAM VISIBLE";
                                else
                                        placement = "VRAM";
                                break;
                        case TTM_PL_TT:
                                placement = "GTT";
                                break;
                        case AMDGPU_PL_GDS:
                                placement = "GDS";
                                break;
                        case AMDGPU_PL_GWS:
                                placement = "GWS";
                                break;
                        case AMDGPU_PL_OA:
                                placement = "OA";
                                break;
                        case AMDGPU_PL_PREEMPT:
                                placement = "PREEMPTIBLE";
                                break;
                        case AMDGPU_PL_DOORBELL:
                                placement = "DOORBELL";
                                break;
                        case TTM_PL_SYSTEM:
                        default:
                                placement = "CPU";
                                break;
                        }
                }
                dma_resv_unlock(bo->tbo.base.resv);
        } else {
                placement = "UNKNOWN";
        }

        size = amdgpu_bo_size(bo);
        seq_printf(m, "\t\t0x%08x: %12lld byte %s",
                   id, size, placement);

        pin_count = READ_ONCE(bo->tbo.pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);

        dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
        attachment = READ_ONCE(bo->tbo.base.import_attach);

        if (attachment)
                seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
        else if (dma_buf)
                seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);

        amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
        amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
        amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
        amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
        amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
        amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
        amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
        /* Add the gem obj resv fence dump */
        if (dma_resv_trylock(bo->tbo.base.resv)) {
                dma_resv_describe(bo->tbo.base.resv, m);
                dma_resv_unlock(bo->tbo.base.resv);
        }
        seq_puts(m, "\n");

        return size;
}
#endif