/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "atom.h"

/* Largest chunk exported as a single scatterlist segment: 2 GiB */
#define AMDGPU_MAX_SG_SEGMENT_SIZE	(2UL << 30)

struct amdgpu_vram_reservation {
	u64 start;
	u64 size;
	struct list_head allocated;
	struct list_head blocks;
};

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *
to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
	return list_first_entry_or_null(list, struct drm_buddy_block, link);
}

static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 start, size;

	block = amdgpu_vram_mgr_first_block(head);
	if (!block)
		return false;

	while (head != block->link.next) {
		start = amdgpu_vram_mgr_block_start(block);
		size = amdgpu_vram_mgr_block_size(block);

		block = list_entry(block->link.next, struct drm_buddy_block, link);
		if (start + size != amdgpu_vram_mgr_block_start(block))
			return false;
	}

	return true;
}

static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 size = 0;

	list_for_each_entry(block, head, link)
		size += amdgpu_vram_mgr_block_size(block);

	return size;
}

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}
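/*
 * Example (illustrative only; the card index in the sysfs path varies by
 * system): the attribute can be read from userspace with e.g.
 *
 *   $ cat /sys/class/drm/card0/device/mem_info_vram_total
 *   <total VRAM size in bytes>
 */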
/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently in use on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;

	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * visible VRAM currently in use.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
						  struct device_attribute *attr,
						  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n",
			  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
}
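/*
 * Note on the counters above: at any point in time
 * mem_info_vis_vram_used <= mem_info_vram_used <= mem_info_vram_total,
 * since the visible counter only accounts for the bytes of each allocation
 * that fall inside the CPU visible window (see amdgpu_vram_mgr_vis_size()
 * below).
 */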
/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * installed VRAM.
 * The file mem_info_vram_vendor is used for this and returns the name of
 * the vendor.
 */
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return sysfs_emit(buf, "samsung\n");
	case INFINEON:
		return sysfs_emit(buf, "infineon\n");
	case ELPIDA:
		return sysfs_emit(buf, "elpida\n");
	case ETRON:
		return sysfs_emit(buf, "etron\n");
	case NANYA:
		return sysfs_emit(buf, "nanya\n");
	case HYNIX:
		return sysfs_emit(buf, "hynix\n");
	case MOSEL:
		return sysfs_emit(buf, "mosel\n");
	case WINBOND:
		return sysfs_emit(buf, "winbond\n");
	case ESMT:
		return sysfs_emit(buf, "esmt\n");
	case MICRON:
		return sysfs_emit(buf, "micron\n");
	default:
		return sysfs_emit(buf, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);

static struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL
};

static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
					    struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	/* Hide the vendor attribute when the vendor is not known */
	if (attr == &dev_attr_mem_info_vram_vendor.attr &&
	    !adev->gmc.vram_vendor)
		return 0;

	return attr->mode;
}

const struct attribute_group amdgpu_vram_mgr_attr_group = {
	.attrs = amdgpu_vram_mgr_attributes,
	.is_visible = amdgpu_vram_attrs_is_visible
};

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible block size
 *
 * @adev: amdgpu_device pointer
 * @block: DRM BUDDY block structure
 *
 * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_buddy_block *block)
{
	u64 start = amdgpu_vram_mgr_block_start(block);
	u64 end = start + amdgpu_vram_mgr_block_size(block);

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
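/*
 * Worked example of the clamping above (illustrative numbers): with a
 * visible_vram_size of 256 MiB, a 32 MiB block starting at 240 MiB overlaps
 * the visible window by 16 MiB, while any block starting at or beyond
 * 256 MiB contributes 0 bytes.
 */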
/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;
	u64 usage = 0;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	list_for_each_entry(block, &vres->blocks, link)
		usage += amdgpu_vram_mgr_vis_size(adev, block);

	return usage;
}

/* Commit the reservation of VRAM pages */
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_buddy *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	struct drm_buddy_block *block;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
					   rsv->size, mm->chunk_size, &rsv->allocated,
					   DRM_BUDDY_RANGE_ALLOCATION))
			continue;

		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
		if (!block)
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->start, rsv->size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
		atomic64_add(vis_usage, &mgr->vis_usage);
		spin_lock(&man->bdev->lru_lock);
		man->usage += rsv->size;
		spin_unlock(&man->bdev->lru_lock);
		list_move(&rsv->blocks, &mgr->reserved_pages);
	}
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
				  uint64_t start, uint64_t size)
{
	struct amdgpu_vram_reservation *rsv;

	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->allocated);
	INIT_LIST_HEAD(&rsv->blocks);

	rsv->start = start;
	rsv->size = size;

	mutex_lock(&mgr->lock);
	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(&mgr->manager);
	mutex_unlock(&mgr->lock);

	return 0;
}

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of a page in VRAM
 *
 * Returns:
 * -EBUSY: the page is still held in the pending list
 * 0: the page has been reserved
 * -ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
				      uint64_t start)
{
	struct amdgpu_vram_reservation *rsv;
	int ret;

	mutex_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;
out:
	mutex_unlock(&mgr->lock);
	return ret;
}
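/*
 * Usage sketch (illustrative; the 1 MiB range is hypothetical): a caller
 * that needs to keep a region out of the allocator reserves it and can
 * later poll whether the reservation has been committed:
 *
 *	r = amdgpu_vram_mgr_reserve_range(mgr, 0, 1 << 20);
 *	...
 *	r = amdgpu_vram_mgr_query_page_status(mgr, 0);
 *	(0: committed, -EBUSY: still pending, -ENOENT: never reserved)
 */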
/**
 * amdgpu_vram_mgr_query_address_block_info - find the block backing an address
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @address: byte offset into VRAM
 * @info: returned block information
 *
 * Returns:
 * 0 with @info filled in if a currently allocated block contains @address,
 * -ENOENT otherwise.
 */
int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
					     uint64_t address,
					     struct amdgpu_vram_block_info *info)
{
	struct amdgpu_vram_mgr_resource *vres;
	struct drm_buddy_block *block;
	u64 start, size;
	int ret = -ENOENT;

	mutex_lock(&mgr->lock);
	list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
		list_for_each_entry(block, &vres->blocks, link) {
			start = amdgpu_vram_mgr_block_start(block);
			size = amdgpu_vram_mgr_block_size(block);
			if ((start <= address) && (address < (start + size))) {
				info->start = start;
				info->size = size;
				memcpy(&info->task, &vres->task, sizeof(vres->task));
				ret = 0;
				goto out;
			}
		}
	}

out:
	mutex_unlock(&mgr->lock);

	return ret;
}
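/*
 * Example (sketch; the caller context is hypothetical): mapping a faulting
 * VRAM address back to the task that owns the allocation for debugging:
 *
 *	struct amdgpu_vram_block_info info;
 *
 *	if (!amdgpu_vram_mgr_query_address_block_info(mgr, addr, &info))
 *		pr_info("0x%llx owned by %s (pid %d)\n",
 *			addr, info.task.comm, info.task.pid);
 */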
/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource **res)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	u64 vis_usage = 0, max_bytes, min_block_size;
	struct amdgpu_vram_mgr_resource *vres;
	u64 size, remaining_size, lpfn, fpfn;
	unsigned int adjust_dcc_size = 0;
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	unsigned long pages_per_block;
	int r;

	lpfn = (u64)place->lpfn << PAGE_SHIFT;
	if (!lpfn || lpfn > man->size)
		lpfn = man->size;

	fpfn = (u64)place->fpfn << PAGE_SHIFT;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
		pages_per_block = ~0ul;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_block = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
		pages_per_block = max_t(u32, pages_per_block,
					tbo->page_alignment);
	}

	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
	if (!vres)
		return -ENOMEM;

	ttm_resource_init(tbo, place, &vres->base);

	/* bail out quickly if there's likely not enough VRAM for this BO */
	if (ttm_resource_manager_usage(man) > max_bytes) {
		r = -ENOSPC;
		goto error_fini;
	}

	INIT_LIST_HEAD(&vres->blocks);

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;

	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
		vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;

	if (fpfn || lpfn != mgr->mm.size)
		/* Allocate blocks in desired range */
		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
	    adev->gmc.gmc_funcs->get_dcc_alignment)
		adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);

	remaining_size = (u64)vres->base.size;
	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
		unsigned int dcc_size;

		dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
		remaining_size = (u64)dcc_size;

		vres->flags |= DRM_BUDDY_TRIM_DISABLE;
	}

	mutex_lock(&mgr->lock);
	while (remaining_size) {
		if (tbo->page_alignment)
			min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
		else
			min_block_size = mgr->default_page_size;

		size = remaining_size;

		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
			min_block_size = size;
		else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
			 !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
			min_block_size = (u64)pages_per_block << PAGE_SHIFT;

		BUG_ON(min_block_size < mm->chunk_size);

		r = drm_buddy_alloc_blocks(mm, fpfn,
					   lpfn,
					   size,
					   min_block_size,
					   &vres->blocks,
					   vres->flags);

		if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
			vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
			pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
						tbo->page_alignment);

			continue;
		}

		if (unlikely(r))
			goto error_free_blocks;

		if (size > remaining_size)
			remaining_size = 0;
		else
			remaining_size -= size;
	}

	vres->task.pid = task_pid_nr(current);
	get_task_comm(vres->task.comm, current);
	list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);

	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
		struct drm_buddy_block *dcc_block;
		unsigned long dcc_start;
		u64 trim_start;

		dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
		/* Adjust the start address for DCC buffers only */
		dcc_start =
			roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
				adjust_dcc_size);
		trim_start = (u64)dcc_start;
		drm_buddy_block_trim(mm, &trim_start,
				     (u64)vres->base.size,
				     &vres->blocks);
	}
	mutex_unlock(&mgr->lock);

	vres->base.start = 0;
	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
		     vres->base.size);
	list_for_each_entry(block, &vres->blocks, link) {
		unsigned long start;

		start = amdgpu_vram_mgr_block_start(block) +
			amdgpu_vram_mgr_block_size(block);
		start >>= PAGE_SHIFT;

		if (start > PFN_UP(size))
			start -= PFN_UP(size);
		else
			start = 0;
		vres->base.start = max(vres->base.start, start);

		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
	}

	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

	if (adev->gmc.xgmi.connected_to_cpu)
		vres->base.bus.caching = ttm_cached;
	else
		vres->base.bus.caching = ttm_write_combined;

	atomic64_add(vis_usage, &mgr->vis_usage);
	*res = &vres->base;
	return 0;

error_free_blocks:
	drm_buddy_free_list(mm, &vres->blocks, 0);
	mutex_unlock(&mgr->lock);
error_fini:
	ttm_resource_fini(man, &vres->base);
	kfree(vres);

	return r;
}
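/*
 * Illustration of the min_block_size selection above (assuming 4 KiB
 * pages): pages_per_block defaults to 512 pages, i.e. 2 MiB. A 6 MiB
 * request is a whole multiple of 2 MiB, so it is carved out of 2 MiB
 * minimum blocks; a 5 MiB request is not, so it falls back to the
 * page-alignment based minimum block size.
 */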
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @res: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *res)
{
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	uint64_t vis_usage = 0;

	mutex_lock(&mgr->lock);

	list_del(&vres->vres_node);
	memset(&vres->task, 0, sizeof(vres->task));

	list_for_each_entry(block, &vres->blocks, link)
		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

	drm_buddy_free_list(mm, &vres->blocks, vres->flags);
	amdgpu_vram_mgr_do_reserve(man);
	mutex_unlock(&mgr->lock);

	atomic64_sub(vis_usage, &mgr->vis_usage);

	ttm_resource_fini(man, res);
	kfree(vres);
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @res: TTM memory object
 * @offset: byte offset from the base of VRAM BO
 * @length: number of bytes to export in sg_table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct amdgpu_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_BUDDY blocks to export */
	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		amdgpu_res_next(&cursor, min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE));
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/* Initialize scatterlist nodes of sg_table */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	/*
	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
	 * @note: Use iterator api to get first the DRM_BUDDY block
	 * and the number of bytes from it. Access the following
	 * DRM_BUDDY block(s) if more bytes need to be exported
	 */
	amdgpu_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
		unsigned long size = min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE);
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		amdgpu_res_next(&cursor, size);
	}

	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
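/*
 * Usage sketch (illustrative; the peer device is hypothetical): a dma-buf
 * style export maps the VRAM resource for a peer device and later releases
 * it with the matching helper below:
 *
 *	struct sg_table *sgt;
 *
 *	r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
 *				      amdgpu_bo_size(bo), peer_dev,
 *				      DMA_BIDIRECTIONAL, &sgt);
 *	...
 *	amdgpu_vram_mgr_free_sgt(peer_dev, DMA_BIDIRECTIONAL, sgt);
 */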
/**
 * amdgpu_vram_mgr_free_sgt - free a previously allocated sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table.
 */
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @mgr: amdgpu_vram_mgr pointer
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
{
	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
 *
 * @adev: amdgpu device pointer
 *
 * Reset the cleared drm buddy blocks.
 */
void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct drm_buddy *mm = &mgr->mm;

	mutex_lock(&mgr->lock);
	drm_buddy_reset_clear(mm, false);
	mutex_unlock(&mgr->lock);
}

/**
 * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
 *
 * @man: TTM memory type manager
 * @res: The resource to test
 * @place: The place to test against
 * @size: Size of the new allocation
 *
 * Test each drm buddy block for intersection for eviction decision.
 */
static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &mgr->blocks, link) {
		unsigned long fpfn =
			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);

		if (place->fpfn < lpfn &&
		    (!place->lpfn || place->lpfn > fpfn))
			return true;
	}

	return false;
}

/**
 * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
 *
 * @man: TTM memory type manager
 * @res: The resource to test
 * @place: The place to test against
 * @size: Size of the new allocation
 *
 * Test each drm buddy block for placement compatibility.
 */
static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &mgr->blocks, link) {
		unsigned long fpfn =
			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);

		if (fpfn < place->fpfn ||
		    (place->lpfn && lpfn > place->lpfn))
			return false;
	}

	return true;
}
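/*
 * Example for the two checks above (illustrative pfn ranges): a place
 * restricted to pfns [0, 256) intersects a block spanning pfns [128, 512),
 * but is not compatible with it, because the block extends past the
 * place's upper limit.
 */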
/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using the given DRM printer.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv;

	drm_printf(printer, "  vis usage:%llu\n",
		   amdgpu_vram_mgr_vis_usage(mgr));

	mutex_lock(&mgr->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   mgr->default_page_size >> 10);

	drm_buddy_print(mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
		drm_printf(printer, "%#018llx-%#018llx: %llu\n",
			   rsv->start, rsv->start + rsv->size, rsv->size);
	mutex_unlock(&mgr->lock);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
	.alloc = amdgpu_vram_mgr_new,
	.free = amdgpu_vram_mgr_del,
	.intersects = amdgpu_vram_mgr_intersects,
	.compatible = amdgpu_vram_mgr_compatible,
	.debug = amdgpu_vram_mgr_debug
};

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM buddy allocator
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram",
					      adev->gmc.real_vram_size);
	if (IS_ERR(man->cg))
		return PTR_ERR(man->cg);
	ttm_resource_manager_init(man, &adev->mman.bdev,
				  adev->gmc.real_vram_size);

	mutex_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);
	INIT_LIST_HEAD(&mgr->allocated_vres_list);
	mgr->default_page_size = PAGE_SIZE;

	man->func = &amdgpu_vram_mgr_func;
	err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
	if (err)
		return err;

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager. All remaining allocations are evicted
 * before the buddy allocator is torn down.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;
	struct amdgpu_vram_reservation *rsv, *temp;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
		drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
		kfree(rsv);
	}
	if (!adev->gmc.is_app_apu)
		drm_buddy_fini(&mgr->mm);
	mutex_unlock(&mgr->lock);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}