1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 /************************************************************************** 3 * 4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 25 * USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 **************************************************************************/ 28 /* 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 30 */ 31 32 #define pr_fmt(fmt) "[TTM] " fmt 33 34 #include <drm/ttm/ttm_module.h> 35 #include <drm/ttm/ttm_bo_driver.h> 36 #include <drm/ttm/ttm_placement.h> 37 #include <linux/jiffies.h> 38 #include <linux/slab.h> 39 #include <linux/sched.h> 40 #include <linux/mm.h> 41 #include <linux/file.h> 42 #include <linux/module.h> 43 #include <linux/atomic.h> 44 #include <linux/dma-resv.h> 45 46 static void ttm_bo_global_kobj_release(struct kobject *kobj); 47 48 /** 49 * ttm_global_mutex - protecting the global BO state 50 */ 51 DEFINE_MUTEX(ttm_global_mutex); 52 unsigned ttm_bo_glob_use_count; 53 struct ttm_bo_global ttm_bo_glob; 54 EXPORT_SYMBOL(ttm_bo_glob); 55 56 static struct attribute ttm_bo_count = { 57 .name = "bo_count", 58 .mode = S_IRUGO 59 }; 60 61 /* default destructor */ 62 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo) 63 { 64 kfree(bo); 65 } 66 67 static inline int ttm_mem_type_from_place(const struct ttm_place *place, 68 uint32_t *mem_type) 69 { 70 int pos; 71 72 pos = ffs(place->flags & TTM_PL_MASK_MEM); 73 if (unlikely(!pos)) 74 return -EINVAL; 75 76 *mem_type = pos - 1; 77 return 0; 78 } 79 80 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p, 81 int mem_type) 82 { 83 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 84 85 drm_printf(p, " has_type: %d\n", man->has_type); 86 drm_printf(p, " use_type: %d\n", man->use_type); 87 drm_printf(p, " flags: 0x%08X\n", man->flags); 88 drm_printf(p, " gpu_offset: 0x%08llX\n", man->gpu_offset); 89 drm_printf(p, " size: %llu\n", man->size); 90 drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); 91 drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); 92 if (mem_type != TTM_PL_SYSTEM) 93 (*man->func->debug)(man, p); 94 } 95 96 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, 97 struct ttm_placement *placement) 98 { 99 struct drm_printer p = drm_debug_printer(TTM_PFX); 100 int i, ret, mem_type; 101 102 drm_printf(&p, "No space for %p 
(%lu pages, %luK, %luM)\n", 103 bo, bo->mem.num_pages, bo->mem.size >> 10, 104 bo->mem.size >> 20); 105 for (i = 0; i < placement->num_placement; i++) { 106 ret = ttm_mem_type_from_place(&placement->placement[i], 107 &mem_type); 108 if (ret) 109 return; 110 drm_printf(&p, " placement[%d]=0x%08X (%d)\n", 111 i, placement->placement[i].flags, mem_type); 112 ttm_mem_type_debug(bo->bdev, &p, mem_type); 113 } 114 } 115 116 static ssize_t ttm_bo_global_show(struct kobject *kobj, 117 struct attribute *attr, 118 char *buffer) 119 { 120 struct ttm_bo_global *glob = 121 container_of(kobj, struct ttm_bo_global, kobj); 122 123 return snprintf(buffer, PAGE_SIZE, "%d\n", 124 atomic_read(&glob->bo_count)); 125 } 126 127 static struct attribute *ttm_bo_global_attrs[] = { 128 &ttm_bo_count, 129 NULL 130 }; 131 132 static const struct sysfs_ops ttm_bo_global_ops = { 133 .show = &ttm_bo_global_show 134 }; 135 136 static struct kobj_type ttm_bo_glob_kobj_type = { 137 .release = &ttm_bo_global_kobj_release, 138 .sysfs_ops = &ttm_bo_global_ops, 139 .default_attrs = ttm_bo_global_attrs 140 }; 141 142 143 static inline uint32_t ttm_bo_type_flags(unsigned type) 144 { 145 return 1 << (type); 146 } 147 148 static void ttm_bo_release_list(struct kref *list_kref) 149 { 150 struct ttm_buffer_object *bo = 151 container_of(list_kref, struct ttm_buffer_object, list_kref); 152 size_t acc_size = bo->acc_size; 153 154 BUG_ON(kref_read(&bo->list_kref)); 155 BUG_ON(kref_read(&bo->kref)); 156 BUG_ON(bo->mem.mm_node != NULL); 157 BUG_ON(!list_empty(&bo->lru)); 158 BUG_ON(!list_empty(&bo->ddestroy)); 159 ttm_tt_destroy(bo->ttm); 160 atomic_dec(&ttm_bo_glob.bo_count); 161 dma_fence_put(bo->moving); 162 if (!ttm_bo_uses_embedded_gem_object(bo)) 163 dma_resv_fini(&bo->base._resv); 164 bo->destroy(bo); 165 ttm_mem_global_free(&ttm_mem_glob, acc_size); 166 } 167 168 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, 169 struct ttm_mem_reg *mem) 170 { 171 struct ttm_bo_device *bdev = bo->bdev; 172 struct ttm_mem_type_manager *man; 173 174 dma_resv_assert_held(bo->base.resv); 175 176 if (!list_empty(&bo->lru)) 177 return; 178 179 if (mem->placement & TTM_PL_FLAG_NO_EVICT) 180 return; 181 182 man = &bdev->man[mem->mem_type]; 183 list_add_tail(&bo->lru, &man->lru[bo->priority]); 184 kref_get(&bo->list_kref); 185 186 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm && 187 !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | 188 TTM_PAGE_FLAG_SWAPPED))) { 189 list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); 190 kref_get(&bo->list_kref); 191 } 192 } 193 194 static void ttm_bo_ref_bug(struct kref *list_kref) 195 { 196 BUG(); 197 } 198 199 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) 200 { 201 struct ttm_bo_device *bdev = bo->bdev; 202 bool notify = false; 203 204 if (!list_empty(&bo->swap)) { 205 list_del_init(&bo->swap); 206 kref_put(&bo->list_kref, ttm_bo_ref_bug); 207 notify = true; 208 } 209 if (!list_empty(&bo->lru)) { 210 list_del_init(&bo->lru); 211 kref_put(&bo->list_kref, ttm_bo_ref_bug); 212 notify = true; 213 } 214 215 if (notify && bdev->driver->del_from_lru_notify) 216 bdev->driver->del_from_lru_notify(bo); 217 } 218 219 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, 220 struct ttm_buffer_object *bo) 221 { 222 if (!pos->first) 223 pos->first = bo; 224 pos->last = bo; 225 } 226 227 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, 228 struct ttm_lru_bulk_move *bulk) 229 { 230 dma_resv_assert_held(bo->base.resv); 231 232 ttm_bo_del_from_lru(bo); 233 
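	/*
	 * "Move to tail" is a delete followed by a re-add: the BO is taken
	 * off whatever LRU (and swap) lists it is on and appended again
	 * according to its current placement in bo->mem.
	 */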
ttm_bo_add_mem_to_lru(bo, &bo->mem); 234 235 if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { 236 switch (bo->mem.mem_type) { 237 case TTM_PL_TT: 238 ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo); 239 break; 240 241 case TTM_PL_VRAM: 242 ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo); 243 break; 244 } 245 if (bo->ttm && !(bo->ttm->page_flags & 246 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) 247 ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo); 248 } 249 } 250 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); 251 252 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) 253 { 254 unsigned i; 255 256 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { 257 struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i]; 258 struct ttm_mem_type_manager *man; 259 260 if (!pos->first) 261 continue; 262 263 dma_resv_assert_held(pos->first->base.resv); 264 dma_resv_assert_held(pos->last->base.resv); 265 266 man = &pos->first->bdev->man[TTM_PL_TT]; 267 list_bulk_move_tail(&man->lru[i], &pos->first->lru, 268 &pos->last->lru); 269 } 270 271 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { 272 struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i]; 273 struct ttm_mem_type_manager *man; 274 275 if (!pos->first) 276 continue; 277 278 dma_resv_assert_held(pos->first->base.resv); 279 dma_resv_assert_held(pos->last->base.resv); 280 281 man = &pos->first->bdev->man[TTM_PL_VRAM]; 282 list_bulk_move_tail(&man->lru[i], &pos->first->lru, 283 &pos->last->lru); 284 } 285 286 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { 287 struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i]; 288 struct list_head *lru; 289 290 if (!pos->first) 291 continue; 292 293 dma_resv_assert_held(pos->first->base.resv); 294 dma_resv_assert_held(pos->last->base.resv); 295 296 lru = &ttm_bo_glob.swap_lru[i]; 297 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); 298 } 299 } 300 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); 301 302 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, 303 struct ttm_mem_reg *mem, bool evict, 304 struct ttm_operation_ctx *ctx) 305 { 306 struct ttm_bo_device *bdev = bo->bdev; 307 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); 308 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); 309 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; 310 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; 311 int ret = 0; 312 313 if (old_is_pci || new_is_pci || 314 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { 315 ret = ttm_mem_io_lock(old_man, true); 316 if (unlikely(ret != 0)) 317 goto out_err; 318 ttm_bo_unmap_virtual_locked(bo); 319 ttm_mem_io_unlock(old_man); 320 } 321 322 /* 323 * Create and bind a ttm if required. 
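	 * Only memory types without TTM_MEMTYPE_FLAG_FIXED are backed by a
	 * struct ttm_tt: allocate one if the BO has none, adjust its caching
	 * to match the new placement, and bind it unless the destination is
	 * TTM_PL_SYSTEM.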
324 */ 325 326 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { 327 if (bo->ttm == NULL) { 328 bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); 329 ret = ttm_tt_create(bo, zero); 330 if (ret) 331 goto out_err; 332 } 333 334 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); 335 if (ret) 336 goto out_err; 337 338 if (mem->mem_type != TTM_PL_SYSTEM) { 339 ret = ttm_tt_bind(bo->ttm, mem, ctx); 340 if (ret) 341 goto out_err; 342 } 343 344 if (bo->mem.mem_type == TTM_PL_SYSTEM) { 345 if (bdev->driver->move_notify) 346 bdev->driver->move_notify(bo, evict, mem); 347 bo->mem = *mem; 348 mem->mm_node = NULL; 349 goto moved; 350 } 351 } 352 353 if (bdev->driver->move_notify) 354 bdev->driver->move_notify(bo, evict, mem); 355 356 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 357 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 358 ret = ttm_bo_move_ttm(bo, ctx, mem); 359 else if (bdev->driver->move) 360 ret = bdev->driver->move(bo, evict, ctx, mem); 361 else 362 ret = ttm_bo_move_memcpy(bo, ctx, mem); 363 364 if (ret) { 365 if (bdev->driver->move_notify) { 366 swap(*mem, bo->mem); 367 bdev->driver->move_notify(bo, false, mem); 368 swap(*mem, bo->mem); 369 } 370 371 goto out_err; 372 } 373 374 moved: 375 if (bo->evicted) { 376 if (bdev->driver->invalidate_caches) { 377 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); 378 if (ret) 379 pr_err("Can not flush read caches\n"); 380 } 381 bo->evicted = false; 382 } 383 384 if (bo->mem.mm_node) 385 bo->offset = (bo->mem.start << PAGE_SHIFT) + 386 bdev->man[bo->mem.mem_type].gpu_offset; 387 else 388 bo->offset = 0; 389 390 ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; 391 return 0; 392 393 out_err: 394 new_man = &bdev->man[bo->mem.mem_type]; 395 if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) { 396 ttm_tt_destroy(bo->ttm); 397 bo->ttm = NULL; 398 } 399 400 return ret; 401 } 402 403 /** 404 * Call bo::reserved. 405 * Will release GPU memory type usage on destruction. 406 * This is the place to put in driver specific hooks to release 407 * driver private resources. 408 * Will release the bo::reserved lock. 
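 * (Note: with the callers in this file the reservation itself is dropped
 * by the caller after this function returns, not in here.)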
409 */ 410 411 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) 412 { 413 if (bo->bdev->driver->move_notify) 414 bo->bdev->driver->move_notify(bo, false, NULL); 415 416 ttm_tt_destroy(bo->ttm); 417 bo->ttm = NULL; 418 ttm_bo_mem_put(bo, &bo->mem); 419 } 420 421 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) 422 { 423 int r; 424 425 if (bo->base.resv == &bo->base._resv) 426 return 0; 427 428 BUG_ON(!dma_resv_trylock(&bo->base._resv)); 429 430 r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv); 431 if (r) 432 dma_resv_unlock(&bo->base._resv); 433 434 return r; 435 } 436 437 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) 438 { 439 struct dma_resv_list *fobj; 440 struct dma_fence *fence; 441 int i; 442 443 fobj = dma_resv_get_list(&bo->base._resv); 444 fence = dma_resv_get_excl(&bo->base._resv); 445 if (fence && !fence->ops->signaled) 446 dma_fence_enable_sw_signaling(fence); 447 448 for (i = 0; fobj && i < fobj->shared_count; ++i) { 449 fence = rcu_dereference_protected(fobj->shared[i], 450 dma_resv_held(bo->base.resv)); 451 452 if (!fence->ops->signaled) 453 dma_fence_enable_sw_signaling(fence); 454 } 455 } 456 457 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) 458 { 459 struct ttm_bo_device *bdev = bo->bdev; 460 int ret; 461 462 ret = ttm_bo_individualize_resv(bo); 463 if (ret) { 464 /* Last resort, if we fail to allocate memory for the 465 * fences block for the BO to become idle 466 */ 467 dma_resv_wait_timeout_rcu(bo->base.resv, true, false, 468 30 * HZ); 469 spin_lock(&ttm_bo_glob.lru_lock); 470 goto error; 471 } 472 473 spin_lock(&ttm_bo_glob.lru_lock); 474 ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY; 475 if (!ret) { 476 if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) { 477 ttm_bo_del_from_lru(bo); 478 spin_unlock(&ttm_bo_glob.lru_lock); 479 if (bo->base.resv != &bo->base._resv) 480 dma_resv_unlock(&bo->base._resv); 481 482 ttm_bo_cleanup_memtype_use(bo); 483 dma_resv_unlock(bo->base.resv); 484 return; 485 } 486 487 ttm_bo_flush_all_fences(bo); 488 489 /* 490 * Make NO_EVICT bos immediately available to 491 * shrinkers, now that they are queued for 492 * destruction. 493 */ 494 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { 495 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; 496 ttm_bo_move_to_lru_tail(bo, NULL); 497 } 498 499 dma_resv_unlock(bo->base.resv); 500 } 501 if (bo->base.resv != &bo->base._resv) 502 dma_resv_unlock(&bo->base._resv); 503 504 error: 505 kref_get(&bo->list_kref); 506 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 507 spin_unlock(&ttm_bo_glob.lru_lock); 508 509 schedule_delayed_work(&bdev->wq, 510 ((HZ / 100) < 1) ? 1 : HZ / 100); 511 } 512 513 /** 514 * function ttm_bo_cleanup_refs 515 * If bo idle, remove from delayed- and lru lists, and unref. 516 * If not idle, do nothing. 517 * 518 * Must be called with lru_lock and reservation held, this function 519 * will drop the lru lock and optionally the reservation lock before returning. 520 * 521 * @interruptible Any sleeps should occur interruptibly. 522 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. 523 * @unlock_resv Unlock the reservation lock as well. 
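 *
 * Return: 0 if the BO was idle and has been cleaned up (or someone else is
 * already cleaning it up), -EBUSY if it is still busy and @no_wait_gpu is
 * set, or a negative error code returned by the fence wait.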
524 */ 525 526 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, 527 bool interruptible, bool no_wait_gpu, 528 bool unlock_resv) 529 { 530 struct dma_resv *resv; 531 int ret; 532 533 if (unlikely(list_empty(&bo->ddestroy))) 534 resv = bo->base.resv; 535 else 536 resv = &bo->base._resv; 537 538 if (dma_resv_test_signaled_rcu(resv, true)) 539 ret = 0; 540 else 541 ret = -EBUSY; 542 543 if (ret && !no_wait_gpu) { 544 long lret; 545 546 if (unlock_resv) 547 dma_resv_unlock(bo->base.resv); 548 spin_unlock(&ttm_bo_glob.lru_lock); 549 550 lret = dma_resv_wait_timeout_rcu(resv, true, 551 interruptible, 552 30 * HZ); 553 554 if (lret < 0) 555 return lret; 556 else if (lret == 0) 557 return -EBUSY; 558 559 spin_lock(&ttm_bo_glob.lru_lock); 560 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { 561 /* 562 * We raced, and lost, someone else holds the reservation now, 563 * and is probably busy in ttm_bo_cleanup_memtype_use. 564 * 565 * Even if it's not the case, because we finished waiting any 566 * delayed destruction would succeed, so just return success 567 * here. 568 */ 569 spin_unlock(&ttm_bo_glob.lru_lock); 570 return 0; 571 } 572 ret = 0; 573 } 574 575 if (ret || unlikely(list_empty(&bo->ddestroy))) { 576 if (unlock_resv) 577 dma_resv_unlock(bo->base.resv); 578 spin_unlock(&ttm_bo_glob.lru_lock); 579 return ret; 580 } 581 582 ttm_bo_del_from_lru(bo); 583 list_del_init(&bo->ddestroy); 584 kref_put(&bo->list_kref, ttm_bo_ref_bug); 585 586 spin_unlock(&ttm_bo_glob.lru_lock); 587 ttm_bo_cleanup_memtype_use(bo); 588 589 if (unlock_resv) 590 dma_resv_unlock(bo->base.resv); 591 592 return 0; 593 } 594 595 /** 596 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all 597 * encountered buffers. 598 */ 599 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 600 { 601 struct ttm_bo_global *glob = &ttm_bo_glob; 602 struct list_head removed; 603 bool empty; 604 605 INIT_LIST_HEAD(&removed); 606 607 spin_lock(&glob->lru_lock); 608 while (!list_empty(&bdev->ddestroy)) { 609 struct ttm_buffer_object *bo; 610 611 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object, 612 ddestroy); 613 kref_get(&bo->list_kref); 614 list_move_tail(&bo->ddestroy, &removed); 615 616 if (remove_all || bo->base.resv != &bo->base._resv) { 617 spin_unlock(&glob->lru_lock); 618 dma_resv_lock(bo->base.resv, NULL); 619 620 spin_lock(&glob->lru_lock); 621 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 622 623 } else if (dma_resv_trylock(bo->base.resv)) { 624 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 625 } else { 626 spin_unlock(&glob->lru_lock); 627 } 628 629 kref_put(&bo->list_kref, ttm_bo_release_list); 630 spin_lock(&glob->lru_lock); 631 } 632 list_splice_tail(&removed, &bdev->ddestroy); 633 empty = list_empty(&bdev->ddestroy); 634 spin_unlock(&glob->lru_lock); 635 636 return empty; 637 } 638 639 static void ttm_bo_delayed_workqueue(struct work_struct *work) 640 { 641 struct ttm_bo_device *bdev = 642 container_of(work, struct ttm_bo_device, wq.work); 643 644 if (!ttm_bo_delayed_delete(bdev, false)) 645 schedule_delayed_work(&bdev->wq, 646 ((HZ / 100) < 1) ? 
1 : HZ / 100); 647 } 648 649 static void ttm_bo_release(struct kref *kref) 650 { 651 struct ttm_buffer_object *bo = 652 container_of(kref, struct ttm_buffer_object, kref); 653 struct ttm_bo_device *bdev = bo->bdev; 654 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 655 656 if (bo->bdev->driver->release_notify) 657 bo->bdev->driver->release_notify(bo); 658 659 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); 660 ttm_mem_io_lock(man, false); 661 ttm_mem_io_free_vm(bo); 662 ttm_mem_io_unlock(man); 663 ttm_bo_cleanup_refs_or_queue(bo); 664 kref_put(&bo->list_kref, ttm_bo_release_list); 665 } 666 667 void ttm_bo_put(struct ttm_buffer_object *bo) 668 { 669 kref_put(&bo->kref, ttm_bo_release); 670 } 671 EXPORT_SYMBOL(ttm_bo_put); 672 673 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) 674 { 675 return cancel_delayed_work_sync(&bdev->wq); 676 } 677 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); 678 679 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) 680 { 681 if (resched) 682 schedule_delayed_work(&bdev->wq, 683 ((HZ / 100) < 1) ? 1 : HZ / 100); 684 } 685 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); 686 687 static int ttm_bo_evict(struct ttm_buffer_object *bo, 688 struct ttm_operation_ctx *ctx) 689 { 690 struct ttm_bo_device *bdev = bo->bdev; 691 struct ttm_mem_reg evict_mem; 692 struct ttm_placement placement; 693 int ret = 0; 694 695 dma_resv_assert_held(bo->base.resv); 696 697 placement.num_placement = 0; 698 placement.num_busy_placement = 0; 699 bdev->driver->evict_flags(bo, &placement); 700 701 if (!placement.num_placement && !placement.num_busy_placement) { 702 ret = ttm_bo_pipeline_gutting(bo); 703 if (ret) 704 return ret; 705 706 return ttm_tt_create(bo, false); 707 } 708 709 evict_mem = bo->mem; 710 evict_mem.mm_node = NULL; 711 evict_mem.bus.io_reserved_vm = false; 712 evict_mem.bus.io_reserved_count = 0; 713 714 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx); 715 if (ret) { 716 if (ret != -ERESTARTSYS) { 717 pr_err("Failed to find memory space for buffer 0x%p eviction\n", 718 bo); 719 ttm_bo_mem_space_debug(bo, &placement); 720 } 721 goto out; 722 } 723 724 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx); 725 if (unlikely(ret)) { 726 if (ret != -ERESTARTSYS) 727 pr_err("Buffer eviction failed\n"); 728 ttm_bo_mem_put(bo, &evict_mem); 729 goto out; 730 } 731 bo->evicted = true; 732 out: 733 return ret; 734 } 735 736 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, 737 const struct ttm_place *place) 738 { 739 /* Don't evict this BO if it's outside of the 740 * requested placement range 741 */ 742 if (place->fpfn >= (bo->mem.start + bo->mem.size) || 743 (place->lpfn && place->lpfn <= bo->mem.start)) 744 return false; 745 746 return true; 747 } 748 EXPORT_SYMBOL(ttm_bo_eviction_valuable); 749 750 /** 751 * Check the target bo is allowable to be evicted or swapout, including cases: 752 * 753 * a. if share same reservation object with ctx->resv, have assumption 754 * reservation objects should already be locked, so not lock again and 755 * return true directly when either the opreation allow_reserved_eviction 756 * or the target bo already is in delayed free list; 757 * 758 * b. Otherwise, trylock it. 
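 *
 *    ("allow_reserved_eviction" above refers to the
 *    TTM_OPT_FLAG_ALLOW_RES_EVICT flag in ctx->flags.)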
759 */ 760 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, 761 struct ttm_operation_ctx *ctx, bool *locked, bool *busy) 762 { 763 bool ret = false; 764 765 if (bo->base.resv == ctx->resv) { 766 dma_resv_assert_held(bo->base.resv); 767 if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT 768 || !list_empty(&bo->ddestroy)) 769 ret = true; 770 *locked = false; 771 if (busy) 772 *busy = false; 773 } else { 774 ret = dma_resv_trylock(bo->base.resv); 775 *locked = ret; 776 if (busy) 777 *busy = !ret; 778 } 779 780 return ret; 781 } 782 783 /** 784 * ttm_mem_evict_wait_busy - wait for a busy BO to become available 785 * 786 * @busy_bo: BO which couldn't be locked with trylock 787 * @ctx: operation context 788 * @ticket: acquire ticket 789 * 790 * Try to lock a busy buffer object to avoid failing eviction. 791 */ 792 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, 793 struct ttm_operation_ctx *ctx, 794 struct ww_acquire_ctx *ticket) 795 { 796 int r; 797 798 if (!busy_bo || !ticket) 799 return -EBUSY; 800 801 if (ctx->interruptible) 802 r = dma_resv_lock_interruptible(busy_bo->base.resv, 803 ticket); 804 else 805 r = dma_resv_lock(busy_bo->base.resv, ticket); 806 807 /* 808 * TODO: It would be better to keep the BO locked until allocation is at 809 * least tried one more time, but that would mean a much larger rework 810 * of TTM. 811 */ 812 if (!r) 813 dma_resv_unlock(busy_bo->base.resv); 814 815 return r == -EDEADLK ? -EBUSY : r; 816 } 817 818 static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 819 uint32_t mem_type, 820 const struct ttm_place *place, 821 struct ttm_operation_ctx *ctx, 822 struct ww_acquire_ctx *ticket) 823 { 824 struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; 825 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 826 bool locked = false; 827 unsigned i; 828 int ret; 829 830 spin_lock(&ttm_bo_glob.lru_lock); 831 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { 832 list_for_each_entry(bo, &man->lru[i], lru) { 833 bool busy; 834 835 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, 836 &busy)) { 837 if (busy && !busy_bo && ticket != 838 dma_resv_locking_ctx(bo->base.resv)) 839 busy_bo = bo; 840 continue; 841 } 842 843 if (place && !bdev->driver->eviction_valuable(bo, 844 place)) { 845 if (locked) 846 dma_resv_unlock(bo->base.resv); 847 continue; 848 } 849 break; 850 } 851 852 /* If the inner loop terminated early, we have our candidate */ 853 if (&bo->lru != &man->lru[i]) 854 break; 855 856 bo = NULL; 857 } 858 859 if (!bo) { 860 if (busy_bo) 861 kref_get(&busy_bo->list_kref); 862 spin_unlock(&ttm_bo_glob.lru_lock); 863 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket); 864 if (busy_bo) 865 kref_put(&busy_bo->list_kref, ttm_bo_release_list); 866 return ret; 867 } 868 869 kref_get(&bo->list_kref); 870 871 if (!list_empty(&bo->ddestroy)) { 872 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible, 873 ctx->no_wait_gpu, locked); 874 kref_put(&bo->list_kref, ttm_bo_release_list); 875 return ret; 876 } 877 878 spin_unlock(&ttm_bo_glob.lru_lock); 879 880 ret = ttm_bo_evict(bo, ctx); 881 if (locked) 882 ttm_bo_unreserve(bo); 883 884 kref_put(&bo->list_kref, ttm_bo_release_list); 885 return ret; 886 } 887 888 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) 889 { 890 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; 891 892 if (mem->mm_node) 893 (*man->func->put_node)(man, mem); 894 } 895 EXPORT_SYMBOL(ttm_bo_mem_put); 896 897 /** 898 * Add the last move fence to the BO and reserve a new shared 
slot. 899 */ 900 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, 901 struct ttm_mem_type_manager *man, 902 struct ttm_mem_reg *mem, 903 bool no_wait_gpu) 904 { 905 struct dma_fence *fence; 906 int ret; 907 908 spin_lock(&man->move_lock); 909 fence = dma_fence_get(man->move); 910 spin_unlock(&man->move_lock); 911 912 if (!fence) 913 return 0; 914 915 if (no_wait_gpu) 916 return -EBUSY; 917 918 dma_resv_add_shared_fence(bo->base.resv, fence); 919 920 ret = dma_resv_reserve_shared(bo->base.resv, 1); 921 if (unlikely(ret)) { 922 dma_fence_put(fence); 923 return ret; 924 } 925 926 dma_fence_put(bo->moving); 927 bo->moving = fence; 928 return 0; 929 } 930 931 /** 932 * Repeatedly evict memory from the LRU for @mem_type until we create enough 933 * space, or we've evicted everything and there isn't enough space. 934 */ 935 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, 936 const struct ttm_place *place, 937 struct ttm_mem_reg *mem, 938 struct ttm_operation_ctx *ctx) 939 { 940 struct ttm_bo_device *bdev = bo->bdev; 941 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 942 struct ww_acquire_ctx *ticket; 943 int ret; 944 945 ticket = dma_resv_locking_ctx(bo->base.resv); 946 do { 947 ret = (*man->func->get_node)(man, bo, place, mem); 948 if (unlikely(ret != 0)) 949 return ret; 950 if (mem->mm_node) 951 break; 952 ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx, 953 ticket); 954 if (unlikely(ret != 0)) 955 return ret; 956 } while (1); 957 958 return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); 959 } 960 961 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, 962 uint32_t cur_placement, 963 uint32_t proposed_placement) 964 { 965 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; 966 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; 967 968 /** 969 * Keep current caching if possible. 970 */ 971 972 if ((cur_placement & caching) != 0) 973 result |= (cur_placement & caching); 974 else if ((man->default_caching & caching) != 0) 975 result |= man->default_caching; 976 else if ((TTM_PL_FLAG_CACHED & caching) != 0) 977 result |= TTM_PL_FLAG_CACHED; 978 else if ((TTM_PL_FLAG_WC & caching) != 0) 979 result |= TTM_PL_FLAG_WC; 980 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) 981 result |= TTM_PL_FLAG_UNCACHED; 982 983 return result; 984 } 985 986 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 987 uint32_t mem_type, 988 const struct ttm_place *place, 989 uint32_t *masked_placement) 990 { 991 uint32_t cur_flags = ttm_bo_type_flags(mem_type); 992 993 if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0) 994 return false; 995 996 if ((place->flags & man->available_caching) == 0) 997 return false; 998 999 cur_flags |= (place->flags & man->available_caching); 1000 1001 *masked_placement = cur_flags; 1002 return true; 1003 } 1004 1005 /** 1006 * ttm_bo_mem_placement - check if placement is compatible 1007 * @bo: BO to find memory for 1008 * @place: where to search 1009 * @mem: the memory object to fill in 1010 * @ctx: operation context 1011 * 1012 * Check if placement is compatible and fill in mem structure. 1013 * Returns -EBUSY if placement won't work or negative error code. 1014 * 0 when placement can be used. 
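 *
 * On success the BO is also moved onto the LRU list of the selected memory
 * type, even though no space has been allocated from that manager yet.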
1015 */ 1016 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, 1017 const struct ttm_place *place, 1018 struct ttm_mem_reg *mem, 1019 struct ttm_operation_ctx *ctx) 1020 { 1021 struct ttm_bo_device *bdev = bo->bdev; 1022 uint32_t mem_type = TTM_PL_SYSTEM; 1023 struct ttm_mem_type_manager *man; 1024 uint32_t cur_flags = 0; 1025 int ret; 1026 1027 ret = ttm_mem_type_from_place(place, &mem_type); 1028 if (ret) 1029 return ret; 1030 1031 man = &bdev->man[mem_type]; 1032 if (!man->has_type || !man->use_type) 1033 return -EBUSY; 1034 1035 if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) 1036 return -EBUSY; 1037 1038 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); 1039 /* 1040 * Use the access and other non-mapping-related flag bits from 1041 * the memory placement flags to the current flags 1042 */ 1043 ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); 1044 1045 mem->mem_type = mem_type; 1046 mem->placement = cur_flags; 1047 1048 spin_lock(&ttm_bo_glob.lru_lock); 1049 ttm_bo_del_from_lru(bo); 1050 ttm_bo_add_mem_to_lru(bo, mem); 1051 spin_unlock(&ttm_bo_glob.lru_lock); 1052 1053 return 0; 1054 } 1055 1056 /** 1057 * Creates space for memory region @mem according to its type. 1058 * 1059 * This function first searches for free space in compatible memory types in 1060 * the priority order defined by the driver. If free space isn't found, then 1061 * ttm_bo_mem_force_space is attempted in priority order to evict and find 1062 * space. 1063 */ 1064 int ttm_bo_mem_space(struct ttm_buffer_object *bo, 1065 struct ttm_placement *placement, 1066 struct ttm_mem_reg *mem, 1067 struct ttm_operation_ctx *ctx) 1068 { 1069 struct ttm_bo_device *bdev = bo->bdev; 1070 bool type_found = false; 1071 int i, ret; 1072 1073 ret = dma_resv_reserve_shared(bo->base.resv, 1); 1074 if (unlikely(ret)) 1075 return ret; 1076 1077 mem->mm_node = NULL; 1078 for (i = 0; i < placement->num_placement; ++i) { 1079 const struct ttm_place *place = &placement->placement[i]; 1080 struct ttm_mem_type_manager *man; 1081 1082 ret = ttm_bo_mem_placement(bo, place, mem, ctx); 1083 if (ret == -EBUSY) 1084 continue; 1085 if (ret) 1086 goto error; 1087 1088 type_found = true; 1089 mem->mm_node = NULL; 1090 if (mem->mem_type == TTM_PL_SYSTEM) 1091 return 0; 1092 1093 man = &bdev->man[mem->mem_type]; 1094 ret = (*man->func->get_node)(man, bo, place, mem); 1095 if (unlikely(ret)) 1096 goto error; 1097 1098 if (!mem->mm_node) 1099 continue; 1100 1101 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); 1102 if (unlikely(ret)) { 1103 (*man->func->put_node)(man, mem); 1104 if (ret == -EBUSY) 1105 continue; 1106 1107 goto error; 1108 } 1109 return 0; 1110 } 1111 1112 for (i = 0; i < placement->num_busy_placement; ++i) { 1113 const struct ttm_place *place = &placement->busy_placement[i]; 1114 1115 ret = ttm_bo_mem_placement(bo, place, mem, ctx); 1116 if (ret == -EBUSY) 1117 continue; 1118 if (ret) 1119 goto error; 1120 1121 type_found = true; 1122 mem->mm_node = NULL; 1123 if (mem->mem_type == TTM_PL_SYSTEM) 1124 return 0; 1125 1126 ret = ttm_bo_mem_force_space(bo, place, mem, ctx); 1127 if (ret == 0 && mem->mm_node) 1128 return 0; 1129 1130 if (ret && ret != -EBUSY) 1131 goto error; 1132 } 1133 1134 ret = -ENOMEM; 1135 if (!type_found) { 1136 pr_err(TTM_PFX "No compatible memory type found\n"); 1137 ret = -EINVAL; 1138 } 1139 1140 error: 1141 if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { 1142 spin_lock(&ttm_bo_glob.lru_lock); 1143 ttm_bo_move_to_lru_tail(bo, 
NULL); 1144 spin_unlock(&ttm_bo_glob.lru_lock); 1145 } 1146 1147 return ret; 1148 } 1149 EXPORT_SYMBOL(ttm_bo_mem_space); 1150 1151 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 1152 struct ttm_placement *placement, 1153 struct ttm_operation_ctx *ctx) 1154 { 1155 int ret = 0; 1156 struct ttm_mem_reg mem; 1157 1158 dma_resv_assert_held(bo->base.resv); 1159 1160 mem.num_pages = bo->num_pages; 1161 mem.size = mem.num_pages << PAGE_SHIFT; 1162 mem.page_alignment = bo->mem.page_alignment; 1163 mem.bus.io_reserved_vm = false; 1164 mem.bus.io_reserved_count = 0; 1165 /* 1166 * Determine where to move the buffer. 1167 */ 1168 ret = ttm_bo_mem_space(bo, placement, &mem, ctx); 1169 if (ret) 1170 goto out_unlock; 1171 ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); 1172 out_unlock: 1173 if (ret && mem.mm_node) 1174 ttm_bo_mem_put(bo, &mem); 1175 return ret; 1176 } 1177 1178 static bool ttm_bo_places_compat(const struct ttm_place *places, 1179 unsigned num_placement, 1180 struct ttm_mem_reg *mem, 1181 uint32_t *new_flags) 1182 { 1183 unsigned i; 1184 1185 for (i = 0; i < num_placement; i++) { 1186 const struct ttm_place *heap = &places[i]; 1187 1188 if (mem->mm_node && (mem->start < heap->fpfn || 1189 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) 1190 continue; 1191 1192 *new_flags = heap->flags; 1193 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && 1194 (*new_flags & mem->placement & TTM_PL_MASK_MEM) && 1195 (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || 1196 (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) 1197 return true; 1198 } 1199 return false; 1200 } 1201 1202 bool ttm_bo_mem_compat(struct ttm_placement *placement, 1203 struct ttm_mem_reg *mem, 1204 uint32_t *new_flags) 1205 { 1206 if (ttm_bo_places_compat(placement->placement, placement->num_placement, 1207 mem, new_flags)) 1208 return true; 1209 1210 if ((placement->busy_placement != placement->placement || 1211 placement->num_busy_placement > placement->num_placement) && 1212 ttm_bo_places_compat(placement->busy_placement, 1213 placement->num_busy_placement, 1214 mem, new_flags)) 1215 return true; 1216 1217 return false; 1218 } 1219 EXPORT_SYMBOL(ttm_bo_mem_compat); 1220 1221 int ttm_bo_validate(struct ttm_buffer_object *bo, 1222 struct ttm_placement *placement, 1223 struct ttm_operation_ctx *ctx) 1224 { 1225 int ret; 1226 uint32_t new_flags; 1227 1228 dma_resv_assert_held(bo->base.resv); 1229 /* 1230 * Check whether we need to move buffer. 1231 */ 1232 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { 1233 ret = ttm_bo_move_buffer(bo, placement, ctx); 1234 if (ret) 1235 return ret; 1236 } else { 1237 /* 1238 * Use the access and other non-mapping-related flag bits from 1239 * the compatible memory placement flags to the active flags 1240 */ 1241 ttm_flag_masked(&bo->mem.placement, new_flags, 1242 ~TTM_PL_MASK_MEMTYPE); 1243 } 1244 /* 1245 * We might need to add a TTM. 
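	 * A buffer that has always lived in TTM_PL_SYSTEM may not have a
	 * struct ttm_tt yet; create one here so later population and CPU
	 * access have something to work with.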
1246 */ 1247 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 1248 ret = ttm_tt_create(bo, true); 1249 if (ret) 1250 return ret; 1251 } 1252 return 0; 1253 } 1254 EXPORT_SYMBOL(ttm_bo_validate); 1255 1256 int ttm_bo_init_reserved(struct ttm_bo_device *bdev, 1257 struct ttm_buffer_object *bo, 1258 unsigned long size, 1259 enum ttm_bo_type type, 1260 struct ttm_placement *placement, 1261 uint32_t page_alignment, 1262 struct ttm_operation_ctx *ctx, 1263 size_t acc_size, 1264 struct sg_table *sg, 1265 struct dma_resv *resv, 1266 void (*destroy) (struct ttm_buffer_object *)) 1267 { 1268 struct ttm_mem_global *mem_glob = &ttm_mem_glob; 1269 int ret = 0; 1270 unsigned long num_pages; 1271 bool locked; 1272 1273 ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx); 1274 if (ret) { 1275 pr_err("Out of kernel memory\n"); 1276 if (destroy) 1277 (*destroy)(bo); 1278 else 1279 kfree(bo); 1280 return -ENOMEM; 1281 } 1282 1283 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1284 if (num_pages == 0) { 1285 pr_err("Illegal buffer object size\n"); 1286 if (destroy) 1287 (*destroy)(bo); 1288 else 1289 kfree(bo); 1290 ttm_mem_global_free(mem_glob, acc_size); 1291 return -EINVAL; 1292 } 1293 bo->destroy = destroy ? destroy : ttm_bo_default_destroy; 1294 1295 kref_init(&bo->kref); 1296 kref_init(&bo->list_kref); 1297 INIT_LIST_HEAD(&bo->lru); 1298 INIT_LIST_HEAD(&bo->ddestroy); 1299 INIT_LIST_HEAD(&bo->swap); 1300 INIT_LIST_HEAD(&bo->io_reserve_lru); 1301 bo->bdev = bdev; 1302 bo->type = type; 1303 bo->num_pages = num_pages; 1304 bo->mem.size = num_pages << PAGE_SHIFT; 1305 bo->mem.mem_type = TTM_PL_SYSTEM; 1306 bo->mem.num_pages = bo->num_pages; 1307 bo->mem.mm_node = NULL; 1308 bo->mem.page_alignment = page_alignment; 1309 bo->mem.bus.io_reserved_vm = false; 1310 bo->mem.bus.io_reserved_count = 0; 1311 bo->moving = NULL; 1312 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1313 bo->acc_size = acc_size; 1314 bo->sg = sg; 1315 if (resv) { 1316 bo->base.resv = resv; 1317 dma_resv_assert_held(bo->base.resv); 1318 } else { 1319 bo->base.resv = &bo->base._resv; 1320 } 1321 if (!ttm_bo_uses_embedded_gem_object(bo)) { 1322 /* 1323 * bo.gem is not initialized, so we have to setup the 1324 * struct elements we want use regardless. 1325 */ 1326 dma_resv_init(&bo->base._resv); 1327 drm_vma_node_reset(&bo->base.vma_node); 1328 } 1329 atomic_inc(&ttm_bo_glob.bo_count); 1330 1331 /* 1332 * For ttm_bo_type_device buffers, allocate 1333 * address space from the device. 1334 */ 1335 if (bo->type == ttm_bo_type_device || 1336 bo->type == ttm_bo_type_sg) 1337 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node, 1338 bo->mem.num_pages); 1339 1340 /* passed reservation objects should already be locked, 1341 * since otherwise lockdep will be angered in radeon. 
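	 *
	 * For the embedded reservation object the trylock below cannot fail,
	 * since the BO is not visible to anyone else yet.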
1342 */ 1343 if (!resv) { 1344 locked = dma_resv_trylock(bo->base.resv); 1345 WARN_ON(!locked); 1346 } 1347 1348 if (likely(!ret)) 1349 ret = ttm_bo_validate(bo, placement, ctx); 1350 1351 if (unlikely(ret)) { 1352 if (!resv) 1353 ttm_bo_unreserve(bo); 1354 1355 ttm_bo_put(bo); 1356 return ret; 1357 } 1358 1359 spin_lock(&ttm_bo_glob.lru_lock); 1360 ttm_bo_move_to_lru_tail(bo, NULL); 1361 spin_unlock(&ttm_bo_glob.lru_lock); 1362 1363 return ret; 1364 } 1365 EXPORT_SYMBOL(ttm_bo_init_reserved); 1366 1367 int ttm_bo_init(struct ttm_bo_device *bdev, 1368 struct ttm_buffer_object *bo, 1369 unsigned long size, 1370 enum ttm_bo_type type, 1371 struct ttm_placement *placement, 1372 uint32_t page_alignment, 1373 bool interruptible, 1374 size_t acc_size, 1375 struct sg_table *sg, 1376 struct dma_resv *resv, 1377 void (*destroy) (struct ttm_buffer_object *)) 1378 { 1379 struct ttm_operation_ctx ctx = { interruptible, false }; 1380 int ret; 1381 1382 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement, 1383 page_alignment, &ctx, acc_size, 1384 sg, resv, destroy); 1385 if (ret) 1386 return ret; 1387 1388 if (!resv) 1389 ttm_bo_unreserve(bo); 1390 1391 return 0; 1392 } 1393 EXPORT_SYMBOL(ttm_bo_init); 1394 1395 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, 1396 unsigned long bo_size, 1397 unsigned struct_size) 1398 { 1399 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; 1400 size_t size = 0; 1401 1402 size += ttm_round_pot(struct_size); 1403 size += ttm_round_pot(npages * sizeof(void *)); 1404 size += ttm_round_pot(sizeof(struct ttm_tt)); 1405 return size; 1406 } 1407 EXPORT_SYMBOL(ttm_bo_acc_size); 1408 1409 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, 1410 unsigned long bo_size, 1411 unsigned struct_size) 1412 { 1413 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; 1414 size_t size = 0; 1415 1416 size += ttm_round_pot(struct_size); 1417 size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t))); 1418 size += ttm_round_pot(sizeof(struct ttm_dma_tt)); 1419 return size; 1420 } 1421 EXPORT_SYMBOL(ttm_bo_dma_acc_size); 1422 1423 int ttm_bo_create(struct ttm_bo_device *bdev, 1424 unsigned long size, 1425 enum ttm_bo_type type, 1426 struct ttm_placement *placement, 1427 uint32_t page_alignment, 1428 bool interruptible, 1429 struct ttm_buffer_object **p_bo) 1430 { 1431 struct ttm_buffer_object *bo; 1432 size_t acc_size; 1433 int ret; 1434 1435 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 1436 if (unlikely(bo == NULL)) 1437 return -ENOMEM; 1438 1439 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); 1440 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1441 interruptible, acc_size, 1442 NULL, NULL, NULL); 1443 if (likely(ret == 0)) 1444 *p_bo = bo; 1445 1446 return ret; 1447 } 1448 EXPORT_SYMBOL(ttm_bo_create); 1449 1450 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, 1451 unsigned mem_type) 1452 { 1453 struct ttm_operation_ctx ctx = { 1454 .interruptible = false, 1455 .no_wait_gpu = false, 1456 .flags = TTM_OPT_FLAG_FORCE_ALLOC 1457 }; 1458 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 1459 struct ttm_bo_global *glob = &ttm_bo_glob; 1460 struct dma_fence *fence; 1461 int ret; 1462 unsigned i; 1463 1464 /* 1465 * Can't use standard list traversal since we're unlocking. 
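	 * The LRU lock is dropped around ttm_mem_evict_first(), so instead of
	 * iterating, repeatedly evict the head of each priority list until
	 * that list is empty.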
1466 */ 1467 1468 spin_lock(&glob->lru_lock); 1469 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { 1470 while (!list_empty(&man->lru[i])) { 1471 spin_unlock(&glob->lru_lock); 1472 ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx, 1473 NULL); 1474 if (ret) 1475 return ret; 1476 spin_lock(&glob->lru_lock); 1477 } 1478 } 1479 spin_unlock(&glob->lru_lock); 1480 1481 spin_lock(&man->move_lock); 1482 fence = dma_fence_get(man->move); 1483 spin_unlock(&man->move_lock); 1484 1485 if (fence) { 1486 ret = dma_fence_wait(fence, false); 1487 dma_fence_put(fence); 1488 if (ret) 1489 return ret; 1490 } 1491 1492 return 0; 1493 } 1494 1495 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1496 { 1497 struct ttm_mem_type_manager *man; 1498 int ret = -EINVAL; 1499 1500 if (mem_type >= TTM_NUM_MEM_TYPES) { 1501 pr_err("Illegal memory type %d\n", mem_type); 1502 return ret; 1503 } 1504 man = &bdev->man[mem_type]; 1505 1506 if (!man->has_type) { 1507 pr_err("Trying to take down uninitialized memory manager type %u\n", 1508 mem_type); 1509 return ret; 1510 } 1511 1512 man->use_type = false; 1513 man->has_type = false; 1514 1515 ret = 0; 1516 if (mem_type > 0) { 1517 ret = ttm_bo_force_list_clean(bdev, mem_type); 1518 if (ret) { 1519 pr_err("Cleanup eviction failed\n"); 1520 return ret; 1521 } 1522 1523 ret = (*man->func->takedown)(man); 1524 } 1525 1526 dma_fence_put(man->move); 1527 man->move = NULL; 1528 1529 return ret; 1530 } 1531 EXPORT_SYMBOL(ttm_bo_clean_mm); 1532 1533 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1534 { 1535 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 1536 1537 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { 1538 pr_err("Illegal memory manager memory type %u\n", mem_type); 1539 return -EINVAL; 1540 } 1541 1542 if (!man->has_type) { 1543 pr_err("Memory type %u has not been initialized\n", mem_type); 1544 return 0; 1545 } 1546 1547 return ttm_bo_force_list_clean(bdev, mem_type); 1548 } 1549 EXPORT_SYMBOL(ttm_bo_evict_mm); 1550 1551 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 1552 unsigned long p_size) 1553 { 1554 int ret; 1555 struct ttm_mem_type_manager *man; 1556 unsigned i; 1557 1558 BUG_ON(type >= TTM_NUM_MEM_TYPES); 1559 man = &bdev->man[type]; 1560 BUG_ON(man->has_type); 1561 man->io_reserve_fastpath = true; 1562 man->use_io_reserve_lru = false; 1563 mutex_init(&man->io_reserve_mutex); 1564 spin_lock_init(&man->move_lock); 1565 INIT_LIST_HEAD(&man->io_reserve_lru); 1566 1567 ret = bdev->driver->init_mem_type(bdev, type, man); 1568 if (ret) 1569 return ret; 1570 man->bdev = bdev; 1571 1572 if (type != TTM_PL_SYSTEM) { 1573 ret = (*man->func->init)(man, p_size); 1574 if (ret) 1575 return ret; 1576 } 1577 man->has_type = true; 1578 man->use_type = true; 1579 man->size = p_size; 1580 1581 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) 1582 INIT_LIST_HEAD(&man->lru[i]); 1583 man->move = NULL; 1584 1585 return 0; 1586 } 1587 EXPORT_SYMBOL(ttm_bo_init_mm); 1588 1589 static void ttm_bo_global_kobj_release(struct kobject *kobj) 1590 { 1591 struct ttm_bo_global *glob = 1592 container_of(kobj, struct ttm_bo_global, kobj); 1593 1594 __free_page(glob->dummy_read_page); 1595 } 1596 1597 static void ttm_bo_global_release(void) 1598 { 1599 struct ttm_bo_global *glob = &ttm_bo_glob; 1600 1601 mutex_lock(&ttm_global_mutex); 1602 if (--ttm_bo_glob_use_count > 0) 1603 goto out; 1604 1605 kobject_del(&glob->kobj); 1606 kobject_put(&glob->kobj); 1607 ttm_mem_global_release(&ttm_mem_glob); 1608 memset(glob, 0, sizeof(*glob)); 1609 out: 
1610 mutex_unlock(&ttm_global_mutex); 1611 } 1612 1613 static int ttm_bo_global_init(void) 1614 { 1615 struct ttm_bo_global *glob = &ttm_bo_glob; 1616 int ret = 0; 1617 unsigned i; 1618 1619 mutex_lock(&ttm_global_mutex); 1620 if (++ttm_bo_glob_use_count > 1) 1621 goto out; 1622 1623 ret = ttm_mem_global_init(&ttm_mem_glob); 1624 if (ret) 1625 goto out; 1626 1627 spin_lock_init(&glob->lru_lock); 1628 glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); 1629 1630 if (unlikely(glob->dummy_read_page == NULL)) { 1631 ret = -ENOMEM; 1632 goto out; 1633 } 1634 1635 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) 1636 INIT_LIST_HEAD(&glob->swap_lru[i]); 1637 INIT_LIST_HEAD(&glob->device_list); 1638 atomic_set(&glob->bo_count, 0); 1639 1640 ret = kobject_init_and_add( 1641 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); 1642 if (unlikely(ret != 0)) 1643 kobject_put(&glob->kobj); 1644 out: 1645 mutex_unlock(&ttm_global_mutex); 1646 return ret; 1647 } 1648 1649 int ttm_bo_device_release(struct ttm_bo_device *bdev) 1650 { 1651 struct ttm_bo_global *glob = &ttm_bo_glob; 1652 int ret = 0; 1653 unsigned i = TTM_NUM_MEM_TYPES; 1654 struct ttm_mem_type_manager *man; 1655 1656 while (i--) { 1657 man = &bdev->man[i]; 1658 if (man->has_type) { 1659 man->use_type = false; 1660 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { 1661 ret = -EBUSY; 1662 pr_err("DRM memory manager type %d is not clean\n", 1663 i); 1664 } 1665 man->has_type = false; 1666 } 1667 } 1668 1669 mutex_lock(&ttm_global_mutex); 1670 list_del(&bdev->device_list); 1671 mutex_unlock(&ttm_global_mutex); 1672 1673 cancel_delayed_work_sync(&bdev->wq); 1674 1675 if (ttm_bo_delayed_delete(bdev, true)) 1676 pr_debug("Delayed destroy list was clean\n"); 1677 1678 spin_lock(&glob->lru_lock); 1679 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) 1680 if (list_empty(&bdev->man[0].lru[0])) 1681 pr_debug("Swap list %d was clean\n", i); 1682 spin_unlock(&glob->lru_lock); 1683 1684 if (!ret) 1685 ttm_bo_global_release(); 1686 1687 return ret; 1688 } 1689 EXPORT_SYMBOL(ttm_bo_device_release); 1690 1691 int ttm_bo_device_init(struct ttm_bo_device *bdev, 1692 struct ttm_bo_driver *driver, 1693 struct address_space *mapping, 1694 struct drm_vma_offset_manager *vma_manager, 1695 bool need_dma32) 1696 { 1697 struct ttm_bo_global *glob = &ttm_bo_glob; 1698 int ret; 1699 1700 if (WARN_ON(vma_manager == NULL)) 1701 return -EINVAL; 1702 1703 ret = ttm_bo_global_init(); 1704 if (ret) 1705 return ret; 1706 1707 bdev->driver = driver; 1708 1709 memset(bdev->man, 0, sizeof(bdev->man)); 1710 1711 /* 1712 * Initialize the system memory buffer type. 1713 * Other types need to be driver / IOCTL initialized. 1714 */ 1715 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); 1716 if (unlikely(ret != 0)) 1717 goto out_no_sys; 1718 1719 bdev->vma_manager = vma_manager; 1720 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1721 INIT_LIST_HEAD(&bdev->ddestroy); 1722 bdev->dev_mapping = mapping; 1723 bdev->need_dma32 = need_dma32; 1724 mutex_lock(&ttm_global_mutex); 1725 list_add_tail(&bdev->device_list, &glob->device_list); 1726 mutex_unlock(&ttm_global_mutex); 1727 1728 return 0; 1729 out_no_sys: 1730 ttm_bo_global_release(); 1731 return ret; 1732 } 1733 EXPORT_SYMBOL(ttm_bo_device_init); 1734 1735 /* 1736 * buffer object vm functions. 
1737 */ 1738 1739 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1740 { 1741 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 1742 1743 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) { 1744 if (mem->mem_type == TTM_PL_SYSTEM) 1745 return false; 1746 1747 if (man->flags & TTM_MEMTYPE_FLAG_CMA) 1748 return false; 1749 1750 if (mem->placement & TTM_PL_FLAG_CACHED) 1751 return false; 1752 } 1753 return true; 1754 } 1755 1756 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) 1757 { 1758 struct ttm_bo_device *bdev = bo->bdev; 1759 1760 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); 1761 ttm_mem_io_free_vm(bo); 1762 } 1763 1764 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) 1765 { 1766 struct ttm_bo_device *bdev = bo->bdev; 1767 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 1768 1769 ttm_mem_io_lock(man, false); 1770 ttm_bo_unmap_virtual_locked(bo); 1771 ttm_mem_io_unlock(man); 1772 } 1773 1774 1775 EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1776 1777 int ttm_bo_wait(struct ttm_buffer_object *bo, 1778 bool interruptible, bool no_wait) 1779 { 1780 long timeout = 15 * HZ; 1781 1782 if (no_wait) { 1783 if (dma_resv_test_signaled_rcu(bo->base.resv, true)) 1784 return 0; 1785 else 1786 return -EBUSY; 1787 } 1788 1789 timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, 1790 interruptible, timeout); 1791 if (timeout < 0) 1792 return timeout; 1793 1794 if (timeout == 0) 1795 return -EBUSY; 1796 1797 dma_resv_add_excl_fence(bo->base.resv, NULL); 1798 return 0; 1799 } 1800 EXPORT_SYMBOL(ttm_bo_wait); 1801 1802 /** 1803 * A buffer object shrink method that tries to swap out the first 1804 * buffer object on the bo_global::swap_lru list. 1805 */ 1806 int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) 1807 { 1808 struct ttm_buffer_object *bo; 1809 int ret = -EBUSY; 1810 bool locked; 1811 unsigned i; 1812 1813 spin_lock(&glob->lru_lock); 1814 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { 1815 list_for_each_entry(bo, &glob->swap_lru[i], swap) { 1816 if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked, 1817 NULL)) { 1818 ret = 0; 1819 break; 1820 } 1821 } 1822 if (!ret) 1823 break; 1824 } 1825 1826 if (ret) { 1827 spin_unlock(&glob->lru_lock); 1828 return ret; 1829 } 1830 1831 kref_get(&bo->list_kref); 1832 1833 if (!list_empty(&bo->ddestroy)) { 1834 ret = ttm_bo_cleanup_refs(bo, false, false, locked); 1835 kref_put(&bo->list_kref, ttm_bo_release_list); 1836 return ret; 1837 } 1838 1839 ttm_bo_del_from_lru(bo); 1840 spin_unlock(&glob->lru_lock); 1841 1842 /** 1843 * Move to system cached 1844 */ 1845 1846 if (bo->mem.mem_type != TTM_PL_SYSTEM || 1847 bo->ttm->caching_state != tt_cached) { 1848 struct ttm_operation_ctx ctx = { false, false }; 1849 struct ttm_mem_reg evict_mem; 1850 1851 evict_mem = bo->mem; 1852 evict_mem.mm_node = NULL; 1853 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; 1854 evict_mem.mem_type = TTM_PL_SYSTEM; 1855 1856 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); 1857 if (unlikely(ret != 0)) 1858 goto out; 1859 } 1860 1861 /** 1862 * Make sure BO is idle. 1863 */ 1864 1865 ret = ttm_bo_wait(bo, false, false); 1866 if (unlikely(ret != 0)) 1867 goto out; 1868 1869 ttm_bo_unmap_virtual(bo); 1870 1871 /** 1872 * Swap out. Buffer will be swapped in again as soon as 1873 * anyone tries to access a ttm page. 
1874 */ 1875 1876 if (bo->bdev->driver->swap_notify) 1877 bo->bdev->driver->swap_notify(bo); 1878 1879 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); 1880 out: 1881 1882 /** 1883 * 1884 * Unreserve without putting on LRU to avoid swapping out an 1885 * already swapped buffer. 1886 */ 1887 if (locked) 1888 dma_resv_unlock(bo->base.resv); 1889 kref_put(&bo->list_kref, ttm_bo_release_list); 1890 return ret; 1891 } 1892 EXPORT_SYMBOL(ttm_bo_swapout); 1893 1894 void ttm_bo_swapout_all(struct ttm_bo_device *bdev) 1895 { 1896 struct ttm_operation_ctx ctx = { 1897 .interruptible = false, 1898 .no_wait_gpu = false 1899 }; 1900 1901 while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0); 1902 } 1903 EXPORT_SYMBOL(ttm_bo_swapout_all); 1904
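
/*
 * Illustrative sketch only (not compiled as part of this file): one way a
 * driver could use the API exported above to create a buffer object and
 * have it placed in VRAM. "struct my_bo", my_bo_destroy() and my_bo_create()
 * are made-up names, and the TTM_PL_FLAG_VRAM placement assumes the driver
 * has initialized a VRAM manager with ttm_bo_init_mm(); only the ttm_*
 * calls are the ones implemented or exported in this file.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;	// driver object embedding the TTM BO
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, tbo));
 *	}
 *
 *	static int my_bo_create(struct ttm_bo_device *bdev, unsigned long size,
 *				struct my_bo **out)
 *	{
 *		struct ttm_place place = {
 *			.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *		};
 *		struct ttm_placement placement = {
 *			.num_placement = 1,      .placement = &place,
 *			.num_busy_placement = 1, .busy_placement = &place,
 *		};
 *		size_t acc_size = ttm_bo_acc_size(bdev, size,
 *						  sizeof(struct my_bo));
 *		struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		int ret;
 *
 *		if (!bo)
 *			return -ENOMEM;
 *
 *		// ttm_bo_init() validates the BO into the requested placement
 *		// and unreserves it before returning; on failure the BO has
 *		// already been freed through my_bo_destroy().
 *		ret = ttm_bo_init(bdev, &bo->tbo, size, ttm_bo_type_device,
 *				  &placement, 0, false, acc_size,
 *				  NULL, NULL, my_bo_destroy);
 *		if (ret)
 *			return ret;
 *
 *		*out = bo;
 *		return 0;
 *	}
 */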