/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		list_add(&bo->lru, bdev->driver->lru_tail(bo));
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;

	if (bdev->driver->lru_removal)
		bdev->driver->lru_removal(bo);

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	if (bdev->driver->lru_removal)
		bdev->driver->lru_removal(bo);

	put_count = ttm_bo_del_from_lru(bo);
	ttm_bo_list_ref_sub(bo, put_count, true);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
{
	return bo->bdev->man[bo->mem.mem_type].lru.prev;
}
EXPORT_SYMBOL(ttm_bo_default_lru_tail);

struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
{
	return bo->glob->swap_lru.prev;
}
EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);

/*
 * Call bo->mutex locked.
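 * Allocates the struct ttm_tt page array that backs the object for
 * non-fixed (system / GTT) placements; @zero_alloc requests zeroed
 * pages for device-type buffers.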
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
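	 * Fixed memory types (TTM_MEMTYPE_FLAG_FIXED, typically a device
	 * aperture) are not backed by a struct ttm_tt; for all other
	 * destinations the ttm must exist, and it is bound below unless
	 * the new placement is TTM_PL_SYSTEM.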
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
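 *
 * Unbinds and destroys the backing ttm_tt and returns the memory node
 * to its range manager via ttm_bo_mem_put().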
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ?
				      1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, NULL);
		if (!ret) {
			if (place && (place->fpfn || place->lpfn)) {
				/* Don't evict this BO if it's outside of the
				 * requested placement range
				 */
				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
				    (place->lpfn && place->lpfn <= bo->mem.start)) {
					__ttm_bo_unreserve(bo);
					ret = -EBUSY;
					continue;
				}
			}

			break;
		}
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
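 *
 * Loops calling the manager's get_node(); whenever that yields no node,
 * ttm_mem_evict_first() pushes out the LRU head and the allocation is
 * retried, until a node is obtained or eviction itself fails.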
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
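 *
 * Returns 0 on success, -EINVAL if no compatible memory type was found,
 * -ERESTARTSYS if interrupted while evicting, and -ENOMEM otherwise.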
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * Don't wait for the BO on initial allocation. This is important when
	 * the BO has an imported reservation object.
	 */
	if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
		/*
		 * FIXME: It's possible to pipeline buffer moves.
		 * Have the driver move function wait for idle when necessary,
		 * instead of doing it here.
		 */
		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
		if (ret)
			return ret;
	}
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
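	 * A buffer that stays in TTM_PL_SYSTEM never goes through
	 * ttm_bo_handle_move_mem(), so allocate its backing ttm here.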
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv) {
		ttm_bo_unreserve(bo);

	} else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
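	 * ttm_mem_evict_first() takes and drops lru_lock itself, so restart
	 * from the list head on every iteration instead of iterating with
	 * list_for_each.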
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
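 *
 * Registered as the ttm_mem_shrink callback in ttm_bo_global_init() and
 * expected to be invoked by the TTM memory accounting code under memory
 * pressure.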
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	ret = ttm_bo_wait(bo, false, false);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * Use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}