/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_device::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
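/*
 * Usage sketch (illustrative, not part of this file): a driver that wants to
 * protect a just-used BO from eviction bumps it to the LRU tail while holding
 * both the reservation lock and the device's LRU lock. "bo" here stands for
 * any buffer object the driver already holds a reference to.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 *	dma_resv_unlock(bo->base.resv);
 */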
/**
 * ttm_bo_set_bulk_move - update BOs bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BOs bulk move object, making sure that resources are added/removed
 * as well. A bulk move allows moving many resources on the LRU at once,
 * resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a BO.
 * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
 * their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
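/*
 * Usage sketch (illustrative only): a driver groups the BOs of one VM in a
 * shared bulk move so that a single ttm_lru_bulk_move_tail() bumps them all.
 * "my_vm" and its members are hypothetical driver-side names.
 *
 *	struct my_vm {
 *		struct ttm_lru_bulk_move lru_bulk_move;
 *	};
 *
 *	// once, at VM creation:
 *	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *
 *	// for each BO belonging to the VM, with the BO reserved:
 *	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *
 *	// after command submission, bump the whole group at once:
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);
 */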
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Called with the bo reserved.
 * Releases GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it anymore. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences block for the BO to become idle
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);

		if (!dma_resv_test_signaled(bo->base.resv,
					    DMA_RESV_USAGE_BOOKKEEP) ||
		    (want_init_on_free() && (bo->ttm != NULL)) ||
		    bo->type == ttm_bo_type_sg ||
		    !dma_resv_trylock(bo->base.resv)) {
			/* The BO is not idle, resurrect it for delayed destroy */
			ttm_bo_flush_all_fences(bo);
			bo->deleted = true;

			spin_lock(&bo->bdev->lru_lock);

			/*
			 * Make pinned bos immediately available to
			 * shrinkers, now that they are queued for
			 * destruction.
			 *
			 * FIXME: QXL is triggering this. Can be removed when the
			 * driver is fixed.
			 */
			if (bo->pin_count) {
				bo->pin_count = 0;
				ttm_resource_move_to_lru_tail(bo->resource);
			}

			kref_init(&bo->kref);
			spin_unlock(&bo->bdev->lru_lock);

			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);

			/* Schedule the worker on the closest NUMA node. This
			 * improves performance since system memory might be
			 * cleared on free and that is best done on a CPU core
			 * close to it.
			 */
			queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
			return;
		}

		ttm_bo_cleanup_memtype_use(bo);
		dma_resv_unlock(bo->base.resv);
	}

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}
/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = 1;
	hop_placement.placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	struct ttm_resource *res = bo->resource;
	struct ttm_device *bdev = bo->bdev;

	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
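/*
 * Driver-side sketch (illustrative, hypothetical "my_" names): drivers plug
 * their own policy into ttm_device_funcs.eviction_valuable and typically fall
 * back to this default for everything they don't treat specially.
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		// e.g. keep special BOs resident; the flag is hypothetical
 *		if (to_my_bo(bo)->never_evict)
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 *
 *	static const struct ttm_device_funcs my_ttm_funcs = {
 *		...
 *		.eviction_valuable = my_eviction_valuable,
 *	};
 */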
/**
 * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
 * @bdev: The ttm device.
 * @man: The manager whose bo to evict.
 * @ctx: The TTM operation ctx governing the eviction.
 *
 * Return: 0 if successful or the resource disappeared. Negative error code on error.
 */
int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	struct ttm_resource_cursor cursor;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	unsigned int mem_type;
	int ret = 0;

	spin_lock(&bdev->lru_lock);
	res = ttm_resource_manager_first(man, &cursor);
	ttm_resource_cursor_fini(&cursor);
	if (!res) {
		ret = -ENOENT;
		goto out_no_ref;
	}
	bo = res->bo;
	if (!ttm_bo_get_unless_zero(bo))
		goto out_no_ref;
	mem_type = res->mem_type;
	spin_unlock(&bdev->lru_lock);
	ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
	if (ret)
		goto out_no_lock;
	if (!bo->resource || bo->resource->mem_type != mem_type)
		goto out_bo_moved;

	if (bo->deleted) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (!ret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		ret = ttm_bo_evict(bo, ctx);
	}
out_bo_moved:
	dma_resv_unlock(bo->base.resv);
out_no_lock:
	ttm_bo_put(bo);
	return ret;

out_no_ref:
	spin_unlock(&bdev->lru_lock);
	return ret;
}

/**
 * struct ttm_bo_evict_walk - Parameters for the evict walk.
 */
struct ttm_bo_evict_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @place: The place passed to the resource allocation. */
	const struct ttm_place *place;
	/** @evictor: The buffer object we're trying to make room for. */
	struct ttm_buffer_object *evictor;
	/** @res: The allocated resource if any. */
	struct ttm_resource **res;
	/** @evicted: Number of successful evictions. */
	unsigned long evicted;
};

static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_bo_evict_walk *evict_walk =
		container_of(walk, typeof(*evict_walk), walk);
	s64 lret;

	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
		return 0;

	if (bo->deleted) {
		lret = ttm_bo_wait_ctx(bo, walk->ctx);
		if (!lret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		lret = ttm_bo_evict(bo, walk->ctx);
	}

	if (lret)
		goto out;

	evict_walk->evicted++;
	if (evict_walk->res)
		lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
					  evict_walk->res);
	if (lret == 0)
		return 1;
out:
	/* Errors that should terminate the walk. */
	if (lret == -ENOSPC)
		return -EBUSY;

	return lret;
}

static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
	.process_bo = ttm_bo_evict_cb,
};
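/*
 * Caller sketch (illustrative): draining a manager by repeatedly evicting the
 * LRU head, e.g. before disabling a memory type. Error handling is minimal.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = false };
 *	int ret;
 *
 *	do {
 *		ret = ttm_bo_evict_first(bdev, man, &ctx);
 *	} while (!ret);
 *
 *	// -ENOENT means the LRU list is empty and the manager is drained.
 */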
static int ttm_bo_evict_alloc(struct ttm_device *bdev,
			      struct ttm_resource_manager *man,
			      const struct ttm_place *place,
			      struct ttm_buffer_object *evictor,
			      struct ttm_operation_ctx *ctx,
			      struct ww_acquire_ctx *ticket,
			      struct ttm_resource **res)
{
	struct ttm_bo_evict_walk evict_walk = {
		.walk = {
			.ops = &ttm_evict_walk_ops,
			.ctx = ctx,
			.ticket = ticket,
		},
		.place = place,
		.evictor = evictor,
		.res = res,
	};
	s64 lret;

	evict_walk.walk.trylock_only = true;
	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	if (lret || !ticket)
		goto out;

	/* If ticket-locking, repeat while making progress. */
	evict_walk.walk.trylock_only = false;
	do {
		/* The walk may clear the evict_walk.walk.ticket field */
		evict_walk.walk.ticket = ticket;
		evict_walk.evicted = 0;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	} while (!lret && evict_walk.evicted);
out:
	if (lret < 0)
		return lret;
	if (lret == 0)
		return -EBUSY;
	return 0;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	++bo->pin_count;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	--bo->pin_count;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
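/*
 * Usage sketch (illustrative): pinning a BO, e.g. while it is scanned out.
 * The pin count nests, so each ttm_bo_pin() must be balanced by a
 * ttm_bo_unpin(), and both require the reservation lock.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	// ... the BO cannot be evicted while pinned ...
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */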
/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

/**
 * ttm_bo_alloc_resource - Allocate backing store for a BO
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @ctx: if and how to sleep, lock buffers and alloc memory
 * @force_space: If we should evict buffers to force space
 * @res: The resulting struct ttm_resource.
 *
 * Allocates a resource for the buffer object pointed to by @bo, using the
 * placement flags in @placement, potentially evicting other buffer objects when
 * @force_space is true.
 * This function may sleep while waiting for resources to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == true).
 * -ENOSPC: Could not allocate space for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement,
				 struct ttm_operation_ctx *ctx,
				 bool force_space,
				 struct ttm_resource **res)
{
	struct ttm_device *bdev = bo->bdev;
	struct ww_acquire_ctx *ticket;
	int i, ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;
		bool may_evict;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
		ret = ttm_resource_alloc(bo, place, res);
		if (ret) {
			if (ret != -ENOSPC)
				return ret;
			if (!may_evict)
				continue;

			ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
						 ticket, res);
			if (ret == -EBUSY)
				continue;
			if (ret)
				return ret;
		}

		ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, res);
			if (ret == -EBUSY)
				continue;

			return ret;
		}
		return 0;
	}

	return -ENOSPC;
}

/*
 * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @res: The resulting struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Tries an idle allocation first, then retries with forceful eviction of
 * buffers. See ttm_bo_alloc_resource for details.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **res,
		     struct ttm_operation_ctx *ctx)
{
	bool force_space = false;
	int ret;

	do {
		ret = ttm_bo_alloc_resource(bo, placement, ctx,
					    force_space, res);
		force_space = !force_space;
	} while (ret == -ENOSPC && force_space);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
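/*
 * Placement sketch (illustrative): with the two-pass scheme above, a place
 * marked TTM_PL_FLAG_DESIRED is only tried in the idle pass, while one marked
 * TTM_PL_FLAG_FALLBACK is only tried in the eviction pass. A driver asking
 * for "VRAM preferred, GTT as fallback" could therefore use:
 *
 *	struct ttm_place places[] = {
 *		{ .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_DESIRED },
 *		{ .mem_type = TTM_PL_TT, .flags = TTM_PL_FLAG_FALLBACK },
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = ARRAY_SIZE(places),
 *		.placement = places,
 *	};
 *	struct ttm_resource *res;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &res, &ctx);
 */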
/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *res;
	struct ttm_place hop;
	bool force_space;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement)
		return ttm_bo_pipeline_gutting(bo);

	force_space = false;
	do {
		/* Check whether we need to move the buffer. */
		if (bo->resource &&
		    ttm_resource_compatible(bo->resource, placement,
					    force_space))
			return 0;

		/* Moving of pinned BOs is forbidden */
		if (bo->pin_count)
			return -EINVAL;

		/*
		 * Determine where to move the buffer.
		 *
		 * If the driver determines that the move is going to need
		 * an extra step, it returns -EMULTIHOP and the buffer is
		 * moved to the temporary stop, after which the driver is
		 * called again to make the second hop.
		 */
		ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
					    &res);
		force_space = !force_space;
		if (ret == -ENOSPC)
			continue;
		if (ret)
			return ret;

bounce:
		ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
		if (ret == -EMULTIHOP) {
			ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
			/* try and move to final place now. */
			if (!ret)
				goto bounce;
		}
		if (ret) {
			ttm_resource_free(bo, &res);
			return ret;
		}

	} while (ret && force_space);

	/* For backward compatibility with userspace */
	if (ret == -ENOSPC)
		return -ENOMEM;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
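/*
 * Usage sketch (illustrative): revalidating an already created BO into a
 * single placement, with the reservation held and interruptible waits.
 *
 *	struct ttm_place place = { .mem_type = TTM_PL_VRAM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	ttm_bo_unreserve(bo);
 */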
/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);

	ret = ttm_bo_validate(bo, placement, ctx);
	if (unlikely(ret))
		goto err_unlock;

	return 0;

err_unlock:
	if (!resv)
		dma_resv_unlock(bo->base.resv);

err_put:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
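/*
 * Creation sketch (illustrative, hypothetical "my_" names): a driver embeds
 * struct ttm_buffer_object in its own BO type and initializes the embedded
 * GEM object first, since TTM takes the size from bo->base.size. On failure,
 * ttm_bo_init_reserved() calls @destroy, so the caller must not free again.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, tbo));
 *	}
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct my_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *
 *	drm_gem_private_object_init(drm_dev, &mbo->tbo.base, size);
 *	ret = ttm_bo_init_reserved(bdev, &mbo->tbo, ttm_bo_type_device,
 *				   &placement, 0, &ctx, NULL, NULL,
 *				   my_bo_destroy);
 *	if (!ret)
 *		ttm_bo_unreserve(&mbo->tbo);
 */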
/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);

/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. Used timeout depends on the context.
 * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a
 * signal or zero on success.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;

	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
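/*
 * Usage sketch (illustrative): a non-blocking idle check followed by an
 * interruptible wait, both driven purely by the operation context.
 *
 *	struct ttm_operation_ctx busy_check = { .no_wait_gpu = true };
 *	struct ttm_operation_ctx wait = { .interruptible = true };
 *
 *	if (ttm_bo_wait_ctx(bo, &busy_check) == -EBUSY) {
 *		// still busy; now actually wait (up to the 15s timeout)
 *		ret = ttm_bo_wait_ctx(bo, &wait);
 *	}
 */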
/**
 * struct ttm_bo_swapout_walk - Parameters for the swapout walk
 */
struct ttm_bo_swapout_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
	gfp_t gfp_flags;
};

static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_place place = {.mem_type = bo->resource->mem_type};
	struct ttm_bo_swapout_walk *swapout_walk =
		container_of(walk, typeof(*swapout_walk), walk);
	struct ttm_operation_ctx *ctx = walk->ctx;
	s64 ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
		ret = -EBUSY;
		goto out;
	}

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
		ret = -EBUSY;
		goto out;
	}

	if (bo->deleted) {
		pgoff_t num_pages = bo->ttm->num_pages;

		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto out;

		ttm_bo_cleanup_memtype_use(bo);
		ret = num_pages;
		goto out;
	}

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem);
		if (ret)
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out;

	ttm_bo_unmap_virtual(bo);
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm))
		ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);

out:
	/* Consider -ENOMEM and -ENOSPC non-fatal. */
	if (ret == -ENOMEM || ret == -ENOSPC)
		ret = -EBUSY;

	return ret;
}

const struct ttm_lru_walk_ops ttm_swap_ops = {
	.process_bo = ttm_bo_swapout_cb,
};

/**
 * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
 * @bdev: The ttm device.
 * @ctx: The ttm_operation_ctx governing the swapout operation.
 * @man: The resource manager whose resources / buffer objects are
 * going to be swapped out.
 * @gfp_flags: The gfp flags used for shmem page allocations.
 * @target: The desired number of bytes to swap out.
 *
 * Return: The number of bytes actually swapped out, or negative error code
 * on error.
 */
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target)
{
	struct ttm_bo_swapout_walk swapout_walk = {
		.walk = {
			.ops = &ttm_swap_ops,
			.ctx = ctx,
			.trylock_only = true,
		},
		.gfp_flags = gfp_flags,
	};

	return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}
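/*
 * Caller sketch (illustrative): a simplified memory-pressure path trying to
 * free up to "target" bytes from a system-memory manager. The real callers
 * live in core TTM's swapping code; this is only meant to show the contract.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = false,
 *	};
 *	s64 freed;
 *
 *	freed = ttm_bo_swapout(bdev, &ctx, man, GFP_KERNEL, target);
 *	if (freed < 0 && freed != -EBUSY)
 *		pr_warn("swapout failed: %lld\n", freed);
 */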