/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to look up and reserve an
 * object. This function must be called with struct ttm_device::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/**
 * ttm_bo_set_bulk_move - update BOs bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BO's bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows moving many resources on the LRU at
 * once, resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a
 * BO. ttm_lru_bulk_move_tail() should be used to move all resources to the
 * tail of their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);

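/*
 * Usage sketch (editor's illustration, not part of this file): a driver
 * that keeps all BOs of one VM in a shared bulk move section could pair
 * this helper with ttm_lru_bulk_move_init() and ttm_lru_bulk_move_tail()
 * roughly as follows; error handling is elided.
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_set_bulk_move(bo, &bulk);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	// Later, e.g. after command submission, bump the whole
 *	// section to the LRU tail in one go:
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bo->bdev->lru_lock);
 */
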
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_bo_populate(bo, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

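/*
 * Illustrative sketch of the -EMULTIHOP contract seen from a driver's
 * struct ttm_device_funcs::move() callback; mydrv_*() and
 * needs_intermediate_copy() are hypothetical driver helpers.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_resource *new_mem,
 *				 struct ttm_place *hop)
 *	{
 *		if (needs_intermediate_copy(bo, new_mem)) {
 *			// Ask TTM to bounce through a temporary domain.
 *			hop->mem_type = TTM_PL_TT;
 *			hop->flags = TTM_PL_FLAG_TEMPORARY;
 *			return -EMULTIHOP;
 *		}
 *		return mydrv_copy(bo, evict, ctx, new_mem);
 *	}
 *
 * On -EMULTIHOP, ttm_bo_handle_move_mem() returns to its caller, which
 * bounces the BO through @hop and then retries the final move.
 */
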
/*
 * Called with bo::reserved held.
 * Releases the GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it anymore. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences, block for the BO to become idle.
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);

		if (!dma_resv_test_signaled(bo->base.resv,
					    DMA_RESV_USAGE_BOOKKEEP) ||
		    (want_init_on_free() && (bo->ttm != NULL)) ||
		    bo->type == ttm_bo_type_sg ||
		    !dma_resv_trylock(bo->base.resv)) {
			/* The BO is not idle, resurrect it for delayed destroy */
			ttm_bo_flush_all_fences(bo);
			bo->deleted = true;

			spin_lock(&bo->bdev->lru_lock);

			/*
			 * Make pinned bos immediately available to
			 * shrinkers, now that they are queued for
			 * destruction.
			 *
			 * FIXME: QXL is triggering this. Can be removed when the
			 * driver is fixed.
			 */
			if (bo->pin_count) {
				bo->pin_count = 0;
				ttm_resource_move_to_lru_tail(bo->resource);
			}

			kref_init(&bo->kref);
			spin_unlock(&bo->bdev->lru_lock);

			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);

			/* Schedule the worker on the closest NUMA node. This
			 * improves performance since system memory might be
			 * cleared on free and that is best done on a CPU core
			 * close to it.
			 */
			queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
			return;
		}

		ttm_bo_cleanup_memtype_use(bo);
		dma_resv_unlock(bo->base.resv);
	}

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}

/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

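/*
 * Typical reference pattern (editor's sketch): lookup paths that race with
 * destruction take a reference only if the refcount is still non-zero, and
 * balance it with ttm_bo_put(), as ttm_bo_evict_first() below does.
 *
 *	if (ttm_bo_get_unless_zero(bo)) {
 *		// ... use bo ...
 *		ttm_bo_put(bo);
 *	}
 */
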
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = 1;
	hop_placement.placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	struct ttm_resource *res = bo->resource;
	struct ttm_device *bdev = bo->bdev;

	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range.
	 */
	return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

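/*
 * Drivers may override struct ttm_device_funcs::eviction_valuable to veto
 * evictions; this exported helper is the default fallback. Hypothetical
 * sketch (mydrv_* names are made up):
 *
 *	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
 *					    const struct ttm_place *place)
 *	{
 *		if (mydrv_bo_is_in_use_by_hw(bo))
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */
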
/**
 * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
 * @bdev: The ttm device.
 * @man: The manager whose bo to evict.
 * @ctx: The TTM operation ctx governing the eviction.
 *
 * Return: 0 if successful or the resource disappeared. Negative error code on error.
 */
int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	struct ttm_resource_cursor cursor;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	unsigned int mem_type;
	int ret = 0;

	spin_lock(&bdev->lru_lock);
	res = ttm_resource_manager_first(man, &cursor);
	ttm_resource_cursor_fini(&cursor);
	if (!res) {
		ret = -ENOENT;
		goto out_no_ref;
	}
	bo = res->bo;
	if (!ttm_bo_get_unless_zero(bo))
		goto out_no_ref;
	mem_type = res->mem_type;
	spin_unlock(&bdev->lru_lock);
	ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
	if (ret)
		goto out_no_lock;
	if (!bo->resource || bo->resource->mem_type != mem_type)
		goto out_bo_moved;

	if (bo->deleted) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (!ret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		ret = ttm_bo_evict(bo, ctx);
	}
out_bo_moved:
	dma_resv_unlock(bo->base.resv);
out_no_lock:
	ttm_bo_put(bo);
	return ret;

out_no_ref:
	spin_unlock(&bdev->lru_lock);
	return ret;
}

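/*
 * Caller sketch (editor's illustration): a manager can be drained by
 * evicting LRU heads until the list is empty, treating -ENOENT as success.
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	int ret;
 *
 *	do {
 *		ret = ttm_bo_evict_first(bdev, man, &ctx);
 *		cond_resched();
 *	} while (!ret);
 *
 *	if (ret == -ENOENT)	// LRU list empty, manager drained
 *		ret = 0;
 */
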
/**
 * struct ttm_bo_evict_walk - Parameters for the evict walk.
 */
struct ttm_bo_evict_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @place: The place passed to the resource allocation. */
	const struct ttm_place *place;
	/** @evictor: The buffer object we're trying to make room for. */
	struct ttm_buffer_object *evictor;
	/** @res: The allocated resource if any. */
	struct ttm_resource **res;
	/** @evicted: Number of successful evictions. */
	unsigned long evicted;
};

static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_bo_evict_walk *evict_walk =
		container_of(walk, typeof(*evict_walk), walk);
	s64 lret;

	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
		return 0;

	if (bo->deleted) {
		lret = ttm_bo_wait_ctx(bo, walk->ctx);
		if (!lret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		lret = ttm_bo_evict(bo, walk->ctx);
	}

	if (lret)
		goto out;

	evict_walk->evicted++;
	if (evict_walk->res)
		lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
					  evict_walk->res);
	if (lret == 0)
		return 1;
out:
	/* Errors that should terminate the walk. */
	if (lret == -ENOSPC)
		return -EBUSY;

	return lret;
}

static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
	.process_bo = ttm_bo_evict_cb,
};

static int ttm_bo_evict_alloc(struct ttm_device *bdev,
			      struct ttm_resource_manager *man,
			      const struct ttm_place *place,
			      struct ttm_buffer_object *evictor,
			      struct ttm_operation_ctx *ctx,
			      struct ww_acquire_ctx *ticket,
			      struct ttm_resource **res)
{
	struct ttm_bo_evict_walk evict_walk = {
		.walk = {
			.ops = &ttm_evict_walk_ops,
			.ctx = ctx,
			.ticket = ticket,
		},
		.place = place,
		.evictor = evictor,
		.res = res,
	};
	s64 lret;

	evict_walk.walk.trylock_only = true;
	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	if (lret || !ticket)
		goto out;

	/* If ticket-locking, repeat while making progress. */
	evict_walk.walk.trylock_only = false;
	do {
		/* The walk may clear the evict_walk.walk.ticket field */
		evict_walk.walk.ticket = ticket;
		evict_walk.evicted = 0;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	} while (!lret && evict_walk.evicted);
out:
	if (lret < 0)
		return lret;
	if (lret == 0)
		return -EBUSY;
	return 0;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	if (!bo->pin_count++ && bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (!--bo->pin_count && bo->resource) {
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
	}
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);

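/*
 * Usage sketch (editor's illustration): pinning and unpinning both require
 * the reservation lock, and every ttm_bo_pin() must be balanced by a
 * ttm_bo_unpin() before the BO is released.
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ttm_bo_pin(bo);
 *	ttm_bo_unreserve(bo);
 *
 *	// ... BO cannot be evicted while pinned ...
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ttm_bo_unpin(bo);
 *	ttm_bo_unreserve(bo);
 */
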
/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

/**
 * ttm_bo_alloc_resource - Allocate backing store for a BO
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @ctx: if and how to sleep, lock buffers and alloc memory
 * @force_space: If we should evict buffers to force space
 * @res: The resulting struct ttm_resource.
 *
 * Allocates a resource for the buffer object pointed to by @bo, using the
 * placement flags in @placement, potentially evicting other buffer objects when
 * @force_space is true.
 * This function may sleep while waiting for resources to become available.
 * Returns:
 * -EBUSY: No space available (only if @ctx->no_wait_gpu is true).
 * -ENOSPC: Could not allocate space for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement,
				 struct ttm_operation_ctx *ctx,
				 bool force_space,
				 struct ttm_resource **res)
{
	struct ttm_device *bdev = bo->bdev;
	struct ww_acquire_ctx *ticket;
	int i, ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;
		bool may_evict;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
		ret = ttm_resource_alloc(bo, place, res);
		if (ret) {
			if (ret != -ENOSPC)
				return ret;
			if (!may_evict)
				continue;

			ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
						 ticket, res);
			if (ret == -EBUSY)
				continue;
			if (ret)
				return ret;
		}

		ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, res);
			if (ret == -EBUSY)
				continue;

			return ret;
		}
		return 0;
	}

	return -ENOSPC;
}

/*
 * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @res: The resulting struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Tries an idle allocation first and, failing that, forceful eviction of
 * buffers. See ttm_bo_alloc_resource() for details.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **res,
		     struct ttm_operation_ctx *ctx)
{
	bool force_space = false;
	int ret;

	do {
		ret = ttm_bo_alloc_resource(bo, placement, ctx,
					    force_space, res);
		force_space = !force_space;
	} while (ret == -ENOSPC && force_space);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

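/*
 * Caller sketch (editor's illustration): allocating space in a single
 * domain, mirroring what ttm_bo_bounce_temp_buffer() above does.
 *
 *	struct ttm_place place = { .mem_type = TTM_PL_TT };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *	};
 *	struct ttm_resource *new_mem;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &new_mem, ctx);
 *	if (ret)
 *		return ret;
 *	// new_mem now holds the allocated resource; free it with
 *	// ttm_resource_free(bo, &new_mem) if the move is abandoned.
 */
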
/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *res;
	struct ttm_place hop;
	bool force_space;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement)
		return ttm_bo_pipeline_gutting(bo);

	force_space = false;
	do {
		/* Check whether we need to move the buffer. */
		if (bo->resource &&
		    ttm_resource_compatible(bo->resource, placement,
					    force_space))
			return 0;

		/* Moving of pinned BOs is forbidden */
		if (bo->pin_count)
			return -EINVAL;

		/*
		 * Determine where to move the buffer.
		 *
		 * If the driver determines that the move needs an extra
		 * step, it returns -EMULTIHOP; the buffer is then moved
		 * to the temporary placement and the driver is called
		 * again to make the second hop.
		 */
		ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
					    &res);
		force_space = !force_space;
		if (ret == -ENOSPC)
			continue;
		if (ret)
			return ret;

bounce:
		ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
		if (ret == -EMULTIHOP) {
			ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
			/* try and move to final place now. */
			if (!ret)
				goto bounce;
		}
		if (ret) {
			ttm_resource_free(bo, &res);
			return ret;
		}

	} while (ret && force_space);

	/* For backward compatibility with userspace */
	if (ret == -ENOSPC)
		return -ENOMEM;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

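/*
 * Caller sketch (editor's illustration): validating a BO into VRAM with a
 * system-memory fallback; the flag split matches the DESIRED/FALLBACK
 * handling in ttm_bo_alloc_resource() above.
 *
 *	struct ttm_place places[] = {
 *		{ .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_DESIRED },
 *		{ .mem_type = TTM_PL_SYSTEM, .flags = TTM_PL_FLAG_FALLBACK },
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = ARRAY_SIZE(places),
 *		.placement = places,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */
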
/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref
 * is usually set to 1, but note that in some situations, other tasks may
 * already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);

	ret = ttm_bo_validate(bo, placement, ctx);
	if (unlikely(ret))
		goto err_unlock;

	return 0;

err_unlock:
	if (!resv)
		dma_resv_unlock(bo->base.resv);

err_put:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

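/*
 * Usage sketch (editor's illustration): embedding the TTM BO in a
 * hypothetical driver object (struct mydrv_bo) and freeing it from the
 * @destroy callback. The embedded GEM object is assumed to have been
 * initialized beforehand, e.g. with drm_gem_private_object_init(), so
 * that bo->base.size is valid.
 *
 *	struct mydrv_bo {
 *		struct ttm_buffer_object base;
 *		// driver-private state ...
 *	};
 *
 *	static void mydrv_bo_destroy(struct ttm_buffer_object *bo)
 *	{
 *		kfree(container_of(bo, struct mydrv_bo, base));
 *	}
 *
 *	ret = ttm_bo_init_reserved(bdev, &mbo->base, ttm_bo_type_device,
 *				   &placement, 0, &ctx, NULL, NULL,
 *				   mydrv_bo_destroy);
 *	if (ret)
 *		return ret;	// mydrv_bo_destroy() was already called
 *	ttm_bo_unreserve(&mbo->base);
 */
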
/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref
 * is usually set to 1, but note that in some situations, other tasks may
 * already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);

/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. The timeout used depends on the context.
 * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a
 * signal or zero on success.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;

	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);

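/*
 * Caller sketch (editor's illustration): the two waiting modes selected by
 * struct ttm_operation_ctx.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,	// allow -ERESTARTSYS on signals
 *		.no_wait_gpu = false,	// false: block up to 15s on fences;
 *					// true: just poll, -EBUSY if busy
 *	};
 *
 *	ret = ttm_bo_wait_ctx(bo, &ctx);
 */
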
/**
 * struct ttm_bo_swapout_walk - Parameters for the swapout walk
 */
struct ttm_bo_swapout_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
	gfp_t gfp_flags;
};

static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_place place = {.mem_type = bo->resource->mem_type};
	struct ttm_bo_swapout_walk *swapout_walk =
		container_of(walk, typeof(*swapout_walk), walk);
	struct ttm_operation_ctx *ctx = walk->ctx;
	s64 ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as the new placement to also cover the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
		ret = -EBUSY;
		goto out;
	}

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
		ret = -EBUSY;
		goto out;
	}

	if (bo->deleted) {
		pgoff_t num_pages = bo->ttm->num_pages;

		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto out;

		ttm_bo_cleanup_memtype_use(bo);
		ret = num_pages;
		goto out;
	}

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem);
		if (ret)
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out;

	ttm_bo_unmap_virtual(bo);
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm)) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);

		ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);

		spin_lock(&bo->bdev->lru_lock);
		if (ret)
			ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

out:
	/* Consider -ENOMEM and -ENOSPC non-fatal. */
	if (ret == -ENOMEM || ret == -ENOSPC)
		ret = -EBUSY;

	return ret;
}

const struct ttm_lru_walk_ops ttm_swap_ops = {
	.process_bo = ttm_bo_swapout_cb,
};

/**
 * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
 * @bdev: The ttm device.
 * @ctx: The ttm_operation_ctx governing the swapout operation.
 * @man: The resource manager whose resources / buffer objects are
 * going to be swapped out.
 * @gfp_flags: The gfp flags used for shmem page allocations.
 * @target: The desired number of bytes to swap out.
 *
 * Return: The number of bytes actually swapped out, or negative error code
 * on error.
 */
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target)
{
	struct ttm_bo_swapout_walk swapout_walk = {
		.walk = {
			.ops = &ttm_swap_ops,
			.ctx = ctx,
			.trylock_only = true,
		},
		.gfp_flags = gfp_flags,
	};

	return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}

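/*
 * Caller sketch (editor's illustration): a device-wide shrinker pass over
 * the system domain might invoke the swapout walk like this; @target is
 * the number of bytes the caller wants reclaimed.
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	struct ttm_resource_manager *man =
 *		ttm_manager_type(bdev, TTM_PL_SYSTEM);
 *	s64 freed;
 *
 *	freed = ttm_bo_swapout(bdev, &ctx, man, GFP_KERNEL, target);
 *	if (freed < 0)
 *		return freed;	// negative error code
 */
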
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}

/**
 * ttm_bo_populate() - Ensure that a buffer object has backing pages
 * @bo: The buffer object
 * @ctx: The ttm_operation_ctx governing the operation.
 *
 * For buffer objects in a memory type whose manager uses
 * struct ttm_tt for backing pages, ensure those backing pages
 * are present and their content is valid. The bo's resource is
 * also placed on the correct LRU list if it was previously swapped
 * out.
 *
 * Return: 0 if successful, negative error code on failure.
 * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
 * is set to true.
 */
int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *tt = bo->ttm;
	bool swapped;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (!tt)
		return 0;

	swapped = ttm_tt_is_swapped(tt);
	ret = ttm_tt_populate(bo->bdev, tt, ctx);
	if (ret)
		return ret;

	if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
	    bo->resource) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_populate);