/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/swap.h>
#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);
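
/*
 * Example (an illustrative sketch, not taken from a driver): copying a
 * populated source ttm_tt into an io-mapped destination resource using
 * the iterator helpers. The surrounding setup (bdev, dst_mem, src_tt) is
 * assumed to exist in the caller:
 *
 *	struct ttm_kmap_iter_linear_io io;
 *	struct ttm_kmap_iter_tt tt;
 *	struct ttm_kmap_iter *dst, *src;
 *
 *	dst = ttm_kmap_iter_linear_io_init(&io, bdev, dst_mem);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	src = ttm_kmap_iter_tt_init(&tt, src_tt);
 *	ttm_move_memcpy(false, PFN_UP(dst_mem->size), dst, src);
 *	ttm_kmap_iter_linear_io_fini(&io, bdev, dst_mem);
 */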

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@dst_mem)->mm_node to NULL,
 * and update the (@bo)->resource placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (WARN_ON(!src_mem))
		return -EINVAL;

	src_man = ttm_manager_type(bdev, src_mem->mem_type);
	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_bo_populate(bo, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}
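
/*
 * Example (an illustrative sketch, the mydrv names are hypothetical): a
 * driver's ttm_device_funcs::move callback typically falls back to
 * ttm_bo_move_memcpy() when no hardware copy engine path is available:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_resource *new_mem,
 *				 struct ttm_place *hop)
 *	{
 *		if (!mydrv_have_copy_engine(bo))
 *			return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *		... hardware copy path ...
 *	}
 */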

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
		ttm_bo_set_bulk_move(&fbo->base, NULL);
	} else {
		fbo->base.bulk_move = NULL;
	}

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		kfree(fbo);
		return ret;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	if (man->use_tt) {
		caching = bo->ttm->caching;
		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
			tmp = pgprot_decrypted(tmp);
	} else {
		caching = res->bus.caching;
	}

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
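
/*
 * Example (an illustrative sketch): a CPU fault handler would typically
 * derive the PTE protection for a BO mapping from the vma's default
 * protection, similar to what TTM's own fault helper does:
 *
 *	pgprot_t prot = ttm_io_prot(bo, bo->resource, vma->vm_page_prot);
 */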

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_bo_populate(bo, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached &&
	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > PFN_UP(bo->resource->size))
		return -EINVAL;
	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
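
/*
 * Example (an illustrative sketch): reading the first dword of a BO
 * through a temporary kernel mapping. ttm_kmap_obj_virtual() reports
 * whether the returned address is iomem:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virt;
 *	u32 val;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	val = is_iomem ? ioread32((void __iomem *)virt) : *(u32 *)virt;
 *	ttm_bo_kunmap(&map);
 */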

/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_bo_populate(bo, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	dma_resv_assert_held(bo->base.resv);

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
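
/*
 * Example (an illustrative sketch): writing into a reserved BO through an
 * iosys_map, letting the map helpers pick the right access method for
 * both iomem and system-memory mappings. data and len are assumed to
 * exist in the caller:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_vmap(bo, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, data, len);
 *		ttm_bo_vunmap(bo, &map);
 *	}
 *	dma_resv_unlock(bo->base.resv);
 */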

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	long ret;

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    false, 15 * HZ);
	if (ret == 0)
		return -EBUSY;
	if (ret < 0)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * The BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
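
/*
 * Example (an illustrative sketch, the mydrv helper is hypothetical):
 * after scheduling a blit on the hardware, a driver's move callback hands
 * the resulting fence to ttm_bo_move_accel_cleanup() and lets TTM
 * pipeline the release of the old placement:
 *
 *	fence = mydrv_copy_buffer(bo, bo->resource, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *	dma_fence_put(fence);
 *	return ret;
 */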

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	struct ttm_tt *ttm;
	int ret;

	/* If already idle, no need for ghost object dance. */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				return ret;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		return ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret) {
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);
	}

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);
	return ret;
}
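
/*
 * Example (an illustrative sketch): purging a BO whose contents the
 * caller has decided are disposable. The reservation lock must be held
 * across the call:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_pipeline_gutting(bo);
 *	dma_resv_unlock(bo->base.resv);
 */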

static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
				 struct ttm_buffer_object *bo,
				 bool *needs_unlock)
{
	*needs_unlock = false;

	if (dma_resv_trylock(bo->base.resv)) {
		*needs_unlock = true;
		return true;
	}

	if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
		dma_resv_assert_held(bo->base.resv);
		return true;
	}

	return false;
}

static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
				   struct ttm_buffer_object *bo,
				   bool *needs_unlock)
{
	struct dma_resv *resv = bo->base.resv;
	int ret;

	if (walk->ctx->interruptible)
		ret = dma_resv_lock_interruptible(resv, walk->ticket);
	else
		ret = dma_resv_lock(resv, walk->ticket);

	if (!ret) {
		*needs_unlock = true;
		/*
		 * Only a single ticketlock per loop. Ticketlocks are prone
		 * to return -EDEADLK causing the eviction to fail, so
		 * after waiting for the ticketlock, revert back to
		 * trylocking for this walk.
		 */
		walk->ticket = NULL;
	} else if (ret == -EDEADLK) {
		/* Caller needs to exit the ww transaction. */
		ret = -ENOSPC;
	}

	return ret;
}

static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
{
	if (locked)
		dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
 * valid items.
 * @walk: describe the walks and actions taken
 * @bdev: The TTM device.
 * @man: The struct ttm_resource manager whose LRU lists we're walking.
 * @target: The end condition for the walk.
 *
 * The LRU lists of @man are walked, and for each struct ttm_resource
 * encountered, the corresponding ttm_buffer_object is locked and taken a
 * reference on, and the LRU lock is dropped. The LRU lock may be dropped
 * before locking and, in that case, it's verified that the item actually
 * remains on the LRU list after the lock, and that the buffer object didn't
 * switch resource in between.
 *
 * With a locked object, the actions indicated by @walk->process_bo are
 * performed, and after that, the bo is unlocked, the refcount dropped and the
 * next struct ttm_resource is processed. Here, the walker relies on
 * TTM's restartable LRU list implementation.
 *
 * Typically @walk->process_bo() would return the number of pages evicted,
 * swapped or shrunken, so that when the total exceeds @target, or when the
 * LRU list has been walked in full, iteration is terminated. It's also
 * terminated on error. Note that the definition of @target is done by the
 * caller, it could have a different meaning than the number of pages.
 *
 * Note that the way dma_resv individualization is done, locking needs to be
 * done either with the LRU lock held (trylocking only) or with a reference
 * on the object.
 *
 * Return: The progress made towards target or negative error code on error.
 */
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	s64 progress = 0;
	s64 lret;

	spin_lock(&bdev->lru_lock);
	ttm_resource_cursor_init(&cursor, man);
	ttm_resource_manager_for_each_res(&cursor, res) {
		struct ttm_buffer_object *bo = res->bo;
		bool bo_needs_unlock = false;
		bool bo_locked = false;
		int mem_type;

		/*
		 * Attempt a trylock before taking a reference on the bo,
		 * since if we do it the other way around, and the trylock fails,
		 * we need to drop the lru lock to put the bo.
		 */
		if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
			bo_locked = true;
		else if (!walk->ticket || walk->ctx->no_wait_gpu ||
			 walk->trylock_only)
			continue;

		if (!ttm_bo_get_unless_zero(bo)) {
			ttm_lru_walk_unlock(bo, bo_needs_unlock);
			continue;
		}

		mem_type = res->mem_type;
		spin_unlock(&bdev->lru_lock);

		lret = 0;
		if (!bo_locked)
			lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);

		/*
		 * Note that in between the release of the lru lock and the
		 * ticketlock, the bo may have switched resource,
		 * and also memory type, since the resource may have been
		 * freed and allocated again with a different memory type.
		 * In that case, just skip it.
		 */
		if (!lret && bo->resource && bo->resource->mem_type == mem_type)
			lret = walk->ops->process_bo(walk, bo);

		ttm_lru_walk_unlock(bo, bo_needs_unlock);
		ttm_bo_put(bo);
		if (lret == -EBUSY || lret == -EALREADY)
			lret = 0;
		progress = (lret < 0) ? lret : progress + lret;

		spin_lock(&bdev->lru_lock);
		if (progress < 0 || progress >= target)
			break;
	}
	ttm_resource_cursor_fini(&cursor);
	spin_unlock(&bdev->lru_lock);

	return progress;
}
EXPORT_SYMBOL(ttm_lru_walk_for_evict);

static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
{
	struct ttm_buffer_object *bo = curs->bo;

	if (bo) {
		if (curs->needs_unlock)
			dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		curs->bo = NULL;
	}
}
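
/*
 * Example (an illustrative sketch, ops and callback names hypothetical):
 * a walk that processes buffer objects until roughly @target pages have
 * been freed could be set up like this:
 *
 *	static s64 mydrv_swapout_bo(struct ttm_lru_walk *walk,
 *				    struct ttm_buffer_object *bo)
 *	{
 *		... return pages freed, 0 to skip, or a negative error ...
 *	}
 *
 *	static const struct ttm_lru_walk_ops mydrv_swap_ops = {
 *		.process_bo = mydrv_swapout_bo,
 *	};
 *
 *	struct ttm_lru_walk walk = {
 *		.ops = &mydrv_swap_ops,
 *		.ctx = ctx,
 *		.trylock_only = true,
 *	};
 *
 *	progress = ttm_lru_walk_for_evict(&walk, bdev, man, target);
 */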

/**
 * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
 * and clean up any iteration it was used for.
 * @curs: The cursor.
 */
void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;

	ttm_bo_lru_cursor_cleanup_bo(curs);
	spin_lock(lru_lock);
	ttm_resource_cursor_fini(&curs->res_curs);
	spin_unlock(lru_lock);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);

/**
 * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
 * @curs: The ttm_bo_lru_cursor to initialize.
 * @man: The ttm resource_manager whose LRU lists to iterate over.
 * @ctx: The ttm_operation_ctx to govern the locking.
 *
 * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
 * or prelocked buffer objects are available, as detailed by
 * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
 * supported.
 *
 * Return: Pointer to @curs. The function does not fail.
 */
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
		       struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	memset(curs, 0, sizeof(*curs));
	ttm_resource_cursor_init(&curs->res_curs, man);
	curs->ctx = ctx;

	return curs;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_init);

static struct ttm_buffer_object *
ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
{
	struct ttm_buffer_object *bo = res->bo;

	if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
		return NULL;

	if (!ttm_bo_get_unless_zero(bo)) {
		if (curs->needs_unlock)
			dma_resv_unlock(bo->base.resv);
		return NULL;
	}

	curs->bo = bo;
	return bo;
}

/**
 * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
 * to find and lock a buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
 * ttm_bo_lru_cursor_first().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
	struct ttm_resource *res = NULL;
	struct ttm_buffer_object *bo;

	ttm_bo_lru_cursor_cleanup_bo(curs);

	spin_lock(lru_lock);
	for (;;) {
		res = ttm_resource_manager_next(&curs->res_curs);
		if (!res)
			break;

		bo = ttm_bo_from_res_reserved(res, curs);
		if (bo)
			break;
	}

	spin_unlock(lru_lock);
	return res ? bo : NULL;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_next);

/**
 * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
 * to find and lock a buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;

	spin_lock(lru_lock);
	res = ttm_resource_manager_first(&curs->res_curs);
	if (!res) {
		spin_unlock(lru_lock);
		return NULL;
	}

	bo = ttm_bo_from_res_reserved(res, curs);
	spin_unlock(lru_lock);

	return bo ? bo : ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
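
/*
 * Example (an illustrative sketch): the intended iteration pattern over
 * a manager's LRU lists. Each bo returned is locked and reference-counted
 * for the duration of the loop body:
 *
 *	struct ttm_bo_lru_cursor curs;
 *	struct ttm_buffer_object *bo;
 *
 *	ttm_bo_lru_cursor_init(&curs, man, ctx);
 *	for (bo = ttm_bo_lru_cursor_first(&curs); bo;
 *	     bo = ttm_bo_lru_cursor_next(&curs)) {
 *		... act on the locked bo ...
 *	}
 *	ttm_bo_lru_cursor_fini(&curs);
 */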

/**
 * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
 * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
 * @bo: The buffer object.
 * @flags: Flags governing the shrinking behaviour.
 *
 * The function uses the ttm_tt_backup() functionality to back up or
 * purge a struct ttm_tt. If the bo is not in system, it's first
 * moved there.
 *
 * Return: The number of pages shrunken or purged, or
 * negative error code on failure.
 */
long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags)
{
	static const struct ttm_place sys_placement_flags = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0,
	};
	static struct ttm_placement sys_placement = {
		.num_placement = 1,
		.placement = &sys_placement_flags,
	};
	struct ttm_tt *tt = bo->ttm;
	long lret;

	dma_resv_assert_held(bo->base.resv);

	if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
		int ret = ttm_bo_validate(bo, &sys_placement, ctx);

		/* Consider -ENOMEM and -ENOSPC non-fatal. */
		if (ret) {
			if (ret == -ENOMEM || ret == -ENOSPC)
				ret = -EBUSY;
			return ret;
		}
	}

	ttm_bo_unmap_virtual(bo);
	lret = ttm_bo_wait_ctx(bo, ctx);
	if (lret < 0)
		return lret;

	if (bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
			     {.purge = flags.purge,
			      .writeback = flags.writeback});

	if (lret <= 0 && bo->bulk_move) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);
	}

	if (lret < 0 && lret != -EINTR)
		return -EBUSY;

	return lret;
}
EXPORT_SYMBOL(ttm_bo_shrink);

/**
 * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
 * @ctx: The struct ttm_operation_ctx governing the shrinking.
 * @bo: The candidate for shrinking.
 *
 * Check whether the object, given the information available to TTM,
 * is suitable for shrinking. This function can and should be used
 * before attempting to shrink an object.
 *
 * Return: true if suitable. false if not.
 */
bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
		(!ctx->no_wait_gpu ||
		 dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
}
EXPORT_SYMBOL(ttm_bo_shrink_suitable);

/**
 * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
 * during shrinking
 *
 * In some situations, like direct reclaim, waiting (in particular gpu waiting)
 * should be avoided since it may stall a system that could otherwise make
 * progress shrinking something else less time consuming.
 *
 * Return: true if gpu waiting should be avoided, false if not.
 */
bool ttm_bo_shrink_avoid_wait(void)
{
	return !current_is_kswapd();
}
EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
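
/*
 * Example (an illustrative sketch, to_scan is hypothetical): a driver
 * shrinker scanning a manager's LRU with the cursor helpers and backing
 * up what it can:
 *
 *	struct ttm_operation_ctx ctx = {
 *		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
 *	};
 *	struct ttm_bo_lru_cursor curs;
 *	struct ttm_buffer_object *bo;
 *	long freed = 0, lret;
 *
 *	ttm_bo_lru_cursor_init(&curs, man, &ctx);
 *	for (bo = ttm_bo_lru_cursor_first(&curs); bo;
 *	     bo = ttm_bo_lru_cursor_next(&curs)) {
 *		if (!ttm_bo_shrink_suitable(bo, &ctx))
 *			continue;
 *		lret = ttm_bo_shrink(&ctx, bo, (struct ttm_bo_shrink_flags)
 *				     {.writeback = true, .allow_move = true});
 *		if (lret > 0)
 *			freed += lret;
 *		if (freed >= to_scan)
 *			break;
 *	}
 *	ttm_bo_lru_cursor_fini(&curs);
 */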