/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <linux/cgroup_dmem.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_print.h>
#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove a cursor from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
 * @cursor: The cursor to initialize.
 * @man: The resource manager.
 *
 * Initialize the cursor before using it for iteration.
 */
void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
			      struct ttm_resource_manager *man)
{
	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	INIT_LIST_HEAD(&cursor->hitch.link);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can safely be called multiple times in a row.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * Zero-initialize the structure and set up its cursor list.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity checks that bulk moves don't have any
 * resources left and hence no cursors attached.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
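
/*
 * Illustrative sketch (not part of this file's API): how a driver might
 * typically drive the bulk move helpers. The "mydrv_vm" structure and the
 * surrounding names are hypothetical; ttm_bo_set_bulk_move() lives in
 * ttm_bo.c and ttm_lru_bulk_move_tail() is defined below. All BOs attached
 * to one bulk move must share the same reservation object and be reserved
 * when the bulk is moved.
 *
 *	struct mydrv_vm {
 *		struct ttm_lru_bulk_move lru_bulk_move;
 *	};
 *
 *	// VM creation: set up the bulk move once.
 *	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *
 *	// For each BO belonging to the VM, with the BO reserved:
 *	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *
 *	// After command submission, bump the whole VM to the LRU tail:
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);
 *
 *	// VM teardown: detach the BOs, then finalize the structure.
 *	ttm_bo_set_bulk_move(bo, NULL);
 *	ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);
 */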

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
 * resource order never changes. Should be called with &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move cursor */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}

static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not considered
	 * swapped if it's not the current resource for the bo and is thus logically
	 * associated with the ttm_tt. Think a VRAM resource created to move a
	 * swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	return bo->pin_count || ttm_resource_is_swapped(res, bo);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/*
 * Remove a resource from its bulk_move, bypassing the unevictable check.
 * Use only when the resource is known to still be tracked in the range despite
 * the BO having just become unevictable; asserts that this is the case.
 */
void ttm_resource_del_bulk_move_unevictable(struct ttm_resource *res,
					    struct ttm_buffer_object *bo)
{
	WARN_ON_ONCE(!ttm_resource_unevictable(res, bo));
	if (bo->bulk_move)
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (ttm_resource_unevictable(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);

	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (ttm_resource_unevictable(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
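
/*
 * Illustrative sketch (not part of this file): bumping a BO a driver has just
 * used to the LRU (or bulk move) tail. "mydrv_mark_bo_used" is hypothetical;
 * the usual driver-facing entry point is ttm_bo_move_to_lru_tail(), which
 * forwards to ttm_resource_move_to_lru_tail() above and expects both the BO
 * reservation and the LRU lock to be held.
 *
 *	static void mydrv_mark_bo_used(struct ttm_buffer_object *bo)
 *	{
 *		dma_resv_assert_held(bo->base.resv);
 *
 *		spin_lock(&bo->bdev->lru_lock);
 *		ttm_bo_move_to_lru_tail(bo);
 *		spin_unlock(&bo->bdev->lru_lock);
 *	}
 */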

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr,
		       struct dmem_cgroup_pool_state **ret_limit_pool)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
		if (ret)
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res_ptr)->css = pool;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;
	struct dmem_cgroup_pool_state *pool;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	pool = (*res)->css;
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
	if (man->cg)
		dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);

/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}
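
/*
 * Illustrative sketch (not part of this file): a minimal resource manager
 * backend pairing ttm_resource_init() and ttm_resource_fini() in its alloc
 * and free callbacks, matching the signatures used by ttm_resource_alloc()
 * and ttm_resource_free() above. "mydrv_" names are hypothetical; a real
 * backend would also run an allocator to fill in res->start.
 *
 *	static int mydrv_man_alloc(struct ttm_resource_manager *man,
 *				   struct ttm_buffer_object *bo,
 *				   const struct ttm_place *place,
 *				   struct ttm_resource **res)
 *	{
 *		struct ttm_resource *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *		if (!node)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, node);
 *		*res = node;
 *		return 0;
 *	}
 *
 *	static void mydrv_man_free(struct ttm_resource_manager *man,
 *				   struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(res);
 *	}
 *
 *	static const struct ttm_resource_manager_func mydrv_man_func = {
 *		.alloc = mydrv_man_alloc,
 *		.free = mydrv_man_free,
 *	};
 */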

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialize core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	spin_lock_init(&man->eviction_lock);
	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
		man->eviction_fences[i] = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/**
 * ttm_resource_manager_evict_all
 *
 * @bdev: device to use
 * @man: manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = { };
	struct dma_fence *fence;
	int ret, i;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	if (ret && ret != -ENOENT)
		return ret;

	ret = 0;

	spin_lock(&man->eviction_lock);
	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
		fence = man->eviction_fences[i];
		if (fence && !dma_fence_is_signaled(fence)) {
			dma_fence_get(fence);
			spin_unlock(&man->eviction_lock);
			ret = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			if (ret)
				return ret;
			spin_lock(&man->eviction_lock);
		}
	}
	spin_unlock(&man->eviction_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how much of the manager's capacity is currently in use, in the same
 * units as the manager's size.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	if (WARN_ON_ONCE(!man->bdev))
		return 0;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);
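
/*
 * Illustrative sketch (not part of this file): typical driver-side setup and
 * teardown of a manager using the helpers above. "mydrv_" names, the manager
 * object and MYDRV_PL_VRAM are hypothetical; ttm_set_driver_manager(),
 * ttm_resource_manager_set_used() and ttm_resource_manager_cleanup() are the
 * usual companion helpers declared in the TTM headers.
 *
 *	// Init: describe the manager, then publish it to TTM.
 *	ttm_resource_manager_init(man, bdev, vram_size >> PAGE_SHIFT);
 *	man->func = &mydrv_man_func;
 *	ttm_set_driver_manager(bdev, MYDRV_PL_VRAM, man);
 *	ttm_resource_manager_set_used(man, true);
 *
 *	// Teardown: stop new allocations, then drain the manager.
 *	ttm_resource_manager_set_used(man, false);
 *	if (ttm_resource_manager_evict_all(bdev, man))
 *		return;		// still busy; driver-specific error handling
 *	ttm_resource_manager_cleanup(man);
 *	ttm_set_driver_manager(bdev, MYDRV_PL_VRAM, NULL);
 */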

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	bulk = next->bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @cursor: cursor to record the position
 *
 * Moves the cursor to the start of the manager's LRU lists and returns the
 * first resource. The cursor must have been set up with
 * ttm_resource_cursor_init(), and when done iterating, the caller must
 * explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;

	if (WARN_ON_ONCE(!man))
		return NULL;

	lockdep_assert_held(&man->bdev->lru_lock);

	list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	return NULL;
}
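
/*
 * Illustrative sketch (not part of this file): walking a manager's LRU lists
 * with a cursor, e.g. while looking for an eviction candidate. The
 * "mydrv_consider_eviction" predicate is hypothetical. The walk runs under
 * the LRU lock; the hitch left behind by the cursor is what allows the lock
 * to be dropped and re-taken between iterations without losing the position.
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_resource_cursor_init(&cursor, man);
 *	for (res = ttm_resource_manager_first(&cursor); res;
 *	     res = ttm_resource_manager_next(&cursor)) {
 *		if (mydrv_consider_eviction(res->bo))
 *			break;
 *	}
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);
 */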

/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
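
/*
 * Illustrative sketch (not part of this file): using the iterator above to
 * copy one page out of an io_mapping-backed resource. In-tree users normally
 * hand the returned struct ttm_kmap_iter to the TTM memcpy move helpers
 * rather than driving the ops by hand; "mydrv", "buf", "st", "vram_start"
 * and "page_index" are hypothetical.
 *
 *	struct ttm_kmap_iter_iomap iter_io;
 *	struct ttm_kmap_iter *iter;
 *	struct iosys_map map;
 *
 *	iter = ttm_kmap_iter_iomap_init(&iter_io, &mydrv->iomap, st, vram_start);
 *
 *	iter->ops->map_local(iter, &map, page_index);
 *	memcpy_fromio(buf, map.vaddr_iomem, PAGE_SIZE);
 *	if (iter->ops->unmap_local)
 *		iter->ops->unmap_local(iter, &map);
 */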

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too distant future. Best would be if we
 * could make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather they use
 * ioremap or friends, and at least on 32-bit they add global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to finalize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created.
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
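
/*
 * Illustrative sketch (not part of this file): exposing per-manager stats
 * from a driver's debugfs init hook. "mydrv" names and the "ttm_vram" file
 * name are hypothetical.
 *
 *	void mydrv_debugfs_init(struct mydrv_device *mydrv, struct dentry *root)
 *	{
 *		struct ttm_resource_manager *man =
 *			ttm_manager_type(&mydrv->bdev, TTM_PL_VRAM);
 *
 *		ttm_resource_manager_create_debugfs(man, root, "ttm_vram");
 *	}
 */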