/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <linux/cgroup_dmem.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_print.h>
#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove all cursors from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
 * @cursor: The cursor to initialize.
 * @man: The resource manager.
 *
 * Initialize the cursor before using it for iteration.
 */
void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
			      struct ttm_resource_manager *man)
{
	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	INIT_LIST_HEAD(&cursor->hitch.link);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can be called multiple times after each other.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}
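/*
 * A typical cursor lifecycle, sketched here for illustration (the
 * ttm_resource_manager_for_each_res() helper comes from ttm_resource.h;
 * "man", "res" and "cursor" are caller-provided):
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&man->bdev->lru_lock);
 *	ttm_resource_cursor_init(&cursor, man);
 *	ttm_resource_manager_for_each_res(&cursor, res) {
 *		// inspect @res; the hitch item keeps the position valid
 *		// even when the list changes underneath the iteration
 *	}
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&man->bdev->lru_lock);
 */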
/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity checks that bulk moves don't have any
 * resources left and hence no cursors attached.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
 * that the resource order never changes. Should be called with
 * &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
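/*
 * Typical driver-side usage of a bulk move, sketched for illustration
 * ("vm" is a hypothetical per-process structure; ttm_bo_set_bulk_move()
 * is provided by ttm_bo.c):
 *
 *	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *
 *	// once per BO, under the BO's dma-resv lock
 *	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *
 *	// on command submission, after all BOs were validated
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);
 *
 *	// at teardown, once all resources are gone
 *	ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);
 */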
/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move range */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}
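/*
 * For orientation, a sketch of how a bulk move range sits inside a
 * per-priority LRU list. [R] are resources, [H] is a cursor hitch;
 * pos->first/pos->last bracket the contiguous range that
 * list_bulk_move_tail() later moves as a whole:
 *
 *	head -> [R] -> [H] -> [R] -> [R] -> [R] -> tail
 *	                      ^first        ^last
 *
 * ttm_lru_prev_res()/ttm_lru_next_res() step over the [H] entries so
 * that first/last always point at real resources.
 */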
static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not considered
	 * swapped if it's not the current resource for the bo and is thus logically
	 * associated with the ttm_tt. Think of a VRAM resource created to move a
	 * swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	return bo->pin_count || ttm_resource_is_swapped(res, bo);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (ttm_resource_unevictable(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);

	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (ttm_resource_unevictable(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
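/*
 * How a resource manager backend typically uses ttm_resource_init() in
 * its &ttm_resource_manager_func.alloc callback; a minimal sketch, with
 * "my_manager_alloc" being purely illustrative:
 *
 *	static int my_manager_alloc(struct ttm_resource_manager *man,
 *				    struct ttm_buffer_object *bo,
 *				    const struct ttm_place *place,
 *				    struct ttm_resource **res)
 *	{
 *		*res = kzalloc(sizeof(**res), GFP_KERNEL);
 *		if (!*res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, *res);
 *		// backend-specific setup, e.g. assigning (*res)->start
 *		return 0;
 *	}
 */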
/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr,
		       struct dmem_cgroup_pool_state **ret_limit_pool)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
		if (ret)
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res_ptr)->css = pool;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;
	struct dmem_cgroup_pool_state *pool;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	pool = (*res)->css;
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
	if (man->cg)
		dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
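/*
 * The matching backend teardown path; again a sketch, with
 * "my_manager_free" purely illustrative. ttm_resource_free() above ends
 * up here through man->func->free():
 *
 *	static void my_manager_free(struct ttm_resource_manager *man,
 *				    struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(res);
 *	}
 */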
/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}
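/*
 * A caller-side sketch of how the check above is typically used during
 * validation (illustrative only, not a fixed TTM API contract):
 *
 *	if (ttm_resource_compatible(bo->resource, placement, false))
 *		return 0;	// current placement is fine, no move needed
 *
 *	// otherwise allocate a new resource and move the buffer
 */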
void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
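/*
 * Driver-side bring-up of a manager, sketched for illustration
 * (ttm_set_driver_manager() and ttm_resource_manager_set_used() come
 * from the TTM headers; "my_manager_funcs" is illustrative):
 *
 *	ttm_resource_manager_init(man, bdev, size);
 *	man->func = &my_manager_funcs;
 *	man->use_tt = false;
 *
 *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
 *	ttm_resource_manager_set_used(man, true);
 */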
/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};
	struct dma_fence *fence;
	int ret;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	if (ret && ret != -ENOENT)
		return ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
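/*
 * The corresponding manager teardown sequence, sketched for
 * illustration; drivers usually disable the manager first so no new
 * allocations race with the eviction loop:
 *
 *	ttm_resource_manager_set_used(man, false);
 *	ret = ttm_resource_manager_evict_all(bdev, man);
 *	if (ret)
 *		return ret;	// e.g. interrupted fence wait
 *
 *	ttm_resource_manager_cleanup(man);
 *	ttm_set_driver_manager(bdev, mem_type, NULL);
 */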
/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	if (WARN_ON_ONCE(!man->bdev))
		return 0;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk = NULL;
	struct ttm_buffer_object *bo = next->bo;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	bulk = bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @cursor: cursor to record the position
 *
 * Initializes the cursor and starts iterating. When done iterating,
 * the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;

	if (WARN_ON_ONCE(!man))
		return NULL;

	lockdep_assert_held(&man->bdev->lru_lock);

	list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	return NULL;
}

/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather ioremap or
 * friends, and at least on 32-bit they add global TLB flushes and points
 * of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to finalize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}
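/*
 * How these iterators are consumed, sketched for illustration; the
 * memcpy move path hands one iterator for the source and one for the
 * destination to a copy loop that maps one page at a time:
 *
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, page_index);
 *	// access the page through &map ...
 *	if (iter->ops->unmap_local)
 *		iter->ops->unmap_local(iter, &map);
 */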
#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);