/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <linux/cgroup_dmem.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove a cursor from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
 * @cursor: The cursor to initialize.
 * @man: The resource manager.
 *
 * Initialize the cursor before using it for iteration.
 */
void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
			      struct ttm_resource_manager *man)
{
	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	INIT_LIST_HEAD(&cursor->hitch.link);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can be called multiple times after each other.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity checks that bulk moves don't have any
 * resources left and hence no cursors attached.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
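
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically embeds a struct ttm_lru_bulk_move in a per-VM object and
 * brackets its lifetime with ttm_lru_bulk_move_init() and
 * ttm_lru_bulk_move_fini(). The my_vm naming below is hypothetical.
 *
 *	struct my_vm {
 *		struct ttm_lru_bulk_move lru_bulk_move;
 *	};
 *
 *	static void my_vm_init(struct my_vm *vm)
 *	{
 *		ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *	}
 *
 *	static void my_vm_fini(struct ttm_device *bdev, struct my_vm *vm)
 *	{
 *		ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);
 *	}
 */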

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
 * that resource order never changes. Should be called with &ttm_device.lru_lock
 * held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
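
/*
 * Usage sketch (illustrative only): after revalidating the BOs that share
 * a bulk move, a driver bumps the whole range to the LRU tail in constant
 * time per memory type and priority. The vm naming is hypothetical.
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);
 */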

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move range */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}

static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not
	 * considered swapped if it's not the current resource for the bo and
	 * is thus logically associated with the ttm_tt. Think a VRAM resource
	 * created to move a swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	return bo->pin_count || ttm_resource_is_swapped(res, bo);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (ttm_resource_unevictable(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);
	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (ttm_resource_unevictable(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
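
/*
 * Usage sketch (illustrative only): a resource manager backend wraps
 * struct ttm_resource in its own type and pairs ttm_resource_init() with
 * ttm_resource_fini() from its &ttm_resource_manager_func.alloc and .free
 * callbacks. The my_resource/my_alloc names are hypothetical.
 *
 *	struct my_resource {
 *		struct ttm_resource base;
 *	};
 *
 *	static int my_alloc(struct ttm_resource_manager *man,
 *			    struct ttm_buffer_object *bo,
 *			    const struct ttm_place *place,
 *			    struct ttm_resource **res)
 *	{
 *		struct my_resource *my_res = kzalloc(sizeof(*my_res), GFP_KERNEL);
 *
 *		if (!my_res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, &my_res->base);
 *		*res = &my_res->base;
 *		return 0;
 *	}
 */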

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr,
		       struct dmem_cgroup_pool_state **ret_limit_pool)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
		if (ret)
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res_ptr)->css = pool;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;
	struct dmem_cgroup_pool_state *pool;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	pool = (*res)->css;
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
	if (man->cg)
		dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
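
/*
 * Usage sketch (illustrative only): ttm_resource_alloc() is paired with
 * ttm_resource_free(), which also clears the resource pointer. The res and
 * limit_pool names below are hypothetical locals.
 *
 *	struct dmem_cgroup_pool_state *limit_pool = NULL;
 *	struct ttm_resource *res;
 *	int ret;
 *
 *	ret = ttm_resource_alloc(bo, place, &res, &limit_pool);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_resource_free(bo, &res);
 */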

/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};
	struct dma_fence *fence;
	int ret;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);
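
/*
 * Usage sketch (illustrative only): a driver initializes its manager once
 * and, on teardown, disables it and evicts everything before dropping it.
 * my_manager_func and the memory type are hypothetical.
 *
 *	ttm_resource_manager_init(man, bdev, size >> PAGE_SHIFT);
 *	man->func = &my_manager_func;
 *	ttm_resource_manager_set_used(man, true);
 *	...
 *	ttm_resource_manager_set_used(man, false);
 *	ttm_resource_manager_evict_all(bdev, man);
 *	ttm_set_driver_manager(bdev, mem_type, NULL);
 */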

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk = NULL;
	struct ttm_buffer_object *bo = next->bo;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	bulk = bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @cursor: cursor to record the position
 *
 * Initializes the cursor and starts iterating. When done iterating,
 * the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;

	if (WARN_ON_ONCE(!man))
		return NULL;

	lockdep_assert_held(&man->bdev->lru_lock);

	list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	return NULL;
}
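
/*
 * Usage sketch (illustrative only): eviction-style walks combine the two
 * calls above under the LRU lock and must finalize the cursor afterwards;
 * the hitch keeps the iteration position stable even if the lock is
 * temporarily dropped. evaluate_resource() is a hypothetical helper.
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	ttm_resource_cursor_init(&cursor, man);
 *	spin_lock(&bdev->lru_lock);
 *	for (res = ttm_resource_manager_first(&cursor); res;
 *	     res = ttm_resource_manager_next(&cursor))
 *		evaluate_resource(res);
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);
 */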

/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
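
/*
 * Usage sketch (illustrative only): a driver that maps VRAM through a
 * struct io_mapping can feed a resource's sg_table to this iterator and
 * hand the result to a memcpy move helper. The vram_iomap, st and
 * vram_start names are hypothetical.
 *
 *	struct ttm_kmap_iter_iomap iter_io;
 *	struct ttm_kmap_iter *iter;
 *
 *	iter = ttm_kmap_iter_iomap_init(&iter_io, vram_iomap, st, vram_start);
 *	...copy pages through iter->ops->map_local()/unmap_local()...
 */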

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too distant future. Best would be if we
 * could make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather ioremap or
 * friends, and at least on 32-bit they add global TLB flushes and points
 * of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif
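
/*
 * Internal usage sketch (illustrative only): TTM's memcpy move path pairs
 * the two helpers above around the actual copy, roughly:
 *
 *	iter = ttm_kmap_iter_linear_io_init(&io_iter, bdev, mem);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *	...copy pages through the iterator...
 *	ttm_kmap_iter_linear_io_fini(&io_iter, bdev, mem);
 */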

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
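
/*
 * Usage sketch (illustrative only): a driver registers the file from its
 * debugfs init hook; the directory and file name below are hypothetical.
 *
 *	ttm_resource_manager_create_debugfs(ttm_manager_type(bdev, TTM_PL_VRAM),
 *					    minor->debugfs_root, "ttm_vram_mm");
 */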