/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <linux/cgroup_dmem.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove a cursor from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}
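/*
 * Illustration (informal, not part of the upstream comments): a cursor
 * iterates an LRU list by means of a "hitch" list item H parked right after
 * the resource it last returned. With a bulk move sub-range [R1..R3] on the
 * list and H parked inside that range,
 *
 *	... -> R1 -> H -> R2 -> R3 -> ...
 *
 * ttm_resource_cursor_move_bulk_tail() re-parks H behind the last resource
 * of the range before the range is bulk-moved to the LRU tail, and then
 * detaches the cursor from the bulk move:
 *
 *	... -> R1 -> R2 -> R3 -> H -> ...
 */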
/**
 * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
 * @cursor: The cursor to initialize.
 * @man: The resource manager.
 *
 * Initialize the cursor before using it for iteration.
 */
void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
			      struct ttm_resource_manager *man)
{
	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	INIT_LIST_HEAD(&cursor->hitch.link);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can be called multiple times in a row.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity checks that bulk moves don't have any
 * resources left and hence no cursors attached.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
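/*
 * Lifecycle sketch (hypothetical driver code; "bdev" and "bo" stand for the
 * driver's struct ttm_device and a BO it owns). BOs are typically attached
 * to and detached from the bulk with ttm_bo_set_bulk_move(), called with the
 * BO's reservation lock held:
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *	ttm_bo_set_bulk_move(bo, &bulk);	// per BO sharing the resv
 *	...
 *	ttm_bo_set_bulk_move(bo, NULL);		// detach the BOs again
 *	ttm_lru_bulk_move_fini(bdev, &bulk);
 */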
/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when the driver makes
 * sure that the resource order never changes. Should be called with
 * &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move range */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}
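/*
 * Illustration (informal): a bulk move "pos" describes one contiguous
 * [first..last] sub-range per memory type and priority on the LRU list.
 * ttm_lru_bulk_move_del() keeps that sub-range contiguous:
 *
 *	del(only item)   -> first = last = NULL
 *	del(first)       -> first advances to the next resource
 *	del(last)        -> last retreats to the previous resource
 *	del(middle item) -> the item is moved on the list to just after
 *	                    last, i.e. out of the range
 */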
static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not
	 * considered swapped if it's not the current resource for the bo and
	 * is thus logically associated with the ttm_tt. Think of a VRAM
	 * resource created to move a swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	return bo->pin_count || ttm_resource_is_swapped(res, bo);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (ttm_resource_unevictable(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);

	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (ttm_resource_unevictable(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
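/*
 * Backend sketch (hypothetical "my_res" manager, not an in-tree driver): a
 * resource manager backend wraps struct ttm_resource and brackets its
 * allocation with ttm_resource_init()/ttm_resource_fini():
 *
 *	struct my_res {
 *		struct ttm_resource base;
 *		// backend-private allocation state
 *	};
 *
 *	static int my_alloc(struct ttm_resource_manager *man,
 *			    struct ttm_buffer_object *bo,
 *			    const struct ttm_place *place,
 *			    struct ttm_resource **res)
 *	{
 *		struct my_res *mres = kzalloc(sizeof(*mres), GFP_KERNEL);
 *
 *		if (!mres)
 *			return -ENOMEM;
 *		ttm_resource_init(bo, place, &mres->base);
 *		*res = &mres->base;
 *		return 0;
 *	}
 *
 *	static void my_free(struct ttm_resource_manager *man,
 *			    struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(container_of(res, struct my_res, base));
 *	}
 */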
/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr,
		       struct dmem_cgroup_pool_state **ret_limit_pool)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
		if (ret)
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res_ptr)->css = pool;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;
	struct dmem_cgroup_pool_state *pool;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	pool = (*res)->css;
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
	if (man->cg)
		dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);

/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}
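/*
 * Pairing sketch for ttm_resource_alloc()/ttm_resource_free() (illustrative;
 * in practice the TTM BO validation path is the caller, and error handling
 * is caller-specific):
 *
 *	struct dmem_cgroup_pool_state *limit_pool = NULL;
 *	struct ttm_resource *res;
 *	int ret;
 *
 *	ret = ttm_resource_alloc(bo, place, &res, &limit_pool);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_resource_free(bo, &res);	// frees, uncharges, sets res = NULL
 */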
/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ?
				    TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};
	struct dma_fence *fence;
	int ret;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	if (ret && ret != -ENOENT)
		return ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);
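/*
 * Teardown sketch (hypothetical driver unload path; assumes the inline
 * helpers ttm_resource_manager_set_used() and ttm_resource_manager_cleanup()
 * from ttm_resource.h). The manager is disabled first so that no new
 * resources are placed in it, then emptied:
 *
 *	ttm_resource_manager_set_used(man, false);
 *	ret = ttm_resource_manager_evict_all(bdev, man);
 *	if (ret)
 *		return ret;	// some BOs could not be evicted
 *	ttm_resource_manager_cleanup(man);
 */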
/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk = NULL;
	struct ttm_buffer_object *bo = next->bo;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	bulk = bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @cursor: cursor to record the position
 *
 * Initializes the cursor and starts iterating. When done iterating,
 * the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;

	if (WARN_ON_ONCE(!man))
		return NULL;

	lockdep_assert_held(&man->bdev->lru_lock);

	list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	return NULL;
}
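/*
 * Iteration sketch (illustrative), using the
 * ttm_resource_manager_for_each_res() helper from ttm_resource.h, which
 * wraps the first()/next() pair above. The caller holds the LRU lock for
 * the walk:
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_resource_cursor_init(&cursor, man);
 *	ttm_resource_manager_for_each_res(&cursor, res) {
 *		// inspect res; the hitch keeps the position stable even
 *		// if the lock is temporarily dropped by the loop body
 *	}
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);
 */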
/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
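/*
 * Map/unmap sketch for the iomap iterator (illustrative, internal-style use;
 * "iomap", "st" and "start" are assumed to describe the resource as in the
 * kernel-doc above):
 *
 *	struct ttm_kmap_iter_iomap iter_io;
 *	struct ttm_kmap_iter *iter;
 *	struct iosys_map map;
 *
 *	iter = ttm_kmap_iter_iomap_init(&iter_io, iomap, st, start);
 *	iter->ops->map_local(iter, &map, 0);	// kmap_local-style map of page 0
 *	...					// access via map.vaddr_iomem
 *	iter->ops->unmap_local(iter, &map);
 */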
/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather they use
 * ioremap or friends, and at least on 32-bit they add global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to finalize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}
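/*
 * Init/fini pairing sketch (illustrative, internal TTM use only):
 *
 *	struct ttm_kmap_iter_linear_io iter_io;
 *	struct ttm_kmap_iter *iter;
 *
 *	iter = ttm_kmap_iter_linear_io_init(&iter_io, bdev, mem);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *	...	// feed iter to a memcpy move helper
 *	ttm_kmap_iter_linear_io_fini(&iter_io, bdev, mem);
 */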
#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
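/*
 * Usage sketch (hypothetical; the parent dentry and file name are
 * driver-specific, e.g. a DRM minor's debugfs root):
 *
 *	ttm_resource_manager_create_debugfs(man, minor->debugfs_root, "vram_mm");
 */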