/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <linux/cgroup_dmem.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove all cursors from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
 * @cursor: The cursor to initialize.
 * @man: The resource manager.
 *
 * Initialize the cursor before using it for iteration.
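 *
 * A minimal usage sketch (hypothetical driver code, not taken from an
 * in-tree caller); @bdev, @man and @num_visited are assumed to be provided
 * by the driver, and the LRU lock must be held as shown:
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_resource_cursor_init(&cursor, man);
 *	for (res = ttm_resource_manager_first(&cursor); res;
 *	     res = ttm_resource_manager_next(&cursor))
 *		num_visited++;
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);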
 */
void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
			      struct ttm_resource_manager *man)
{
	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	INIT_LIST_HEAD(&cursor->hitch.link);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can safely be called multiple times in a row.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * Zero the structure and initialize its cursor list.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity-checks that the bulk move doesn't have any resources left and
 * detaches any cursors still attached to it.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);

/**
 * ttm_lru_bulk_move_tail - bulk move a range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
 * that the resource order never changes. Should be called with
 * &ttm_device.lru_lock held.
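 *
 * A hedged sketch of the expected calling pattern (driver-side; the per-VM
 * bulk move @vm->lru_bulk_move is hypothetical and assumed to be the one all
 * BOs of the VM were attached to with ttm_bo_set_bulk_move()):
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);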
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk move range */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}

static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not considered
	 * swapped if it's not the current resource for the bo and is thus logically
	 * associated with the ttm_tt. Think of a VRAM resource created to move a
	 * swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	return bo->pin_count || ttm_resource_is_swapped(res, bo);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (ttm_resource_unevictable(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);
	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (ttm_resource_unevictable(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
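 *
 * A sketch of how a backend's &ttm_resource_manager_func.free callback might
 * use this (my_manager_free() and the kfree'd resource allocation are
 * hypothetical, not taken from a real backend):
 *
 *	static void my_manager_free(struct ttm_resource_manager *man,
 *				    struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(res);
 *	}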
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr,
		       struct dmem_cgroup_pool_state **ret_limit_pool)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
		if (ret)
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res_ptr)->css = pool;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;
	struct dmem_cgroup_pool_state *pool;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	pool = (*res)->css;
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
	if (man->cg)
		dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);

/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing whether an
 * eviction is worthwhile.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ?
				    TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of the memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how much of the manager's resources are currently in use.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
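 *
 * Example (hypothetical caller) dumping the manager state to the kernel log;
 * @dev is assumed to be the driver's struct device:
 *
 *	struct drm_printer p = drm_info_printer(dev);
 *
 *	ttm_resource_manager_debug(man, &p);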
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk = NULL;
	struct ttm_buffer_object *bo = next->bo;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	bulk = bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @cursor: cursor to record the position
 *
 * Moves the cursor to the first resource and starts iterating. When done
 * iterating, the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;

	if (WARN_ON_ONCE(!man))
		return NULL;

	lockdep_assert_held(&man->bdev->lru_lock);

	list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	return NULL;
}

/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
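 *
 * A small sketch (assuming the LRU lock is held) peeking at the least
 * recently used resource on the first priority list of a manager:
 *
 *	res = ttm_lru_first_res_or_null(&man->lru[0]);
 *	if (res)
 *		bo = res->bo;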
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);

/**
 * DOC: Linear io iterator
 *
 * This code should go away in the not too distant future. Best would be if we
 * could make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather they use
 * ioremap or friends, which at least on 32-bit adds global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap
 * iterator pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to finalize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
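
/*
 * Usage sketch for ttm_resource_manager_create_debugfs() (hypothetical driver
 * code; @minor is assumed to be the driver's struct drm_minor):
 *
 *	ttm_resource_manager_create_debugfs(man, minor->debugfs_root,
 *					    "vram_mm");
 */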