/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <linux/cgroup_dmem.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_print.h>
#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove all cursors from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
 * @cursor: The cursor to initialize.
 * @man: The resource manager.
 *
 * Initialize the cursor before using it for iteration.
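 *
 * A minimal iteration sketch (hypothetical driver code; inspect_resource()
 * is a placeholder, and the LRU lock must be held across the loop):
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_resource_cursor_init(&cursor, man);
 *	for (res = ttm_resource_manager_first(&cursor); res;
 *	     res = ttm_resource_manager_next(&cursor))
 *		inspect_resource(res);
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);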
 */
void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
			      struct ttm_resource_manager *man)
{
	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	INIT_LIST_HEAD(&cursor->hitch.link);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can safely be called multiple times in a row.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * Zero the structure and initialize its cursor list.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity checks that bulk moves don't have any
 * resources left and hence no cursors attached.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
 * that resource order never changes. Should be called with &ttm_device.lru_lock
 * held.
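 *
 * A usage sketch, assuming a driver that groups the BOs of one VM in a
 * single bulk move (the vm structure is hypothetical, and the BO must be
 * reserved when the bulk move is attached):
 *
 *	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);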
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move range */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}

static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not considered
	 * swapped if it's not the current resource for the bo and is thus logically
	 * associated with the ttm_tt. Think a VRAM resource created to move a
	 * swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	return bo->pin_count || ttm_resource_is_swapped(res, bo);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (ttm_resource_unevictable(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);
	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (ttm_resource_unevictable(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
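 *
 * A sketch of a backend free() callback using this pairing, assuming a
 * hypothetical struct my_resource that embeds the struct ttm_resource as
 * @base:
 *
 *	static void my_manager_free(struct ttm_resource_manager *man,
 *				    struct ttm_resource *res)
 *	{
 *		struct my_resource *my = container_of(res, struct my_resource, base);
 *
 *		ttm_resource_fini(man, res);
 *		kfree(my);
 *	}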
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr,
		       struct dmem_cgroup_pool_state **ret_limit_pool)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
		if (ret)
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res_ptr)->css = pool;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;
	struct dmem_cgroup_pool_state *pool;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	pool = (*res)->css;
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
	if (man->cg)
		dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
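
/*
 * ttm_resource_alloc() and ttm_resource_free() pair up for internal TTM
 * callers roughly as follows (a sketch; @bo and @place are assumed to be
 * set up by the caller):
 *
 *	struct dmem_cgroup_pool_state *limit_pool = NULL;
 *	struct ttm_resource *res;
 *	int ret;
 *
 *	ret = ttm_resource_alloc(bo, place, &res, &limit_pool);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_resource_free(bo, &res);
 */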

/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

/* Update the resource's bo pointer under the LRU lock */
void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	spin_lock_init(&man->eviction_lock);
	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
		man->eviction_fences[i] = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = { };
	struct dma_fence *fence;
	int ret, i;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	if (ret && ret != -ENOENT)
		return ret;

	ret = 0;

	spin_lock(&man->eviction_lock);
	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
		fence = man->eviction_fences[i];
		if (fence && !dma_fence_is_signaled(fence)) {
			dma_fence_get(fence);
			spin_unlock(&man->eviction_lock);
			ret = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			if (ret)
				return ret;
			spin_lock(&man->eviction_lock);
		}
	}
	spin_unlock(&man->eviction_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	if (WARN_ON_ONCE(!man->bdev))
		return 0;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
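 *
 * The state can be dumped through any struct drm_printer, e.g. (a sketch,
 * with @dev being a hypothetical struct device pointer):
 *
 *	struct drm_printer p = drm_info_printer(dev);
 *
 *	ttm_resource_manager_debug(man, &p);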
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

/* Update the cursor's bulk move attachment to match the next resource */
static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	bulk = next->bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @cursor: cursor to record the position
 *
 * Initializes the cursor and starts iterating. When done iterating,
 * the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;

	if (WARN_ON_ONCE(!man))
		return NULL;

	lockdep_assert_held(&man->bdev->lru_lock);

	list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	return NULL;
}

/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
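 *
 * A usage sketch; the lists are expected to be accessed under the LRU lock,
 * e.g. for the first priority list of a manager:
 *
 *	spin_lock(&bdev->lru_lock);
 *	res = ttm_lru_first_res_or_null(&man->lru[0]);
 *	spin_unlock(&bdev->lru_lock);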
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
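
/*
 * A usage sketch (hypothetical driver code): the iterator typically feeds one
 * side of a memcpy helper such as ttm_move_memcpy(), with @iomap, @st,
 * @vram_start, @num_pages and @dst_iter assumed to be set up by the driver:
 *
 *	struct ttm_kmap_iter_iomap iter_io;
 *	struct ttm_kmap_iter *src_iter;
 *
 *	src_iter = ttm_kmap_iter_iomap_init(&iter_io, iomap, st, vram_start);
 *	ttm_move_memcpy(false, num_pages, dst_iter, src_iter);
 */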

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather, they use
 * ioremap or friends, and at least on 32-bit they add global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif
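
/*
 * A registration sketch (hypothetical driver debugfs setup; the directory
 * and file name are driver choices):
 *
 *	ttm_resource_manager_create_debugfs(ttm_manager_type(bdev, TTM_PL_VRAM),
 *					    debugfs_root, "ttm_resource_vram");
 */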

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);