/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
#include <linux/cgroup_dmem.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove a cursor from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can be called multiple times after each other.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}
/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity checks that bulk moves don't have any
 * resources left and hence no cursors attached.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
 * resource order never changes. Should be called with &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
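/*
 * Example (illustrative sketch, not taken from a real driver): a driver
 * typically embeds a struct ttm_lru_bulk_move in a VM-like object, attaches
 * BOs to it with ttm_bo_set_bulk_move() and then bumps the whole range in
 * one go. "vm" is a hypothetical driver object, and the reservation objects
 * of the contained BOs must be held for the tail move:
 *
 *	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);
 *
 *	... detach all BOs again, then:
 *	ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);
 */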
/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move cursor */
static void
ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
		      struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}

static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not considered
	 * swapped if it's not the current resource for the bo and is thus logically
	 * associated with the ttm_tt. Think a VRAM resource created to move a
	 * swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (bo->pin_count || ttm_resource_is_swapped(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);

	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (bo->pin_count || ttm_resource_is_swapped(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
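/*
 * Example (illustrative sketch, not part of TTM): a minimal resource manager
 * backend brackets its own allocation with ttm_resource_init() and
 * ttm_resource_fini(). All "my_" identifiers are hypothetical:
 *
 *	struct my_resource {
 *		struct ttm_resource base;
 *	};
 *
 *	static int my_man_alloc(struct ttm_resource_manager *man,
 *				struct ttm_buffer_object *bo,
 *				const struct ttm_place *place,
 *				struct ttm_resource **res)
 *	{
 *		struct my_resource *my = kzalloc(sizeof(*my), GFP_KERNEL);
 *
 *		if (!my)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, &my->base);
 *		*res = &my->base;
 *		return 0;
 *	}
 *
 *	static void my_man_free(struct ttm_resource_manager *man,
 *				struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(container_of(res, struct my_resource, base));
 *	}
 */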
int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr,
		       struct dmem_cgroup_pool_state **ret_limit_pool)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
		if (ret)
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res_ptr)->css = pool;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;
	struct dmem_cgroup_pool_state *pool;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	pool = (*res)->css;
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
	if (man->cg)
		dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
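/*
 * Example (illustrative sketch): callers pair ttm_resource_alloc() with
 * ttm_resource_free(). Passing NULL for the limit pool out-parameter is
 * assumed acceptable here for callers that don't react to cgroup limits;
 * eviction code instead passes a pointer to learn which pool hit its limit:
 *
 *	struct ttm_resource *res;
 *	int ret;
 *
 *	ret = ttm_resource_alloc(bo, place, &res, NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_resource_free(bo, &res);
 *
 * ttm_resource_free() also resets the pointer to NULL, so freeing twice is
 * harmless.
 */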
/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
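/*
 * Example (illustrative sketch): during device teardown a manager is usually
 * disabled first, then drained, then cleaned up. This assumes the
 * ttm_resource_manager_set_used() / ttm_resource_manager_cleanup() helpers
 * from ttm_resource.h:
 *
 *	ttm_resource_manager_set_used(man, false);
 *	ret = ttm_resource_manager_evict_all(bdev, man);
 *	if (ret)
 *		return ret;
 *	ttm_resource_manager_cleanup(man);
 */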
/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk = NULL;
	struct ttm_buffer_object *bo = next->bo;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	bulk = bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Initializes the cursor and starts iterating. When done iterating,
 * the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
			   struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&man->bdev->lru_lock);

	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	list_add(&cursor->hitch.link, &man->lru[cursor->priority]);

	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	ttm_resource_cursor_fini(cursor);

	return NULL;
}
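/*
 * Example (illustrative sketch): walking a manager's LRU with a cursor.
 * The LRU lock must be held, and iteration must end with
 * ttm_resource_cursor_fini(), which is safe to call even after
 * ttm_resource_manager_next() has already finalized the cursor:
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	for (res = ttm_resource_manager_first(man, &cursor); res;
 *	     res = ttm_resource_manager_next(&cursor)) {
 *		... inspect res ...
 *	}
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);
 */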
/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather ioremap or
 * friends, and at least on 32-bit they add global TLB flushes and points
 * of failure.
 */
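/*
 * Example (illustrative sketch): both kmap iterator flavours in this file are
 * driven through the same struct ttm_kmap_iter_ops, one page at a time.
 * "iter" stands for an iterator returned by one of the init functions here;
 * unmap_local is optional and must only be called when implemented:
 *
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, i);
 *	... copy from/to the mapping of page i ...
 *	if (iter->ops->unmap_local)
 *		iter->ops->unmap_local(iter, &map);
 */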
static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif
/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created.
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
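/*
 * Example (illustrative sketch): a driver would typically call this from its
 * debugfs init code; "minor" stands for a hypothetical struct drm_minor and
 * the file name is up to the driver:
 *
 *	ttm_resource_manager_create_debugfs(ttm_manager_type(bdev, TTM_PL_VRAM),
 *					    minor->debugfs_root, "vram_mm");
 */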