/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_util.h>

/* Detach the cursor from the bulk move list */
static void
ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	cursor->bulk = NULL;
	list_del_init(&cursor->bulk_link);
}

/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/* Move all cursors attached to a bulk move to its end */
static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_move_bulk_tail(bulk, cursor);
}

/* Remove all cursors from an empty bulk move list */
static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
{
	struct ttm_resource_cursor *cursor, *next;

	list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
		ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can be called multiple times after each other.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero and initialize the cursor list.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);

/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Detaches any LRU iteration cursors that are still attached to the bulk
 * move. The bulk move must not have any resources left.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
 * that resource order never changes. Should be called with &ttm_device.lru_lock
 * held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
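
/*
 * Example (illustrative sketch, not taken from this file): a driver whose BOs
 * share a single reservation object, e.g. the BOs of a VM, can batch their
 * LRU updates with a bulk move. ttm_bo_set_bulk_move() attaches a BO to the
 * bulk move; my_vm_bo_list is a hypothetical name:
 *
 *	struct ttm_lru_bulk_move lru_bulk_move;
 *
 *	ttm_lru_bulk_move_init(&lru_bulk_move);
 *	list_for_each_entry(bo, &my_vm_bo_list, ...)
 *		ttm_bo_set_bulk_move(bo, &lru_bulk_move);
 *
 * Later, to bump all of those resources to the LRU tail in one go, with the
 * shared reservation object still locked:
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);
 *
 * On teardown, after the BOs have dropped their resources:
 *
 *	ttm_lru_bulk_move_fini(bdev, &lru_bulk_move);
 */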

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Return the previous resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_prev_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Return the next resource on the list (skip over non-resource list items) */
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
{
	struct ttm_lru_item *lru = &cur->lru;

	do {
		lru = list_next_entry(lru, link);
	} while (!ttm_lru_item_is_res(lru));

	return ttm_lru_item_to_res(lru);
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move range */
static void
ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
		      struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}

static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
{
	/*
	 * Take care when creating a new resource for a bo, that it is not
	 * considered swapped if it's not the current resource for the bo and
	 * is thus logically associated with the ttm_tt. Think of a VRAM
	 * resource created to move a swapped-out bo to VRAM.
	 */
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (bo->pin_count || ttm_resource_is_swapped(res, bo)) {
		list_move_tail(&res->lru.link, &bdev->unevictable);
	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}
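
/*
 * Example (illustrative sketch, not taken from this file): drivers normally
 * reach ttm_resource_move_to_lru_tail() through ttm_bo_move_to_lru_tail(),
 * e.g. to bump a BO on command submission; @bo is a reserved
 * struct ttm_buffer_object:
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 */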

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (bo->pin_count || ttm_resource_is_swapped(res, bo))
		list_add_tail(&res->lru.link, &bo->bdev->unevictable);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
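
/*
 * Example (illustrative sketch, modeled on TTM's system memory manager): a
 * minimal resource manager backend pairs ttm_resource_init() with
 * ttm_resource_fini() around its own (de)allocation. my_man_alloc() and
 * my_man_free() are hypothetical names for the
 * struct ttm_resource_manager_func callbacks:
 *
 *	static int my_man_alloc(struct ttm_resource_manager *man,
 *				struct ttm_buffer_object *bo,
 *				const struct ttm_place *place,
 *				struct ttm_resource **res)
 *	{
 *		*res = kzalloc(sizeof(**res), GFP_KERNEL);
 *		if (!*res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, *res);
 *		return 0;
 *	}
 *
 *	static void my_man_free(struct ttm_resource_manager *man,
 *				struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(res);
 *	}
 */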

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	int ret;

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret)
		return ret;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);

/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
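
/*
 * Example (illustrative sketch, not taken from this file): bringing up a
 * manager for a driver-private memory type: initialize it, set its functions,
 * publish it on the device and mark it used. my_man_funcs and MY_MEM_TYPE are
 * hypothetical names:
 *
 *	ttm_resource_manager_init(man, bdev, size);
 *	man->func = &my_man_funcs;
 *
 *	ttm_set_driver_manager(bdev, MY_MEM_TYPE, man);
 *	ttm_resource_manager_set_used(man, true);
 */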

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk = NULL;
	struct ttm_buffer_object *bo = next->bo;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	bulk = bo->bulk_move;

	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}

/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Initializes the cursor and starts iterating. When done iterating,
 * the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
			   struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&man->bdev->lru_lock);

	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	list_add(&cursor->hitch.link, &man->lru[cursor->priority]);

	return ttm_resource_manager_next(cursor);
}

/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	ttm_resource_cursor_fini(cursor);

	return NULL;
}
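
/*
 * Example (illustrative sketch): walking all resources of a manager under the
 * LRU lock. The cursor's hitch keeps the position stable even when resources
 * move around on the LRU during the walk:
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *
 *	spin_lock(&bdev->lru_lock);
 *	for (res = ttm_resource_manager_first(man, &cursor); res;
 *	     res = ttm_resource_manager_next(&cursor)) {
 *		... inspect @res ...
 *	}
 *	ttm_resource_cursor_fini(&cursor);
 *	spin_unlock(&bdev->lru_lock);
 */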

/**
 * ttm_lru_first_res_or_null() - Return the first resource on an lru list
 * @head: The list head of the lru list.
 *
 * Return: Pointer to the first resource on the lru list or NULL if
 * there is none.
 */
struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
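
/*
 * Example (illustrative sketch, not taken from this file): accessing one page
 * of a resource through the returned iterator. @iomap, @sgt and @start are
 * assumed to be set up by the driver as described above:
 *
 *	struct ttm_kmap_iter_iomap iter_io;
 *	struct ttm_kmap_iter *iter;
 *	struct iosys_map map;
 *
 *	iter = ttm_kmap_iter_iomap_init(&iter_io, iomap, sgt, start);
 *	iter->ops->map_local(iter, &map, i);
 *	... access the page via @map, e.g. with iosys_map_memcpy_to() ...
 *	if (iter->ops->unmap_local)
 *		iter->ops->unmap_local(iter, &map);
 */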

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too distant future. Best would be if we
 * could make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather they use
 * ioremap or friends, and at least on 32-bit they add global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created.
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
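
/*
 * Example (illustrative sketch, not taken from this file): a driver would
 * typically call this from its debugfs init, here for a VRAM manager;
 * "ttm_resource_vram" is a made-up file name and @minor a struct drm_minor
 * the driver already owns:
 *
 *	struct ttm_resource_manager *man =
 *		ttm_manager_type(bdev, TTM_PL_VRAM);
 *
 *	ttm_resource_manager_create_debugfs(man, minor->debugfs_root,
 *					    "ttm_resource_vram");
 */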