/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/debugfs.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

#include <drm/drm_util.h>

/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
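
/*
 * Illustrative sketch only, using hypothetical my_vm_* names: a driver that
 * shares one reservation object across all BOs of a VM can keep them
 * together on the LRU with a bulk move. BOs are attached with
 * ttm_bo_set_bulk_move() while reserved, and the whole range is bumped on
 * command submission with the VM's common reservation object still locked.
 *
 *	struct my_vm {
 *		struct ttm_lru_bulk_move lru_bulk_move;
 *	};
 *
 *	static void my_vm_init(struct my_vm *vm)
 *	{
 *		ttm_lru_bulk_move_init(&vm->lru_bulk_move);
 *	}
 *
 *	static void my_vm_add_bo(struct my_vm *vm, struct ttm_buffer_object *bo)
 *	{
 *		dma_resv_assert_held(bo->base.resv);
 *		ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *	}
 *
 *	static void my_vm_bump_lru(struct ttm_device *bdev, struct my_vm *vm)
 *	{
 *		spin_lock(&bdev->lru_lock);
 *		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *		spin_unlock(&bdev->lru_lock);
 *	}
 */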

/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
 * resource order never changes. Should be called with &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			list_bulk_move_tail(&man->lru[j], &pos->first->lru,
					    &pos->last->lru);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);

/* Return the bulk move pos object for this resource */
static struct ttm_lru_bulk_move_pos *
ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
{
	return &bulk->pos[res->mem_type][res->bo->priority];
}

/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		if (pos->first == res)
			pos->first = list_next_entry(res, lru);
		list_move(&res->lru, &pos->last->lru);
		pos->last = res;
	}
}

/* Add the resource to a bulk_move cursor */
static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (!pos->first) {
		pos->first = res;
		pos->last = res;
	} else {
		WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
		ttm_lru_bulk_move_pos_tail(pos, res);
	}
}

/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = list_next_entry(res, lru);
	} else if (pos->last == res) {
		pos->last = list_prev_entry(res, lru);
	} else {
		list_move(&res->lru, &pos->last->lru);
	}
}

/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count)
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

/* Remove the resource from a bulk move if the BO is configured for it */
void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count)
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (bo->pin_count) {
		list_move_tail(&res->lru, &bdev->pinned);

	} else if (bo->bulk_move) {
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru, &man->lru[bo->priority]);
	}
}
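
/*
 * Minimal sketch of the usual caller pattern: drivers normally bump a BO
 * they just used through the ttm_bo_move_to_lru_tail() wrapper instead of
 * calling ttm_resource_move_to_lru_tail() directly. The BO must be
 * reserved and, as asserted above, the LRU lock held; my_bo_used() is a
 * hypothetical helper.
 *
 *	static void my_bo_used(struct ttm_buffer_object *bo)
 *	{
 *		dma_resv_assert_held(bo->base.resv);
 *
 *		spin_lock(&bo->bdev->lru_lock);
 *		ttm_bo_move_to_lru_tail(bo);
 *		spin_unlock(&bo->bdev->lru_lock);
 *	}
 */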

/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	if (bo->pin_count)
		list_add_tail(&res->lru, &bo->bdev->pinned);
	else
		list_add_tail(&res->lru, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);

/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru);
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);

int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	int ret;

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret)
		return ret;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);

void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
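
/*
 * Backend sketch: ttm_resource_init() and ttm_resource_fini() bracket the
 * lifetime of a resource inside a manager's alloc/free hooks. This is
 * roughly what a trivial backend such as the system-domain manager does;
 * the my_man_* names are hypothetical.
 *
 *	static int my_man_alloc(struct ttm_resource_manager *man,
 *				struct ttm_buffer_object *bo,
 *				const struct ttm_place *place,
 *				struct ttm_resource **res)
 *	{
 *		*res = kzalloc(sizeof(**res), GFP_KERNEL);
 *		if (!*res)
 *			return -ENOMEM;
 *
 *		ttm_resource_init(bo, place, *res);
 *		return 0;
 *	}
 *
 *	static void my_man_free(struct ttm_resource_manager *man,
 *				struct ttm_resource *res)
 *	{
 *		ttm_resource_fini(man, res);
 *		kfree(res);
 *	}
 */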

/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}

/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}

void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}

/**
 * ttm_resource_manager_init
 *
 * @man: memory manager object to init
 * @bdev: ttm device this manager belongs to
 * @size: size of managed resources in arbitrary units
 *
 * Initialise core parts of a manager object.
 */
void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       uint64_t size)
{
	unsigned i;

	spin_lock_init(&man->move_lock);
	man->bdev = bdev;
	man->size = size;
	man->usage = 0;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
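
/*
 * Bring-up sketch, assuming a hypothetical my_vram_man_func backend: after
 * ttm_resource_manager_init() the manager is published on the device with
 * ttm_set_driver_manager() and enabled with ttm_resource_manager_set_used(),
 * both helpers from ttm_resource.h.
 *
 *	static int my_vram_mgr_init(struct ttm_device *bdev, uint64_t vram_size)
 *	{
 *		struct ttm_resource_manager *man;
 *
 *		man = kzalloc(sizeof(*man), GFP_KERNEL);
 *		if (!man)
 *			return -ENOMEM;
 *
 *		man->func = &my_vram_man_func;
 *		ttm_resource_manager_init(man, bdev, vram_size >> PAGE_SHIFT);
 *		ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
 *		ttm_resource_manager_set_used(man, true);
 *		return 0;
 *	}
 */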

/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&bdev->lru_lock);
			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
						  NULL);
			if (ret)
				return ret;
			spin_lock(&bdev->lru_lock);
		}
	}
	spin_unlock(&bdev->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);

/**
 * ttm_resource_manager_usage
 *
 * @man: A memory manager object.
 *
 * Return how many resources are currently used.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}
EXPORT_SYMBOL(ttm_resource_manager_usage);

/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

/**
 * ttm_resource_manager_first
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Returns the first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
			   struct ttm_resource_cursor *cursor)
{
	struct ttm_resource *res;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
	     ++cursor->priority)
		list_for_each_entry(res, &man->lru[cursor->priority], lru)
			return res;

	return NULL;
}

/**
 * ttm_resource_manager_next
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 * @res: the current resource pointer
 *
 * Returns the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_manager *man,
			  struct ttm_resource_cursor *cursor,
			  struct ttm_resource *res)
{
	lockdep_assert_held(&man->bdev->lru_lock);

	list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
		return res;

	for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
	     ++cursor->priority)
		list_for_each_entry(res, &man->lru[cursor->priority], lru)
			return res;

	return NULL;
}
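
/*
 * Iteration sketch: walking all resources of a manager with the two
 * functions above (or the equivalent ttm_resource_manager_for_each_res()
 * helper from ttm_resource.h). The whole walk must stay under the LRU
 * lock; counting the resources is just an illustrative loop body.
 *
 *	struct ttm_resource_cursor cursor;
 *	struct ttm_resource *res;
 *	unsigned long count = 0;
 *
 *	spin_lock(&bdev->lru_lock);
 *	for (res = ttm_resource_manager_first(man, &cursor); res;
 *	     res = ttm_resource_manager_next(man, &cursor, res))
 *		++count;
 *	spin_unlock(&bdev->lru_lock);
 */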

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather they use
 * ioremap or friends, which at least on 32-bit adds global TLB flushes and
 * points of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct iosys_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	iosys_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};
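
/*
 * Consumption sketch, with st, iomap, start, num_pages and dst assumed to
 * come from the driver: the iomap iterator above is initialized over the
 * resource's sg_table and then handed to a pairwise copy such as
 * ttm_move_memcpy().
 *
 *	struct ttm_kmap_iter_iomap src_iter;
 *	struct ttm_kmap_iter *src;
 *
 *	src = ttm_kmap_iter_iomap_init(&src_iter, iomap, st, start);
 *	ttm_move_memcpy(false, num_pages, dst, src);
 */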

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap
 * iterator pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}

#if defined(CONFIG_DEBUG_FS)

static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif

/**
 * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
 * resource manager.
 * @man: The TTM resource manager for which the debugfs stats file will be
 * created
 * @parent: debugfs directory in which the file will reside
 * @name: The filename to create.
 *
 * This function sets up a debugfs file that can be used to look
 * at debug statistics of the specified ttm_resource_manager.
 */
void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
					 struct dentry *parent,
					 const char *name)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
#endif
}
EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
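
/*
 * Usage sketch: drivers typically wire this up from their debugfs init,
 * one file per manager. The drm_minor debugfs root is a common parent,
 * but any dentry works; the function and file names below are hypothetical.
 *
 *	void my_driver_debugfs_init(struct drm_minor *minor, struct ttm_device *bdev)
 *	{
 *		struct ttm_resource_manager *man =
 *			ttm_manager_type(bdev, TTM_PL_VRAM);
 *
 *		ttm_resource_manager_create_debugfs(man, minor->debugfs_root,
 *						    "ttm_vram_mm");
 *	}
 */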