1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /* 3 * Copyright 2020 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: Christian König 24 */ 25 26 /* Pooling of allocated pages is necessary because changing the caching 27 * attributes on x86 of the linear mapping requires a costly cross CPU TLB 28 * invalidate for those addresses. 29 * 30 * Additional to that allocations from the DMA coherent API are pooled as well 31 * cause they are rather slow compared to alloc_pages+map. 
32 */ 33 34 #include <linux/export.h> 35 #include <linux/module.h> 36 #include <linux/dma-mapping.h> 37 #include <linux/debugfs.h> 38 #include <linux/highmem.h> 39 #include <linux/sched/mm.h> 40 41 #ifdef CONFIG_X86 42 #include <asm/set_memory.h> 43 #endif 44 45 #include <drm/ttm/ttm_backup.h> 46 #include <drm/ttm/ttm_pool.h> 47 #include <drm/ttm/ttm_tt.h> 48 #include <drm/ttm/ttm_bo.h> 49 50 #include "ttm_module.h" 51 #include "ttm_pool_internal.h" 52 53 #ifdef CONFIG_FAULT_INJECTION 54 #include <linux/fault-inject.h> 55 static DECLARE_FAULT_ATTR(backup_fault_inject); 56 #else 57 #define should_fail(...) false 58 #endif 59 60 /** 61 * struct ttm_pool_dma - Helper object for coherent DMA mappings 62 * 63 * @addr: original DMA address returned for the mapping 64 * @vaddr: original vaddr return for the mapping and order in the lower bits 65 */ 66 struct ttm_pool_dma { 67 dma_addr_t addr; 68 unsigned long vaddr; 69 }; 70 71 /** 72 * struct ttm_pool_alloc_state - Current state of the tt page allocation process 73 * @pages: Pointer to the next tt page pointer to populate. 74 * @caching_divide: Pointer to the first page pointer whose page has a staged but 75 * not committed caching transition from write-back to @tt_caching. 76 * @dma_addr: Pointer to the next tt dma_address entry to populate if any. 77 * @remaining_pages: Remaining pages to populate. 78 * @tt_caching: The requested cpu-caching for the pages allocated. 79 */ 80 struct ttm_pool_alloc_state { 81 struct page **pages; 82 struct page **caching_divide; 83 dma_addr_t *dma_addr; 84 pgoff_t remaining_pages; 85 enum ttm_caching tt_caching; 86 }; 87 88 /** 89 * struct ttm_pool_tt_restore - State representing restore from backup 90 * @pool: The pool used for page allocation while restoring. 91 * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state. 92 * @alloced_page: Pointer to the page most recently allocated from a pool or system. 
 * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
 * is requested.
 * @alloced_pages: The number of allocated pages present in the struct ttm_tt
 * page vector from this restore session.
 * @restored_pages: The number of 4K pages restored for @alloced_page (which
 * is typically a multi-order page).
 * @page_caching: The struct ttm_tt requested caching
 * @order: The order of @alloced_page.
 *
 * Recovery from backup might fail when we've recovered less than the
 * full ttm_tt. In order not to lose any data (yet), keep information
 * around that allows us to restart a failed ttm backup recovery.
 */
struct ttm_pool_tt_restore {
	struct ttm_pool *pool;
	struct ttm_pool_alloc_state snapshot_alloc;
	struct page *alloced_page;
	dma_addr_t first_dma;
	pgoff_t alloced_pages;
	pgoff_t restored_pages;
	enum ttm_caching page_caching;
	unsigned int order;
};

/* Upper bound on pooled pages; ttm_pool_free() shrinks down to this limit */
static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

/* Total number of pages currently held in all pool types */
static atomic_long_t allocated_pages;

/* Global pools shared by all devices without a private pool */
static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

/* Protects shrinker_list, the round-robin list of all pool types */
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker *mm_shrinker;
static DECLARE_RWSEM(pool_shrink_rwsem);

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	const unsigned int beneficial_order = ttm_pool_beneficial_order(pool);
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into an userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_THISNODE;

	/*
	 * Do not add latency to the allocation path for allocation orders
	 * the device told us do not bring any additional performance gains.
	 */
	if (beneficial_order && order > beneficial_order)
		gfp_flags &= ~__GFP_DIRECT_RECLAIM;

	if (!ttm_pool_uses_dma_alloc(pool)) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			/* Stash the order for ttm_pool_page_order() */
			p->private = order;
		return p;
	}

	dma = kmalloc_obj(*dma);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	/* Pack vaddr and order together; vaddr is page aligned so the low
	 * bits are free to hold the order (see ttm_pool_page_order()).
	 */
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	/* Recover the ttm_pool_dma cookie stored by ttm_pool_alloc_page();
	 * the order lives in the low bits of dma->vaddr.
	 */
	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply any cpu-caching deferred during page allocation */
static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
	/* Pages between caching_divide and the current position have a staged
	 * but not yet committed caching transition.
	 */
	unsigned int num_pages = alloc->pages - alloc->caching_divide;

	if (!num_pages)
		return 0;

	switch (alloc->tt_caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(alloc->caching_divide, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(alloc->caching_divide, num_pages);
	}
#endif
	alloc->caching_divide = alloc->pages;
	return 0;
}

/* DMA Map pages of 1 << order size and return the resulting dma_address.
 */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t *dma_addr)
{
	dma_addr_t addr;

	if (ttm_pool_uses_dma_alloc(pool)) {
		/* dma_alloc_attrs() already mapped the page; reuse that address */
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	*dma_addr = addr;

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (ttm_pool_uses_dma_alloc(pool))
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	/* Clear every 4K page before publishing the block to the pool */
	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool
= pool; 323 pt->caching = caching; 324 pt->order = order; 325 spin_lock_init(&pt->lock); 326 INIT_LIST_HEAD(&pt->pages); 327 328 spin_lock(&shrinker_lock); 329 list_add_tail(&pt->shrinker_list, &shrinker_list); 330 spin_unlock(&shrinker_lock); 331 } 332 333 /* Remove a pool_type from the global shrinker list and free all pages */ 334 static void ttm_pool_type_fini(struct ttm_pool_type *pt) 335 { 336 struct page *p; 337 338 spin_lock(&shrinker_lock); 339 list_del(&pt->shrinker_list); 340 spin_unlock(&shrinker_lock); 341 342 while ((p = ttm_pool_type_take(pt))) 343 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); 344 } 345 346 /* Return the pool_type to use for the given caching and order */ 347 static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, 348 enum ttm_caching caching, 349 unsigned int order) 350 { 351 if (ttm_pool_uses_dma_alloc(pool)) 352 return &pool->caching[caching].orders[order]; 353 354 #ifdef CONFIG_X86 355 switch (caching) { 356 case ttm_write_combined: 357 if (pool->nid != NUMA_NO_NODE) 358 return &pool->caching[caching].orders[order]; 359 360 if (ttm_pool_uses_dma32(pool)) 361 return &global_dma32_write_combined[order]; 362 363 return &global_write_combined[order]; 364 case ttm_uncached: 365 if (pool->nid != NUMA_NO_NODE) 366 return &pool->caching[caching].orders[order]; 367 368 if (ttm_pool_uses_dma32(pool)) 369 return &global_dma32_uncached[order]; 370 371 return &global_uncached[order]; 372 default: 373 break; 374 } 375 #endif 376 377 return NULL; 378 } 379 380 /* Free pages using the global shrinker list */ 381 static unsigned int ttm_pool_shrink(void) 382 { 383 struct ttm_pool_type *pt; 384 unsigned int num_pages; 385 struct page *p; 386 387 down_read(&pool_shrink_rwsem); 388 spin_lock(&shrinker_lock); 389 pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list); 390 list_move_tail(&pt->shrinker_list, &shrinker_list); 391 spin_unlock(&shrinker_lock); 392 393 p = ttm_pool_type_take(pt); 394 if (p) { 395 
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}
	up_read(&pool_shrink_rwsem);

	return num_pages;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (ttm_pool_uses_dma_alloc(pool)) {
		struct ttm_pool_dma *dma = (void *)p->private;

		/* Order is stored in the low bits of the packed vaddr */
		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/*
 * Split larger pages so that we can free each PAGE_SIZE page as soon
 * as it has been backed up, in order to avoid memory pressure during
 * reclaim.
 */
static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
{
	unsigned int order = ttm_pool_page_order(pool, p);
	pgoff_t nr;

	if (!order)
		return;

	split_page(p, order);
	nr = 1UL << order;
	/* Each split page is now order 0; clear the stored order */
	while (nr--)
		(p++)->private = 0;
}

/**
 * DOC: Partial backup and restoration of a struct ttm_tt.
 *
 * Swapout using ttm_backup_backup_page() and swapin using
 * ttm_backup_copy_page() may fail.
 * The former most likely due to lack of swap-space or memory, the latter due
 * to lack of memory or because of signal interruption during waits.
 *
 * Backup failure is easily handled by using a ttm_tt pages vector that holds
 * both backup handles and page pointers. This has to be taken into account when
 * restoring such a ttm_tt from backup, and when freeing it while backed up.
 * When restoring, for simplicity, new pages are actually allocated from the
 * pool and the contents of any old pages are copied in and then the old pages
 * are released.
 *
 * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
 * to be able to resume an interrupted restore, and that structure is freed once
 * the restoration is complete.
If the struct ttm_tt is destroyed while there 454 * is a valid struct ttm_pool_tt_restore attached, that is also properly taken 455 * care of. 456 */ 457 458 /* Is restore ongoing for the currently allocated page? */ 459 static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore) 460 { 461 return restore && restore->restored_pages < (1 << restore->order); 462 } 463 464 /* DMA unmap and free a multi-order page, either to the relevant pool or to system. */ 465 static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page, 466 const dma_addr_t *dma_addr, enum ttm_caching caching) 467 { 468 struct ttm_pool_type *pt = NULL; 469 unsigned int order; 470 pgoff_t nr; 471 472 if (pool) { 473 order = ttm_pool_page_order(pool, page); 474 nr = (1UL << order); 475 if (dma_addr) 476 ttm_pool_unmap(pool, *dma_addr, nr); 477 478 pt = ttm_pool_select_type(pool, caching, order); 479 } else { 480 order = page->private; 481 nr = (1UL << order); 482 } 483 484 if (pt) 485 ttm_pool_type_give(pt, page); 486 else 487 ttm_pool_free_page(pool, caching, order, page); 488 489 return nr; 490 } 491 492 /* Populate the page-array using the most recent allocated multi-order page. */ 493 static void ttm_pool_allocated_page_commit(struct page *allocated, 494 dma_addr_t first_dma, 495 struct ttm_pool_alloc_state *alloc, 496 pgoff_t nr) 497 { 498 pgoff_t i; 499 500 for (i = 0; i < nr; ++i) 501 *alloc->pages++ = allocated++; 502 503 alloc->remaining_pages -= nr; 504 505 if (!alloc->dma_addr) 506 return; 507 508 for (i = 0; i < nr; ++i) { 509 *alloc->dma_addr++ = first_dma; 510 first_dma += PAGE_SIZE; 511 } 512 } 513 514 /* 515 * When restoring, restore backed-up content to the newly allocated page and 516 * if successful, populate the page-table and dma-address arrays. 
517 */ 518 static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore, 519 struct file *backup, 520 const struct ttm_operation_ctx *ctx, 521 struct ttm_pool_alloc_state *alloc) 522 523 { 524 pgoff_t i, nr = 1UL << restore->order; 525 struct page **first_page = alloc->pages; 526 struct page *p; 527 int ret = 0; 528 529 for (i = restore->restored_pages; i < nr; ++i) { 530 p = first_page[i]; 531 if (ttm_backup_page_ptr_is_handle(p)) { 532 unsigned long handle = ttm_backup_page_ptr_to_handle(p); 533 gfp_t additional_gfp = ctx->gfp_retry_mayfail ? 534 __GFP_RETRY_MAYFAIL | __GFP_NOWARN : 0; 535 536 if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible && 537 should_fail(&backup_fault_inject, 1)) { 538 ret = -EINTR; 539 break; 540 } 541 542 if (handle == 0) { 543 restore->restored_pages++; 544 continue; 545 } 546 547 ret = ttm_backup_copy_page(backup, restore->alloced_page + i, 548 handle, ctx->interruptible, 549 additional_gfp); 550 if (ret) 551 break; 552 553 ttm_backup_drop(backup, handle); 554 } else if (p) { 555 /* 556 * We could probably avoid splitting the old page 557 * using clever logic, but ATM we don't care, as 558 * we prioritize releasing memory ASAP. Note that 559 * here, the old retained page is always write-back 560 * cached. 561 */ 562 ttm_pool_split_for_swap(restore->pool, p); 563 copy_highpage(restore->alloced_page + i, p); 564 __free_pages(p, 0); 565 } 566 567 restore->restored_pages++; 568 first_page[i] = ttm_backup_handle_to_page_ptr(0); 569 } 570 571 if (ret) { 572 if (!restore->restored_pages) { 573 dma_addr_t *dma_addr = alloc->dma_addr ? 
&restore->first_dma : NULL; 574 575 ttm_pool_unmap_and_free(restore->pool, restore->alloced_page, 576 dma_addr, restore->page_caching); 577 restore->restored_pages = nr; 578 } 579 return ret; 580 } 581 582 ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma, 583 alloc, nr); 584 if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page)) 585 alloc->caching_divide = alloc->pages; 586 restore->snapshot_alloc = *alloc; 587 restore->alloced_pages += nr; 588 589 return 0; 590 } 591 592 /* If restoring, save information needed for ttm_pool_restore_commit(). */ 593 static void 594 ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order, 595 struct page *p, 596 enum ttm_caching page_caching, 597 dma_addr_t first_dma, 598 struct ttm_pool_tt_restore *restore, 599 const struct ttm_pool_alloc_state *alloc) 600 { 601 restore->pool = pool; 602 restore->order = order; 603 restore->restored_pages = 0; 604 restore->page_caching = page_caching; 605 restore->first_dma = first_dma; 606 restore->alloced_page = p; 607 restore->snapshot_alloc = *alloc; 608 } 609 610 /* 611 * Called when we got a page, either from a pool or newly allocated. 612 * if needed, dma map the page and populate the dma address array. 613 * Populate the page address array. 614 * If the caching is consistent, update any deferred caching. Otherwise 615 * stage this page for an upcoming deferred caching update. 
616 */ 617 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, 618 struct page *p, enum ttm_caching page_caching, 619 struct ttm_pool_alloc_state *alloc, 620 struct ttm_pool_tt_restore *restore) 621 { 622 bool caching_consistent; 623 dma_addr_t first_dma; 624 int r = 0; 625 626 caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p); 627 628 if (caching_consistent) { 629 r = ttm_pool_apply_caching(alloc); 630 if (r) 631 return r; 632 } 633 634 if (alloc->dma_addr) { 635 r = ttm_pool_map(pool, order, p, &first_dma); 636 if (r) 637 return r; 638 } 639 640 if (restore) { 641 ttm_pool_page_allocated_restore(pool, order, p, page_caching, 642 first_dma, restore, alloc); 643 } else { 644 ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order); 645 646 if (caching_consistent) 647 alloc->caching_divide = alloc->pages; 648 } 649 650 return 0; 651 } 652 653 /** 654 * ttm_pool_free_range() - Free a range of TTM pages 655 * @pool: The pool used for allocating. 656 * @tt: The struct ttm_tt holding the page pointers. 657 * @caching: The page caching mode used by the range. 658 * @start_page: index for first page to free. 659 * @end_page: index for last page to free + 1. 660 * 661 * During allocation the ttm_tt page-vector may be populated with ranges of 662 * pages with different attributes if allocation hit an error without being 663 * able to completely fulfill the allocation. This function can be used 664 * to free these individual ranges. 
665 */ 666 static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt, 667 enum ttm_caching caching, 668 pgoff_t start_page, pgoff_t end_page) 669 { 670 struct page **pages = &tt->pages[start_page]; 671 struct file *backup = tt->backup; 672 pgoff_t i, nr; 673 674 for (i = start_page; i < end_page; i += nr, pages += nr) { 675 struct page *p = *pages; 676 677 nr = 1; 678 if (ttm_backup_page_ptr_is_handle(p)) { 679 unsigned long handle = ttm_backup_page_ptr_to_handle(p); 680 681 if (handle != 0) 682 ttm_backup_drop(backup, handle); 683 } else if (p) { 684 dma_addr_t *dma_addr = tt->dma_address ? 685 tt->dma_address + i : NULL; 686 687 nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching); 688 } 689 } 690 } 691 692 static void ttm_pool_alloc_state_init(const struct ttm_tt *tt, 693 struct ttm_pool_alloc_state *alloc) 694 { 695 alloc->pages = tt->pages; 696 alloc->caching_divide = tt->pages; 697 alloc->dma_addr = tt->dma_address; 698 alloc->remaining_pages = tt->num_pages; 699 alloc->tt_caching = tt->caching; 700 } 701 702 /* 703 * Find a suitable allocation order based on highest desired order 704 * and number of remaining pages 705 */ 706 static unsigned int ttm_pool_alloc_find_order(unsigned int highest, 707 const struct ttm_pool_alloc_state *alloc) 708 { 709 return min_t(unsigned int, highest, __fls(alloc->remaining_pages)); 710 } 711 712 static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, 713 const struct ttm_operation_ctx *ctx, 714 struct ttm_pool_alloc_state *alloc, 715 struct ttm_pool_tt_restore *restore) 716 { 717 enum ttm_caching page_caching; 718 gfp_t gfp_flags = GFP_USER; 719 pgoff_t caching_divide; 720 unsigned int order; 721 bool allow_pools; 722 struct page *p; 723 int r; 724 725 WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt)); 726 WARN_ON(alloc->dma_addr && !pool->dev); 727 728 if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC) 729 gfp_flags |= __GFP_ZERO; 730 731 if (ctx->gfp_retry_mayfail) 732 gfp_flags |= 
__GFP_RETRY_MAYFAIL | __GFP_NOWARN; 733 734 if (ttm_pool_uses_dma32(pool)) 735 gfp_flags |= GFP_DMA32; 736 else 737 gfp_flags |= GFP_HIGHUSER; 738 739 page_caching = tt->caching; 740 allow_pools = true; 741 for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc); 742 alloc->remaining_pages; 743 order = ttm_pool_alloc_find_order(order, alloc)) { 744 struct ttm_pool_type *pt; 745 746 /* First, try to allocate a page from a pool if one exists. */ 747 p = NULL; 748 pt = ttm_pool_select_type(pool, page_caching, order); 749 if (pt && allow_pools) 750 p = ttm_pool_type_take(pt); 751 /* 752 * If that fails or previously failed, allocate from system. 753 * Note that this also disallows additional pool allocations using 754 * write-back cached pools of the same order. Consider removing 755 * that behaviour. 756 */ 757 if (!p) { 758 page_caching = ttm_cached; 759 allow_pools = false; 760 p = ttm_pool_alloc_page(pool, gfp_flags, order); 761 } 762 /* If that fails, lower the order if possible and retry. 
 */
		if (!p) {
			if (order) {
				--order;
				page_caching = tt->caching;
				allow_pools = true;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
		r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
					    restore);
		if (r)
			goto error_free_page;

		/* When restoring, copy backed-up content into the new page now */
		if (ttm_pool_restore_valid(restore)) {
			r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
			if (r)
				goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(alloc);
	if (r)
		goto error_free_all;

	kfree(tt->restore);
	tt->restore = NULL;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	/* A pending restore keeps its pages for a later retry */
	if (tt->restore)
		return r;

	/* Pages before caching_divide carry tt->caching, the rest are
	 * write-back cached; free each range with its actual caching.
	 */
	caching_divide = alloc->caching_divide - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
			    tt->num_pages - alloc->remaining_pages);

	return r;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	struct ttm_pool_alloc_state alloc;

	if (WARN_ON(ttm_tt_is_backed_up(tt)))
		return -EINVAL;

	ttm_pool_alloc_state_init(tt, &alloc);

	return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
 * content.
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 * Read in backed-up content.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
			       const struct ttm_operation_ctx *ctx)
{
	struct ttm_pool_tt_restore *restore = tt->restore;
	struct ttm_pool_alloc_state alloc;

	if (WARN_ON(!ttm_tt_is_backed_up(tt)))
		return -EINVAL;

	if (!restore) {
		/* First attempt: set up the restore state */
		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;

		ttm_pool_alloc_state_init(tt, &alloc);
		if (ctx->gfp_retry_mayfail)
			gfp |= __GFP_RETRY_MAYFAIL;

		restore = kzalloc_obj(*restore, gfp);
		if (!restore)
			return -ENOMEM;

		restore->snapshot_alloc = alloc;
		restore->pool = pool;
		/* With order still 0, restored_pages == 1 makes
		 * ttm_pool_restore_valid() return false until the first
		 * page has actually been allocated.
		 */
		restore->restored_pages = 1;

		tt->restore = restore;
	} else {
		/* Retry: resume from the snapshot of the failed attempt */
		alloc = restore->snapshot_alloc;
		if (ttm_pool_restore_valid(restore)) {
			int ret = ttm_pool_restore_commit(restore, tt->backup,
							  ctx, &alloc);

			if (ret)
				return ret;
		}
		if (!alloc.remaining_pages)
			return 0;
	}

	return __ttm_pool_alloc(pool, tt, ctx, &alloc, restore);
}

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	/* Enforce the module's global pool size limit */
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
 * @tt: The struct ttm_tt.
 *
 * Release handles with associated content or any remaining pages of
 * a backed-up struct ttm_tt.
912 */ 913 void ttm_pool_drop_backed_up(struct ttm_tt *tt) 914 { 915 struct ttm_pool_tt_restore *restore; 916 pgoff_t start_page = 0; 917 918 WARN_ON(!ttm_tt_is_backed_up(tt)); 919 920 restore = tt->restore; 921 922 /* 923 * Unmap and free any uncommitted restore page. 924 * any tt page-array backup entries already read back has 925 * been cleared already 926 */ 927 if (ttm_pool_restore_valid(restore)) { 928 dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL; 929 930 ttm_pool_unmap_and_free(restore->pool, restore->alloced_page, 931 dma_addr, restore->page_caching); 932 restore->restored_pages = 1UL << restore->order; 933 } 934 935 /* 936 * If a restore is ongoing, part of the tt pages may have a 937 * caching different than writeback. 938 */ 939 if (restore) { 940 pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages; 941 942 start_page = restore->alloced_pages; 943 WARN_ON(mid > start_page); 944 /* Pages that might be dma-mapped and non-cached */ 945 ttm_pool_free_range(restore->pool, tt, tt->caching, 946 0, mid); 947 /* Pages that might be dma-mapped but cached */ 948 ttm_pool_free_range(restore->pool, tt, ttm_cached, 949 mid, restore->alloced_pages); 950 kfree(restore); 951 tt->restore = NULL; 952 } 953 954 ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages); 955 } 956 957 /** 958 * ttm_pool_backup() - Back up or purge a struct ttm_tt 959 * @pool: The pool used when allocating the struct ttm_tt. 960 * @tt: The struct ttm_tt. 961 * @flags: Flags to govern the backup behaviour. 962 * 963 * Back up or purge a struct ttm_tt. If @purge is true, then 964 * all pages will be freed directly to the system rather than to the pool 965 * they were allocated from, making the function behave similarly to 966 * ttm_pool_free(). If @purge is false the pages will be backed up instead, 967 * exchanged for handles. 
 * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and
 * a subsequent call to ttm_pool_drop_backed_up() will drop it.
 * If backup of a page fails for whatever reason, @ttm will still be
 * partially backed up, retaining those pages for which backup fails.
 * In that case, this function can be retried, possibly after freeing up
 * memory resources.
 *
 * Return: Number of pages actually backed up or freed, or negative
 * error code on error.
 */
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
		     const struct ttm_backup_flags *flags)
{
	struct file *backup = tt->backup;
	struct page *page;
	unsigned long handle;
	gfp_t alloc_gfp;
	gfp_t gfp;
	int ret = 0;
	pgoff_t shrunken = 0;
	pgoff_t i, num_pages;

	/* Backing up an already backed-up tt is a caller bug. */
	if (WARN_ON(ttm_tt_is_backed_up(tt)))
		return -EINVAL;

	/* NOTE(review): the ttm_tt_is_backed_up() term below is redundant —
	 * that case already returned -EINVAL above.
	 */
	if ((!ttm_backup_bytes_avail() && !flags->purge) ||
	    ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt))
		return -EBUSY;

#ifdef CONFIG_X86
	/* Anything returned to the system needs to be cached. */
	if (tt->caching != ttm_cached)
		set_pages_array_wb(tt->pages, tt->num_pages);
#endif

	/* Walk the tt in pool-allocation-order-sized steps: tear down any DMA
	 * mappings, and when purging, free the pages right away instead of
	 * backing them up. Holes (NULL pages) are skipped one slot at a time.
	 */
	if (tt->dma_address || flags->purge) {
		for (i = 0; i < tt->num_pages; i += num_pages) {
			unsigned int order;

			page = tt->pages[i];
			if (unlikely(!page)) {
				num_pages = 1;
				continue;
			}

			order = ttm_pool_page_order(pool, page);
			num_pages = 1UL << order;
			/* Unmap before freeing so no stale DMA mapping survives. */
			if (tt->dma_address)
				ttm_pool_unmap(pool, tt->dma_address[i],
					       num_pages);
			if (flags->purge) {
				shrunken += num_pages;
				/* Clear the stashed order before releasing. */
				page->private = 0;
				__free_pages(page, order);
				memset(tt->pages + i, 0,
				       num_pages * sizeof(*tt->pages));
			}
		}
	}

	/* Purge means drop content entirely — nothing left to back up. */
	if (flags->purge)
		return shrunken;

	/* GFP masks handed to ttm_backup_backup_page(); presumably @gfp
	 * constrains placement of the backup copy (DMA32 vs highmem) while
	 * @alloc_gfp governs auxiliary allocations — verify against the
	 * ttm_backup implementation.
	 */
	if (ttm_pool_uses_dma32(pool))
		gfp = GFP_DMA32;
	else
		gfp = GFP_HIGHUSER;

	alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;

	num_pages = tt->num_pages;

	/* Pretend doing fault injection by shrinking only half of the pages. */
	if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
		num_pages = DIV_ROUND_UP(num_pages, 2);

	for (i = 0; i < num_pages; ++i) {
		s64 shandle;

		page = tt->pages[i];
		if (unlikely(!page))
			continue;

		/* Backup works on single pages; break up higher orders first. */
		ttm_pool_split_for_swap(pool, page);

		shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
						 gfp, alloc_gfp);
		if (shandle < 0) {
			/* We allow partially shrunken tts */
			ret = shandle;
			break;
		}
		/* Encode the backup handle into the page-pointer slot so a
		 * later restore can locate the backed-up copy.
		 */
		handle = shandle;
		tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
		put_page(page);
		shrunken++;
	}

	/* Report progress if any was made, otherwise the first error. */
	return shrunken ? shrunken : ret;
}

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @alloc_flags: TTM_ALLOCATION_POOL_* flags
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, unsigned int alloc_flags)
{
	unsigned int i, j;

	/* DMA-coherent allocations are impossible without a device. */
	WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool));

	pool->dev = dev;
	pool->nid = nid;
	pool->alloc_flags = alloc_flags;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			/* Initialize only pool types which are actually used */
			pt = ttm_pool_select_type(pool, i, j);
			/* Pointer identity tells us whether this (caching,
			 * order) combination uses a pool-private type; other
			 * combinations resolve to shared/global types that are
			 * managed elsewhere.
			 */
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_init(pt, pool, i, j);
		}
	}
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
 *
 * This is useful to guarantee that all shrinker invocations have seen an
 * update, before freeing memory, similar to rcu.
 */
static void ttm_pool_synchronize_shrinkers(void)
{
	/* Taking the write side waits out every reader (shrinker run)
	 * currently holding the rwsem for read; we need no state of our own.
	 */
	down_write(&pool_shrink_rwsem);
	up_write(&pool_shrink_rwsem);
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			pt = ttm_pool_select_type(pool, i, j);
			/* Only finalize pool-private types (see matching
			 * pointer-identity check in ttm_pool_init()).
			 */
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_fini(pt);
		}
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	ttm_pool_synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);

/* Free average pool number of pages. */
#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)

/* MM shrinker scan callback: keep shrinking until we freed the requested
 * number of pages or the pools are empty. Returns SHRINK_STOP when nothing
 * could be freed so the core stops calling us this round.
 */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (num_freed < sc->nr_to_scan &&
	       atomic_long_read(&allocated_pages));

	sc->nr_scanned = num_freed;

	return num_freed ?: SHRINK_STOP;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	/* One column per page order, aligned with ttm_pool_debugfs_orders(). */
	seq_puts(m, "\t ");
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	/* @pt is the first element of an NR_PAGE_ORDERS-sized array. */
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	/* shrinker_lock keeps the global pool types stable while dumping. */
	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	/* A pool with neither DMA allocation nor a specific NUMA node has no
	 * private pool types to report — presumably it only uses the global
	 * pools dumped elsewhere.
	 */
	if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		/* Skip caching types this pool doesn't maintain. */
		if (!ttm_pool_select_type(pool, i, 0))
			continue;
		if (ttm_pool_uses_dma_alloc(pool))
			seq_puts(m, "DMA ");
		else
			seq_printf(m, "N%d ", pool->nid);
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = {
		.gfp_mask = GFP_NOFS,
		.nr_to_scan = TTM_SHRINKER_BATCH,
	};
	unsigned long count;

	/* Enter the reclaim context by hand so the shrinker callbacks run
	 * under the same lockdep annotations as real memory reclaim.
	 */
	fs_reclaim_acquire(GFP_KERNEL);
	count = ttm_pool_shrinker_count(mm_shrinker, &sc);
	seq_printf(m, "%lu/%lu\n", count,
		   ttm_pool_shrinker_scan(mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	/* A module parameter may already have set the pool size limit. */
	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	/* Global pool types for every order: write-combined and uncached,
	 * each in a normal and a DMA32 flavor.
	 */
	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
				  &backup_fault_inject);
#endif
#endif

	mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
	if (!mm_shrinker)
		return -ENOMEM;

	mm_shrinker->count_objects = ttm_pool_shrinker_count;
	mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker->batch = TTM_SHRINKER_BATCH;
	mm_shrinker->seeks = 1;

	/* Register last, once all state the callbacks touch is set up. */
	shrinker_register(mm_shrinker);

	return 0;
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	shrinker_free(mm_shrinker);
	/* Every pool type should have unlinked itself by now. */
	WARN_ON(!list_empty(&shrinker_list));
}