/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#include <vm/vm_pageout.h>

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	struct mtx	lock;
	bool		fill_lock;
	bool		dma32;
	struct pglist	list;
	int		ttm_page_alloc_flags;
	unsigned	npages;
	char		*name;
	unsigned long	nfrees;
	unsigned long	nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in the sysfs store. They won't have immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4
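
/*
 * The four pools cover the combinations of caching state (write-combining
 * vs. uncached) with and without the DMA32 address restriction; see
 * ttm_get_pool() for the index encoding.  Cacheable (write-back) pages are
 * never pooled and are allocated and freed directly.
 */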

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for pool code so it doesn't need locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are some pages to free.
 * @small_allocation: Limit, in number of pages, for what counts as a small
 * allocation.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	unsigned int kobj_ref;
	eventhandler_tag lowmem_handler;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	u_pools[NUM_POOLS];
		struct _utag {
			struct ttm_page_pool	u_wc_pool;
			struct ttm_page_pool	u_uc_pool;
			struct ttm_page_pool	u_wc_pool_dma32;
			struct ttm_page_pool	u_uc_pool_dma32;
		} _ut;
	} _u;
};

#define	pools _u.u_pools
#define	wc_pool _u._ut.u_wc_pool
#define	uc_pool _u._ut.u_uc_pool
#define	wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define	uc_pool_dma32 _u._ut.u_uc_pool_dma32

MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");
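
/*
 * Pages handed out by this allocator are wired, marked fictitious and have
 * VPO_UNMANAGED cleared (see ttm_alloc_new_pages() and ttm_get_pages());
 * ttm_vm_page_free() reverses that setup before releasing a page back to
 * the VM.
 */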
static void
ttm_vm_page_free(vm_page_t m)
{

	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
	m->flags &= ~PG_FICTITIOUS;
	m->oflags |= VPO_UNMANAGED;
	vm_page_unwire(m, PQ_INACTIVE);
	vm_page_free(m);
}

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

	switch (cstate) {
	case tt_uncached:
		return (VM_MEMATTR_UNCACHEABLE);
	case tt_wc:
		return (VM_MEMATTR_WRITE_COMBINING);
	case tt_cached:
		return (VM_MEMATTR_WRITE_BACK);
	}
	panic("caching state %d\n", cstate);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

	free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
		struct attribute *attr, const char *buffer, size_t size)
{
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
		struct attribute *attr, char *buffer)
{
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK);
#endif
	return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING);
#endif
	return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE);
#endif
	return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
	unsigned i;

	/* Our VM handles vm memattr automatically on the page free. */
	if (set_pages_array_wb(pages, npages))
		printf("[TTM] Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}
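
/*
 * Note that ttm_page_pool_free() drops the pool lock while the caching
 * attributes of a batch are being changed back (ttm_pages_put() can be
 * slow), so the scan over the pool list restarts from the tail after every
 * batch of NUM_PAGES_TO_ALLOC pages.
 */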

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; pass FREE_ALL_PAGES to free every page
 * in the pool.
 *
 * Returns the number of requested pages that could not be freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	vm_page_t *pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
	    M_TEMP, M_WAITOK | M_ZERO);

restart:
	mtx_lock(&pool->lock);

	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list, pages_to_free[i],
				    plinks.q);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			mtx_unlock(&pool->lock);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	mtx_unlock(&pool->lock);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	free(pages_to_free, M_TEMP);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}
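
/*
 * On FreeBSD the pools are trimmed from a vm_lowmem event handler
 * (registered in ttm_pool_mm_shrink_init() below), which stands in for the
 * Linux mm shrinker callback.  Pools are walked in round-robin order so
 * that repeated low-memory events do not always shrink the same pool first.
 */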
/**
 * Callback for mm to request pool to reduce number of pages held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state.  If any pages
 * have already changed their caching state, put them back in the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		vm_page_t *failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
		ttm_vm_page_free(failed_pages[i]);
	}
}

static vm_paddr_t
ttm_alloc_high_bound(int ttm_alloc_flags)
{

	return ((ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
	    VM_MAX_ADDRESS);
}
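
/*
 * The allocations below ask the VM for a single page within the allowed
 * physical range, with the requested memory attribute already applied.  On
 * failure the allocation is retried up to three times, asking the pageout
 * code to reclaim memory in that range (vm_pageout_grow_cache()) between
 * attempts.
 */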
/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t *caching_array;
	vm_page_t p;
	int r = 0;
	unsigned i, cpages, aflags;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
	int tries;

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
	    VM_ALLOC_ZERO : 0);

	/* allocate array for page caching change */
	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

	for (i = 0, cpages = 0; i < count; ++i) {
		tries = 0;
retry:
		p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
		    ttm_alloc_high_bound(ttm_alloc_flags),
		    PAGE_SIZE, 0, ttm_caching_state_to_vm(cstate));
		if (!p) {
			if (tries < 3) {
				vm_pageout_grow_cache(tries, 0,
				    ttm_alloc_high_bound(ttm_alloc_flags));
				tries++;
				goto retry;
			}
			printf("[TTM] Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p->oflags &= ~VPO_UNMANAGED;
		p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, p, plinks.q);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	free(caching_array, M_TEMP);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		mtx_unlock(&pool->lock);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
		    ttm_flags, cstate, alloc_size);
		mtx_lock(&pool->lock);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printf("[TTM] Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			TAILQ_FOREACH(p, &new_pages, plinks.q) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
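
/*
 * Allocation path: ttm_get_pages() first takes as many pages as possible
 * from the matching pool (after optionally topping it up via
 * ttm_page_pool_fill_locked()), then allocates any remainder directly with
 * ttm_alloc_new_pages().
 */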
/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct pglist *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	vm_page_t p;
	unsigned i;

	mtx_lock(&pool->lock);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, plinks.q);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, plinks.q);
		TAILQ_INSERT_TAIL(pages, p, plinks.q);
	}
	pool->npages -= count;
	count = 0;
out:
	mtx_unlock(&pool->lock);
	return count;
}

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				ttm_vm_page_free(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	mtx_lock(&pool->lock);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	mtx_unlock(&pool->lock);
	if (npages)
		ttm_page_pool_free(pool, npages);
}
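
/*
 * When TTM_PAGE_FLAG_ZERO_ALLOC is requested, ttm_get_pages() passes
 * VM_ALLOC_ZERO to the VM for freshly allocated pages and explicitly zeroes
 * pages taken from the pool with pmap_zero_page(), since pooled pages may
 * hold stale data from their previous user.
 */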
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	vm_page_t p = NULL;
	int gfp_flags, aflags;
	unsigned count;
	int r;
	int tries;

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

	/* No pool for cached pages */
	if (pool == NULL) {
		for (r = 0; r < npages; ++r) {
			tries = 0;
retry:
			p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
			    ttm_alloc_high_bound(flags), PAGE_SIZE,
			    0, ttm_caching_state_to_vm(cstate));
			if (!p) {
				if (tries < 3) {
					vm_pageout_grow_cache(tries, 0,
					    ttm_alloc_high_bound(flags));
					tries++;
					goto retry;
				}
				printf("[TTM] Unable to allocate page\n");
				return -ENOMEM;
			}
			p->oflags &= ~VPO_UNMANAGED;
			p->flags |= PG_FICTITIOUS;
			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, plinks.q) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pmap_zero_page(p);
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, plinks.q) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * to the pool. */
			printf("[TTM] Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
				      char *name)
{
	mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->ttm_page_alloc_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

	if (_manager != NULL)
		printf("[TTM] manager != NULL\n");
	printf("[TTM] Initializing pool allocator\n");

	_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	refcount_init(&_manager->kobj_ref, 1);
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	printf("[TTM] Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	if (refcount_release(&_manager->kobj_ref))
		ttm_pool_kobj_release(_manager);
	_manager = NULL;
}
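
/*
 * ttm_pool_populate() and ttm_pool_unpopulate() are the backing-store hooks
 * for ttm_tt backends that do not need DMA-coherent memory.  A driver's
 * ttm_tt populate callbacks would typically just forward to them; a minimal
 * sketch (the "foo" driver is hypothetical):
 *
 *	static int
 *	foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *
 *		return (ttm_pool_populate(ttm));
 *	}
 *
 *	static void
 *	foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *
 *		ttm_pool_unpopulate(ttm);
 *	}
 */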
int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
#endif