Lines Matching full:pool
35 /* simple list-based uncached page pool
36 * - Pool collects recently freed pages for reuse
55 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
57 * @lock: Protects the shared pool from concurrent access. Must be used with
58 * irqsave/irqrestore variants because the pool allocator may be called from
61 * @list: Pool of free uc/wc pages for fast reuse.
63 * @npages: Number of pages in pool.
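Taken together with the fields touched further down this listing (fill_lock, nfrees, nrefills, ttm_page_alloc_flags, name), the documented fields suggest roughly the following layout. A sketch only; the exact declaration falls outside the matched lines, so types and ordering are assumptions:

	struct ttm_page_pool {
		struct mtx	lock;		/* guards every field below */
		bool		fill_lock;	/* serializes pool refills */
		struct pglist	list;		/* free uc/wc pages, LRU at the tail */
		int		ttm_page_alloc_flags;
		unsigned	npages;		/* pages currently in the pool */
		char		*name;
		unsigned long	nfrees;		/* stat: pages freed back to the VM */
		unsigned long	nrefills;	/* stat: successful refills */
	};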
78 * Limits for the pool. They are handled without locks because the only place where
94 * Manager is read only object for pool code so it doesn't need locking.
96 * @free_interval: minimum number of jiffies between freeing pages from pool.
97 * @page_alloc_inited: reference counting for pool allocation.
98 * @work: Work that is used to shrink the pool. Work is only run when there is
102 * @pools: All pool objects in use.
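Together with options.max_size and pools[NUM_POOLS] used later in this listing, the manager doc comment implies a wrapper of roughly this shape. Field names follow the documentation above; the types (notably struct task for the deferred shrink work) are assumptions:

	struct ttm_pool_opts {
		unsigned	alloc_size;	/* pages allocated per refill */
		unsigned	max_size;	/* per-pool page limit */
	};

	struct ttm_pool_manager {
		unsigned int		page_alloc_inited;	/* init refcount */
		unsigned long		free_interval;	/* min jiffies between frees */
		struct task		work;		/* deferred pool shrink */
		struct ttm_pool_opts	options;
		struct ttm_page_pool	pools[NUM_POOLS];
	};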
126 MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");
305 * Select the right pool for the requested caching state and ttm flags. */
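The selection at line 305 boils down to computing a pool index from the caching state plus the DMA32 flag. A minimal sketch of that logic, modeled on the upstream Linux allocator this port follows; note that cached pages get no pool at all, matching lines 740-741 below:

	static struct ttm_page_pool *
	ttm_get_pool(int flags, enum ttm_caching_state cstate)
	{
		int pool_index;

		if (cstate == tt_cached)
			return (NULL);	/* cached pages bypass the pools */

		pool_index = (cstate == tt_wc) ? 0x0 : 0x1;
		if (flags & TTM_PAGE_FLAG_DMA32)
			pool_index |= 0x2;	/* separate wc/uc pools below 4 GiB */

		return (&_manager->pools[pool_index]);
	}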
337 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, in ttm_pool_update_free_locked() argument
340 pool->npages -= freed_pages; in ttm_pool_update_free_locked()
341 pool->nfrees += freed_pages; in ttm_pool_update_free_locked()
345 * Free pages from pool.
350 * @pool: the pool to free pages from
351 * @nr_free: number of pages to free; pass FREE_ALL_PAGES to free every page in the pool
353 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) in ttm_page_pool_free() argument
368 mtx_lock(&pool->lock); in ttm_page_pool_free()
370 TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) { in ttm_page_pool_free()
377 /* remove range of pages from the pool */ in ttm_page_pool_free()
379 TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q); in ttm_page_pool_free()
381 ttm_pool_update_free_locked(pool, freed_pages); in ttm_page_pool_free()
384 * we unlock the pool to prevent stalling. in ttm_page_pool_free()
386 mtx_unlock(&pool->lock); in ttm_page_pool_free()
412 /* remove range of pages from the pool */ in ttm_page_pool_free()
415 TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q); in ttm_page_pool_free()
417 ttm_pool_update_free_locked(pool, freed_pages); in ttm_page_pool_free()
421 mtx_unlock(&pool->lock); in ttm_page_pool_free()
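Lines 368-421 trace the batching pattern in ttm_page_pool_free(): pages are collected from the tail of the list under the lock, the whole batch is detached, and the lock is dropped before the pages are handed back, because changing page caching attributes is costly; the scan then restarts since the saved iterator may be stale. A condensed sketch of that shape; NUM_PAGES_TO_ALLOC, FREE_ALL_PAGES and ttm_pages_put() are assumed from the upstream allocator, and the real function heap-allocates the batch buffer instead of using the stack:

	static int
	ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
	{
		vm_page_t p, p1;
		vm_page_t pages_to_free[NUM_PAGES_TO_ALLOC];
		unsigned i, freed_pages = 0;
		unsigned npages_to_free = MIN(nr_free, NUM_PAGES_TO_ALLOC);

	restart:
		mtx_lock(&pool->lock);
		TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
			if (freed_pages >= npages_to_free)
				break;
			pages_to_free[freed_pages++] = p;
			if (freed_pages >= NUM_PAGES_TO_ALLOC) {
				/* detach the whole batch while still locked */
				for (i = 0; i < freed_pages; i++)
					TAILQ_REMOVE(&pool->list,
					    pages_to_free[i], plinks.q);
				ttm_pool_update_free_locked(pool, freed_pages);
				/* changing caching is costly, so unlock first */
				mtx_unlock(&pool->lock);
				ttm_pages_put(pages_to_free, freed_pages);
				if (nr_free != FREE_ALL_PAGES)
					nr_free -= freed_pages;
				npages_to_free = MIN(nr_free, NUM_PAGES_TO_ALLOC);
				freed_pages = 0;
				if (nr_free)
					goto restart;	/* iterator is stale */
				return (0);
			}
		}
		/* detach and free the final partial batch */
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q);
		ttm_pool_update_free_locked(pool, freed_pages);
		mtx_unlock(&pool->lock);
		if (freed_pages)
			ttm_pages_put(pages_to_free, freed_pages);
		return (nr_free);
	}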
442 * Callback for mm to request the pool to reduce the number of pages held.
449 struct ttm_page_pool *pool; in ttm_pool_mm_shrink() local
453 /* select start pool in round robin fashion */ in ttm_pool_mm_shrink()
458 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; in ttm_pool_mm_shrink()
459 shrink_pages = ttm_page_pool_free(pool, nr_free); in ttm_pool_mm_shrink()
461 /* return estimated number of unused pages in pool */ in ttm_pool_mm_shrink()
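The shrink callback walks every pool starting from a rotating offset, so back-to-back low-memory events don't always drain pool 0 first. A sketch of the loop around lines 449-461; the atomic offset counter, the per-event budget, and the closing ttm_pool_get_num_unused_pages() estimate are assumptions based on the upstream code:

	static int
	ttm_pool_mm_shrink(void *arg)
	{
		static unsigned int start_pool = 0;
		unsigned i, pool_offset = atomic_fetchadd_int(&start_pool, 1);
		struct ttm_page_pool *pool;
		int shrink_pages = 100;		/* pages to reclaim per event */

		pool_offset = pool_offset % NUM_POOLS;
		/* select start pool in round robin fashion */
		for (i = 0; i < NUM_POOLS; ++i) {
			unsigned nr_free = shrink_pages;
			if (shrink_pages == 0)
				break;
			pool = &_manager->pools[(i + pool_offset) % NUM_POOLS];
			shrink_pages = ttm_page_pool_free(pool, nr_free);
		}
		/* return estimated number of unused pages in pool */
		return (ttm_pool_get_num_unused_pages());
	}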
503 * pool.
542 /* store already allocated pages in the pool after in ttm_alloc_new_pages()
595 * Fill the given pool if there aren't enough pages and the requested number of
598 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, in ttm_page_pool_fill_locked() argument
605 * Only allow one pool fill operation at a time. in ttm_page_pool_fill_locked()
606 * If the pool doesn't have enough pages for the allocation, new pages are in ttm_page_pool_fill_locked()
607 * allocated from outside the pool. in ttm_page_pool_fill_locked()
609 if (pool->fill_lock) in ttm_page_pool_fill_locked()
612 pool->fill_lock = true; in ttm_page_pool_fill_locked()
615 * pages in a pool we fill the pool up first. */ in ttm_page_pool_fill_locked()
617 && count > pool->npages) { in ttm_page_pool_fill_locked()
623 * drop the pool->lock. in ttm_page_pool_fill_locked()
625 mtx_unlock(&pool->lock); in ttm_page_pool_fill_locked()
628 r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags, in ttm_page_pool_fill_locked()
630 mtx_lock(&pool->lock); in ttm_page_pool_fill_locked()
633 TAILQ_CONCAT(&pool->list, &new_pages, plinks.q); in ttm_page_pool_fill_locked()
634 ++pool->nrefills; in ttm_page_pool_fill_locked()
635 pool->npages += alloc_size; in ttm_page_pool_fill_locked()
637 printf("[TTM] Failed to fill pool (%p)\n", pool); in ttm_page_pool_fill_locked()
638 /* If we have any pages left, put them into the pool. */ in ttm_page_pool_fill_locked()
639 TAILQ_FOREACH(p, &new_pages, plinks.q) { in ttm_page_pool_fill_locked()
642 TAILQ_CONCAT(&pool->list, &new_pages, plinks.q); in ttm_page_pool_fill_locked()
643 pool->npages += cpages; in ttm_page_pool_fill_locked()
647 pool->fill_lock = false; in ttm_page_pool_fill_locked()
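On the failure path (lines 637-643) the pages that ttm_alloc_new_pages() did produce before the error are not leaked: they are counted and spliced into the pool anyway, and npages is bumped by that count rather than by the full alloc_size. A condensed sketch of the branch, assuming new_pages holds the partial result:

	} else {
		printf("[TTM] Failed to fill pool (%p)\n", pool);
		/* count the pages that survived the failed allocation... */
		cpages = 0;
		TAILQ_FOREACH(p, &new_pages, plinks.q)
			++cpages;
		/* ...and salvage them into the pool rather than drop them */
		TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
		pool->npages += cpages;
	}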
651 * Cut 'count' pages from the pool and put them on the return list.
655 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, in ttm_page_pool_get_pages() argument
664 mtx_lock(&pool->lock); in ttm_page_pool_get_pages()
665 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count); in ttm_page_pool_get_pages()
667 if (count >= pool->npages) { in ttm_page_pool_get_pages()
668 /* take all pages from the pool */ in ttm_page_pool_get_pages()
669 TAILQ_CONCAT(pages, &pool->list, plinks.q); in ttm_page_pool_get_pages()
670 count -= pool->npages; in ttm_page_pool_get_pages()
671 pool->npages = 0; in ttm_page_pool_get_pages()
675 p = TAILQ_FIRST(&pool->list); in ttm_page_pool_get_pages()
676 TAILQ_REMOVE(&pool->list, p, plinks.q); in ttm_page_pool_get_pages()
679 pool->npages -= count; in ttm_page_pool_get_pages()
682 mtx_unlock(&pool->lock); in ttm_page_pool_get_pages()
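Lines 664-682 show the two cases in ttm_page_pool_get_pages(): either the request swallows the whole pool (lines 667-671) and the caller is left to allocate the shortfall, or only the first 'count' entries move from the head of the pool list onto the caller's list. A sketch of the partial case around lines 675-679:

	/* move the first 'count' pages onto the caller's list */
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, plinks.q);
		TAILQ_INSERT_TAIL(pages, p, plinks.q);
	}
	pool->npages -= count;
	count = 0;	/* nothing left for the caller to allocate */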
686 /* Put all pages in the pages list into the correct pool to wait for reuse */
690 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); in ttm_put_pages() local
693 if (pool == NULL) { in ttm_put_pages()
694 /* No pool for this memory type so free the pages */ in ttm_put_pages()
704 mtx_lock(&pool->lock); in ttm_put_pages()
707 TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q); in ttm_put_pages()
709 pool->npages++; in ttm_put_pages()
712 /* Check that we don't go over the pool limit */ in ttm_put_pages()
714 if (pool->npages > _manager->options.max_size) { in ttm_put_pages()
715 npages = pool->npages - _manager->options.max_size; in ttm_put_pages()
721 mtx_unlock(&pool->lock); in ttm_put_pages()
723 ttm_page_pool_free(pool, npages); in ttm_put_pages()
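The limit check at lines 712-715 computes how far the pool overshot options.max_size, and the trim at line 723 runs only after the lock is dropped. Upstream also rounds the excess up to at least NUM_PAGES_TO_ALLOC so the expensive caching-attribute transitions happen in batches; a sketch of that tail, with the batching floor assumed from the upstream allocator:

	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least a full batch to amortize wb transitions */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	mtx_unlock(&pool->lock);
	if (npages)
		ttm_page_pool_free(pool, npages);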
733 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); in ttm_get_pages() local
740 /* No pool for cached pages */ in ttm_get_pages()
741 if (pool == NULL) { in ttm_get_pages()
753 /* combine the zero flag with the pool flags */ in ttm_get_pages()
754 gfp_flags = flags | pool->ttm_page_alloc_flags; in ttm_get_pages()
756 /* First we take pages from the pool */ in ttm_get_pages()
758 npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages); in ttm_get_pages()
764 /* clear the pages coming from the pool if requested */ in ttm_get_pages()
771 /* If the pool didn't have enough pages, allocate new ones. */ in ttm_get_pages()
773 /* ttm_alloc_new_pages doesn't reference pool so we can run in ttm_get_pages()
784 * the pool. */ in ttm_get_pages()
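Putting lines 733-784 together: the pool satisfies what it can, any remainder is allocated outside the lock, and on failure the pages already taken are handed back via ttm_put_pages(). A hypothetical caller sketch; the exact prototypes fall outside the matched lines, so the signatures below are assumptions:

	vm_page_t pages[4] = { NULL };
	int r;

	/* request four write-combined, DMA32-capable pages */
	r = ttm_get_pages(pages, 4, TTM_PAGE_FLAG_DMA32, tt_wc);
	if (r == 0) {
		/* ... bind the pages into a TTM and use them ... */
		ttm_put_pages(pages, 4, TTM_PAGE_FLAG_DMA32, tt_wc);
	}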
794 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, in ttm_page_pool_init_locked() argument
797 mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF); in ttm_page_pool_init_locked()
798 pool->fill_lock = false; in ttm_page_pool_init_locked()
799 TAILQ_INIT(&pool->list); in ttm_page_pool_init_locked()
800 pool->npages = pool->nfrees = 0; in ttm_page_pool_init_locked()
801 pool->ttm_page_alloc_flags = flags; in ttm_page_pool_init_locked()
802 pool->name = name; in ttm_page_pool_init_locked()
810 printf("[TTM] Initializing pool allocator\n"); in ttm_page_alloc_init()
835 printf("[TTM] Finalizing pool allocator\n"); in ttm_page_alloc_fini()
906 char *h[] = {"pool", "refills", "pages freed", "size"};
908 seq_printf(m, "No pool allocator running.\n");
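Lines 906-908 come from the statistics dump: h[] is the header row, and the "No pool allocator running." message covers the case where the manager was never initialized. The rest of the function presumably walks the pools and prints one row per pool; a sketch consistent with the header fields and the counters kept in each pool (the format widths are assumptions):

	seq_printf(m, "%6s %12s %13s %8s\n", h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];
		seq_printf(m, "%6s %12ld %13ld %8d\n",
		    p->name, p->nrefills, p->nfrees, p->npages);
	}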