Lines Matching full:caching

26 /* Pooling of allocated pages is necessary because changing the caching
74 * not committed caching transition from write-back to @tt_caching.
77 * @tt_caching: The requested cpu-caching for the pages allocated.
98 * @page_caching: The caching mode requested by the struct ttm_tt
187 /* Reset the caching and pages of size 1 << order */
188 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
199 if (caching != ttm_cached && !PageHighMem(p))
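The matches at 187-199 are the page-free path of what is, going by the ttm_pool_* identifiers, TTM's page pool: per the comment at 187, the caching of a 1 << order page block is reset before the pages are released, and the condition at 199 skips that reset when the pages are already write-back cached or are highmem (no linear mapping to fix up). Below is a small standalone model of just that rule; set_pages_wb() and the highmem test are stubs here (only the ttm_caching enumerator names are taken from TTM, everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Simplified mirror of enum ttm_caching; only the enumerator names are
 * taken from TTM, the rest of this file is an illustrative model. */
enum ttm_caching { ttm_uncached, ttm_write_combined, ttm_cached };

/* Stubs standing in for the kernel helpers on the real path. */
static void set_pages_wb(void *page, unsigned long npages)
{
	(void)page;
	printf("reset %lu page(s) to write-back\n", npages);
}

static bool page_is_highmem(void *page)
{
	(void)page;
	return false;	/* highmem pages have no linear mapping to fix up */
}

/* Model of the check at 199: only pages whose caching was changed away
 * from write-back, and that are actually in the linear map, need the
 * costly reset before being freed. */
static void model_free_page(void *page, enum ttm_caching caching,
			    unsigned int order)
{
	if (caching != ttm_cached && !page_is_highmem(page))
		set_pages_wb(page, 1ul << order);
	/* ...the 1 << order pages would then go back to the system. */
}

int main(void)
{
	model_free_page(NULL, ttm_write_combined, 2);	/* resets 4 pages */
	model_free_page(NULL, ttm_cached, 2);		/* nothing to reset */
	return 0;
}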
218 /* Apply any cpu-caching deferred during page allocation */
311 enum ttm_caching caching, unsigned int order)
314 pt->caching = caching;
334 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
337 /* Return the pool_type to use for the given caching and order */
339 enum ttm_caching caching,
343 return &pool->caching[caching].orders[order];
346 switch (caching) {
349 return &pool->caching[caching].orders[order];
357 return &pool->caching[caching].orders[order];
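The cluster at 337-357 is the pool-selection helper: there is one pool per (caching mode, order) pair, and the hits show the &pool->caching[caching].orders[order] lookup, while the switch (caching) at 346 hints at per-caching special cases the hits do not show. A compilable sketch of that two-dimensional table, with NUM_CACHING_TYPES/NUM_ORDERS as illustrative stand-ins for the kernel's constants and any fallback logic omitted:

#include <stddef.h>

/* Only the enumerator names are taken from TTM; the rest is a model. */
enum ttm_caching { ttm_uncached, ttm_write_combined, ttm_cached };

#define NUM_CACHING_TYPES 3	/* stand-in for TTM_NUM_CACHING_TYPES */
#define NUM_ORDERS 11		/* illustrative stand-in for the order count */

/* Minimal stand-in for struct ttm_pool_type: one pool per
 * (caching, order) combination, as suggested by pt->caching = caching
 * at 314 and pt->order in the free calls at 334/386. */
struct pool_type {
	enum ttm_caching caching;
	unsigned int order;
	/* the list of pooled pages would live here */
};

struct pool {
	struct {
		struct pool_type orders[NUM_ORDERS];
	} caching[NUM_CACHING_TYPES];
};

/* Model of the lookup at 343/349/357: the pool to use is indexed by the
 * requested caching mode and the allocation order.  The real helper also
 * has fallbacks and can decline pooling entirely; that is omitted here. */
struct pool_type *select_type(struct pool *pool, enum ttm_caching caching,
			      unsigned int order)
{
	if (order >= NUM_ORDERS)
		return NULL;
	return &pool->caching[caching].orders[order];
}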
386 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
457 const dma_addr_t *dma_addr, enum ttm_caching caching)
469 pt = ttm_pool_select_type(pool, caching, order);
478 ttm_pool_free_page(pool, caching, order, page);
602 * If the caching is consistent, update any deferred caching. Otherwise
603 * stage this page for an upcoming deferred caching update.
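The hits at 218, 602-603 and 687 describe the deferred-caching scheme: the requested tt caching is recorded up front, freshly allocated (still write-back) pages are staged, and the expensive transition is applied to the whole staged run at once, presumably through batched x86 helpers along the lines of set_pages_array_wc()/set_pages_array_uc(). A standalone model of that batching, with illustrative names throughout:

#include <stddef.h>
#include <stdio.h>

enum ttm_caching { ttm_uncached, ttm_write_combined, ttm_cached };

/* Illustrative allocation state: new pages are appended at "pages", and
 * "caching_divide" marks the first page whose caching transition has not
 * been committed yet (cf. the hit at 74 and the assignment at 687). */
struct alloc_state {
	void **pages;		/* next free slot in the tt page array */
	void **caching_divide;	/* first page still in write-back mode */
	enum ttm_caching tt_caching;
};

/* Stubs for the batched attribute changes; names are illustrative. */
static void set_range_wc(void **pages, size_t n)
{
	(void)pages;
	printf("write-combine %zu page(s)\n", n);
}

static void set_range_uc(void **pages, size_t n)
{
	(void)pages;
	printf("uncache %zu page(s)\n", n);
}

/* Model of the deferred apply: when the staged range must become
 * consistent, convert the whole run in one call and move the divide
 * forward.  Batching is the point of the deferral, since each caching
 * transition is costly (cf. the pooling rationale at 26). */
int apply_deferred_caching(struct alloc_state *a)
{
	void **start = a->caching_divide;
	size_t n = a->pages - a->caching_divide;

	a->caching_divide = a->pages;
	if (!n)
		return 0;

	switch (a->tt_caching) {
	case ttm_cached:
		break;			/* already write-back, nothing to do */
	case ttm_write_combined:
		set_range_wc(start, n);
		break;
	case ttm_uncached:
		set_range_uc(start, n);
		break;
	}
	return 0;
}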
645 * @caching: The page caching mode used by the range.
655 enum ttm_caching caching,
675 nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
687 alloc->tt_caching = tt->caching;
727 page_caching = tt->caching;
754 page_caching = tt->caching;
790 ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
885 ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
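The ttm_pool_free_range() hits at 645-675 and its callers at 790 and 885 show that a range of tt pages is always freed together with the caching mode those pages currently carry. In the call at 790 only the pages before the caching divide are freed with the tt's caching; the remainder is presumably still plain write-back and released by a second call that does not match this search, so that part is an assumption here. A sketch of the split, with free_range() as a hypothetical stand-in for ttm_pool_free_range():

#include <stddef.h>

enum ttm_caching { ttm_uncached, ttm_write_combined, ttm_cached };

struct tt {
	void **pages;
	size_t num_pages;
	enum ttm_caching caching;
};

/* Hypothetical stand-in for ttm_pool_free_range(): frees tt pages in
 * [start, end), telling the pool which caching mode those pages are
 * currently in so they can land in the matching per-caching pool (or be
 * reset to write-back before going back to the system, cf. 187-199). */
static void free_range(struct tt *tt, enum ttm_caching caching,
		       size_t start, size_t end)
{
	(void)tt; (void)caching; (void)start; (void)end;
}

/* Error-path model of the call at 790: pages before the caching divide
 * already carry the tt's requested caching; the remainder is assumed to
 * still be write-back (the second call is not among the hits above). */
void free_on_error(struct tt *tt, size_t caching_divide)
{
	free_range(tt, tt->caching, 0, caching_divide);
	free_range(tt, ttm_cached, caching_divide, tt->num_pages);
}

/* Normal tear-down model, matching the call at 885: the whole range is
 * freed with the tt's caching mode. */
void free_all(struct tt *tt)
{
	free_range(tt, tt->caching, 0, tt->num_pages);
}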
923 * caching different than writeback.
931 ttm_pool_free_range(restore->pool, tt, tt->caching,
985 if (tt->caching != ttm_cached)
1084 if (pt != &pool->caching[i].orders[j])
1122 if (pt != &pool->caching[i].orders[j])
1268 ttm_pool_debugfs_orders(pool->caching[i].orders, m);
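The hits at 1084, 1122 and 1268 are the tear-down and debugfs paths walking the same table from the other side: every caching mode times every order. A minimal model of that walk, again with illustrative constants and a fabricated pages_cached counter standing in for the real per-pool state:

#include <stdio.h>

#define NUM_CACHING_TYPES 3	/* stand-in for TTM_NUM_CACHING_TYPES */
#define NUM_ORDERS 11		/* illustrative */

/* Fabricated per-pool state; the real struct ttm_pool_type holds more. */
struct pool_type {
	unsigned long pages_cached;
};

struct pool {
	struct {
		struct pool_type orders[NUM_ORDERS];
	} caching[NUM_CACHING_TYPES];
};

/* Model of the fini/debugfs walk: every (caching mode, order) pool is
 * visited once, mirroring pool->caching[i].orders[j] at 1084/1122 and
 * pool->caching[i].orders at 1268. */
void walk_pools(struct pool *pool)
{
	for (unsigned int i = 0; i < NUM_CACHING_TYPES; ++i)
		for (unsigned int j = 0; j < NUM_ORDERS; ++j)
			printf("caching %u order %u: %lu page(s)\n", i, j,
			       pool->caching[i].orders[j].pages_cached);
}

int main(void)
{
	struct pool pool = { 0 };

	walk_pools(&pool);
	return 0;
}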