Lines Matching defs:pool

24 * allocated from page pool. There is no cache line dirtying for 'struct page'
25 * when a page is recycled back to the page pool.
29 * page allocated from page pool. Page splitting enables memory saving and thus
48 * the same page when a page is split. The API user must setup pool->p.max_len
49 * and pool->p.offset correctly and ensure that page_pool_put_page() is called
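
Taken together, these comment fragments say a driver relying on page splitting must size pool->p.max_len and pool->p.offset at creation time and later release every reference with page_pool_put_page(). A minimal creation sketch, assuming PP_FLAG_DMA_MAP and PP_FLAG_DMA_SYNC_DEV are wanted; all numeric values and the function name are purely illustrative:

#include <linux/dma-mapping.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>

/* Illustrative sketch only; ring size, headroom and sync length are made up. */
static struct page_pool *my_rx_create_pool(struct device *dev, int nid)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,
		.nid		= nid,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= 256,			/* headroom before packet data */
		.max_len	= PAGE_SIZE - 256,	/* longest region synced for the device */
	};

	return page_pool_create(&pp_params);	/* returns ERR_PTR() on failure */
}
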
67 bool page_pool_get_stats(const struct page_pool *pool,
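
page_pool_get_stats() fills a caller-provided struct page_pool_stats and is only available under CONFIG_PAGE_POOL_STATS. A hedged sketch of reading two counters; the field names (alloc_stats.fast, recycle_stats.ring) are assumptions from memory of that struct:

#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};

	if (page_pool_get_stats(pool, &stats))
		pr_info("fast allocs: %llu, ring recycles: %llu\n",
			(unsigned long long)stats.alloc_stats.fast,
			(unsigned long long)stats.recycle_stats.ring);
#endif
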
88 * @pool: pool from which to allocate
92 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
96 return page_pool_alloc_pages(pool, gfp);
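
A sketch of the whole-page allocation path in an RX refill loop; reading the DMA address this way assumes the pool was created with PP_FLAG_DMA_MAP, and the surrounding loop and ring details are hypothetical:

	struct page *page;
	dma_addr_t dma;

	page = page_pool_dev_alloc_pages(pool);	/* GFP_ATOMIC | __GFP_NOWARN internally */
	if (!page)
		break;				/* out of memory, retry refill later */

	dma = page_pool_get_dma_addr(page);	/* mapping already done by the pool */
	/* ... post dma + pool->p.offset to the hardware RX ring ... */
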
101 * @pool: pool from which to allocate
109 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
115 return page_pool_alloc_frag(pool, offset, size, gfp);
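
The frag variant hands out a sub-page chunk of a shared pool page and reports where the chunk starts. A sketch with an illustrative 2048-byte request:

	unsigned int offset;
	struct page *page;

	page = page_pool_dev_alloc_frag(pool, &offset, 2048);
	if (!page)
		break;
	/* The buffer starts at page_address(page) + offset; it is still
	 * released through page_pool_put_page()/page_pool_put_full_page().
	 */
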
118 static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool,
122 unsigned int max_size = PAGE_SIZE << pool->p.order;
128 return page_pool_alloc_netmems(pool, gfp);
131 netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
139 if (pool->frag_offset + *size > max_size) {
141 pool->frag_offset = max_size;
147 static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
153 return page_pool_alloc_netmem(pool, offset, size, gfp);
156 static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool)
160 return page_pool_alloc_netmems(pool, gfp);
163 static inline struct page *page_pool_alloc(struct page_pool *pool,
167 return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
172 * @pool: pool from which to allocate
182 static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
188 return page_pool_alloc(pool, offset, size, gfp);
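
page_pool_dev_alloc() decides between a frag and a full page based on the requested size, and the frag_offset handling above can enlarge the chunk handed back. A sketch; the request length is illustrative:

	unsigned int offset, size = 2048;	/* requested length, illustrative */
	struct page *page;

	page = page_pool_dev_alloc(pool, &offset, &size);
	if (!page)
		break;
	/* On return, @offset points at this caller's chunk and @size may have
	 * grown (up to the whole page) to reflect what was actually granted.
	 */
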
191 static inline void *page_pool_alloc_va(struct page_pool *pool,
198 page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
208 * @pool: pool from which to allocate
216 static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
221 return page_pool_alloc_va(pool, size, gfp);
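
The _va helpers return a kernel virtual address instead of a page/offset pair; note how page_pool_alloc_va() masks out __GFP_HIGHMEM so the page is always mappable. A sketch, paired with page_pool_free_va() which appears further down this listing:

	unsigned int size = 1536;	/* illustrative */
	void *va;

	va = page_pool_dev_alloc_va(pool, &size);
	if (!va)
		break;
	/* ... build or parse the buffer through the VA ... */
	page_pool_free_va(pool, va, false);	/* false: not in the pool's NAPI context */
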
226 * @pool: pool from which page was allocated
232 page_pool_get_dma_dir(const struct page_pool *pool)
234 return pool->p.dma_dir;
250 * objects like skbs marked for page pool recycling.
331 static inline void page_pool_put_netmem(struct page_pool *pool,
343 page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
348 * page_pool_put_page() - release a reference to a page pool page
349 * @pool: pool from which page was allocated
356 * the allocator owns the page and will try to recycle it in one of the pool
360 static inline void page_pool_put_page(struct page_pool *pool,
365 page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
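
In driver terms: a dma_sync_size of -1 asks for a sync of the full pool->p.max_len region before the page is reused, while a smaller value limits the sync to the bytes this buffer actually used; allow_direct may only be true from the pool's own NAPI/softirq context. A sketch of both cases (pkt_len is hypothetical):

	/* RX completion inside NAPI: sync back only what this packet used. */
	page_pool_put_page(pool, page, pkt_len, true);

	/* Teardown or error path outside NAPI: full sync, no direct recycle. */
	page_pool_put_page(pool, page, -1, false);
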
369 static inline void page_pool_put_full_netmem(struct page_pool *pool,
373 page_pool_put_netmem(pool, netmem, -1, allow_direct);
377 * page_pool_put_full_page() - release a reference on a page pool page
378 * @pool: pool from which page was allocated
385 static inline void page_pool_put_full_page(struct page_pool *pool,
388 page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
392 * page_pool_recycle_direct() - release a reference on a page pool page
393 * @pool: pool from which page was allocated
397 * (e.g. NAPI), since it will recycle the page directly into the pool fast cache.
399 static inline void page_pool_recycle_direct(struct page_pool *pool,
402 page_pool_put_full_page(pool, page, true);
405 static inline void page_pool_recycle_direct_netmem(struct page_pool *pool,
408 page_pool_put_full_netmem(pool, netmem, true);
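
page_pool_recycle_direct() is simply page_pool_put_full_page() with allow_direct == true, so it is only valid where the comment says, i.e. in the pool's own NAPI context. A sketch (the function name is hypothetical):

static void my_reuse_rx_page(struct page_pool *pool, struct page *page)
{
	/* Runs from the driver's NAPI poll, so recycling straight into the
	 * pool's lockless fast cache is safe.
	 */
	page_pool_recycle_direct(pool, page);
}
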
416 * @pool: pool from which va was allocated
422 static inline void page_pool_free_va(struct page_pool *pool, void *va,
425 page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
440 * @page: page allocated from a page pool
442 * Fetch the DMA address of the page. The page pool to which the page belongs
450 static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
454 dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
455 offset + pool->p.offset, dma_sync_size,
456 page_pool_get_dma_dir(pool));
461 * @pool: &page_pool the @page belongs to
467 * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``.
471 static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
475 __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
480 page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
484 if (!pool->dma_sync_for_cpu)
487 __page_pool_dma_sync_for_cpu(pool,
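
Drivers call the non-underscored wrappers before the CPU reads data the device just wrote; the helper adds pool->p.offset itself, and the netmem variant additionally returns early when the pool does not require CPU-side syncs. A sketch (pkt_len is hypothetical):

	/* Make the freshly DMA'd packet visible to the CPU before parsing. */
	page_pool_dma_sync_for_cpu(pool, page, 0, pkt_len);

	/* Same idea for a netmem buffer; a no-op when dma_sync_for_cpu is unset. */
	page_pool_dma_sync_netmem_for_cpu(pool, netmem, 0, pkt_len);
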
492 static inline void page_pool_get(struct page_pool *pool)
494 refcount_inc(&pool->user_cnt);
497 static inline bool page_pool_put(struct page_pool *pool)
499 return refcount_dec_and_test(&pool->user_cnt);
502 static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
504 if (unlikely(pool->p.nid != new_nid))
505 page_pool_update_nid(pool, new_nid);
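
The check is just an unlikely branch, so drivers can afford to call it on every poll to keep allocations on the NUMA node currently running the queue. A sketch (ring->page_pool is a hypothetical driver field):

	/* Early in the driver's NAPI poll routine. */
	page_pool_nid_changed(ring->page_pool, numa_mem_id());
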
510 * @pool: queried page pool
512 * Check if page pool will return buffers which are unreadable to the CPU /
514 * which returns unreadable memory to the queue served by the page pool.
518 * Return: true if memory allocated by the page pool may be unreadable
520 static inline bool page_pool_is_unreadable(struct page_pool *pool)
522 return !!pool->mp_ops;
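
Since unreadable buffers come from an attached memory provider (hence the mp_ops test), a driver that must touch payload bytes on the CPU can check this once per queue and adjust its datapath. A sketch (rxq->hdr_only is a hypothetical flag):

	/* Payload may live in device or user memory the CPU cannot read;
	 * restrict this queue to header-only access.
	 */
	if (page_pool_is_unreadable(pool))
		rxq->hdr_only = true;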