/* SPDX-License-Identifier: GPL-2.0 */

/**
 * DOC: page_pool allocator
 *
 * page_pool_alloc_pages() allocates without page splitting, for buffers
 * bigger than half of the page allocated from page pool. There is no cache
 * line dirtying for 'struct page' when a page is recycled back to the page
 * pool.
 *
 * page_pool_alloc_frag() allocates with page splitting, for buffers no
 * bigger than half of the page allocated from page pool. Page splitting
 * enables memory saving and thus avoids TLB/cache misses for data access,
 * at the cost of some cache line dirtying/bouncing for 'struct page' and an
 * atomic operation for page->pp_ref_count.
 *
 * The API keeps track of in-flight pages, in order to let API users know when
 * it is safe to free a page_pool object; users must call page_pool_put_page()
 * to free the page, or attach the page_pool object to a page_pool-aware
 * object like skbs marked with skb_mark_for_recycle().
 *
 * page_pool_put_page() may be called multiple times on the same page if the
 * page is split into fragments. For the last fragment, it will either
 * recycle the page, or in case of page->_refcount > 1, it will release the DMA
 * mapping and in-flight state accounting.
 *
 * dma_sync_single_range_for_device() is only called for the last freed
 * fragment, which then does the sync_for_device operation for all fragments in
 * the same page when a page is split. The API user must set up pool->p.max_len
 * and pool->p.offset correctly and ensure that page_pool_put_page() is called
 * with dma_sync_size being -1 for the fragment API.
 */

#include <linux/dma-mapping.h>

#include <net/page_pool/types.h>
#include <net/netmem.h>

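/* A minimal creation-time sketch of the fragment-API rules above. This is
 * illustrative driver code, not part of this header: mydrv_create_pool()
 * and all numeric values are assumptions, and error handling is elided.
 */
static inline struct page_pool *mydrv_create_pool(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= 256,			/* headroom */
		.max_len	= PAGE_SIZE - 256,	/* sync length */
	};

	return page_pool_create(&pp_params);
}
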
/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats);

bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats);

/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool: pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/**
 * page_pool_dev_alloc_frag() - allocate a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: requested size
 *
 * Get a page fragment from the page allocator or page_pool caches.
 *
 * Return: allocated page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}

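/* An illustrative allocation sketch (the mydrv_* names are hypothetical,
 * not part of this header): grab a 2048-byte fragment and record where it
 * starts, so the completion path knows which part of the page is ours.
 */
struct mydrv_rx_buf {
	struct page	*page;
	unsigned int	offset;
};

static inline int mydrv_alloc_rx_buf(struct page_pool *pool,
				     struct mydrv_rx_buf *buf)
{
	buf->page = page_pool_dev_alloc_frag(pool, &buf->offset, 2048);

	return buf->page ? 0 : -ENOMEM;
}
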
static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool,
						unsigned int *offset,
						unsigned int *size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	netmem_ref netmem;

	if ((*size << 1) > max_size) {
		*size = max_size;
		*offset = 0;
		return page_pool_alloc_netmems(pool, gfp);
	}

	netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
	if (unlikely(!netmem))
		return 0;

	/* There is very likely not enough space for another fragment, so
	 * append the remaining size to the current fragment to avoid the
	 * truesize underestimate problem.
	 */
	if (pool->frag_offset + *size > max_size) {
		*size = max_size - *offset;
		pool->frag_offset = max_size;
	}

	return netmem;
}

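/* Worked example of the tail-append above, assuming PAGE_SIZE == 4096 and
 * order == 0: after one 1800-byte fragment (offset 0), a second 1800-byte
 * request gets offset 1800 and leaves a 496-byte tail that no further
 * 1800-byte fragment can use. Since frag_offset + *size (3600 + 1800)
 * exceeds max_size, the second fragment is grown to
 * *size = 4096 - 1800 = 2296 and the page is marked fully consumed, so
 * truesize accounting does not underestimate the unusable tail.
 */
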
static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_netmem(pool, offset, size, gfp);
}

static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_netmems(pool, gfp);
}

static inline struct page *page_pool_alloc(struct page_pool *pool,
					   unsigned int *offset,
					   unsigned int *size, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
}

/**
 * page_pool_dev_alloc() - allocate a page or a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: in as the requested size, out as the allocated size
 *
 * Get a page or a page fragment from the page allocator or page_pool caches
 * depending on the requested size, in order to allocate memory with the least
 * memory utilization and performance penalty.
 *
 * Return: allocated page or page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
					       unsigned int *offset,
					       unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc(pool, offset, size, gfp);
}

static inline void *page_pool_alloc_va(struct page_pool *pool,
				       unsigned int *size, gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	/* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
	page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
	if (unlikely(!page))
		return NULL;

	return page_address(page) + offset;
}

/**
 * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its
 *			      va.
 * @pool: pool from which to allocate
 * @size: in as the requested size, out as the allocated size
 *
 * This is just a thin wrapper around the page_pool_alloc() API, with the va
 * returned instead of the page it allocates.
 *
 * Return: the va for the allocated page or page fragment, otherwise NULL.
 */
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
					   unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_va(pool, size, gfp);
}

/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool: pool from which page was allocated
 *
 * Get the stored DMA direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 *
 * Return: the stored DMA direction.
 */
static inline enum dma_data_direction
page_pool_get_dma_dir(const struct page_pool *pool)
{
	return pool->p.dma_dir;
}

static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
{
	atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
}

/**
 * page_pool_fragment_page() - split a fresh page into fragments
 * @page:	page to split
 * @nr:		references to set
 *
 * pp_ref_count represents the number of outstanding references to the page,
 * which will be freed using page_pool APIs (rather than page allocator APIs
 * like put_page()). Such references are usually held by page_pool-aware
 * objects like skbs marked for page pool recycling.
 *
 * This helper allows the caller to take ownership of the freeing side of a
 * page in the pool. This might be useful when implementing
 * "fragment allocators" to save atomic operations - either when they know
 * upfront how many references they will need, or to take MAX references and
 * release the remaining ones with a single atomic dec via
 * page_pool_unref_page().
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	page_pool_fragment_netmem(page_to_netmem(page), nr);
}

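/* An illustrative "fragment allocator" sketch (the MYDRV_* and mydrv_*
 * names are hypothetical): pre-fragment a fresh page into equal RX buffers,
 * so pp_ref_count is set once with a plain write instead of one atomic
 * increment per buffer. Each consumer later drops its reference via
 * page_pool_put_page() or page_pool_unref_page().
 */
#define MYDRV_FRAG_SIZE		2048
#define MYDRV_FRAGS_PER_PAGE	(PAGE_SIZE / MYDRV_FRAG_SIZE)

static inline struct page *mydrv_alloc_prefragged_page(struct page_pool *pool)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (page)
		page_pool_fragment_page(page, MYDRV_FRAGS_PER_PAGE);

	return page;
}
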
static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
	atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
	long ret;

	/* If nr == pp_ref_count then we have cleared all remaining
	 * references to the page; an atomic_read is much cheaper than an
	 * atomic update for this common case.
	 */
	if (atomic_long_read(pp_ref_count) == nr) {
		/* As we have ensured nr is always one for the constant case
		 * using the BUILD_BUG_ON(), only need to handle the
		 * non-constant case here for pp_ref_count draining, which is
		 * a rare case.
		 */
		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
		if (!__builtin_constant_p(nr))
			atomic_long_set(pp_ref_count, 1);

		return 0;
	}

	ret = atomic_long_sub_return(nr, pp_ref_count);
	WARN_ON(ret < 0);

	/* We are the last user here too, reset pp_ref_count back to 1 to
	 * ensure all pages have been partitioned into 1 piece initially.
	 */
	if (unlikely(!ret))
		atomic_long_set(pp_ref_count, 1);

	return ret;
}

static inline long page_pool_unref_page(struct page *page, long nr)
{
	return page_pool_unref_netmem(page_to_netmem(page), nr);
}

static inline void page_pool_put_netmem(struct page_pool *pool,
					netmem_ref netmem,
					unsigned int dma_sync_size,
					bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_unref_netmem(netmem, 1))
		return;

	page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
#endif
}

/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt > 1 this will unmap the page. If the page refcnt is 1,
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
			     allow_direct);
}

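/* An illustrative drop-path sketch (mydrv_* is hypothetical): outside NAPI
 * context allow_direct must be false, and since HW wrote at most pkt_len
 * bytes, the eventual sync-for-device can be limited to that much.
 */
static inline void mydrv_drop_rx_page(struct page_pool *pool,
				      struct page *page, unsigned int pkt_len)
{
	page_pool_put_page(pool, page, pkt_len, false);
}
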
static inline void page_pool_put_full_netmem(struct page_pool *pool,
					     netmem_ref netmem,
					     bool allow_direct)
{
	page_pool_put_netmem(pool, netmem, -1, allow_direct);
}

/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
}

/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 *
 * Similar to page_pool_put_full_page() but caller must guarantee safe context
 * (e.g NAPI), since it will recycle the page directly into the pool fast cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

static inline void page_pool_recycle_direct_netmem(struct page_pool *pool,
						   netmem_ref netmem)
{
	page_pool_put_full_netmem(pool, netmem, true);
}

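/* The NAPI-context counterpart of the drop helper above (mydrv_* is
 * hypothetical): from the pool's own softirq context the page may go
 * straight into the lockless fast cache.
 */
static inline void mydrv_napi_drop_rx_page(struct page_pool *pool,
					   struct page *page)
{
	page_pool_recycle_direct(pool, page);
}
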
#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_free_va() - free a va into the page_pool
 * @pool: pool from which va was allocated
 * @va: va to be freed
 * @allow_direct: freed by the consumer, allow lockless caching
 *
 * Free a va allocated from page_pool_dev_alloc_va().
 */
static inline void page_pool_free_va(struct page_pool *pool, void *va,
				     bool allow_direct)
{
	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}

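/* An illustrative round trip over the va-based API (mydrv_* is
 * hypothetical): the driver never touches struct page, only kernel virtual
 * addresses.
 */
static inline void mydrv_va_roundtrip(struct page_pool *pool)
{
	unsigned int size = 256;
	void *va = page_pool_dev_alloc_va(pool, &size);

	if (va)
		page_pool_free_va(pool, va, true);
}
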
/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page: page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 *
 * Return: DMA address of the page.
 */
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
	return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
}

static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
						const dma_addr_t dma_addr,
						u32 offset, u32 dma_sync_size)
{
	dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
				      offset + pool->p.offset, dma_sync_size,
				      page_pool_get_dma_dir(pool));
}

/**
 * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
 * @pool: &page_pool the @page belongs to
 * @page: page to sync
 * @offset: offset from page start to "hard" start if using PP frags
 * @dma_sync_size: size of the data written to the page
 *
 * Can be used as a shorthand to sync Rx pages before accessing them in the
 * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``.
 * Note that this version performs DMA sync unconditionally, even if the
 * associated PP doesn't perform sync-for-device.
 */
static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
					      const struct page *page,
					      u32 offset, u32 dma_sync_size)
{
	__page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
				     dma_sync_size);
}

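/* An illustrative RX sketch (mydrv_* is hypothetical; pool created with
 * PP_FLAG_DMA_MAP): sync only the pkt_len bytes HW wrote before the CPU
 * parses the headers. The pool->p.offset headroom is added internally by
 * the sync helper, so the data itself starts past it.
 */
static inline void *mydrv_rx_data(const struct page_pool *pool,
				  struct page *page, u32 offset, u32 pkt_len)
{
	page_pool_dma_sync_for_cpu(pool, page, offset, pkt_len);

	return page_address(page) + offset + pool->p.offset;
}
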
static inline void
page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
				  const netmem_ref netmem, u32 offset,
				  u32 dma_sync_size)
{
	if (!pool->dma_sync_for_cpu)
		return;

	__page_pool_dma_sync_for_cpu(pool,
				     page_pool_get_dma_addr_netmem(netmem),
				     offset, dma_sync_size);
}

static inline void page_pool_get(struct page_pool *pool)
{
	refcount_inc(&pool->user_cnt);
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

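/* An illustrative poll-loop prologue (mydrv_* is hypothetical): keep
 * recycled pages local to the CPU currently running RX. This is cheap to
 * call on every poll since it only acts when the node actually changed.
 */
static inline void mydrv_poll_prologue(struct page_pool *pool)
{
	page_pool_nid_changed(pool, numa_mem_id());
}
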
/**
 * page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU
 * @pool: queried page pool
 *
 * Check if page pool will return buffers which are unreadable to the CPU /
 * kernel. This will only be the case if user space bound a memory provider
 * which returns unreadable memory to the queue served by the page pool.
 *
 * Return: true if memory allocated by the page pool may be unreadable
 */
static inline bool page_pool_is_unreadable(struct page_pool *pool)
{
	return !!pool->mp_ops;
}

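/* An illustrative guard (mydrv_* is hypothetical): when an unreadable
 * memory provider is bound (e.g. device memory), the driver must not touch
 * payload bytes on the CPU and should only operate on the header area.
 */
static inline bool mydrv_can_touch_payload(struct page_pool *pool)
{
	return !page_pool_is_unreadable(pool);
}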