/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
#include <net/netmem.h>

#define PP_FLAG_DMA_MAP         BIT(0) /* Should page_pool do the DMA
                                        * map/unmap
                                        */
#define PP_FLAG_DMA_SYNC_DEV    BIT(1) /* If set, all pages that the driver
                                        * gets from page_pool will be
                                        * DMA-synced-for-device according to
                                        * the length provided by the device
                                        * driver.
                                        * Please note DMA-sync-for-CPU is
                                        * still the device driver's
                                        * responsibility.
                                        */
#define PP_FLAG_SYSTEM_POOL     BIT(2) /* Global system page_pool */

/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
 * this must be able to support unreadable netmem, where netmem_address() would
 * return NULL. This flag should not be set for header page_pools.
 *
 * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
 * page_pool_params.slow.queue_idx.
 */
#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3)

#define PP_FLAG_ALL             (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
                                 PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Room is kept for more objects for the XDP_DROP use-case: XDP_DROP
 * gives the opportunity to recycle objects directly into this array,
 * as it shares the same softirq/NAPI protection. If the cache is
 * already full (or partly full), the XDP_DROP recycles have to take
 * a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE     128
#define PP_ALLOC_CACHE_REFILL   64
struct pp_alloc_cache {
        u32 count;
        netmem_ref cache[PP_ALLOC_CACHE_SIZE];
};

/**
 * struct page_pool_params - page pool parameters
 * @fast: params accessed frequently on hotpath
 * @order: 2^order pages on allocation
 * @pool_size: size of the ptr_ring
 * @nid: NUMA node id to allocate pages from
 * @dev: device, for DMA pre-mapping purposes
 * @napi: NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir: DMA mapping direction
 * @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 * @slow: params with slowpath access only (initialization and Netlink)
 * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
 * @queue_idx: queue idx this page_pool is being created for.
 * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
 *         PP_FLAG_ALLOW_UNREADABLE_NETMEM.
 */
struct page_pool_params {
        struct_group_tagged(page_pool_params_fast, fast,
                unsigned int order;
                unsigned int pool_size;
                int nid;
                struct device *dev;
                struct napi_struct *napi;
                enum dma_data_direction dma_dir;
                unsigned int max_len;
                unsigned int offset;
        );
        struct_group_tagged(page_pool_params_slow, slow,
                struct net_device *netdev;
                unsigned int queue_idx;
                unsigned int flags;
                /* private: used by test code only */
                void (*init_callback)(netmem_ref netmem, void *arg);
                void *init_arg;
        );
};

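/* Example: a minimal sketch of filling the fast params and creating a
 * pool from a driver's RX-queue setup path. The priv/rxq names are
 * hypothetical driver state, not part of this API; see
 * page_pool_create() below.
 *
 *      struct page_pool_params pp_params = { 0 };
 *
 *      pp_params.order     = 0;
 *      pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 *      pp_params.pool_size = rxq->ring_size;
 *      pp_params.nid       = NUMA_NO_NODE;
 *      pp_params.dev       = priv->dev;
 *      pp_params.napi      = &rxq->napi;
 *      pp_params.dma_dir   = DMA_FROM_DEVICE;
 *      pp_params.max_len   = PAGE_SIZE;
 *      pp_params.offset    = 0;
 *
 *      rxq->page_pool = page_pool_create(&pp_params);
 *      if (IS_ERR(rxq->page_pool))
 *              return PTR_ERR(rxq->page_pool);
 */
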
#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast: successful fast path allocations
 * @slow: slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty: ptr ring is empty, so a slow path allocation was forced
 * @refill: an allocation which triggered a refill of the cache
 * @waive: pages obtained from the ptr ring that cannot be added to
 *         the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {
        u64 fast;
        u64 slow;
        u64 slow_high_order;
        u64 empty;
        u64 refill;
        u64 waive;
};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached: recycling placed page in the page pool cache
 * @cache_full: page pool cache was full
 * @ring: page placed into the ptr ring
 * @ring_full: page released from page pool because the ptr ring was full
 * @released_refcnt: page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {
        u64 cached;
        u64 cache_full;
        u64 ring;
        u64 ring_full;
        u64 released_refcnt;
};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats: see struct page_pool_alloc_stats
 * @recycle_stats: see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {
        struct page_pool_alloc_stats alloc_stats;
        struct page_pool_recycle_stats recycle_stats;
};
#endif

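/* Example: a sketch of snapshotting the combined stats, e.g. from an
 * ethtool stats callback. It assumes page_pool_get_stats() from
 * <net/page_pool/helpers.h>, which is only available when
 * CONFIG_PAGE_POOL_STATS is enabled; driver_report_pp_stats() is a
 * hypothetical driver helper.
 *
 *      struct page_pool_stats stats = { 0 };
 *
 *      if (page_pool_get_stats(rxq->page_pool, &stats))
 *              driver_report_pp_stats(priv, &stats);
 */
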
/* The whole frag API block must stay within one cacheline. On 32-bit systems,
 * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``.
 * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``.
 * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that
 * one for simplicity.
 * Aligning it to a cacheline boundary would be excessive and brings no
 * benefit.
 */
#define PAGE_POOL_FRAG_GROUP_ALIGN      (4 * sizeof(long))

struct memory_provider_ops;

struct pp_memory_provider_params {
        void *mp_priv;
        const struct memory_provider_ops *mp_ops;
};

struct page_pool {
        struct page_pool_params_fast p;

        int cpuid;
        u32 pages_state_hold_cnt;

        bool has_init_callback:1;       /* slow::init_callback is set */
        bool dma_map:1;                 /* Perform DMA mapping */
        bool dma_sync:1;                /* Perform DMA sync for device */
        bool dma_sync_for_cpu:1;        /* Perform DMA sync for cpu */
#ifdef CONFIG_PAGE_POOL_STATS
        bool system:1;                  /* This is a global percpu pool */
#endif

        __cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
        long frag_users;
        netmem_ref frag_page;
        unsigned int frag_offset;
        __cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);

        struct delayed_work release_dw;
        void (*disconnect)(void *pool);
        unsigned long defer_start;
        unsigned long defer_warn;

#ifdef CONFIG_PAGE_POOL_STATS
        /* these stats are incremented while in softirq context */
        struct page_pool_alloc_stats alloc_stats;
#endif
        u32 xdp_mem_id;

        /*
         * Data structure for the allocation side
         *
         * The driver's allocation side usually already performs some
         * kind of resource protection. Piggyback on this protection,
         * and require the driver to protect the allocation side.
         *
         * For NIC drivers this means allocating a page_pool per
         * RX-queue, as the RX-queue is already protected by
         * softirq/BH scheduling and napi_schedule: NAPI scheduling
         * guarantees that a single napi_struct will only be scheduled
         * on a single CPU (see napi_schedule).
         */
        struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

        /* Data structure for storing recycled pages.
         *
         * Returning/freeing pages is more complicated synchronization-wise,
         * because frees can happen on remote CPUs, with no association
         * with the allocation resource.
         *
         * Use ptr_ring, as it separates consumer and producer
         * efficiently, in a way that doesn't bounce cache-lines.
         *
         * TODO: Implement bulk return of pages into this structure.
         */
        struct ptr_ring ring;

        void *mp_priv;
        const struct memory_provider_ops *mp_ops;

#ifdef CONFIG_PAGE_POOL_STATS
        /* recycle stats are per-cpu to avoid locking */
        struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
        atomic_t pages_state_release_cnt;

        /* A page_pool is strictly tied to a single RX-queue being
         * protected by NAPI, due to the above pp_alloc_cache. The
         * purpose of this refcnt is to simplify drivers' error handling.
         */
        refcount_t user_cnt;

        u64 destroy_cnt;

        /* Slow/Control-path information follows */
        struct page_pool_params_slow slow;
        /* User-facing fields, protected by page_pools_lock */
        struct {
                struct hlist_node list;
                u64 detach_time;
                u32 id;
        } user;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
                                  unsigned int size, gfp_t gfp);
netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
                                       unsigned int *offset, unsigned int size,
                                       gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
                                          int cpuid);

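/* Example: a sketch of the per-queue allocate/return cycle from NAPI
 * context. The rxq naming is hypothetical driver state; the return
 * side assumes page_pool_put_full_page() from
 * <net/page_pool/helpers.h>, with allow_direct set to true only
 * because this runs in the pool's own NAPI context.
 *
 *      struct page *page;
 *
 *      page = page_pool_alloc_pages(rxq->page_pool,
 *                                   GFP_ATOMIC | __GFP_NOWARN);
 *      if (!page)
 *              return -ENOMEM;
 *
 *      ...hand the page to the HW RX ring; on XDP_DROP or another
 *      early free, recycle it directly:
 *
 *      page_pool_put_full_page(rxq->page_pool, page, true);
 */
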
struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_disable_direct_recycling(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
                           const struct xdp_mem_info *mem);
void page_pool_put_netmem_bulk(netmem_ref *data, u32 count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
                                         void (*disconnect)(void *),
                                         const struct xdp_mem_info *mem)
{
}

static inline void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
{
}
#endif

void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
                                  unsigned int dma_sync_size,
                                  bool allow_direct);
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
                                unsigned int dma_sync_size,
                                bool allow_direct);

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
        return true;
#else
        return false;
#endif
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);

#endif /* _NET_PAGE_POOL_TYPES_H */