1 /* SPDX-License-Identifier: GPL-2.0 2 * 3 * page_pool/helpers.h 4 * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com> 5 * Copyright (C) 2016 Red Hat, Inc. 6 */ 7 8 /** 9 * DOC: page_pool allocator 10 * 11 * The page_pool allocator is optimized for recycling page or page fragment used 12 * by skb packet and xdp frame. 13 * 14 * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(), 15 * which allocate memory with or without page splitting depending on the 16 * requested memory size. 17 * 18 * If the driver knows that it always requires full pages or its allocations are 19 * always smaller than half a page, it can use one of the more specific API 20 * calls: 21 * 22 * 1. page_pool_alloc_pages(): allocate memory without page splitting when 23 * driver knows that the memory it need is always bigger than half of the page 24 * allocated from page pool. There is no cache line dirtying for 'struct page' 25 * when a page is recycled back to the page pool. 26 * 27 * 2. page_pool_alloc_frag(): allocate memory with page splitting when driver 28 * knows that the memory it need is always smaller than or equal to half of the 29 * page allocated from page pool. Page splitting enables memory saving and thus 30 * avoids TLB/cache miss for data access, but there also is some cost to 31 * implement page splitting, mainly some cache line dirtying/bouncing for 32 * 'struct page' and atomic operation for page->pp_ref_count. 33 * 34 * The API keeps track of in-flight pages, in order to let API users know when 35 * it is safe to free a page_pool object, the API users must call 36 * page_pool_put_page() or page_pool_free_va() to free the page_pool object, or 37 * attach the page_pool object to a page_pool-aware object like skbs marked with 38 * skb_mark_for_recycle(). 39 * 40 * page_pool_put_page() may be called multiple times on the same page if a page 41 * is split into multiple fragments. For the last fragment, it will either 42 * recycle the page, or in case of page->_refcount > 1, it will release the DMA 43 * mapping and in-flight state accounting. 44 * 45 * dma_sync_single_range_for_device() is only called for the last fragment when 46 * page_pool is created with PP_FLAG_DMA_SYNC_DEV flag, so it depends on the 47 * last freed fragment to do the sync_for_device operation for all fragments in 48 * the same page when a page is split. The API user must setup pool->p.max_len 49 * and pool->p.offset correctly and ensure that page_pool_put_page() is called 50 * with dma_sync_size being -1 for fragment API. 51 */ 52 #ifndef _NET_PAGE_POOL_HELPERS_H 53 #define _NET_PAGE_POOL_HELPERS_H 54 55 #include <linux/dma-mapping.h> 56 57 #include <net/page_pool/types.h> 58 #include <net/net_debug.h> 59 #include <net/netmem.h> 60 61 #ifdef CONFIG_PAGE_POOL_STATS 62 /* Deprecated driver-facing API, use netlink instead */ 63 int page_pool_ethtool_stats_get_count(void); 64 u8 *page_pool_ethtool_stats_get_strings(u8 *data); 65 u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats); 66 67 bool page_pool_get_stats(const struct page_pool *pool, 68 struct page_pool_stats *stats); 69 #else 70 static inline int page_pool_ethtool_stats_get_count(void) 71 { 72 return 0; 73 } 74 75 static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) 76 { 77 return data; 78 } 79 80 static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats) 81 { 82 return data; 83 } 84 #endif 85 86 /** 87 * page_pool_dev_alloc_pages() - allocate a page. 88 * @pool: pool from which to allocate 89 * 90 * Get a page from the page allocator or page_pool caches. 91 */ 92 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) 93 { 94 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 95 96 return page_pool_alloc_pages(pool, gfp); 97 } 98 99 /** 100 * page_pool_dev_alloc_frag() - allocate a page fragment. 101 * @pool: pool from which to allocate 102 * @offset: offset to the allocated page 103 * @size: requested size 104 * 105 * Get a page fragment from the page allocator or page_pool caches. 106 * 107 * Return: allocated page fragment, otherwise return NULL. 108 */ 109 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, 110 unsigned int *offset, 111 unsigned int size) 112 { 113 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 114 115 return page_pool_alloc_frag(pool, offset, size, gfp); 116 } 117 118 static inline struct page *page_pool_alloc(struct page_pool *pool, 119 unsigned int *offset, 120 unsigned int *size, gfp_t gfp) 121 { 122 unsigned int max_size = PAGE_SIZE << pool->p.order; 123 struct page *page; 124 125 if ((*size << 1) > max_size) { 126 *size = max_size; 127 *offset = 0; 128 return page_pool_alloc_pages(pool, gfp); 129 } 130 131 page = page_pool_alloc_frag(pool, offset, *size, gfp); 132 if (unlikely(!page)) 133 return NULL; 134 135 /* There is very likely not enough space for another fragment, so append 136 * the remaining size to the current fragment to avoid truesize 137 * underestimate problem. 138 */ 139 if (pool->frag_offset + *size > max_size) { 140 *size = max_size - *offset; 141 pool->frag_offset = max_size; 142 } 143 144 return page; 145 } 146 147 /** 148 * page_pool_dev_alloc() - allocate a page or a page fragment. 149 * @pool: pool from which to allocate 150 * @offset: offset to the allocated page 151 * @size: in as the requested size, out as the allocated size 152 * 153 * Get a page or a page fragment from the page allocator or page_pool caches 154 * depending on the requested size in order to allocate memory with least memory 155 * utilization and performance penalty. 156 * 157 * Return: allocated page or page fragment, otherwise return NULL. 158 */ 159 static inline struct page *page_pool_dev_alloc(struct page_pool *pool, 160 unsigned int *offset, 161 unsigned int *size) 162 { 163 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 164 165 return page_pool_alloc(pool, offset, size, gfp); 166 } 167 168 static inline void *page_pool_alloc_va(struct page_pool *pool, 169 unsigned int *size, gfp_t gfp) 170 { 171 unsigned int offset; 172 struct page *page; 173 174 /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */ 175 page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM); 176 if (unlikely(!page)) 177 return NULL; 178 179 return page_address(page) + offset; 180 } 181 182 /** 183 * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its 184 * va. 185 * @pool: pool from which to allocate 186 * @size: in as the requested size, out as the allocated size 187 * 188 * This is just a thin wrapper around the page_pool_alloc() API, and 189 * it returns va of the allocated page or page fragment. 190 * 191 * Return: the va for the allocated page or page fragment, otherwise return NULL. 192 */ 193 static inline void *page_pool_dev_alloc_va(struct page_pool *pool, 194 unsigned int *size) 195 { 196 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 197 198 return page_pool_alloc_va(pool, size, gfp); 199 } 200 201 /** 202 * page_pool_get_dma_dir() - Retrieve the stored DMA direction. 203 * @pool: pool from which page was allocated 204 * 205 * Get the stored dma direction. A driver might decide to store this locally 206 * and avoid the extra cache line from page_pool to determine the direction. 207 */ 208 static inline enum dma_data_direction 209 page_pool_get_dma_dir(const struct page_pool *pool) 210 { 211 return pool->p.dma_dir; 212 } 213 214 static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr) 215 { 216 atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr); 217 } 218 219 /** 220 * page_pool_fragment_page() - split a fresh page into fragments 221 * @page: page to split 222 * @nr: references to set 223 * 224 * pp_ref_count represents the number of outstanding references to the page, 225 * which will be freed using page_pool APIs (rather than page allocator APIs 226 * like put_page()). Such references are usually held by page_pool-aware 227 * objects like skbs marked for page pool recycling. 228 * 229 * This helper allows the caller to take (set) multiple references to a 230 * freshly allocated page. The page must be freshly allocated (have a 231 * pp_ref_count of 1). This is commonly done by drivers and 232 * "fragment allocators" to save atomic operations - either when they know 233 * upfront how many references they will need; or to take MAX references and 234 * return the unused ones with a single atomic dec(), instead of performing 235 * multiple atomic inc() operations. 236 */ 237 static inline void page_pool_fragment_page(struct page *page, long nr) 238 { 239 page_pool_fragment_netmem(page_to_netmem(page), nr); 240 } 241 242 static inline long page_pool_unref_netmem(netmem_ref netmem, long nr) 243 { 244 atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem); 245 long ret; 246 247 /* If nr == pp_ref_count then we have cleared all remaining 248 * references to the page: 249 * 1. 'n == 1': no need to actually overwrite it. 250 * 2. 'n != 1': overwrite it with one, which is the rare case 251 * for pp_ref_count draining. 252 * 253 * The main advantage to doing this is that not only we avoid a atomic 254 * update, as an atomic_read is generally a much cheaper operation than 255 * an atomic update, especially when dealing with a page that may be 256 * referenced by only 2 or 3 users; but also unify the pp_ref_count 257 * handling by ensuring all pages have partitioned into only 1 piece 258 * initially, and only overwrite it when the page is partitioned into 259 * more than one piece. 260 */ 261 if (atomic_long_read(pp_ref_count) == nr) { 262 /* As we have ensured nr is always one for constant case using 263 * the BUILD_BUG_ON(), only need to handle the non-constant case 264 * here for pp_ref_count draining, which is a rare case. 265 */ 266 BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1); 267 if (!__builtin_constant_p(nr)) 268 atomic_long_set(pp_ref_count, 1); 269 270 return 0; 271 } 272 273 ret = atomic_long_sub_return(nr, pp_ref_count); 274 WARN_ON(ret < 0); 275 276 /* We are the last user here too, reset pp_ref_count back to 1 to 277 * ensure all pages have been partitioned into 1 piece initially, 278 * this should be the rare case when the last two fragment users call 279 * page_pool_unref_page() currently. 280 */ 281 if (unlikely(!ret)) 282 atomic_long_set(pp_ref_count, 1); 283 284 return ret; 285 } 286 287 static inline long page_pool_unref_page(struct page *page, long nr) 288 { 289 return page_pool_unref_netmem(page_to_netmem(page), nr); 290 } 291 292 static inline void page_pool_ref_netmem(netmem_ref netmem) 293 { 294 atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count); 295 } 296 297 static inline void page_pool_ref_page(struct page *page) 298 { 299 page_pool_ref_netmem(page_to_netmem(page)); 300 } 301 302 static inline bool page_pool_is_last_ref(netmem_ref netmem) 303 { 304 /* If page_pool_unref_page() returns 0, we were the last user */ 305 return page_pool_unref_netmem(netmem, 1) == 0; 306 } 307 308 static inline void page_pool_put_netmem(struct page_pool *pool, 309 netmem_ref netmem, 310 unsigned int dma_sync_size, 311 bool allow_direct) 312 { 313 /* When page_pool isn't compiled-in, net/core/xdp.c doesn't 314 * allow registering MEM_TYPE_PAGE_POOL, but shield linker. 315 */ 316 #ifdef CONFIG_PAGE_POOL 317 if (!page_pool_is_last_ref(netmem)) 318 return; 319 320 page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct); 321 #endif 322 } 323 324 /** 325 * page_pool_put_page() - release a reference to a page pool page 326 * @pool: pool from which page was allocated 327 * @page: page to release a reference on 328 * @dma_sync_size: how much of the page may have been touched by the device 329 * @allow_direct: released by the consumer, allow lockless caching 330 * 331 * The outcome of this depends on the page refcnt. If the driver bumps 332 * the refcnt > 1 this will unmap the page. If the page refcnt is 1 333 * the allocator owns the page and will try to recycle it in one of the pool 334 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device 335 * using dma_sync_single_range_for_device(). 336 */ 337 static inline void page_pool_put_page(struct page_pool *pool, 338 struct page *page, 339 unsigned int dma_sync_size, 340 bool allow_direct) 341 { 342 page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size, 343 allow_direct); 344 } 345 346 static inline void page_pool_put_full_netmem(struct page_pool *pool, 347 netmem_ref netmem, 348 bool allow_direct) 349 { 350 page_pool_put_netmem(pool, netmem, -1, allow_direct); 351 } 352 353 /** 354 * page_pool_put_full_page() - release a reference on a page pool page 355 * @pool: pool from which page was allocated 356 * @page: page to release a reference on 357 * @allow_direct: released by the consumer, allow lockless caching 358 * 359 * Similar to page_pool_put_page(), but will DMA sync the entire memory area 360 * as configured in &page_pool_params.max_len. 361 */ 362 static inline void page_pool_put_full_page(struct page_pool *pool, 363 struct page *page, bool allow_direct) 364 { 365 page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct); 366 } 367 368 /** 369 * page_pool_recycle_direct() - release a reference on a page pool page 370 * @pool: pool from which page was allocated 371 * @page: page to release a reference on 372 * 373 * Similar to page_pool_put_full_page() but caller must guarantee safe context 374 * (e.g NAPI), since it will recycle the page directly into the pool fast cache. 375 */ 376 static inline void page_pool_recycle_direct(struct page_pool *pool, 377 struct page *page) 378 { 379 page_pool_put_full_page(pool, page, true); 380 } 381 382 #define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \ 383 (sizeof(dma_addr_t) > sizeof(unsigned long)) 384 385 /** 386 * page_pool_free_va() - free a va into the page_pool 387 * @pool: pool from which va was allocated 388 * @va: va to be freed 389 * @allow_direct: freed by the consumer, allow lockless caching 390 * 391 * Free a va allocated from page_pool_allo_va(). 392 */ 393 static inline void page_pool_free_va(struct page_pool *pool, void *va, 394 bool allow_direct) 395 { 396 page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct); 397 } 398 399 static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem) 400 { 401 dma_addr_t ret = netmem_get_dma_addr(netmem); 402 403 if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) 404 ret <<= PAGE_SHIFT; 405 406 return ret; 407 } 408 409 /** 410 * page_pool_get_dma_addr() - Retrieve the stored DMA address. 411 * @page: page allocated from a page pool 412 * 413 * Fetch the DMA address of the page. The page pool to which the page belongs 414 * must had been created with PP_FLAG_DMA_MAP. 415 */ 416 static inline dma_addr_t page_pool_get_dma_addr(const struct page *page) 417 { 418 return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page)); 419 } 420 421 /** 422 * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW 423 * @pool: &page_pool the @page belongs to 424 * @page: page to sync 425 * @offset: offset from page start to "hard" start if using PP frags 426 * @dma_sync_size: size of the data written to the page 427 * 428 * Can be used as a shorthand to sync Rx pages before accessing them in the 429 * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``. 430 * Note that this version performs DMA sync unconditionally, even if the 431 * associated PP doesn't perform sync-for-device. 432 */ 433 static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool, 434 const struct page *page, 435 u32 offset, u32 dma_sync_size) 436 { 437 dma_sync_single_range_for_cpu(pool->p.dev, 438 page_pool_get_dma_addr(page), 439 offset + pool->p.offset, dma_sync_size, 440 page_pool_get_dma_dir(pool)); 441 } 442 443 static inline bool page_pool_put(struct page_pool *pool) 444 { 445 return refcount_dec_and_test(&pool->user_cnt); 446 } 447 448 static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) 449 { 450 if (unlikely(pool->p.nid != new_nid)) 451 page_pool_update_nid(pool, new_nid); 452 } 453 454 #endif /* _NET_PAGE_POOL_HELPERS_H */ 455