/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool/helpers.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for recycling pages or page fragments
 * used by skb packets and xdp frames.
 *
 * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
 * which allocates memory with or without page splitting depending on the
 * requested memory size.
 *
 * If the driver knows that it always requires full pages or its allocations
 * are always smaller than half a page, it can use one of the more specific
 * API calls:
 *
 * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
 * driver knows that the memory it needs is always bigger than half of the page
 * allocated from the page pool. There is no cache line dirtying for
 * 'struct page' when a page is recycled back to the page pool.
 *
 * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
 * driver knows that the memory it needs is always smaller than or equal to
 * half of the page allocated from the page pool. Page splitting enables memory
 * saving and thus avoids TLB/cache misses for data access, but there is also
 * some cost to implementing page splitting, mainly some cache line
 * dirtying/bouncing for 'struct page' and atomic operations on
 * page->pp_ref_count.
 *
 * The API keeps track of in-flight pages in order to let API users know when
 * it is safe to free a page_pool object. API users must call
 * page_pool_put_page() or page_pool_free_va() when they are done with a page,
 * or attach the page to a page_pool-aware object like an skb marked with
 * skb_mark_for_recycle().
 *
 * page_pool_put_page() may be called multiple times on the same page if a page
 * is split into multiple fragments. For the last fragment, it will either
 * recycle the page, or, in case of page->_refcount > 1, it will release the
 * DMA mapping and in-flight state accounting.
 *
 * dma_sync_single_range_for_device() is only called for the last fragment when
 * page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it depends on
 * the last freed fragment to do the sync_for_device operation for all
 * fragments in the same page when a page is split. The API user must set up
 * pool->p.max_len and pool->p.offset correctly and ensure that
 * page_pool_put_page() is called with dma_sync_size being -1 for the fragment
 * API.
 */
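
/*
 * Example (illustrative sketch, not part of the original documentation): a
 * minimal Rx buffer setup following the basic flow described above. The
 * device pointer, pool size and headroom values are hypothetical placeholders
 * that a real driver would pick for its own hardware.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= dev,		// DMA-capable device (hypothetical)
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *	struct page_pool *pool;
 *	struct page *page;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	page = page_pool_dev_alloc_pages(pool);		// fill an Rx descriptor
 *	...
 *	page_pool_put_full_page(pool, page, false);	// done with the page
 *	page_pool_destroy(pool);			// safe once pages are returned
 */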

#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H

#include <linux/dma-mapping.h>

#include <net/page_pool/types.h>

#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats);

bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats);
#else
static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	return data;
}
#endif

/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool: pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/**
 * page_pool_dev_alloc_frag() - allocate a page fragment.
 * @pool: pool from which to allocate
 * @offset: on success, the offset of the allocated fragment within the page
 * @size: requested size
 *
 * Get a page fragment from the page allocator or page_pool caches.
 *
 * Return:
 * The allocated page fragment, otherwise NULL.
 */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}

static inline struct page *page_pool_alloc(struct page_pool *pool,
					   unsigned int *offset,
					   unsigned int *size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page;

	if ((*size << 1) > max_size) {
		*size = max_size;
		*offset = 0;
		return page_pool_alloc_pages(pool, gfp);
	}

	page = page_pool_alloc_frag(pool, offset, *size, gfp);
	if (unlikely(!page))
		return NULL;

	/* There is very likely not enough space for another fragment, so
	 * append the remaining size to the current fragment to avoid the
	 * truesize underestimation problem.
	 */
	if (pool->frag_offset + *size > max_size) {
		*size = max_size - *offset;
		pool->frag_offset = max_size;
	}

	return page;
}

/**
 * page_pool_dev_alloc() - allocate a page or a page fragment.
 * @pool: pool from which to allocate
 * @offset: on success, the offset of the allocated memory within the page
 * @size: in as the requested size, out as the allocated size
 *
 * Get a page or a page fragment from the page allocator or page_pool caches
 * depending on the requested size, in order to minimize memory waste and
 * performance penalty.
 *
 * Return:
 * The allocated page or page fragment, otherwise NULL.
 */
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
					       unsigned int *offset,
					       unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc(pool, offset, size, gfp);
}

static inline void *page_pool_alloc_va(struct page_pool *pool,
				       unsigned int *size, gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	/* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
	page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
	if (unlikely(!page))
		return NULL;

	return page_address(page) + offset;
}

/**
 * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its
 *			      va.
 * @pool: pool from which to allocate
 * @size: in as the requested size, out as the allocated size
 *
 * This is just a thin wrapper around the page_pool_alloc() API, and it returns
 * the va of the allocated page or page fragment.
 *
 * Return:
 * The va of the allocated page or page fragment, otherwise NULL.
 */
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
					   unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_va(pool, size, gfp);
}
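
/*
 * Example (illustrative sketch, not part of the original documentation): using
 * the size-based helpers above from a hypothetical driver that needs a
 * variable-length buffer. 'rx_buf_len' and 'truesize' are placeholder names.
 *
 *	unsigned int truesize = rx_buf_len;	// in: requested, out: granted
 *	unsigned int offset;
 *	struct page *page;
 *	void *va;
 *
 *	// Let the pool decide between a full page and a page fragment.
 *	page = page_pool_dev_alloc(pool, &offset, &truesize);
 *	if (!page)
 *		return -ENOMEM;
 *	va = page_address(page) + offset;
 *
 *	// Or, when only a kernel virtual address is needed:
 *	va = page_pool_dev_alloc_va(pool, &truesize);
 *	if (!va)
 *		return -ENOMEM;
 */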

/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool: pool from which page was allocated
 *
 * Get the stored DMA direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static inline enum dma_data_direction
page_pool_get_dma_dir(const struct page_pool *pool)
{
	return pool->p.dma_dir;
}

/**
 * page_pool_fragment_page() - split a fresh page into fragments
 * @page: page to split
 * @nr: references to set
 *
 * pp_ref_count represents the number of outstanding references to the page,
 * which will be freed using page_pool APIs (rather than page allocator APIs
 * like put_page()). Such references are usually held by page_pool-aware
 * objects like skbs marked for page pool recycling.
 *
 * This helper allows the caller to take (set) multiple references to a
 * freshly allocated page. The page must be freshly allocated (have a
 * pp_ref_count of 1). This is commonly done by drivers and
 * "fragment allocators" to save atomic operations - either when they know
 * upfront how many references they will need; or to take MAX references and
 * return the unused ones with a single atomic dec(), instead of performing
 * multiple atomic inc() operations.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_ref_count, nr);
}
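
/*
 * Example (illustrative sketch, not part of the original documentation): a
 * driver that slices one freshly allocated page into a known number of
 * buffers. 'nr_frags' is a hypothetical per-ring constant.
 *
 *	struct page *page = page_pool_dev_alloc_pages(pool);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	// Take all references upfront with a single non-atomic set.
 *	page_pool_fragment_page(page, nr_frags);
 *
 *	... hand out nr_frags buffers backed by this page ...
 *
 *	// Each buffer owner eventually drops its reference; the last
 *	// page_pool_put_page() call recycles the page into the pool.
 *	page_pool_put_page(pool, page, -1, false);
 */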

static inline long page_pool_unref_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_ref_count then we have cleared all remaining
	 * references to the page:
	 * 1. 'nr == 1': no need to actually overwrite it.
	 * 2. 'nr != 1': overwrite it with one, which is the rare case
	 *    for pp_ref_count draining.
	 *
	 * The main advantage of doing this is that we avoid an atomic update,
	 * as an atomic_read is generally a much cheaper operation than an
	 * atomic update, especially when dealing with a page that may be
	 * referenced by only 2 or 3 users. It also unifies the pp_ref_count
	 * handling by ensuring all pages start out partitioned into only 1
	 * piece, and the count is only overwritten when the page is
	 * partitioned into more than one piece.
	 */
	if (atomic_long_read(&page->pp_ref_count) == nr) {
		/* As we have ensured nr is always one for the constant case
		 * using the BUILD_BUG_ON(), we only need to handle the
		 * non-constant case here for pp_ref_count draining, which is
		 * a rare case.
		 */
		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
		if (!__builtin_constant_p(nr))
			atomic_long_set(&page->pp_ref_count, 1);

		return 0;
	}

	ret = atomic_long_sub_return(nr, &page->pp_ref_count);
	WARN_ON(ret < 0);

	/* We are the last user here too, so reset pp_ref_count back to 1 to
	 * ensure all pages start out partitioned into 1 piece. This should be
	 * the rare case where the last two fragment users call
	 * page_pool_unref_page() concurrently.
	 */
	if (unlikely(!ret))
		atomic_long_set(&page->pp_ref_count, 1);

	return ret;
}

static inline void page_pool_ref_page(struct page *page)
{
	atomic_long_inc(&page->pp_ref_count);
}

static inline bool page_pool_is_last_ref(struct page *page)
{
	/* If page_pool_unref_page() returns 0, we were the last user */
	return page_pool_unref_page(page, 1) == 0;
}

/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt > 1 this will unmap the page. If the page refcnt is 1
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_ref(page))
		return;

	page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
#endif
}

/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 *
 * Similar to page_pool_put_full_page() but caller must guarantee safe context
 * (e.g. NAPI), since it will recycle the page directly into the pool fast
 * cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_free_va() - free a va into the page_pool
 * @pool: pool from which va was allocated
 * @va: va to be freed
 * @allow_direct: freed by the consumer, allow lockless caching
 *
 * Free a va allocated from page_pool_alloc_va().
 */
static inline void page_pool_free_va(struct page_pool *pool, void *va,
				     bool allow_direct)
{
	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}
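
/*
 * Example (illustrative sketch, not part of the original documentation): the
 * common release paths for a page obtained from a page pool. The surrounding
 * driver context is assumed, not shown.
 *
 *	// In the NAPI poll handler (safe softirq context): the packet was
 *	// dropped, so recycle the page straight into the pool's fast cache.
 *	page_pool_recycle_direct(pool, page);
 *
 *	// Outside NAPI (e.g. ring teardown): no lockless caching allowed.
 *	page_pool_put_full_page(pool, page, false);
 *
 *	// For page fragments, release with dma_sync_size of -1, as required
 *	// by the fragment API described in the DOC section above.
 *	page_pool_put_page(pool, page, -1, false);
 */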

/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page: page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
		ret <<= PAGE_SHIFT;

	return ret;
}

static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
		page->dma_addr = addr >> PAGE_SHIFT;

		/* We assume page alignment to shave off bottom bits,
		 * if this "compression" doesn't work we need to drop.
		 */
		return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
	}

	page->dma_addr = addr;
	return false;
}

/**
 * page_pool_dma_sync_for_cpu() - sync Rx page for CPU after it's written by HW
 * @pool: &page_pool the @page belongs to
 * @page: page to sync
 * @offset: offset from page start to "hard" start if using PP frags
 * @dma_sync_size: size of the data written to the page
 *
 * Can be used as a shorthand to sync Rx pages before accessing them in the
 * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``.
 * Note that this version performs DMA sync unconditionally, even if the
 * associated PP doesn't perform sync-for-device.
 */
static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
					      const struct page *page,
					      u32 offset, u32 dma_sync_size)
{
	dma_sync_single_range_for_cpu(pool->p.dev,
				      page_pool_get_dma_addr(page),
				      offset + pool->p.offset, dma_sync_size,
				      page_pool_get_dma_dir(pool));
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

#endif /* _NET_PAGE_POOL_HELPERS_H */