/* SPDX-License-Identifier: GPL-2.0
 *
 *	page_pool/helpers.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for recycling pages or page fragments
 * used by skb packets and xdp frames.
 *
 * Basic use involves replacing alloc_pages() calls with page_pool_alloc(),
 * which allocates memory with or without page splitting depending on the
 * requested memory size.
 *
 * If the driver knows that it always requires full pages or its allocations
 * are always smaller than half a page, it can use one of the more specific
 * API calls:
 *
 * 1. page_pool_alloc_pages(): allocate memory without page splitting when
 * the driver knows that the memory it needs is always bigger than half of
 * the page allocated from the page pool. There is no cache line dirtying for
 * 'struct page' when a page is recycled back to the page pool.
 *
 * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
 * driver knows that the memory it needs is always smaller than or equal to
 * half of the page allocated from the page pool. Page splitting enables
 * memory saving and thus avoids TLB/cache misses for data access, but there
 * is also some cost to implement page splitting, mainly some cache line
 * dirtying/bouncing for 'struct page' and atomic operations on
 * page->pp_frag_count.
 *
 * The API keeps track of in-flight pages in order to let API users know when
 * it is safe to free a page_pool object. API users must call
 * page_pool_put_page() or page_pool_free_va() to return each page to the
 * pool, or attach the page to a page_pool-aware object like an skb marked
 * with skb_mark_for_recycle().
 *
 * page_pool_put_page() may be called multiple times on the same page if a
 * page is split into multiple fragments. For the last fragment, it will
 * either recycle the page, or in case of page->_refcount > 1, it will
 * release the DMA mapping and in-flight state accounting.
 *
 * dma_sync_single_range_for_device() is only called for the last fragment
 * when page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it
 * depends on the last freed fragment to do the sync_for_device operation for
 * all fragments in the same page when a page is split. The API user must set
 * up pool->p.max_len and pool->p.offset correctly and ensure that
 * page_pool_put_page() is called with dma_sync_size being -1 for the
 * fragment API.
 */
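/*
 * A minimal sketch of the typical driver flow described above. The parameter
 * values, the 'pdev' device pointer and the refill/completion context are
 * placeholders; the exact flags, pool_size, dma_dir, max_len and offset
 * depend on the driver and hardware:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool;
 *	struct page *page;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	page = page_pool_dev_alloc_pages(pool);		// e.g. rx refill
 *	...
 *	page_pool_put_full_page(pool, page, false);	// return when done
 *	page_pool_destroy(pool);	// once all pages are returned
 */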
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H

#include <net/page_pool/types.h>

#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);

bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats);
#else
static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	return data;
}
#endif

/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool: pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/**
 * page_pool_dev_alloc_frag() - allocate a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: requested size
 *
 * Get a page fragment from the page allocator or page_pool caches.
 *
 * Return:
 * Allocated page fragment, otherwise NULL.
 */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}

static inline struct page *page_pool_alloc(struct page_pool *pool,
					   unsigned int *offset,
					   unsigned int *size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page;

	if ((*size << 1) > max_size) {
		*size = max_size;
		*offset = 0;
		return page_pool_alloc_pages(pool, gfp);
	}

	page = page_pool_alloc_frag(pool, offset, *size, gfp);
	if (unlikely(!page))
		return NULL;

	/* There is very likely not enough space for another fragment, so
	 * append the remaining size to the current fragment to avoid the
	 * truesize underestimation problem.
	 */
	if (pool->frag_offset + *size > max_size) {
		*size = max_size - *offset;
		pool->frag_offset = max_size;
	}

	return page;
}

/**
 * page_pool_dev_alloc() - allocate a page or a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: in as the requested size, out as the allocated size
 *
 * Get a page or a page fragment from the page allocator or page_pool caches
 * depending on the requested size, in order to allocate memory with the
 * least memory utilization and performance penalty.
 *
 * Return:
 * Allocated page or page fragment, otherwise NULL.
 */
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
					       unsigned int *offset,
					       unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc(pool, offset, size, gfp);
}

static inline void *page_pool_alloc_va(struct page_pool *pool,
				       unsigned int *size, gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	/* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
	page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
	if (unlikely(!page))
		return NULL;

	return page_address(page) + offset;
}

/**
 * page_pool_dev_alloc_va() - allocate a page or a page fragment and return
 * its va.
 * @pool: pool from which to allocate
 * @size: in as the requested size, out as the allocated size
 *
 * This is just a thin wrapper around the page_pool_alloc() API, and it
 * returns the va of the allocated page or page fragment.
 *
 * Return:
 * The va for the allocated page or page fragment, otherwise NULL.
 */
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
					   unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_va(pool, size, gfp);
}
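/*
 * A hedged sketch of how the in/out @size parameter of the helpers above is
 * typically used; 'rx_buf_len' and the rx refill context are hypothetical.
 * Note that the returned size may be larger than requested when the
 * remaining tail of a split page is appended to the fragment:
 *
 *	unsigned int size = rx_buf_len;		// in: requested size
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc(pool, &offset, &size);
 *	if (!page)
 *		return -ENOMEM;
 *	// out: 'size' now holds the usable length at page + offset;
 *	// use it as the buffer length / truesize contribution.
 */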
/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool: pool from which page was allocated
 *
 * Get the stored dma direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}

/* pp_frag_count represents the number of writers who can update the page
 * either by updating skb->data or via DMA mappings for the device.
 * We can't rely on the page refcnt for that as we don't know who might be
 * holding page references and we can't reliably destroy or sync DMA mappings
 * of the fragments.
 *
 * When pp_frag_count reaches 0 we can either recycle the page, if the page
 * refcnt is 1, or return it back to the memory allocator and destroy any
 * mappings we have.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_defrag_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_frag_count then we have cleared all remaining
	 * references to the page:
	 * 1. 'nr == 1': no need to actually overwrite it.
	 * 2. 'nr != 1': overwrite it with one, which is the rare case
	 *    for pp_frag_count draining.
	 *
	 * The main advantage of doing this is that we avoid an atomic update,
	 * as an atomic_read is generally a much cheaper operation than an
	 * atomic update, especially when dealing with a page that may be
	 * partitioned into only 2 or 3 pieces; it also unifies the
	 * pp_frag_count handling by ensuring all pages start out partitioned
	 * into 1 piece, and only overwriting it when a page is partitioned
	 * into more than one piece.
	 */
	if (atomic_long_read(&page->pp_frag_count) == nr) {
		/* As we have ensured nr is always one for the constant case
		 * using the BUILD_BUG_ON(), we only need to handle the
		 * non-constant case here for pp_frag_count draining, which
		 * is a rare case.
		 */
		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
		if (!__builtin_constant_p(nr))
			atomic_long_set(&page->pp_frag_count, 1);

		return 0;
	}

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);

	/* We are the last user here too, reset pp_frag_count back to 1 to
	 * ensure all pages start out partitioned into 1 piece; this should
	 * be the rare case where the last two fragment users call
	 * page_pool_defrag_page() concurrently.
	 */
	if (unlikely(!ret))
		atomic_long_set(&page->pp_frag_count, 1);

	return ret;
}

static inline bool page_pool_is_last_frag(struct page *page)
{
	/* If page_pool_defrag_page() returns 0, we were the last user */
	return page_pool_defrag_page(page, 1) == 0;
}
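/*
 * A minimal sketch of the fragment-reference model above, assuming a driver
 * that carves one page allocated with page_pool_alloc_pages() into a
 * hypothetical 'nr_bufs' buffers and hands out one fragment reference per
 * buffer:
 *
 *	page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
 *	if (!page)
 *		return -ENOMEM;
 *	page_pool_fragment_page(page, nr_bufs);	// pp_frag_count = nr_bufs
 *	...
 *	// Each buffer is later released with one call per fragment; only
 *	// the call dropping the last fragment reference recycles or unmaps.
 *	page_pool_put_page(pool, page, -1, false);
 */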
/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt > 1 this will unmap the page. If the page refcnt is 1
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_frag(page))
		return;

	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}

/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 *
 * Similar to page_pool_put_full_page(), but the caller must guarantee safe
 * context (e.g. NAPI), since it will recycle the page directly into the pool
 * fast cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

/* True when dma_addr_t is wider than unsigned long, i.e. 32-bit arch with
 * 64-bit DMA addressing.
 */
#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_free_va() - free a va into the page_pool
 * @pool: pool from which va was allocated
 * @va: va to be freed
 * @allow_direct: freed by the consumer, allow lockless caching
 *
 * Free a va allocated from page_pool_alloc_va().
 */
static inline void page_pool_free_va(struct page_pool *pool, void *va,
				     bool allow_direct)
{
	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}

/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page: page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
		ret <<= PAGE_SHIFT;

	return ret;
}

static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
		page->dma_addr = addr >> PAGE_SHIFT;

		/* We assume page alignment to shave off bottom bits,
		 * if this "compression" doesn't work we need to drop.
		 */
		return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
	}

	page->dma_addr = addr;
	return false;
}

/* Drop a user reference on the pool; returns true when the last user is gone */
static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Switch the pool's preferred allocation node if @new_nid differs from it */
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

#endif /* _NET_PAGE_POOL_HELPERS_H */