1 /* SPDX-License-Identifier: GPL-2.0 2 * 3 * page_pool.c 4 * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com> 5 * Copyright (C) 2016 Red Hat, Inc. 6 */ 7 8 #include <linux/types.h> 9 #include <linux/kernel.h> 10 #include <linux/slab.h> 11 #include <linux/device.h> 12 13 #include <net/page_pool/helpers.h> 14 #include <net/xdp.h> 15 16 #include <linux/dma-direction.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/page-flags.h> 19 #include <linux/mm.h> /* for put_page() */ 20 #include <linux/poison.h> 21 #include <linux/ethtool.h> 22 #include <linux/netdevice.h> 23 24 #include <trace/events/page_pool.h> 25 26 #include "page_pool_priv.h" 27 28 #define DEFER_TIME (msecs_to_jiffies(1000)) 29 #define DEFER_WARN_INTERVAL (60 * HZ) 30 31 #define BIAS_MAX LONG_MAX 32 33 #ifdef CONFIG_PAGE_POOL_STATS 34 /* alloc_stat_inc is intended to be used in softirq context */ 35 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) 36 /* recycle_stat_inc is safe to use when preemption is possible. */ 37 #define recycle_stat_inc(pool, __stat) \ 38 do { \ 39 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ 40 this_cpu_inc(s->__stat); \ 41 } while (0) 42 43 #define recycle_stat_add(pool, __stat, val) \ 44 do { \ 45 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ 46 this_cpu_add(s->__stat, val); \ 47 } while (0) 48 49 static const char pp_stats[][ETH_GSTRING_LEN] = { 50 "rx_pp_alloc_fast", 51 "rx_pp_alloc_slow", 52 "rx_pp_alloc_slow_ho", 53 "rx_pp_alloc_empty", 54 "rx_pp_alloc_refill", 55 "rx_pp_alloc_waive", 56 "rx_pp_recycle_cached", 57 "rx_pp_recycle_cache_full", 58 "rx_pp_recycle_ring", 59 "rx_pp_recycle_ring_full", 60 "rx_pp_recycle_released_ref", 61 }; 62 63 /** 64 * page_pool_get_stats() - fetch page pool stats 65 * @pool: pool from which page was allocated 66 * @stats: struct page_pool_stats to fill in 67 * 68 * Retrieve statistics about the page_pool. This API is only available 69 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``. 70 * A pointer to a caller allocated struct page_pool_stats structure 71 * is passed to this API which is filled in. The caller can then report 72 * those stats to the user (perhaps via ethtool, debugfs, etc.). 73 */ 74 bool page_pool_get_stats(struct page_pool *pool, 75 struct page_pool_stats *stats) 76 { 77 int cpu = 0; 78 79 if (!stats) 80 return false; 81 82 /* The caller is responsible to initialize stats. */ 83 stats->alloc_stats.fast += pool->alloc_stats.fast; 84 stats->alloc_stats.slow += pool->alloc_stats.slow; 85 stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order; 86 stats->alloc_stats.empty += pool->alloc_stats.empty; 87 stats->alloc_stats.refill += pool->alloc_stats.refill; 88 stats->alloc_stats.waive += pool->alloc_stats.waive; 89 90 for_each_possible_cpu(cpu) { 91 const struct page_pool_recycle_stats *pcpu = 92 per_cpu_ptr(pool->recycle_stats, cpu); 93 94 stats->recycle_stats.cached += pcpu->cached; 95 stats->recycle_stats.cache_full += pcpu->cache_full; 96 stats->recycle_stats.ring += pcpu->ring; 97 stats->recycle_stats.ring_full += pcpu->ring_full; 98 stats->recycle_stats.released_refcnt += pcpu->released_refcnt; 99 } 100 101 return true; 102 } 103 EXPORT_SYMBOL(page_pool_get_stats); 104 105 u8 *page_pool_ethtool_stats_get_strings(u8 *data) 106 { 107 int i; 108 109 for (i = 0; i < ARRAY_SIZE(pp_stats); i++) { 110 memcpy(data, pp_stats[i], ETH_GSTRING_LEN); 111 data += ETH_GSTRING_LEN; 112 } 113 114 return data; 115 } 116 EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings); 117 118 int page_pool_ethtool_stats_get_count(void) 119 { 120 return ARRAY_SIZE(pp_stats); 121 } 122 EXPORT_SYMBOL(page_pool_ethtool_stats_get_count); 123 124 u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) 125 { 126 struct page_pool_stats *pool_stats = stats; 127 128 *data++ = pool_stats->alloc_stats.fast; 129 *data++ = pool_stats->alloc_stats.slow; 130 *data++ = pool_stats->alloc_stats.slow_high_order; 131 *data++ = pool_stats->alloc_stats.empty; 132 *data++ = pool_stats->alloc_stats.refill; 133 *data++ = pool_stats->alloc_stats.waive; 134 *data++ = pool_stats->recycle_stats.cached; 135 *data++ = pool_stats->recycle_stats.cache_full; 136 *data++ = pool_stats->recycle_stats.ring; 137 *data++ = pool_stats->recycle_stats.ring_full; 138 *data++ = pool_stats->recycle_stats.released_refcnt; 139 140 return data; 141 } 142 EXPORT_SYMBOL(page_pool_ethtool_stats_get); 143 144 #else 145 #define alloc_stat_inc(pool, __stat) 146 #define recycle_stat_inc(pool, __stat) 147 #define recycle_stat_add(pool, __stat, val) 148 #endif 149 150 static bool page_pool_producer_lock(struct page_pool *pool) 151 __acquires(&pool->ring.producer_lock) 152 { 153 bool in_softirq = in_softirq(); 154 155 if (in_softirq) 156 spin_lock(&pool->ring.producer_lock); 157 else 158 spin_lock_bh(&pool->ring.producer_lock); 159 160 return in_softirq; 161 } 162 163 static void page_pool_producer_unlock(struct page_pool *pool, 164 bool in_softirq) 165 __releases(&pool->ring.producer_lock) 166 { 167 if (in_softirq) 168 spin_unlock(&pool->ring.producer_lock); 169 else 170 spin_unlock_bh(&pool->ring.producer_lock); 171 } 172 173 static int page_pool_init(struct page_pool *pool, 174 const struct page_pool_params *params) 175 { 176 unsigned int ring_qsize = 1024; /* Default */ 177 178 memcpy(&pool->p, ¶ms->fast, sizeof(pool->p)); 179 memcpy(&pool->slow, ¶ms->slow, sizeof(pool->slow)); 180 181 /* Validate only known flags were used */ 182 if (pool->p.flags & ~(PP_FLAG_ALL)) 183 return -EINVAL; 184 185 if (pool->p.pool_size) 186 ring_qsize = pool->p.pool_size; 187 188 /* Sanity limit mem that can be pinned down */ 189 if (ring_qsize > 32768) 190 return -E2BIG; 191 192 /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL. 193 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending, 194 * which is the XDP_TX use-case. 195 */ 196 if (pool->p.flags & PP_FLAG_DMA_MAP) { 197 if ((pool->p.dma_dir != DMA_FROM_DEVICE) && 198 (pool->p.dma_dir != DMA_BIDIRECTIONAL)) 199 return -EINVAL; 200 } 201 202 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) { 203 /* In order to request DMA-sync-for-device the page 204 * needs to be mapped 205 */ 206 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) 207 return -EINVAL; 208 209 if (!pool->p.max_len) 210 return -EINVAL; 211 212 /* pool->p.offset has to be set according to the address 213 * offset used by the DMA engine to start copying rx data 214 */ 215 } 216 217 pool->has_init_callback = !!pool->slow.init_callback; 218 219 #ifdef CONFIG_PAGE_POOL_STATS 220 pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); 221 if (!pool->recycle_stats) 222 return -ENOMEM; 223 #endif 224 225 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) { 226 #ifdef CONFIG_PAGE_POOL_STATS 227 free_percpu(pool->recycle_stats); 228 #endif 229 return -ENOMEM; 230 } 231 232 atomic_set(&pool->pages_state_release_cnt, 0); 233 234 /* Driver calling page_pool_create() also call page_pool_destroy() */ 235 refcount_set(&pool->user_cnt, 1); 236 237 if (pool->p.flags & PP_FLAG_DMA_MAP) 238 get_device(pool->p.dev); 239 240 return 0; 241 } 242 243 static void page_pool_uninit(struct page_pool *pool) 244 { 245 ptr_ring_cleanup(&pool->ring, NULL); 246 247 if (pool->p.flags & PP_FLAG_DMA_MAP) 248 put_device(pool->p.dev); 249 250 #ifdef CONFIG_PAGE_POOL_STATS 251 free_percpu(pool->recycle_stats); 252 #endif 253 } 254 255 /** 256 * page_pool_create() - create a page pool. 257 * @params: parameters, see struct page_pool_params 258 */ 259 struct page_pool *page_pool_create(const struct page_pool_params *params) 260 { 261 struct page_pool *pool; 262 int err; 263 264 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid); 265 if (!pool) 266 return ERR_PTR(-ENOMEM); 267 268 err = page_pool_init(pool, params); 269 if (err < 0) 270 goto err_free; 271 272 err = page_pool_list(pool); 273 if (err) 274 goto err_uninit; 275 276 return pool; 277 278 err_uninit: 279 page_pool_uninit(pool); 280 err_free: 281 pr_warn("%s() gave up with errno %d\n", __func__, err); 282 kfree(pool); 283 return ERR_PTR(err); 284 } 285 EXPORT_SYMBOL(page_pool_create); 286 287 static void page_pool_return_page(struct page_pool *pool, struct page *page); 288 289 noinline 290 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) 291 { 292 struct ptr_ring *r = &pool->ring; 293 struct page *page; 294 int pref_nid; /* preferred NUMA node */ 295 296 /* Quicker fallback, avoid locks when ring is empty */ 297 if (__ptr_ring_empty(r)) { 298 alloc_stat_inc(pool, empty); 299 return NULL; 300 } 301 302 /* Softirq guarantee CPU and thus NUMA node is stable. This, 303 * assumes CPU refilling driver RX-ring will also run RX-NAPI. 304 */ 305 #ifdef CONFIG_NUMA 306 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid; 307 #else 308 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */ 309 pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */ 310 #endif 311 312 /* Refill alloc array, but only if NUMA match */ 313 do { 314 page = __ptr_ring_consume(r); 315 if (unlikely(!page)) 316 break; 317 318 if (likely(page_to_nid(page) == pref_nid)) { 319 pool->alloc.cache[pool->alloc.count++] = page; 320 } else { 321 /* NUMA mismatch; 322 * (1) release 1 page to page-allocator and 323 * (2) break out to fallthrough to alloc_pages_node. 324 * This limit stress on page buddy alloactor. 325 */ 326 page_pool_return_page(pool, page); 327 alloc_stat_inc(pool, waive); 328 page = NULL; 329 break; 330 } 331 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); 332 333 /* Return last page */ 334 if (likely(pool->alloc.count > 0)) { 335 page = pool->alloc.cache[--pool->alloc.count]; 336 alloc_stat_inc(pool, refill); 337 } 338 339 return page; 340 } 341 342 /* fast path */ 343 static struct page *__page_pool_get_cached(struct page_pool *pool) 344 { 345 struct page *page; 346 347 /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */ 348 if (likely(pool->alloc.count)) { 349 /* Fast-path */ 350 page = pool->alloc.cache[--pool->alloc.count]; 351 alloc_stat_inc(pool, fast); 352 } else { 353 page = page_pool_refill_alloc_cache(pool); 354 } 355 356 return page; 357 } 358 359 static void page_pool_dma_sync_for_device(struct page_pool *pool, 360 struct page *page, 361 unsigned int dma_sync_size) 362 { 363 dma_addr_t dma_addr = page_pool_get_dma_addr(page); 364 365 dma_sync_size = min(dma_sync_size, pool->p.max_len); 366 dma_sync_single_range_for_device(pool->p.dev, dma_addr, 367 pool->p.offset, dma_sync_size, 368 pool->p.dma_dir); 369 } 370 371 static bool page_pool_dma_map(struct page_pool *pool, struct page *page) 372 { 373 dma_addr_t dma; 374 375 /* Setup DMA mapping: use 'struct page' area for storing DMA-addr 376 * since dma_addr_t can be either 32 or 64 bits and does not always fit 377 * into page private data (i.e 32bit cpu with 64bit DMA caps) 378 * This mapping is kept for lifetime of page, until leaving pool. 379 */ 380 dma = dma_map_page_attrs(pool->p.dev, page, 0, 381 (PAGE_SIZE << pool->p.order), 382 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | 383 DMA_ATTR_WEAK_ORDERING); 384 if (dma_mapping_error(pool->p.dev, dma)) 385 return false; 386 387 if (page_pool_set_dma_addr(page, dma)) 388 goto unmap_failed; 389 390 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) 391 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); 392 393 return true; 394 395 unmap_failed: 396 WARN_ON_ONCE("unexpected DMA address, please report to netdev@"); 397 dma_unmap_page_attrs(pool->p.dev, dma, 398 PAGE_SIZE << pool->p.order, pool->p.dma_dir, 399 DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); 400 return false; 401 } 402 403 static void page_pool_set_pp_info(struct page_pool *pool, 404 struct page *page) 405 { 406 page->pp = pool; 407 page->pp_magic |= PP_SIGNATURE; 408 409 /* Ensuring all pages have been split into one fragment initially: 410 * page_pool_set_pp_info() is only called once for every page when it 411 * is allocated from the page allocator and page_pool_fragment_page() 412 * is dirtying the same cache line as the page->pp_magic above, so 413 * the overhead is negligible. 414 */ 415 page_pool_fragment_page(page, 1); 416 if (pool->has_init_callback) 417 pool->slow.init_callback(page, pool->slow.init_arg); 418 } 419 420 static void page_pool_clear_pp_info(struct page *page) 421 { 422 page->pp_magic = 0; 423 page->pp = NULL; 424 } 425 426 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, 427 gfp_t gfp) 428 { 429 struct page *page; 430 431 gfp |= __GFP_COMP; 432 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); 433 if (unlikely(!page)) 434 return NULL; 435 436 if ((pool->p.flags & PP_FLAG_DMA_MAP) && 437 unlikely(!page_pool_dma_map(pool, page))) { 438 put_page(page); 439 return NULL; 440 } 441 442 alloc_stat_inc(pool, slow_high_order); 443 page_pool_set_pp_info(pool, page); 444 445 /* Track how many pages are held 'in-flight' */ 446 pool->pages_state_hold_cnt++; 447 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); 448 return page; 449 } 450 451 /* slow path */ 452 noinline 453 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, 454 gfp_t gfp) 455 { 456 const int bulk = PP_ALLOC_CACHE_REFILL; 457 unsigned int pp_flags = pool->p.flags; 458 unsigned int pp_order = pool->p.order; 459 struct page *page; 460 int i, nr_pages; 461 462 /* Don't support bulk alloc for high-order pages */ 463 if (unlikely(pp_order)) 464 return __page_pool_alloc_page_order(pool, gfp); 465 466 /* Unnecessary as alloc cache is empty, but guarantees zero count */ 467 if (unlikely(pool->alloc.count > 0)) 468 return pool->alloc.cache[--pool->alloc.count]; 469 470 /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */ 471 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); 472 473 nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk, 474 pool->alloc.cache); 475 if (unlikely(!nr_pages)) 476 return NULL; 477 478 /* Pages have been filled into alloc.cache array, but count is zero and 479 * page element have not been (possibly) DMA mapped. 480 */ 481 for (i = 0; i < nr_pages; i++) { 482 page = pool->alloc.cache[i]; 483 if ((pp_flags & PP_FLAG_DMA_MAP) && 484 unlikely(!page_pool_dma_map(pool, page))) { 485 put_page(page); 486 continue; 487 } 488 489 page_pool_set_pp_info(pool, page); 490 pool->alloc.cache[pool->alloc.count++] = page; 491 /* Track how many pages are held 'in-flight' */ 492 pool->pages_state_hold_cnt++; 493 trace_page_pool_state_hold(pool, page, 494 pool->pages_state_hold_cnt); 495 } 496 497 /* Return last page */ 498 if (likely(pool->alloc.count > 0)) { 499 page = pool->alloc.cache[--pool->alloc.count]; 500 alloc_stat_inc(pool, slow); 501 } else { 502 page = NULL; 503 } 504 505 /* When page just alloc'ed is should/must have refcnt 1. */ 506 return page; 507 } 508 509 /* For using page_pool replace: alloc_pages() API calls, but provide 510 * synchronization guarantee for allocation side. 511 */ 512 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) 513 { 514 struct page *page; 515 516 /* Fast-path: Get a page from cache */ 517 page = __page_pool_get_cached(pool); 518 if (page) 519 return page; 520 521 /* Slow-path: cache empty, do real allocation */ 522 page = __page_pool_alloc_pages_slow(pool, gfp); 523 return page; 524 } 525 EXPORT_SYMBOL(page_pool_alloc_pages); 526 527 /* Calculate distance between two u32 values, valid if distance is below 2^(31) 528 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution 529 */ 530 #define _distance(a, b) (s32)((a) - (b)) 531 532 static s32 page_pool_inflight(struct page_pool *pool) 533 { 534 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); 535 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); 536 s32 inflight; 537 538 inflight = _distance(hold_cnt, release_cnt); 539 540 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); 541 WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight); 542 543 return inflight; 544 } 545 546 /* Disconnects a page (from a page_pool). API users can have a need 547 * to disconnect a page (from a page_pool), to allow it to be used as 548 * a regular page (that will eventually be returned to the normal 549 * page-allocator via put_page). 550 */ 551 static void page_pool_return_page(struct page_pool *pool, struct page *page) 552 { 553 dma_addr_t dma; 554 int count; 555 556 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) 557 /* Always account for inflight pages, even if we didn't 558 * map them 559 */ 560 goto skip_dma_unmap; 561 562 dma = page_pool_get_dma_addr(page); 563 564 /* When page is unmapped, it cannot be returned to our pool */ 565 dma_unmap_page_attrs(pool->p.dev, dma, 566 PAGE_SIZE << pool->p.order, pool->p.dma_dir, 567 DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); 568 page_pool_set_dma_addr(page, 0); 569 skip_dma_unmap: 570 page_pool_clear_pp_info(page); 571 572 /* This may be the last page returned, releasing the pool, so 573 * it is not safe to reference pool afterwards. 574 */ 575 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt); 576 trace_page_pool_state_release(pool, page, count); 577 578 put_page(page); 579 /* An optimization would be to call __free_pages(page, pool->p.order) 580 * knowing page is not part of page-cache (thus avoiding a 581 * __page_cache_release() call). 582 */ 583 } 584 585 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) 586 { 587 int ret; 588 /* BH protection not needed if current is softirq */ 589 if (in_softirq()) 590 ret = ptr_ring_produce(&pool->ring, page); 591 else 592 ret = ptr_ring_produce_bh(&pool->ring, page); 593 594 if (!ret) { 595 recycle_stat_inc(pool, ring); 596 return true; 597 } 598 599 return false; 600 } 601 602 /* Only allow direct recycling in special circumstances, into the 603 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case. 604 * 605 * Caller must provide appropriate safe context. 606 */ 607 static bool page_pool_recycle_in_cache(struct page *page, 608 struct page_pool *pool) 609 { 610 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) { 611 recycle_stat_inc(pool, cache_full); 612 return false; 613 } 614 615 /* Caller MUST have verified/know (page_ref_count(page) == 1) */ 616 pool->alloc.cache[pool->alloc.count++] = page; 617 recycle_stat_inc(pool, cached); 618 return true; 619 } 620 621 /* If the page refcnt == 1, this will try to recycle the page. 622 * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for 623 * the configured size min(dma_sync_size, pool->max_len). 624 * If the page refcnt != 1, then the page will be returned to memory 625 * subsystem. 626 */ 627 static __always_inline struct page * 628 __page_pool_put_page(struct page_pool *pool, struct page *page, 629 unsigned int dma_sync_size, bool allow_direct) 630 { 631 lockdep_assert_no_hardirq(); 632 633 /* This allocator is optimized for the XDP mode that uses 634 * one-frame-per-page, but have fallbacks that act like the 635 * regular page allocator APIs. 636 * 637 * refcnt == 1 means page_pool owns page, and can recycle it. 638 * 639 * page is NOT reusable when allocated when system is under 640 * some pressure. (page_is_pfmemalloc) 641 */ 642 if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) { 643 /* Read barrier done in page_ref_count / READ_ONCE */ 644 645 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) 646 page_pool_dma_sync_for_device(pool, page, 647 dma_sync_size); 648 649 if (allow_direct && in_softirq() && 650 page_pool_recycle_in_cache(page, pool)) 651 return NULL; 652 653 /* Page found as candidate for recycling */ 654 return page; 655 } 656 /* Fallback/non-XDP mode: API user have elevated refcnt. 657 * 658 * Many drivers split up the page into fragments, and some 659 * want to keep doing this to save memory and do refcnt based 660 * recycling. Support this use case too, to ease drivers 661 * switching between XDP/non-XDP. 662 * 663 * In-case page_pool maintains the DMA mapping, API user must 664 * call page_pool_put_page once. In this elevated refcnt 665 * case, the DMA is unmapped/released, as driver is likely 666 * doing refcnt based recycle tricks, meaning another process 667 * will be invoking put_page. 668 */ 669 recycle_stat_inc(pool, released_refcnt); 670 page_pool_return_page(pool, page); 671 672 return NULL; 673 } 674 675 void page_pool_put_defragged_page(struct page_pool *pool, struct page *page, 676 unsigned int dma_sync_size, bool allow_direct) 677 { 678 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); 679 if (page && !page_pool_recycle_in_ring(pool, page)) { 680 /* Cache full, fallback to free pages */ 681 recycle_stat_inc(pool, ring_full); 682 page_pool_return_page(pool, page); 683 } 684 } 685 EXPORT_SYMBOL(page_pool_put_defragged_page); 686 687 /** 688 * page_pool_put_page_bulk() - release references on multiple pages 689 * @pool: pool from which pages were allocated 690 * @data: array holding page pointers 691 * @count: number of pages in @data 692 * 693 * Tries to refill a number of pages into the ptr_ring cache holding ptr_ring 694 * producer lock. If the ptr_ring is full, page_pool_put_page_bulk() 695 * will release leftover pages to the page allocator. 696 * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx 697 * completion loop for the XDP_REDIRECT use case. 698 * 699 * Please note the caller must not use data area after running 700 * page_pool_put_page_bulk(), as this function overwrites it. 701 */ 702 void page_pool_put_page_bulk(struct page_pool *pool, void **data, 703 int count) 704 { 705 int i, bulk_len = 0; 706 bool in_softirq; 707 708 for (i = 0; i < count; i++) { 709 struct page *page = virt_to_head_page(data[i]); 710 711 /* It is not the last user for the page frag case */ 712 if (!page_pool_is_last_frag(page)) 713 continue; 714 715 page = __page_pool_put_page(pool, page, -1, false); 716 /* Approved for bulk recycling in ptr_ring cache */ 717 if (page) 718 data[bulk_len++] = page; 719 } 720 721 if (unlikely(!bulk_len)) 722 return; 723 724 /* Bulk producer into ptr_ring page_pool cache */ 725 in_softirq = page_pool_producer_lock(pool); 726 for (i = 0; i < bulk_len; i++) { 727 if (__ptr_ring_produce(&pool->ring, data[i])) { 728 /* ring full */ 729 recycle_stat_inc(pool, ring_full); 730 break; 731 } 732 } 733 recycle_stat_add(pool, ring, i); 734 page_pool_producer_unlock(pool, in_softirq); 735 736 /* Hopefully all pages was return into ptr_ring */ 737 if (likely(i == bulk_len)) 738 return; 739 740 /* ptr_ring cache full, free remaining pages outside producer lock 741 * since put_page() with refcnt == 1 can be an expensive operation 742 */ 743 for (; i < bulk_len; i++) 744 page_pool_return_page(pool, data[i]); 745 } 746 EXPORT_SYMBOL(page_pool_put_page_bulk); 747 748 static struct page *page_pool_drain_frag(struct page_pool *pool, 749 struct page *page) 750 { 751 long drain_count = BIAS_MAX - pool->frag_users; 752 753 /* Some user is still using the page frag */ 754 if (likely(page_pool_defrag_page(page, drain_count))) 755 return NULL; 756 757 if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) { 758 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) 759 page_pool_dma_sync_for_device(pool, page, -1); 760 761 return page; 762 } 763 764 page_pool_return_page(pool, page); 765 return NULL; 766 } 767 768 static void page_pool_free_frag(struct page_pool *pool) 769 { 770 long drain_count = BIAS_MAX - pool->frag_users; 771 struct page *page = pool->frag_page; 772 773 pool->frag_page = NULL; 774 775 if (!page || page_pool_defrag_page(page, drain_count)) 776 return; 777 778 page_pool_return_page(pool, page); 779 } 780 781 struct page *page_pool_alloc_frag(struct page_pool *pool, 782 unsigned int *offset, 783 unsigned int size, gfp_t gfp) 784 { 785 unsigned int max_size = PAGE_SIZE << pool->p.order; 786 struct page *page = pool->frag_page; 787 788 if (WARN_ON(size > max_size)) 789 return NULL; 790 791 size = ALIGN(size, dma_get_cache_alignment()); 792 *offset = pool->frag_offset; 793 794 if (page && *offset + size > max_size) { 795 page = page_pool_drain_frag(pool, page); 796 if (page) { 797 alloc_stat_inc(pool, fast); 798 goto frag_reset; 799 } 800 } 801 802 if (!page) { 803 page = page_pool_alloc_pages(pool, gfp); 804 if (unlikely(!page)) { 805 pool->frag_page = NULL; 806 return NULL; 807 } 808 809 pool->frag_page = page; 810 811 frag_reset: 812 pool->frag_users = 1; 813 *offset = 0; 814 pool->frag_offset = size; 815 page_pool_fragment_page(page, BIAS_MAX); 816 return page; 817 } 818 819 pool->frag_users++; 820 pool->frag_offset = *offset + size; 821 alloc_stat_inc(pool, fast); 822 return page; 823 } 824 EXPORT_SYMBOL(page_pool_alloc_frag); 825 826 static void page_pool_empty_ring(struct page_pool *pool) 827 { 828 struct page *page; 829 830 /* Empty recycle ring */ 831 while ((page = ptr_ring_consume_bh(&pool->ring))) { 832 /* Verify the refcnt invariant of cached pages */ 833 if (!(page_ref_count(page) == 1)) 834 pr_crit("%s() page_pool refcnt %d violation\n", 835 __func__, page_ref_count(page)); 836 837 page_pool_return_page(pool, page); 838 } 839 } 840 841 static void __page_pool_destroy(struct page_pool *pool) 842 { 843 if (pool->disconnect) 844 pool->disconnect(pool); 845 846 page_pool_unlist(pool); 847 page_pool_uninit(pool); 848 kfree(pool); 849 } 850 851 static void page_pool_empty_alloc_cache_once(struct page_pool *pool) 852 { 853 struct page *page; 854 855 if (pool->destroy_cnt) 856 return; 857 858 /* Empty alloc cache, assume caller made sure this is 859 * no-longer in use, and page_pool_alloc_pages() cannot be 860 * call concurrently. 861 */ 862 while (pool->alloc.count) { 863 page = pool->alloc.cache[--pool->alloc.count]; 864 page_pool_return_page(pool, page); 865 } 866 } 867 868 static void page_pool_scrub(struct page_pool *pool) 869 { 870 page_pool_empty_alloc_cache_once(pool); 871 pool->destroy_cnt++; 872 873 /* No more consumers should exist, but producers could still 874 * be in-flight. 875 */ 876 page_pool_empty_ring(pool); 877 } 878 879 static int page_pool_release(struct page_pool *pool) 880 { 881 int inflight; 882 883 page_pool_scrub(pool); 884 inflight = page_pool_inflight(pool); 885 if (!inflight) 886 __page_pool_destroy(pool); 887 888 return inflight; 889 } 890 891 static void page_pool_release_retry(struct work_struct *wq) 892 { 893 struct delayed_work *dwq = to_delayed_work(wq); 894 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); 895 int inflight; 896 897 inflight = page_pool_release(pool); 898 if (!inflight) 899 return; 900 901 /* Periodic warning */ 902 if (time_after_eq(jiffies, pool->defer_warn)) { 903 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ; 904 905 pr_warn("%s() stalled pool shutdown %d inflight %d sec\n", 906 __func__, inflight, sec); 907 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; 908 } 909 910 /* Still not ready to be disconnected, retry later */ 911 schedule_delayed_work(&pool->release_dw, DEFER_TIME); 912 } 913 914 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), 915 struct xdp_mem_info *mem) 916 { 917 refcount_inc(&pool->user_cnt); 918 pool->disconnect = disconnect; 919 pool->xdp_mem_id = mem->id; 920 } 921 922 void page_pool_unlink_napi(struct page_pool *pool) 923 { 924 if (!pool->p.napi) 925 return; 926 927 /* To avoid races with recycling and additional barriers make sure 928 * pool and NAPI are unlinked when NAPI is disabled. 929 */ 930 WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) || 931 READ_ONCE(pool->p.napi->list_owner) != -1); 932 933 WRITE_ONCE(pool->p.napi, NULL); 934 } 935 EXPORT_SYMBOL(page_pool_unlink_napi); 936 937 void page_pool_destroy(struct page_pool *pool) 938 { 939 if (!pool) 940 return; 941 942 if (!page_pool_put(pool)) 943 return; 944 945 page_pool_unlink_napi(pool); 946 page_pool_free_frag(pool); 947 948 if (!page_pool_release(pool)) 949 return; 950 951 pool->defer_start = jiffies; 952 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; 953 954 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry); 955 schedule_delayed_work(&pool->release_dw, DEFER_TIME); 956 } 957 EXPORT_SYMBOL(page_pool_destroy); 958 959 /* Caller must provide appropriate safe context, e.g. NAPI. */ 960 void page_pool_update_nid(struct page_pool *pool, int new_nid) 961 { 962 struct page *page; 963 964 trace_page_pool_update_nid(pool, new_nid); 965 pool->p.nid = new_nid; 966 967 /* Flush pool alloc cache, as refill will check NUMA node */ 968 while (pool->alloc.count) { 969 page = pool->alloc.cache[--pool->alloc.count]; 970 page_pool_return_page(pool, page); 971 } 972 } 973 EXPORT_SYMBOL(page_pool_update_nid); 974