/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#include "page_pool_priv.h"

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	(LONG_MAX >> 1)

#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)					\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);				\
	} while (0)

#define recycle_stat_add(pool, __stat, val)				\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);				\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool:	pool from which page was allocated
 * @stats:	struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * A pointer to a caller-allocated struct page_pool_stats structure
 * is passed to this API which is filled in. The caller can then report
 * those stats to the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;

	/* The caller is responsible for initializing stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);

#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
#define recycle_stat_add(pool, __stat, val)
#endif
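
/*
 * Illustrative sketch (not part of the upstream file): how a driver might
 * wire the stats helpers above into its ethtool callbacks. The my_priv()/pp
 * names are hypothetical and error handling is omitted; the point is only
 * the pairing of page_pool_ethtool_stats_get_count()/_strings() with
 * page_pool_get_stats() + page_pool_ethtool_stats_get().
 *
 *	static int my_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : -EOPNOTSUPP;
 *	}
 *
 *	static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			data = page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static void my_get_ethtool_stats(struct net_device *dev,
 *					 struct ethtool_stats *es, u64 *data)
 *	{
 *		struct page_pool_stats stats = {};
 *
 *		page_pool_get_stats(my_priv(dev)->pp, &stats);
 *		data = page_pool_ethtool_stats_get(data, &stats);
 *	}
 */
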
static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params,
			  int cpuid)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, &params->fast, sizeof(pool->p));
	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));

	pool->cpuid = cpuid;

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL is for allowing the page to be used for DMA
	 * sending, which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	pool->has_init_callback = !!pool->slow.init_callback;

#ifdef CONFIG_PAGE_POOL_STATS
	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!pool->recycle_stats)
		return -ENOMEM;
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		free_percpu(pool->recycle_stats);
#endif
		return -ENOMEM;
	}

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

static void page_pool_uninit(struct page_pool *pool)
{
	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

#ifdef CONFIG_PAGE_POOL_STATS
	free_percpu(pool->recycle_stats);
#endif
}

/**
 * page_pool_create_percpu() - create a page pool for a given cpu.
 * @params: parameters, see struct page_pool_params
 * @cpuid: cpu identifier
 */
struct page_pool *
page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params, cpuid);
	if (err < 0)
		goto err_free;

	err = page_pool_list(pool);
	if (err)
		goto err_uninit;

	return pool;

err_uninit:
	page_pool_uninit(pool);
err_free:
	pr_warn("%s() gave up with errno %d\n", __func__, err);
	kfree(pool);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(page_pool_create_percpu);

/**
 * page_pool_create() - create a page pool
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	return page_pool_create_percpu(params, -1);
}
EXPORT_SYMBOL(page_pool_create);
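
/*
 * Illustrative sketch (not part of the upstream file): a typical RX-side
 * page_pool_create() call that passes the validation in page_pool_init()
 * above. The rxq/priv names and sizes are hypothetical; a real driver
 * matches pool_size to its RX ring and offset/max_len to its buffer layout.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= rxq->ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= priv->dev,
 *		.napi		= &rxq->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *
 *	rxq->page_pool = page_pool_create(&pp_params);
 *	if (IS_ERR(rxq->page_pool))
 *		return PTR_ERR(rxq->page_pool);
 */
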
static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return NULL;
	}

	/* Softirq guarantees the CPU and thus the NUMA node are stable. This
	 * assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fallthrough to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			alloc_stat_inc(pool, waive);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e. 32bit cpu with 64bit DMA caps)
	 * This mapping is kept for lifetime of page, until leaving pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
						  DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	if (page_pool_set_dma_addr(page, dma))
		goto unmap_failed;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

	return true;

unmap_failed:
	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return false;
}

static void page_pool_set_pp_info(struct page_pool *pool,
				  struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;

	/* Ensuring all pages have been split into one fragment initially:
	 * page_pool_set_pp_info() is only called once for every page when it
	 * is allocated from the page allocator and page_pool_fragment_page()
	 * is dirtying the same cache line as the page->pp_magic above, so
	 * the overhead is negligible.
	 */
	page_pool_fragment_page(page, 1);
	if (pool->has_init_callback)
		pool->slow.init_callback(page, pool->slow.init_arg);
}

static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page);

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_flags = pool->p.flags;
	unsigned int pp_order = pool->p.order;
	struct page *page;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return __page_pool_alloc_page_order(pool, gfp);

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
					       pool->alloc.cache);
	if (unlikely(!nr_pages))
		return NULL;

	/* Pages have been filled into alloc.cache array, but count is zero and
	 * page elements have not been (possibly) DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pool->alloc.cache[i];
		if ((pp_flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);
			continue;
		}

		page_pool_set_pp_info(pool, page);
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, page,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		page = NULL;
	}

	/* When a page is just alloc'ed, it should/must have refcnt 1. */
	return page;
}

/* For using page_pool to replace alloc_pages() API calls, but provide
 * a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	if (strict) {
		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
		     inflight);
	} else {
		inflight = max(0, inflight);
	}

	return inflight;
}

static __always_inline
void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		return;

	dma = page_pool_get_dma_addr(page);

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr(page, 0);
}

/* Disconnects a page (from a page_pool). API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	int count;

	__page_pool_release_page_dma(pool, page);

	page_pool_clear_pp_info(page);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;

	/* BH protection not needed if current is softirq */
	if (in_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	if (!ret) {
		recycle_stat_inc(pool, ring);
		return true;
	}

	return false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	recycle_stat_inc(pool, cached);
	return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	lockdep_assert_no_hardirq();

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 *
	 * page is NOT reusable when allocated while the system is under
	 * some pressure. (page_is_pfmemalloc)
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}
	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_return_page(pool, page);

	return NULL;
}

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_unrefed_page);

/**
 * page_pool_put_page_bulk() - release references on multiple pages
 * @pool:	pool from which pages were allocated
 * @data:	array holding page pointers
 * @count:	number of pages in @data
 *
 * Tries to refill a number of pages into the ptr_ring cache holding the
 * ptr_ring producer lock. If the ptr_ring is full, page_pool_put_page_bulk()
 * will release leftover pages to the page allocator.
 * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx
 * completion loop for the XDP_REDIRECT use case.
 *
 * Please note the caller must not use the data area after running
 * page_pool_put_page_bulk(), as this function overwrites it.
 */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;
	bool in_softirq;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		/* It is not the last user for the page frag case */
		if (!page_pool_is_last_ref(page))
			continue;

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}
	recycle_stat_add(pool, ring, i);
	page_pool_producer_unlock(pool, in_softirq);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);

static struct page *page_pool_drain_frag(struct page_pool *pool,
					 struct page *page)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_unref_page(page, drain_count)))
		return NULL;

	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page, -1);

		return page;
	}

	page_pool_return_page(pool, page);
	return NULL;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	struct page *page = pool->frag_page;

	pool->frag_page = NULL;

	if (!page || page_pool_unref_page(page, drain_count))
		return;

	page_pool_return_page(pool, page);
}

struct page *page_pool_alloc_frag(struct page_pool *pool,
				  unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page = pool->frag_page;

	if (WARN_ON(size > max_size))
		return NULL;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (page && *offset + size > max_size) {
		page = page_pool_drain_frag(pool, page);
		if (page) {
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!page) {
		page = page_pool_alloc_pages(pool, gfp);
		if (unlikely(!page)) {
			pool->frag_page = NULL;
			return NULL;
		}

		pool->frag_page = page;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_page(page, BIAS_MAX);
		return page;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	alloc_stat_inc(pool, fast);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);
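
/*
 * Illustrative sketch (not part of the upstream file): carving several RX
 * buffers out of one pooled page with page_pool_alloc_frag() above. The
 * rxq/desc/rx_buf names and the 2K size are hypothetical.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(rxq->page_pool, &offset, 2048, GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	desc->addr = page_pool_get_dma_addr(page) + offset;
 *	rx_buf->page = page;
 *	rx_buf->offset = offset;
 *
 * Each fragment is later released with page_pool_put_page() (or the
 * frag-aware helpers in <net/page_pool/helpers.h>); the page only returns
 * to the pool when the last fragment reference is dropped.
 */
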
static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void __page_pool_destroy(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	page_pool_unlist(pool);
	page_pool_uninit(pool);
	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool, true);
	if (!inflight)
		__page_pool_destroy(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	void *netdev;
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning for page pools the user can't see */
	netdev = READ_ONCE(pool->slow.netdev);
	if (time_after_eq(jiffies, pool->defer_warn) &&
	    (!netdev || netdev == NET_PTR_POISON)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
			__func__, pool->user.id, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

static void page_pool_disable_direct_recycling(struct page_pool *pool)
{
	/* Disable direct recycling based on pool->cpuid.
	 * Paired with READ_ONCE() in napi_pp_put_page().
	 */
	WRITE_ONCE(pool->cpuid, -1);

	if (!pool->p.napi)
		return;

	/* To avoid races with recycling and additional barriers make sure
	 * pool and NAPI are unlinked when NAPI is disabled.
	 */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
		READ_ONCE(pool->p.napi->list_owner) != -1);

	WRITE_ONCE(pool->p.napi, NULL);
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_disable_direct_recycling(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	page_pool_detached(pool);
	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
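
/*
 * Illustrative sketch (not part of the upstream file): the usual driver
 * teardown order around page_pool_destroy() above. The rxq names are
 * hypothetical; the point is that the driver returns the pages it still
 * holds before destroying, and that in-flight pages only delay the final
 * free via the deferred release work above rather than prevent it.
 *
 *	napi_disable(&rxq->napi);
 *
 *	for (i = 0; i < rxq->ring_size; i++)	// return driver-held pages
 *		page_pool_put_full_page(rxq->page_pool,
 *					rxq->bufs[i].page, false);
 *
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);	// unregister XDP memory model
 *	page_pool_destroy(rxq->page_pool);	// schedules deferred release
 *						// if pages are still in flight
 */
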
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);
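
/*
 * Illustrative sketch (not part of the upstream file): the basic RX-side
 * allocate/recycle cycle built from the APIs in this file. The rxq names
 * are hypothetical and error handling is omitted.
 *
 *	// refill: runs in the driver's RX-NAPI (softirq) context
 *	struct page *page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
 *	dma_addr_t dma = page_pool_get_dma_addr(page) + rxq->rx_headroom;
 *	// ...post dma to the RX descriptor ring...
 *
 *	// completion: XDP_DROP or consumed frame, still in NAPI context,
 *	// so direct recycling into the alloc-side cache is allowed
 *	page_pool_recycle_direct(rxq->page_pool, page);
 *
 *	// outside NAPI/softirq context the page must go via the ptr_ring
 *	page_pool_put_full_page(rxq->page_pool, page, false);
 */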