/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/error-injection.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#include "dev.h"
#include "mp_dmabuf_devmem.h"
#include "netmem_priv.h"
#include "page_pool_priv.h"

DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	(LONG_MAX >> 1)

#ifdef CONFIG_PAGE_POOL_STATS
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);

/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)					\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);				\
	} while (0)

#define recycle_stat_add(pool, __stat, val)				\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);				\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool: pool from which page was allocated
 * @stats: struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * A pointer to a caller allocated struct page_pool_stats structure
 * is passed to this API which is filled in. The caller can then report
 * those stats to the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;

	/* The caller is responsible to initialize stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	const struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);

#else
#define alloc_stat_inc(...)	do { } while (0)
#define recycle_stat_inc(...)	do { } while (0)
#define recycle_stat_add(...)	do { } while (0)
#endif

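/* Example (illustrative sketch, not part of this file): a hypothetical
 * "foo" driver reporting the pool stats above through its ethtool ops.
 * foo_priv and its rx_ring[] array are assumptions.
 *
 *	static int foo_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : -EOPNOTSUPP;
 *	}
 *
 *	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static void foo_get_ethtool_stats(struct net_device *dev,
 *					  struct ethtool_stats *st, u64 *data)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *		struct page_pool_stats pp_stats = { };
 *		int i;
 *
 *		// page_pool_get_stats() accumulates into the caller's
 *		// struct, so one struct can aggregate all RX rings.
 *		for (i = 0; i < priv->num_rx_rings; i++)
 *			page_pool_get_stats(priv->rx_ring[i].page_pool,
 *					    &pp_stats);
 *		page_pool_ethtool_stats_get(data, &pp_stats);
 *	}
 */
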
static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

static void page_pool_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
	CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
				    PAGE_POOL_FRAG_GROUP_ALIGN);
}

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params,
			  int cpuid)
{
	unsigned int ring_qsize = 1024; /* Default */
	struct netdev_rx_queue *rxq;
	int err;

	page_pool_struct_check();

	memcpy(&pool->p, &params->fast, sizeof(pool->p));
	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));

	pool->cpuid = cpuid;
	pool->dma_sync_for_cpu = true;

	/* Validate only known flags were used */
	if (pool->slow.flags & ~PP_FLAG_ALL)
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = min(pool->p.pool_size, 16384);

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL additionally allows the page to be used for DMA
	 * transmit, which is the XDP_TX use-case.
	 */
	if (pool->slow.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;

		pool->dma_map = true;
	}

	if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		pool->dma_sync = true;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	pool->has_init_callback = !!pool->slow.init_callback;

#ifdef CONFIG_PAGE_POOL_STATS
	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
		if (!pool->recycle_stats)
			return -ENOMEM;
	} else {
		/* For the system page pool instance we use a single stats
		 * object instead of allocating a separate percpu variable for
		 * each (also percpu) page pool instance.
		 */
		pool->recycle_stats = &pp_system_recycle_stats;
		pool->system = true;
	}
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		if (!pool->system)
			free_percpu(pool->recycle_stats);
#endif
		return -ENOMEM;
	}

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* The driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1);

	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
		netdev_assert_locked(pool->slow.netdev);
		rxq = __netif_get_rx_queue(pool->slow.netdev,
					   pool->slow.queue_idx);
		pool->mp_priv = rxq->mp_params.mp_priv;
		pool->mp_ops = rxq->mp_params.mp_ops;
	}

	if (pool->mp_ops) {
		if (!pool->dma_map || !pool->dma_sync) {
			err = -EOPNOTSUPP;
			goto free_ptr_ring;
		}

		if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
			err = -EFAULT;
			goto free_ptr_ring;
		}

		err = pool->mp_ops->init(pool);
		if (err) {
			pr_warn("%s() mem-provider init failed %d\n", __func__,
				err);
			goto free_ptr_ring;
		}

		static_branch_inc(&page_pool_mem_providers);
	}

	return 0;

free_ptr_ring:
	ptr_ring_cleanup(&pool->ring, NULL);
#ifdef CONFIG_PAGE_POOL_STATS
	if (!pool->system)
		free_percpu(pool->recycle_stats);
#endif
	return err;
}

static void page_pool_uninit(struct page_pool *pool)
{
	ptr_ring_cleanup(&pool->ring, NULL);
	xa_destroy(&pool->dma_mapped);

#ifdef CONFIG_PAGE_POOL_STATS
	if (!pool->system)
		free_percpu(pool->recycle_stats);
#endif
}

/**
 * page_pool_create_percpu() - create a page pool for a given cpu.
 * @params: parameters, see struct page_pool_params
 * @cpuid: cpu identifier
 */
struct page_pool *
page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params, cpuid);
	if (err < 0)
		goto err_free;

	err = page_pool_list(pool);
	if (err)
		goto err_uninit;

	return pool;

err_uninit:
	page_pool_uninit(pool);
err_free:
	pr_warn("%s() gave up with errno %d\n", __func__, err);
	kfree(pool);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(page_pool_create_percpu);

/**
 * page_pool_create() - create a page pool
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	return page_pool_create_percpu(params, -1);
}
EXPORT_SYMBOL(page_pool_create);

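/* Example (illustrative sketch): typical page_pool_create() usage from a
 * hypothetical driver's RX ring setup.  The headroom and ring-size values
 * are assumptions, not requirements.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= ring->size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= ring->dev,
 *		.napi		= &ring->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *
 *	ring->page_pool = page_pool_create(&pp_params);
 *	if (IS_ERR(ring->page_pool)) {
 *		int err = PTR_ERR(ring->page_pool);
 *
 *		ring->page_pool = NULL;
 *		return err;
 *	}
 */
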
static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);

static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	netmem_ref netmem;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return 0;
	}

	/* Softirq guarantees the CPU and thus the NUMA node are stable. This
	 * assumes the CPU refilling the driver RX-ring will also run RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		netmem = (__force netmem_ref)__ptr_ring_consume(r);
		if (unlikely(!netmem))
			break;

		if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
			pool->alloc.cache[pool->alloc.count++] = netmem;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_netmem(pool, netmem);
			alloc_stat_inc(pool, waive);
			netmem = 0;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return netmem;
}

/* fast path */
static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		netmem = page_pool_refill_alloc_cache(pool);
	}

	return netmem;
}

static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
					    netmem_ref netmem,
					    u32 dma_sync_size)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     dma_sync_size, pool->p.dma_dir);
#endif
}

static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
			      netmem_ref netmem,
			      u32 dma_sync_size)
{
	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) {
		rcu_read_lock();
		/* re-check under rcu_read_lock() to sync with page_pool_scrub() */
		if (pool->dma_sync)
			__page_pool_dma_sync_for_device(pool, netmem,
							dma_sync_size);
		rcu_read_unlock();
	}
}

static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
{
	dma_addr_t dma;
	int err;
	u32 id;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
	 * This mapping is kept for the lifetime of the page, until it leaves
	 * the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
				 DMA_ATTR_SKIP_CPU_SYNC |
					 DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	if (page_pool_set_dma_addr_netmem(netmem, dma)) {
		WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
		goto unmap_failed;
	}

	if (in_softirq())
		err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
			       PP_DMA_INDEX_LIMIT, gfp);
	else
		err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
				  PP_DMA_INDEX_LIMIT, gfp);
	if (err) {
		WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
		goto unset_failed;
	}

	netmem_set_dma_index(netmem, id);
	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);

	return true;

unset_failed:
	page_pool_set_dma_addr_netmem(netmem, 0);
unmap_failed:
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return false;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page_to_netmem(page));

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page_to_netmem(page),
				   pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool,
							  gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_order = pool->p.order;
	bool dma_map = pool->dma_map;
	netmem_ref netmem;
	int i, nr_pages;

	/* Unconditionally set NOWARN if allocating from NAPI.
	 * Drivers forget to set it, and OOM reports on packet Rx are useless.
	 */
	if ((gfp & GFP_ATOMIC) == GFP_ATOMIC)
		gfp |= __GFP_NOWARN;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
					 (struct page **)pool->alloc.cache);
	if (unlikely(!nr_pages))
		return 0;

	/* Pages have been filled into the alloc.cache array, but the count is
	 * zero and the page elements have not been DMA mapped yet (when
	 * mapping is enabled).
	 */
	for (i = 0; i < nr_pages; i++) {
		netmem = pool->alloc.cache[i];
		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) {
			put_page(netmem_to_page(netmem));
			continue;
		}

		page_pool_set_pp_info(pool, netmem);
		pool->alloc.cache[pool->alloc.count++] = netmem;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, netmem,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		netmem = 0;
	}

	/* A page just allocated should/must have refcnt 1. */
	return netmem;
}

/* page_pool replaces alloc_pages() API calls, but additionally provides
 * the synchronization guarantee for the allocation side.
 */
netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	netmem_ref netmem;

	/* Fast-path: Get a page from cache */
	netmem = __page_pool_get_cached(pool);
	if (netmem)
		return netmem;

	/* Slow-path: cache empty, do real allocation */
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
		netmem = pool->mp_ops->alloc_netmems(pool, gfp);
	else
		netmem = __page_pool_alloc_netmems_slow(pool, gfp);
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_netmems);
ALLOW_ERROR_INJECTION(page_pool_alloc_netmems, NULL);

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_netmems(pool, gfp));
}
EXPORT_SYMBOL(page_pool_alloc_pages);

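/* Example (illustrative sketch): RX ring refill in a hypothetical driver
 * using the allocation API above.  foo_rx_ring and its helpers are
 * assumptions; with PP_FLAG_DMA_MAP the pool already holds the mapping.
 *
 *	static int foo_refill_rx(struct foo_rx_ring *ring)
 *	{
 *		while (foo_rx_descs_free(ring)) {
 *			struct page *page;
 *			dma_addr_t dma;
 *
 *			page = page_pool_dev_alloc_pages(ring->page_pool);
 *			if (!page)
 *				return -ENOMEM;
 *
 *			dma = page_pool_get_dma_addr(page) + ring->rx_offset;
 *			foo_post_rx_desc(ring, page, dma);
 *		}
 *		return 0;
 *	}
 */
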
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	if (strict) {
		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
		     inflight);
	} else {
		inflight = max(0, inflight);
	}

	return inflight;
}

void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{
	netmem_set_pp(netmem, pool);
	netmem_or_pp_magic(netmem, PP_SIGNATURE);

	/* Ensuring all pages have been split into one fragment initially:
	 * page_pool_set_pp_info() is only called once for every page when it
	 * is allocated from the page allocator and page_pool_fragment_page()
	 * is dirtying the same cache line as the page->pp_magic above, so
	 * the overhead is negligible.
	 */
	page_pool_fragment_netmem(netmem, 1);
	if (pool->has_init_callback)
		pool->slow.init_callback(netmem, pool->slow.init_arg);
}

void page_pool_clear_pp_info(netmem_ref netmem)
{
	netmem_clear_pp_magic(netmem);
	netmem_set_pp(netmem, NULL);
}

static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
							   netmem_ref netmem)
{
	struct page *old, *page = netmem_to_page(netmem);
	unsigned long id;
	dma_addr_t dma;

	if (!pool->dma_map)
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		return;

	id = netmem_get_dma_index(netmem);
	if (!id)
		return;

	if (in_softirq())
		old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
	else
		old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
	if (old != page)
		return;

	dma = page_pool_get_dma_addr_netmem(netmem);

	/* When the page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr_netmem(netmem, 0);
	netmem_set_dma_index(netmem, 0);
}

/* Disconnects a page from a page_pool. API users can need to disconnect
 * a page to allow it to be used as a regular page (that will eventually
 * be returned to the normal page-allocator via put_page()).
 */
static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
{
	int count;
	bool put;

	put = true;
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
		put = pool->mp_ops->release_netmem(pool, netmem);
	else
		__page_pool_release_netmem_dma(pool, netmem);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, netmem, count);

	if (put) {
		page_pool_clear_pp_info(netmem);
		put_page(netmem_to_page(netmem));
	}
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

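/* Worked example (illustrative): pages_state_hold_cnt and
 * pages_state_release_cnt are free-running u32 counters, so
 * page_pool_inflight() above must use serial-number arithmetic rather
 * than a plain comparison.  E.g. shortly after hold_cnt has wrapped:
 *
 *	hold_cnt    = 0x00000003
 *	release_cnt = 0xfffffffe
 *	inflight    = (s32)(0x00000003 - 0xfffffffe) = 5
 *
 * which is correct as long as fewer than 2^31 pages are in flight.
 */
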
static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{
	bool in_softirq, ret;

	/* BH protection not needed if current is softirq */
	in_softirq = page_pool_producer_lock(pool);
	ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem);
	if (ret)
		recycle_stat_inc(pool, ring);
	page_pool_producer_unlock(pool, in_softirq);

	return ret;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(netmem_ref netmem,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = netmem;
	recycle_stat_inc(pool, cached);
	return true;
}

static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{
	return netmem_is_net_iov(netmem) ||
	       (page_ref_count(netmem_to_page(netmem)) == 1 &&
		!page_is_pfmemalloc(netmem_to_page(netmem)));
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If pool->dma_sync is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline netmem_ref
__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
		     unsigned int dma_sync_size, bool allow_direct)
{
	lockdep_assert_no_hardirq();

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 *
	 * A page is NOT reusable when it was allocated while the system
	 * was under memory pressure (page_is_pfmemalloc()).
	 */
	if (likely(__page_pool_page_can_be_recycled(netmem))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);

		if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
			return 0;

		/* Page found as candidate for recycling */
		return netmem;
	}

	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case the page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt case,
	 * the DMA is unmapped/released, as the driver is likely doing
	 * refcnt based recycle tricks, meaning another process will be
	 * invoking put_page().
	 */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_return_netmem(pool, netmem);

	return 0;
}

static bool page_pool_napi_local(const struct page_pool *pool)
{
	const struct napi_struct *napi;
	u32 cpuid;

	/* On PREEMPT_RT the softirq can be preempted by the consumer */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return false;

	if (unlikely(!in_softirq()))
		return false;

	/* Allow direct recycle if we have reasons to believe that we are
	 * in the same context as the consumer would run, so there's
	 * no possible race.
	 * __page_pool_put_page() makes sure we're not in hardirq context
	 * and interrupts are enabled prior to accessing the cache.
	 */
	cpuid = smp_processor_id();
	if (READ_ONCE(pool->cpuid) == cpuid)
		return true;

	napi = READ_ONCE(pool->p.napi);

	return napi && READ_ONCE(napi->list_owner) == cpuid;
}

void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
				  unsigned int dma_sync_size, bool allow_direct)
{
	if (!allow_direct)
		allow_direct = page_pool_napi_local(pool);

	netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
				      allow_direct);
	if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
		/* Ring is full, fall back to freeing the page */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_netmem(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_put_unrefed_netmem);

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size, bool allow_direct)
{
	page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
				     allow_direct);
}
EXPORT_SYMBOL(page_pool_put_unrefed_page);

static void page_pool_recycle_ring_bulk(struct page_pool *pool,
					netmem_ref *bulk,
					u32 bulk_len)
{
	bool in_softirq;
	u32 i;

	/* Bulk produce into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);

	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}

	page_pool_producer_unlock(pool, in_softirq);
	recycle_stat_add(pool, ring, i);

	/* Hopefully all pages were returned into ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* The ptr_ring cache is full, free the remaining pages outside the
	 * producer lock, since put_page() with refcnt == 1 can be an
	 * expensive operation.
	 */
	for (; i < bulk_len; i++)
		page_pool_return_netmem(pool, bulk[i]);
}

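/* Example (illustrative sketch): returning buffers from a hypothetical
 * driver.  In its NAPI poll loop the page may go straight back into the
 * lockless alloc cache; from any other context direct recycling must not
 * be requested (it is still detected automatically via
 * page_pool_napi_local() when safe).
 *
 *	In the NAPI/XDP_DROP path:
 *		page_pool_put_full_page(ring->page_pool, page, true);
 *
 *	From teardown or other non-NAPI context:
 *		page_pool_put_full_page(ring->page_pool, page, false);
 */
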
/**
 * page_pool_put_netmem_bulk() - release references on multiple netmems
 * @data: array holding netmem references
 * @count: number of entries in @data
 *
 * Tries to refill a number of netmems into the ptr_ring cache while holding
 * the ptr_ring producer lock. If the ptr_ring is full,
 * page_pool_put_netmem_bulk() will release the leftover netmems to the memory
 * provider. page_pool_put_netmem_bulk() is suitable to be run inside the
 * driver NAPI tx completion loop for the XDP_REDIRECT use case.
 *
 * Please note the caller must not use the data area after running
 * page_pool_put_netmem_bulk(), as this function overwrites it.
 */
void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
{
	u32 bulk_len = 0;

	for (u32 i = 0; i < count; i++) {
		netmem_ref netmem = netmem_compound_head(data[i]);

		if (page_pool_unref_and_test(netmem))
			data[bulk_len++] = netmem;
	}

	count = bulk_len;
	while (count) {
		netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
		struct page_pool *pool = NULL;
		bool allow_direct;
		u32 foreign = 0;

		bulk_len = 0;

		for (u32 i = 0; i < count; i++) {
			struct page_pool *netmem_pp;
			netmem_ref netmem = data[i];

			netmem_pp = netmem_get_pp(netmem);
			if (unlikely(!pool)) {
				pool = netmem_pp;
				allow_direct = page_pool_napi_local(pool);
			} else if (netmem_pp != pool) {
				/* If the netmem belongs to a different
				 * page_pool, save it for another round.
				 */
				data[foreign++] = netmem;
				continue;
			}

			netmem = __page_pool_put_page(pool, netmem, -1,
						      allow_direct);
			/* Approved for bulk recycling in ptr_ring cache */
			if (netmem)
				bulk[bulk_len++] = netmem;
		}

		if (bulk_len)
			page_pool_recycle_ring_bulk(pool, bulk, bulk_len);

		count = foreign;
	}
}
EXPORT_SYMBOL(page_pool_put_netmem_bulk);

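/* Example (illustrative sketch): batching returns in a hypothetical
 * driver's XDP Tx completion loop.  foo_tx_ring and its helpers are
 * assumptions; XDP_BULK_QUEUE_SIZE comes from <net/xdp.h>.
 *
 *	static void foo_clean_xdp_tx(struct foo_tx_ring *ring)
 *	{
 *		netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
 *		u32 n = 0;
 *
 *		while (foo_tx_desc_done(ring)) {
 *			struct page *page = foo_tx_desc_page(ring);
 *
 *			bulk[n++] = page_to_netmem(page);
 *			if (n == XDP_BULK_QUEUE_SIZE) {
 *				page_pool_put_netmem_bulk(bulk, n);
 *				n = 0;
 *			}
 *		}
 *
 *		if (n)
 *			page_pool_put_netmem_bulk(bulk, n);
 *	}
 */
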
static netmem_ref page_pool_drain_frag(struct page_pool *pool,
				       netmem_ref netmem)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_unref_netmem(netmem, drain_count)))
		return 0;

	if (__page_pool_page_can_be_recycled(netmem)) {
		page_pool_dma_sync_for_device(pool, netmem, -1);
		return netmem;
	}

	page_pool_return_netmem(pool, netmem);
	return 0;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	netmem_ref netmem = pool->frag_page;

	pool->frag_page = 0;

	if (!netmem || page_pool_unref_netmem(netmem, drain_count))
		return;

	page_pool_return_netmem(pool, netmem);
}

netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
				       unsigned int *offset, unsigned int size,
				       gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	netmem_ref netmem = pool->frag_page;

	if (WARN_ON(size > max_size))
		return 0;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (netmem && *offset + size > max_size) {
		netmem = page_pool_drain_frag(pool, netmem);
		if (netmem) {
			recycle_stat_inc(pool, cached);
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!netmem) {
		netmem = page_pool_alloc_netmems(pool, gfp);
		if (unlikely(!netmem)) {
			pool->frag_page = 0;
			return 0;
		}

		pool->frag_page = netmem;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_netmem(netmem, BIAS_MAX);
		return netmem;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_frag_netmem);

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
							  gfp));
}
EXPORT_SYMBOL(page_pool_alloc_frag);

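/* Example (illustrative sketch): sub-page RX buffers via the frag API in
 * a hypothetical driver, letting several MTU-sized buffers share one
 * page.  The 2048-byte size and the foo_* helpers are assumptions.
 *
 *	unsigned int offset;
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_dev_alloc_frag(ring->page_pool, &offset, 2048);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	dma = page_pool_get_dma_addr(page) + offset;
 *	foo_post_rx_desc(ring, page, dma);
 */
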
static void page_pool_empty_ring(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Empty recycle ring */
	while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(netmem_ref_count(netmem) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, netmem_ref_count(netmem));

		page_pool_return_netmem(pool, netmem);
	}
}

static void __page_pool_destroy(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	page_pool_unlist(pool);
	page_pool_uninit(pool);

	if (pool->mp_ops) {
		pool->mp_ops->destroy(pool);
		static_branch_dec(&page_pool_mem_providers);
	}

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	netmem_ref netmem;

	if (pool->destroy_cnt)
		return;

	/* Empty the alloc cache; assume the caller made sure it is
	 * no longer in use, and that page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_netmem(pool, netmem);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	unsigned long id;
	void *ptr;

	page_pool_empty_alloc_cache_once(pool);
	if (!pool->destroy_cnt++ && pool->dma_map) {
		if (pool->dma_sync) {
			/* Disable page_pool_dma_sync_for_device() */
			pool->dma_sync = false;

			/* Make sure all concurrent returns that may see the old
			 * value of dma_sync (and thus perform a sync) have
			 * finished before doing the unmapping below. Skip the
			 * wait if the device doesn't actually need syncing, or
			 * if there are no outstanding mapped pages.
			 */
			if (dma_dev_need_sync(pool->p.dev) &&
			    !xa_empty(&pool->dma_mapped))
				synchronize_net();
		}

		xa_for_each(&pool->dma_mapped, id, ptr)
			__page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr));
	}

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	bool in_softirq;
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool, true);
	/* Acquire producer lock to make sure producers have exited. */
	in_softirq = page_pool_producer_lock(pool);
	page_pool_producer_unlock(pool, in_softirq);
	if (!inflight)
		__page_pool_destroy(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	void *netdev;
	int inflight;

	inflight = page_pool_release(pool);
	/* In rare cases, a driver bug may cause inflight to go negative.
	 * Don't reschedule release if inflight is 0 or negative.
	 * - If 0, the page_pool has been destroyed
	 * - If negative, we will never recover
	 * In both cases no reschedule is necessary.
	 */
	if (inflight <= 0)
		return;

	/* Periodic warning for page pools the user can't see */
	netdev = READ_ONCE(pool->slow.netdev);
	if (time_after_eq(jiffies, pool->defer_warn) &&
	    (!netdev || netdev == NET_PTR_POISON)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
			__func__, pool->user.id, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   const struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

/**
 * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
 * @pool: page pool to modify
 * @napi: NAPI instance to associate the page pool with
 *
 * Associate a page pool with a NAPI instance for lockless page recycling.
 * This is useful when a new page pool has to be added to a NAPI instance
 * without disabling that NAPI instance, to mark the point at which the
 * control path "hands over" the page pool to the NAPI instance. In most
 * cases the driver can simply set the @napi field in struct
 * page_pool_params, and does not have to call this helper.
 *
 * The function is idempotent, but does not implement any refcounting.
 * A single page_pool_disable_direct_recycling() call will disable recycling,
 * no matter how many times enable was called.
 */
void page_pool_enable_direct_recycling(struct page_pool *pool,
				       struct napi_struct *napi)
{
	if (READ_ONCE(pool->p.napi) == napi)
		return;
	WARN_ON(!napi || pool->p.napi);

	mutex_lock(&page_pools_lock);
	WRITE_ONCE(pool->p.napi, napi);
	mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_enable_direct_recycling);

void page_pool_disable_direct_recycling(struct page_pool *pool)
{
	/* Disable direct recycling based on pool->cpuid.
	 * Paired with READ_ONCE() in page_pool_napi_local().
	 */
	WRITE_ONCE(pool->cpuid, -1);

	if (!pool->p.napi)
		return;

	napi_assert_will_not_race(pool->p.napi);

	mutex_lock(&page_pools_lock);
	WRITE_ONCE(pool->p.napi, NULL);
	mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_disable_direct_recycling);

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_disable_direct_recycling(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	page_pool_detached(pool);
	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	netmem_ref netmem;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_netmem(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);

bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr)
{
	return page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), addr);
}

/* Associate a niov with a page pool. Should follow with a matching
 * net_mp_niov_clear_page_pool()
 */
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
}

/* Disassociate a niov from a page pool. Should only be used in the
 * ->release_netmem() path.
 */
void net_mp_niov_clear_page_pool(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	page_pool_clear_pp_info(netmem);
}

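/* Example (illustrative sketch): shutdown ordering in a hypothetical
 * driver.  Buffers still held by the driver are returned first; packets
 * still in flight are handled by the deferred release machinery above.
 *
 *	napi_disable(&ring->napi);
 *	foo_free_posted_rx_buffers(ring);	// page_pool_put_full_page()
 *	xdp_rxq_info_unreg(&ring->xdp_rxq);	// if an XDP memory model
 *						// was registered on this pool
 *	page_pool_destroy(ring->page_pool);
 *	ring->page_pool = NULL;
 */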