/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/error-injection.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#include "dev.h"
#include "mp_dmabuf_devmem.h"
#include "netmem_priv.h"
#include "page_pool_priv.h"

DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);

#define DEFER_TIME		(msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL	(60 * HZ)

#define BIAS_MAX		(LONG_MAX >> 1)

#ifdef CONFIG_PAGE_POOL_STATS
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);

/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)					\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);				\
	} while (0)

#define recycle_stat_add(pool, __stat, val)				\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);				\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool: pool from which page was allocated
 * @stats: struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * The caller passes a pointer to a caller-allocated struct
 * page_pool_stats, which this API fills in. The caller can then report
 * those stats to the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;

	/* The caller is responsible for initializing stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	const struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);

#else
#define alloc_stat_inc(...)	do { } while (0)
#define recycle_stat_inc(...)	do { } while (0)
#define recycle_stat_add(...)	do { } while (0)
#endif
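
/* Example (illustrative sketch): wiring the helpers above into a driver's
 * ethtool callbacks. One pool per RX queue is assumed; mydrv_priv and the
 * mydrv_* callbacks are hypothetical, not an in-tree driver.
 *
 *	static int mydrv_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : 0;
 *	}
 *
 *	static void mydrv_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static void mydrv_get_ethtool_stats(struct net_device *dev,
 *					    struct ethtool_stats *e, u64 *data)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *		struct page_pool_stats stats = {};	// caller must zero-init
 *		int i;
 *
 *		for (i = 0; i < priv->num_rx_queues; i++)
 *			page_pool_get_stats(priv->rxq[i].page_pool, &stats);
 *		page_pool_ethtool_stats_get(data, &stats);
 *	}
 */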

static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

static void page_pool_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
	CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
				    PAGE_POOL_FRAG_GROUP_ALIGN);
}

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params,
			  int cpuid)
{
	unsigned int ring_qsize = 1024; /* Default */
	struct netdev_rx_queue *rxq;
	int err;

	page_pool_struct_check();

	memcpy(&pool->p, &params->fast, sizeof(pool->p));
	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));

	pool->cpuid = cpuid;
	pool->dma_sync_for_cpu = true;

	/* Validate only known flags were used */
	if (pool->slow.flags & ~PP_FLAG_ALL)
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA
	 * transmit, which is the XDP_TX use-case.
	 */
	if (pool->slow.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;

		pool->dma_map = true;
	}

	if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		pool->dma_sync = true;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying RX data
		 */
	}

	pool->has_init_callback = !!pool->slow.init_callback;

#ifdef CONFIG_PAGE_POOL_STATS
	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
		if (!pool->recycle_stats)
			return -ENOMEM;
	} else {
		/* For system page pool instances we use a singular stats
		 * object instead of allocating a separate percpu variable
		 * for each (also percpu) page pool instance.
		 */
		pool->recycle_stats = &pp_system_recycle_stats;
		pool->system = true;
	}
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		if (!pool->system)
			free_percpu(pool->recycle_stats);
#endif
		return -ENOMEM;
	}

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* The driver calling page_pool_create() also calls page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1);

	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
		netdev_assert_locked(pool->slow.netdev);
		rxq = __netif_get_rx_queue(pool->slow.netdev,
					   pool->slow.queue_idx);
		pool->mp_priv = rxq->mp_params.mp_priv;
		pool->mp_ops = rxq->mp_params.mp_ops;
	}

	if (pool->mp_ops) {
		if (!pool->dma_map || !pool->dma_sync) {
			err = -EOPNOTSUPP;
			goto free_ptr_ring;
		}

		if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
			err = -EFAULT;
			goto free_ptr_ring;
		}

		err = pool->mp_ops->init(pool);
		if (err) {
			pr_warn("%s() mem-provider init failed %d\n", __func__,
				err);
			goto free_ptr_ring;
		}

		static_branch_inc(&page_pool_mem_providers);
	}

	return 0;

free_ptr_ring:
	ptr_ring_cleanup(&pool->ring, NULL);
#ifdef CONFIG_PAGE_POOL_STATS
	if (!pool->system)
		free_percpu(pool->recycle_stats);
#endif
	return err;
}

static void page_pool_uninit(struct page_pool *pool)
{
	ptr_ring_cleanup(&pool->ring, NULL);
	xa_destroy(&pool->dma_mapped);

#ifdef CONFIG_PAGE_POOL_STATS
	if (!pool->system)
		free_percpu(pool->recycle_stats);
#endif
}

/**
 * page_pool_create_percpu() - create a page pool for a given cpu.
 * @params: parameters, see struct page_pool_params
 * @cpuid: cpu identifier
 */
struct page_pool *
page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params, cpuid);
	if (err < 0)
		goto err_free;

	err = page_pool_list(pool);
	if (err)
		goto err_uninit;

	return pool;

err_uninit:
	page_pool_uninit(pool);
err_free:
	pr_warn("%s() gave up with errno %d\n", __func__, err);
	kfree(pool);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(page_pool_create_percpu);

/**
 * page_pool_create() - create a page pool
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	return page_pool_create_percpu(params, -1);
}
EXPORT_SYMBOL(page_pool_create);
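
/* Example (illustrative sketch): typical RX-side pool creation. The field
 * values and the pdev/rxq variables are assumptions for illustration; see
 * struct page_pool_params in <net/page_pool/types.h> for the full list.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,		// DMA-capable device
 *		.napi		= &rxq->napi,		// optional, enables direct recycling
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,		// max DMA-sync-for-device length
 *		.offset		= 0,			// start of RX data within the page
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */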

static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);

static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	netmem_ref netmem;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return 0;
	}

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable.
	 * This assumes the CPU refilling the driver RX-ring will also run
	 * RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		netmem = (__force netmem_ref)__ptr_ring_consume(r);
		if (unlikely(!netmem))
			break;

		if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
			pool->alloc.cache[pool->alloc.count++] = netmem;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out and fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_netmem(pool, netmem);
			alloc_stat_inc(pool, waive);
			netmem = 0;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return netmem;
}

/* fast path */
static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		netmem = page_pool_refill_alloc_cache(pool);
	}

	return netmem;
}

static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
					    netmem_ref netmem,
					    u32 dma_sync_size)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     dma_sync_size, pool->p.dma_dir);
#endif
}

static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
			      netmem_ref netmem,
			      u32 dma_sync_size)
{
	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) {
		rcu_read_lock();
		/* re-check under rcu_read_lock() to sync with page_pool_scrub() */
		if (pool->dma_sync)
			__page_pool_dma_sync_for_device(pool, netmem,
							dma_sync_size);
		rcu_read_unlock();
	}
}

static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
{
	dma_addr_t dma;
	int err;
	u32 id;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always
	 * fit into page private data (i.e. a 32-bit CPU with 64-bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it
	 * leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
				 DMA_ATTR_SKIP_CPU_SYNC |
					 DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	if (page_pool_set_dma_addr_netmem(netmem, dma)) {
		WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
		goto unmap_failed;
	}

	if (in_softirq())
		err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
			       PP_DMA_INDEX_LIMIT, gfp);
	else
		err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
				  PP_DMA_INDEX_LIMIT, gfp);
	if (err) {
		WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
		goto unset_failed;
	}

	netmem_set_dma_index(netmem, id);
	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);

	return true;

unset_failed:
	page_pool_set_dma_addr_netmem(netmem, 0);
unmap_failed:
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return false;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page_to_netmem(page));

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page_to_netmem(page),
				   pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool,
							  gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_order = pool->p.order;
	bool dma_map = pool->dma_map;
	netmem_ref netmem;
	int i, nr_pages;

	/* Unconditionally set NOWARN if allocating from NAPI.
	 * Drivers forget to set it, and OOM reports on packet Rx are useless.
	 */
	if ((gfp & GFP_ATOMIC) == GFP_ATOMIC)
		gfp |= __GFP_NOWARN;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
					 (struct page **)pool->alloc.cache);
	if (unlikely(!nr_pages))
		return 0;

	/* Pages have been filled into the alloc.cache array, but count is
	 * zero and the pages have not (necessarily) been DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		netmem = pool->alloc.cache[i];
		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) {
			put_page(netmem_to_page(netmem));
			continue;
		}

		page_pool_set_pp_info(pool, netmem);
		pool->alloc.cache[pool->alloc.count++] = netmem;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, netmem,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		netmem = 0;
	}

	/* A page that was just allocated should/must have refcnt 1. */
	return netmem;
}

/* Use page_pool to replace alloc_pages() API calls, with a
 * synchronization guarantee on the allocation side.
 */
netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	netmem_ref netmem;

	/* Fast-path: Get a page from cache */
	netmem = __page_pool_get_cached(pool);
	if (netmem)
		return netmem;

	/* Slow-path: cache empty, do real allocation */
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
		netmem = pool->mp_ops->alloc_netmems(pool, gfp);
	else
		netmem = __page_pool_alloc_netmems_slow(pool, gfp);
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_netmems);
ALLOW_ERROR_INJECTION(page_pool_alloc_netmems, NULL);

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_netmems(pool, gfp));
}
EXPORT_SYMBOL(page_pool_alloc_pages);
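
/* Example (illustrative sketch): refilling an RX ring from NAPI context.
 * rxq, its fill_level/fill_target fields, and mydrv_post_rx_buffer() are
 * hypothetical; GFP_ATOMIC is the usual choice here since softirq context
 * cannot sleep.
 *
 *	while (rxq->fill_level < rxq->fill_target) {
 *		struct page *page;
 *
 *		page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
 *		if (!page)
 *			break;	// ring stays short; retry on next NAPI poll
 *
 *		mydrv_post_rx_buffer(rxq, page);
 *		rxq->fill_level++;
 *	}
 */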

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	if (strict) {
		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
		     inflight);
	} else {
		inflight = max(0, inflight);
	}

	return inflight;
}
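
/* Worked example of the serial-number arithmetic above: the u32
 * subtraction stays correct across counter wraparound as long as fewer
 * than 2^31 pages are in flight. With hold_cnt = 0x00000002 (wrapped) and
 * release_cnt = 0xfffffffe, (u32)(0x00000002 - 0xfffffffe) == 0x00000004,
 * so _distance() correctly reports 4 in-flight pages.
 */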

void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{
	netmem_set_pp(netmem, pool);
	netmem_or_pp_magic(netmem, PP_SIGNATURE);

	/* Ensuring all pages have been split into one fragment initially:
	 * page_pool_set_pp_info() is only called once for every page when it
	 * is allocated from the page allocator and page_pool_fragment_page()
	 * is dirtying the same cache line as the page->pp_magic above, so
	 * the overhead is negligible.
	 */
	page_pool_fragment_netmem(netmem, 1);
	if (pool->has_init_callback)
		pool->slow.init_callback(netmem, pool->slow.init_arg);
}

void page_pool_clear_pp_info(netmem_ref netmem)
{
	netmem_clear_pp_magic(netmem);
	netmem_set_pp(netmem, NULL);
}

static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
							   netmem_ref netmem)
{
	struct page *old, *page = netmem_to_page(netmem);
	unsigned long id;
	dma_addr_t dma;

	if (!pool->dma_map)
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		return;

	id = netmem_get_dma_index(netmem);
	if (!id)
		return;

	if (in_softirq())
		old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
	else
		old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
	if (old != page)
		return;

	dma = page_pool_get_dma_addr_netmem(netmem);

	/* When the page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr_netmem(netmem, 0);
	netmem_set_dma_index(netmem, 0);
}

/* Disconnect a page from a page_pool. API users may need to disconnect
 * a page, to allow it to be used as a regular page (that will eventually
 * be returned to the normal page-allocator via put_page()).
 */
static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
{
	int count;
	bool put;

	put = true;
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
		put = pool->mp_ops->release_netmem(pool, netmem);
	else
		__page_pool_release_netmem_dma(pool, netmem);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, netmem, count);

	if (put) {
		page_pool_clear_pp_info(netmem);
		put_page(netmem_to_page(netmem));
	}
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{
	bool in_softirq, ret;

	/* BH protection not needed if current is softirq */
	in_softirq = page_pool_producer_lock(pool);
	ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem);
	if (ret)
		recycle_stat_inc(pool, ring);
	page_pool_producer_unlock(pool, in_softirq);

	return ret;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(netmem_ref netmem,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = netmem;
	recycle_stat_inc(pool, cached);
	return true;
}

static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{
	return netmem_is_net_iov(netmem) ||
	       (page_ref_count(netmem_to_page(netmem)) == 1 &&
		!page_is_pfmemalloc(netmem_to_page(netmem)));
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If pool->dma_sync is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline netmem_ref
__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
		     unsigned int dma_sync_size, bool allow_direct)
{
	lockdep_assert_no_hardirq();

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 *
	 * A page is NOT reusable when it was allocated while the system
	 * was under pressure (page_is_pfmemalloc).
	 */
	if (likely(__page_pool_page_can_be_recycled(netmem))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);

		if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
			return 0;

		/* Page found as candidate for recycling */
		return netmem;
	}

	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_return_netmem(pool, netmem);

	return 0;
}

static bool page_pool_napi_local(const struct page_pool *pool)
{
	const struct napi_struct *napi;
	u32 cpuid;

	/* On PREEMPT_RT the softirq can be preempted by the consumer */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return false;

	if (unlikely(!in_softirq()))
		return false;

	/* Allow direct recycle if we have reasons to believe that we are
	 * in the same context as the consumer would run, so there's
	 * no possible race.
	 * __page_pool_put_page() makes sure we're not in hardirq context
	 * and interrupts are enabled prior to accessing the cache.
	 */
	cpuid = smp_processor_id();
	if (READ_ONCE(pool->cpuid) == cpuid)
		return true;

	napi = READ_ONCE(pool->p.napi);

	return napi && READ_ONCE(napi->list_owner) == cpuid;
}

void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
				  unsigned int dma_sync_size, bool allow_direct)
{
	if (!allow_direct)
		allow_direct = page_pool_napi_local(pool);

	netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
				      allow_direct);
	if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
		/* Cache full, fallback to free pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_netmem(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_put_unrefed_netmem);

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size, bool allow_direct)
{
	page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
				     allow_direct);
}
EXPORT_SYMBOL(page_pool_put_unrefed_page);
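
/* Example (illustrative sketch): returning a buffer from an XDP_DROP path
 * in NAPI context. Drivers typically go through page_pool_put_full_page()
 * from <net/page_pool/helpers.h>, which drops the caller's reference and,
 * when it was the last one, lands in page_pool_put_unrefed_netmem() above.
 * allow_direct = true is only safe from the pool's own NAPI/softirq
 * context; rxq is hypothetical.
 *
 *	case XDP_DROP:
 *		page_pool_put_full_page(rxq->page_pool, page, true);
 *		break;
 */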

static void page_pool_recycle_ring_bulk(struct page_pool *pool,
					netmem_ref *bulk,
					u32 bulk_len)
{
	bool in_softirq;
	u32 i;

	/* Bulk produce into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);

	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}

	page_pool_producer_unlock(pool, in_softirq);
	recycle_stat_add(pool, ring, i);

	/* Hopefully all pages were returned into ptr_ring */
	if (likely(i == bulk_len))
		return;

	/*
	 * ptr_ring cache is full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation.
	 */
	for (; i < bulk_len; i++)
		page_pool_return_netmem(pool, bulk[i]);
}

/**
 * page_pool_put_netmem_bulk() - release references on multiple netmems
 * @data: array holding netmem references
 * @count: number of entries in @data
 *
 * Tries to refill a number of netmems into the ptr_ring cache while
 * holding the ptr_ring producer lock. If the ptr_ring is full,
 * page_pool_put_netmem_bulk() will release leftover netmems to the memory
 * provider. page_pool_put_netmem_bulk() is suitable to be run inside the
 * driver NAPI tx completion loop for the XDP_REDIRECT use case.
 *
 * Please note the caller must not use the data area after running
 * page_pool_put_netmem_bulk(), as this function overwrites it.
 */
void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
{
	u32 bulk_len = 0;

	for (u32 i = 0; i < count; i++) {
		netmem_ref netmem = netmem_compound_head(data[i]);

		if (page_pool_unref_and_test(netmem))
			data[bulk_len++] = netmem;
	}

	count = bulk_len;
	while (count) {
		netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
		struct page_pool *pool = NULL;
		bool allow_direct;
		u32 foreign = 0;

		bulk_len = 0;

		for (u32 i = 0; i < count; i++) {
			struct page_pool *netmem_pp;
			netmem_ref netmem = data[i];

			netmem_pp = netmem_get_pp(netmem);
			if (unlikely(!pool)) {
				pool = netmem_pp;
				allow_direct = page_pool_napi_local(pool);
			} else if (netmem_pp != pool) {
				/*
				 * If the netmem belongs to a different
				 * page_pool, save it for another round.
				 */
				data[foreign++] = netmem;
				continue;
			}

			netmem = __page_pool_put_page(pool, netmem, -1,
						      allow_direct);
			/* Approved for bulk recycling in ptr_ring cache */
			if (netmem)
				bulk[bulk_len++] = netmem;
		}

		if (bulk_len)
			page_pool_recycle_ring_bulk(pool, bulk, bulk_len);

		count = foreign;
	}
}
EXPORT_SYMBOL(page_pool_put_netmem_bulk);
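
/* Example (illustrative sketch): batching returns in a TX-completion loop,
 * similar in spirit to the xdp_frame_bulk machinery in net/core/xdp.c.
 * txq and mydrv_next_completed_netmem() are hypothetical.
 *
 *	netmem_ref batch[XDP_BULK_QUEUE_SIZE];
 *	netmem_ref netmem;
 *	u32 n = 0;
 *
 *	while ((netmem = mydrv_next_completed_netmem(txq))) {
 *		batch[n++] = netmem;
 *		if (n == XDP_BULK_QUEUE_SIZE) {
 *			page_pool_put_netmem_bulk(batch, n);
 *			n = 0;
 *		}
 *	}
 *	if (n)
 *		page_pool_put_netmem_bulk(batch, n);
 */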

static netmem_ref page_pool_drain_frag(struct page_pool *pool,
				       netmem_ref netmem)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_unref_netmem(netmem, drain_count)))
		return 0;

	if (__page_pool_page_can_be_recycled(netmem)) {
		page_pool_dma_sync_for_device(pool, netmem, -1);
		return netmem;
	}

	page_pool_return_netmem(pool, netmem);
	return 0;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	netmem_ref netmem = pool->frag_page;

	pool->frag_page = 0;

	if (!netmem || page_pool_unref_netmem(netmem, drain_count))
		return;

	page_pool_return_netmem(pool, netmem);
}

netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
				       unsigned int *offset, unsigned int size,
				       gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	netmem_ref netmem = pool->frag_page;

	if (WARN_ON(size > max_size))
		return 0;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (netmem && *offset + size > max_size) {
		netmem = page_pool_drain_frag(pool, netmem);
		if (netmem) {
			recycle_stat_inc(pool, cached);
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!netmem) {
		netmem = page_pool_alloc_netmems(pool, gfp);
		if (unlikely(!netmem)) {
			pool->frag_page = 0;
			return 0;
		}

		pool->frag_page = netmem;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_netmem(netmem, BIAS_MAX);
		return netmem;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_frag_netmem);

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
							  gfp));
}
EXPORT_SYMBOL(page_pool_alloc_frag);
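
/* Example (illustrative sketch): carving fixed-size RX buffers out of
 * pooled pages. Each fragment shares the page's DMA mapping; the buffer's
 * DMA address is the page's address plus the returned offset. The 2048
 * byte size and the rxq variable are assumptions.
 *
 *	unsigned int offset;
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_frag(rxq->page_pool, &offset, 2048, GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 *	dma = page_pool_get_dma_addr(page) + offset;
 */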

static void page_pool_empty_ring(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Empty recycle ring */
	while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(netmem_ref_count(netmem) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, netmem_ref_count(netmem));

		page_pool_return_netmem(pool, netmem);
	}
}

static void __page_pool_destroy(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	page_pool_unlist(pool);
	page_pool_uninit(pool);

	if (pool->mp_ops) {
		pool->mp_ops->destroy(pool);
		static_branch_dec(&page_pool_mem_providers);
	}

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	netmem_ref netmem;

	if (pool->destroy_cnt)
		return;

	/* Empty the alloc cache; assume the caller made sure it is
	 * no longer in use, and that page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_netmem(pool, netmem);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	unsigned long id;
	void *ptr;

	page_pool_empty_alloc_cache_once(pool);
	if (!pool->destroy_cnt++ && pool->dma_map) {
		if (pool->dma_sync) {
			/* Disable page_pool_dma_sync_for_device() */
			pool->dma_sync = false;

			/* Make sure all concurrent returns that may see the old
			 * value of dma_sync (and thus perform a sync) have
			 * finished before doing the unmapping below. Skip the
			 * wait if the device doesn't actually need syncing, or
			 * if there are no outstanding mapped pages.
			 */
			if (dma_dev_need_sync(pool->p.dev) &&
			    !xa_empty(&pool->dma_mapped))
				synchronize_net();
		}

		xa_for_each(&pool->dma_mapped, id, ptr)
			__page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr));
	}

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	bool in_softirq;
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool, true);
	/* Acquire producer lock to make sure producers have exited. */
	in_softirq = page_pool_producer_lock(pool);
	page_pool_producer_unlock(pool, in_softirq);
	if (!inflight)
		__page_pool_destroy(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	void *netdev;
	int inflight;

	inflight = page_pool_release(pool);
	/* In rare cases, a driver bug may cause inflight to go negative.
	 * Don't reschedule release if inflight is 0 or negative.
	 * - If 0, the page_pool has been destroyed.
	 * - If negative, we will never recover.
	 * In both cases no reschedule is necessary.
	 */
	if (inflight <= 0)
		return;

	/* Periodic warning for page pools the user can't see */
	netdev = READ_ONCE(pool->slow.netdev);
	if (time_after_eq(jiffies, pool->defer_warn) &&
	    (!netdev || netdev == NET_PTR_POISON)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
			__func__, pool->user.id, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   const struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

/**
 * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
 * @pool: page pool to modify
 * @napi: NAPI instance to associate the page pool with
 *
 * Associate a page pool with a NAPI instance for lockless page recycling.
 * This is useful when a new page pool has to be added to a NAPI instance
 * without disabling that NAPI instance, to mark the point at which the
 * control path "hands over" the page pool to the NAPI instance. In most
 * cases the driver can simply set the @napi field in struct
 * page_pool_params, and does not have to call this helper.
 *
 * The function is idempotent, but does not implement any refcounting.
 * A single page_pool_disable_direct_recycling() call will disable
 * recycling, no matter how many times enable was called.
 */
void page_pool_enable_direct_recycling(struct page_pool *pool,
				       struct napi_struct *napi)
{
	if (READ_ONCE(pool->p.napi) == napi)
		return;
	WARN_ON(!napi || pool->p.napi);

	mutex_lock(&page_pools_lock);
	WRITE_ONCE(pool->p.napi, napi);
	mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_enable_direct_recycling);
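
/* Example (illustrative sketch): a queue-restart flow that creates the
 * pool first and hands it to NAPI only once the queue is set up, instead
 * of passing .napi in struct page_pool_params at creation time.
 * mydrv_setup_queue() and the surrounding variables are hypothetical.
 *
 *	pool = page_pool_create(&pp_params);	// pp_params.napi left NULL
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	mydrv_setup_queue(rxq, pool);
 *	page_pool_enable_direct_recycling(pool, &rxq->napi);
 */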

void page_pool_disable_direct_recycling(struct page_pool *pool)
{
	/* Disable direct recycling based on pool->cpuid.
	 * Paired with READ_ONCE() in page_pool_napi_local().
	 */
	WRITE_ONCE(pool->cpuid, -1);

	if (!pool->p.napi)
		return;

	napi_assert_will_not_race(pool->p.napi);

	mutex_lock(&page_pools_lock);
	WRITE_ONCE(pool->p.napi, NULL);
	mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_disable_direct_recycling);

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_disable_direct_recycling(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	page_pool_detached(pool);
	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	netmem_ref netmem;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_netmem(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);

bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr)
{
	return page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), addr);
}

/* Associate a niov with a page pool. Should follow with a matching
 * net_mp_niov_clear_page_pool()
 */
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
}

/* Disassociate a niov from a page pool. Should only be used in the
 * ->release_netmem() path.
 */
void net_mp_niov_clear_page_pool(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	page_pool_clear_pp_info(netmem);
}