1 /* SPDX-License-Identifier: GPL-2.0 2 * 3 * page_pool.c 4 * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com> 5 * Copyright (C) 2016 Red Hat, Inc. 6 */ 7 8 #include <linux/types.h> 9 #include <linux/kernel.h> 10 #include <linux/slab.h> 11 #include <linux/device.h> 12 13 #include <net/page_pool.h> 14 #include <net/xdp.h> 15 16 #include <linux/dma-direction.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/page-flags.h> 19 #include <linux/mm.h> /* for __put_page() */ 20 #include <linux/poison.h> 21 22 #include <trace/events/page_pool.h> 23 24 #define DEFER_TIME (msecs_to_jiffies(1000)) 25 #define DEFER_WARN_INTERVAL (60 * HZ) 26 27 #define BIAS_MAX LONG_MAX 28 29 #ifdef CONFIG_PAGE_POOL_STATS 30 /* alloc_stat_inc is intended to be used in softirq context */ 31 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) 32 /* recycle_stat_inc is safe to use when preemption is possible. */ 33 #define recycle_stat_inc(pool, __stat) \ 34 do { \ 35 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ 36 this_cpu_inc(s->__stat); \ 37 } while (0) 38 39 bool page_pool_get_stats(struct page_pool *pool, 40 struct page_pool_stats *stats) 41 { 42 int cpu = 0; 43 44 if (!stats) 45 return false; 46 47 memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats)); 48 49 for_each_possible_cpu(cpu) { 50 const struct page_pool_recycle_stats *pcpu = 51 per_cpu_ptr(pool->recycle_stats, cpu); 52 53 stats->recycle_stats.cached += pcpu->cached; 54 stats->recycle_stats.cache_full += pcpu->cache_full; 55 stats->recycle_stats.ring += pcpu->ring; 56 stats->recycle_stats.ring_full += pcpu->ring_full; 57 stats->recycle_stats.released_refcnt += pcpu->released_refcnt; 58 } 59 60 return true; 61 } 62 EXPORT_SYMBOL(page_pool_get_stats); 63 #else 64 #define alloc_stat_inc(pool, __stat) 65 #define recycle_stat_inc(pool, __stat) 66 #endif 67 68 static int page_pool_init(struct page_pool *pool, 69 const struct page_pool_params *params) 70 { 71 unsigned int ring_qsize = 1024; /* Default */ 72 73 memcpy(&pool->p, params, sizeof(pool->p)); 74 75 /* Validate only known flags were used */ 76 if (pool->p.flags & ~(PP_FLAG_ALL)) 77 return -EINVAL; 78 79 if (pool->p.pool_size) 80 ring_qsize = pool->p.pool_size; 81 82 /* Sanity limit mem that can be pinned down */ 83 if (ring_qsize > 32768) 84 return -E2BIG; 85 86 /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL. 87 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending, 88 * which is the XDP_TX use-case. 89 */ 90 if (pool->p.flags & PP_FLAG_DMA_MAP) { 91 if ((pool->p.dma_dir != DMA_FROM_DEVICE) && 92 (pool->p.dma_dir != DMA_BIDIRECTIONAL)) 93 return -EINVAL; 94 } 95 96 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) { 97 /* In order to request DMA-sync-for-device the page 98 * needs to be mapped 99 */ 100 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) 101 return -EINVAL; 102 103 if (!pool->p.max_len) 104 return -EINVAL; 105 106 /* pool->p.offset has to be set according to the address 107 * offset used by the DMA engine to start copying rx data 108 */ 109 } 110 111 if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT && 112 pool->p.flags & PP_FLAG_PAGE_FRAG) 113 return -EINVAL; 114 115 #ifdef CONFIG_PAGE_POOL_STATS 116 pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); 117 if (!pool->recycle_stats) 118 return -ENOMEM; 119 #endif 120 121 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) 122 return -ENOMEM; 123 124 atomic_set(&pool->pages_state_release_cnt, 0); 125 126 /* Driver calling page_pool_create() also call page_pool_destroy() */ 127 refcount_set(&pool->user_cnt, 1); 128 129 if (pool->p.flags & PP_FLAG_DMA_MAP) 130 get_device(pool->p.dev); 131 132 return 0; 133 } 134 135 struct page_pool *page_pool_create(const struct page_pool_params *params) 136 { 137 struct page_pool *pool; 138 int err; 139 140 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid); 141 if (!pool) 142 return ERR_PTR(-ENOMEM); 143 144 err = page_pool_init(pool, params); 145 if (err < 0) { 146 pr_warn("%s() gave up with errno %d\n", __func__, err); 147 kfree(pool); 148 return ERR_PTR(err); 149 } 150 151 return pool; 152 } 153 EXPORT_SYMBOL(page_pool_create); 154 155 static void page_pool_return_page(struct page_pool *pool, struct page *page); 156 157 noinline 158 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) 159 { 160 struct ptr_ring *r = &pool->ring; 161 struct page *page; 162 int pref_nid; /* preferred NUMA node */ 163 164 /* Quicker fallback, avoid locks when ring is empty */ 165 if (__ptr_ring_empty(r)) { 166 alloc_stat_inc(pool, empty); 167 return NULL; 168 } 169 170 /* Softirq guarantee CPU and thus NUMA node is stable. This, 171 * assumes CPU refilling driver RX-ring will also run RX-NAPI. 172 */ 173 #ifdef CONFIG_NUMA 174 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid; 175 #else 176 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */ 177 pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */ 178 #endif 179 180 /* Refill alloc array, but only if NUMA match */ 181 do { 182 page = __ptr_ring_consume(r); 183 if (unlikely(!page)) 184 break; 185 186 if (likely(page_to_nid(page) == pref_nid)) { 187 pool->alloc.cache[pool->alloc.count++] = page; 188 } else { 189 /* NUMA mismatch; 190 * (1) release 1 page to page-allocator and 191 * (2) break out to fallthrough to alloc_pages_node. 192 * This limit stress on page buddy alloactor. 193 */ 194 page_pool_return_page(pool, page); 195 alloc_stat_inc(pool, waive); 196 page = NULL; 197 break; 198 } 199 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); 200 201 /* Return last page */ 202 if (likely(pool->alloc.count > 0)) { 203 page = pool->alloc.cache[--pool->alloc.count]; 204 alloc_stat_inc(pool, refill); 205 } 206 207 return page; 208 } 209 210 /* fast path */ 211 static struct page *__page_pool_get_cached(struct page_pool *pool) 212 { 213 struct page *page; 214 215 /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */ 216 if (likely(pool->alloc.count)) { 217 /* Fast-path */ 218 page = pool->alloc.cache[--pool->alloc.count]; 219 alloc_stat_inc(pool, fast); 220 } else { 221 page = page_pool_refill_alloc_cache(pool); 222 } 223 224 return page; 225 } 226 227 static void page_pool_dma_sync_for_device(struct page_pool *pool, 228 struct page *page, 229 unsigned int dma_sync_size) 230 { 231 dma_addr_t dma_addr = page_pool_get_dma_addr(page); 232 233 dma_sync_size = min(dma_sync_size, pool->p.max_len); 234 dma_sync_single_range_for_device(pool->p.dev, dma_addr, 235 pool->p.offset, dma_sync_size, 236 pool->p.dma_dir); 237 } 238 239 static bool page_pool_dma_map(struct page_pool *pool, struct page *page) 240 { 241 dma_addr_t dma; 242 243 /* Setup DMA mapping: use 'struct page' area for storing DMA-addr 244 * since dma_addr_t can be either 32 or 64 bits and does not always fit 245 * into page private data (i.e 32bit cpu with 64bit DMA caps) 246 * This mapping is kept for lifetime of page, until leaving pool. 247 */ 248 dma = dma_map_page_attrs(pool->p.dev, page, 0, 249 (PAGE_SIZE << pool->p.order), 250 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 251 if (dma_mapping_error(pool->p.dev, dma)) 252 return false; 253 254 page_pool_set_dma_addr(page, dma); 255 256 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) 257 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); 258 259 return true; 260 } 261 262 static void page_pool_set_pp_info(struct page_pool *pool, 263 struct page *page) 264 { 265 page->pp = pool; 266 page->pp_magic |= PP_SIGNATURE; 267 if (pool->p.init_callback) 268 pool->p.init_callback(page, pool->p.init_arg); 269 } 270 271 static void page_pool_clear_pp_info(struct page *page) 272 { 273 page->pp_magic = 0; 274 page->pp = NULL; 275 } 276 277 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, 278 gfp_t gfp) 279 { 280 struct page *page; 281 282 gfp |= __GFP_COMP; 283 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); 284 if (unlikely(!page)) 285 return NULL; 286 287 if ((pool->p.flags & PP_FLAG_DMA_MAP) && 288 unlikely(!page_pool_dma_map(pool, page))) { 289 put_page(page); 290 return NULL; 291 } 292 293 alloc_stat_inc(pool, slow_high_order); 294 page_pool_set_pp_info(pool, page); 295 296 /* Track how many pages are held 'in-flight' */ 297 pool->pages_state_hold_cnt++; 298 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); 299 return page; 300 } 301 302 /* slow path */ 303 noinline 304 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, 305 gfp_t gfp) 306 { 307 const int bulk = PP_ALLOC_CACHE_REFILL; 308 unsigned int pp_flags = pool->p.flags; 309 unsigned int pp_order = pool->p.order; 310 struct page *page; 311 int i, nr_pages; 312 313 /* Don't support bulk alloc for high-order pages */ 314 if (unlikely(pp_order)) 315 return __page_pool_alloc_page_order(pool, gfp); 316 317 /* Unnecessary as alloc cache is empty, but guarantees zero count */ 318 if (unlikely(pool->alloc.count > 0)) 319 return pool->alloc.cache[--pool->alloc.count]; 320 321 /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */ 322 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); 323 324 nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache); 325 if (unlikely(!nr_pages)) 326 return NULL; 327 328 /* Pages have been filled into alloc.cache array, but count is zero and 329 * page element have not been (possibly) DMA mapped. 330 */ 331 for (i = 0; i < nr_pages; i++) { 332 page = pool->alloc.cache[i]; 333 if ((pp_flags & PP_FLAG_DMA_MAP) && 334 unlikely(!page_pool_dma_map(pool, page))) { 335 put_page(page); 336 continue; 337 } 338 339 page_pool_set_pp_info(pool, page); 340 pool->alloc.cache[pool->alloc.count++] = page; 341 /* Track how many pages are held 'in-flight' */ 342 pool->pages_state_hold_cnt++; 343 trace_page_pool_state_hold(pool, page, 344 pool->pages_state_hold_cnt); 345 } 346 347 /* Return last page */ 348 if (likely(pool->alloc.count > 0)) { 349 page = pool->alloc.cache[--pool->alloc.count]; 350 alloc_stat_inc(pool, slow); 351 } else { 352 page = NULL; 353 } 354 355 /* When page just alloc'ed is should/must have refcnt 1. */ 356 return page; 357 } 358 359 /* For using page_pool replace: alloc_pages() API calls, but provide 360 * synchronization guarantee for allocation side. 361 */ 362 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) 363 { 364 struct page *page; 365 366 /* Fast-path: Get a page from cache */ 367 page = __page_pool_get_cached(pool); 368 if (page) 369 return page; 370 371 /* Slow-path: cache empty, do real allocation */ 372 page = __page_pool_alloc_pages_slow(pool, gfp); 373 return page; 374 } 375 EXPORT_SYMBOL(page_pool_alloc_pages); 376 377 /* Calculate distance between two u32 values, valid if distance is below 2^(31) 378 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution 379 */ 380 #define _distance(a, b) (s32)((a) - (b)) 381 382 static s32 page_pool_inflight(struct page_pool *pool) 383 { 384 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); 385 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); 386 s32 inflight; 387 388 inflight = _distance(hold_cnt, release_cnt); 389 390 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); 391 WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight); 392 393 return inflight; 394 } 395 396 /* Disconnects a page (from a page_pool). API users can have a need 397 * to disconnect a page (from a page_pool), to allow it to be used as 398 * a regular page (that will eventually be returned to the normal 399 * page-allocator via put_page). 400 */ 401 void page_pool_release_page(struct page_pool *pool, struct page *page) 402 { 403 dma_addr_t dma; 404 int count; 405 406 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) 407 /* Always account for inflight pages, even if we didn't 408 * map them 409 */ 410 goto skip_dma_unmap; 411 412 dma = page_pool_get_dma_addr(page); 413 414 /* When page is unmapped, it cannot be returned to our pool */ 415 dma_unmap_page_attrs(pool->p.dev, dma, 416 PAGE_SIZE << pool->p.order, pool->p.dma_dir, 417 DMA_ATTR_SKIP_CPU_SYNC); 418 page_pool_set_dma_addr(page, 0); 419 skip_dma_unmap: 420 page_pool_clear_pp_info(page); 421 422 /* This may be the last page returned, releasing the pool, so 423 * it is not safe to reference pool afterwards. 424 */ 425 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt); 426 trace_page_pool_state_release(pool, page, count); 427 } 428 EXPORT_SYMBOL(page_pool_release_page); 429 430 /* Return a page to the page allocator, cleaning up our state */ 431 static void page_pool_return_page(struct page_pool *pool, struct page *page) 432 { 433 page_pool_release_page(pool, page); 434 435 put_page(page); 436 /* An optimization would be to call __free_pages(page, pool->p.order) 437 * knowing page is not part of page-cache (thus avoiding a 438 * __page_cache_release() call). 439 */ 440 } 441 442 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) 443 { 444 int ret; 445 /* BH protection not needed if current is serving softirq */ 446 if (in_serving_softirq()) 447 ret = ptr_ring_produce(&pool->ring, page); 448 else 449 ret = ptr_ring_produce_bh(&pool->ring, page); 450 451 if (!ret) { 452 recycle_stat_inc(pool, ring); 453 return true; 454 } 455 456 return false; 457 } 458 459 /* Only allow direct recycling in special circumstances, into the 460 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case. 461 * 462 * Caller must provide appropriate safe context. 463 */ 464 static bool page_pool_recycle_in_cache(struct page *page, 465 struct page_pool *pool) 466 { 467 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) { 468 recycle_stat_inc(pool, cache_full); 469 return false; 470 } 471 472 /* Caller MUST have verified/know (page_ref_count(page) == 1) */ 473 pool->alloc.cache[pool->alloc.count++] = page; 474 recycle_stat_inc(pool, cached); 475 return true; 476 } 477 478 /* If the page refcnt == 1, this will try to recycle the page. 479 * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for 480 * the configured size min(dma_sync_size, pool->max_len). 481 * If the page refcnt != 1, then the page will be returned to memory 482 * subsystem. 483 */ 484 static __always_inline struct page * 485 __page_pool_put_page(struct page_pool *pool, struct page *page, 486 unsigned int dma_sync_size, bool allow_direct) 487 { 488 /* This allocator is optimized for the XDP mode that uses 489 * one-frame-per-page, but have fallbacks that act like the 490 * regular page allocator APIs. 491 * 492 * refcnt == 1 means page_pool owns page, and can recycle it. 493 * 494 * page is NOT reusable when allocated when system is under 495 * some pressure. (page_is_pfmemalloc) 496 */ 497 if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) { 498 /* Read barrier done in page_ref_count / READ_ONCE */ 499 500 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) 501 page_pool_dma_sync_for_device(pool, page, 502 dma_sync_size); 503 504 if (allow_direct && in_serving_softirq() && 505 page_pool_recycle_in_cache(page, pool)) 506 return NULL; 507 508 /* Page found as candidate for recycling */ 509 return page; 510 } 511 /* Fallback/non-XDP mode: API user have elevated refcnt. 512 * 513 * Many drivers split up the page into fragments, and some 514 * want to keep doing this to save memory and do refcnt based 515 * recycling. Support this use case too, to ease drivers 516 * switching between XDP/non-XDP. 517 * 518 * In-case page_pool maintains the DMA mapping, API user must 519 * call page_pool_put_page once. In this elevated refcnt 520 * case, the DMA is unmapped/released, as driver is likely 521 * doing refcnt based recycle tricks, meaning another process 522 * will be invoking put_page. 523 */ 524 recycle_stat_inc(pool, released_refcnt); 525 /* Do not replace this with page_pool_return_page() */ 526 page_pool_release_page(pool, page); 527 put_page(page); 528 529 return NULL; 530 } 531 532 void page_pool_put_defragged_page(struct page_pool *pool, struct page *page, 533 unsigned int dma_sync_size, bool allow_direct) 534 { 535 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); 536 if (page && !page_pool_recycle_in_ring(pool, page)) { 537 /* Cache full, fallback to free pages */ 538 recycle_stat_inc(pool, ring_full); 539 page_pool_return_page(pool, page); 540 } 541 } 542 EXPORT_SYMBOL(page_pool_put_defragged_page); 543 544 /* Caller must not use data area after call, as this function overwrites it */ 545 void page_pool_put_page_bulk(struct page_pool *pool, void **data, 546 int count) 547 { 548 int i, bulk_len = 0; 549 550 for (i = 0; i < count; i++) { 551 struct page *page = virt_to_head_page(data[i]); 552 553 /* It is not the last user for the page frag case */ 554 if (!page_pool_is_last_frag(pool, page)) 555 continue; 556 557 page = __page_pool_put_page(pool, page, -1, false); 558 /* Approved for bulk recycling in ptr_ring cache */ 559 if (page) 560 data[bulk_len++] = page; 561 } 562 563 if (unlikely(!bulk_len)) 564 return; 565 566 /* Bulk producer into ptr_ring page_pool cache */ 567 page_pool_ring_lock(pool); 568 for (i = 0; i < bulk_len; i++) { 569 if (__ptr_ring_produce(&pool->ring, data[i])) 570 break; /* ring full */ 571 } 572 page_pool_ring_unlock(pool); 573 574 /* Hopefully all pages was return into ptr_ring */ 575 if (likely(i == bulk_len)) 576 return; 577 578 /* ptr_ring cache full, free remaining pages outside producer lock 579 * since put_page() with refcnt == 1 can be an expensive operation 580 */ 581 for (; i < bulk_len; i++) 582 page_pool_return_page(pool, data[i]); 583 } 584 EXPORT_SYMBOL(page_pool_put_page_bulk); 585 586 static struct page *page_pool_drain_frag(struct page_pool *pool, 587 struct page *page) 588 { 589 long drain_count = BIAS_MAX - pool->frag_users; 590 591 /* Some user is still using the page frag */ 592 if (likely(page_pool_defrag_page(page, drain_count))) 593 return NULL; 594 595 if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) { 596 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) 597 page_pool_dma_sync_for_device(pool, page, -1); 598 599 return page; 600 } 601 602 page_pool_return_page(pool, page); 603 return NULL; 604 } 605 606 static void page_pool_free_frag(struct page_pool *pool) 607 { 608 long drain_count = BIAS_MAX - pool->frag_users; 609 struct page *page = pool->frag_page; 610 611 pool->frag_page = NULL; 612 613 if (!page || page_pool_defrag_page(page, drain_count)) 614 return; 615 616 page_pool_return_page(pool, page); 617 } 618 619 struct page *page_pool_alloc_frag(struct page_pool *pool, 620 unsigned int *offset, 621 unsigned int size, gfp_t gfp) 622 { 623 unsigned int max_size = PAGE_SIZE << pool->p.order; 624 struct page *page = pool->frag_page; 625 626 if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) || 627 size > max_size)) 628 return NULL; 629 630 size = ALIGN(size, dma_get_cache_alignment()); 631 *offset = pool->frag_offset; 632 633 if (page && *offset + size > max_size) { 634 page = page_pool_drain_frag(pool, page); 635 if (page) 636 goto frag_reset; 637 } 638 639 if (!page) { 640 page = page_pool_alloc_pages(pool, gfp); 641 if (unlikely(!page)) { 642 pool->frag_page = NULL; 643 return NULL; 644 } 645 646 pool->frag_page = page; 647 648 frag_reset: 649 pool->frag_users = 1; 650 *offset = 0; 651 pool->frag_offset = size; 652 page_pool_fragment_page(page, BIAS_MAX); 653 return page; 654 } 655 656 pool->frag_users++; 657 pool->frag_offset = *offset + size; 658 return page; 659 } 660 EXPORT_SYMBOL(page_pool_alloc_frag); 661 662 static void page_pool_empty_ring(struct page_pool *pool) 663 { 664 struct page *page; 665 666 /* Empty recycle ring */ 667 while ((page = ptr_ring_consume_bh(&pool->ring))) { 668 /* Verify the refcnt invariant of cached pages */ 669 if (!(page_ref_count(page) == 1)) 670 pr_crit("%s() page_pool refcnt %d violation\n", 671 __func__, page_ref_count(page)); 672 673 page_pool_return_page(pool, page); 674 } 675 } 676 677 static void page_pool_free(struct page_pool *pool) 678 { 679 if (pool->disconnect) 680 pool->disconnect(pool); 681 682 ptr_ring_cleanup(&pool->ring, NULL); 683 684 if (pool->p.flags & PP_FLAG_DMA_MAP) 685 put_device(pool->p.dev); 686 687 #ifdef CONFIG_PAGE_POOL_STATS 688 free_percpu(pool->recycle_stats); 689 #endif 690 kfree(pool); 691 } 692 693 static void page_pool_empty_alloc_cache_once(struct page_pool *pool) 694 { 695 struct page *page; 696 697 if (pool->destroy_cnt) 698 return; 699 700 /* Empty alloc cache, assume caller made sure this is 701 * no-longer in use, and page_pool_alloc_pages() cannot be 702 * call concurrently. 703 */ 704 while (pool->alloc.count) { 705 page = pool->alloc.cache[--pool->alloc.count]; 706 page_pool_return_page(pool, page); 707 } 708 } 709 710 static void page_pool_scrub(struct page_pool *pool) 711 { 712 page_pool_empty_alloc_cache_once(pool); 713 pool->destroy_cnt++; 714 715 /* No more consumers should exist, but producers could still 716 * be in-flight. 717 */ 718 page_pool_empty_ring(pool); 719 } 720 721 static int page_pool_release(struct page_pool *pool) 722 { 723 int inflight; 724 725 page_pool_scrub(pool); 726 inflight = page_pool_inflight(pool); 727 if (!inflight) 728 page_pool_free(pool); 729 730 return inflight; 731 } 732 733 static void page_pool_release_retry(struct work_struct *wq) 734 { 735 struct delayed_work *dwq = to_delayed_work(wq); 736 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); 737 int inflight; 738 739 inflight = page_pool_release(pool); 740 if (!inflight) 741 return; 742 743 /* Periodic warning */ 744 if (time_after_eq(jiffies, pool->defer_warn)) { 745 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ; 746 747 pr_warn("%s() stalled pool shutdown %d inflight %d sec\n", 748 __func__, inflight, sec); 749 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; 750 } 751 752 /* Still not ready to be disconnected, retry later */ 753 schedule_delayed_work(&pool->release_dw, DEFER_TIME); 754 } 755 756 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), 757 struct xdp_mem_info *mem) 758 { 759 refcount_inc(&pool->user_cnt); 760 pool->disconnect = disconnect; 761 pool->xdp_mem_id = mem->id; 762 } 763 764 void page_pool_destroy(struct page_pool *pool) 765 { 766 if (!pool) 767 return; 768 769 if (!page_pool_put(pool)) 770 return; 771 772 page_pool_free_frag(pool); 773 774 if (!page_pool_release(pool)) 775 return; 776 777 pool->defer_start = jiffies; 778 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; 779 780 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry); 781 schedule_delayed_work(&pool->release_dw, DEFER_TIME); 782 } 783 EXPORT_SYMBOL(page_pool_destroy); 784 785 /* Caller must provide appropriate safe context, e.g. NAPI. */ 786 void page_pool_update_nid(struct page_pool *pool, int new_nid) 787 { 788 struct page *page; 789 790 trace_page_pool_update_nid(pool, new_nid); 791 pool->p.nid = new_nid; 792 793 /* Flush pool alloc cache, as refill will check NUMA node */ 794 while (pool->alloc.count) { 795 page = pool->alloc.cache[--pool->alloc.count]; 796 page_pool_return_page(pool, page); 797 } 798 } 799 EXPORT_SYMBOL(page_pool_update_nid); 800 801 bool page_pool_return_skb_page(struct page *page) 802 { 803 struct page_pool *pp; 804 805 page = compound_head(page); 806 807 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 808 * in order to preserve any existing bits, such as bit 0 for the 809 * head page of compound page and bit 1 for pfmemalloc page, so 810 * mask those bits for freeing side when doing below checking, 811 * and page_is_pfmemalloc() is checked in __page_pool_put_page() 812 * to avoid recycling the pfmemalloc page. 813 */ 814 if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE)) 815 return false; 816 817 pp = page->pp; 818 819 /* Driver set this to memory recycling info. Reset it on recycle. 820 * This will *not* work for NIC using a split-page memory model. 821 * The page will be returned to the pool here regardless of the 822 * 'flipped' fragment being in use or not. 823 */ 824 page_pool_put_full_page(pp, page, false); 825 826 return true; 827 } 828 EXPORT_SYMBOL(page_pool_return_skb_page); 829