xref: /linux/net/core/page_pool.c (revision 5027ec19f1049a07df5b0a37b1f462514cf2724b)
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * page_pool.c
4  *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
5  *	Copyright (C) 2016 Red Hat, Inc.
6  */
7 
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/device.h>
12 
13 #include <net/page_pool/helpers.h>
14 #include <net/xdp.h>
15 
16 #include <linux/dma-direction.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/page-flags.h>
19 #include <linux/mm.h> /* for put_page() */
20 #include <linux/poison.h>
21 #include <linux/ethtool.h>
22 #include <linux/netdevice.h>
23 
24 #include <trace/events/page_pool.h>
25 
26 #define DEFER_TIME (msecs_to_jiffies(1000))
27 #define DEFER_WARN_INTERVAL (60 * HZ)
28 
29 #define BIAS_MAX	LONG_MAX
30 
31 #ifdef CONFIG_PAGE_POOL_STATS
32 /* alloc_stat_inc is intended to be used in softirq context */
33 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
34 /* recycle_stat_inc is safe to use when preemption is possible. */
35 #define recycle_stat_inc(pool, __stat)							\
36 	do {										\
37 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
38 		this_cpu_inc(s->__stat);						\
39 	} while (0)
40 
41 #define recycle_stat_add(pool, __stat, val)						\
42 	do {										\
43 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
44 		this_cpu_add(s->__stat, val);						\
45 	} while (0)
46 
47 static const char pp_stats[][ETH_GSTRING_LEN] = {
48 	"rx_pp_alloc_fast",
49 	"rx_pp_alloc_slow",
50 	"rx_pp_alloc_slow_ho",
51 	"rx_pp_alloc_empty",
52 	"rx_pp_alloc_refill",
53 	"rx_pp_alloc_waive",
54 	"rx_pp_recycle_cached",
55 	"rx_pp_recycle_cache_full",
56 	"rx_pp_recycle_ring",
57 	"rx_pp_recycle_ring_full",
58 	"rx_pp_recycle_released_ref",
59 };
60 
61 /**
62  * page_pool_get_stats() - fetch page pool stats
63  * @pool:	pool from which page was allocated
64  * @stats:	struct page_pool_stats to fill in
65  *
66  * Retrieve statistics about the page_pool. This API is only available
67  * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
68  * A pointer to a caller-allocated struct page_pool_stats structure
69  * is passed to this API, which fills it in. The caller can then report
70  * those stats to the user (perhaps via ethtool, debugfs, etc.).
71  */
72 bool page_pool_get_stats(struct page_pool *pool,
73 			 struct page_pool_stats *stats)
74 {
75 	int cpu = 0;
76 
77 	if (!stats)
78 		return false;
79 
80 	/* The caller is responsible for initializing stats. */
81 	stats->alloc_stats.fast += pool->alloc_stats.fast;
82 	stats->alloc_stats.slow += pool->alloc_stats.slow;
83 	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
84 	stats->alloc_stats.empty += pool->alloc_stats.empty;
85 	stats->alloc_stats.refill += pool->alloc_stats.refill;
86 	stats->alloc_stats.waive += pool->alloc_stats.waive;
87 
88 	for_each_possible_cpu(cpu) {
89 		const struct page_pool_recycle_stats *pcpu =
90 			per_cpu_ptr(pool->recycle_stats, cpu);
91 
92 		stats->recycle_stats.cached += pcpu->cached;
93 		stats->recycle_stats.cache_full += pcpu->cache_full;
94 		stats->recycle_stats.ring += pcpu->ring;
95 		stats->recycle_stats.ring_full += pcpu->ring_full;
96 		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
97 	}
98 
99 	return true;
100 }
101 EXPORT_SYMBOL(page_pool_get_stats);
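
/* Example (hypothetical driver code, not part of this file): because this
 * function adds into the caller-initialized structure, stats from several
 * pools can be accumulated into one summary.  The "priv->rxq[]" layout and
 * queue count are assumptions.
 *
 *	struct page_pool_stats stats = {};
 *	int i;
 *
 *	for (i = 0; i < priv->num_rx_queues; i++)
 *		page_pool_get_stats(priv->rxq[i].page_pool, &stats);
 *
 * After the loop, "stats" holds the totals across all RX queues.
 */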
102 
103 u8 *page_pool_ethtool_stats_get_strings(u8 *data)
104 {
105 	int i;
106 
107 	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
108 		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
109 		data += ETH_GSTRING_LEN;
110 	}
111 
112 	return data;
113 }
114 EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
115 
116 int page_pool_ethtool_stats_get_count(void)
117 {
118 	return ARRAY_SIZE(pp_stats);
119 }
120 EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
121 
122 u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
123 {
124 	struct page_pool_stats *pool_stats = stats;
125 
126 	*data++ = pool_stats->alloc_stats.fast;
127 	*data++ = pool_stats->alloc_stats.slow;
128 	*data++ = pool_stats->alloc_stats.slow_high_order;
129 	*data++ = pool_stats->alloc_stats.empty;
130 	*data++ = pool_stats->alloc_stats.refill;
131 	*data++ = pool_stats->alloc_stats.waive;
132 	*data++ = pool_stats->recycle_stats.cached;
133 	*data++ = pool_stats->recycle_stats.cache_full;
134 	*data++ = pool_stats->recycle_stats.ring;
135 	*data++ = pool_stats->recycle_stats.ring_full;
136 	*data++ = pool_stats->recycle_stats.released_refcnt;
137 
138 	return data;
139 }
140 EXPORT_SYMBOL(page_pool_ethtool_stats_get);
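
/* Example (hypothetical ethtool callbacks, for illustration only): the three
 * helpers above are meant to be chained from a driver's ethtool ops.
 * MY_N_STATS and the my_*() helpers are placeholders, not part of this file.
 *
 *	static int my_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			return MY_N_STATS + page_pool_ethtool_stats_get_count();
 *		return -EOPNOTSUPP;
 *	}
 *
 *	static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset != ETH_SS_STATS)
 *			return;
 *		data = my_fill_own_strings(data);
 *		data = page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static void my_get_ethtool_stats(struct net_device *dev,
 *					 struct ethtool_stats *es, u64 *data)
 *	{
 *		struct page_pool_stats stats = {};
 *
 *		page_pool_get_stats(my_rx_pool(dev), &stats);
 *		data = my_fill_own_stats(dev, data);
 *		data = page_pool_ethtool_stats_get(data, &stats);
 *	}
 */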
141 
142 #else
143 #define alloc_stat_inc(pool, __stat)
144 #define recycle_stat_inc(pool, __stat)
145 #define recycle_stat_add(pool, __stat, val)
146 #endif
147 
148 static bool page_pool_producer_lock(struct page_pool *pool)
149 	__acquires(&pool->ring.producer_lock)
150 {
151 	bool in_softirq = in_softirq();
152 
153 	if (in_softirq)
154 		spin_lock(&pool->ring.producer_lock);
155 	else
156 		spin_lock_bh(&pool->ring.producer_lock);
157 
158 	return in_softirq;
159 }
160 
161 static void page_pool_producer_unlock(struct page_pool *pool,
162 				      bool in_softirq)
163 	__releases(&pool->ring.producer_lock)
164 {
165 	if (in_softirq)
166 		spin_unlock(&pool->ring.producer_lock);
167 	else
168 		spin_unlock_bh(&pool->ring.producer_lock);
169 }
170 
171 static int page_pool_init(struct page_pool *pool,
172 			  const struct page_pool_params *params)
173 {
174 	unsigned int ring_qsize = 1024; /* Default */
175 
176 	memcpy(&pool->p, &params->fast, sizeof(pool->p));
177 	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
178 
179 	/* Validate only known flags were used */
180 	if (pool->p.flags & ~(PP_FLAG_ALL))
181 		return -EINVAL;
182 
183 	if (pool->p.pool_size)
184 		ring_qsize = pool->p.pool_size;
185 
186 	/* Sanity limit mem that can be pinned down */
187 	if (ring_qsize > 32768)
188 		return -E2BIG;
189 
190 	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
191 	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA sending,
192 	 * which is the XDP_TX use-case.
193 	 */
194 	if (pool->p.flags & PP_FLAG_DMA_MAP) {
195 		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
196 		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
197 			return -EINVAL;
198 	}
199 
200 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
201 		/* In order to request DMA-sync-for-device the page
202 		 * needs to be mapped
203 		 */
204 		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
205 			return -EINVAL;
206 
207 		if (!pool->p.max_len)
208 			return -EINVAL;
209 
210 		/* pool->p.offset has to be set according to the address
211 		 * offset used by the DMA engine to start copying rx data
212 		 */
213 	}
214 
215 #ifdef CONFIG_PAGE_POOL_STATS
216 	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
217 	if (!pool->recycle_stats)
218 		return -ENOMEM;
219 #endif
220 
221 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
222 #ifdef CONFIG_PAGE_POOL_STATS
223 		free_percpu(pool->recycle_stats);
224 #endif
225 		return -ENOMEM;
226 	}
227 
228 	atomic_set(&pool->pages_state_release_cnt, 0);
229 
230 	/* The driver that calls page_pool_create() must also call page_pool_destroy() */
231 	refcount_set(&pool->user_cnt, 1);
232 
233 	if (pool->p.flags & PP_FLAG_DMA_MAP)
234 		get_device(pool->p.dev);
235 
236 	return 0;
237 }
238 
239 /**
240  * page_pool_create() - create a page pool.
241  * @params: parameters, see struct page_pool_params
242  */
243 struct page_pool *page_pool_create(const struct page_pool_params *params)
244 {
245 	struct page_pool *pool;
246 	int err;
247 
248 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
249 	if (!pool)
250 		return ERR_PTR(-ENOMEM);
251 
252 	err = page_pool_init(pool, params);
253 	if (err < 0) {
254 		pr_warn("%s() gave up with errno %d\n", __func__, err);
255 		kfree(pool);
256 		return ERR_PTR(err);
257 	}
258 
259 	return pool;
260 }
261 EXPORT_SYMBOL(page_pool_create);
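
/* Example (hypothetical driver code, for illustration only): a typical RX
 * page pool for order-0 pages, DMA-mapped and synced by the pool.  The
 * "dma_dev", "rx_napi" and pool-size values are assumptions.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= dma_dev,
 *		.napi		= rx_napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */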
262 
263 static void page_pool_return_page(struct page_pool *pool, struct page *page);
264 
265 noinline
266 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
267 {
268 	struct ptr_ring *r = &pool->ring;
269 	struct page *page;
270 	int pref_nid; /* preferred NUMA node */
271 
272 	/* Quicker fallback, avoid locks when ring is empty */
273 	if (__ptr_ring_empty(r)) {
274 		alloc_stat_inc(pool, empty);
275 		return NULL;
276 	}
277 
278 	/* Softirq guarantees the CPU, and thus the NUMA node, is stable. This
279 	 * assumes the CPU refilling the driver RX-ring will also run RX-NAPI.
280 	 */
281 #ifdef CONFIG_NUMA
282 	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
283 #else
284 	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
285 	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
286 #endif
287 
288 	/* Refill alloc array, but only if NUMA match */
289 	do {
290 		page = __ptr_ring_consume(r);
291 		if (unlikely(!page))
292 			break;
293 
294 		if (likely(page_to_nid(page) == pref_nid)) {
295 			pool->alloc.cache[pool->alloc.count++] = page;
296 		} else {
297 			/* NUMA mismatch;
298 			 * (1) release 1 page to page-allocator and
299 			 * (2) break out to fall through to alloc_pages_node.
300 			 * This limits stress on the page buddy allocator.
301 			 */
302 			page_pool_return_page(pool, page);
303 			alloc_stat_inc(pool, waive);
304 			page = NULL;
305 			break;
306 		}
307 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
308 
309 	/* Return last page */
310 	if (likely(pool->alloc.count > 0)) {
311 		page = pool->alloc.cache[--pool->alloc.count];
312 		alloc_stat_inc(pool, refill);
313 	}
314 
315 	return page;
316 }
317 
318 /* fast path */
319 static struct page *__page_pool_get_cached(struct page_pool *pool)
320 {
321 	struct page *page;
322 
323 	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
324 	if (likely(pool->alloc.count)) {
325 		/* Fast-path */
326 		page = pool->alloc.cache[--pool->alloc.count];
327 		alloc_stat_inc(pool, fast);
328 	} else {
329 		page = page_pool_refill_alloc_cache(pool);
330 	}
331 
332 	return page;
333 }
334 
335 static void page_pool_dma_sync_for_device(struct page_pool *pool,
336 					  struct page *page,
337 					  unsigned int dma_sync_size)
338 {
339 	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
340 
341 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
342 	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
343 					 pool->p.offset, dma_sync_size,
344 					 pool->p.dma_dir);
345 }
346 
347 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
348 {
349 	dma_addr_t dma;
350 
351 	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
352 	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
353 	 * into page private data (e.g. a 32-bit CPU with 64-bit DMA caps).
354 	 * This mapping is kept for the lifetime of the page, until it leaves the pool.
355 	 */
356 	dma = dma_map_page_attrs(pool->p.dev, page, 0,
357 				 (PAGE_SIZE << pool->p.order),
358 				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
359 						  DMA_ATTR_WEAK_ORDERING);
360 	if (dma_mapping_error(pool->p.dev, dma))
361 		return false;
362 
363 	if (page_pool_set_dma_addr(page, dma))
364 		goto unmap_failed;
365 
366 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
367 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
368 
369 	return true;
370 
371 unmap_failed:
372 	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
373 	dma_unmap_page_attrs(pool->p.dev, dma,
374 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
375 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
376 	return false;
377 }
378 
379 static void page_pool_set_pp_info(struct page_pool *pool,
380 				  struct page *page)
381 {
382 	page->pp = pool;
383 	page->pp_magic |= PP_SIGNATURE;
384 
385 	/* Ensuring all pages have been split into one fragment initially:
386 	 * page_pool_set_pp_info() is only called once for every page when it
387 	 * is allocated from the page allocator and page_pool_fragment_page()
388 	 * is dirtying the same cache line as the page->pp_magic above, so
389 	 * the overhead is negligible.
390 	 */
391 	page_pool_fragment_page(page, 1);
392 	if (pool->slow.init_callback)
393 		pool->slow.init_callback(page, pool->slow.init_arg);
394 }
395 
396 static void page_pool_clear_pp_info(struct page *page)
397 {
398 	page->pp_magic = 0;
399 	page->pp = NULL;
400 }
401 
402 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
403 						 gfp_t gfp)
404 {
405 	struct page *page;
406 
407 	gfp |= __GFP_COMP;
408 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
409 	if (unlikely(!page))
410 		return NULL;
411 
412 	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
413 	    unlikely(!page_pool_dma_map(pool, page))) {
414 		put_page(page);
415 		return NULL;
416 	}
417 
418 	alloc_stat_inc(pool, slow_high_order);
419 	page_pool_set_pp_info(pool, page);
420 
421 	/* Track how many pages are held 'in-flight' */
422 	pool->pages_state_hold_cnt++;
423 	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
424 	return page;
425 }
426 
427 /* slow path */
428 noinline
429 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
430 						 gfp_t gfp)
431 {
432 	const int bulk = PP_ALLOC_CACHE_REFILL;
433 	unsigned int pp_flags = pool->p.flags;
434 	unsigned int pp_order = pool->p.order;
435 	struct page *page;
436 	int i, nr_pages;
437 
438 	/* Don't support bulk alloc for high-order pages */
439 	if (unlikely(pp_order))
440 		return __page_pool_alloc_page_order(pool, gfp);
441 
442 	/* Unnecessary as alloc cache is empty, but guarantees zero count */
443 	if (unlikely(pool->alloc.count > 0))
444 		return pool->alloc.cache[--pool->alloc.count];
445 
446 	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
447 	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
448 
449 	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
450 					       pool->alloc.cache);
451 	if (unlikely(!nr_pages))
452 		return NULL;
453 
454 	/* Pages have been filled into the alloc.cache array, but the count is zero
455 	 * and the pages have not yet been (possibly) DMA mapped.
456 	 */
457 	for (i = 0; i < nr_pages; i++) {
458 		page = pool->alloc.cache[i];
459 		if ((pp_flags & PP_FLAG_DMA_MAP) &&
460 		    unlikely(!page_pool_dma_map(pool, page))) {
461 			put_page(page);
462 			continue;
463 		}
464 
465 		page_pool_set_pp_info(pool, page);
466 		pool->alloc.cache[pool->alloc.count++] = page;
467 		/* Track how many pages are held 'in-flight' */
468 		pool->pages_state_hold_cnt++;
469 		trace_page_pool_state_hold(pool, page,
470 					   pool->pages_state_hold_cnt);
471 	}
472 
473 	/* Return last page */
474 	if (likely(pool->alloc.count > 0)) {
475 		page = pool->alloc.cache[--pool->alloc.count];
476 		alloc_stat_inc(pool, slow);
477 	} else {
478 		page = NULL;
479 	}
480 
481 	/* A page that was just allocated should/must have refcnt 1. */
482 	return page;
483 }
484 
485 /* Use page_pool to replace alloc_pages() API calls, but provide a
486  * synchronization guarantee for the allocation side.
487  */
488 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
489 {
490 	struct page *page;
491 
492 	/* Fast-path: Get a page from cache */
493 	page = __page_pool_get_cached(pool);
494 	if (page)
495 		return page;
496 
497 	/* Slow-path: cache empty, do real allocation */
498 	page = __page_pool_alloc_pages_slow(pool, gfp);
499 	return page;
500 }
501 EXPORT_SYMBOL(page_pool_alloc_pages);
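
/* Example (hypothetical RX refill path, for illustration only): with
 * PP_FLAG_DMA_MAP set the pool already mapped the page, so the driver only
 * reads back the DMA address to program its descriptor.  "ring",
 * MY_RX_HEADROOM and my_write_rx_desc() are assumptions.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 *
 *	dma = page_pool_get_dma_addr(page) + MY_RX_HEADROOM;
 *	my_write_rx_desc(ring, dma, PAGE_SIZE - MY_RX_HEADROOM);
 */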
502 
503 /* Calculate distance between two u32 values, valid if distance is below 2^(31)
504  *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
505  */
506 #define _distance(a, b)	(s32)((a) - (b))
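/* For example, if hold_cnt has wrapped around to 3 while release_cnt is
 * still 0xfffffffe, then _distance(3, 0xfffffffe) = (s32)(3 - 0xfffffffe) = 5,
 * i.e. five pages remain in-flight even though hold_cnt < release_cnt
 * numerically.
 */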
507 
508 static s32 page_pool_inflight(struct page_pool *pool)
509 {
510 	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
511 	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
512 	s32 inflight;
513 
514 	inflight = _distance(hold_cnt, release_cnt);
515 
516 	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
517 	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
518 
519 	return inflight;
520 }
521 
522 /* Disconnect a page from a page_pool.  API users may need to do this
523  * to allow the page to be used again as a regular page, one that will
524  * eventually be returned to the normal
525  * page-allocator via put_page().
526  */
527 static void page_pool_return_page(struct page_pool *pool, struct page *page)
528 {
529 	dma_addr_t dma;
530 	int count;
531 
532 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
533 		/* Always account for inflight pages, even if we didn't
534 		 * map them
535 		 */
536 		goto skip_dma_unmap;
537 
538 	dma = page_pool_get_dma_addr(page);
539 
540 	/* When page is unmapped, it cannot be returned to our pool */
541 	dma_unmap_page_attrs(pool->p.dev, dma,
542 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
543 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
544 	page_pool_set_dma_addr(page, 0);
545 skip_dma_unmap:
546 	page_pool_clear_pp_info(page);
547 
548 	/* This may be the last page returned, releasing the pool, so
549 	 * it is not safe to reference pool afterwards.
550 	 */
551 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
552 	trace_page_pool_state_release(pool, page, count);
553 
554 	put_page(page);
555 	/* An optimization would be to call __free_pages(page, pool->p.order)
556 	 * knowing page is not part of page-cache (thus avoiding a
557 	 * __page_cache_release() call).
558 	 */
559 }
560 
561 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
562 {
563 	int ret;
564 	/* BH protection not needed if current is softirq */
565 	if (in_softirq())
566 		ret = ptr_ring_produce(&pool->ring, page);
567 	else
568 		ret = ptr_ring_produce_bh(&pool->ring, page);
569 
570 	if (!ret) {
571 		recycle_stat_inc(pool, ring);
572 		return true;
573 	}
574 
575 	return false;
576 }
577 
578 /* Only allow direct recycling in special circumstances, into the
579  * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
580  *
581  * Caller must provide appropriate safe context.
582  */
583 static bool page_pool_recycle_in_cache(struct page *page,
584 				       struct page_pool *pool)
585 {
586 	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
587 		recycle_stat_inc(pool, cache_full);
588 		return false;
589 	}
590 
591 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
592 	pool->alloc.cache[pool->alloc.count++] = page;
593 	recycle_stat_inc(pool, cached);
594 	return true;
595 }
596 
597 /* If the page refcnt == 1, this will try to recycle the page.
598  * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
599  * the configured size min(dma_sync_size, pool->max_len).
600  * If the page refcnt != 1, then the page will be returned to memory
601  * subsystem.
602  */
603 static __always_inline struct page *
604 __page_pool_put_page(struct page_pool *pool, struct page *page,
605 		     unsigned int dma_sync_size, bool allow_direct)
606 {
607 	lockdep_assert_no_hardirq();
608 
609 	/* This allocator is optimized for the XDP mode that uses
610 	 * one-frame-per-page, but has fallbacks that act like the
611 	 * regular page allocator APIs.
612 	 *
613 	 * refcnt == 1 means page_pool owns page, and can recycle it.
614 	 *
615 	 * The page is NOT reusable when it was allocated while the system was
616 	 * under some memory pressure (page_is_pfmemalloc()).
617 	 */
618 	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
619 		/* Read barrier done in page_ref_count / READ_ONCE */
620 
621 		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
622 			page_pool_dma_sync_for_device(pool, page,
623 						      dma_sync_size);
624 
625 		if (allow_direct && in_softirq() &&
626 		    page_pool_recycle_in_cache(page, pool))
627 			return NULL;
628 
629 		/* Page found as candidate for recycling */
630 		return page;
631 	}
632 	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
633 	 *
634 	 * Many drivers split up the page into fragments, and some
635 	 * want to keep doing this to save memory and do refcnt based
636 	 * recycling. Support this use case too, to ease drivers
637 	 * switching between XDP/non-XDP.
638 	 *
639 	 * In case page_pool maintains the DMA mapping, the API user must
640 	 * call page_pool_put_page() once.  In this elevated refcnt
641 	 * case, the DMA mapping is unmapped/released, as the driver is likely
642 	 * doing refcnt-based recycle tricks, meaning another process
643 	 * will be invoking put_page().
644 	 */
645 	recycle_stat_inc(pool, released_refcnt);
646 	page_pool_return_page(pool, page);
647 
648 	return NULL;
649 }
650 
651 void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
652 				  unsigned int dma_sync_size, bool allow_direct)
653 {
654 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
655 	if (page && !page_pool_recycle_in_ring(pool, page)) {
656 		/* Recycle ring full, fall back to freeing the page */
657 		recycle_stat_inc(pool, ring_full);
658 		page_pool_return_page(pool, page);
659 	}
660 }
661 EXPORT_SYMBOL(page_pool_put_defragged_page);
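
/* Example (hypothetical NAPI RX path, for illustration only): a driver that
 * drops a frame can hand the whole page straight back.  From softirq context
 * allow_direct may be true, so the page can land in the lockless alloc cache.
 *
 *	page_pool_put_full_page(pool, page, true);
 *
 * page_pool_put_full_page() is the <net/page_pool/helpers.h> wrapper that,
 * once the last fragment reference is dropped, ends up here with
 * dma_sync_size == -1 (sync the full pool->p.max_len area).
 */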
662 
663 /**
664  * page_pool_put_page_bulk() - release references on multiple pages
665  * @pool:	pool from which pages were allocated
666  * @data:	array holding page pointers
667  * @count:	number of pages in @data
668  *
669  * Tries to refill a number of pages into the ptr_ring cache holding ptr_ring
670  * producer lock. If the ptr_ring is full, page_pool_put_page_bulk()
671  * will release leftover pages to the page allocator.
672  * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx
673  * completion loop for the XDP_REDIRECT use case.
674  *
675  * Please note the caller must not use data area after running
676  * page_pool_put_page_bulk(), as this function overwrites it.
677  */
678 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
679 			     int count)
680 {
681 	int i, bulk_len = 0;
682 	bool in_softirq;
683 
684 	for (i = 0; i < count; i++) {
685 		struct page *page = virt_to_head_page(data[i]);
686 
687 		/* In the page frag case, skip pages that still have other users */
688 		if (!page_pool_is_last_frag(page))
689 			continue;
690 
691 		page = __page_pool_put_page(pool, page, -1, false);
692 		/* Approved for bulk recycling in ptr_ring cache */
693 		if (page)
694 			data[bulk_len++] = page;
695 	}
696 
697 	if (unlikely(!bulk_len))
698 		return;
699 
700 	/* Bulk producer into ptr_ring page_pool cache */
701 	in_softirq = page_pool_producer_lock(pool);
702 	for (i = 0; i < bulk_len; i++) {
703 		if (__ptr_ring_produce(&pool->ring, data[i])) {
704 			/* ring full */
705 			recycle_stat_inc(pool, ring_full);
706 			break;
707 		}
708 	}
709 	recycle_stat_add(pool, ring, i);
710 	page_pool_producer_unlock(pool, in_softirq);
711 
712 	/* Hopefully all pages were returned into the ptr_ring */
713 	if (likely(i == bulk_len))
714 		return;
715 
716 	/* ptr_ring cache full, free remaining pages outside producer lock
717 	 * since put_page() with refcnt == 1 can be an expensive operation
718 	 */
719 	for (; i < bulk_len; i++)
720 		page_pool_return_page(pool, data[i]);
721 }
722 EXPORT_SYMBOL(page_pool_put_page_bulk);
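
/* Example (hypothetical XDP_REDIRECT Tx-completion loop, for illustration
 * only): drivers normally reach this function through the xdp_frame bulk
 * helpers rather than calling it directly.  my_completed_frame() is a
 * placeholder for however the driver fetches a finished xdp_frame.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	for (i = 0; i < done; i++)
 *		xdp_return_frame_bulk(my_completed_frame(ring, i), &bq);
 *	xdp_flush_frame_bulk(&bq);
 *
 * Each data[] entry handed to this function is a frame virtual address,
 * which virt_to_head_page() above maps back to its pool page.
 */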
723 
724 static struct page *page_pool_drain_frag(struct page_pool *pool,
725 					 struct page *page)
726 {
727 	long drain_count = BIAS_MAX - pool->frag_users;
728 
729 	/* Some user is still using the page frag */
730 	if (likely(page_pool_defrag_page(page, drain_count)))
731 		return NULL;
732 
733 	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
734 		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
735 			page_pool_dma_sync_for_device(pool, page, -1);
736 
737 		return page;
738 	}
739 
740 	page_pool_return_page(pool, page);
741 	return NULL;
742 }
743 
744 static void page_pool_free_frag(struct page_pool *pool)
745 {
746 	long drain_count = BIAS_MAX - pool->frag_users;
747 	struct page *page = pool->frag_page;
748 
749 	pool->frag_page = NULL;
750 
751 	if (!page || page_pool_defrag_page(page, drain_count))
752 		return;
753 
754 	page_pool_return_page(pool, page);
755 }
756 
757 struct page *page_pool_alloc_frag(struct page_pool *pool,
758 				  unsigned int *offset,
759 				  unsigned int size, gfp_t gfp)
760 {
761 	unsigned int max_size = PAGE_SIZE << pool->p.order;
762 	struct page *page = pool->frag_page;
763 
764 	if (WARN_ON(size > max_size))
765 		return NULL;
766 
767 	size = ALIGN(size, dma_get_cache_alignment());
768 	*offset = pool->frag_offset;
769 
770 	if (page && *offset + size > max_size) {
771 		page = page_pool_drain_frag(pool, page);
772 		if (page) {
773 			alloc_stat_inc(pool, fast);
774 			goto frag_reset;
775 		}
776 	}
777 
778 	if (!page) {
779 		page = page_pool_alloc_pages(pool, gfp);
780 		if (unlikely(!page)) {
781 			pool->frag_page = NULL;
782 			return NULL;
783 		}
784 
785 		pool->frag_page = page;
786 
787 frag_reset:
788 		pool->frag_users = 1;
789 		*offset = 0;
790 		pool->frag_offset = size;
791 		page_pool_fragment_page(page, BIAS_MAX);
792 		return page;
793 	}
794 
795 	pool->frag_users++;
796 	pool->frag_offset = *offset + size;
797 	alloc_stat_inc(pool, fast);
798 	return page;
799 }
800 EXPORT_SYMBOL(page_pool_alloc_frag);
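
/* Example (hypothetical driver code, for illustration only): small RX
 * buffers can share one pool page; each call returns the page plus the
 * offset of a fresh, cache-aligned fragment.  The 2048-byte size is an
 * assumption.
 *
 *	unsigned int offset;
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 *
 *	dma = page_pool_get_dma_addr(page) + offset;
 *
 * Successive calls keep handing out fragments of the same page until
 * frag_offset would exceed PAGE_SIZE << pool->p.order, at which point a
 * new page is started.
 */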
801 
802 static void page_pool_empty_ring(struct page_pool *pool)
803 {
804 	struct page *page;
805 
806 	/* Empty recycle ring */
807 	while ((page = ptr_ring_consume_bh(&pool->ring))) {
808 		/* Verify the refcnt invariant of cached pages */
809 		if (!(page_ref_count(page) == 1))
810 			pr_crit("%s() page_pool refcnt %d violation\n",
811 				__func__, page_ref_count(page));
812 
813 		page_pool_return_page(pool, page);
814 	}
815 }
816 
817 static void __page_pool_destroy(struct page_pool *pool)
818 {
819 	if (pool->disconnect)
820 		pool->disconnect(pool);
821 
822 	ptr_ring_cleanup(&pool->ring, NULL);
823 
824 	if (pool->p.flags & PP_FLAG_DMA_MAP)
825 		put_device(pool->p.dev);
826 
827 #ifdef CONFIG_PAGE_POOL_STATS
828 	free_percpu(pool->recycle_stats);
829 #endif
830 	kfree(pool);
831 }
832 
833 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
834 {
835 	struct page *page;
836 
837 	if (pool->destroy_cnt)
838 		return;
839 
840 	/* Empty the alloc cache; assume the caller has made sure it is
841 	 * no longer in use, and that page_pool_alloc_pages() cannot be
842 	 * called concurrently.
843 	 */
844 	while (pool->alloc.count) {
845 		page = pool->alloc.cache[--pool->alloc.count];
846 		page_pool_return_page(pool, page);
847 	}
848 }
849 
850 static void page_pool_scrub(struct page_pool *pool)
851 {
852 	page_pool_empty_alloc_cache_once(pool);
853 	pool->destroy_cnt++;
854 
855 	/* No more consumers should exist, but producers could still
856 	 * be in-flight.
857 	 */
858 	page_pool_empty_ring(pool);
859 }
860 
861 static int page_pool_release(struct page_pool *pool)
862 {
863 	int inflight;
864 
865 	page_pool_scrub(pool);
866 	inflight = page_pool_inflight(pool);
867 	if (!inflight)
868 		__page_pool_destroy(pool);
869 
870 	return inflight;
871 }
872 
873 static void page_pool_release_retry(struct work_struct *wq)
874 {
875 	struct delayed_work *dwq = to_delayed_work(wq);
876 	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
877 	int inflight;
878 
879 	inflight = page_pool_release(pool);
880 	if (!inflight)
881 		return;
882 
883 	/* Periodic warning */
884 	if (time_after_eq(jiffies, pool->defer_warn)) {
885 		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
886 
887 		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
888 			__func__, inflight, sec);
889 		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
890 	}
891 
892 	/* Still not ready to be disconnected, retry later */
893 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
894 }
895 
896 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
897 			   struct xdp_mem_info *mem)
898 {
899 	refcount_inc(&pool->user_cnt);
900 	pool->disconnect = disconnect;
901 	pool->xdp_mem_id = mem->id;
902 }
903 
904 void page_pool_unlink_napi(struct page_pool *pool)
905 {
906 	if (!pool->p.napi)
907 		return;
908 
909 	/* To avoid races with recycling and additional barriers, make sure
910 	 * pool and NAPI are unlinked when NAPI is disabled.
911 	 */
912 	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
913 		READ_ONCE(pool->p.napi->list_owner) != -1);
914 
915 	WRITE_ONCE(pool->p.napi, NULL);
916 }
917 EXPORT_SYMBOL(page_pool_unlink_napi);
918 
919 void page_pool_destroy(struct page_pool *pool)
920 {
921 	if (!pool)
922 		return;
923 
924 	if (!page_pool_put(pool))
925 		return;
926 
927 	page_pool_unlink_napi(pool);
928 	page_pool_free_frag(pool);
929 
930 	if (!page_pool_release(pool))
931 		return;
932 
933 	pool->defer_start = jiffies;
934 	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
935 
936 	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
937 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
938 }
939 EXPORT_SYMBOL(page_pool_destroy);
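
/* Example (hypothetical teardown order, for illustration only): a driver
 * normally disables NAPI, returns every page it still holds (e.g. those
 * sitting in its RX ring) via page_pool_put_full_page(), and only then calls
 * page_pool_destroy().  Pages still in-flight elsewhere keep the pool alive;
 * the deferred work above retries the release and warns every
 * DEFER_WARN_INTERVAL until the last page comes back.
 *
 *	napi_disable(&priv->rx_napi);
 *	my_free_rx_ring_pages(priv);
 *	page_pool_destroy(priv->page_pool);
 */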
940 
941 /* Caller must provide appropriate safe context, e.g. NAPI. */
942 void page_pool_update_nid(struct page_pool *pool, int new_nid)
943 {
944 	struct page *page;
945 
946 	trace_page_pool_update_nid(pool, new_nid);
947 	pool->p.nid = new_nid;
948 
949 	/* Flush pool alloc cache, as refill will check NUMA node */
950 	while (pool->alloc.count) {
951 		page = pool->alloc.cache[--pool->alloc.count];
952 		page_pool_return_page(pool, page);
953 	}
954 }
955 EXPORT_SYMBOL(page_pool_update_nid);
956