xref: /linux/net/core/page_pool.c (revision e958da0ddbe831197a0023251880a4a09d5ba268)
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * page_pool.c
4  *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
5  *	Copyright (C) 2016 Red Hat, Inc.
6  */
7 
8 #include <linux/error-injection.h>
9 #include <linux/types.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 
14 #include <net/page_pool/helpers.h>
15 #include <net/xdp.h>
16 
17 #include <linux/dma-direction.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/page-flags.h>
20 #include <linux/mm.h> /* for put_page() */
21 #include <linux/poison.h>
22 #include <linux/ethtool.h>
23 #include <linux/netdevice.h>
24 
25 #include <trace/events/page_pool.h>
26 
27 #include "page_pool_priv.h"
28 
29 #define DEFER_TIME (msecs_to_jiffies(1000))
30 #define DEFER_WARN_INTERVAL (60 * HZ)
31 
32 #define BIAS_MAX	(LONG_MAX >> 1)
33 
34 #ifdef CONFIG_PAGE_POOL_STATS
35 static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
36 
37 /* alloc_stat_inc is intended to be used in softirq context */
38 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
39 /* recycle_stat_inc is safe to use when preemption is possible. */
40 #define recycle_stat_inc(pool, __stat)							\
41 	do {										\
42 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
43 		this_cpu_inc(s->__stat);						\
44 	} while (0)
45 
46 #define recycle_stat_add(pool, __stat, val)						\
47 	do {										\
48 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
49 		this_cpu_add(s->__stat, val);						\
50 	} while (0)
51 
52 static const char pp_stats[][ETH_GSTRING_LEN] = {
53 	"rx_pp_alloc_fast",
54 	"rx_pp_alloc_slow",
55 	"rx_pp_alloc_slow_ho",
56 	"rx_pp_alloc_empty",
57 	"rx_pp_alloc_refill",
58 	"rx_pp_alloc_waive",
59 	"rx_pp_recycle_cached",
60 	"rx_pp_recycle_cache_full",
61 	"rx_pp_recycle_ring",
62 	"rx_pp_recycle_ring_full",
63 	"rx_pp_recycle_released_ref",
64 };
65 
66 /**
67  * page_pool_get_stats() - fetch page pool stats
68  * @pool:	the page_pool to read statistics from
69  * @stats:	struct page_pool_stats to fill in
70  *
71  * Retrieve statistics about the page_pool. This API is only available
72  * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
73  * The caller passes a pointer to a caller-allocated struct page_pool_stats,
74  * which this function fills in. The caller can then report
75  * those stats to the user (perhaps via ethtool, debugfs, etc.).
76  */
77 bool page_pool_get_stats(const struct page_pool *pool,
78 			 struct page_pool_stats *stats)
79 {
80 	int cpu = 0;
81 
82 	if (!stats)
83 		return false;
84 
85 	/* The caller is responsible for initializing stats. */
86 	stats->alloc_stats.fast += pool->alloc_stats.fast;
87 	stats->alloc_stats.slow += pool->alloc_stats.slow;
88 	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
89 	stats->alloc_stats.empty += pool->alloc_stats.empty;
90 	stats->alloc_stats.refill += pool->alloc_stats.refill;
91 	stats->alloc_stats.waive += pool->alloc_stats.waive;
92 
93 	for_each_possible_cpu(cpu) {
94 		const struct page_pool_recycle_stats *pcpu =
95 			per_cpu_ptr(pool->recycle_stats, cpu);
96 
97 		stats->recycle_stats.cached += pcpu->cached;
98 		stats->recycle_stats.cache_full += pcpu->cache_full;
99 		stats->recycle_stats.ring += pcpu->ring;
100 		stats->recycle_stats.ring_full += pcpu->ring_full;
101 		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
102 	}
103 
104 	return true;
105 }
106 EXPORT_SYMBOL(page_pool_get_stats);
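
/* Illustrative sketch (hypothetical driver code, not built here): a driver
 * would typically sum the stats of all its RX pools into one caller-allocated
 * structure.  The "rx_ring" layout is an assumption; only the
 * page_pool_get_stats() call itself is the real API.
 *
 *	struct page_pool_stats stats = { };
 *	int i;
 *
 *	for (i = 0; i < num_rx_rings; i++)
 *		page_pool_get_stats(rx_ring[i].page_pool, &stats);
 *
 * The summed counters can then be exposed via ethtool or debugfs.
 */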
107 
108 u8 *page_pool_ethtool_stats_get_strings(u8 *data)
109 {
110 	int i;
111 
112 	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
113 		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
114 		data += ETH_GSTRING_LEN;
115 	}
116 
117 	return data;
118 }
119 EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
120 
121 int page_pool_ethtool_stats_get_count(void)
122 {
123 	return ARRAY_SIZE(pp_stats);
124 }
125 EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
126 
127 u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
128 {
129 	const struct page_pool_stats *pool_stats = stats;
130 
131 	*data++ = pool_stats->alloc_stats.fast;
132 	*data++ = pool_stats->alloc_stats.slow;
133 	*data++ = pool_stats->alloc_stats.slow_high_order;
134 	*data++ = pool_stats->alloc_stats.empty;
135 	*data++ = pool_stats->alloc_stats.refill;
136 	*data++ = pool_stats->alloc_stats.waive;
137 	*data++ = pool_stats->recycle_stats.cached;
138 	*data++ = pool_stats->recycle_stats.cache_full;
139 	*data++ = pool_stats->recycle_stats.ring;
140 	*data++ = pool_stats->recycle_stats.ring_full;
141 	*data++ = pool_stats->recycle_stats.released_refcnt;
142 
143 	return data;
144 }
145 EXPORT_SYMBOL(page_pool_ethtool_stats_get);
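
/* Illustrative sketch of how the three ethtool helpers above are usually
 * wired into a driver's ethtool ops.  The my_*() callbacks and my_pool()
 * lookup are hypothetical; the page_pool_* calls are the real API.
 *
 *	static int my_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : 0;
 *	}
 *
 *	static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			data = page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static void my_get_ethtool_stats(struct net_device *dev,
 *					 struct ethtool_stats *e, u64 *data)
 *	{
 *		struct page_pool_stats stats = { };
 *
 *		if (page_pool_get_stats(my_pool(dev), &stats))
 *			data = page_pool_ethtool_stats_get(data, &stats);
 *	}
 */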
146 
147 #else
148 #define alloc_stat_inc(pool, __stat)
149 #define recycle_stat_inc(pool, __stat)
150 #define recycle_stat_add(pool, __stat, val)
151 #endif
152 
153 static bool page_pool_producer_lock(struct page_pool *pool)
154 	__acquires(&pool->ring.producer_lock)
155 {
156 	bool in_softirq = in_softirq();
157 
158 	if (in_softirq)
159 		spin_lock(&pool->ring.producer_lock);
160 	else
161 		spin_lock_bh(&pool->ring.producer_lock);
162 
163 	return in_softirq;
164 }
165 
166 static void page_pool_producer_unlock(struct page_pool *pool,
167 				      bool in_softirq)
168 	__releases(&pool->ring.producer_lock)
169 {
170 	if (in_softirq)
171 		spin_unlock(&pool->ring.producer_lock);
172 	else
173 		spin_unlock_bh(&pool->ring.producer_lock);
174 }
175 
176 static int page_pool_init(struct page_pool *pool,
177 			  const struct page_pool_params *params,
178 			  int cpuid)
179 {
180 	unsigned int ring_qsize = 1024; /* Default */
181 
182 	memcpy(&pool->p, &params->fast, sizeof(pool->p));
183 	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
184 
185 	pool->cpuid = cpuid;
186 
187 	/* Validate only known flags were used */
188 	if (pool->p.flags & ~(PP_FLAG_ALL))
189 		return -EINVAL;
190 
191 	if (pool->p.pool_size)
192 		ring_qsize = pool->p.pool_size;
193 
194 	/* Sanity limit mem that can be pinned down */
195 	if (ring_qsize > 32768)
196 		return -E2BIG;
197 
198 	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
199 	 * DMA_BIDIRECTIONAL additionally allows the page to be used for DMA
200 	 * transmit, which is the XDP_TX use-case.
201 	 */
202 	if (pool->p.flags & PP_FLAG_DMA_MAP) {
203 		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
204 		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
205 			return -EINVAL;
206 	}
207 
208 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
209 		/* In order to request DMA-sync-for-device the page
210 		 * needs to be mapped
211 		 */
212 		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
213 			return -EINVAL;
214 
215 		if (!pool->p.max_len)
216 			return -EINVAL;
217 
218 		/* pool->p.offset has to be set according to the address
219 		 * offset used by the DMA engine to start copying rx data
220 		 */
221 	}
222 
223 	pool->has_init_callback = !!pool->slow.init_callback;
224 
225 #ifdef CONFIG_PAGE_POOL_STATS
226 	if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
227 		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
228 		if (!pool->recycle_stats)
229 			return -ENOMEM;
230 	} else {
231 	/* For system page pool instances we use a single stats object
232 		 * instead of allocating a separate percpu variable for each
233 		 * (also percpu) page pool instance.
234 		 */
235 		pool->recycle_stats = &pp_system_recycle_stats;
236 	}
237 #endif
238 
239 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
240 #ifdef CONFIG_PAGE_POOL_STATS
241 		if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
242 			free_percpu(pool->recycle_stats);
243 #endif
244 		return -ENOMEM;
245 	}
246 
247 	atomic_set(&pool->pages_state_release_cnt, 0);
248 
249 	/* The driver calling page_pool_create() must also call page_pool_destroy() */
250 	refcount_set(&pool->user_cnt, 1);
251 
252 	if (pool->p.flags & PP_FLAG_DMA_MAP)
253 		get_device(pool->p.dev);
254 
255 	return 0;
256 }
257 
258 static void page_pool_uninit(struct page_pool *pool)
259 {
260 	ptr_ring_cleanup(&pool->ring, NULL);
261 
262 	if (pool->p.flags & PP_FLAG_DMA_MAP)
263 		put_device(pool->p.dev);
264 
265 #ifdef CONFIG_PAGE_POOL_STATS
266 	if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
267 		free_percpu(pool->recycle_stats);
268 #endif
269 }
270 
271 /**
272  * page_pool_create_percpu() - create a page pool for a given cpu.
273  * @params: parameters, see struct page_pool_params
274  * @cpuid: cpu identifier
275  */
276 struct page_pool *
277 page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
278 {
279 	struct page_pool *pool;
280 	int err;
281 
282 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
283 	if (!pool)
284 		return ERR_PTR(-ENOMEM);
285 
286 	err = page_pool_init(pool, params, cpuid);
287 	if (err < 0)
288 		goto err_free;
289 
290 	err = page_pool_list(pool);
291 	if (err)
292 		goto err_uninit;
293 
294 	return pool;
295 
296 err_uninit:
297 	page_pool_uninit(pool);
298 err_free:
299 	pr_warn("%s() gave up with errno %d\n", __func__, err);
300 	kfree(pool);
301 	return ERR_PTR(err);
302 }
303 EXPORT_SYMBOL(page_pool_create_percpu);
304 
305 /**
306  * page_pool_create() - create a page pool
307  * @params: parameters, see struct page_pool_params
308  */
309 struct page_pool *page_pool_create(const struct page_pool_params *params)
310 {
311 	return page_pool_create_percpu(params, -1);
312 }
313 EXPORT_SYMBOL(page_pool_create);
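
/* Illustrative sketch of typical pool creation in a driver RX setup path
 * (see also Documentation/networking/page_pool.rst).  The pdev/rxq variables
 * are hypothetical driver state; the params and flags are the real API.
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.napi		= &rxq->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *	struct page_pool *pool;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */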
314 
315 static void page_pool_return_page(struct page_pool *pool, struct page *page);
316 
317 noinline
318 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
319 {
320 	struct ptr_ring *r = &pool->ring;
321 	struct page *page;
322 	int pref_nid; /* preferred NUMA node */
323 
324 	/* Quicker fallback, avoid locks when ring is empty */
325 	if (__ptr_ring_empty(r)) {
326 		alloc_stat_inc(pool, empty);
327 		return NULL;
328 	}
329 
330 	/* Softirq guarantees the CPU, and thus the NUMA node, is stable. This
331 	 * assumes the CPU refilling the driver RX-ring also runs the RX-NAPI.
332 	 */
333 #ifdef CONFIG_NUMA
334 	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
335 #else
336 	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
337 	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
338 #endif
339 
340 	/* Refill alloc array, but only if NUMA match */
341 	do {
342 		page = __ptr_ring_consume(r);
343 		if (unlikely(!page))
344 			break;
345 
346 		if (likely(page_to_nid(page) == pref_nid)) {
347 			pool->alloc.cache[pool->alloc.count++] = page;
348 		} else {
349 			/* NUMA mismatch;
350 			 * (1) release 1 page to page-allocator and
351 			 * (2) break out and fall through to alloc_pages_node.
352 			 * This limits stress on the page buddy allocator.
353 			 */
354 			page_pool_return_page(pool, page);
355 			alloc_stat_inc(pool, waive);
356 			page = NULL;
357 			break;
358 		}
359 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
360 
361 	/* Return last page */
362 	if (likely(pool->alloc.count > 0)) {
363 		page = pool->alloc.cache[--pool->alloc.count];
364 		alloc_stat_inc(pool, refill);
365 	}
366 
367 	return page;
368 }
369 
370 /* fast path */
371 static struct page *__page_pool_get_cached(struct page_pool *pool)
372 {
373 	struct page *page;
374 
375 	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
376 	if (likely(pool->alloc.count)) {
377 		/* Fast-path */
378 		page = pool->alloc.cache[--pool->alloc.count];
379 		alloc_stat_inc(pool, fast);
380 	} else {
381 		page = page_pool_refill_alloc_cache(pool);
382 	}
383 
384 	return page;
385 }
386 
387 static void page_pool_dma_sync_for_device(const struct page_pool *pool,
388 					  const struct page *page,
389 					  unsigned int dma_sync_size)
390 {
391 	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
392 
393 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
394 	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
395 					 pool->p.offset, dma_sync_size,
396 					 pool->p.dma_dir);
397 }
398 
399 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
400 {
401 	dma_addr_t dma;
402 
403 	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
404 	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
405 	 * into page private data (e.g. a 32-bit CPU with 64-bit DMA caps).
406 	 * The mapping is kept for the lifetime of the page, until it leaves the pool.
407 	 */
408 	dma = dma_map_page_attrs(pool->p.dev, page, 0,
409 				 (PAGE_SIZE << pool->p.order),
410 				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
411 						  DMA_ATTR_WEAK_ORDERING);
412 	if (dma_mapping_error(pool->p.dev, dma))
413 		return false;
414 
415 	if (page_pool_set_dma_addr(page, dma))
416 		goto unmap_failed;
417 
418 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
419 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
420 
421 	return true;
422 
423 unmap_failed:
424 	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
425 	dma_unmap_page_attrs(pool->p.dev, dma,
426 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
427 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
428 	return false;
429 }
430 
431 static void page_pool_set_pp_info(struct page_pool *pool,
432 				  struct page *page)
433 {
434 	page->pp = pool;
435 	page->pp_magic |= PP_SIGNATURE;
436 
437 	/* Ensuring all pages have been split into one fragment initially:
438 	 * page_pool_set_pp_info() is only called once for every page when it
439 	 * is allocated from the page allocator and page_pool_fragment_page()
440 	 * dirties the same cache line as the page->pp_magic above, so
441 	 * the overhead is negligible.
442 	 */
443 	page_pool_fragment_page(page, 1);
444 	if (pool->has_init_callback)
445 		pool->slow.init_callback(page, pool->slow.init_arg);
446 }
447 
448 static void page_pool_clear_pp_info(struct page *page)
449 {
450 	page->pp_magic = 0;
451 	page->pp = NULL;
452 }
453 
454 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
455 						 gfp_t gfp)
456 {
457 	struct page *page;
458 
459 	gfp |= __GFP_COMP;
460 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
461 	if (unlikely(!page))
462 		return NULL;
463 
464 	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
465 	    unlikely(!page_pool_dma_map(pool, page))) {
466 		put_page(page);
467 		return NULL;
468 	}
469 
470 	alloc_stat_inc(pool, slow_high_order);
471 	page_pool_set_pp_info(pool, page);
472 
473 	/* Track how many pages are held 'in-flight' */
474 	pool->pages_state_hold_cnt++;
475 	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
476 	return page;
477 }
478 
479 /* slow path */
480 noinline
481 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
482 						 gfp_t gfp)
483 {
484 	const int bulk = PP_ALLOC_CACHE_REFILL;
485 	unsigned int pp_flags = pool->p.flags;
486 	unsigned int pp_order = pool->p.order;
487 	struct page *page;
488 	int i, nr_pages;
489 
490 	/* Don't support bulk alloc for high-order pages */
491 	if (unlikely(pp_order))
492 		return __page_pool_alloc_page_order(pool, gfp);
493 
494 	/* Unnecessary as alloc cache is empty, but guarantees zero count */
495 	if (unlikely(pool->alloc.count > 0))
496 		return pool->alloc.cache[--pool->alloc.count];
497 
498 	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
499 	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
500 
501 	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
502 					       pool->alloc.cache);
503 	if (unlikely(!nr_pages))
504 		return NULL;
505 
506 	/* Pages have been filled into alloc.cache array, but count is zero and
507 	 * the page elements have not yet been DMA mapped (when required).
508 	 */
509 	for (i = 0; i < nr_pages; i++) {
510 		page = pool->alloc.cache[i];
511 		if ((pp_flags & PP_FLAG_DMA_MAP) &&
512 		    unlikely(!page_pool_dma_map(pool, page))) {
513 			put_page(page);
514 			continue;
515 		}
516 
517 		page_pool_set_pp_info(pool, page);
518 		pool->alloc.cache[pool->alloc.count++] = page;
519 		/* Track how many pages are held 'in-flight' */
520 		pool->pages_state_hold_cnt++;
521 		trace_page_pool_state_hold(pool, page,
522 					   pool->pages_state_hold_cnt);
523 	}
524 
525 	/* Return last page */
526 	if (likely(pool->alloc.count > 0)) {
527 		page = pool->alloc.cache[--pool->alloc.count];
528 		alloc_stat_inc(pool, slow);
529 	} else {
530 		page = NULL;
531 	}
532 
533 	/* A page that was just allocated should/must have refcnt 1. */
534 	return page;
535 }
536 
537 /* page_pool is used to replace alloc_pages() API calls, but the caller
538  * must provide the synchronization guarantee for the allocation side.
539  */
540 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
541 {
542 	struct page *page;
543 
544 	/* Fast-path: Get a page from cache */
545 	page = __page_pool_get_cached(pool);
546 	if (page)
547 		return page;
548 
549 	/* Slow-path: cache empty, do real allocation */
550 	page = __page_pool_alloc_pages_slow(pool, gfp);
551 	return page;
552 }
553 EXPORT_SYMBOL(page_pool_alloc_pages);
554 ALLOW_ERROR_INJECTION(page_pool_alloc_pages, NULL);
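
/* Illustrative sketch of an RX refill loop built on the allocation API
 * above.  "rxq" and rxq_post_buffer() are hypothetical driver code; with
 * PP_FLAG_DMA_MAP the pool has already mapped the page, so the driver only
 * needs the stored DMA address.
 *
 *	while (rxq->missing_bufs) {
 *		struct page *page = page_pool_dev_alloc_pages(rxq->pool);
 *
 *		if (!page)
 *			break;
 *		rxq_post_buffer(rxq, page, page_pool_get_dma_addr(page));
 *		rxq->missing_bufs--;
 *	}
 */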
555 
556 /* Calculate distance between two u32 values, valid if distance is below 2^(31)
557  *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
558  */
559 #define _distance(a, b)	(s32)((a) - (b))
560 
561 s32 page_pool_inflight(const struct page_pool *pool, bool strict)
562 {
563 	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
564 	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
565 	s32 inflight;
566 
567 	inflight = _distance(hold_cnt, release_cnt);
568 
569 	if (strict) {
570 		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
571 		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
572 		     inflight);
573 	} else {
574 		inflight = max(0, inflight);
575 	}
576 
577 	return inflight;
578 }
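
/* Worked example of the wrap-safe accounting above: if hold_cnt has wrapped
 * around to 2 while release_cnt is still at U32_MAX - 1, the u32 subtraction
 * gives 2 - (U32_MAX - 1) == 4 (mod 2^32), and the cast to s32 keeps it
 * positive, so 4 pages are correctly reported inflight across the wrap.
 */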
579 
580 static __always_inline
581 void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
582 {
583 	dma_addr_t dma;
584 
585 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
586 		/* Always account for inflight pages, even if we didn't
587 		 * map them
588 		 */
589 		return;
590 
591 	dma = page_pool_get_dma_addr(page);
592 
593 	/* When page is unmapped, it cannot be returned to our pool */
594 	dma_unmap_page_attrs(pool->p.dev, dma,
595 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
596 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
597 	page_pool_set_dma_addr(page, 0);
598 }
599 
600 /* Disconnects a page from a page_pool.  API users may need this
601  * to allow the page to be used as
602  * a regular page (that will eventually be returned to the normal
603  * page-allocator via put_page).
604  */
605 void page_pool_return_page(struct page_pool *pool, struct page *page)
606 {
607 	int count;
608 
609 	__page_pool_release_page_dma(pool, page);
610 
611 	page_pool_clear_pp_info(page);
612 
613 	/* This may be the last page returned, releasing the pool, so
614 	 * it is not safe to reference pool afterwards.
615 	 */
616 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
617 	trace_page_pool_state_release(pool, page, count);
618 
619 	put_page(page);
620 	/* An optimization would be to call __free_pages(page, pool->p.order)
621 	 * knowing page is not part of page-cache (thus avoiding a
622 	 * __page_cache_release() call).
623 	 */
624 }
625 
626 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
627 {
628 	int ret;
629 	/* BH protection not needed if current is softirq */
630 	if (in_softirq())
631 		ret = ptr_ring_produce(&pool->ring, page);
632 	else
633 		ret = ptr_ring_produce_bh(&pool->ring, page);
634 
635 	if (!ret) {
636 		recycle_stat_inc(pool, ring);
637 		return true;
638 	}
639 
640 	return false;
641 }
642 
643 /* Only allow direct recycling in special circumstances, into the
644  * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
645  *
646  * Caller must provide appropriate safe context.
647  */
648 static bool page_pool_recycle_in_cache(struct page *page,
649 				       struct page_pool *pool)
650 {
651 	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
652 		recycle_stat_inc(pool, cache_full);
653 		return false;
654 	}
655 
656 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
657 	pool->alloc.cache[pool->alloc.count++] = page;
658 	recycle_stat_inc(pool, cached);
659 	return true;
660 }
661 
662 static bool __page_pool_page_can_be_recycled(const struct page *page)
663 {
664 	return page_ref_count(page) == 1 && !page_is_pfmemalloc(page);
665 }
666 
667 /* If the page refcnt == 1, this will try to recycle the page.
668  * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
669  * the configured size min(dma_sync_size, pool->max_len).
670  * If the page refcnt != 1, then the page will be returned to the memory
671  * subsystem.
672  */
673 static __always_inline struct page *
674 __page_pool_put_page(struct page_pool *pool, struct page *page,
675 		     unsigned int dma_sync_size, bool allow_direct)
676 {
677 	lockdep_assert_no_hardirq();
678 
679 	/* This allocator is optimized for the XDP mode that uses
680 	 * one-frame-per-page, but has fallbacks that act like the
681 	 * regular page allocator APIs.
682 	 *
683 	 * refcnt == 1 means page_pool owns page, and can recycle it.
684 	 *
685 	 * A page is NOT reusable when it was allocated while the system was
686 	 * under memory pressure (page_is_pfmemalloc()).
687 	 */
688 	if (likely(__page_pool_page_can_be_recycled(page))) {
689 		/* Read barrier done in page_ref_count / READ_ONCE */
690 
691 		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
692 			page_pool_dma_sync_for_device(pool, page,
693 						      dma_sync_size);
694 
695 		if (allow_direct && page_pool_recycle_in_cache(page, pool))
696 			return NULL;
697 
698 		/* Page found as candidate for recycling */
699 		return page;
700 	}
701 	/* Fallback/non-XDP mode: API user has an elevated refcnt.
702 	 *
703 	 * Many drivers split up the page into fragments, and some
704 	 * want to keep doing this to save memory and do refcnt based
705 	 * recycling. Support this use case too, to ease drivers
706 	 * switching between XDP/non-XDP.
707 	 *
708 	 * In case page_pool maintains the DMA mapping, the API user must
709 	 * call page_pool_put_page() once.  In this elevated refcnt case,
710 	 * the DMA is unmapped/released, as the driver is likely doing
711 	 * refcnt-based recycle tricks, meaning another process will be
712 	 * invoking put_page().
713 	 */
714 	recycle_stat_inc(pool, released_refcnt);
715 	page_pool_return_page(pool, page);
716 
717 	return NULL;
718 }
719 
720 static bool page_pool_napi_local(const struct page_pool *pool)
721 {
722 	const struct napi_struct *napi;
723 	u32 cpuid;
724 
725 	if (unlikely(!in_softirq()))
726 		return false;
727 
728 	/* Allow direct recycle if we have reasons to believe that we are
729 	 * in the same context in which the consumer would run, so there's
730 	 * no possible race.
731 	 * __page_pool_put_page() makes sure we're not in hardirq context
732 	 * and interrupts are enabled prior to accessing the cache.
733 	 */
734 	cpuid = smp_processor_id();
735 	if (READ_ONCE(pool->cpuid) == cpuid)
736 		return true;
737 
738 	napi = READ_ONCE(pool->p.napi);
739 
740 	return napi && READ_ONCE(napi->list_owner) == cpuid;
741 }
742 
743 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
744 				unsigned int dma_sync_size, bool allow_direct)
745 {
746 	if (!allow_direct)
747 		allow_direct = page_pool_napi_local(pool);
748 
749 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
750 	if (page && !page_pool_recycle_in_ring(pool, page)) {
751 		/* Cache full, fallback to free pages */
752 		recycle_stat_inc(pool, ring_full);
753 		page_pool_return_page(pool, page);
754 	}
755 }
756 EXPORT_SYMBOL(page_pool_put_unrefed_page);
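
/* Illustrative sketch: drivers normally reach this path through the helpers
 * in <net/page_pool/helpers.h>.  Assuming "rxq" is hypothetical driver state
 * and the call is made from the RX NAPI poll loop:
 *
 *	page_pool_put_full_page(rxq->pool, page, true);
 *
 * or, for the XDP_DROP fast path, the equivalent shorthand:
 *
 *	page_pool_recycle_direct(rxq->pool, page);
 */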
757 
758 /**
759  * page_pool_put_page_bulk() - release references on multiple pages
760  * @pool:	pool from which pages were allocated
761  * @data:	array holding page pointers
762  * @count:	number of pages in @data
763  *
764  * Tries to refill a number of pages into the ptr_ring cache while holding the
765  * producer lock. If the ptr_ring is full, page_pool_put_page_bulk()
766  * will release leftover pages to the page allocator.
767  * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx
768  * completion loop for the XDP_REDIRECT use case.
769  *
770  * Please note the caller must not use the @data array after running
771  * page_pool_put_page_bulk(), as this function overwrites it.
772  */
773 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
774 			     int count)
775 {
776 	int i, bulk_len = 0;
777 	bool allow_direct;
778 	bool in_softirq;
779 
780 	allow_direct = page_pool_napi_local(pool);
781 
782 	for (i = 0; i < count; i++) {
783 		struct page *page = virt_to_head_page(data[i]);
784 
785 		/* It is not the last user for the page frag case */
786 		if (!page_pool_is_last_ref(page))
787 			continue;
788 
789 		page = __page_pool_put_page(pool, page, -1, allow_direct);
790 		/* Approved for bulk recycling in ptr_ring cache */
791 		if (page)
792 			data[bulk_len++] = page;
793 	}
794 
795 	if (!bulk_len)
796 		return;
797 
798 	/* Bulk producer into ptr_ring page_pool cache */
799 	in_softirq = page_pool_producer_lock(pool);
800 	for (i = 0; i < bulk_len; i++) {
801 		if (__ptr_ring_produce(&pool->ring, data[i])) {
802 			/* ring full */
803 			recycle_stat_inc(pool, ring_full);
804 			break;
805 		}
806 	}
807 	recycle_stat_add(pool, ring, i);
808 	page_pool_producer_unlock(pool, in_softirq);
809 
810 	/* Hopefully all pages were returned into the ptr_ring */
811 	if (likely(i == bulk_len))
812 		return;
813 
814 	/* ptr_ring cache full, free remaining pages outside producer lock
815 	 * since put_page() with refcnt == 1 can be an expensive operation
816 	 */
817 	for (; i < bulk_len; i++)
818 		page_pool_return_page(pool, data[i]);
819 }
820 EXPORT_SYMBOL(page_pool_put_page_bulk);
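
/* Illustrative sketch: this is normally reached via xdp_return_frame_bulk()
 * from an XDP_REDIRECT TX-completion loop.  A simplified view, assuming all
 * completed frames came from the same MEM_TYPE_PAGE_POOL pool and that
 * next_completed_frame() is a hypothetical driver helper:
 *
 *	void *bulk[XDP_BULK_QUEUE_SIZE];
 *	struct xdp_frame *xdpf;
 *	int n = 0;
 *
 *	while (n < XDP_BULK_QUEUE_SIZE && (xdpf = next_completed_frame(cq)))
 *		bulk[n++] = xdpf->data;
 *
 *	page_pool_put_page_bulk(pool, bulk, n);
 */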
821 
822 static struct page *page_pool_drain_frag(struct page_pool *pool,
823 					 struct page *page)
824 {
825 	long drain_count = BIAS_MAX - pool->frag_users;
826 
827 	/* Some user is still using the page frag */
828 	if (likely(page_pool_unref_page(page, drain_count)))
829 		return NULL;
830 
831 	if (__page_pool_page_can_be_recycled(page)) {
832 		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
833 			page_pool_dma_sync_for_device(pool, page, -1);
834 
835 		return page;
836 	}
837 
838 	page_pool_return_page(pool, page);
839 	return NULL;
840 }
841 
842 static void page_pool_free_frag(struct page_pool *pool)
843 {
844 	long drain_count = BIAS_MAX - pool->frag_users;
845 	struct page *page = pool->frag_page;
846 
847 	pool->frag_page = NULL;
848 
849 	if (!page || page_pool_unref_page(page, drain_count))
850 		return;
851 
852 	page_pool_return_page(pool, page);
853 }
854 
855 struct page *page_pool_alloc_frag(struct page_pool *pool,
856 				  unsigned int *offset,
857 				  unsigned int size, gfp_t gfp)
858 {
859 	unsigned int max_size = PAGE_SIZE << pool->p.order;
860 	struct page *page = pool->frag_page;
861 
862 	if (WARN_ON(size > max_size))
863 		return NULL;
864 
865 	size = ALIGN(size, dma_get_cache_alignment());
866 	*offset = pool->frag_offset;
867 
868 	if (page && *offset + size > max_size) {
869 		page = page_pool_drain_frag(pool, page);
870 		if (page) {
871 			alloc_stat_inc(pool, fast);
872 			goto frag_reset;
873 		}
874 	}
875 
876 	if (!page) {
877 		page = page_pool_alloc_pages(pool, gfp);
878 		if (unlikely(!page)) {
879 			pool->frag_page = NULL;
880 			return NULL;
881 		}
882 
883 		pool->frag_page = page;
884 
885 frag_reset:
886 		pool->frag_users = 1;
887 		*offset = 0;
888 		pool->frag_offset = size;
889 		page_pool_fragment_page(page, BIAS_MAX);
890 		return page;
891 	}
892 
893 	pool->frag_users++;
894 	pool->frag_offset = *offset + size;
895 	alloc_stat_inc(pool, fast);
896 	return page;
897 }
898 EXPORT_SYMBOL(page_pool_alloc_frag);
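
/* Illustrative sketch of filling an RX descriptor from the frag API above.
 * "rxq", "rx_desc" and buf_len are hypothetical; the returned offset is the
 * byte offset of this fragment within the (possibly high-order) page.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(rxq->pool, &offset, rxq->buf_len,
 *				    GFP_ATOMIC | __GFP_NOWARN);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	rx_desc->addr = page_pool_get_dma_addr(page) + offset;
 *	rx_desc->len  = rxq->buf_len;
 */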
899 
900 static void page_pool_empty_ring(struct page_pool *pool)
901 {
902 	struct page *page;
903 
904 	/* Empty recycle ring */
905 	while ((page = ptr_ring_consume_bh(&pool->ring))) {
906 		/* Verify the refcnt invariant of cached pages */
907 		if (!(page_ref_count(page) == 1))
908 			pr_crit("%s() page_pool refcnt %d violation\n",
909 				__func__, page_ref_count(page));
910 
911 		page_pool_return_page(pool, page);
912 	}
913 }
914 
915 static void __page_pool_destroy(struct page_pool *pool)
916 {
917 	if (pool->disconnect)
918 		pool->disconnect(pool);
919 
920 	page_pool_unlist(pool);
921 	page_pool_uninit(pool);
922 	kfree(pool);
923 }
924 
925 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
926 {
927 	struct page *page;
928 
929 	if (pool->destroy_cnt)
930 		return;
931 
932 	/* Empty alloc cache, assume caller made sure this is
933 	 * no longer in use, and page_pool_alloc_pages() cannot be
934 	 * called concurrently.
935 	 */
936 	while (pool->alloc.count) {
937 		page = pool->alloc.cache[--pool->alloc.count];
938 		page_pool_return_page(pool, page);
939 	}
940 }
941 
942 static void page_pool_scrub(struct page_pool *pool)
943 {
944 	page_pool_empty_alloc_cache_once(pool);
945 	pool->destroy_cnt++;
946 
947 	/* No more consumers should exist, but producers could still
948 	 * be in-flight.
949 	 */
950 	page_pool_empty_ring(pool);
951 }
952 
953 static int page_pool_release(struct page_pool *pool)
954 {
955 	int inflight;
956 
957 	page_pool_scrub(pool);
958 	inflight = page_pool_inflight(pool, true);
959 	if (!inflight)
960 		__page_pool_destroy(pool);
961 
962 	return inflight;
963 }
964 
965 static void page_pool_release_retry(struct work_struct *wq)
966 {
967 	struct delayed_work *dwq = to_delayed_work(wq);
968 	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
969 	void *netdev;
970 	int inflight;
971 
972 	inflight = page_pool_release(pool);
973 	if (!inflight)
974 		return;
975 
976 	/* Periodic warning for page pools the user can't see */
977 	netdev = READ_ONCE(pool->slow.netdev);
978 	if (time_after_eq(jiffies, pool->defer_warn) &&
979 	    (!netdev || netdev == NET_PTR_POISON)) {
980 		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
981 
982 		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
983 			__func__, pool->user.id, inflight, sec);
984 		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
985 	}
986 
987 	/* Still not ready to be disconnected, retry later */
988 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
989 }
990 
991 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
992 			   const struct xdp_mem_info *mem)
993 {
994 	refcount_inc(&pool->user_cnt);
995 	pool->disconnect = disconnect;
996 	pool->xdp_mem_id = mem->id;
997 }
998 
999 static void page_pool_disable_direct_recycling(struct page_pool *pool)
1000 {
1001 	/* Disable direct recycling based on pool->cpuid.
1002 	 * Paired with READ_ONCE() in page_pool_napi_local().
1003 	 */
1004 	WRITE_ONCE(pool->cpuid, -1);
1005 
1006 	if (!pool->p.napi)
1007 		return;
1008 
1009 	/* To avoid races with recycling (and the need for additional barriers),
1010 	 * make sure pool and NAPI are unlinked while NAPI is disabled.
1011 	 */
1012 	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
1013 		READ_ONCE(pool->p.napi->list_owner) != -1);
1014 
1015 	WRITE_ONCE(pool->p.napi, NULL);
1016 }
1017 
1018 void page_pool_destroy(struct page_pool *pool)
1019 {
1020 	if (!pool)
1021 		return;
1022 
1023 	if (!page_pool_put(pool))
1024 		return;
1025 
1026 	page_pool_disable_direct_recycling(pool);
1027 	page_pool_free_frag(pool);
1028 
1029 	if (!page_pool_release(pool))
1030 		return;
1031 
1032 	page_pool_detached(pool);
1033 	pool->defer_start = jiffies;
1034 	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
1035 
1036 	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
1037 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
1038 }
1039 EXPORT_SYMBOL(page_pool_destroy);
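
/* Illustrative sketch of the usual teardown order in a driver close path.
 * In-flight pages keep the pool alive; the delayed work above retries until
 * the last page is returned.  "rxq" and my_rxq_free_buffers() are
 * hypothetical driver code.
 *
 *	napi_disable(&rxq->napi);
 *	my_rxq_free_buffers(rxq);
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *	page_pool_destroy(rxq->pool);
 */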
1040 
1041 /* Caller must provide appropriate safe context, e.g. NAPI. */
1042 void page_pool_update_nid(struct page_pool *pool, int new_nid)
1043 {
1044 	struct page *page;
1045 
1046 	trace_page_pool_update_nid(pool, new_nid);
1047 	pool->p.nid = new_nid;
1048 
1049 	/* Flush pool alloc cache, as refill will check NUMA node */
1050 	while (pool->alloc.count) {
1051 		page = pool->alloc.cache[--pool->alloc.count];
1052 		page_pool_return_page(pool, page);
1053 	}
1054 }
1055 EXPORT_SYMBOL(page_pool_update_nid);
1056