xref: /linux/net/core/page_pool.c (revision 9c736ace0666efe68efd53fcdfa2c6653c3e0e72)
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * page_pool.c
4  *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
5  *	Copyright (C) 2016 Red Hat, Inc.
6  */
7 
8 #include <linux/error-injection.h>
9 #include <linux/types.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 
14 #include <net/netdev_lock.h>
15 #include <net/netdev_rx_queue.h>
16 #include <net/page_pool/helpers.h>
17 #include <net/page_pool/memory_provider.h>
18 #include <net/xdp.h>
19 
20 #include <linux/dma-direction.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/page-flags.h>
23 #include <linux/mm.h> /* for put_page() */
24 #include <linux/poison.h>
25 #include <linux/ethtool.h>
26 #include <linux/netdevice.h>
27 
28 #include <trace/events/page_pool.h>
29 
30 #include "dev.h"
31 #include "mp_dmabuf_devmem.h"
32 #include "netmem_priv.h"
33 #include "page_pool_priv.h"
34 
35 DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);
36 
37 #define DEFER_TIME (msecs_to_jiffies(1000))
38 #define DEFER_WARN_INTERVAL (60 * HZ)
39 
40 #define BIAS_MAX	(LONG_MAX >> 1)
41 
42 #ifdef CONFIG_PAGE_POOL_STATS
43 static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
44 
45 /* alloc_stat_inc is intended to be used in softirq context */
46 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
47 /* recycle_stat_inc is safe to use when preemption is possible. */
48 #define recycle_stat_inc(pool, __stat)							\
49 	do {										\
50 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
51 		this_cpu_inc(s->__stat);						\
52 	} while (0)
53 
54 #define recycle_stat_add(pool, __stat, val)						\
55 	do {										\
56 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
57 		this_cpu_add(s->__stat, val);						\
58 	} while (0)
59 
60 static const char pp_stats[][ETH_GSTRING_LEN] = {
61 	"rx_pp_alloc_fast",
62 	"rx_pp_alloc_slow",
63 	"rx_pp_alloc_slow_ho",
64 	"rx_pp_alloc_empty",
65 	"rx_pp_alloc_refill",
66 	"rx_pp_alloc_waive",
67 	"rx_pp_recycle_cached",
68 	"rx_pp_recycle_cache_full",
69 	"rx_pp_recycle_ring",
70 	"rx_pp_recycle_ring_full",
71 	"rx_pp_recycle_released_ref",
72 };
73 
74 /**
75  * page_pool_get_stats() - fetch page pool stats
76  * @pool:	pool from which page was allocated
77  * @stats:	struct page_pool_stats to fill in
78  *
79  * Retrieve statistics about the page_pool. This API is only available
80  * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 81  * A pointer to a caller-allocated struct page_pool_stats structure
 82  * is passed to this API, which fills it in. The caller can then report
83  * those stats to the user (perhaps via ethtool, debugfs, etc.).
84  */
 85 bool page_pool_get_stats(const struct page_pool *pool,
86 			 struct page_pool_stats *stats)
87 {
88 	int cpu = 0;
89 
90 	if (!stats)
91 		return false;
92 
 93 	/* The caller is responsible for initializing stats. */
94 	stats->alloc_stats.fast += pool->alloc_stats.fast;
95 	stats->alloc_stats.slow += pool->alloc_stats.slow;
96 	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
97 	stats->alloc_stats.empty += pool->alloc_stats.empty;
98 	stats->alloc_stats.refill += pool->alloc_stats.refill;
99 	stats->alloc_stats.waive += pool->alloc_stats.waive;
100 
101 	for_each_possible_cpu(cpu) {
102 		const struct page_pool_recycle_stats *pcpu =
103 			per_cpu_ptr(pool->recycle_stats, cpu);
104 
105 		stats->recycle_stats.cached += pcpu->cached;
106 		stats->recycle_stats.cache_full += pcpu->cache_full;
107 		stats->recycle_stats.ring += pcpu->ring;
108 		stats->recycle_stats.ring_full += pcpu->ring_full;
109 		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
110 	}
111 
112 	return true;
113 }
114 EXPORT_SYMBOL(page_pool_get_stats);
115 
 116 u8 *page_pool_ethtool_stats_get_strings(u8 *data)
117 {
118 	int i;
119 
120 	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
121 		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
122 		data += ETH_GSTRING_LEN;
123 	}
124 
125 	return data;
126 }
127 EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
128 
 129 int page_pool_ethtool_stats_get_count(void)
130 {
131 	return ARRAY_SIZE(pp_stats);
132 }
133 EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
134 
 135 u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
136 {
137 	const struct page_pool_stats *pool_stats = stats;
138 
139 	*data++ = pool_stats->alloc_stats.fast;
140 	*data++ = pool_stats->alloc_stats.slow;
141 	*data++ = pool_stats->alloc_stats.slow_high_order;
142 	*data++ = pool_stats->alloc_stats.empty;
143 	*data++ = pool_stats->alloc_stats.refill;
144 	*data++ = pool_stats->alloc_stats.waive;
145 	*data++ = pool_stats->recycle_stats.cached;
146 	*data++ = pool_stats->recycle_stats.cache_full;
147 	*data++ = pool_stats->recycle_stats.ring;
148 	*data++ = pool_stats->recycle_stats.ring_full;
149 	*data++ = pool_stats->recycle_stats.released_refcnt;
150 
151 	return data;
152 }
153 EXPORT_SYMBOL(page_pool_ethtool_stats_get);
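
/* Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver could expose these counters through its ethtool hooks roughly as
 * below.  "struct my_priv" and "priv->rx_pool" are assumed names.
 */
#if 0	/* compile-guarded example */
static void my_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *es, u64 *data)
{
	struct my_priv *priv = netdev_priv(dev);
	struct page_pool_stats stats = {};	/* caller must zero-initialize */

	/* Accumulate stats from the pool backing the RX queue */
	if (page_pool_get_stats(priv->rx_pool, &stats))
		data = page_pool_ethtool_stats_get(data, &stats);
}

static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		data = page_pool_ethtool_stats_get_strings(data);
}

static int my_get_sset_count(struct net_device *dev, int sset)
{
	return sset == ETH_SS_STATS ? page_pool_ethtool_stats_get_count() : 0;
}
#endif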
154 
155 #else
156 #define alloc_stat_inc(...)	do { } while (0)
157 #define recycle_stat_inc(...)	do { } while (0)
158 #define recycle_stat_add(...)	do { } while (0)
159 #endif
160 
 161 static bool page_pool_producer_lock(struct page_pool *pool)
162 	__acquires(&pool->ring.producer_lock)
163 {
164 	bool in_softirq = in_softirq();
165 
166 	if (in_softirq)
167 		spin_lock(&pool->ring.producer_lock);
168 	else
169 		spin_lock_bh(&pool->ring.producer_lock);
170 
171 	return in_softirq;
172 }
173 
 174 static void page_pool_producer_unlock(struct page_pool *pool,
175 				      bool in_softirq)
176 	__releases(&pool->ring.producer_lock)
177 {
178 	if (in_softirq)
179 		spin_unlock(&pool->ring.producer_lock);
180 	else
181 		spin_unlock_bh(&pool->ring.producer_lock);
182 }
183 
 184 static void page_pool_struct_check(void)
185 {
186 	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
187 	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
188 	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
189 	CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
190 				    PAGE_POOL_FRAG_GROUP_ALIGN);
191 }
192 
 193 static int page_pool_init(struct page_pool *pool,
194 			  const struct page_pool_params *params,
195 			  int cpuid)
196 {
197 	unsigned int ring_qsize = 1024; /* Default */
198 	struct netdev_rx_queue *rxq;
199 	int err;
200 
201 	page_pool_struct_check();
202 
203 	memcpy(&pool->p, &params->fast, sizeof(pool->p));
204 	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
205 
206 	pool->cpuid = cpuid;
207 	pool->dma_sync_for_cpu = true;
208 
209 	/* Validate only known flags were used */
210 	if (pool->slow.flags & ~PP_FLAG_ALL)
211 		return -EINVAL;
212 
213 	if (pool->p.pool_size)
214 		ring_qsize = pool->p.pool_size;
215 
216 	/* Sanity limit mem that can be pinned down */
217 	if (ring_qsize > 32768)
218 		return -E2BIG;
219 
220 	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
 221 	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA sending,
222 	 * which is the XDP_TX use-case.
223 	 */
224 	if (pool->slow.flags & PP_FLAG_DMA_MAP) {
225 		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
226 		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
227 			return -EINVAL;
228 
229 		pool->dma_map = true;
230 	}
231 
232 	if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
233 		/* In order to request DMA-sync-for-device the page
234 		 * needs to be mapped
235 		 */
236 		if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
237 			return -EINVAL;
238 
239 		if (!pool->p.max_len)
240 			return -EINVAL;
241 
242 		pool->dma_sync = true;
243 
244 		/* pool->p.offset has to be set according to the address
245 		 * offset used by the DMA engine to start copying rx data
246 		 */
247 	}
248 
249 	pool->has_init_callback = !!pool->slow.init_callback;
250 
251 #ifdef CONFIG_PAGE_POOL_STATS
252 	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
253 		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
254 		if (!pool->recycle_stats)
255 			return -ENOMEM;
256 	} else {
257 		/* For system page pool instance we use a singular stats object
258 		 * instead of allocating a separate percpu variable for each
259 		 * (also percpu) page pool instance.
260 		 */
261 		pool->recycle_stats = &pp_system_recycle_stats;
262 		pool->system = true;
263 	}
264 #endif
265 
266 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
267 #ifdef CONFIG_PAGE_POOL_STATS
268 		if (!pool->system)
269 			free_percpu(pool->recycle_stats);
270 #endif
271 		return -ENOMEM;
272 	}
273 
274 	atomic_set(&pool->pages_state_release_cnt, 0);
275 
 276 	/* A driver calling page_pool_create() must also call page_pool_destroy() */
277 	refcount_set(&pool->user_cnt, 1);
278 
279 	xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1);
280 
281 	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
282 		netdev_assert_locked(pool->slow.netdev);
283 		rxq = __netif_get_rx_queue(pool->slow.netdev,
284 					   pool->slow.queue_idx);
285 		pool->mp_priv = rxq->mp_params.mp_priv;
286 		pool->mp_ops = rxq->mp_params.mp_ops;
287 	}
288 
289 	if (pool->mp_ops) {
290 		if (!pool->dma_map || !pool->dma_sync) {
291 			err = -EOPNOTSUPP;
292 			goto free_ptr_ring;
293 		}
294 
295 		if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
296 			err = -EFAULT;
297 			goto free_ptr_ring;
298 		}
299 
300 		err = pool->mp_ops->init(pool);
301 		if (err) {
302 			pr_warn("%s() mem-provider init failed %d\n", __func__,
303 				err);
304 			goto free_ptr_ring;
305 		}
306 
307 		static_branch_inc(&page_pool_mem_providers);
308 	}
309 
310 	return 0;
311 
312 free_ptr_ring:
313 	ptr_ring_cleanup(&pool->ring, NULL);
314 #ifdef CONFIG_PAGE_POOL_STATS
315 	if (!pool->system)
316 		free_percpu(pool->recycle_stats);
317 #endif
318 	return err;
319 }
320 
 321 static void page_pool_uninit(struct page_pool *pool)
322 {
323 	ptr_ring_cleanup(&pool->ring, NULL);
324 	xa_destroy(&pool->dma_mapped);
325 
326 #ifdef CONFIG_PAGE_POOL_STATS
327 	if (!pool->system)
328 		free_percpu(pool->recycle_stats);
329 #endif
330 }
331 
332 /**
333  * page_pool_create_percpu() - create a page pool for a given cpu.
334  * @params: parameters, see struct page_pool_params
335  * @cpuid: cpu identifier
336  */
337 struct page_pool *
 338 page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
339 {
340 	struct page_pool *pool;
341 	int err;
342 
343 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
344 	if (!pool)
345 		return ERR_PTR(-ENOMEM);
346 
347 	err = page_pool_init(pool, params, cpuid);
348 	if (err < 0)
349 		goto err_free;
350 
351 	err = page_pool_list(pool);
352 	if (err)
353 		goto err_uninit;
354 
355 	return pool;
356 
357 err_uninit:
358 	page_pool_uninit(pool);
359 err_free:
360 	pr_warn("%s() gave up with errno %d\n", __func__, err);
361 	kfree(pool);
362 	return ERR_PTR(err);
363 }
364 EXPORT_SYMBOL(page_pool_create_percpu);
365 
366 /**
367  * page_pool_create() - create a page pool
368  * @params: parameters, see struct page_pool_params
369  */
 370 struct page_pool *page_pool_create(const struct page_pool_params *params)
371 {
372 	return page_pool_create_percpu(params, -1);
373 }
374 EXPORT_SYMBOL(page_pool_create);
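
/* Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver creating a DMA-mapping, DMA-syncing pool for one RX ring.  The
 * "struct my_rx_ring" layout and the chosen sizes are assumptions; real
 * drivers derive them from their descriptor format and headroom needs.
 */
#if 0	/* compile-guarded example */
static int my_create_rx_page_pool(struct my_rx_ring *ring)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= ring->size,
		.nid		= NUMA_NO_NODE,
		.dev		= ring->dev,		/* device doing the DMA */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,		/* max RX data length to sync */
		.offset		= 0,			/* headroom before RX data */
		.napi		= &ring->napi,
	};

	ring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(ring->page_pool))
		return PTR_ERR(ring->page_pool);

	return 0;
}
#endif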
375 
376 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
377 
 378 static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
379 {
380 	struct ptr_ring *r = &pool->ring;
381 	netmem_ref netmem;
382 	int pref_nid; /* preferred NUMA node */
383 
384 	/* Quicker fallback, avoid locks when ring is empty */
385 	if (__ptr_ring_empty(r)) {
386 		alloc_stat_inc(pool, empty);
387 		return 0;
388 	}
389 
 390 	/* Softirq guarantees the CPU and thus the NUMA node are stable. This
 391 	 * assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
392 	 */
393 #ifdef CONFIG_NUMA
394 	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
395 #else
396 	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
397 	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
398 #endif
399 
400 	/* Refill alloc array, but only if NUMA match */
401 	do {
402 		netmem = (__force netmem_ref)__ptr_ring_consume(r);
403 		if (unlikely(!netmem))
404 			break;
405 
406 		if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
407 			pool->alloc.cache[pool->alloc.count++] = netmem;
408 		} else {
409 			/* NUMA mismatch;
410 			 * (1) release 1 page to page-allocator and
 411 			 * (2) break out and fall through to alloc_pages_node.
 412 			 * This limits stress on the page buddy allocator.
413 			 */
414 			page_pool_return_netmem(pool, netmem);
415 			alloc_stat_inc(pool, waive);
416 			netmem = 0;
417 			break;
418 		}
419 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
420 
421 	/* Return last page */
422 	if (likely(pool->alloc.count > 0)) {
423 		netmem = pool->alloc.cache[--pool->alloc.count];
424 		alloc_stat_inc(pool, refill);
425 	}
426 
427 	return netmem;
428 }
429 
430 /* fast path */
 431 static netmem_ref __page_pool_get_cached(struct page_pool *pool)
432 {
433 	netmem_ref netmem;
434 
435 	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
436 	if (likely(pool->alloc.count)) {
437 		/* Fast-path */
438 		netmem = pool->alloc.cache[--pool->alloc.count];
439 		alloc_stat_inc(pool, fast);
440 	} else {
441 		netmem = page_pool_refill_alloc_cache(pool);
442 	}
443 
444 	return netmem;
445 }
446 
 447 static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
448 					    netmem_ref netmem,
449 					    u32 dma_sync_size)
450 {
451 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
452 	dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);
453 
454 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
455 	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
456 				     dma_sync_size, pool->p.dma_dir);
457 #endif
458 }
459 
460 static __always_inline void
 461 page_pool_dma_sync_for_device(const struct page_pool *pool,
462 			      netmem_ref netmem,
463 			      u32 dma_sync_size)
464 {
465 	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) {
466 		rcu_read_lock();
467 		/* re-check under rcu_read_lock() to sync with page_pool_scrub() */
468 		if (pool->dma_sync)
469 			__page_pool_dma_sync_for_device(pool, netmem,
470 							dma_sync_size);
471 		rcu_read_unlock();
472 	}
473 }
474 
 475 static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
476 {
477 	dma_addr_t dma;
478 	int err;
479 	u32 id;
480 
481 	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
482 	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
 483 	 * into page private data (i.e. a 32-bit CPU with 64-bit DMA caps).
 484 	 * The mapping is kept for the page's lifetime, until it leaves the pool.
485 	 */
486 	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
487 				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
488 				 DMA_ATTR_SKIP_CPU_SYNC |
489 					 DMA_ATTR_WEAK_ORDERING);
490 	if (dma_mapping_error(pool->p.dev, dma))
491 		return false;
492 
493 	if (page_pool_set_dma_addr_netmem(netmem, dma)) {
494 		WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
495 		goto unmap_failed;
496 	}
497 
498 	if (in_softirq())
499 		err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
500 			       PP_DMA_INDEX_LIMIT, gfp);
501 	else
502 		err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
503 				  PP_DMA_INDEX_LIMIT, gfp);
504 	if (err) {
505 		WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
506 		goto unset_failed;
507 	}
508 
509 	netmem_set_dma_index(netmem, id);
510 	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
511 
512 	return true;
513 
514 unset_failed:
515 	page_pool_set_dma_addr_netmem(netmem, 0);
516 unmap_failed:
517 	dma_unmap_page_attrs(pool->p.dev, dma,
518 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
519 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
520 	return false;
521 }
522 
 523 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
524 						 gfp_t gfp)
525 {
526 	struct page *page;
527 
528 	gfp |= __GFP_COMP;
529 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
530 	if (unlikely(!page))
531 		return NULL;
532 
533 	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
534 		put_page(page);
535 		return NULL;
536 	}
537 
538 	alloc_stat_inc(pool, slow_high_order);
539 	page_pool_set_pp_info(pool, page_to_netmem(page));
540 
541 	/* Track how many pages are held 'in-flight' */
542 	pool->pages_state_hold_cnt++;
543 	trace_page_pool_state_hold(pool, page_to_netmem(page),
544 				   pool->pages_state_hold_cnt);
545 	return page;
546 }
547 
548 /* slow path */
 549 static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool,
550 							  gfp_t gfp)
551 {
552 	const int bulk = PP_ALLOC_CACHE_REFILL;
553 	unsigned int pp_order = pool->p.order;
554 	bool dma_map = pool->dma_map;
555 	netmem_ref netmem;
556 	int i, nr_pages;
557 
558 	/* Don't support bulk alloc for high-order pages */
559 	if (unlikely(pp_order))
560 		return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));
561 
562 	/* Unnecessary as alloc cache is empty, but guarantees zero count */
563 	if (unlikely(pool->alloc.count > 0))
564 		return pool->alloc.cache[--pool->alloc.count];
565 
566 	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
567 	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
568 
569 	nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
570 					 (struct page **)pool->alloc.cache);
571 	if (unlikely(!nr_pages))
572 		return 0;
573 
574 	/* Pages have been filled into alloc.cache array, but count is zero and
 575 	 * the page elements have not yet been DMA mapped (where required).
576 	 */
577 	for (i = 0; i < nr_pages; i++) {
578 		netmem = pool->alloc.cache[i];
579 		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) {
580 			put_page(netmem_to_page(netmem));
581 			continue;
582 		}
583 
584 		page_pool_set_pp_info(pool, netmem);
585 		pool->alloc.cache[pool->alloc.count++] = netmem;
586 		/* Track how many pages are held 'in-flight' */
587 		pool->pages_state_hold_cnt++;
588 		trace_page_pool_state_hold(pool, netmem,
589 					   pool->pages_state_hold_cnt);
590 	}
591 
592 	/* Return last page */
593 	if (likely(pool->alloc.count > 0)) {
594 		netmem = pool->alloc.cache[--pool->alloc.count];
595 		alloc_stat_inc(pool, slow);
596 	} else {
597 		netmem = 0;
598 	}
599 
 600 	/* A page that was just alloc'ed should/must have refcnt 1. */
601 	return netmem;
602 }
603 
 604 /* For using page_pool to replace alloc_pages() API calls, while providing
 605  * a synchronization guarantee for the allocation side.
606  */
 607 netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
608 {
609 	netmem_ref netmem;
610 
611 	/* Fast-path: Get a page from cache */
612 	netmem = __page_pool_get_cached(pool);
613 	if (netmem)
614 		return netmem;
615 
616 	/* Slow-path: cache empty, do real allocation */
617 	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
618 		netmem = pool->mp_ops->alloc_netmems(pool, gfp);
619 	else
620 		netmem = __page_pool_alloc_netmems_slow(pool, gfp);
621 	return netmem;
622 }
623 EXPORT_SYMBOL(page_pool_alloc_netmems);
624 ALLOW_ERROR_INJECTION(page_pool_alloc_netmems, NULL);
625 
 626 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
627 {
628 	return netmem_to_page(page_pool_alloc_netmems(pool, gfp));
629 }
630 EXPORT_SYMBOL(page_pool_alloc_pages);
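
/* Usage sketch (illustrative only, not part of this file): the common RX
 * refill pattern built on the page-based wrappers from page_pool/helpers.h.
 * page_pool_dev_alloc_pages() is the GFP_ATOMIC wrapper around
 * page_pool_alloc_pages() above; the descriptor layout is an assumption.
 */
#if 0	/* compile-guarded example */
static int my_refill_rx_slot(struct my_rx_ring *ring, struct my_rx_desc *rxd)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(ring->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	/* The pool did the mapping (PP_FLAG_DMA_MAP); just fetch the address */
	rxd->addr = cpu_to_le64(page_pool_get_dma_addr(page) + ring->rx_headroom);
	rxd->page = page;

	return 0;
}
#endif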
631 
632 /* Calculate distance between two u32 values, valid if distance is below 2^(31)
633  *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
634  */
635 #define _distance(a, b)	(s32)((a) - (b))
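/* Worked example: with hold_cnt == 3 after a u32 wrap-around and
 * release_cnt == 0xFFFFFFFE from before the wrap, (s32)(3 - 0xFFFFFFFE) == 5,
 * i.e. five pages are still inflight even though hold_cnt < release_cnt
 * numerically.
 */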
636 
 637 s32 page_pool_inflight(const struct page_pool *pool, bool strict)
638 {
639 	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
640 	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
641 	s32 inflight;
642 
643 	inflight = _distance(hold_cnt, release_cnt);
644 
645 	if (strict) {
646 		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
647 		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
648 		     inflight);
649 	} else {
650 		inflight = max(0, inflight);
651 	}
652 
653 	return inflight;
654 }
655 
 656 void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
657 {
658 	netmem_set_pp(netmem, pool);
659 	netmem_or_pp_magic(netmem, PP_SIGNATURE);
660 
661 	/* Ensuring all pages have been split into one fragment initially:
662 	 * page_pool_set_pp_info() is only called once for every page when it
663 	 * is allocated from the page allocator and page_pool_fragment_page()
664 	 * is dirtying the same cache line as the page->pp_magic above, so
665 	 * the overhead is negligible.
666 	 */
667 	page_pool_fragment_netmem(netmem, 1);
668 	if (pool->has_init_callback)
669 		pool->slow.init_callback(netmem, pool->slow.init_arg);
670 }
671 
 672 void page_pool_clear_pp_info(netmem_ref netmem)
673 {
674 	netmem_clear_pp_magic(netmem);
675 	netmem_set_pp(netmem, NULL);
676 }
677 
 678 static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
679 							   netmem_ref netmem)
680 {
681 	struct page *old, *page = netmem_to_page(netmem);
682 	unsigned long id;
683 	dma_addr_t dma;
684 
685 	if (!pool->dma_map)
686 		/* Always account for inflight pages, even if we didn't
687 		 * map them
688 		 */
689 		return;
690 
691 	id = netmem_get_dma_index(netmem);
692 	if (!id)
693 		return;
694 
695 	if (in_softirq())
696 		old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
697 	else
698 		old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
699 	if (old != page)
700 		return;
701 
702 	dma = page_pool_get_dma_addr_netmem(netmem);
703 
704 	/* When page is unmapped, it cannot be returned to our pool */
705 	dma_unmap_page_attrs(pool->p.dev, dma,
706 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
707 			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
708 	page_pool_set_dma_addr_netmem(netmem, 0);
709 	netmem_set_dma_index(netmem, 0);
710 }
711 
712 /* Disconnects a page (from a page_pool).  API users can have a need
713  * to disconnect a page (from a page_pool), to allow it to be used as
714  * a regular page (that will eventually be returned to the normal
715  * page-allocator via put_page).
716  */
 717 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
718 {
719 	int count;
720 	bool put;
721 
722 	put = true;
723 	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
724 		put = pool->mp_ops->release_netmem(pool, netmem);
725 	else
726 		__page_pool_release_netmem_dma(pool, netmem);
727 
728 	/* This may be the last page returned, releasing the pool, so
729 	 * it is not safe to reference pool afterwards.
730 	 */
731 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
732 	trace_page_pool_state_release(pool, netmem, count);
733 
734 	if (put) {
735 		page_pool_clear_pp_info(netmem);
736 		put_page(netmem_to_page(netmem));
737 	}
738 	/* An optimization would be to call __free_pages(page, pool->p.order)
739 	 * knowing page is not part of page-cache (thus avoiding a
740 	 * __page_cache_release() call).
741 	 */
742 }
743 
 744 static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
745 {
746 	bool in_softirq, ret;
747 
748 	/* BH protection not needed if current is softirq */
749 	in_softirq = page_pool_producer_lock(pool);
750 	ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem);
751 	if (ret)
752 		recycle_stat_inc(pool, ring);
753 	page_pool_producer_unlock(pool, in_softirq);
754 
755 	return ret;
756 }
757 
758 /* Only allow direct recycling in special circumstances, into the
759  * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
760  *
761  * Caller must provide appropriate safe context.
762  */
 763 static bool page_pool_recycle_in_cache(netmem_ref netmem,
764 				       struct page_pool *pool)
765 {
766 	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
767 		recycle_stat_inc(pool, cache_full);
768 		return false;
769 	}
770 
771 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
772 	pool->alloc.cache[pool->alloc.count++] = netmem;
773 	recycle_stat_inc(pool, cached);
774 	return true;
775 }
776 
 777 static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
778 {
779 	return netmem_is_net_iov(netmem) ||
780 	       (page_ref_count(netmem_to_page(netmem)) == 1 &&
781 		!page_is_pfmemalloc(netmem_to_page(netmem)));
782 }
783 
784 /* If the page refcnt == 1, this will try to recycle the page.
785  * If pool->dma_sync is set, we'll try to sync the DMA area for
786  * the configured size min(dma_sync_size, pool->max_len).
787  * If the page refcnt != 1, then the page will be returned to memory
788  * subsystem.
789  */
790 static __always_inline netmem_ref
 791 __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
792 		     unsigned int dma_sync_size, bool allow_direct)
793 {
794 	lockdep_assert_no_hardirq();
795 
796 	/* This allocator is optimized for the XDP mode that uses
 797 	 * one-frame-per-page, but has fallbacks that act like the
798 	 * regular page allocator APIs.
799 	 *
800 	 * refcnt == 1 means page_pool owns page, and can recycle it.
801 	 *
 802 	 * A page is NOT reusable when it was allocated while the system
 803 	 * was under memory pressure (page_is_pfmemalloc).
804 	 */
805 	if (likely(__page_pool_page_can_be_recycled(netmem))) {
806 		/* Read barrier done in page_ref_count / READ_ONCE */
807 
808 		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
809 
810 		if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
811 			return 0;
812 
813 		/* Page found as candidate for recycling */
814 		return netmem;
815 	}
816 
 817 	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
818 	 *
819 	 * Many drivers split up the page into fragments, and some
820 	 * want to keep doing this to save memory and do refcnt based
821 	 * recycling. Support this use case too, to ease drivers
822 	 * switching between XDP/non-XDP.
823 	 *
 824 	 * In case the page_pool maintains the DMA mapping, the API user must
 825 	 * call page_pool_put_page() once.  In this elevated refcnt
826 	 * case, the DMA is unmapped/released, as driver is likely
827 	 * doing refcnt based recycle tricks, meaning another process
828 	 * will be invoking put_page.
829 	 */
830 	recycle_stat_inc(pool, released_refcnt);
831 	page_pool_return_netmem(pool, netmem);
832 
833 	return 0;
834 }
835 
 836 static bool page_pool_napi_local(const struct page_pool *pool)
837 {
838 	const struct napi_struct *napi;
839 	u32 cpuid;
840 
841 	/* On PREEMPT_RT the softirq can be preempted by the consumer */
842 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
843 		return false;
844 
845 	if (unlikely(!in_softirq()))
846 		return false;
847 
848 	/* Allow direct recycle if we have reasons to believe that we are
 849 	 * in the same context in which the consumer would run, so there's
850 	 * no possible race.
851 	 * __page_pool_put_page() makes sure we're not in hardirq context
852 	 * and interrupts are enabled prior to accessing the cache.
853 	 */
854 	cpuid = smp_processor_id();
855 	if (READ_ONCE(pool->cpuid) == cpuid)
856 		return true;
857 
858 	napi = READ_ONCE(pool->p.napi);
859 
860 	return napi && READ_ONCE(napi->list_owner) == cpuid;
861 }
862 
 863 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
864 				  unsigned int dma_sync_size, bool allow_direct)
865 {
866 	if (!allow_direct)
867 		allow_direct = page_pool_napi_local(pool);
868 
869 	netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
870 				      allow_direct);
871 	if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
872 		/* Cache full, fallback to free pages */
873 		recycle_stat_inc(pool, ring_full);
874 		page_pool_return_netmem(pool, netmem);
875 	}
876 }
877 EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
878 
 879 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
880 				unsigned int dma_sync_size, bool allow_direct)
881 {
882 	page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
883 				     allow_direct);
884 }
885 EXPORT_SYMBOL(page_pool_put_unrefed_page);
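
/* Usage sketch (illustrative only, not part of this file): returning a page
 * from a driver XDP_DROP/cleanup path.  page_pool_put_full_page() is the
 * helpers.h wrapper that drops the fragment reference and, on the last
 * reference, ends up in page_pool_put_unrefed_netmem() above.  The ring
 * structure is an assumed name.
 */
#if 0	/* compile-guarded example */
static void my_rx_drop(struct my_rx_ring *ring, struct page *page)
{
	/* allow_direct == true is only safe from this pool's own NAPI context */
	page_pool_put_full_page(ring->page_pool, page, true);
}
#endif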
886 
 887 static void page_pool_recycle_ring_bulk(struct page_pool *pool,
888 					netmem_ref *bulk,
889 					u32 bulk_len)
890 {
891 	bool in_softirq;
892 	u32 i;
893 
894 	/* Bulk produce into ptr_ring page_pool cache */
895 	in_softirq = page_pool_producer_lock(pool);
896 
897 	for (i = 0; i < bulk_len; i++) {
898 		if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
899 			/* ring full */
900 			recycle_stat_inc(pool, ring_full);
901 			break;
902 		}
903 	}
904 
905 	page_pool_producer_unlock(pool, in_softirq);
906 	recycle_stat_add(pool, ring, i);
907 
908 	/* Hopefully all pages were returned into ptr_ring */
909 	if (likely(i == bulk_len))
910 		return;
911 
912 	/*
913 	 * ptr_ring cache is full, free remaining pages outside producer lock
914 	 * since put_page() with refcnt == 1 can be an expensive operation.
915 	 */
916 	for (; i < bulk_len; i++)
917 		page_pool_return_netmem(pool, bulk[i]);
918 }
919 
920 /**
921  * page_pool_put_netmem_bulk() - release references on multiple netmems
922  * @data:	array holding netmem references
923  * @count:	number of entries in @data
924  *
 925  * Tries to refill a number of netmems into the ptr_ring cache while holding the ptr_ring
926  * producer lock. If the ptr_ring is full, page_pool_put_netmem_bulk()
927  * will release leftover netmems to the memory provider.
928  * page_pool_put_netmem_bulk() is suitable to be run inside the driver NAPI tx
929  * completion loop for the XDP_REDIRECT use case.
930  *
 931  * Please note the caller must not use the data area after running
932  * page_pool_put_netmem_bulk(), as this function overwrites it.
933  */
 934 void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
935 {
936 	u32 bulk_len = 0;
937 
938 	for (u32 i = 0; i < count; i++) {
939 		netmem_ref netmem = netmem_compound_head(data[i]);
940 
941 		if (page_pool_unref_and_test(netmem))
942 			data[bulk_len++] = netmem;
943 	}
944 
945 	count = bulk_len;
946 	while (count) {
947 		netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
948 		struct page_pool *pool = NULL;
949 		bool allow_direct;
950 		u32 foreign = 0;
951 
952 		bulk_len = 0;
953 
954 		for (u32 i = 0; i < count; i++) {
955 			struct page_pool *netmem_pp;
956 			netmem_ref netmem = data[i];
957 
958 			netmem_pp = netmem_get_pp(netmem);
959 			if (unlikely(!pool)) {
960 				pool = netmem_pp;
961 				allow_direct = page_pool_napi_local(pool);
962 			} else if (netmem_pp != pool) {
963 				/*
964 				 * If the netmem belongs to a different
965 				 * page_pool, save it for another round.
966 				 */
967 				data[foreign++] = netmem;
968 				continue;
969 			}
970 
971 			netmem = __page_pool_put_page(pool, netmem, -1,
972 						      allow_direct);
973 			/* Approved for bulk recycling in ptr_ring cache */
974 			if (netmem)
975 				bulk[bulk_len++] = netmem;
976 		}
977 
978 		if (bulk_len)
979 			page_pool_recycle_ring_bulk(pool, bulk, bulk_len);
980 
981 		count = foreign;
982 	}
983 }
984 EXPORT_SYMBOL(page_pool_put_netmem_bulk);
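
/* Usage sketch (illustrative only, not part of this file): a TX-completion
 * style loop batching returns, similar in spirit to the xdp_frame_bulk
 * machinery.  XDP_BULK_QUEUE_SIZE comes from <net/xdp.h>;
 * my_completed_page() is an assumed helper.
 */
#if 0	/* compile-guarded example */
static void my_tx_clean(struct my_tx_ring *ring, int budget)
{
	netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
	u32 n = 0;

	while (budget--) {
		struct page *page = my_completed_page(ring);

		if (!page)
			break;

		bulk[n++] = page_to_netmem(page);
		if (n == XDP_BULK_QUEUE_SIZE) {
			page_pool_put_netmem_bulk(bulk, n);
			n = 0;
		}
	}

	if (n)
		page_pool_put_netmem_bulk(bulk, n);
}
#endif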
985 
 986 static netmem_ref page_pool_drain_frag(struct page_pool *pool,
987 				       netmem_ref netmem)
988 {
989 	long drain_count = BIAS_MAX - pool->frag_users;
990 
991 	/* Some user is still using the page frag */
992 	if (likely(page_pool_unref_netmem(netmem, drain_count)))
993 		return 0;
994 
995 	if (__page_pool_page_can_be_recycled(netmem)) {
996 		page_pool_dma_sync_for_device(pool, netmem, -1);
997 		return netmem;
998 	}
999 
1000 	page_pool_return_netmem(pool, netmem);
1001 	return 0;
1002 }
1003 
 1004 static void page_pool_free_frag(struct page_pool *pool)
1005 {
1006 	long drain_count = BIAS_MAX - pool->frag_users;
1007 	netmem_ref netmem = pool->frag_page;
1008 
1009 	pool->frag_page = 0;
1010 
1011 	if (!netmem || page_pool_unref_netmem(netmem, drain_count))
1012 		return;
1013 
1014 	page_pool_return_netmem(pool, netmem);
1015 }
1016 
 1017 netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
1018 				       unsigned int *offset, unsigned int size,
1019 				       gfp_t gfp)
1020 {
1021 	unsigned int max_size = PAGE_SIZE << pool->p.order;
1022 	netmem_ref netmem = pool->frag_page;
1023 
1024 	if (WARN_ON(size > max_size))
1025 		return 0;
1026 
1027 	size = ALIGN(size, dma_get_cache_alignment());
1028 	*offset = pool->frag_offset;
1029 
1030 	if (netmem && *offset + size > max_size) {
1031 		netmem = page_pool_drain_frag(pool, netmem);
1032 		if (netmem) {
1033 			recycle_stat_inc(pool, cached);
1034 			alloc_stat_inc(pool, fast);
1035 			goto frag_reset;
1036 		}
1037 	}
1038 
1039 	if (!netmem) {
1040 		netmem = page_pool_alloc_netmems(pool, gfp);
1041 		if (unlikely(!netmem)) {
1042 			pool->frag_page = 0;
1043 			return 0;
1044 		}
1045 
1046 		pool->frag_page = netmem;
1047 
1048 frag_reset:
1049 		pool->frag_users = 1;
1050 		*offset = 0;
1051 		pool->frag_offset = size;
1052 		page_pool_fragment_netmem(netmem, BIAS_MAX);
1053 		return netmem;
1054 	}
1055 
1056 	pool->frag_users++;
1057 	pool->frag_offset = *offset + size;
1058 	return netmem;
1059 }
1060 EXPORT_SYMBOL(page_pool_alloc_frag_netmem);
1061 
 1062 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
1063 				  unsigned int size, gfp_t gfp)
1064 {
1065 	return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
1066 							  gfp));
1067 }
1068 EXPORT_SYMBOL(page_pool_alloc_frag);
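
/* Usage sketch (illustrative only, not part of this file): carving small RX
 * buffers out of one (possibly high-order) page with the frag API.
 * page_pool_dev_alloc_frag() is the GFP_ATOMIC wrapper from helpers.h; the
 * 2048-byte buffer size and descriptor fields are assumptions.
 */
#if 0	/* compile-guarded example */
static int my_refill_rx_frag(struct my_rx_ring *ring, struct my_rx_desc *rxd)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_dev_alloc_frag(ring->page_pool, &offset, 2048);
	if (unlikely(!page))
		return -ENOMEM;

	/* DMA address of the page plus the offset of this fragment */
	rxd->addr = cpu_to_le64(page_pool_get_dma_addr(page) + offset);
	rxd->page = page;
	rxd->page_offset = offset;

	return 0;
}
#endif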
1069 
 1070 static void page_pool_empty_ring(struct page_pool *pool)
1071 {
1072 	netmem_ref netmem;
1073 
1074 	/* Empty recycle ring */
1075 	while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
1076 		/* Verify the refcnt invariant of cached pages */
1077 		if (!(netmem_ref_count(netmem) == 1))
1078 			pr_crit("%s() page_pool refcnt %d violation\n",
1079 				__func__, netmem_ref_count(netmem));
1080 
1081 		page_pool_return_netmem(pool, netmem);
1082 	}
1083 }
1084 
 1085 static void __page_pool_destroy(struct page_pool *pool)
1086 {
1087 	if (pool->disconnect)
1088 		pool->disconnect(pool);
1089 
1090 	page_pool_unlist(pool);
1091 	page_pool_uninit(pool);
1092 
1093 	if (pool->mp_ops) {
1094 		pool->mp_ops->destroy(pool);
1095 		static_branch_dec(&page_pool_mem_providers);
1096 	}
1097 
1098 	kfree(pool);
1099 }
1100 
 1101 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
1102 {
1103 	netmem_ref netmem;
1104 
1105 	if (pool->destroy_cnt)
1106 		return;
1107 
1108 	/* Empty alloc cache, assume caller made sure this is
 1109 	 * no longer in use, and page_pool_alloc_pages() cannot be
 1110 	 * called concurrently.
1111 	 */
1112 	while (pool->alloc.count) {
1113 		netmem = pool->alloc.cache[--pool->alloc.count];
1114 		page_pool_return_netmem(pool, netmem);
1115 	}
1116 }
1117 
 1118 static void page_pool_scrub(struct page_pool *pool)
1119 {
1120 	unsigned long id;
1121 	void *ptr;
1122 
1123 	page_pool_empty_alloc_cache_once(pool);
1124 	if (!pool->destroy_cnt++ && pool->dma_map) {
1125 		if (pool->dma_sync) {
1126 			/* Disable page_pool_dma_sync_for_device() */
1127 			pool->dma_sync = false;
1128 
1129 			/* Make sure all concurrent returns that may see the old
1130 			 * value of dma_sync (and thus perform a sync) have
1131 			 * finished before doing the unmapping below. Skip the
1132 			 * wait if the device doesn't actually need syncing, or
1133 			 * if there are no outstanding mapped pages.
1134 			 */
1135 			if (dma_dev_need_sync(pool->p.dev) &&
1136 			    !xa_empty(&pool->dma_mapped))
1137 				synchronize_net();
1138 		}
1139 
1140 		xa_for_each(&pool->dma_mapped, id, ptr)
1141 			__page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr));
1142 	}
1143 
1144 	/* No more consumers should exist, but producers could still
1145 	 * be in-flight.
1146 	 */
1147 	page_pool_empty_ring(pool);
1148 }
1149 
 1150 static int page_pool_release(struct page_pool *pool)
1151 {
1152 	bool in_softirq;
1153 	int inflight;
1154 
1155 	page_pool_scrub(pool);
1156 	inflight = page_pool_inflight(pool, true);
1157 	/* Acquire producer lock to make sure producers have exited. */
1158 	in_softirq = page_pool_producer_lock(pool);
1159 	page_pool_producer_unlock(pool, in_softirq);
1160 	if (!inflight)
1161 		__page_pool_destroy(pool);
1162 
1163 	return inflight;
1164 }
1165 
 1166 static void page_pool_release_retry(struct work_struct *wq)
1167 {
1168 	struct delayed_work *dwq = to_delayed_work(wq);
1169 	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
1170 	void *netdev;
1171 	int inflight;
1172 
1173 	inflight = page_pool_release(pool);
1174 	/* In rare cases, a driver bug may cause inflight to go negative.
1175 	 * Don't reschedule release if inflight is 0 or negative.
1176 	 * - If 0, the page_pool has been destroyed
 1177 	 * - If negative, we will never recover.
 1178 	 * In both cases no reschedule is necessary.
1179 	 */
1180 	if (inflight <= 0)
1181 		return;
1182 
1183 	/* Periodic warning for page pools the user can't see */
1184 	netdev = READ_ONCE(pool->slow.netdev);
1185 	if (time_after_eq(jiffies, pool->defer_warn) &&
1186 	    (!netdev || netdev == NET_PTR_POISON)) {
1187 		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
1188 
1189 		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
1190 			__func__, pool->user.id, inflight, sec);
1191 		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1192 	}
1193 
1194 	/* Still not ready to be disconnected, retry later */
1195 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
1196 }
1197 
 1198 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
1199 			   const struct xdp_mem_info *mem)
1200 {
1201 	refcount_inc(&pool->user_cnt);
1202 	pool->disconnect = disconnect;
1203 	pool->xdp_mem_id = mem->id;
1204 }
1205 
1206 /**
1207  * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
1208  * @pool: page pool to modify
1209  * @napi: NAPI instance to associate the page pool with
1210  *
1211  * Associate a page pool with a NAPI instance for lockless page recycling.
1212  * This is useful when a new page pool has to be added to a NAPI instance
1213  * without disabling that NAPI instance, to mark the point at which control
 1214  * path "hands over" the page pool to the NAPI instance. In most cases the
 1215  * driver can simply set the @napi field in struct page_pool_params, and
 1216  * does not have to call this helper.
1217  *
1218  * The function is idempotent, but does not implement any refcounting.
 1219  * A single page_pool_disable_direct_recycling() call will disable recycling,
1220  * no matter how many times enable was called.
1221  */
 1222 void page_pool_enable_direct_recycling(struct page_pool *pool,
1223 				       struct napi_struct *napi)
1224 {
1225 	if (READ_ONCE(pool->p.napi) == napi)
1226 		return;
1227 	WARN_ON(!napi || pool->p.napi);
1228 
1229 	mutex_lock(&page_pools_lock);
1230 	WRITE_ONCE(pool->p.napi, napi);
1231 	mutex_unlock(&page_pools_lock);
1232 }
1233 EXPORT_SYMBOL(page_pool_enable_direct_recycling);
1234 
 1235 void page_pool_disable_direct_recycling(struct page_pool *pool)
1236 {
1237 	/* Disable direct recycling based on pool->cpuid.
1238 	 * Paired with READ_ONCE() in page_pool_napi_local().
1239 	 */
1240 	WRITE_ONCE(pool->cpuid, -1);
1241 
1242 	if (!pool->p.napi)
1243 		return;
1244 
1245 	napi_assert_will_not_race(pool->p.napi);
1246 
1247 	mutex_lock(&page_pools_lock);
1248 	WRITE_ONCE(pool->p.napi, NULL);
1249 	mutex_unlock(&page_pools_lock);
1250 }
1251 EXPORT_SYMBOL(page_pool_disable_direct_recycling);
1252 
 1253 void page_pool_destroy(struct page_pool *pool)
1254 {
1255 	if (!pool)
1256 		return;
1257 
1258 	if (!page_pool_put(pool))
1259 		return;
1260 
1261 	page_pool_disable_direct_recycling(pool);
1262 	page_pool_free_frag(pool);
1263 
1264 	if (!page_pool_release(pool))
1265 		return;
1266 
1267 	page_pool_detached(pool);
1268 	pool->defer_start = jiffies;
1269 	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
1270 
1271 	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
1272 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
1273 }
1274 EXPORT_SYMBOL(page_pool_destroy);
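
/* Usage sketch (illustrative only, not part of this file): teardown order in
 * a hypothetical driver close path.  Pages still posted to the (stopped) RX
 * ring must be returned before page_pool_destroy(), otherwise they remain
 * "inflight" and the deferred-release warning in page_pool_release_retry()
 * above will eventually fire.
 */
#if 0	/* compile-guarded example */
static void my_destroy_rx_ring(struct my_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->size; i++) {
		if (!ring->desc[i].page)
			continue;
		page_pool_put_full_page(ring->page_pool, ring->desc[i].page,
					false);
		ring->desc[i].page = NULL;
	}

	page_pool_destroy(ring->page_pool);
	ring->page_pool = NULL;
}
#endif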
1275 
1276 /* Caller must provide appropriate safe context, e.g. NAPI. */
 1277 void page_pool_update_nid(struct page_pool *pool, int new_nid)
1278 {
1279 	netmem_ref netmem;
1280 
1281 	trace_page_pool_update_nid(pool, new_nid);
1282 	pool->p.nid = new_nid;
1283 
1284 	/* Flush pool alloc cache, as refill will check NUMA node */
1285 	while (pool->alloc.count) {
1286 		netmem = pool->alloc.cache[--pool->alloc.count];
1287 		page_pool_return_netmem(pool, netmem);
1288 	}
1289 }
1290 EXPORT_SYMBOL(page_pool_update_nid);
1291 
 1292 bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr)
1293 {
1294 	return page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), addr);
1295 }
1296 
 1297 /* Associate a niov with a page pool. Should be followed by a matching
1298  * net_mp_niov_clear_page_pool()
1299  */
 1300 void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
1301 {
1302 	netmem_ref netmem = net_iov_to_netmem(niov);
1303 
1304 	page_pool_set_pp_info(pool, netmem);
1305 
1306 	pool->pages_state_hold_cnt++;
1307 	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
1308 }
1309 
1310 /* Disassociate a niov from a page pool. Should only be used in the
1311  * ->release_netmem() path.
1312  */
 1313 void net_mp_niov_clear_page_pool(struct net_iov *niov)
1314 {
1315 	netmem_ref netmem = net_iov_to_netmem(niov);
1316 
1317 	page_pool_clear_pp_info(netmem);
1318 }
1319