/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/error-injection.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#include "dev.h"
#include "mp_dmabuf_devmem.h"
#include "netmem_priv.h"
#include "page_pool_priv.h"

DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX (LONG_MAX >> 1)

#ifdef CONFIG_PAGE_POOL_STATS
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);

/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)					\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);				\
	} while (0)

#define recycle_stat_add(pool, __stat, val)				\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);				\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool: pool from which page was allocated
 * @stats: struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * A pointer to a caller-allocated struct page_pool_stats structure
 * is passed to this API, which fills it in. The caller can then report
 * those stats to the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;

	/* The caller is responsible for initializing stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	const struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);

#else
#define alloc_stat_inc(...)	do { } while (0)
#define recycle_stat_inc(...)	do { } while (0)
#define recycle_stat_add(...)	do { } while (0)
#endif
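
/* Usage sketch (illustrative only, not part of this file's build): a driver
 * built with CONFIG_PAGE_POOL_STATS=y could wire the helpers above into its
 * ethtool callbacks roughly as below. "struct my_priv" and its rx_pool
 * member are hypothetical placeholders for the driver's own state.
 *
 *	static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			data = page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static int my_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : -EOPNOTSUPP;
 *	}
 *
 *	static void my_get_ethtool_stats(struct net_device *dev,
 *					 struct ethtool_stats *es, u64 *data)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *		struct page_pool_stats stats = { };
 *
 *		page_pool_get_stats(priv->rx_pool, &stats);
 *		data = page_pool_ethtool_stats_get(data, &stats);
 *	}
 */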

static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				       bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

static void page_pool_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page);
	CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset);
	CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag,
				    PAGE_POOL_FRAG_GROUP_ALIGN);
}

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params,
			  int cpuid)
{
	unsigned int ring_qsize = 1024; /* Default */
	struct netdev_rx_queue *rxq;
	int err;

	page_pool_struct_check();

	memcpy(&pool->p, &params->fast, sizeof(pool->p));
	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));

	pool->cpuid = cpuid;
	pool->dma_sync_for_cpu = true;

	/* Validate only known flags were used */
	if (pool->slow.flags & ~PP_FLAG_ALL)
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = min(pool->p.pool_size, 16384);

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA
	 * sending, which is the XDP_TX use-case.
	 */
	if (pool->slow.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;

		pool->dma_map = true;
	}

	if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		pool->dma_sync = true;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	pool->has_init_callback = !!pool->slow.init_callback;

#ifdef CONFIG_PAGE_POOL_STATS
	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
		if (!pool->recycle_stats)
			return -ENOMEM;
	} else {
		/* For system page pool instance we use a singular stats object
		 * instead of allocating a separate percpu variable for each
		 * (also percpu) page pool instance.
		 */
		pool->recycle_stats = &pp_system_recycle_stats;
		pool->system = true;
	}
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		if (!pool->system)
			free_percpu(pool->recycle_stats);
#endif
		return -ENOMEM;
	}

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* The driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1);

	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
		netdev_assert_locked(pool->slow.netdev);
		rxq = __netif_get_rx_queue(pool->slow.netdev,
					   pool->slow.queue_idx);
		pool->mp_priv = rxq->mp_params.mp_priv;
		pool->mp_ops = rxq->mp_params.mp_ops;
	}

	if (pool->mp_ops) {
		if (!pool->dma_map || !pool->dma_sync) {
			err = -EOPNOTSUPP;
			goto free_ptr_ring;
		}

		if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
			err = -EFAULT;
			goto free_ptr_ring;
		}

		err = pool->mp_ops->init(pool);
		if (err) {
			pr_warn("%s() mem-provider init failed %d\n", __func__,
				err);
			goto free_ptr_ring;
		}

		static_branch_inc(&page_pool_mem_providers);
	} else if (pool->p.order > MAX_PAGE_ORDER) {
		err = -EINVAL;
		goto free_ptr_ring;
	}

	return 0;

free_ptr_ring:
	ptr_ring_cleanup(&pool->ring, NULL);
	xa_destroy(&pool->dma_mapped);
#ifdef CONFIG_PAGE_POOL_STATS
	if (!pool->system)
		free_percpu(pool->recycle_stats);
#endif
	return err;
}

static void page_pool_uninit(struct page_pool *pool)
{
	ptr_ring_cleanup(&pool->ring, NULL);
	xa_destroy(&pool->dma_mapped);

#ifdef CONFIG_PAGE_POOL_STATS
	if (!pool->system)
		free_percpu(pool->recycle_stats);
#endif

	if (pool->mp_ops) {
		pool->mp_ops->destroy(pool);
		static_branch_dec(&page_pool_mem_providers);
	}
}

/**
 * page_pool_create_percpu() - create a page pool for a given cpu.
 * @params: parameters, see struct page_pool_params
 * @cpuid: cpu identifier
 */
struct page_pool *
page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params, cpuid);
	if (err < 0)
		goto err_free;

	err = page_pool_list(pool);
	if (err)
		goto err_uninit;

	return pool;

err_uninit:
	page_pool_uninit(pool);
err_free:
	pr_warn("%s() gave up with errno %d\n", __func__, err);
	kfree(pool);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(page_pool_create_percpu);

/**
 * page_pool_create() - create a page pool
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	return page_pool_create_percpu(params, -1);
}
EXPORT_SYMBOL(page_pool_create);
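
/* Creation sketch (illustrative only): a typical Rx ring setup might fill in
 * struct page_pool_params and create a pool as below. The sizes, "pdev" and
 * the "ring" structure are hypothetical driver state.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.napi		= &ring->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	ring->page_pool = pool;
 */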

static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);

static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	netmem_ref netmem;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return 0;
	}

	/* Softirq guarantees the CPU and thus the NUMA node are stable. This
	 * assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA matches */
	do {
		netmem = (__force netmem_ref)__ptr_ring_consume(r);
		if (unlikely(!netmem))
			break;

		if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
			pool->alloc.cache[pool->alloc.count++] = netmem;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_netmem(pool, netmem);
			alloc_stat_inc(pool, waive);
			netmem = 0;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return netmem;
}

/* fast path */
static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		netmem = page_pool_refill_alloc_cache(pool);
	}

	return netmem;
}

static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
					    netmem_ref netmem,
					    u32 dma_sync_size)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     dma_sync_size, pool->p.dma_dir);
#endif
}

static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
			      netmem_ref netmem,
			      u32 dma_sync_size)
{
	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) {
		rcu_read_lock();
		/* re-check under rcu_read_lock() to sync with page_pool_scrub() */
		if (pool->dma_sync)
			__page_pool_dma_sync_for_device(pool, netmem,
							dma_sync_size);
		rcu_read_unlock();
	}
}

static int page_pool_register_dma_index(struct page_pool *pool,
					netmem_ref netmem, gfp_t gfp)
{
	int err = 0;
	u32 id;

	if (unlikely(!PP_DMA_INDEX_BITS))
		goto out;

	if (in_softirq())
		err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
			       PP_DMA_INDEX_LIMIT, gfp);
	else
		err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
				  PP_DMA_INDEX_LIMIT, gfp);
	if (err) {
		WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
		goto out;
	}

	netmem_set_dma_index(netmem, id);
out:
	return err;
}

static int page_pool_release_dma_index(struct page_pool *pool,
				       netmem_ref netmem)
{
	struct page *old, *page = netmem_to_page(netmem);
	unsigned long id;

	if (unlikely(!PP_DMA_INDEX_BITS))
		return 0;

	id = netmem_get_dma_index(netmem);
	if (!id)
		return -1;

	if (in_softirq())
		old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
	else
		old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
	if (old != page)
		return -1;

	netmem_set_dma_index(netmem, 0);

	return 0;
}

static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
{
	dma_addr_t dma;
	int err;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (e.g. a 32bit cpu with 64bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it leaves
	 * the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
				 DMA_ATTR_SKIP_CPU_SYNC |
					 DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	if (page_pool_set_dma_addr_netmem(netmem, dma)) {
		WARN_ONCE(1, "unexpected DMA address, please report to netdev@");
		goto unmap_failed;
	}

	err = page_pool_register_dma_index(pool, netmem, gfp);
	if (err)
		goto unset_failed;

	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);

	return true;

unset_failed:
	page_pool_set_dma_addr_netmem(netmem, 0);
unmap_failed:
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return false;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page_to_netmem(page));

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page_to_netmem(page),
				   pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool,
							   gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_order = pool->p.order;
	bool dma_map = pool->dma_map;
	netmem_ref netmem;
	int i, nr_pages;

	/* Unconditionally set NOWARN if allocating from NAPI.
	 * Drivers forget to set it, and OOM reports on packet Rx are useless.
	 */
	if ((gfp & GFP_ATOMIC) == GFP_ATOMIC)
		gfp |= __GFP_NOWARN;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
					 (struct page **)pool->alloc.cache);
	if (unlikely(!nr_pages))
		return 0;

	/* Pages have been filled into the alloc.cache array, but the count is
	 * zero and the page elements have not been (possibly) DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		netmem = pool->alloc.cache[i];
		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) {
			put_page(netmem_to_page(netmem));
			continue;
		}

		page_pool_set_pp_info(pool, netmem);
		pool->alloc.cache[pool->alloc.count++] = netmem;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, netmem,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		netmem = 0;
	}

	/* A page just allocated should/must have refcnt 1. */
	return netmem;
}

/* For using page_pool to replace alloc_pages() API calls, while providing
 * a synchronization guarantee for the allocation side.
 */
netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	netmem_ref netmem;

	/* Fast-path: Get a page from cache */
	netmem = __page_pool_get_cached(pool);
	if (netmem)
		return netmem;

	/* Slow-path: cache empty, do real allocation */
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
		netmem = pool->mp_ops->alloc_netmems(pool, gfp);
	else
		netmem = __page_pool_alloc_netmems_slow(pool, gfp);
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_netmems);
ALLOW_ERROR_INJECTION(page_pool_alloc_netmems, NULL);

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_netmems(pool, gfp));
}
EXPORT_SYMBOL(page_pool_alloc_pages);
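
/* Allocation sketch (illustrative only): inside NAPI Rx refill, a driver
 * might use the allocation API above roughly as follows; "ring" and its
 * page_pool member are hypothetical.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_pages(ring->page_pool, GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 *	dma = page_pool_get_dma_addr(page);
 *	(post dma + pool->p.offset to the Rx descriptor here)
 *
 * Later the page is handed back with page_pool_put_full_page(), or recycled
 * via the skb recycling path, once the hardware is done with it.
 */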

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
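
/* Worked example of the wrap-around safe distance (illustrative only):
 * if pages_state_hold_cnt has wrapped past U32_MAX to hold_cnt = 3 while
 * release_cnt = U32_MAX - 1, the u32 difference 3 - (U32_MAX - 1) wraps to
 * 5, and the cast to s32 keeps it at 5: five pages are still in flight
 * despite the counter wrap.
 */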

s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	if (strict) {
		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
		     inflight);
	} else {
		inflight = max(0, inflight);
	}

	return inflight;
}

void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{
	struct page *page;

	netmem_set_pp(netmem, pool);

	/* XXX: Now that the offset of page_type is shared between
	 * struct page and net_iov, just cast the netmem to struct page
	 * unconditionally by clearing NET_IOV if any, no matter whether
	 * it comes from struct net_iov or struct page. This should be
	 * adjusted once the offset is no longer shared.
	 */
	page = (struct page *)((__force unsigned long)netmem & ~NET_IOV);
	__SetPageNetpp(page);

	/* Ensuring all pages have been split into one fragment initially:
	 * page_pool_set_pp_info() is only called once for every page when it
	 * is allocated from the page allocator and page_pool_fragment_page()
	 * is dirtying the same cache line as the page->pp_magic above, so
	 * the overhead is negligible.
	 */
	page_pool_fragment_netmem(netmem, 1);
	if (pool->has_init_callback)
		pool->slow.init_callback(netmem, pool->slow.init_arg);
}

void page_pool_clear_pp_info(netmem_ref netmem)
{
	struct page *page;

	/* XXX: Now that the offset of page_type is shared between
	 * struct page and net_iov, just cast the netmem to struct page
	 * unconditionally by clearing NET_IOV if any, no matter whether
	 * it comes from struct net_iov or struct page. This should be
	 * adjusted once the offset is no longer shared.
	 */
	page = (struct page *)((__force unsigned long)netmem & ~NET_IOV);
	__ClearPageNetpp(page);

	netmem_set_pp(netmem, NULL);
}

static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
							    netmem_ref netmem)
{
	dma_addr_t dma;

	if (!pool->dma_map)
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		return;

	if (page_pool_release_dma_index(pool, netmem))
		return;

	dma = page_pool_get_dma_addr_netmem(netmem);

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr_netmem(netmem, 0);
}

/* Disconnects a page (from a page_pool). API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
{
	int count;
	bool put;

	put = true;
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
		put = pool->mp_ops->release_netmem(pool, netmem);
	else
		__page_pool_release_netmem_dma(pool, netmem);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, netmem, count);

	if (put) {
		page_pool_clear_pp_info(netmem);
		put_page(netmem_to_page(netmem));
	}
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{
	bool in_softirq, ret;

	/* BH protection not needed if current is softirq */
	in_softirq = page_pool_producer_lock(pool);
	ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem);
	if (ret)
		recycle_stat_inc(pool, ring);
	page_pool_producer_unlock(pool, in_softirq);

	return ret;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(netmem_ref netmem,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = netmem;
	recycle_stat_inc(pool, cached);
	return true;
}

static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{
	return netmem_is_net_iov(netmem) ||
	       (page_ref_count(netmem_to_page(netmem)) == 1 &&
		!page_is_pfmemalloc(netmem_to_page(netmem)));
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If pool->dma_sync is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline netmem_ref
__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
		     unsigned int dma_sync_size, bool allow_direct)
{
	lockdep_assert_no_hardirq();

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 *
	 * A page is NOT reusable when it was allocated while the system
	 * was under memory pressure. (page_is_pfmemalloc)
	 */
	if (likely(__page_pool_page_can_be_recycled(netmem))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);

		if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
			return 0;

		/* Page found as candidate for recycling */
		return netmem;
	}

	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_return_netmem(pool, netmem);

	return 0;
}

static bool page_pool_napi_local(const struct page_pool *pool)
{
	const struct napi_struct *napi;
	u32 cpuid;

	/* On PREEMPT_RT the softirq can be preempted by the consumer */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return false;

	if (unlikely(!in_softirq()))
		return false;

	/* Allow direct recycle if we have reasons to believe that we are
	 * in the same context as the consumer would run, so there's
	 * no possible race.
	 * __page_pool_put_page() makes sure we're not in hardirq context
	 * and interrupts are enabled prior to accessing the cache.
	 */
	cpuid = smp_processor_id();
	if (READ_ONCE(pool->cpuid) == cpuid)
		return true;

	napi = READ_ONCE(pool->p.napi);

	return napi && READ_ONCE(napi->list_owner) == cpuid;
}

void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
				  unsigned int dma_sync_size, bool allow_direct)
{
	if (!allow_direct)
		allow_direct = page_pool_napi_local(pool);

	netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
				      allow_direct);
	if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
		/* Cache full, fallback to free pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_netmem(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_put_unrefed_netmem);

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size, bool allow_direct)
{
	page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
				     allow_direct);
}
EXPORT_SYMBOL(page_pool_put_unrefed_page);

static void page_pool_recycle_ring_bulk(struct page_pool *pool,
					netmem_ref *bulk,
					u32 bulk_len)
{
	bool in_softirq;
	u32 i;

	/* Bulk produce into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);

	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}

	page_pool_producer_unlock(pool, in_softirq);
	recycle_stat_add(pool, ring, i);

	/* Hopefully all pages were returned into ptr_ring */
	if (likely(i == bulk_len))
		return;

	/*
	 * ptr_ring cache is full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation.
	 */
	for (; i < bulk_len; i++)
		page_pool_return_netmem(pool, bulk[i]);
}

/**
 * page_pool_put_netmem_bulk() - release references on multiple netmems
 * @data: array holding netmem references
 * @count: number of entries in @data
 *
 * Tries to refill a number of netmems into the ptr_ring cache holding the
 * ptr_ring producer lock. If the ptr_ring is full, page_pool_put_netmem_bulk()
 * will release leftover netmems to the memory provider.
 * page_pool_put_netmem_bulk() is suitable to be run inside the driver NAPI tx
 * completion loop for the XDP_REDIRECT use case.
 *
 * Please note the caller must not use the data area after running
 * page_pool_put_netmem_bulk(), as this function overwrites it.
 */
void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
{
	u32 bulk_len = 0;

	for (u32 i = 0; i < count; i++) {
		netmem_ref netmem = netmem_compound_head(data[i]);

		if (page_pool_unref_and_test(netmem))
			data[bulk_len++] = netmem;
	}

	count = bulk_len;
	while (count) {
		netmem_ref bulk[XDP_BULK_QUEUE_SIZE];
		struct page_pool *pool = NULL;
		bool allow_direct;
		u32 foreign = 0;

		bulk_len = 0;

		for (u32 i = 0; i < count; i++) {
			struct page_pool *netmem_pp;
			netmem_ref netmem = data[i];

			netmem_pp = netmem_get_pp(netmem);
			if (unlikely(!pool)) {
				pool = netmem_pp;
				allow_direct = page_pool_napi_local(pool);
			} else if (netmem_pp != pool) {
				/*
				 * If the netmem belongs to a different
				 * page_pool, save it for another round.
				 */
				data[foreign++] = netmem;
				continue;
			}

			netmem = __page_pool_put_page(pool, netmem, -1,
						      allow_direct);
			/* Approved for bulk recycling in ptr_ring cache */
			if (netmem)
				bulk[bulk_len++] = netmem;
		}

		if (bulk_len)
			page_pool_recycle_ring_bulk(pool, bulk, bulk_len);

		count = foreign;
	}
}
EXPORT_SYMBOL(page_pool_put_netmem_bulk);
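
/* Completion sketch (illustrative only): a driver using the bulk API directly
 * in its XDP_REDIRECT Tx completion path could batch netmems and flush them
 * in one call. "my_completed_netmem" and "ring" are hypothetical; most
 * drivers reach this code via xdp_return_frame_bulk() instead.
 *
 *	netmem_ref bq[XDP_BULK_QUEUE_SIZE];
 *	netmem_ref netmem;
 *	u32 n = 0;
 *
 *	while (n < XDP_BULK_QUEUE_SIZE &&
 *	       (netmem = my_completed_netmem(ring)))
 *		bq[n++] = netmem;
 *	if (n)
 *		page_pool_put_netmem_bulk(bq, n);
 */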

static netmem_ref page_pool_drain_frag(struct page_pool *pool,
				       netmem_ref netmem)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_unref_netmem(netmem, drain_count)))
		return 0;

	if (__page_pool_page_can_be_recycled(netmem)) {
		page_pool_dma_sync_for_device(pool, netmem, -1);
		return netmem;
	}

	page_pool_return_netmem(pool, netmem);
	return 0;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	netmem_ref netmem = pool->frag_page;

	pool->frag_page = 0;

	if (!netmem || page_pool_unref_netmem(netmem, drain_count))
		return;

	page_pool_return_netmem(pool, netmem);
}

netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
				       unsigned int *offset, unsigned int size,
				       gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	netmem_ref netmem = pool->frag_page;

	if (WARN_ON(size > max_size))
		return 0;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (netmem && *offset + size > max_size) {
		netmem = page_pool_drain_frag(pool, netmem);
		if (netmem) {
			recycle_stat_inc(pool, cached);
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!netmem) {
		netmem = page_pool_alloc_netmems(pool, gfp);
		if (unlikely(!netmem)) {
			pool->frag_page = 0;
			return 0;
		}

		pool->frag_page = netmem;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_netmem(netmem, BIAS_MAX);
		return netmem;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_frag_netmem);

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
							  gfp));
}
EXPORT_SYMBOL(page_pool_alloc_frag);

static void page_pool_empty_ring(struct page_pool *pool)
{
	netmem_ref netmem;

	/* Empty recycle ring */
	while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(netmem_ref_count(netmem) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, netmem_ref_count(netmem));

		page_pool_return_netmem(pool, netmem);
	}
}

static void __page_pool_destroy(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	page_pool_unlist(pool);
	page_pool_uninit(pool);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	netmem_ref netmem;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_netmem(pool, netmem);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	unsigned long id;
	void *ptr;

	page_pool_empty_alloc_cache_once(pool);
	if (!pool->destroy_cnt++ && pool->dma_map) {
		if (pool->dma_sync) {
			/* Disable page_pool_dma_sync_for_device() */
			pool->dma_sync = false;

			/* Make sure all concurrent returns that may see the old
			 * value of dma_sync (and thus perform a sync) have
			 * finished before doing the unmapping below. Skip the
			 * wait if the device doesn't actually need syncing, or
			 * if there are no outstanding mapped pages.
			 */
			if (dma_dev_need_sync(pool->p.dev) &&
			    !xa_empty(&pool->dma_mapped))
				synchronize_net();
		}

		xa_for_each(&pool->dma_mapped, id, ptr)
			__page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr));
	}

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	bool in_softirq;
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool, true);
	/* Acquire producer lock to make sure producers have exited. */
	in_softirq = page_pool_producer_lock(pool);
	page_pool_producer_unlock(pool, in_softirq);
	if (!inflight)
		__page_pool_destroy(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	void *netdev;
	int inflight;

	inflight = page_pool_release(pool);
	/* In rare cases, a driver bug may cause inflight to go negative.
	 * Don't reschedule release if inflight is 0 or negative.
	 * - If 0, the page_pool has been destroyed
	 * - If negative, we will never recover
	 * In both cases no reschedule is necessary.
	 */
	if (inflight <= 0)
		return;

	/* Periodic warning for page pools the user can't see */
	netdev = READ_ONCE(pool->slow.netdev);
	if (time_after_eq(jiffies, pool->defer_warn) &&
	    (!netdev || netdev == NET_PTR_POISON)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
			__func__, pool->user.id, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   const struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

/**
 * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
 * @pool: page pool to modify
 * @napi: NAPI instance to associate the page pool with
 *
 * Associate a page pool with a NAPI instance for lockless page recycling.
 * This is useful when a new page pool has to be added to a NAPI instance
 * without disabling that NAPI instance, to mark the point at which the
 * control path "hands over" the page pool to the NAPI instance. In most
 * cases the driver can simply set the @napi field in struct
 * page_pool_params, and does not have to call this helper.
 *
 * The function is idempotent, but does not implement any refcounting.
 * A single page_pool_disable_direct_recycling() will disable recycling,
 * no matter how many times enable was called.
 */
void page_pool_enable_direct_recycling(struct page_pool *pool,
				       struct napi_struct *napi)
{
	if (READ_ONCE(pool->p.napi) == napi)
		return;
	WARN_ON(!napi || pool->p.napi);

	mutex_lock(&page_pools_lock);
	WRITE_ONCE(pool->p.napi, napi);
	mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_enable_direct_recycling);
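
/* Hand-over sketch (illustrative only): when a queue is restarted with a
 * freshly created pool while its NAPI instance stays enabled, the control
 * path can mark the ownership transfer explicitly; "ring" is hypothetical
 * driver state and pp_params.napi is assumed to have been left NULL.
 *
 *	ring->page_pool = page_pool_create(&pp_params);
 *	(publish the new pool to the datapath)
 *	page_pool_enable_direct_recycling(ring->page_pool, &ring->napi);
 */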

void page_pool_disable_direct_recycling(struct page_pool *pool)
{
	/* Disable direct recycling based on pool->cpuid.
	 * Paired with READ_ONCE() in page_pool_napi_local().
	 */
	WRITE_ONCE(pool->cpuid, -1);

	if (!pool->p.napi)
		return;

	napi_assert_will_not_race(pool->p.napi);

	mutex_lock(&page_pools_lock);
	WRITE_ONCE(pool->p.napi, NULL);
	mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_disable_direct_recycling);

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_disable_direct_recycling(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	page_pool_detached(pool);
	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	netmem_ref netmem;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_netmem(pool, netmem);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);

bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr)
{
	return page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), addr);
}

/* Associate a niov with a page pool. Should follow with a matching
 * net_mp_niov_clear_page_pool()
 */
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
}

/* Disassociate a niov from a page pool. Should only be used in the
 * ->release_netmem() path.
 */
void net_mp_niov_clear_page_pool(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	page_pool_clear_pp_info(netmem);
}