Lines Matching defs:pool (net/core/page_pool.c)

46 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
48 #define recycle_stat_inc(pool, __stat) \
50 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
54 #define recycle_stat_add(pool, __stat, val) \
56 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
75 * page_pool_get_stats() - fetch page pool stats
76 * @pool: pool from which page was allocated
85 bool page_pool_get_stats(const struct page_pool *pool,
94 stats->alloc_stats.fast += pool->alloc_stats.fast;
95 stats->alloc_stats.slow += pool->alloc_stats.slow;
96 stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
97 stats->alloc_stats.empty += pool->alloc_stats.empty;
98 stats->alloc_stats.refill += pool->alloc_stats.refill;
99 stats->alloc_stats.waive += pool->alloc_stats.waive;
103 per_cpu_ptr(pool->recycle_stats, cpu);
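
The alloc/recycle counters above are exposed to drivers through page_pool_get_stats() when CONFIG_PAGE_POOL_STATS is enabled. A minimal sketch of a caller follows; the helper name and the printed counters are illustrative, and the key point is that the function accumulates into the caller-supplied struct, so one struct can aggregate several pools.

#include <net/page_pool/helpers.h>

/* Sketch only: requires CONFIG_PAGE_POOL_STATS=y. */
static void my_dump_pool_stats(struct page_pool *pool)
{
        struct page_pool_stats stats = { };

        /* page_pool_get_stats() adds into *stats, so zero it once and
         * call it for each pool you want to aggregate.
         */
        if (!page_pool_get_stats(pool, &stats))
                return;

        pr_info("alloc: fast=%llu slow=%llu empty=%llu; recycle: cached=%llu ring=%llu\n",
                stats.alloc_stats.fast, stats.alloc_stats.slow,
                stats.alloc_stats.empty, stats.recycle_stats.cached,
                stats.recycle_stats.ring);
}
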
161 static bool page_pool_producer_lock(struct page_pool *pool)
162 __acquires(&pool->ring.producer_lock)
167 spin_lock(&pool->ring.producer_lock);
169 spin_lock_bh(&pool->ring.producer_lock);
174 static void page_pool_producer_unlock(struct page_pool *pool,
176 __releases(&pool->ring.producer_lock)
179 spin_unlock(&pool->ring.producer_lock);
181 spin_unlock_bh(&pool->ring.producer_lock);
193 static int page_pool_init(struct page_pool *pool,
203 memcpy(&pool->p, &params->fast, sizeof(pool->p));
204 memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
206 pool->cpuid = cpuid;
207 pool->dma_sync_for_cpu = true;
210 if (pool->slow.flags & ~PP_FLAG_ALL)
213 if (pool->p.pool_size)
214 ring_qsize = min(pool->p.pool_size, 16384);
220 if (pool->slow.flags & PP_FLAG_DMA_MAP) {
221 if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
222 (pool->p.dma_dir != DMA_BIDIRECTIONAL))
225 pool->dma_map = true;
228 if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
232 if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
235 if (!pool->p.max_len)
238 pool->dma_sync = true;
240 /* pool->p.offset has to be set according to the address
245 pool->has_init_callback = !!pool->slow.init_callback;
248 if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
249 pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
250 if (!pool->recycle_stats)
253 /* For system page pool instance we use a singular stats object
255 * (also percpu) page pool instance.
257 pool->recycle_stats = &pp_system_recycle_stats;
258 pool->system = true;
262 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
264 if (!pool->system)
265 free_percpu(pool->recycle_stats);
270 atomic_set(&pool->pages_state_release_cnt, 0);
273 refcount_set(&pool->user_cnt, 1);
275 xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1);
277 if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
278 netdev_assert_locked(pool->slow.netdev);
279 rxq = __netif_get_rx_queue(pool->slow.netdev,
280 pool->slow.queue_idx);
281 pool->mp_priv = rxq->mp_params.mp_priv;
282 pool->mp_ops = rxq->mp_params.mp_ops;
285 if (pool->mp_ops) {
286 if (!pool->dma_map || !pool->dma_sync) {
291 if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
296 err = pool->mp_ops->init(pool);
309 ptr_ring_cleanup(&pool->ring, NULL);
311 if (!pool->system)
312 free_percpu(pool->recycle_stats);
317 static void page_pool_uninit(struct page_pool *pool)
319 ptr_ring_cleanup(&pool->ring, NULL);
320 xa_destroy(&pool->dma_mapped);
323 if (!pool->system)
324 free_percpu(pool->recycle_stats);
329 * page_pool_create_percpu() - create a page pool for a given cpu.
336 struct page_pool *pool;
339 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
340 if (!pool)
343 err = page_pool_init(pool, params, cpuid);
347 err = page_pool_list(pool);
351 return pool;
354 page_pool_uninit(pool);
357 kfree(pool);
363 * page_pool_create() - create a page pool
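
The init checks quoted above (dma_dir must be DMA_FROM_DEVICE or DMA_BIDIRECTIONAL, PP_FLAG_DMA_SYNC_DEV requires PP_FLAG_DMA_MAP plus a non-zero max_len, pool_size clamped to 16384 ring entries) translate into a fairly standard driver-side setup. A sketch, with the device, NAPI pointer and sizes as placeholders:

#include <net/page_pool/helpers.h>

/* Sketch of RX-ring pool creation; names and sizes are illustrative. */
static struct page_pool *my_create_rx_pool(struct device *dev,
                                           struct napi_struct *napi,
                                           unsigned int ring_size)
{
        struct page_pool_params pp = {
                .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order          = 0,
                .pool_size      = ring_size,    /* ptr_ring size, clamped to 16384 */
                .nid            = NUMA_NO_NODE,
                .dev            = dev,          /* device doing the DMA mapping */
                .napi           = napi,         /* enables direct (lockless) recycling */
                .dma_dir        = DMA_FROM_DEVICE,
                .max_len        = PAGE_SIZE,    /* region synced for the device */
                .offset         = 0,            /* start of the HW-written data */
        };

        return page_pool_create(&pp);           /* ERR_PTR() on failure */
}
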
372 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
374 static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
376 struct ptr_ring *r = &pool->ring;
382 alloc_stat_inc(pool, empty);
390 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
392 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
403 pool->alloc.cache[pool->alloc.count++] = netmem;
410 page_pool_return_netmem(pool, netmem);
411 alloc_stat_inc(pool, waive);
415 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
418 if (likely(pool->alloc.count > 0)) {
419 netmem = pool->alloc.cache[--pool->alloc.count];
420 alloc_stat_inc(pool, refill);
427 static netmem_ref __page_pool_get_cached(struct page_pool *pool)
432 if (likely(pool->alloc.count)) {
434 netmem = pool->alloc.cache[--pool->alloc.count];
435 alloc_stat_inc(pool, fast);
437 netmem = page_pool_refill_alloc_cache(pool);
443 static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
450 dma_sync_size = min(dma_sync_size, pool->p.max_len);
451 __dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
452 dma_sync_size, pool->p.dma_dir);
457 page_pool_dma_sync_for_device(const struct page_pool *pool,
461 if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) {
464 if (pool->dma_sync)
465 __page_pool_dma_sync_for_device(pool, netmem,
471 static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
480 * This mapping is kept for lifetime of page, until leaving pool.
482 dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
483 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
486 if (dma_mapping_error(pool->p.dev, dma))
495 err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
498 err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
506 page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
513 dma_unmap_page_attrs(pool->p.dev, dma,
514 PAGE_SIZE << pool->p.order, pool->p.dma_dir,
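
With PP_FLAG_DMA_MAP the mapping created in page_pool_dma_map() stays with the page for its whole lifetime in the pool, so a driver never maps the page itself; it only reads the stored address back when posting a buffer. A sketch (the function name is hypothetical, and the descriptor write is left to the caller because it is device specific):

#include <net/page_pool/helpers.h>

/* Sketch: refill one RX slot from a DMA-mapping pool. */
static struct page *my_refill_one_rx_slot(struct page_pool *pool,
                                          dma_addr_t *dma)
{
        struct page *page = page_pool_dev_alloc_pages(pool); /* GFP_ATOMIC | __GFP_NOWARN */

        if (!page)
                return NULL;

        /* The pool mapped the page when it was first allocated; the driver
         * only reads the address back, no dma_map_page() of its own.
         */
        *dma = page_pool_get_dma_addr(page) + pool->p.offset;
        return page;
}

The caller writes *dma into its RX descriptor and keeps the struct page so it can later hand it to the stack or return it to the pool.
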
519 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
525 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
529 if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
534 alloc_stat_inc(pool, slow_high_order);
535 page_pool_set_pp_info(pool, page_to_netmem(page));
538 pool->pages_state_hold_cnt++;
539 trace_page_pool_state_hold(pool, page_to_netmem(page),
540 pool->pages_state_hold_cnt);
545 static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool,
549 unsigned int pp_order = pool->p.order;
550 bool dma_map = pool->dma_map;
562 return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));
565 if (unlikely(pool->alloc.count > 0))
566 return pool->alloc.cache[--pool->alloc.count];
569 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
571 nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
572 (struct page **)pool->alloc.cache);
580 netmem = pool->alloc.cache[i];
581 if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) {
586 page_pool_set_pp_info(pool, netmem);
587 pool->alloc.cache[pool->alloc.count++] = netmem;
589 pool->pages_state_hold_cnt++;
590 trace_page_pool_state_hold(pool, netmem,
591 pool->pages_state_hold_cnt);
595 if (likely(pool->alloc.count > 0)) {
596 netmem = pool->alloc.cache[--pool->alloc.count];
597 alloc_stat_inc(pool, slow);
609 netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
614 netmem = __page_pool_get_cached(pool);
619 if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
620 netmem = pool->mp_ops->alloc_netmems(pool, gfp);
622 netmem = __page_pool_alloc_netmems_slow(pool, gfp);
628 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
630 return netmem_to_page(page_pool_alloc_netmems(pool, gfp));
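
page_pool_alloc_netmems() and page_pool_alloc_pages() above are the exported entry points that the page_pool_dev_alloc_pages() helper wraps with GFP_ATOMIC. On the completion side a common pattern is to wrap the page in an skb and mark it so the page comes back to the pool when the skb is freed. A sketch, assuming an order-0 pool and that the hardware wrote len bytes at pool->p.offset:

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

/* Sketch of an RX completion handler's skb construction. */
static struct sk_buff *my_build_rx_skb(struct page_pool *pool,
                                       struct page *page, unsigned int len)
{
        struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

        if (unlikely(!skb)) {
                /* allow_direct=true: we are in the pool's NAPI context */
                page_pool_put_full_page(pool, page, true);
                return NULL;
        }

        skb_reserve(skb, pool->p.offset);
        skb_put(skb, len);
        skb_mark_for_recycle(skb);      /* return the page to the pool on skb free */
        return skb;
}
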
639 s32 page_pool_inflight(const struct page_pool *pool, bool strict)
641 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
642 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
648 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
658 void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
660 netmem_set_pp(netmem, pool);
670 if (pool->has_init_callback)
671 pool->slow.init_callback(netmem, pool->slow.init_arg);
680 static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
687 if (!pool->dma_map)
698 old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
700 old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
706 /* When page is unmapped, it cannot be returned to our pool */
707 dma_unmap_page_attrs(pool->p.dev, dma,
708 PAGE_SIZE << pool->p.order, pool->p.dma_dir,
719 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
725 if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
726 put = pool->mp_ops->release_netmem(pool, netmem);
728 __page_pool_release_netmem_dma(pool, netmem);
730 /* This may be the last page returned, releasing the pool, so
731 * it is not safe to reference pool afterwards.
733 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
734 trace_page_pool_state_release(pool, netmem, count);
740 /* An optimization would be to call __free_pages(page, pool->p.order)
746 static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
751 in_softirq = page_pool_producer_lock(pool);
752 ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem);
754 recycle_stat_inc(pool, ring);
755 page_pool_producer_unlock(pool, in_softirq);
766 struct page_pool *pool)
768 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
769 recycle_stat_inc(pool, cache_full);
774 pool->alloc.cache[pool->alloc.count++] = netmem;
775 recycle_stat_inc(pool, cached);
787 * If pool->dma_sync is set, we'll try to sync the DMA area for
788 * the configured size min(dma_sync_size, pool->max_len).
793 __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
810 page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
812 if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
832 recycle_stat_inc(pool, released_refcnt);
833 page_pool_return_netmem(pool, netmem);
838 static bool page_pool_napi_local(const struct page_pool *pool)
857 if (READ_ONCE(pool->cpuid) == cpuid)
860 napi = READ_ONCE(pool->p.napi);
865 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
869 allow_direct = page_pool_napi_local(pool);
871 netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
873 if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
875 recycle_stat_inc(pool, ring_full);
876 page_pool_return_netmem(pool, netmem);
881 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
884 page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
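
__page_pool_put_page() and the *_unrefed_* entry points above sit behind the driver-facing put helpers. On a drop or error path the usual call is page_pool_put_full_page(); allow_direct=true is only safe from the pool's own NAPI context, and when false is passed the core still checks page_pool_napi_local() and uses the lockless cache where possible, otherwise the page goes through the ptr_ring. A sketch:

#include <net/page_pool/helpers.h>

/* Sketch: drop a received buffer and recycle its page. */
static void my_drop_rx_buf(struct page_pool *pool, struct page *page,
                           bool napi_context)
{
        /* The full-page variant syncs up to pool->p.max_len for the
         * device before the page is reused (dma_sync_size == -1).
         */
        page_pool_put_full_page(pool, page, napi_context);
}

page_pool_recycle_direct(pool, page) is the same call with allow_direct hard-coded to true.
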
889 static void page_pool_recycle_ring_bulk(struct page_pool *pool,
897 in_softirq = page_pool_producer_lock(pool);
900 if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
902 recycle_stat_inc(pool, ring_full);
907 page_pool_producer_unlock(pool, in_softirq);
908 recycle_stat_add(pool, ring, i);
919 page_pool_return_netmem(pool, bulk[i]);
950 struct page_pool *pool = NULL;
961 if (unlikely(!pool)) {
962 pool = netmem_pp;
963 allow_direct = page_pool_napi_local(pool);
964 } else if (netmem_pp != pool) {
973 netmem = __page_pool_put_page(pool, netmem, -1,
981 page_pool_recycle_ring_bulk(pool, bulk, bulk_len);
988 static netmem_ref page_pool_drain_frag(struct page_pool *pool,
991 long drain_count = BIAS_MAX - pool->frag_users;
998 page_pool_dma_sync_for_device(pool, netmem, -1);
1002 page_pool_return_netmem(pool, netmem);
1006 static void page_pool_free_frag(struct page_pool *pool)
1008 long drain_count = BIAS_MAX - pool->frag_users;
1009 netmem_ref netmem = pool->frag_page;
1011 pool->frag_page = 0;
1016 page_pool_return_netmem(pool, netmem);
1019 netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
1023 unsigned int max_size = PAGE_SIZE << pool->p.order;
1024 netmem_ref netmem = pool->frag_page;
1030 *offset = pool->frag_offset;
1033 netmem = page_pool_drain_frag(pool, netmem);
1035 recycle_stat_inc(pool, cached);
1036 alloc_stat_inc(pool, fast);
1042 netmem = page_pool_alloc_netmems(pool, gfp);
1044 pool->frag_page = 0;
1048 pool->frag_page = netmem;
1051 pool->frag_users = 1;
1053 pool->frag_offset = size;
1058 pool->frag_users++;
1059 pool->frag_offset = *offset + size;
1064 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
1067 return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size,
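
The frag interface above lets several buffers share one (possibly high-order) pool page, with pool->frag_users and pool->frag_offset tracking the split. A sketch of a caller carving out 2KB chunks; the chunk size is a placeholder:

#include <net/page_pool/helpers.h>

/* Sketch: carve a 2KB RX buffer out of a shared pool page. */
static struct page *my_alloc_rx_frag(struct page_pool *pool,
                                     unsigned int *offset)
{
        /* Returns the backing page with its frag reference raised;
         * *offset is where this 2KB chunk starts within the page.
         */
        return page_pool_alloc_frag(pool, offset, 2048, GFP_ATOMIC);
}
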
1072 static void page_pool_empty_ring(struct page_pool *pool)
1077 while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
1083 page_pool_return_netmem(pool, netmem);
1087 static void __page_pool_destroy(struct page_pool *pool)
1089 if (pool->disconnect)
1090 pool->disconnect(pool);
1092 page_pool_unlist(pool);
1093 page_pool_uninit(pool);
1095 if (pool->mp_ops) {
1096 pool->mp_ops->destroy(pool);
1100 kfree(pool);
1103 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
1107 if (pool->destroy_cnt)
1114 while (pool->alloc.count) {
1115 netmem = pool->alloc.cache[--pool->alloc.count];
1116 page_pool_return_netmem(pool, netmem);
1120 static void page_pool_scrub(struct page_pool *pool)
1125 page_pool_empty_alloc_cache_once(pool);
1126 if (!pool->destroy_cnt++ && pool->dma_map) {
1127 if (pool->dma_sync) {
1129 pool->dma_sync = false;
1137 if (dma_dev_need_sync(pool->p.dev) &&
1138 !xa_empty(&pool->dma_mapped))
1142 xa_for_each(&pool->dma_mapped, id, ptr)
1143 __page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr));
1149 page_pool_empty_ring(pool);
1152 static int page_pool_release(struct page_pool *pool)
1157 page_pool_scrub(pool);
1158 inflight = page_pool_inflight(pool, true);
1160 in_softirq = page_pool_producer_lock(pool);
1161 page_pool_producer_unlock(pool, in_softirq);
1163 __page_pool_destroy(pool);
1171 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
1175 inflight = page_pool_release(pool);
1186 netdev = READ_ONCE(pool->slow.netdev);
1187 if (time_after_eq(jiffies, pool->defer_warn) &&
1189 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
1191 pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
1192 __func__, pool->user.id, inflight, sec);
1193 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1197 schedule_delayed_work(&pool->release_dw, DEFER_TIME);
1200 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
1203 refcount_inc(&pool->user_cnt);
1204 pool->disconnect = disconnect;
1205 pool->xdp_mem_id = mem->id;
1209 * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
1210 * @pool: page pool to modify
1211 * @napi: NAPI instance to associate the page pool with
1213 * Associate a page pool with a NAPI instance for lockless page recycling.
1214 * This is useful when a new page pool has to be added to a NAPI instance
1216 * path "hands over" the page pool to the NAPI instance. In most cases driver
1224 void page_pool_enable_direct_recycling(struct page_pool *pool,
1227 if (READ_ONCE(pool->p.napi) == napi)
1229 WARN_ON(!napi || pool->p.napi);
1232 WRITE_ONCE(pool->p.napi, napi);
1237 void page_pool_disable_direct_recycling(struct page_pool *pool)
1239 /* Disable direct recycling based on pool->cpuid.
1242 WRITE_ONCE(pool->cpuid, -1);
1244 if (!pool->p.napi)
1247 napi_assert_will_not_race(pool->p.napi);
1250 WRITE_ONCE(pool->p.napi, NULL);
1255 void page_pool_destroy(struct page_pool *pool)
1257 if (!pool)
1260 if (!page_pool_put(pool))
1263 page_pool_disable_direct_recycling(pool);
1264 page_pool_free_frag(pool);
1266 if (!page_pool_release(pool))
1269 page_pool_detached(pool);
1270 pool->defer_start = jiffies;
1271 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1273 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
1274 schedule_delayed_work(&pool->release_dw, DEFER_TIME);
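
page_pool_destroy() above disables direct recycling, drains the frag page and the caches, and, if pages are still in flight (held by hardware or by skbs), defers the final free to the release_dw work item with periodic stall warnings. Driver teardown therefore only has to stop feeding the pool and call it. A sketch; the xdp_rxq_info_unreg() step applies only if the pool was registered as an XDP MEM_TYPE_PAGE_POOL memory model, and the function name is hypothetical:

#include <net/page_pool/helpers.h>
#include <net/xdp.h>

/* Sketch of RX-ring teardown ordering. */
static void my_teardown_rx_pool(struct xdp_rxq_info *xdp_rxq,
                                struct page_pool *pool)
{
        xdp_rxq_info_unreg(xdp_rxq);    /* drops the XDP mem-model reference */
        page_pool_destroy(pool);        /* NULL-safe; defers while pages are in flight */
}
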
1279 void page_pool_update_nid(struct page_pool *pool, int new_nid)
1283 trace_page_pool_update_nid(pool, new_nid);
1284 pool->p.nid = new_nid;
1286 /* Flush pool alloc cache, as refill will check NUMA node */
1287 while (pool->alloc.count) {
1288 netmem = pool->alloc.cache[--pool->alloc.count];
1289 page_pool_return_netmem(pool, netmem);
1299 /* Associate a niov with a page pool. Should follow with a matching
1302 void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
1306 page_pool_set_pp_info(pool, netmem);
1308 pool->pages_state_hold_cnt++;
1309 trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
1312 /* Disassociate a niov from a page pool. Should only be used in the