Lines Matching refs:pool

43 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)  argument
45 #define recycle_stat_inc(pool, __stat) \ argument
47 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
51 #define recycle_stat_add(pool, __stat, val) \ argument
53 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
82 bool page_pool_get_stats(const struct page_pool *pool, in page_pool_get_stats() argument
91 stats->alloc_stats.fast += pool->alloc_stats.fast; in page_pool_get_stats()
92 stats->alloc_stats.slow += pool->alloc_stats.slow; in page_pool_get_stats()
93 stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order; in page_pool_get_stats()
94 stats->alloc_stats.empty += pool->alloc_stats.empty; in page_pool_get_stats()
95 stats->alloc_stats.refill += pool->alloc_stats.refill; in page_pool_get_stats()
96 stats->alloc_stats.waive += pool->alloc_stats.waive; in page_pool_get_stats()
100 per_cpu_ptr(pool->recycle_stats, cpu); in page_pool_get_stats()
153 #define alloc_stat_inc(pool, __stat) argument
154 #define recycle_stat_inc(pool, __stat) argument
155 #define recycle_stat_add(pool, __stat, val) argument
158 static bool page_pool_producer_lock(struct page_pool *pool) in page_pool_producer_lock() argument
159 __acquires(&pool->ring.producer_lock) in page_pool_producer_lock()
164 spin_lock(&pool->ring.producer_lock); in page_pool_producer_lock()
166 spin_lock_bh(&pool->ring.producer_lock); in page_pool_producer_lock()
171 static void page_pool_producer_unlock(struct page_pool *pool, in page_pool_producer_unlock() argument
173 __releases(&pool->ring.producer_lock) in page_pool_producer_unlock()
176 spin_unlock(&pool->ring.producer_lock); in page_pool_producer_unlock()
178 spin_unlock_bh(&pool->ring.producer_lock); in page_pool_producer_unlock()
190 static int page_pool_init(struct page_pool *pool, in page_pool_init() argument
200 memcpy(&pool->p, &params->fast, sizeof(pool->p)); in page_pool_init()
201 memcpy(&pool->slow, &params->slow, sizeof(pool->slow)); in page_pool_init()
203 pool->cpuid = cpuid; in page_pool_init()
206 if (pool->slow.flags & ~PP_FLAG_ALL) in page_pool_init()
209 if (pool->p.pool_size) in page_pool_init()
210 ring_qsize = pool->p.pool_size; in page_pool_init()
220 if (pool->slow.flags & PP_FLAG_DMA_MAP) { in page_pool_init()
221 if ((pool->p.dma_dir != DMA_FROM_DEVICE) && in page_pool_init()
222 (pool->p.dma_dir != DMA_BIDIRECTIONAL)) in page_pool_init()
225 pool->dma_map = true; in page_pool_init()
228 if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) { in page_pool_init()
232 if (!(pool->slow.flags & PP_FLAG_DMA_MAP)) in page_pool_init()
235 if (!pool->p.max_len) in page_pool_init()
238 pool->dma_sync = true; in page_pool_init()
245 pool->has_init_callback = !!pool->slow.init_callback; in page_pool_init()
248 if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) { in page_pool_init()
249 pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); in page_pool_init()
250 if (!pool->recycle_stats) in page_pool_init()
257 pool->recycle_stats = &pp_system_recycle_stats; in page_pool_init()
258 pool->system = true; in page_pool_init()
262 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) { in page_pool_init()
264 if (!pool->system) in page_pool_init()
265 free_percpu(pool->recycle_stats); in page_pool_init()
270 atomic_set(&pool->pages_state_release_cnt, 0); in page_pool_init()
273 refcount_set(&pool->user_cnt, 1); in page_pool_init()
275 if (pool->dma_map) in page_pool_init()
276 get_device(pool->p.dev); in page_pool_init()
278 if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) { in page_pool_init()
284 rxq = __netif_get_rx_queue(pool->slow.netdev, in page_pool_init()
285 pool->slow.queue_idx); in page_pool_init()
286 pool->mp_priv = rxq->mp_params.mp_priv; in page_pool_init()
289 if (pool->mp_priv) { in page_pool_init()
290 err = mp_dmabuf_devmem_init(pool); in page_pool_init()
303 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_init()
305 if (!pool->system) in page_pool_init()
306 free_percpu(pool->recycle_stats); in page_pool_init()
311 static void page_pool_uninit(struct page_pool *pool) in page_pool_uninit() argument
313 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_uninit()
315 if (pool->dma_map) in page_pool_uninit()
316 put_device(pool->p.dev); in page_pool_uninit()
319 if (!pool->system) in page_pool_uninit()
320 free_percpu(pool->recycle_stats); in page_pool_uninit()
332 struct page_pool *pool; in page_pool_create_percpu() local
335 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid); in page_pool_create_percpu()
336 if (!pool) in page_pool_create_percpu()
339 err = page_pool_init(pool, params, cpuid); in page_pool_create_percpu()
343 err = page_pool_list(pool); in page_pool_create_percpu()
347 return pool; in page_pool_create_percpu()
350 page_pool_uninit(pool); in page_pool_create_percpu()
353 kfree(pool); in page_pool_create_percpu()
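
page_pool_init() above is reached through page_pool_create()/page_pool_create_percpu(), and it enforces the parameter rules visible at lines 220-238: PP_FLAG_DMA_MAP needs a DMA_FROM_DEVICE or DMA_BIDIRECTIONAL direction, and PP_FLAG_DMA_SYNC_DEV additionally needs DMA_MAP and a non-zero max_len. A sketch of a typical per-queue setup honouring those rules; mydrv_* and the sizes are hypothetical:

#include <net/page_pool/types.h>

static int mydrv_open_rxq(struct mydrv_rxq *rxq, struct device *dma_dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,			/* sizes the ptr_ring */
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,		/* required by PP_FLAG_DMA_MAP */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,		/* required by PP_FLAG_DMA_SYNC_DEV */
		.offset		= 0,
	};

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		int err = PTR_ERR(rxq->page_pool);

		rxq->page_pool = NULL;
		return err;
	}
	return 0;
}
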
368 static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
370 static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache() argument
372 struct ptr_ring *r = &pool->ring; in page_pool_refill_alloc_cache()
378 alloc_stat_inc(pool, empty); in page_pool_refill_alloc_cache()
386 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid; in page_pool_refill_alloc_cache()
399 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_refill_alloc_cache()
406 page_pool_return_page(pool, netmem); in page_pool_refill_alloc_cache()
407 alloc_stat_inc(pool, waive); in page_pool_refill_alloc_cache()
411 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); in page_pool_refill_alloc_cache()
414 if (likely(pool->alloc.count > 0)) { in page_pool_refill_alloc_cache()
415 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
416 alloc_stat_inc(pool, refill); in page_pool_refill_alloc_cache()
423 static netmem_ref __page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached() argument
428 if (likely(pool->alloc.count)) { in __page_pool_get_cached()
430 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
431 alloc_stat_inc(pool, fast); in __page_pool_get_cached()
433 netmem = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
439 static void __page_pool_dma_sync_for_device(const struct page_pool *pool, in __page_pool_dma_sync_for_device() argument
446 dma_sync_size = min(dma_sync_size, pool->p.max_len); in __page_pool_dma_sync_for_device()
447 __dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset, in __page_pool_dma_sync_for_device()
448 dma_sync_size, pool->p.dma_dir); in __page_pool_dma_sync_for_device()
453 page_pool_dma_sync_for_device(const struct page_pool *pool, in page_pool_dma_sync_for_device() argument
457 if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) in page_pool_dma_sync_for_device()
458 __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); in page_pool_dma_sync_for_device()
461 static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem) in page_pool_dma_map() argument
470 dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0, in page_pool_dma_map()
471 (PAGE_SIZE << pool->p.order), pool->p.dma_dir, in page_pool_dma_map()
474 if (dma_mapping_error(pool->p.dev, dma)) in page_pool_dma_map()
480 page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len); in page_pool_dma_map()
486 dma_unmap_page_attrs(pool->p.dev, dma, in page_pool_dma_map()
487 PAGE_SIZE << pool->p.order, pool->p.dma_dir, in page_pool_dma_map()
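
Because page_pool_dma_map() stores the mapping in the netmem at allocation time, a driver built with PP_FLAG_DMA_MAP never maps RX pages itself; it just reads the address back. A sketch, with mydrv_* and the descriptor layout hypothetical:

#include <net/page_pool/helpers.h>

static void mydrv_fill_rx_desc(struct mydrv_rxq *rxq, struct page *page, u32 idx)
{
	/* Address recorded by page_pool_dma_map(); no dma_map_page() needed here. */
	dma_addr_t dma = page_pool_get_dma_addr(page) + rxq->rx_headroom;

	rxq->desc[idx].addr = cpu_to_le64(dma);
}
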
492 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, in __page_pool_alloc_page_order() argument
498 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
502 if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page)))) { in __page_pool_alloc_page_order()
507 alloc_stat_inc(pool, slow_high_order); in __page_pool_alloc_page_order()
508 page_pool_set_pp_info(pool, page_to_netmem(page)); in __page_pool_alloc_page_order()
511 pool->pages_state_hold_cnt++; in __page_pool_alloc_page_order()
512 trace_page_pool_state_hold(pool, page_to_netmem(page), in __page_pool_alloc_page_order()
513 pool->pages_state_hold_cnt); in __page_pool_alloc_page_order()
518 static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow() argument
522 unsigned int pp_order = pool->p.order; in __page_pool_alloc_pages_slow()
523 bool dma_map = pool->dma_map; in __page_pool_alloc_pages_slow()
529 return page_to_netmem(__page_pool_alloc_page_order(pool, gfp)); in __page_pool_alloc_pages_slow()
532 if (unlikely(pool->alloc.count > 0)) in __page_pool_alloc_pages_slow()
533 return pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
536 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); in __page_pool_alloc_pages_slow()
539 pool->p.nid, bulk, in __page_pool_alloc_pages_slow()
540 (struct page **)pool->alloc.cache); in __page_pool_alloc_pages_slow()
548 netmem = pool->alloc.cache[i]; in __page_pool_alloc_pages_slow()
549 if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) { in __page_pool_alloc_pages_slow()
554 page_pool_set_pp_info(pool, netmem); in __page_pool_alloc_pages_slow()
555 pool->alloc.cache[pool->alloc.count++] = netmem; in __page_pool_alloc_pages_slow()
557 pool->pages_state_hold_cnt++; in __page_pool_alloc_pages_slow()
558 trace_page_pool_state_hold(pool, netmem, in __page_pool_alloc_pages_slow()
559 pool->pages_state_hold_cnt); in __page_pool_alloc_pages_slow()
563 if (likely(pool->alloc.count > 0)) { in __page_pool_alloc_pages_slow()
564 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
565 alloc_stat_inc(pool, slow); in __page_pool_alloc_pages_slow()
577 netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_netmem() argument
582 netmem = __page_pool_get_cached(pool); in page_pool_alloc_netmem()
587 if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv) in page_pool_alloc_netmem()
588 netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp); in page_pool_alloc_netmem()
590 netmem = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_netmem()
595 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages() argument
597 return netmem_to_page(page_pool_alloc_netmem(pool, gfp)); in page_pool_alloc_pages()
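
page_pool_alloc_netmem()/page_pool_alloc_pages() above try the per-CPU alloc cache first, then the ptr_ring refill, then __page_pool_alloc_pages_slow(). From NAPI context drivers normally pass GFP_ATOMIC. A refill-loop sketch with hypothetical mydrv_* helpers:

static void mydrv_refill_rx(struct mydrv_rxq *rxq, int budget)
{
	while (budget--) {
		struct page *page;

		/* Fast path is pool->alloc.cache; slow path hits the page allocator. */
		page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
		if (!page)
			break;

		mydrv_post_rx_buffer(rxq, page);
	}
}
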
607 s32 page_pool_inflight(const struct page_pool *pool, bool strict) in page_pool_inflight() argument
609 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); in page_pool_inflight()
610 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); in page_pool_inflight()
616 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
626 void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem) in page_pool_set_pp_info() argument
628 netmem_set_pp(netmem, pool); in page_pool_set_pp_info()
638 if (pool->has_init_callback) in page_pool_set_pp_info()
639 pool->slow.init_callback(netmem, pool->slow.init_arg); in page_pool_set_pp_info()
648 static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, in __page_pool_release_page_dma() argument
653 if (!pool->dma_map) in __page_pool_release_page_dma()
662 dma_unmap_page_attrs(pool->p.dev, dma, in __page_pool_release_page_dma()
663 PAGE_SIZE << pool->p.order, pool->p.dma_dir, in __page_pool_release_page_dma()
673 void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) in page_pool_return_page() argument
679 if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv) in page_pool_return_page()
680 put = mp_dmabuf_devmem_release_page(pool, netmem); in page_pool_return_page()
682 __page_pool_release_page_dma(pool, netmem); in page_pool_return_page()
687 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt); in page_pool_return_page()
688 trace_page_pool_state_release(pool, netmem, count); in page_pool_return_page()
700 static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem) in page_pool_recycle_in_ring() argument
705 ret = ptr_ring_produce(&pool->ring, (__force void *)netmem); in page_pool_recycle_in_ring()
707 ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem); in page_pool_recycle_in_ring()
710 recycle_stat_inc(pool, ring); in page_pool_recycle_in_ring()
723 struct page_pool *pool) in page_pool_recycle_in_cache() argument
725 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) { in page_pool_recycle_in_cache()
726 recycle_stat_inc(pool, cache_full); in page_pool_recycle_in_cache()
731 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_recycle_in_cache()
732 recycle_stat_inc(pool, cached); in page_pool_recycle_in_cache()
750 __page_pool_put_page(struct page_pool *pool, netmem_ref netmem, in __page_pool_put_page() argument
767 page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); in __page_pool_put_page()
769 if (allow_direct && page_pool_recycle_in_cache(netmem, pool)) in __page_pool_put_page()
789 recycle_stat_inc(pool, released_refcnt); in __page_pool_put_page()
790 page_pool_return_page(pool, netmem); in __page_pool_put_page()
795 static bool page_pool_napi_local(const struct page_pool *pool) in page_pool_napi_local() argument
810 if (READ_ONCE(pool->cpuid) == cpuid) in page_pool_napi_local()
813 napi = READ_ONCE(pool->p.napi); in page_pool_napi_local()
818 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem, in page_pool_put_unrefed_netmem() argument
822 allow_direct = page_pool_napi_local(pool); in page_pool_put_unrefed_netmem()
825 __page_pool_put_page(pool, netmem, dma_sync_size, allow_direct); in page_pool_put_unrefed_netmem()
826 if (netmem && !page_pool_recycle_in_ring(pool, netmem)) { in page_pool_put_unrefed_netmem()
828 recycle_stat_inc(pool, ring_full); in page_pool_put_unrefed_netmem()
829 page_pool_return_page(pool, netmem); in page_pool_put_unrefed_netmem()
834 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page, in page_pool_put_unrefed_page() argument
837 page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size, in page_pool_put_unrefed_page()
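
On the return path a driver hands pages back to the pool instead of calling put_page(); page_pool_napi_local() above lets the core promote a return to the lockless per-CPU cache when it runs on the pool's own NAPI/CPU. A sketch of the two driver-visible variants, mydrv_* hypothetical:

#include <net/page_pool/helpers.h>

static void mydrv_free_rx_page(struct mydrv_rxq *rxq, struct page *page,
			       bool napi_ctx)
{
	if (napi_ctx)
		/* Only from the pool's own NAPI context: goes to pool->alloc.cache. */
		page_pool_recycle_direct(rxq->page_pool, page);
	else
		/* Generic return; -1 internally means "sync the whole max_len area". */
		page_pool_put_full_page(rxq->page_pool, page, false);
}
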
857 void page_pool_put_page_bulk(struct page_pool *pool, void **data, in page_pool_put_page_bulk() argument
864 allow_direct = page_pool_napi_local(pool); in page_pool_put_page_bulk()
873 netmem = __page_pool_put_page(pool, netmem, -1, allow_direct); in page_pool_put_page_bulk()
883 in_softirq = page_pool_producer_lock(pool); in page_pool_put_page_bulk()
885 if (__ptr_ring_produce(&pool->ring, data[i])) { in page_pool_put_page_bulk()
887 recycle_stat_inc(pool, ring_full); in page_pool_put_page_bulk()
891 recycle_stat_add(pool, ring, i); in page_pool_put_page_bulk()
892 page_pool_producer_unlock(pool, in_softirq); in page_pool_put_page_bulk()
902 page_pool_return_page(pool, (__force netmem_ref)data[i]); in page_pool_put_page_bulk()
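
page_pool_put_page_bulk() takes the ring's producer lock once for a whole array; it is normally reached through the XDP bulk-return helpers rather than called directly. A TX-completion sketch under the assumption that the frames came from a MEM_TYPE_PAGE_POOL queue; mydrv_* and txq->xdpf[] are hypothetical:

#include <net/xdp.h>

static void mydrv_clean_xdp_tx(struct mydrv_txq *txq, int done)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();	/* bulk return may look up the mem allocator */

	for (i = 0; i < done; i++)
		/* Batches frames and flushes through page_pool_put_page_bulk(). */
		xdp_return_frame_bulk(txq->xdpf[i], &bq);

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();
}
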
906 static netmem_ref page_pool_drain_frag(struct page_pool *pool, in page_pool_drain_frag() argument
909 long drain_count = BIAS_MAX - pool->frag_users; in page_pool_drain_frag()
916 page_pool_dma_sync_for_device(pool, netmem, -1); in page_pool_drain_frag()
920 page_pool_return_page(pool, netmem); in page_pool_drain_frag()
924 static void page_pool_free_frag(struct page_pool *pool) in page_pool_free_frag() argument
926 long drain_count = BIAS_MAX - pool->frag_users; in page_pool_free_frag()
927 netmem_ref netmem = pool->frag_page; in page_pool_free_frag()
929 pool->frag_page = 0; in page_pool_free_frag()
934 page_pool_return_page(pool, netmem); in page_pool_free_frag()
937 netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool, in page_pool_alloc_frag_netmem() argument
941 unsigned int max_size = PAGE_SIZE << pool->p.order; in page_pool_alloc_frag_netmem()
942 netmem_ref netmem = pool->frag_page; in page_pool_alloc_frag_netmem()
948 *offset = pool->frag_offset; in page_pool_alloc_frag_netmem()
951 netmem = page_pool_drain_frag(pool, netmem); in page_pool_alloc_frag_netmem()
953 recycle_stat_inc(pool, cached); in page_pool_alloc_frag_netmem()
954 alloc_stat_inc(pool, fast); in page_pool_alloc_frag_netmem()
960 netmem = page_pool_alloc_netmem(pool, gfp); in page_pool_alloc_frag_netmem()
962 pool->frag_page = 0; in page_pool_alloc_frag_netmem()
966 pool->frag_page = netmem; in page_pool_alloc_frag_netmem()
969 pool->frag_users = 1; in page_pool_alloc_frag_netmem()
971 pool->frag_offset = size; in page_pool_alloc_frag_netmem()
976 pool->frag_users++; in page_pool_alloc_frag_netmem()
977 pool->frag_offset = *offset + size; in page_pool_alloc_frag_netmem()
982 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, in page_pool_alloc_frag() argument
985 return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size, in page_pool_alloc_frag()
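
The frag API above packs several sub-page buffers into one pool page, using frag_users as a bias refcount and frag_offset as the bump pointer. A sketch that carves 2 KB slices, with mydrv_* hypothetical:

static int mydrv_alloc_rx_frag(struct mydrv_rxq *rxq)
{
	unsigned int offset;
	struct page *page;

	/* Keeps slicing pool->frag_page until frag_offset would overflow it. */
	page = page_pool_dev_alloc_frag(rxq->page_pool, &offset, 2048);
	if (!page)
		return -ENOMEM;

	mydrv_post_rx_frag(rxq, page, offset);
	return 0;
}
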
990 static void page_pool_empty_ring(struct page_pool *pool) in page_pool_empty_ring() argument
995 while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
1001 page_pool_return_page(pool, netmem); in page_pool_empty_ring()
1005 static void __page_pool_destroy(struct page_pool *pool) in __page_pool_destroy() argument
1007 if (pool->disconnect) in __page_pool_destroy()
1008 pool->disconnect(pool); in __page_pool_destroy()
1010 page_pool_unlist(pool); in __page_pool_destroy()
1011 page_pool_uninit(pool); in __page_pool_destroy()
1013 if (pool->mp_priv) { in __page_pool_destroy()
1014 mp_dmabuf_devmem_destroy(pool); in __page_pool_destroy()
1018 kfree(pool); in __page_pool_destroy()
1021 static void page_pool_empty_alloc_cache_once(struct page_pool *pool) in page_pool_empty_alloc_cache_once() argument
1025 if (pool->destroy_cnt) in page_pool_empty_alloc_cache_once()
1032 while (pool->alloc.count) { in page_pool_empty_alloc_cache_once()
1033 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
1034 page_pool_return_page(pool, netmem); in page_pool_empty_alloc_cache_once()
1038 static void page_pool_scrub(struct page_pool *pool) in page_pool_scrub() argument
1040 page_pool_empty_alloc_cache_once(pool); in page_pool_scrub()
1041 pool->destroy_cnt++; in page_pool_scrub()
1046 page_pool_empty_ring(pool); in page_pool_scrub()
1049 static int page_pool_release(struct page_pool *pool) in page_pool_release() argument
1053 page_pool_scrub(pool); in page_pool_release()
1054 inflight = page_pool_inflight(pool, true); in page_pool_release()
1056 __page_pool_destroy(pool); in page_pool_release()
1064 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); in page_pool_release_retry() local
1068 inflight = page_pool_release(pool); in page_pool_release_retry()
1073 netdev = READ_ONCE(pool->slow.netdev); in page_pool_release_retry()
1074 if (time_after_eq(jiffies, pool->defer_warn) && in page_pool_release_retry()
1076 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ; in page_pool_release_retry()
1079 __func__, pool->user.id, inflight, sec); in page_pool_release_retry()
1080 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_release_retry()
1084 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_release_retry()
1087 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), in page_pool_use_xdp_mem() argument
1090 refcount_inc(&pool->user_cnt); in page_pool_use_xdp_mem()
1091 pool->disconnect = disconnect; in page_pool_use_xdp_mem()
1092 pool->xdp_mem_id = mem->id; in page_pool_use_xdp_mem()
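
page_pool_use_xdp_mem() is not called by drivers directly; it runs when an RX queue registers the pool as its XDP memory model, which also installs the disconnect callback used by __page_pool_destroy(). A registration sketch, mydrv_* hypothetical:

#include <net/xdp.h>

static int mydrv_reg_xdp_mem(struct mydrv_rxq *rxq, struct net_device *dev, u32 qid)
{
	int err;

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, dev, qid, 0);
	if (err)
		return err;

	/* Ends up taking a pool reference via page_pool_use_xdp_mem(). */
	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	return err;
}
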
1095 void page_pool_disable_direct_recycling(struct page_pool *pool) in page_pool_disable_direct_recycling() argument
1100 WRITE_ONCE(pool->cpuid, -1); in page_pool_disable_direct_recycling()
1102 if (!pool->p.napi) in page_pool_disable_direct_recycling()
1108 WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state)); in page_pool_disable_direct_recycling()
1109 WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1); in page_pool_disable_direct_recycling()
1111 WRITE_ONCE(pool->p.napi, NULL); in page_pool_disable_direct_recycling()
1115 void page_pool_destroy(struct page_pool *pool) in page_pool_destroy() argument
1117 if (!pool) in page_pool_destroy()
1120 if (!page_pool_put(pool)) in page_pool_destroy()
1123 page_pool_disable_direct_recycling(pool); in page_pool_destroy()
1124 page_pool_free_frag(pool); in page_pool_destroy()
1126 if (!page_pool_release(pool)) in page_pool_destroy()
1129 page_pool_detached(pool); in page_pool_destroy()
1130 pool->defer_start = jiffies; in page_pool_destroy()
1131 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_destroy()
1133 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry); in page_pool_destroy()
1134 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_destroy()
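
Teardown in a driver mirrors the above: stop producing, drop the XDP memory-model reference, then call page_pool_destroy(), which scrubs the caches and, if pages are still in flight, defers the final free to page_pool_release_retry(). A sketch, mydrv_* hypothetical:

static void mydrv_close_rxq(struct mydrv_rxq *rxq)
{
	/* Releases the reference taken through page_pool_use_xdp_mem(). */
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	/* NULL-safe; in-flight pages keep the pool alive via the deferred work. */
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
}
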
1139 void page_pool_update_nid(struct page_pool *pool, int new_nid) in page_pool_update_nid() argument
1143 trace_page_pool_update_nid(pool, new_nid); in page_pool_update_nid()
1144 pool->p.nid = new_nid; in page_pool_update_nid()
1147 while (pool->alloc.count) { in page_pool_update_nid()
1148 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
1149 page_pool_return_page(pool, netmem); in page_pool_update_nid()