Lines matching full:pool (identifier occurrences in net/core/page_pool_user.c, Linux kernel)

17 /* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
23 * linked to a netdev at creation time. Following page pool "visibility"
30 * to error, or (c) the entire namespace which owned this pool disappeared
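
The two comment fragments above belong to the file-header documentation of the user-space pool registry. A minimal sketch of the state those comments annotate, reconstructed from the identifiers in the matched lines (the XArray flags and the mutex placement are assumptions about context the listing does not show):

/* Registry of all page pools, keyed by pool->user.id. */
static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);

/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev,
 * pool->user.  A pool is reachable via netlink only while it is
 * hashed on its netdev's ->page_pools list.
 */
static DEFINE_MUTEX(page_pools_lock);
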
34 typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
40 struct page_pool *pool; in netdev_nl_page_pool_get_do() local
45 pool = xa_load(&page_pools, id); in netdev_nl_page_pool_get_do()
46 if (!pool || hlist_unhashed(&pool->user.list) || in netdev_nl_page_pool_get_do()
47 !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) { in netdev_nl_page_pool_get_do()
58 err = fill(rsp, pool, info); in netdev_nl_page_pool_get_do()
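
Pieced together, the do-request path is: look the pool up by ID in the page_pools XArray under the registry lock, reject pools that were never listed (hlist_unhashed()) or that belong to a different netns than the requester, then let the pp_nl_fill_cb serialize it into a freshly allocated reply. A sketch assuming standard genetlink plumbing for the parts the matched lines omit:

static int
netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
{
        struct page_pool *pool;
        struct sk_buff *rsp;
        int err;

        mutex_lock(&page_pools_lock);
        pool = xa_load(&page_pools, id);
        if (!pool || hlist_unhashed(&pool->user.list) ||
            !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
                err = -ENOENT;          /* unknown, unlisted, or wrong netns */
                goto err_unlock;
        }

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp) {
                err = -ENOMEM;
                goto err_unlock;
        }

        err = fill(rsp, pool, info);
        if (err)
                goto err_free_msg;

        mutex_unlock(&page_pools_lock);
        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
err_unlock:
        mutex_unlock(&page_pools_lock);
        return err;
}
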
86 struct page_pool *pool; in netdev_nl_page_pool_get_dump() local
92 hlist_for_each_entry(pool, &netdev->page_pools, user.list) { in netdev_nl_page_pool_get_dump()
93 if (state->pp_id && state->pp_id < pool->user.id) in netdev_nl_page_pool_get_dump()
96 state->pp_id = pool->user.id; in netdev_nl_page_pool_get_dump()
97 err = fill(skb, pool, info); in netdev_nl_page_pool_get_dump()
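
The dump side walks every netdev's ->page_pools list with state->pp_id as a resume cursor. Pools are added at the head of the list (newest, i.e. highest ID, first), so on resume any entry with an ID above the cursor has already been dumped and is skipped; the cursor resets to 0 before moving to the next device. A condensed sketch of the loop, with the surrounding for_each_netdev_dump() walk, locking, and the out label assumed:

        for_each_netdev_dump(net, netdev, state->ifindex) {
                hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
                        if (state->pp_id && state->pp_id < pool->user.id)
                                continue;       /* already dumped earlier */

                        state->pp_id = pool->user.id;
                        err = fill(skb, pool, info);
                        if (err)
                                goto out;       /* resume from pp_id later */
                }
                state->pp_id = 0;
        }
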
112 page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool, in page_pool_nl_stats_fill() argument
120 if (!page_pool_get_stats(pool, &stats)) in page_pool_nl_stats_fill()
129 if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) || in page_pool_nl_stats_fill()
130 (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && in page_pool_nl_stats_fill()
132 pool->slow.netdev->ifindex))) in page_pool_nl_stats_fill()
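
page_pool_nl_stats_fill() only produces output when page_pool_get_stats() can populate the counters (i.e. the kernel has page pool statistics support); pools without stats are skipped silently rather than failing the dump. It then identifies the pool by ID and, unless the pool has been re-homed to loopback (see the unregister path at the end of this listing), the owning ifindex. A sketch of that guard; the return values and the attribute nesting around it are assumptions:

        struct page_pool_stats stats = {};

        if (!page_pool_get_stats(pool, &stats))
                return 0;       /* no stats available: skip, don't fail */

        if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
            (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
             nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
                         pool->slow.netdev->ifindex)))
                return -EMSGSIZE;
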
214 page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool, in page_pool_nl_fill() argument
217 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in page_pool_nl_fill()
225 if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id)) in page_pool_nl_fill()
228 if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && in page_pool_nl_fill()
230 pool->slow.netdev->ifindex)) in page_pool_nl_fill()
232 if (pool->user.napi_id && in page_pool_nl_fill()
233 nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id)) in page_pool_nl_fill()
236 inflight = page_pool_inflight(pool, false); in page_pool_nl_fill()
237 refsz = PAGE_SIZE << pool->p.order; in page_pool_nl_fill()
242 if (pool->user.detach_time && in page_pool_nl_fill()
244 pool->user.detach_time)) in page_pool_nl_fill()
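
page_pool_nl_fill() serializes one pool object: ID always, IFINDEX only when the pool is not parked on loopback, NAPI ID only when known, then in-flight accounting, and a detach timestamp once the driver has released the pool. The in-flight byte count follows from the page count, since each reference covers PAGE_SIZE << pool->p.order bytes. A sketch of that accounting step (the INFLIGHT attribute names are assumptions consistent with the rest of the family):

        inflight = page_pool_inflight(pool, false);
        refsz = PAGE_SIZE << pool->p.order;
        if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
            nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
                         inflight * refsz))
                goto err_cancel;
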
258 static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd) in netdev_nl_page_pool_event() argument
267 if (hlist_unhashed(&pool->user.list)) in netdev_nl_page_pool_event()
269 net = dev_net(pool->slow.netdev); in netdev_nl_page_pool_event()
280 if (page_pool_nl_fill(ntf, pool, &info)) { in netdev_nl_page_pool_event()
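
netdev_nl_page_pool_event() is the helper behind the ADD/CHANGE/DEL notifications seen later in this listing: pools that were never listed generate no events, and the message is multicast only into the netns that owns the pool. A body sketch (cmd is the function's parameter), assuming the generated netdev genetlink family and notification group names:

        struct genl_info info;
        struct sk_buff *ntf;
        struct net *net;

        if (hlist_unhashed(&pool->user.list))
                return;                 /* never user-visible: no event */
        net = dev_net(pool->slow.netdev);

        if (!genl_has_listeners(&netdev_nl_family, net,
                                NETDEV_NLGRP_PAGE_POOL))
                return;

        genl_info_init_ntf(&info, &netdev_nl_family, cmd);

        ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!ntf)
                return;

        if (page_pool_nl_fill(ntf, pool, &info)) {
                nlmsg_free(ntf);
                return;
        }

        genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
                                0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
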
307 int page_pool_list(struct page_pool *pool) in page_pool_list() argument
313 err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b, in page_pool_list()
318 INIT_HLIST_NODE(&pool->user.list); in page_pool_list()
319 if (pool->slow.netdev) { in page_pool_list()
320 hlist_add_head(&pool->user.list, in page_pool_list()
321 &pool->slow.netdev->page_pools); in page_pool_list()
322 pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0; in page_pool_list()
324 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF); in page_pool_list()
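
page_pool_list() runs at pool creation: it cyclically allocates a 32-bit user-visible ID in the page_pools XArray, and only pools created with a netdev get linked onto netdev->page_pools and announced via the ADD notification. A sketch, with the locking and error path filled in by assumption:

int page_pool_list(struct page_pool *pool)
{
        static u32 id_alloc_next;
        int err;

        mutex_lock(&page_pools_lock);
        err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
                              &id_alloc_next, GFP_KERNEL);
        if (err < 0)
                goto err_unlock;

        INIT_HLIST_NODE(&pool->user.list);
        if (pool->slow.netdev) {
                hlist_add_head(&pool->user.list,
                               &pool->slow.netdev->page_pools);
                pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;

                netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
        }

        mutex_unlock(&page_pools_lock);
        return 0;

err_unlock:
        mutex_unlock(&page_pools_lock);
        return err;
}

Pools without a netdev still get an ID but remain unhashed, which is exactly the state the hlist_unhashed() checks earlier in this listing filter out.
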
335 void page_pool_detached(struct page_pool *pool) in page_pool_detached() argument
338 pool->user.detach_time = ktime_get_boottime_seconds(); in page_pool_detached()
339 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF); in page_pool_detached()
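
page_pool_detached() records the moment the driver released the pool without removing it from the registry, so user space can still observe lingering pools that hold in-flight pages; the CHANGE notification carries the new detach_time. A sketch with the registry lock assumed:

void page_pool_detached(struct page_pool *pool)
{
        mutex_lock(&page_pools_lock);
        pool->user.detach_time = ktime_get_boottime_seconds();
        netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
        mutex_unlock(&page_pools_lock);
}
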
343 void page_pool_unlist(struct page_pool *pool) in page_pool_unlist() argument
346 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF); in page_pool_unlist()
347 xa_erase(&page_pools, pool->user.id); in page_pool_unlist()
348 if (!hlist_unhashed(&pool->user.list)) in page_pool_unlist()
349 hlist_del(&pool->user.list); in page_pool_unlist()
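
page_pool_unlist() is the mirror of page_pool_list() and runs at final destruction: notify deletion first, while the pool is still findable, then erase the XArray slot and unlink from the netdev list if the pool was ever hashed there. A sketch with locking assumed:

void page_pool_unlist(struct page_pool *pool)
{
        mutex_lock(&page_pools_lock);
        netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
        xa_erase(&page_pools, pool->user.id);
        if (!hlist_unhashed(&pool->user.list))
                hlist_del(&pool->user.list);
        mutex_unlock(&page_pools_lock);
}
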
357 struct page_pool *pool; in page_pool_check_memory_provider() local
364 hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) { in page_pool_check_memory_provider()
365 if (pool->mp_priv != binding) in page_pool_check_memory_provider()
368 if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) { in page_pool_check_memory_provider()
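
page_pool_check_memory_provider() confirms that a dmabuf memory-provider binding actually came up on the RX queue it was installed on: it scans the device's pools for one carrying that binding whose recorded queue index matches. A condensed sketch of the scan (the found flag and surrounding locking are assumptions):

        bool found = false;

        hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
                if (pool->mp_priv != binding)
                        continue;

                if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
                        found = true;
                        break;
                }
        }
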
379 struct page_pool *pool; in page_pool_unreg_netdev_wipe() local
383 hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) { in page_pool_unreg_netdev_wipe()
384 hlist_del_init(&pool->user.list); in page_pool_unreg_netdev_wipe()
385 pool->slow.netdev = NET_PTR_POISON; in page_pool_unreg_netdev_wipe()
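
When the owning netdev disappears and there is no loopback device to re-home pools to (see the next entry), the pools are simply wiped from user-space visibility and their netdev back-pointer is poisoned so any later dereference fails loudly. Sketch with locking assumed:

static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
        struct page_pool *pool;
        struct hlist_node *n;

        mutex_lock(&page_pools_lock);
        hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
                hlist_del_init(&pool->user.list);
                pool->slow.netdev = NET_PTR_POISON;
        }
        mutex_unlock(&page_pools_lock);
}
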
392 struct page_pool *pool, *last; in page_pool_unreg_netdev() local
399 hlist_for_each_entry(pool, &netdev->page_pools, user.list) { in page_pool_unreg_netdev()
400 pool->slow.netdev = lo; in page_pool_unreg_netdev()
401 netdev_nl_page_pool_event(pool, in page_pool_unreg_netdev()
403 last = pool; in page_pool_unreg_netdev()
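
In the normal case, pools that outlive their netdev are re-parented to the namespace's loopback device, which is why the fill helpers above treat LOOPBACK_IFINDEX as "no longer attached"; each pool gets a CHANGE notification, and last tracks the list tail so the whole chain can be moved onto loopback's list in one operation. A sketch, with the splice helper and locking assumed:

static void page_pool_unreg_netdev(struct net_device *netdev)
{
        struct page_pool *pool, *last;
        struct net_device *lo;

        lo = dev_net(netdev)->loopback_dev;

        mutex_lock(&page_pools_lock);
        last = NULL;
        hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
                pool->slow.netdev = lo;
                netdev_nl_page_pool_event(pool,
                                          NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
                last = pool;
        }
        if (last)       /* splice helper assumed */
                hlist_splice_init(&netdev->page_pools, &last->user.list,
                                  &lo->page_pools);
        mutex_unlock(&page_pools_lock);
}
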