157afb483SPavel Begunkov /* SPDX-License-Identifier: GPL-2.0 */
257afb483SPavel Begunkov #ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H
357afb483SPavel Begunkov #define _NET_PAGE_POOL_MEMORY_PROVIDER_H
457afb483SPavel Begunkov
557afb483SPavel Begunkov #include <net/netmem.h>
657afb483SPavel Begunkov #include <net/page_pool/types.h>
757afb483SPavel Begunkov
82508a46fSPavel Begunkov struct netdev_rx_queue;
9*ec304b70SJakub Kicinski struct netlink_ext_ack;
102508a46fSPavel Begunkov struct sk_buff;
112508a46fSPavel Begunkov
/**
 * struct memory_provider_ops - operations implemented by a page pool
 *	memory provider backend
 * @alloc_netmems:	allocate a netmem for @pool under the @gfp allocation
 *			context (exact contract defined by the page pool core;
 *			not visible in this header — confirm against callers)
 * @release_netmem:	hand @netmem back to the provider; returns a bool,
 *			presumably whether the provider consumed/recycled it —
 *			TODO confirm against page pool core
 * @init:		provider setup hook for @pool; returns 0 or a negative
 *			errno by kernel convention
 * @destroy:		provider teardown hook for @pool
 * @nl_fill:		fill a netlink response @rsp describing the provider
 *			state (@mp_priv) for RX queue @rxq
 * @uninstall:		detach the provider's private state @mp_priv from @rxq
 */
struct memory_provider_ops {
	netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp);
	bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem);
	int (*init)(struct page_pool *pool);
	void (*destroy)(struct page_pool *pool);
	int (*nl_fill)(void *mp_priv, struct sk_buff *rsp,
		       struct netdev_rx_queue *rxq);
	void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
};
2157afb483SPavel Begunkov
/*
 * Helpers for providers to manage net_iov state: set a niov's DMA address
 * (bool return presumably signals an invalid/unencodable address — TODO
 * confirm), and bind/unbind a niov to/from a page pool.
 */
bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
void net_mp_niov_clear_page_pool(struct net_iov *niov);
2556102c01SPavel Begunkov
/*
 * Install a memory provider described by @p on RX queue @ifq_idx of @dev,
 * and remove it again. The double-underscore variants take const params and
 * (for open) report errors via @extack; presumably they expect the caller to
 * hold the required locks — TODO confirm against the definitions.
 *
 * Note: spelled "unsigned int" (not bare "unsigned") per kernel style, to
 * match the __net_mp_*() prototypes below.
 */
int net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
		    struct pp_memory_provider_params *p);
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack);
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p);
void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
			const struct pp_memory_provider_params *old_p);
356e18ed92SDavid Wei
3656102c01SPavel Begunkov /**
3756102c01SPavel Begunkov * net_mp_netmem_place_in_cache() - give a netmem to a page pool
3856102c01SPavel Begunkov * @pool: the page pool to place the netmem into
3956102c01SPavel Begunkov * @netmem: netmem to give
4056102c01SPavel Begunkov *
4156102c01SPavel Begunkov * Push an accounted netmem into the page pool's allocation cache. The caller
4256102c01SPavel Begunkov * must ensure that there is space in the cache. It should only be called off
4356102c01SPavel Begunkov * the mp_ops->alloc_netmems() path.
4456102c01SPavel Begunkov */
net_mp_netmem_place_in_cache(struct page_pool * pool,netmem_ref netmem)4556102c01SPavel Begunkov static inline void net_mp_netmem_place_in_cache(struct page_pool *pool,
4656102c01SPavel Begunkov netmem_ref netmem)
4756102c01SPavel Begunkov {
4856102c01SPavel Begunkov pool->alloc.cache[pool->alloc.count++] = netmem;
4956102c01SPavel Begunkov }
5056102c01SPavel Begunkov
5157afb483SPavel Begunkov #endif
52