xref: /linux/net/core/page_pool_user.c (revision 4e887471e8e3a513607495d18333c44f59a82c5a)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <net/net_debug.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/sock.h>

#include "page_pool_priv.h"
#include "netdev-genl-gen.h"

static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
 * Ordering: inside rtnl_lock
 */
static DEFINE_MUTEX(page_pools_lock);

/* Page pools are only reachable from user space (via netlink) if they are
 * linked to a netdev at creation time. The following page pool "visibility"
 * states are possible:
 *  - normal
 *    - user.list: linked to real netdev, netdev: real netdev
 *  - orphaned - real netdev has disappeared
 *    - user.list: linked to lo, netdev: lo
 *  - invisible - either (a) created without netdev linking, (b) unlisted due
 *      to error, or (c) the entire namespace which owned this pool disappeared
 *    - user.list: unhashed, netdev: unknown
 */
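/* Illustrative only: a pool becomes visible to this interface when the
 * driver links it to a netdev (and optionally a NAPI) at creation time.
 * A minimal sketch, with "ring_size", "netdev" and "rx_napi" standing in
 * for hypothetical driver state:
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.pool_size	= ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.netdev		= netdev,
 *		.napi		= &rx_napi,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 * Without .netdev the pool is created normally but stays "invisible" to
 * the netlink GET/DUMP handlers and notifications below.
 */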
typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
			     const struct genl_info *info);

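/* Look up a single pool by ID and send one reply message built by @fill.
 * Fails with -ENOENT if the pool is unlisted or belongs to another netns.
 */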
static int
netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
{
	struct page_pool *pool;
	struct sk_buff *rsp;
	int err;

	mutex_lock(&page_pools_lock);
	pool = xa_load(&page_pools, id);
	if (!pool || hlist_unhashed(&pool->user.list) ||
	    !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
		err = -ENOENT;
		goto err_unlock;
	}

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = fill(rsp, pool, info);
	if (err)
		goto err_free_msg;

	mutex_unlock(&page_pools_lock);

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}

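/* Dump resume state kept in netlink_callback->ctx across dump messages:
 * the netdev iterator position and the last page pool ID visited.
 */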
struct page_pool_dump_cb {
	unsigned long ifindex;
	u32 pp_id;
};

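/* Walk all netdevs in the caller's netns and emit one entry per listed
 * pool via @fill, resuming from the position saved in @cb->ctx when the
 * previous skb filled up.
 */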
static int
netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
			     pp_nl_fill_cb fill)
{
	struct page_pool_dump_cb *state = (void *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct page_pool *pool;
	int err = 0;

	rtnl_lock();
	mutex_lock(&page_pools_lock);
	for_each_netdev_dump(net, netdev, state->ifindex) {
		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
			if (state->pp_id && state->pp_id < pool->user.id)
				continue;

			state->pp_id = pool->user.id;
			err = fill(skb, pool, info);
			if (err)
				goto out;
		}

		state->pp_id = 0;
	}
out:
	mutex_unlock(&page_pools_lock);
	rtnl_unlock();

	return err;
}

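/* Fill a PAGE_POOL_STATS_GET message: pool identity in a nested
 * PAGE_POOL_STATS_INFO attribute, followed by the allocation and recycle
 * counters. Only built when CONFIG_PAGE_POOL_STATS is enabled.
 */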
static int
page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
			const struct genl_info *info)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	struct nlattr *nest;
	void *hdr;

	if (!page_pool_get_stats(pool, &stats))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			 pool->slow.netdev->ifindex)))
		goto err_cancel_nest;

	nla_nest_end(rsp, nest);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
			 stats.alloc_stats.fast) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
			 stats.alloc_stats.slow) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
			 stats.alloc_stats.slow_high_order) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
			 stats.alloc_stats.empty) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
			 stats.alloc_stats.refill) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
			 stats.alloc_stats.waive) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
			 stats.recycle_stats.cached) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
			 stats.recycle_stats.cache_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
			 stats.recycle_stats.ring) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
			 stats.recycle_stats.ring_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
			 stats.recycle_stats.released_refcnt))
		goto err_cancel_msg;

	genlmsg_end(rsp, hdr);

	return 0;
err_cancel_nest:
	nla_nest_cancel(rsp, nest);
err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
#else
	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
	return -EOPNOTSUPP;
#endif
}

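/* PAGE_POOL_STATS_GET doit: parse the nested INFO attribute, require a
 * pool ID (selection by ifindex is not supported) and reply with the
 * stats for that pool.
 */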
int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
	struct nlattr *nest;
	int err;
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
		return -EINVAL;

	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
			       netdev_page_pool_info_nl_policy,
			       info->extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;
	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
				    "selecting by ifindex not supported");
		return -EINVAL;
	}

	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
}

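/* PAGE_POOL_STATS_GET dumpit: stats for every listed pool in the netns. */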
int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
					 struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
}

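/* Fill a PAGE_POOL_GET message: ID, ifindex (unless orphaned to loopback),
 * NAPI ID, pages and memory still in flight, and the detach time if the
 * pool has already been detached from its driver.
 */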
static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
		  const struct genl_info *info)
{
	size_t inflight, refsz;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
		goto err_cancel;

	if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			pool->slow.netdev->ifindex))
		goto err_cancel;
	if (pool->user.napi_id &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
		goto err_cancel;

	inflight = page_pool_inflight(pool, false);
	refsz = PAGE_SIZE << pool->p.order;
	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
			 inflight * refsz))
		goto err_cancel;
	if (pool->user.detach_time &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
			 pool->user.detach_time))
		goto err_cancel;

	genlmsg_end(rsp, hdr);

	return 0;
err_cancel:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

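/* Send a page pool notification (@cmd) to the PAGE_POOL multicast group
 * of the pool's netns. No-op for invisible pools or when nobody listens.
 */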
static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;
	struct net *net;

	lockdep_assert_held(&page_pools_lock);

	/* 'invisible' page pools don't matter */
	if (hlist_unhashed(&pool->user.list))
		return;
	net = dev_net(pool->slow.netdev);

	if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (page_pool_nl_fill(ntf, pool, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
				0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
}

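/* PAGE_POOL_GET doit: reply with the state of one pool selected by ID. */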
int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;

	id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
}

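/* PAGE_POOL_GET dumpit: dump every listed pool in the netns. */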
int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
}

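/* Register a newly created pool: assign a user-visible ID and, if it was
 * created with a netdev, link it to that netdev's list and announce it.
 */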
int page_pool_list(struct page_pool *pool)
{
	static u32 id_alloc_next;
	int err;

	mutex_lock(&page_pools_lock);
	err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
			      &id_alloc_next, GFP_KERNEL);
	if (err < 0)
		goto err_unlock;

	INIT_HLIST_NODE(&pool->user.list);
	if (pool->slow.netdev) {
		hlist_add_head(&pool->user.list,
			       &pool->slow.netdev->page_pools);
		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;

		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
	}

	mutex_unlock(&page_pools_lock);
	return 0;

err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}

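/* Record the time the pool was detached from its driver and notify user
 * space; the pool itself may linger until all in-flight pages are returned.
 */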
void page_pool_detached(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	pool->user.detach_time = ktime_get_boottime_seconds();
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
	mutex_unlock(&page_pools_lock);
}

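/* Final teardown of the user-facing state: send a DEL notification, drop
 * the pool from the ID xarray and unlink it from the netdev list.
 */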
void page_pool_unlist(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
	xa_erase(&page_pools, pool->user.id);
	if (!hlist_unhashed(&pool->user.list))
		hlist_del(&pool->user.list);
	mutex_unlock(&page_pools_lock);
}

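/* Loopback itself is going away (netns dismantle): unlist every remaining
 * pool and poison its netdev pointer, making the pools invisible.
 */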
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
	struct page_pool *pool;
	struct hlist_node *n;

	mutex_lock(&page_pools_lock);
	hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
		hlist_del_init(&pool->user.list);
		pool->slow.netdev = NET_PTR_POISON;
	}
	mutex_unlock(&page_pools_lock);
}

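/* A real netdev is unregistering: re-home its remaining pools onto the
 * netns loopback device so they stay visible as "orphaned", and notify
 * user space about each of them.
 */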
static void page_pool_unreg_netdev(struct net_device *netdev)
{
	struct page_pool *pool, *last;
	struct net_device *lo;

	lo = dev_net(netdev)->loopback_dev;

	mutex_lock(&page_pools_lock);
	last = NULL;
	hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
		pool->slow.netdev = lo;
		netdev_nl_page_pool_event(pool,
					  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
		last = pool;
	}
	if (last)
		hlist_splice_init(&netdev->page_pools, &last->user.list,
				  &lo->page_pools);
	mutex_unlock(&page_pools_lock);
}

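/* Netdevice notifier: on NETDEV_UNREGISTER, orphan the device's pools to
 * loopback, or wipe them entirely when loopback itself is unregistering.
 */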
static int
page_pool_netdevice_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	if (hlist_empty(&netdev->page_pools))
		return NOTIFY_OK;

	if (netdev->ifindex != LOOPBACK_IFINDEX)
		page_pool_unreg_netdev(netdev);
	else
		page_pool_unreg_netdev_wipe(netdev);
	return NOTIFY_OK;
}

static struct notifier_block page_pool_netdevice_nb = {
	.notifier_call = page_pool_netdevice_event,
};

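/* Register the netdevice notifier above early in boot. */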
static int __init page_pool_user_init(void)
{
	return register_netdevice_notifier(&page_pool_netdevice_nb);
}

subsys_initcall(page_pool_user_init);