xref: /linux/net/shaper/shaper.c (revision 4b623f9f0f59652ea71fcb27d60b4c3b65126dbb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/bits.h>
4 #include <linux/bitfield.h>
5 #include <linux/idr.h>
6 #include <linux/kernel.h>
7 #include <linux/netdevice.h>
8 #include <linux/netlink.h>
9 #include <linux/skbuff.h>
10 #include <linux/xarray.h>
11 #include <net/devlink.h>
12 #include <net/net_shaper.h>
13 
14 #include "shaper_nl_gen.h"
15 
16 #include "../core/dev.h"
17 
18 #define NET_SHAPER_SCOPE_SHIFT	26
19 #define NET_SHAPER_ID_MASK	GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
20 #define NET_SHAPER_SCOPE_MASK	GENMASK(31, NET_SHAPER_SCOPE_SHIFT)
21 
22 #define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK
23 
/* Per-binding set of shapers; entries are indexed by the scope/id pair
 * encoded via net_shaper_handle_to_index().
 */
struct net_shaper_hierarchy {
	struct xarray shapers;
};
27 
/* Per netlink operation context, stored in info->ctx / cb->ctx.
 * @binding: the device targeted by the current operation
 * @dev_tracker: tracks the reference held on @binding's netdev
 * @start_index: dump resume point, persists across dumpit invocations
 */
struct net_shaper_nl_ctx {
	struct net_shaper_binding binding;
	netdevice_tracker dev_tracker;
	unsigned long start_index;
};
33 
34 static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
35 {
36 	return &((struct net_shaper_nl_ctx *)ctx)->binding;
37 }
38 
39 static struct net_shaper_hierarchy *
40 net_shaper_hierarchy(struct net_shaper_binding *binding)
41 {
42 	/* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */
43 	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
44 		return READ_ONCE(binding->netdev->net_shaper_hierarchy);
45 
46 	/* No other type supported yet. */
47 	return NULL;
48 }
49 
50 static int net_shaper_fill_binding(struct sk_buff *msg,
51 				   const struct net_shaper_binding *binding,
52 				   u32 type)
53 {
54 	/* Should never happen, as currently only NETDEV is supported. */
55 	if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
56 		return -EINVAL;
57 
58 	if (nla_put_u32(msg, type, binding->netdev->ifindex))
59 		return -EMSGSIZE;
60 
61 	return 0;
62 }
63 
64 static int net_shaper_fill_handle(struct sk_buff *msg,
65 				  const struct net_shaper_handle *handle,
66 				  u32 type)
67 {
68 	struct nlattr *handle_attr;
69 
70 	if (handle->scope == NET_SHAPER_SCOPE_UNSPEC)
71 		return 0;
72 
73 	handle_attr = nla_nest_start(msg, type);
74 	if (!handle_attr)
75 		return -EMSGSIZE;
76 
77 	if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) ||
78 	    (handle->scope >= NET_SHAPER_SCOPE_QUEUE &&
79 	     nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id)))
80 		goto handle_nest_cancel;
81 
82 	nla_nest_end(msg, handle_attr);
83 	return 0;
84 
85 handle_nest_cancel:
86 	nla_nest_cancel(msg, handle_attr);
87 	return -EMSGSIZE;
88 }
89 
/* Dump a single shaper into @msg, wrapped into its own genetlink
 * message header. Zero-valued attributes are omitted; the metric is
 * emitted only when at least one of bw_min/bw_max/burst is set.
 */
static int
net_shaper_fill_one(struct sk_buff *msg,
		    const struct net_shaper_binding *binding,
		    const struct net_shaper *shaper,
		    const struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	/* Short-circuit evaluation: the first failing put aborts the
	 * whole chain and cancels the partially built message.
	 */
	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, &shaper->parent,
				   NET_SHAPER_A_PARENT) ||
	    net_shaper_fill_handle(msg, &shaper->handle,
				   NET_SHAPER_A_HANDLE) ||
	    ((shaper->bw_min || shaper->bw_max || shaper->burst) &&
	     nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) ||
	    (shaper->bw_min &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) ||
	    (shaper->bw_max &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) ||
	    (shaper->burst &&
	     nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) ||
	    (shaper->priority &&
	     nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) ||
	    (shaper->weight &&
	     nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight)))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
129 
130 /* Initialize the context fetching the relevant device and
131  * acquiring a reference to it.
132  */
133 static int net_shaper_ctx_setup(const struct genl_info *info, int type,
134 				struct net_shaper_nl_ctx *ctx)
135 {
136 	struct net *ns = genl_info_net(info);
137 	struct net_device *dev;
138 	int ifindex;
139 
140 	if (GENL_REQ_ATTR_CHECK(info, type))
141 		return -EINVAL;
142 
143 	ifindex = nla_get_u32(info->attrs[type]);
144 	dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
145 	if (!dev) {
146 		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
147 		return -ENOENT;
148 	}
149 
150 	if (!dev->netdev_ops->net_shaper_ops) {
151 		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
152 		netdev_put(dev, &ctx->dev_tracker);
153 		return -EOPNOTSUPP;
154 	}
155 
156 	ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
157 	ctx->binding.netdev = dev;
158 	return 0;
159 }
160 
161 static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx)
162 {
163 	if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
164 		netdev_put(ctx->binding.netdev, &ctx->dev_tracker);
165 }
166 
/* Pack a handle into the xarray index: the scope occupies the top bits
 * (above NET_SHAPER_SCOPE_SHIFT), the id the remaining low bits.
 */
static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle)
{
	return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) |
		FIELD_PREP(NET_SHAPER_ID_MASK, handle->id);
}
172 
173 static struct net_shaper *
174 net_shaper_lookup(struct net_shaper_binding *binding,
175 		  const struct net_shaper_handle *handle)
176 {
177 	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
178 	u32 index = net_shaper_handle_to_index(handle);
179 
180 	return hierarchy ? xa_load(&hierarchy->shapers, index) : NULL;
181 }
182 
183 static int net_shaper_parse_handle(const struct nlattr *attr,
184 				   const struct genl_info *info,
185 				   struct net_shaper_handle *handle)
186 {
187 	struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
188 	struct nlattr *id_attr;
189 	u32 id = 0;
190 	int ret;
191 
192 	ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
193 			       net_shaper_handle_nl_policy, info->extack);
194 	if (ret < 0)
195 		return ret;
196 
197 	if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
198 			      NET_SHAPER_A_HANDLE_SCOPE))
199 		return -EINVAL;
200 
201 	handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);
202 
203 	/* The default id for NODE scope shapers is an invalid one
204 	 * to help the 'group' operation discriminate between new
205 	 * NODE shaper creation (ID_UNSPEC) and reuse of existing
206 	 * shaper (any other value).
207 	 */
208 	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
209 	if (id_attr)
210 		id = nla_get_u32(id_attr);
211 	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
212 		id = NET_SHAPER_ID_UNSPEC;
213 
214 	handle->id = id;
215 	return 0;
216 }
217 
/* Common pre-op hook: set up the per-request context inside the space
 * the genetlink core reserves in info->ctx.
 */
static int net_shaper_generic_pre(struct genl_info *info, int type)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;

	/* Our context must fit into the genetlink-provided scratch area. */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));

	return net_shaper_ctx_setup(info, type, ctx);
}
226 
/* Generated-ops doit pre-hook: resolve the target device from the
 * ifindex attribute.
 */
int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX);
}
232 
/* Common post-op hook: drop the reference taken at pre-op time. */
static void net_shaper_generic_post(struct genl_info *info)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx);
}
237 
/* Generated-ops doit post-hook: release the device reference. */
void net_shaper_nl_post_doit(const struct genl_split_ops *ops,
			     struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}
243 
/* Dump pre-hook: the context lives in cb->ctx and persists across the
 * whole (possibly multi-call) dump.
 */
int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);

	return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx);
}
251 
/* Dump post-hook: release the device reference taken at dump start. */
int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
	return 0;
}
257 
/* GET: reply with the single shaper identified by the handle attribute.
 * Returns -ENOENT when no such shaper exists in the hierarchy.
 */
int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* The RCU read side spans both the lookup and the fill, so the
	 * shaper can't be freed while we access it.
	 */
	rcu_read_lock();
	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_HANDLE]);
		rcu_read_unlock();
		ret = -ENOENT;
		goto free_msg;
	}

	ret = net_shaper_fill_one(msg, binding, shaper, info);
	rcu_read_unlock();
	if (ret)
		goto free_msg;

	/* On success genlmsg_reply() consumes the message. */
	ret = genlmsg_reply(msg, info);
	if (ret)
		goto free_msg;

	return 0;

free_msg:
	nlmsg_free(msg);
	return ret;
}
304 
/* GET dump: emit every shaper in the device's hierarchy.
 * ctx->start_index is advanced as entries are emitted, so a later
 * invocation resumes after the last shaper that fit into @skb.
 */
int net_shaper_nl_get_dumpit(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper *shaper;
	int ret = 0;

	/* Don't error out dumps performed before any set operation. */
	binding = net_shaper_binding_from_ctx(ctx);
	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy)
		return 0;

	rcu_read_lock();
	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
332 
/* SET: not implemented yet. */
int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	return -EOPNOTSUPP;
}
337 
/* DELETE: not implemented yet. */
int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
{
	return -EOPNOTSUPP;
}
342 
/* Free the whole hierarchy bound to @binding, including every shaper
 * it contains. No-op when no hierarchy exists.
 */
static void net_shaper_flush(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	/* Erase under the xarray lock; __xa_erase() is the lock-held
	 * variant of xa_erase().
	 */
	xa_lock(&hierarchy->shapers);
	xa_for_each(&hierarchy->shapers, index, cur) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
	kfree(hierarchy);
}
360 
/* Release all the shaper state attached to @dev; called by the core
 * on device teardown (external entry point, see net_shaper.h).
 */
void net_shaper_flush_netdev(struct net_device *dev)
{
	/* Build an on-stack binding wrapping the device. */
	struct net_shaper_binding binding = {
		.type = NET_SHAPER_BINDING_TYPE_NETDEV,
		.netdev = dev,
	};

	net_shaper_flush(&binding);
}
370 
/* Register the net-shaper generic netlink family at boot. */
static int __init shaper_init(void)
{
	return genl_register_family(&net_shaper_nl_family);
}

subsys_initcall(shaper_init);
377