xref: /linux/net/shaper/shaper.c (revision 2c7e63d702f6c4209c5af833308e7fcbc7d4ab17)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/bits.h>
4 #include <linux/bitfield.h>
5 #include <linux/idr.h>
6 #include <linux/kernel.h>
7 #include <linux/netdevice.h>
8 #include <linux/netlink.h>
9 #include <linux/skbuff.h>
10 #include <linux/xarray.h>
11 #include <net/devlink.h>
12 #include <net/net_shaper.h>
13 
14 #include "shaper_nl_gen.h"
15 
16 #include "../core/dev.h"
17 
/* A shaper handle is packed into a single u32 xarray index:
 * the scope occupies the top bits, the per-scope id the low 26 bits.
 */
#define NET_SHAPER_SCOPE_SHIFT	26
#define NET_SHAPER_ID_MASK	GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
#define NET_SHAPER_SCOPE_MASK	GENMASK(31, NET_SHAPER_SCOPE_SHIFT)

/* Sentinel id: for NODE scope it asks for allocation of a fresh id. */
#define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK
23 
/* Per-device shaper container: one entry per existing shaper,
 * indexed by the scope/id-encoded handle (see net_shaper_handle_to_index()).
 */
struct net_shaper_hierarchy {
	struct xarray shapers;
};
27 
/* Per-request netlink context, stored in the genl-provided ctx area. */
struct net_shaper_nl_ctx {
	/* Object (currently only a netdev) the request operates on. */
	struct net_shaper_binding binding;
	/* Tracks the device reference taken in net_shaper_ctx_setup(). */
	netdevice_tracker dev_tracker;
	/* Dump resume point: next xarray index to visit. */
	unsigned long start_index;
};
33 
net_shaper_binding_from_ctx(void * ctx)34 static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
35 {
36 	return &((struct net_shaper_nl_ctx *)ctx)->binding;
37 }
38 
net_shaper_lock(struct net_shaper_binding * binding)39 static void net_shaper_lock(struct net_shaper_binding *binding)
40 {
41 	switch (binding->type) {
42 	case NET_SHAPER_BINDING_TYPE_NETDEV:
43 		netdev_lock(binding->netdev);
44 		break;
45 	}
46 }
47 
net_shaper_unlock(struct net_shaper_binding * binding)48 static void net_shaper_unlock(struct net_shaper_binding *binding)
49 {
50 	switch (binding->type) {
51 	case NET_SHAPER_BINDING_TYPE_NETDEV:
52 		netdev_unlock(binding->netdev);
53 		break;
54 	}
55 }
56 
57 static struct net_shaper_hierarchy *
net_shaper_hierarchy(struct net_shaper_binding * binding)58 net_shaper_hierarchy(struct net_shaper_binding *binding)
59 {
60 	/* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */
61 	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
62 		return READ_ONCE(binding->netdev->net_shaper_hierarchy);
63 
64 	/* No other type supported yet. */
65 	return NULL;
66 }
67 
68 static const struct net_shaper_ops *
net_shaper_ops(struct net_shaper_binding * binding)69 net_shaper_ops(struct net_shaper_binding *binding)
70 {
71 	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
72 		return binding->netdev->netdev_ops->net_shaper_ops;
73 
74 	/* No other type supported yet. */
75 	return NULL;
76 }
77 
/* Count the number of [multi] attributes of the given type.
 * Walks the whole genl message payload; used to size per-request arrays.
 */
static int net_shaper_list_len(struct genl_info *info, int type)
{
	struct nlattr *attr;
	int rem, cnt = 0;

	nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem)
		cnt++;
	return cnt;
}
89 
/* Wire size of a fully populated handle nest: the nest header plus
 * the scope and id u32 attributes.
 */
static int net_shaper_handle_size(void)
{
	return nla_total_size(nla_total_size(sizeof(u32)) +
			      nla_total_size(sizeof(u32)));
}
95 
/* Emit the attribute identifying the bound object (its ifindex).
 * Returns 0 on success, -EMSGSIZE if @msg has no room.
 */
static int net_shaper_fill_binding(struct sk_buff *msg,
				   const struct net_shaper_binding *binding,
				   u32 type)
{
	/* Should never happen, as currently only NETDEV is supported. */
	if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
		return -EINVAL;

	return nla_put_u32(msg, type, binding->netdev->ifindex) ? -EMSGSIZE : 0;
}
109 
/* Emit a shaper handle as a nested attribute of the given @type.
 * An UNSPEC-scoped handle is silently omitted.
 */
static int net_shaper_fill_handle(struct sk_buff *msg,
				  const struct net_shaper_handle *handle,
				  u32 type)
{
	struct nlattr *nest;

	if (handle->scope == NET_SHAPER_SCOPE_UNSPEC)
		return 0;

	nest = nla_nest_start(msg, type);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope))
		goto nest_cancel;

	/* Only QUEUE and later scopes carry a meaningful id. */
	if (handle->scope >= NET_SHAPER_SCOPE_QUEUE &&
	    nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id))
		goto nest_cancel;

	nla_nest_end(msg, nest);
	return 0;

nest_cancel:
	nla_nest_cancel(msg, nest);
	return -EMSGSIZE;
}
135 
/* Dump a single shaper into @msg, including the genl message header.
 * Zero-valued attributes are omitted; the metric is emitted only when
 * at least one rate-related field (bw_min/bw_max/burst) is set.
 */
static int
net_shaper_fill_one(struct sk_buff *msg,
		    const struct net_shaper_binding *binding,
		    const struct net_shaper *shaper,
		    const struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, &shaper->parent,
				   NET_SHAPER_A_PARENT) ||
	    net_shaper_fill_handle(msg, &shaper->handle,
				   NET_SHAPER_A_HANDLE) ||
	    ((shaper->bw_min || shaper->bw_max || shaper->burst) &&
	     nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) ||
	    (shaper->bw_min &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) ||
	    (shaper->bw_max &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) ||
	    (shaper->burst &&
	     nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) ||
	    (shaper->priority &&
	     nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) ||
	    (shaper->weight &&
	     nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight)))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
175 
/* Initialize the context fetching the relevant device and
 * acquiring a reference to it.
 * On success the caller owns a tracked device reference, to be
 * released via net_shaper_ctx_cleanup().
 */
static int net_shaper_ctx_setup(const struct genl_info *info, int type,
				struct net_shaper_nl_ctx *ctx)
{
	struct net *ns = genl_info_net(info);
	struct net_device *dev;
	int ifindex;

	if (GENL_REQ_ATTR_CHECK(info, type))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[type]);
	dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
	if (!dev) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		return -ENOENT;
	}

	/* Shaper requests only make sense on devices implementing the ops;
	 * drop the just-acquired reference on the failure path.
	 */
	if (!dev->netdev_ops->net_shaper_ops) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		netdev_put(dev, &ctx->dev_tracker);
		return -EOPNOTSUPP;
	}

	ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	ctx->binding.netdev = dev;
	return 0;
}
206 
net_shaper_ctx_cleanup(struct net_shaper_nl_ctx * ctx)207 static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx)
208 {
209 	if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
210 		netdev_put(ctx->binding.netdev, &ctx->dev_tracker);
211 }
212 
net_shaper_handle_to_index(const struct net_shaper_handle * handle)213 static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle)
214 {
215 	return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) |
216 		FIELD_PREP(NET_SHAPER_ID_MASK, handle->id);
217 }
218 
/* Unpack an xarray index back into a handle; inverse of
 * net_shaper_handle_to_index().
 */
static void net_shaper_index_to_handle(u32 index,
				       struct net_shaper_handle *handle)
{
	handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index);
	handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index);
}
225 
net_shaper_default_parent(const struct net_shaper_handle * handle,struct net_shaper_handle * parent)226 static void net_shaper_default_parent(const struct net_shaper_handle *handle,
227 				      struct net_shaper_handle *parent)
228 {
229 	switch (handle->scope) {
230 	case NET_SHAPER_SCOPE_UNSPEC:
231 	case NET_SHAPER_SCOPE_NETDEV:
232 	case __NET_SHAPER_SCOPE_MAX:
233 		parent->scope = NET_SHAPER_SCOPE_UNSPEC;
234 		break;
235 
236 	case NET_SHAPER_SCOPE_QUEUE:
237 	case NET_SHAPER_SCOPE_NODE:
238 		parent->scope = NET_SHAPER_SCOPE_NETDEV;
239 		break;
240 	}
241 	parent->id = 0;
242 }
243 
244 /*
245  * MARK_0 is already in use due to XA_FLAGS_ALLOC, can't reuse such flag as
246  * it's cleared by xa_store().
247  */
248 #define NET_SHAPER_NOT_VALID XA_MARK_1
249 
250 static struct net_shaper *
net_shaper_lookup(struct net_shaper_binding * binding,const struct net_shaper_handle * handle)251 net_shaper_lookup(struct net_shaper_binding *binding,
252 		  const struct net_shaper_handle *handle)
253 {
254 	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
255 	u32 index = net_shaper_handle_to_index(handle);
256 
257 	if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
258 				      NET_SHAPER_NOT_VALID))
259 		return NULL;
260 
261 	return xa_load(&hierarchy->shapers, index);
262 }
263 
/* Allocate on demand the per device shaper's hierarchy container.
 * Called under the net shaper lock.
 * Returns the (possibly pre-existing) container, or NULL on allocation
 * failure.
 */
static struct net_shaper_hierarchy *
net_shaper_hierarchy_setup(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);

	if (hierarchy)
		return hierarchy;

	hierarchy = kmalloc_obj(*hierarchy);
	if (!hierarchy)
		return NULL;

	/* The flag is required for ID allocation */
	xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC);

	/* Publish the container only after it is fully initialized. */
	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		/* Pairs with READ_ONCE in net_shaper_hierarchy. */
		WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy);
		break;
	}
	return hierarchy;
}
290 
/* Prepare the hierarchy container to actually insert the given shaper, doing
 * in advance the needed allocations.
 * On success the entry is stored marked NET_SHAPER_NOT_VALID, so it stays
 * invisible to net_shaper_lookup() until net_shaper_commit() clears the
 * mark; net_shaper_rollback() undoes all pending insertions.
 * For NODE scope with NET_SHAPER_ID_UNSPEC, a fresh id is allocated and
 * written back into @handle.
 */
static int net_shaper_pre_insert(struct net_shaper_binding *binding,
				 struct net_shaper_handle *handle,
				 struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *prev, *cur;
	bool id_allocated = false;
	int ret, index;

	if (!hierarchy)
		return -ENOMEM;

	/* Already present: nothing to prepare. */
	index = net_shaper_handle_to_index(handle);
	cur = xa_load(&hierarchy->shapers, index);
	if (cur)
		return 0;

	/* Allocated a new id, if needed. */
	if (handle->scope == NET_SHAPER_SCOPE_NODE &&
	    handle->id == NET_SHAPER_ID_UNSPEC) {
		u32 min, max;

		/* Derive the xarray index range spanning all NODE ids. */
		handle->id = NET_SHAPER_ID_MASK - 1;
		max = net_shaper_handle_to_index(handle);
		handle->id = 0;
		min = net_shaper_handle_to_index(handle);

		ret = xa_alloc(&hierarchy->shapers, &index, NULL,
			       XA_LIMIT(min, max), GFP_KERNEL);
		if (ret < 0) {
			NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper");
			return ret;
		}

		net_shaper_index_to_handle(index, handle);
		id_allocated = true;
	}

	cur = kzalloc_obj(*cur);
	if (!cur) {
		ret = -ENOMEM;
		goto free_id;
	}

	/* Mark 'tentative' shaper inside the hierarchy container.
	 * xa_set_mark is a no-op if the previous store fails.
	 */
	xa_lock(&hierarchy->shapers);
	prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
	__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
	xa_unlock(&hierarchy->shapers);
	if (xa_err(prev)) {
		NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
		kfree_rcu(cur, rcu);
		ret = xa_err(prev);
		goto free_id;
	}
	return 0;

free_id:
	if (id_allocated)
		xa_erase(&hierarchy->shapers, index);
	return ret;
}
358 
/* Commit the tentative insert with the actual values.
 * Must be called only after a successful net_shaper_pre_insert():
 * clears the tentative mark and copies the final shaper data in place.
 */
static void net_shaper_commit(struct net_shaper_binding *binding,
			      int nr_shapers, const struct net_shaper *shapers)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	int index;
	int i;

	xa_lock(&hierarchy->shapers);
	for (i = 0; i < nr_shapers; ++i) {
		index = net_shaper_handle_to_index(&shapers[i].handle);

		/* pre_insert() stored an entry for every handle. */
		cur = xa_load(&hierarchy->shapers, index);
		if (WARN_ON_ONCE(!cur))
			continue;

		/* Successful update: drop the tentative mark
		 * and update the hierarchy container.
		 */
		__xa_clear_mark(&hierarchy->shapers, index,
				NET_SHAPER_NOT_VALID);
		*cur = shapers[i];
	}
	xa_unlock(&hierarchy->shapers);
}
387 
/* Rollback all the tentative inserts from the hierarchy: every entry
 * still carrying the NET_SHAPER_NOT_VALID mark is erased and freed.
 */
static void net_shaper_rollback(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	xa_lock(&hierarchy->shapers);
	xa_for_each_marked(&hierarchy->shapers, index, cur,
			   NET_SHAPER_NOT_VALID) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
}
406 
/* Parse a nested handle attribute into @handle.
 * The scope sub-attribute is mandatory; the id defaults to 0, except for
 * NODE scope where a missing id becomes NET_SHAPER_ID_UNSPEC.
 */
static int net_shaper_parse_handle(const struct nlattr *attr,
				   const struct genl_info *info,
				   struct net_shaper_handle *handle)
{
	struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
	struct nlattr *id_attr;
	u32 id = 0;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
			       net_shaper_handle_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
			      NET_SHAPER_A_HANDLE_SCOPE))
		return -EINVAL;

	handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);

	/* The default id for NODE scope shapers is an invalid one
	 * to help the 'group' operation discriminate between new
	 * NODE shaper creation (ID_UNSPEC) and reuse of existing
	 * shaper (any other value).
	 */
	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
	if (id_attr)
		id = nla_get_u32(id_attr);
	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
		id = NET_SHAPER_ID_UNSPEC;

	handle->id = id;
	return 0;
}
441 
/* Check the user-provided attributes against the capabilities the driver
 * reports for this scope; also validate QUEUE ids against the device's
 * real tx queue count.
 */
static int net_shaper_validate_caps(struct net_shaper_binding *binding,
				    struct nlattr **tb,
				    const struct genl_info *info,
				    struct net_shaper *shaper)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct nlattr *bad = NULL;
	unsigned long caps = 0;

	ops->capabilities(binding, shaper->handle.scope, &caps);

	/* Remember the last unsupported attribute, if any. */
	if (tb[NET_SHAPER_A_PRIORITY] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY)))
		bad = tb[NET_SHAPER_A_PRIORITY];
	if (tb[NET_SHAPER_A_WEIGHT] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT)))
		bad = tb[NET_SHAPER_A_WEIGHT];
	if (tb[NET_SHAPER_A_BW_MIN] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN)))
		bad = tb[NET_SHAPER_A_BW_MIN];
	if (tb[NET_SHAPER_A_BW_MAX] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX)))
		bad = tb[NET_SHAPER_A_BW_MAX];
	if (tb[NET_SHAPER_A_BURST] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST)))
		bad = tb[NET_SHAPER_A_BURST];

	/* No capability at all: the scope itself is unsupported. */
	if (!caps)
		bad = tb[NET_SHAPER_A_HANDLE];

	if (bad) {
		NL_SET_BAD_ATTR(info->extack, bad);
		return -EOPNOTSUPP;
	}

	if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE &&
	    binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
	    shaper->handle.id >= binding->netdev->real_num_tx_queues) {
		NL_SET_ERR_MSG_FMT(info->extack,
				   "Not existing queue id %d max %d",
				   shaper->handle.id,
				   binding->netdev->real_num_tx_queues);
		return -ENOENT;
	}

	/* The metric is really used only if there is *any* rate-related
	 * setting, either in current attributes set or in pre-existing
	 * values.
	 */
	if (shaper->burst || shaper->bw_min || shaper->bw_max) {
		u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS +
				 shaper->metric;

		/* The metric test can fail even when the user did not
		 * specify the METRIC attribute. Pointing to rate related
		 * attribute will be confusing, as the attribute itself
		 * could be indeed supported, with a different metric.
		 * Be more specific.
		 */
		if (!(caps & BIT(metric_cap))) {
			NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d",
					   shaper->metric);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
509 
/* Fill @shaper from the netlink attributes, merging over the current
 * configuration if the shaper already exists; @exists reports whether it
 * did. The handle attribute is mandatory and must not be UNSPEC-scoped.
 */
static int net_shaper_parse_info(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper,
				 bool *exists)
{
	struct net_shaper *old;
	int ret;

	/* The shaper handle is the only mandatory attribute. */
	if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
				      &shaper->handle);
	if (ret)
		return ret;

	if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	/* Fetch existing hierarchy, if any, so that user provide info will
	 * incrementally update the existing shaper configuration.
	 */
	old = net_shaper_lookup(binding, &shaper->handle);
	if (old)
		*shaper = *old;
	*exists = !!old;

	/* Each attribute below overrides the pre-existing value, if any. */
	if (tb[NET_SHAPER_A_METRIC])
		shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);

	if (tb[NET_SHAPER_A_BW_MIN])
		shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);

	if (tb[NET_SHAPER_A_BW_MAX])
		shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);

	if (tb[NET_SHAPER_A_BURST])
		shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);

	if (tb[NET_SHAPER_A_PRIORITY])
		shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);

	if (tb[NET_SHAPER_A_WEIGHT])
		shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);

	/* Validate the merged result, not just the new attributes. */
	ret = net_shaper_validate_caps(binding, tb, info, shaper);
	if (ret < 0)
		return ret;

	return 0;
}
565 
net_shaper_validate_nesting(struct net_shaper_binding * binding,const struct net_shaper * shaper,struct netlink_ext_ack * extack)566 static int net_shaper_validate_nesting(struct net_shaper_binding *binding,
567 				       const struct net_shaper *shaper,
568 				       struct netlink_ext_ack *extack)
569 {
570 	const struct net_shaper_ops *ops = net_shaper_ops(binding);
571 	unsigned long caps = 0;
572 
573 	ops->capabilities(binding, shaper->handle.scope, &caps);
574 	if (!(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_NESTING))) {
575 		NL_SET_ERR_MSG_FMT(extack,
576 				   "Nesting not supported for scope %d",
577 				   shaper->handle.scope);
578 		return -EOPNOTSUPP;
579 	}
580 	return 0;
581 }
582 
/* Fetch the existing leaf and update it with the user-provided
 * attributes. Leaves must be QUEUE-scoped, and nesting under a NODE
 * requires the driver's nesting capability.
 */
static int net_shaper_parse_leaf(struct net_shaper_binding *binding,
				 const struct nlattr *attr,
				 const struct genl_info *info,
				 const struct net_shaper *node,
				 struct net_shaper *shaper)
{
	struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1];
	bool exists;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr,
			       net_shaper_leaf_info_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret < 0)
		return ret;

	/* Only queues can be grouped as leaves. */
	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		ret = net_shaper_validate_nesting(binding, shaper,
						  info->extack);
		if (ret < 0)
			return ret;
	}

	/* A brand-new leaf starts out attached to its default parent. */
	if (!exists)
		net_shaper_default_parent(&shaper->handle, &shaper->parent);
	return 0;
}
621 
/* Alike net_parse_shaper_info(), but additionally allow the user specifying
 * the shaper's parent handle. Both the node and its explicit parent must
 * be NODE- or NETDEV-scoped.
 */
static int net_shaper_parse_node(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper)
{
	bool exists;
	int ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE &&
	    shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (tb[NET_SHAPER_A_PARENT]) {
		ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info,
					      &shaper->parent);
		if (ret)
			return ret;

		if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE &&
		    shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) {
			NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]);
			return -EINVAL;
		}
	}
	return 0;
}
657 
/* Common doit prologue: build the shaper context in the genl-provided
 * per-request scratch area.
 */
static int net_shaper_generic_pre(struct genl_info *info, int type)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;

	/* The context must fit in the fixed-size info->ctx storage. */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));

	return net_shaper_ctx_setup(info, type, ctx);
}
666 
/* Generic netlink pre-doit hook for the shaper ops. */
int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX);
}
672 
/* Common doit epilogue: release the context built in
 * net_shaper_generic_pre().
 */
static void net_shaper_generic_post(struct genl_info *info)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx);
}
677 
/* Generic netlink post-doit hook for the shaper ops. */
void net_shaper_nl_post_doit(const struct genl_split_ops *ops,
			     struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}
683 
net_shaper_nl_pre_dumpit(struct netlink_callback * cb)684 int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
685 {
686 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
687 	const struct genl_info *info = genl_info_dump(cb);
688 
689 	return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx);
690 }
691 
net_shaper_nl_post_dumpit(struct netlink_callback * cb)692 int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
693 {
694 	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
695 	return 0;
696 }
697 
/* Pre-doit hook for the capabilities ops; same flow as the shaper ops
 * but keyed on the CAPS ifindex attribute.
 */
int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops,
			       struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_CAPS_IFINDEX);
}
703 
/* Post-doit hook for the capabilities ops. */
void net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops,
				 struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}
709 
net_shaper_nl_cap_pre_dumpit(struct netlink_callback * cb)710 int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb)
711 {
712 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
713 
714 	return net_shaper_ctx_setup(genl_info_dump(cb),
715 				    NET_SHAPER_A_CAPS_IFINDEX, ctx);
716 }
717 
net_shaper_nl_cap_post_dumpit(struct netlink_callback * cb)718 int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb)
719 {
720 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
721 
722 	net_shaper_ctx_cleanup(ctx);
723 	return 0;
724 }
725 
/* Netlink 'get' handler: reply with the single shaper identified by the
 * handle attribute, or -ENOENT if it does not exist.
 */
int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Shapers are freed via kfree_rcu(); the RCU read side keeps the
	 * looked-up entry alive while it is copied into the reply.
	 */
	rcu_read_lock();
	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_HANDLE]);
		rcu_read_unlock();
		ret = -ENOENT;
		goto free_msg;
	}

	ret = net_shaper_fill_one(msg, binding, shaper, info);
	rcu_read_unlock();
	if (ret)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return ret;
}
768 
/* Netlink 'get' dump handler: emit every shaper in the hierarchy,
 * resuming from ctx->start_index across successive dump calls.
 */
int net_shaper_nl_get_dumpit(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper *shaper;
	int ret = 0;

	/* Don't error out dumps performed before any set operation. */
	binding = net_shaper_binding_from_ctx(ctx);
	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy)
		return 0;

	/* RCU keeps the traversed entries alive; a full skb ends this
	 * round and the loop resumes from start_index on the next call.
	 */
	rcu_read_lock();
	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
796 
/* Netlink 'set' handler: create or update a single shaper.
 * The whole parse/insert/driver-call sequence runs under the shaper lock
 * so the lookup and the final commit stay consistent.
 */
int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	struct net_shaper_handle handle;
	struct net_shaper shaper = {};
	bool exists;
	int ret;

	binding = net_shaper_binding_from_ctx(info->ctx);

	net_shaper_lock(binding);
	ret = net_shaper_parse_info(binding, info->attrs, info, &shaper,
				    &exists);
	if (ret)
		goto unlock;

	if (!exists)
		net_shaper_default_parent(&shaper.handle, &shaper.parent);

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* The 'set' operation can't create node-scope shapers. */
	handle = shaper.handle;
	if (handle.scope == NET_SHAPER_SCOPE_NODE &&
	    !net_shaper_lookup(binding, &handle)) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Stage a tentative entry, hand it to the driver, then either
	 * commit or roll back depending on the driver's verdict.
	 */
	ret = net_shaper_pre_insert(binding, &handle, info->extack);
	if (ret)
		goto unlock;

	ops = net_shaper_ops(binding);
	ret = ops->set(binding, &shaper, info->extack);
	if (ret) {
		net_shaper_rollback(binding);
		goto unlock;
	}

	net_shaper_commit(binding, 1, &shaper);

unlock:
	net_shaper_unlock(binding);
	return ret;
}
849 
/* Delete @shaper from the hierarchy, asking the driver first.
 * When the deleted shaper was the last leaf of a NODE-scoped parent, the
 * parent is deleted too, iterating up the chain via the 'again' loop.
 */
static int __net_shaper_delete(struct net_shaper_binding *binding,
			       struct net_shaper *shaper,
			       struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper_handle parent_handle, handle = shaper->handle;
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	int ret;

again:
	/* Save the parent before freeing the shaper it belongs to. */
	parent_handle = shaper->parent;

	ret = ops->delete(binding, &handle, extack);
	if (ret < 0)
		return ret;

	xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle));
	kfree_rcu(shaper, rcu);

	/* Eventually delete the parent, if it is left over with no leaves. */
	if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) {
		shaper = net_shaper_lookup(binding, &parent_handle);
		if (shaper && !--shaper->leaves) {
			handle = parent_handle;
			goto again;
		}
	}
	return 0;
}
879 
/* Compare two handles; returns 0 when equal, non-zero otherwise,
 * with memcmp() ordering semantics.
 */
static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
				 const struct net_shaper_handle *b)
{
	/* Must avoid holes in struct net_shaper_handle: memcmp() would
	 * otherwise compare uninitialized padding bytes.
	 */
	BUILD_BUG_ON(sizeof(*a) != 8);

	return memcmp(a, b, sizeof(*a));
}
888 
net_shaper_parent_from_leaves(int leaves_count,const struct net_shaper * leaves,struct net_shaper * node,struct netlink_ext_ack * extack)889 static int net_shaper_parent_from_leaves(int leaves_count,
890 					 const struct net_shaper *leaves,
891 					 struct net_shaper *node,
892 					 struct netlink_ext_ack *extack)
893 {
894 	struct net_shaper_handle parent = leaves[0].parent;
895 	int i;
896 
897 	for (i = 1; i < leaves_count; ++i) {
898 		if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
899 			NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
900 			return -EINVAL;
901 		}
902 	}
903 
904 	node->parent = parent;
905 	return 0;
906 }
907 
/* Nest the given leaves shapers under the given node, invoking the
 * device group() callback and committing the hierarchy changes on
 * success, or rolling back all the pre-inserted entries on failure.
 *
 * @update_node: when true, the node itself is (pre-)inserted and
 *	committed alongside the leaves; when false, the node entry is
 *	left untouched (e.g. re-linking to the always-existing NETDEV
 *	scope shaper on delete).
 *
 * Returns 0 on success or a negative error code.
 */
static int __net_shaper_group(struct net_shaper_binding *binding,
			      bool update_node, int leaves_count,
			      struct net_shaper *leaves,
			      struct net_shaper *node,
			      struct netlink_ext_ack *extack)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct net_shaper_handle leaf_handle;
	struct net_shaper *parent = NULL;
	bool new_node = false;
	int i, ret;

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		/* An UNSPEC id means the node must be created; otherwise it
		 * must already exist in the hierarchy.
		 */
		new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;

		if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
			/* The related attribute is not available when
			 * reaching here from the delete() op.
			 */
			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
					   node->handle.scope, node->handle.id);
			return -ENOENT;
		}

		/* When unspecified, the node parent scope is inherited from
		 * the leaves.
		 */
		if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
			ret = net_shaper_parent_from_leaves(leaves_count,
							    leaves, node,
							    extack);
			if (ret)
				return ret;
		}

	} else {
		net_shaper_default_parent(&node->handle, &node->parent);
	}

	if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
		parent = net_shaper_lookup(binding, &node->parent);
		if (!parent) {
			NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exists",
					   node->parent.scope, node->parent.id);
			return -ENOENT;
		}

		ret = net_shaper_validate_nesting(binding, node, extack);
		if (ret < 0)
			return ret;
	}

	if (update_node) {
		/* For newly created node scope shaper, the following will
		 * update the handle, due to id allocation.
		 */
		ret = net_shaper_pre_insert(binding, &node->handle, extack);
		if (ret)
			return ret;
	}

	/* Pre-insert every leaf; nothing is visible/committed until all the
	 * insertions and the device group() call succeeded.
	 */
	for (i = 0; i < leaves_count; ++i) {
		leaf_handle = leaves[i].handle;

		ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
		if (ret)
			goto rollback;

		if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
			continue;

		/* The leaves shapers will be nested to the node, update the
		 * linking accordingly.
		 */
		leaves[i].parent = node->handle;
		node->leaves++;
	}

	ret = ops->group(binding, leaves_count, leaves, node, extack);
	if (ret < 0)
		goto rollback;

	/* The node's parent gains a new leaf only when the node itself
	 * is created by this group operation
	 */
	if (new_node && parent)
		parent->leaves++;
	if (update_node)
		net_shaper_commit(binding, 1, node);
	net_shaper_commit(binding, leaves_count, leaves);
	return 0;

rollback:
	net_shaper_rollback(binding);
	return ret;
}
1004 
/* Prepare the deletion of a NODE scope shaper: re-link all its current
 * leaves to the node's own parent, so that the hierarchy stays
 * consistent after the node is removed.
 * Returns 0 when the shaper has no leaves or the re-linking succeeded,
 * a negative error code otherwise.
 */
static int net_shaper_pre_del_node(struct net_shaper_binding *binding,
				   const struct net_shaper *shaper,
				   struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur, *leaves, node = {};
	int ret, leaves_count = 0;
	unsigned long index;
	bool update_node;

	if (!shaper->leaves)
		return 0;

	/* Fetch the new node information. */
	node.handle = shaper->parent;
	cur = net_shaper_lookup(binding, &node.handle);
	if (cur) {
		node = *cur;
	} else {
		/* A scope NODE shaper can be nested only to the NETDEV scope
		 * shaper without creating the latter, this check may fail only
		 * if the data is in inconsistent status.
		 */
		if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
			return -EINVAL;
	}

	leaves = kzalloc_objs(struct net_shaper, shaper->leaves);
	if (!leaves)
		return -ENOMEM;

	/* Build the leaves arrays. */
	xa_for_each(&hierarchy->shapers, index, cur) {
		if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
			continue;

		/* More children than the accounted leaves number means the
		 * hierarchy is in inconsistent status.
		 */
		if (WARN_ON_ONCE(leaves_count == shaper->leaves)) {
			ret = -EINVAL;
			goto free;
		}

		leaves[leaves_count++] = *cur;
	}

	/* When re-linking to the netdev shaper, avoid the eventual, implicit,
	 * creation of the new node, would be surprising since the user is
	 * doing a delete operation.
	 */
	update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
	ret = __net_shaper_group(binding, update_node, leaves_count,
				 leaves, &node, extack);

free:
	kfree(leaves);
	return ret;
}
1061 
/* Netlink 'delete' doit handler: remove the shaper identified by the
 * NET_SHAPER_A_HANDLE attribute, re-linking any leaves of a NODE scope
 * shaper to its parent first.
 */
int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);

	/* All hierarchy accesses and changes happen under the binding lock. */
	net_shaper_lock(binding);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret)
		goto unlock;

	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy) {
		ret = -ENOENT;
		goto unlock;
	}

	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Deleting a node requires moving its leaves out of the way first. */
	if (handle.scope == NET_SHAPER_SCOPE_NODE) {
		ret = net_shaper_pre_del_node(binding, shaper, info->extack);
		if (ret)
			goto unlock;
	}

	ret = __net_shaper_delete(binding, shaper, info->extack);

unlock:
	net_shaper_unlock(binding);
	return ret;
}
1105 
/* Send the 'group' operation reply carrying the binding and the
 * (possibly newly allocated) node handle back to user-space.
 * Consumes @msg in all cases.
 */
static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
				       const struct net_shaper_handle *handle,
				       struct genl_info *info,
				       struct sk_buff *msg)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		goto free_msg;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
		goto free_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

free_msg:
	/* Should never happen as msg is pre-allocated with enough space. */
	WARN_ONCE(true, "calculated message payload length (%d)",
		  net_shaper_handle_size());
	nlmsg_free(msg);
	return -EMSGSIZE;
}
1132 
net_shaper_nl_group_doit(struct sk_buff * skb,struct genl_info * info)1133 int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
1134 {
1135 	struct net_shaper **old_nodes, *leaves, node = {};
1136 	struct net_shaper_hierarchy *hierarchy;
1137 	struct net_shaper_binding *binding;
1138 	int i, ret, rem, leaves_count;
1139 	int old_nodes_count = 0;
1140 	struct sk_buff *msg;
1141 	struct nlattr *attr;
1142 
1143 	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
1144 		return -EINVAL;
1145 
1146 	binding = net_shaper_binding_from_ctx(info->ctx);
1147 
1148 	/* The group operation is optional. */
1149 	if (!net_shaper_ops(binding)->group)
1150 		return -EOPNOTSUPP;
1151 
1152 	net_shaper_lock(binding);
1153 	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
1154 	if (!leaves_count) {
1155 		NL_SET_BAD_ATTR(info->extack,
1156 				info->attrs[NET_SHAPER_A_LEAVES]);
1157 		ret = -EINVAL;
1158 		goto unlock;
1159 	}
1160 
1161 	leaves = kcalloc(leaves_count, sizeof(struct net_shaper) +
1162 			 sizeof(struct net_shaper *), GFP_KERNEL);
1163 	if (!leaves) {
1164 		ret = -ENOMEM;
1165 		goto unlock;
1166 	}
1167 	old_nodes = (void *)&leaves[leaves_count];
1168 
1169 	ret = net_shaper_parse_node(binding, info->attrs, info, &node);
1170 	if (ret)
1171 		goto free_leaves;
1172 
1173 	i = 0;
1174 	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
1175 			       genlmsg_data(info->genlhdr),
1176 			       genlmsg_len(info->genlhdr), rem) {
1177 		if (WARN_ON_ONCE(i >= leaves_count))
1178 			goto free_leaves;
1179 
1180 		ret = net_shaper_parse_leaf(binding, attr, info,
1181 					    &node, &leaves[i]);
1182 		if (ret)
1183 			goto free_leaves;
1184 		i++;
1185 	}
1186 
1187 	/* Prepare the msg reply in advance, to avoid device operation
1188 	 * rollback on allocation failure.
1189 	 */
1190 	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
1191 	if (!msg)
1192 		goto free_leaves;
1193 
1194 	hierarchy = net_shaper_hierarchy_setup(binding);
1195 	if (!hierarchy) {
1196 		ret = -ENOMEM;
1197 		goto free_msg;
1198 	}
1199 
1200 	/* Record the node shapers that this group() operation can make
1201 	 * childless for later cleanup.
1202 	 */
1203 	for (i = 0; i < leaves_count; i++) {
1204 		if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
1205 		    net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
1206 			struct net_shaper *tmp;
1207 
1208 			tmp = net_shaper_lookup(binding, &leaves[i].parent);
1209 			if (!tmp)
1210 				continue;
1211 
1212 			old_nodes[old_nodes_count++] = tmp;
1213 		}
1214 	}
1215 
1216 	ret = __net_shaper_group(binding, true, leaves_count, leaves, &node,
1217 				 info->extack);
1218 	if (ret)
1219 		goto free_msg;
1220 
1221 	/* Check if we need to delete any node left alone by the new leaves
1222 	 * linkage.
1223 	 */
1224 	for (i = 0; i < old_nodes_count; ++i) {
1225 		struct net_shaper *tmp = old_nodes[i];
1226 
1227 		if (--tmp->leaves > 0)
1228 			continue;
1229 
1230 		/* Errors here are not fatal: the grouping operation is
1231 		 * completed, and user-space can still explicitly clean-up
1232 		 * left-over nodes.
1233 		 */
1234 		__net_shaper_delete(binding, tmp, info->extack);
1235 	}
1236 
1237 	ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
1238 	if (ret)
1239 		GENL_SET_ERR_MSG_FMT(info, "Can't send reply");
1240 
1241 free_leaves:
1242 	kfree(leaves);
1243 
1244 unlock:
1245 	net_shaper_unlock(binding);
1246 	return ret;
1247 
1248 free_msg:
1249 	kfree_skb(msg);
1250 	goto free_leaves;
1251 }
1252 
1253 static int
net_shaper_cap_fill_one(struct sk_buff * msg,struct net_shaper_binding * binding,enum net_shaper_scope scope,unsigned long flags,const struct genl_info * info)1254 net_shaper_cap_fill_one(struct sk_buff *msg,
1255 			struct net_shaper_binding *binding,
1256 			enum net_shaper_scope scope, unsigned long flags,
1257 			const struct genl_info *info)
1258 {
1259 	unsigned long cur;
1260 	void *hdr;
1261 
1262 	hdr = genlmsg_iput(msg, info);
1263 	if (!hdr)
1264 		return -EMSGSIZE;
1265 
1266 	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_CAPS_IFINDEX) ||
1267 	    nla_put_u32(msg, NET_SHAPER_A_CAPS_SCOPE, scope))
1268 		goto nla_put_failure;
1269 
1270 	for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS;
1271 	     cur <= NET_SHAPER_A_CAPS_MAX; ++cur) {
1272 		if (flags & BIT(cur) && nla_put_flag(msg, cur))
1273 			goto nla_put_failure;
1274 	}
1275 
1276 	genlmsg_end(msg, hdr);
1277 
1278 	return 0;
1279 
1280 nla_put_failure:
1281 	genlmsg_cancel(msg, hdr);
1282 	return -EMSGSIZE;
1283 }
1284 
net_shaper_nl_cap_get_doit(struct sk_buff * skb,struct genl_info * info)1285 int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info)
1286 {
1287 	struct net_shaper_binding *binding;
1288 	const struct net_shaper_ops *ops;
1289 	enum net_shaper_scope scope;
1290 	unsigned long flags = 0;
1291 	struct sk_buff *msg;
1292 	int ret;
1293 
1294 	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_CAPS_SCOPE))
1295 		return -EINVAL;
1296 
1297 	binding = net_shaper_binding_from_ctx(info->ctx);
1298 	scope = nla_get_u32(info->attrs[NET_SHAPER_A_CAPS_SCOPE]);
1299 	ops = net_shaper_ops(binding);
1300 	ops->capabilities(binding, scope, &flags);
1301 	if (!flags)
1302 		return -EOPNOTSUPP;
1303 
1304 	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1305 	if (!msg)
1306 		return -ENOMEM;
1307 
1308 	ret = net_shaper_cap_fill_one(msg, binding, scope, flags, info);
1309 	if (ret)
1310 		goto free_msg;
1311 
1312 	return genlmsg_reply(msg, info);
1313 
1314 free_msg:
1315 	nlmsg_free(msg);
1316 	return ret;
1317 }
1318 
net_shaper_nl_cap_get_dumpit(struct sk_buff * skb,struct netlink_callback * cb)1319 int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb,
1320 				 struct netlink_callback *cb)
1321 {
1322 	const struct genl_info *info = genl_info_dump(cb);
1323 	struct net_shaper_binding *binding;
1324 	const struct net_shaper_ops *ops;
1325 	enum net_shaper_scope scope;
1326 	int ret;
1327 
1328 	binding = net_shaper_binding_from_ctx(cb->ctx);
1329 	ops = net_shaper_ops(binding);
1330 	for (scope = 0; scope <= NET_SHAPER_SCOPE_MAX; ++scope) {
1331 		unsigned long flags = 0;
1332 
1333 		ops->capabilities(binding, scope, &flags);
1334 		if (!flags)
1335 			continue;
1336 
1337 		ret = net_shaper_cap_fill_one(skb, binding, scope, flags,
1338 					      info);
1339 		if (ret)
1340 			return ret;
1341 	}
1342 
1343 	return 0;
1344 }
1345 
net_shaper_flush(struct net_shaper_binding * binding)1346 static void net_shaper_flush(struct net_shaper_binding *binding)
1347 {
1348 	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
1349 	struct net_shaper *cur;
1350 	unsigned long index;
1351 
1352 	if (!hierarchy)
1353 		return;
1354 
1355 	net_shaper_lock(binding);
1356 	xa_lock(&hierarchy->shapers);
1357 	xa_for_each(&hierarchy->shapers, index, cur) {
1358 		__xa_erase(&hierarchy->shapers, index);
1359 		kfree(cur);
1360 	}
1361 	xa_unlock(&hierarchy->shapers);
1362 	net_shaper_unlock(binding);
1363 
1364 	kfree(hierarchy);
1365 }
1366 
net_shaper_flush_netdev(struct net_device * dev)1367 void net_shaper_flush_netdev(struct net_device *dev)
1368 {
1369 	struct net_shaper_binding binding = {
1370 		.type = NET_SHAPER_BINDING_TYPE_NETDEV,
1371 		.netdev = dev,
1372 	};
1373 
1374 	net_shaper_flush(&binding);
1375 }
1376 
/* Drop the QUEUE scope shapers attached to tx queues removed by a
 * real_num_tx_queues shrink, recursively deleting any NODE scope
 * parent left childless.
 */
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding binding;
	int i;

	binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	binding.netdev = dev;
	hierarchy = net_shaper_hierarchy(&binding);
	if (!hierarchy)
		return;

	/* Only drivers implementing shapers support ensure
	 * the lock is acquired in advance.
	 */
	netdev_assert_locked(dev);

	/* Take action only when decreasing the tx queue number. */
	for (i = txq; i < dev->real_num_tx_queues; ++i) {
		struct net_shaper_handle handle, parent_handle;
		struct net_shaper *shaper;
		u32 index;

		handle.scope = NET_SHAPER_SCOPE_QUEUE;
		handle.id = i;
		shaper = net_shaper_lookup(&binding, &handle);
		if (!shaper)
			continue;

		/* Don't touch the H/W for the queue shaper, the drivers already
		 * deleted the queue and related resources.
		 */
		parent_handle = shaper->parent;
		index = net_shaper_handle_to_index(&handle);
		xa_erase(&hierarchy->shapers, index);
		kfree_rcu(shaper, rcu);

		/* The recursion on parent does the full job. */
		if (parent_handle.scope != NET_SHAPER_SCOPE_NODE)
			continue;

		/* Deleting the last leaf of a node removes the node, too;
		 * __net_shaper_delete() in turn recurses on its parent.
		 */
		shaper = net_shaper_lookup(&binding, &parent_handle);
		if (shaper && !--shaper->leaves)
			__net_shaper_delete(&binding, shaper, NULL);
	}
}
1424 
/* Register the net-shaper generic netlink family at boot time. */
static int __init shaper_init(void)
{
	return genl_register_family(&net_shaper_nl_family);
}

subsys_initcall(shaper_init);
1431