xref: /linux/net/shaper/shaper.c (revision 5d5d4700e75d861e83bf18eb6bf66ff90f85fe4e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/bits.h>
4 #include <linux/bitfield.h>
5 #include <linux/idr.h>
6 #include <linux/kernel.h>
7 #include <linux/netdevice.h>
8 #include <linux/netlink.h>
9 #include <linux/skbuff.h>
10 #include <linux/xarray.h>
11 #include <net/devlink.h>
12 #include <net/net_shaper.h>
13 
14 #include "shaper_nl_gen.h"
15 
16 #include "../core/dev.h"
17 
18 #define NET_SHAPER_SCOPE_SHIFT	26
19 #define NET_SHAPER_ID_MASK	GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
20 #define NET_SHAPER_SCOPE_MASK	GENMASK(31, NET_SHAPER_SCOPE_SHIFT)
21 
22 #define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK
23 
24 struct net_shaper_hierarchy {
25 	struct xarray shapers;
26 };
27 
28 struct net_shaper_nl_ctx {
29 	struct net_shaper_binding binding;
30 	netdevice_tracker dev_tracker;
31 	unsigned long start_index;
32 };
33 
34 static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
35 {
36 	return &((struct net_shaper_nl_ctx *)ctx)->binding;
37 }
38 
/* Acquire the shaper-configuration lock for the given binding.
 * For netdev bindings - the only type implemented - this piggy-backs
 * on the per-device instance mutex.
 */
static void net_shaper_lock(struct net_shaper_binding *binding)
{
	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		mutex_lock(&binding->netdev->lock);
		break;
	}
}
47 
/* Release the lock acquired by net_shaper_lock(). */
static void net_shaper_unlock(struct net_shaper_binding *binding)
{
	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		mutex_unlock(&binding->netdev->lock);
		break;
	}
}
56 
/* Fetch the hierarchy attached to the given binding, or NULL if none
 * has been created yet. Safe to call locklessly.
 */
static struct net_shaper_hierarchy *
net_shaper_hierarchy(struct net_shaper_binding *binding)
{
	/* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */
	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
		return READ_ONCE(binding->netdev->net_shaper_hierarchy);

	/* No other type supported yet. */
	return NULL;
}
67 
68 static const struct net_shaper_ops *
69 net_shaper_ops(struct net_shaper_binding *binding)
70 {
71 	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
72 		return binding->netdev->netdev_ops->net_shaper_ops;
73 
74 	/* No other type supported yet. */
75 	return NULL;
76 }
77 
/* Count the number of [multi] attributes of the given type. */
static int net_shaper_list_len(struct genl_info *info, int type)
{
	struct nlattr *attr;
	int rem, cnt = 0;

	/* Walk the whole genl payload, counting only matching attrs. */
	nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem)
		cnt++;
	return cnt;
}
89 
90 static int net_shaper_handle_size(void)
91 {
92 	return nla_total_size(nla_total_size(sizeof(u32)) +
93 			      nla_total_size(sizeof(u32)));
94 }
95 
96 static int net_shaper_fill_binding(struct sk_buff *msg,
97 				   const struct net_shaper_binding *binding,
98 				   u32 type)
99 {
100 	/* Should never happen, as currently only NETDEV is supported. */
101 	if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
102 		return -EINVAL;
103 
104 	if (nla_put_u32(msg, type, binding->netdev->ifindex))
105 		return -EMSGSIZE;
106 
107 	return 0;
108 }
109 
/* Dump the given handle as a nested attribute of the given type.
 * Nothing is emitted for UNSPEC scope; the id attribute is included
 * only for scopes that actually carry one (QUEUE and above).
 */
static int net_shaper_fill_handle(struct sk_buff *msg,
				  const struct net_shaper_handle *handle,
				  u32 type)
{
	struct nlattr *handle_attr;

	if (handle->scope == NET_SHAPER_SCOPE_UNSPEC)
		return 0;

	handle_attr = nla_nest_start(msg, type);
	if (!handle_attr)
		return -EMSGSIZE;

	if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) ||
	    (handle->scope >= NET_SHAPER_SCOPE_QUEUE &&
	     nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id)))
		goto handle_nest_cancel;

	nla_nest_end(msg, handle_attr);
	return 0;

handle_nest_cancel:
	/* Remove the whole partially-built nest on failure. */
	nla_nest_cancel(msg, handle_attr);
	return -EMSGSIZE;
}
135 
/* Dump a whole shaper - binding, parent, handle and config - as a new
 * genl message. Zero-valued attributes are omitted; the metric is
 * emitted only when at least one rate-related field is set.
 */
static int
net_shaper_fill_one(struct sk_buff *msg,
		    const struct net_shaper_binding *binding,
		    const struct net_shaper *shaper,
		    const struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	/* The || chain stops at the first failing put; attribute order
	 * here defines the on-wire layout.
	 */
	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, &shaper->parent,
				   NET_SHAPER_A_PARENT) ||
	    net_shaper_fill_handle(msg, &shaper->handle,
				   NET_SHAPER_A_HANDLE) ||
	    ((shaper->bw_min || shaper->bw_max || shaper->burst) &&
	     nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) ||
	    (shaper->bw_min &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) ||
	    (shaper->bw_max &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) ||
	    (shaper->burst &&
	     nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) ||
	    (shaper->priority &&
	     nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) ||
	    (shaper->weight &&
	     nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight)))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
175 
/* Initialize the context fetching the relevant device and
 * acquiring a reference to it.
 *
 * Returns -EINVAL when the ifindex attribute is missing, -ENOENT when
 * no such device exists and -EOPNOTSUPP when the device exposes no
 * shaper ops. On success the caller owns a tracked device reference,
 * to be released via net_shaper_ctx_cleanup().
 */
static int net_shaper_ctx_setup(const struct genl_info *info, int type,
				struct net_shaper_nl_ctx *ctx)
{
	struct net *ns = genl_info_net(info);
	struct net_device *dev;
	int ifindex;

	if (GENL_REQ_ATTR_CHECK(info, type))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[type]);
	dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
	if (!dev) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		return -ENOENT;
	}

	if (!dev->netdev_ops->net_shaper_ops) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		netdev_put(dev, &ctx->dev_tracker);
		return -EOPNOTSUPP;
	}

	ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	ctx->binding.netdev = dev;
	return 0;
}
206 
207 static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx)
208 {
209 	if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
210 		netdev_put(ctx->binding.netdev, &ctx->dev_tracker);
211 }
212 
213 static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle)
214 {
215 	return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) |
216 		FIELD_PREP(NET_SHAPER_ID_MASK, handle->id);
217 }
218 
219 static void net_shaper_index_to_handle(u32 index,
220 				       struct net_shaper_handle *handle)
221 {
222 	handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index);
223 	handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index);
224 }
225 
/* Compute the implicit parent for the given handle: QUEUE and NODE
 * shapers hang off the NETDEV shaper by default, everything else gets
 * no parent (UNSPEC scope).
 */
static void net_shaper_default_parent(const struct net_shaper_handle *handle,
				      struct net_shaper_handle *parent)
{
	switch (handle->scope) {
	case NET_SHAPER_SCOPE_UNSPEC:
	case NET_SHAPER_SCOPE_NETDEV:
	case __NET_SHAPER_SCOPE_MAX:
		parent->scope = NET_SHAPER_SCOPE_UNSPEC;
		break;

	case NET_SHAPER_SCOPE_QUEUE:
	case NET_SHAPER_SCOPE_NODE:
		parent->scope = NET_SHAPER_SCOPE_NETDEV;
		break;
	}
	parent->id = 0;
}
243 
244 /*
245  * MARK_0 is already in use due to XA_FLAGS_ALLOC, can't reuse such flag as
246  * it's cleared by xa_store().
247  */
248 #define NET_SHAPER_NOT_VALID XA_MARK_1
249 
/* Lookup the given shaper inside the hierarchy, skipping entries that
 * are still tentative, i.e. pre-inserted but not yet committed
 * (marked NET_SHAPER_NOT_VALID).
 */
static struct net_shaper *
net_shaper_lookup(struct net_shaper_binding *binding,
		  const struct net_shaper_handle *handle)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	u32 index = net_shaper_handle_to_index(handle);

	if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
				      NET_SHAPER_NOT_VALID))
		return NULL;

	return xa_load(&hierarchy->shapers, index);
}
263 
/* Allocate on demand the per device shaper's hierarchy container.
 * Called under the net shaper lock
 */
static struct net_shaper_hierarchy *
net_shaper_hierarchy_setup(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);

	/* Already set up by a previous operation: reuse it. */
	if (hierarchy)
		return hierarchy;

	hierarchy = kmalloc(sizeof(*hierarchy), GFP_KERNEL);
	if (!hierarchy)
		return NULL;

	/* The flag is required for ID allocation */
	xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC);

	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		/* Pairs with READ_ONCE in net_shaper_hierarchy. */
		WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy);
		break;
	}
	return hierarchy;
}
290 
/* Prepare the hierarchy container to actually insert the given shaper, doing
 * in advance the needed allocations.
 *
 * For NODE scope handles with an unspecified id, a fresh id is
 * allocated and written back into @handle. The inserted placeholder is
 * marked NET_SHAPER_NOT_VALID until net_shaper_commit(); a later
 * net_shaper_rollback() undoes it. Called under the net shaper lock.
 */
static int net_shaper_pre_insert(struct net_shaper_binding *binding,
				 struct net_shaper_handle *handle,
				 struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *prev, *cur;
	bool id_allocated = false;
	int ret, index;

	if (!hierarchy)
		return -ENOMEM;

	index = net_shaper_handle_to_index(handle);
	cur = xa_load(&hierarchy->shapers, index);
	/* Entry already present (committed or tentative): nothing to do. */
	if (cur)
		return 0;

	/* Allocated a new id, if needed. */
	if (handle->scope == NET_SHAPER_SCOPE_NODE &&
	    handle->id == NET_SHAPER_ID_UNSPEC) {
		u32 min, max;

		/* Compute the xarray index range spanning all the valid
		 * ids for this scope.
		 */
		handle->id = NET_SHAPER_ID_MASK - 1;
		max = net_shaper_handle_to_index(handle);
		handle->id = 0;
		min = net_shaper_handle_to_index(handle);

		ret = xa_alloc(&hierarchy->shapers, &index, NULL,
			       XA_LIMIT(min, max), GFP_KERNEL);
		if (ret < 0) {
			NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper");
			return ret;
		}

		net_shaper_index_to_handle(index, handle);
		id_allocated = true;
	}

	cur = kzalloc(sizeof(*cur), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto free_id;
	}

	/* Mark 'tentative' shaper inside the hierarchy container.
	 * xa_set_mark is a no-op if the previous store fails.
	 */
	xa_lock(&hierarchy->shapers);
	prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
	__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
	xa_unlock(&hierarchy->shapers);
	if (xa_err(prev)) {
		NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
		kfree_rcu(cur, rcu);
		ret = xa_err(prev);
		goto free_id;
	}
	return 0;

free_id:
	/* Release the id slot reserved by xa_alloc() above, if any. */
	if (id_allocated)
		xa_erase(&hierarchy->shapers, index);
	return ret;
}
358 
/* Commit the tentative insert with the actual values.
 * Must be called only after a successful net_shaper_pre_insert().
 */
static void net_shaper_commit(struct net_shaper_binding *binding,
			      int nr_shapers, const struct net_shaper *shapers)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	int index;
	int i;

	xa_lock(&hierarchy->shapers);
	for (i = 0; i < nr_shapers; ++i) {
		index = net_shaper_handle_to_index(&shapers[i].handle);

		/* Placeholder stored by the pre_insert step. */
		cur = xa_load(&hierarchy->shapers, index);
		if (WARN_ON_ONCE(!cur))
			continue;

		/* Successful update: drop the tentative mark
		 * and update the hierarchy container.
		 */
		__xa_clear_mark(&hierarchy->shapers, index,
				NET_SHAPER_NOT_VALID);
		*cur = shapers[i];
	}
	xa_unlock(&hierarchy->shapers);
}
387 
/* Rollback all the tentative inserts from the hierarchy. */
static void net_shaper_rollback(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	/* Erase and free every entry still carrying the tentative mark. */
	xa_lock(&hierarchy->shapers);
	xa_for_each_marked(&hierarchy->shapers, index, cur,
			   NET_SHAPER_NOT_VALID) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
}
406 
/* Parse a nested handle attribute into @handle.
 * The scope sub-attribute is mandatory; a missing id defaults to 0,
 * except for NODE scope where it defaults to NET_SHAPER_ID_UNSPEC.
 */
static int net_shaper_parse_handle(const struct nlattr *attr,
				   const struct genl_info *info,
				   struct net_shaper_handle *handle)
{
	struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
	struct nlattr *id_attr;
	u32 id = 0;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
			       net_shaper_handle_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
			      NET_SHAPER_A_HANDLE_SCOPE))
		return -EINVAL;

	handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);

	/* The default id for NODE scope shapers is an invalid one
	 * to help the 'group' operation discriminate between new
	 * NODE shaper creation (ID_UNSPEC) and reuse of existing
	 * shaper (any other value).
	 */
	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
	if (id_attr)
		id = nla_get_u32(id_attr);
	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
		id = NET_SHAPER_ID_UNSPEC;

	handle->id = id;
	return 0;
}
441 
/* Parse the common shaper attributes (handle, metric, rates, priority
 * and weight) into @shaper, starting from the currently configured
 * values, if any, so that user input updates them incrementally.
 * @exists reports whether the shaper was already in the hierarchy.
 */
static int net_shaper_parse_info(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper,
				 bool *exists)
{
	struct net_shaper *old;
	int ret;

	/* The shaper handle is the only mandatory attribute. */
	if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
				      &shaper->handle);
	if (ret)
		return ret;

	/* UNSPEC scope does not identify any actual shaper. */
	if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	/* Fetch existing hierarchy, if any, so that user provide info will
	 * incrementally update the existing shaper configuration.
	 */
	old = net_shaper_lookup(binding, &shaper->handle);
	if (old)
		*shaper = *old;
	*exists = !!old;

	if (tb[NET_SHAPER_A_METRIC])
		shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);

	if (tb[NET_SHAPER_A_BW_MIN])
		shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);

	if (tb[NET_SHAPER_A_BW_MAX])
		shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);

	if (tb[NET_SHAPER_A_BURST])
		shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);

	if (tb[NET_SHAPER_A_PRIORITY])
		shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);

	if (tb[NET_SHAPER_A_WEIGHT])
		shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);
	return 0;
}
492 
/* Fetch the existing leaf and update it with the user-provided
 * attributes. Only QUEUE scope handles are accepted here; newly
 * created leaves get the default (NETDEV) parent.
 */
static int net_shaper_parse_leaf(struct net_shaper_binding *binding,
				 const struct nlattr *attr,
				 const struct genl_info *info,
				 const struct net_shaper *node,
				 struct net_shaper *shaper)
{
	struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1];
	bool exists;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr,
			       net_shaper_leaf_info_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret < 0)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (!exists)
		net_shaper_default_parent(&shaper->handle, &shaper->parent);
	return 0;
}
524 
/* Alike net_parse_shaper_info(), but additionally allow the user specifying
 * the shaper's parent handle.
 * Only NODE and NETDEV scope handles are accepted; an explicit parent
 * must itself be NODE or NETDEV scoped.
 */
static int net_shaper_parse_node(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper)
{
	bool exists;
	int ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE &&
	    shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (tb[NET_SHAPER_A_PARENT]) {
		ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info,
					      &shaper->parent);
		if (ret)
			return ret;

		if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE &&
		    shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) {
			NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]);
			return -EINVAL;
		}
	}
	return 0;
}
560 
/* Common doit prologue: bind the genl ctx scratch area to the target
 * device.
 */
static int net_shaper_generic_pre(struct genl_info *info, int type)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;

	/* The private context must fit in the genl scratch area. */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));

	return net_shaper_ctx_setup(info, type, ctx);
}
569 
/* Family doit pre-hook: resolve and pin the target device. */
int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX);
}
575 
/* Common doit epilogue: drop the device reference taken in the pre-hook. */
static void net_shaper_generic_post(struct genl_info *info)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx);
}
580 
/* Family doit post-hook. */
void net_shaper_nl_post_doit(const struct genl_split_ops *ops,
			     struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}
586 
/* Family dumpit pre-hook: resolve and pin the target device; the ctx
 * persists across the whole (possibly multi-call) dump.
 */
int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);

	return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx);
}
594 
/* Family dumpit post-hook: drop the device reference. */
int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
{
	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
	return 0;
}
600 
/* Netlink 'get' op: reply with the configuration of a single shaper. */
int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Lockless lookup: the shaper is protected from freeing by the
	 * RCU critical section (entries are kfree_rcu()'d on delete).
	 */
	rcu_read_lock();
	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_HANDLE]);
		rcu_read_unlock();
		ret = -ENOENT;
		goto free_msg;
	}

	ret = net_shaper_fill_one(msg, binding, shaper, info);
	rcu_read_unlock();
	if (ret)
		goto free_msg;

	ret = genlmsg_reply(msg, info);
	if (ret)
		goto free_msg;

	return 0;

free_msg:
	nlmsg_free(msg);
	return ret;
}
647 
/* Netlink 'get' dump op: emit every shaper configured on the device.
 * ctx->start_index persists across dump invocations to resume after a
 * filled skb.
 */
int net_shaper_nl_get_dumpit(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper *shaper;
	int ret = 0;

	/* Don't error out dumps performed before any set operation. */
	binding = net_shaper_binding_from_ctx(ctx);
	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy)
		return 0;

	rcu_read_lock();
	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
675 
/* Netlink 'set' op: create or update a single shaper.
 * NODE scope shapers can only be created by the 'group' op, so a NODE
 * handle must already exist here.
 */
int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	struct net_shaper_handle handle;
	struct net_shaper shaper = {};
	bool exists;
	int ret;

	binding = net_shaper_binding_from_ctx(info->ctx);

	net_shaper_lock(binding);
	ret = net_shaper_parse_info(binding, info->attrs, info, &shaper,
				    &exists);
	if (ret)
		goto unlock;

	if (!exists)
		net_shaper_default_parent(&shaper.handle, &shaper.parent);

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* The 'set' operation can't create node-scope shapers. */
	handle = shaper.handle;
	if (handle.scope == NET_SHAPER_SCOPE_NODE &&
	    !net_shaper_lookup(binding, &handle)) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = net_shaper_pre_insert(binding, &handle, info->extack);
	if (ret)
		goto unlock;

	/* Let the device apply the config first; commit only on success,
	 * otherwise drop the tentative entry.
	 */
	ops = net_shaper_ops(binding);
	ret = ops->set(binding, &shaper, info->extack);
	if (ret) {
		net_shaper_rollback(binding);
		goto unlock;
	}

	net_shaper_commit(binding, 1, &shaper);

unlock:
	net_shaper_unlock(binding);
	return ret;
}
728 
/* Delete the given shaper, notifying the device, then walk up the
 * hierarchy deleting any NODE ancestor left without leaves.
 * Called under the net shaper lock.
 */
static int __net_shaper_delete(struct net_shaper_binding *binding,
			       struct net_shaper *shaper,
			       struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper_handle parent_handle, handle = shaper->handle;
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	int ret;

again:
	/* Save the parent before freeing the shaper itself. */
	parent_handle = shaper->parent;

	ret = ops->delete(binding, &handle, extack);
	if (ret < 0)
		return ret;

	xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle));
	kfree_rcu(shaper, rcu);

	/* Eventually delete the parent, if it is left over with no leaves. */
	if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) {
		shaper = net_shaper_lookup(binding, &parent_handle);
		if (shaper && !--shaper->leaves) {
			handle = parent_handle;
			goto again;
		}
	}
	return 0;
}
758 
/* Compare two handles; returns 0 when they are equal (memcmp-style). */
static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
				 const struct net_shaper_handle *b)
{
	/* Must avoid holes in struct net_shaper_handle. */
	BUILD_BUG_ON(sizeof(*a) != 8);

	return memcmp(a, b, sizeof(*a));
}
767 
768 static int net_shaper_parent_from_leaves(int leaves_count,
769 					 const struct net_shaper *leaves,
770 					 struct net_shaper *node,
771 					 struct netlink_ext_ack *extack)
772 {
773 	struct net_shaper_handle parent = leaves[0].parent;
774 	int i;
775 
776 	for (i = 1; i < leaves_count; ++i) {
777 		if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
778 			NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
779 			return -EINVAL;
780 		}
781 	}
782 
783 	node->parent = parent;
784 	return 0;
785 }
786 
/* Group the leaves under the given node, calling into the device.
 * On success the node handle may have been updated (new NODE id
 * allocation) and all the involved shapers are committed; on failure
 * every tentative insert is rolled back.
 * Called under the net shaper lock.
 */
static int __net_shaper_group(struct net_shaper_binding *binding,
			      int leaves_count, struct net_shaper *leaves,
			      struct net_shaper *node,
			      struct netlink_ext_ack *extack)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct net_shaper_handle leaf_handle;
	struct net_shaper *parent = NULL;
	bool new_node = false;
	int i, ret;

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		/* NODE scope with unspecified id means 'create a new node'. */
		new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;

		if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
			/* The related attribute is not available when
			 * reaching here from the delete() op.
			 */
			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
					   node->handle.scope, node->handle.id);
			return -ENOENT;
		}

		/* When unspecified, the node parent scope is inherited from
		 * the leaves.
		 */
		if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
			ret = net_shaper_parent_from_leaves(leaves_count,
							    leaves, node,
							    extack);
			if (ret)
				return ret;
		}

	} else {
		net_shaper_default_parent(&node->handle, &node->parent);
	}

	if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
		parent = net_shaper_lookup(binding, &node->parent);
		if (!parent) {
			NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exists",
					   node->parent.scope, node->parent.id);
			return -ENOENT;
		}
	}

	/* For newly created node scope shaper, the following will update
	 * the handle, due to id allocation.
	 */
	ret = net_shaper_pre_insert(binding, &node->handle, extack);
	if (ret)
		return ret;

	for (i = 0; i < leaves_count; ++i) {
		leaf_handle = leaves[i].handle;

		ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
		if (ret)
			goto rollback;

		/* Leaf already linked to this node: nothing to update. */
		if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
			continue;

		/* The leaves shapers will be nested to the node, update the
		 * linking accordingly.
		 */
		leaves[i].parent = node->handle;
		node->leaves++;
	}

	ret = ops->group(binding, leaves_count, leaves, node, extack);
	if (ret < 0)
		goto rollback;

	/* The node's parent gains a new leaf only when the node itself
	 * is created by this group operation
	 */
	if (new_node && parent)
		parent->leaves++;
	net_shaper_commit(binding, 1, node);
	net_shaper_commit(binding, leaves_count, leaves);
	return 0;

rollback:
	net_shaper_rollback(binding);
	return ret;
}
875 
/* Netlink 'delete' op: remove a single shaper from the hierarchy.
 * NODE scope deletion is not implemented yet.
 */
int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);

	net_shaper_lock(binding);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret)
		goto unlock;

	hierarchy = net_shaper_hierarchy(binding);
	if (!hierarchy) {
		ret = -ENOENT;
		goto unlock;
	}

	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		ret = -ENOENT;
		goto unlock;
	}

	if (handle.scope == NET_SHAPER_SCOPE_NODE) {
		/* TODO: implement support for scope NODE delete. */
		ret = -EINVAL;
		goto unlock;
	}

	ret = __net_shaper_delete(binding, shaper, info->extack);

unlock:
	net_shaper_unlock(binding);
	return ret;
}
919 
/* Send the 'group' op reply carrying the (possibly newly allocated)
 * node handle. Consumes @msg on every path.
 */
static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
				       const struct net_shaper_handle *handle,
				       struct genl_info *info,
				       struct sk_buff *msg)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		goto free_msg;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
		goto free_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

free_msg:
	/* Should never happen as msg is pre-allocated with enough space. */
	WARN_ONCE(true, "calculated message payload length (%d)",
		  net_shaper_handle_size());
	nlmsg_free(msg);
	return -EMSGSIZE;
}
946 
947 int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
948 {
949 	struct net_shaper **old_nodes, *leaves, node = {};
950 	struct net_shaper_hierarchy *hierarchy;
951 	struct net_shaper_binding *binding;
952 	int i, ret, rem, leaves_count;
953 	int old_nodes_count = 0;
954 	struct sk_buff *msg;
955 	struct nlattr *attr;
956 
957 	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
958 		return -EINVAL;
959 
960 	binding = net_shaper_binding_from_ctx(info->ctx);
961 
962 	/* The group operation is optional. */
963 	if (!net_shaper_ops(binding)->group)
964 		return -EOPNOTSUPP;
965 
966 	net_shaper_lock(binding);
967 	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
968 	if (!leaves_count) {
969 		NL_SET_BAD_ATTR(info->extack,
970 				info->attrs[NET_SHAPER_A_LEAVES]);
971 		ret = -EINVAL;
972 		goto unlock;
973 	}
974 
975 	leaves = kcalloc(leaves_count, sizeof(struct net_shaper) +
976 			 sizeof(struct net_shaper *), GFP_KERNEL);
977 	if (!leaves) {
978 		ret = -ENOMEM;
979 		goto unlock;
980 	}
981 	old_nodes = (void *)&leaves[leaves_count];
982 
983 	ret = net_shaper_parse_node(binding, info->attrs, info, &node);
984 	if (ret)
985 		goto free_leaves;
986 
987 	i = 0;
988 	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
989 			       genlmsg_data(info->genlhdr),
990 			       genlmsg_len(info->genlhdr), rem) {
991 		if (WARN_ON_ONCE(i >= leaves_count))
992 			goto free_leaves;
993 
994 		ret = net_shaper_parse_leaf(binding, attr, info,
995 					    &node, &leaves[i]);
996 		if (ret)
997 			goto free_leaves;
998 		i++;
999 	}
1000 
1001 	/* Prepare the msg reply in advance, to avoid device operation
1002 	 * rollback on allocation failure.
1003 	 */
1004 	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
1005 	if (!msg)
1006 		goto free_leaves;
1007 
1008 	hierarchy = net_shaper_hierarchy_setup(binding);
1009 	if (!hierarchy) {
1010 		ret = -ENOMEM;
1011 		goto free_msg;
1012 	}
1013 
1014 	/* Record the node shapers that this group() operation can make
1015 	 * childless for later cleanup.
1016 	 */
1017 	for (i = 0; i < leaves_count; i++) {
1018 		if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
1019 		    net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
1020 			struct net_shaper *tmp;
1021 
1022 			tmp = net_shaper_lookup(binding, &leaves[i].parent);
1023 			if (!tmp)
1024 				continue;
1025 
1026 			old_nodes[old_nodes_count++] = tmp;
1027 		}
1028 	}
1029 
1030 	ret = __net_shaper_group(binding, leaves_count, leaves, &node,
1031 				 info->extack);
1032 	if (ret)
1033 		goto free_msg;
1034 
1035 	/* Check if we need to delete any node left alone by the new leaves
1036 	 * linkage.
1037 	 */
1038 	for (i = 0; i < old_nodes_count; ++i) {
1039 		struct net_shaper *tmp = old_nodes[i];
1040 
1041 		if (--tmp->leaves > 0)
1042 			continue;
1043 
1044 		/* Errors here are not fatal: the grouping operation is
1045 		 * completed, and user-space can still explicitly clean-up
1046 		 * left-over nodes.
1047 		 */
1048 		__net_shaper_delete(binding, tmp, info->extack);
1049 	}
1050 
1051 	ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
1052 	if (ret)
1053 		GENL_SET_ERR_MSG_FMT(info, "Can't send reply");
1054 
1055 free_leaves:
1056 	kfree(leaves);
1057 
1058 unlock:
1059 	net_shaper_unlock(binding);
1060 	return ret;
1061 
1062 free_msg:
1063 	kfree_skb(msg);
1064 	goto free_leaves;
1065 }
1066 
/* Tear down the whole hierarchy for the given binding, freeing every
 * shaper entry and the container itself.
 */
static void net_shaper_flush(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	net_shaper_lock(binding);
	xa_lock(&hierarchy->shapers);
	xa_for_each(&hierarchy->shapers, index, cur) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
	net_shaper_unlock(binding);

	kfree(hierarchy);
}
1087 
/* Release all the shaper resources attached to the given device;
 * invoked from the netdev core at device dismantle time.
 */
void net_shaper_flush_netdev(struct net_device *dev)
{
	struct net_shaper_binding binding = {
		.type = NET_SHAPER_BINDING_TYPE_NETDEV,
		.netdev = dev,
	};

	net_shaper_flush(&binding);
}
1097 
/* Register the net-shaper generic netlink family at boot. */
static int __init shaper_init(void)
{
	return genl_register_family(&net_shaper_nl_family);
}

subsys_initcall(shaper_init);
1104