xref: /linux/net/shaper/shaper.c (revision a1d9d8e833781c44ab688708804ce35f20f3cbbd)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/bits.h>
4 #include <linux/bitfield.h>
5 #include <linux/idr.h>
6 #include <linux/kernel.h>
7 #include <linux/netdevice.h>
8 #include <linux/netlink.h>
9 #include <linux/skbuff.h>
10 #include <linux/xarray.h>
11 #include <net/devlink.h>
12 #include <net/net_shaper.h>
13 
14 #include "shaper_nl_gen.h"
15 
16 #include "../core/dev.h"
17 
18 #define NET_SHAPER_SCOPE_SHIFT	26
19 #define NET_SHAPER_ID_MASK	GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
20 #define NET_SHAPER_SCOPE_MASK	GENMASK(31, NET_SHAPER_SCOPE_SHIFT)
21 
22 #define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK
23 
/* Per-binding shaper storage: every shaper installed on a given binding
 * (currently only netdevs), indexed by the scope/id-derived xarray index.
 */
struct net_shaper_hierarchy {
	struct xarray shapers;
};
27 
/* Per-request state; must fit into the generic netlink ctx scratch area
 * (enforced by BUILD_BUG_ON in the pre-doit handlers).
 */
struct net_shaper_nl_ctx {
	struct net_shaper_binding binding;	/* target of the operation */
	netdevice_tracker dev_tracker;		/* tracks the netdev refcount */
	unsigned long start_index;		/* dump resume position */
};
33 
net_shaper_binding_from_ctx(void * ctx)34 static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
35 {
36 	return &((struct net_shaper_nl_ctx *)ctx)->binding;
37 }
38 
/* Fetch the shaper hierarchy attached to the given binding, if any. */
static struct net_shaper_hierarchy *
net_shaper_hierarchy(struct net_shaper_binding *binding)
{
	/* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */
	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
		return READ_ONCE(binding->netdev->net_shaper_hierarchy);

	/* No other type supported yet. */
	return NULL;
}
49 
/* Like net_shaper_hierarchy(), but for lockless readers: additionally
 * bail out once the device has moved past the registered state.
 */
static struct net_shaper_hierarchy *
net_shaper_hierarchy_rcu(struct net_shaper_binding *binding)
{
	/* Readers look up the device and take a ref, then take RCU lock
	 * later at which point netdev may have been unregistered and flushed.
	 * READ_ONCE() pairs with WRITE_ONCE() in net_shaper_hierarchy_setup.
	 */
	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
	    READ_ONCE(binding->netdev->reg_state) <= NETREG_REGISTERED)
		return READ_ONCE(binding->netdev->net_shaper_hierarchy);

	/* No other type supported yet. */
	return NULL;
}
64 
65 static const struct net_shaper_ops *
net_shaper_ops(struct net_shaper_binding * binding)66 net_shaper_ops(struct net_shaper_binding *binding)
67 {
68 	if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV)
69 		return binding->netdev->netdev_ops->net_shaper_ops;
70 
71 	/* No other type supported yet. */
72 	return NULL;
73 }
74 
75 /* Count the number of [multi] attributes of the given type. */
net_shaper_list_len(struct genl_info * info,int type)76 static int net_shaper_list_len(struct genl_info *info, int type)
77 {
78 	struct nlattr *attr;
79 	int rem, cnt = 0;
80 
81 	nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr),
82 			       genlmsg_len(info->genlhdr), rem)
83 		cnt++;
84 	return cnt;
85 }
86 
net_shaper_handle_size(void)87 static int net_shaper_handle_size(void)
88 {
89 	return nla_total_size(nla_total_size(sizeof(u32)) +
90 			      nla_total_size(sizeof(u32)));
91 }
92 
/* Emit the binding identifier - currently the netdev ifindex - into
 * @msg as attribute @type.
 */
static int net_shaper_fill_binding(struct sk_buff *msg,
				   const struct net_shaper_binding *binding,
				   u32 type)
{
	/* Should never happen, as currently only NETDEV is supported. */
	if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV))
		return -EINVAL;

	if (nla_put_u32(msg, type, binding->netdev->ifindex))
		return -EMSGSIZE;

	return 0;
}
106 
/* Emit the given handle into @msg as the nested attribute @type.
 * UNSPEC-scope handles are silently skipped; the id is emitted only
 * for scopes at or above QUEUE, where it is meaningful.
 */
static int net_shaper_fill_handle(struct sk_buff *msg,
				  const struct net_shaper_handle *handle,
				  u32 type)
{
	struct nlattr *handle_attr;

	if (handle->scope == NET_SHAPER_SCOPE_UNSPEC)
		return 0;

	handle_attr = nla_nest_start(msg, type);
	if (!handle_attr)
		return -EMSGSIZE;

	if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) ||
	    (handle->scope >= NET_SHAPER_SCOPE_QUEUE &&
	     nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id)))
		goto handle_nest_cancel;

	nla_nest_end(msg, handle_attr);
	return 0;

handle_nest_cancel:
	nla_nest_cancel(msg, handle_attr);
	return -EMSGSIZE;
}
132 
/* Dump a single shaper into @msg. Attributes carrying default (zero)
 * values are omitted; the metric is emitted only when at least one
 * rate-related field is set.
 */
static int
net_shaper_fill_one(struct sk_buff *msg,
		    const struct net_shaper_binding *binding,
		    const struct net_shaper *shaper,
		    const struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_iput(msg, info);
	if (!hdr)
		return -EMSGSIZE;

	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
	    net_shaper_fill_handle(msg, &shaper->parent,
				   NET_SHAPER_A_PARENT) ||
	    net_shaper_fill_handle(msg, &shaper->handle,
				   NET_SHAPER_A_HANDLE) ||
	    ((shaper->bw_min || shaper->bw_max || shaper->burst) &&
	     nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) ||
	    (shaper->bw_min &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) ||
	    (shaper->bw_max &&
	     nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) ||
	    (shaper->burst &&
	     nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) ||
	    (shaper->priority &&
	     nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) ||
	    (shaper->weight &&
	     nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight)))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
172 
/* Initialize the context fetching the relevant device and
 * acquiring a reference to it.
 * On success the caller must release the reference via
 * net_shaper_ctx_cleanup().
 */
static int net_shaper_ctx_setup(const struct genl_info *info, int type,
				struct net_shaper_nl_ctx *ctx)
{
	struct net *ns = genl_info_net(info);
	struct net_device *dev;
	int ifindex;

	/* The ifindex attribute @type is mandatory. */
	if (GENL_REQ_ATTR_CHECK(info, type))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[type]);
	dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL);
	if (!dev) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		return -ENOENT;
	}

	/* The device must implement the shaper ops. */
	if (!dev->netdev_ops->net_shaper_ops) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		netdev_put(dev, &ctx->dev_tracker);
		return -EOPNOTSUPP;
	}

	ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	ctx->binding.netdev = dev;
	return 0;
}
203 
/* Like net_shaper_ctx_setup(), but for "write" handlers (never for dumps!)
 * Acquires the lock protecting the hierarchy (instance lock for netdev).
 * On success the caller must release the lock via
 * net_shaper_ctx_cleanup_unlock().
 */
static int net_shaper_ctx_setup_lock(const struct genl_info *info, int type,
				     struct net_shaper_nl_ctx *ctx)
{
	struct net *ns = genl_info_net(info);
	struct net_device *dev;
	int ifindex;

	/* The ifindex attribute @type is mandatory. */
	if (GENL_REQ_ATTR_CHECK(info, type))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[type]);
	dev = netdev_get_by_index_lock(ns, ifindex);
	if (!dev) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		return -ENOENT;
	}

	/* The device must implement the shaper ops. */
	if (!dev->netdev_ops->net_shaper_ops) {
		NL_SET_BAD_ATTR(info->extack, info->attrs[type]);
		netdev_unlock(dev);
		return -EOPNOTSUPP;
	}

	ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	ctx->binding.netdev = dev;
	return 0;
}
234 
net_shaper_ctx_cleanup(struct net_shaper_nl_ctx * ctx)235 static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx)
236 {
237 	if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
238 		netdev_put(ctx->binding.netdev, &ctx->dev_tracker);
239 }
240 
net_shaper_ctx_cleanup_unlock(struct net_shaper_nl_ctx * ctx)241 static void net_shaper_ctx_cleanup_unlock(struct net_shaper_nl_ctx *ctx)
242 {
243 	if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV)
244 		netdev_unlock(ctx->binding.netdev);
245 }
246 
net_shaper_handle_to_index(const struct net_shaper_handle * handle)247 static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle)
248 {
249 	return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) |
250 		FIELD_PREP(NET_SHAPER_ID_MASK, handle->id);
251 }
252 
/* Inverse of net_shaper_handle_to_index(). */
static void net_shaper_index_to_handle(u32 index,
				       struct net_shaper_handle *handle)
{
	handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index);
	handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index);
}
259 
/* Compute the default parent for the given handle: QUEUE and NODE
 * scope shapers hang off the NETDEV one; anything else has no parent.
 */
static void net_shaper_default_parent(const struct net_shaper_handle *handle,
				      struct net_shaper_handle *parent)
{
	switch (handle->scope) {
	case NET_SHAPER_SCOPE_UNSPEC:
	case NET_SHAPER_SCOPE_NETDEV:
	case __NET_SHAPER_SCOPE_MAX:
		parent->scope = NET_SHAPER_SCOPE_UNSPEC;
		break;

	case NET_SHAPER_SCOPE_QUEUE:
	case NET_SHAPER_SCOPE_NODE:
		parent->scope = NET_SHAPER_SCOPE_NETDEV;
		break;
	}
	parent->id = 0;
}
277 
278 /*
279  * MARK_0 is already in use due to XA_FLAGS_ALLOC, can't reuse such flag as
280  * it's cleared by xa_store().
281  */
282 #define NET_SHAPER_NOT_VALID XA_MARK_1
283 
/* Look up the shaper matching the given handle; entries still carrying
 * the 'tentative' mark (pre-inserted but not yet committed) are treated
 * as absent.
 */
static struct net_shaper *
net_shaper_lookup(struct net_shaper_binding *binding,
		  const struct net_shaper_handle *handle)
{
	u32 index = net_shaper_handle_to_index(handle);
	struct net_shaper_hierarchy *hierarchy;

	hierarchy = net_shaper_hierarchy_rcu(binding);
	if (!hierarchy || xa_get_mark(&hierarchy->shapers, index,
				      NET_SHAPER_NOT_VALID))
		return NULL;

	return xa_load(&hierarchy->shapers, index);
}
298 
/* Allocate on demand the per device shaper's hierarchy container.
 * Called under the lock protecting the hierarchy (instance lock for netdev)
 * Returns NULL on allocation failure.
 */
static struct net_shaper_hierarchy *
net_shaper_hierarchy_setup(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);

	/* Already set up by a previous operation on this binding. */
	if (hierarchy)
		return hierarchy;

	hierarchy = kmalloc_obj(*hierarchy);
	if (!hierarchy)
		return NULL;

	/* The flag is required for ID allocation */
	xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC);

	switch (binding->type) {
	case NET_SHAPER_BINDING_TYPE_NETDEV:
		/* Pairs with READ_ONCE in net_shaper_hierarchy. */
		WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy);
		break;
	}
	return hierarchy;
}
325 
/* Prepare the hierarchy container to actually insert the given shaper, doing
 * in advance the needed allocations.
 * On success the new entry is stored marked as 'tentative'; it must later
 * be either committed (net_shaper_commit) or rolled back
 * (net_shaper_rollback).
 * For NODE scope handles with an unspecified id, a fresh id is allocated
 * and written back into @handle.
 */
static int net_shaper_pre_insert(struct net_shaper_binding *binding,
				 struct net_shaper_handle *handle,
				 struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *prev, *cur;
	bool id_allocated = false;
	int ret, index;

	if (!hierarchy)
		return -ENOMEM;

	index = net_shaper_handle_to_index(handle);
	cur = xa_load(&hierarchy->shapers, index);
	/* Entry already present: nothing to allocate. */
	if (cur)
		return 0;

	/* Allocated a new id, if needed. */
	if (handle->scope == NET_SHAPER_SCOPE_NODE &&
	    handle->id == NET_SHAPER_ID_UNSPEC) {
		u32 min, max;

		/* Compute the index range spanned by valid NODE ids. */
		handle->id = NET_SHAPER_ID_MASK - 1;
		max = net_shaper_handle_to_index(handle);
		handle->id = 0;
		min = net_shaper_handle_to_index(handle);

		/* Reserve the index, storing a NULL entry for now. */
		ret = xa_alloc(&hierarchy->shapers, &index, NULL,
			       XA_LIMIT(min, max), GFP_KERNEL);
		if (ret < 0) {
			NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper");
			return ret;
		}

		net_shaper_index_to_handle(index, handle);
		id_allocated = true;
	}

	cur = kzalloc_obj(*cur);
	if (!cur) {
		ret = -ENOMEM;
		goto free_id;
	}

	/* Mark 'tentative' shaper inside the hierarchy container.
	 * xa_set_mark is a no-op if the previous store fails.
	 */
	xa_lock(&hierarchy->shapers);
	prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
	__xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID);
	xa_unlock(&hierarchy->shapers);
	if (xa_err(prev)) {
		NL_SET_ERR_MSG(extack, "Can't insert shaper into device store");
		kfree_rcu(cur, rcu);
		ret = xa_err(prev);
		goto free_id;
	}
	return 0;

free_id:
	/* Release the id reserved above, if any. */
	if (id_allocated)
		xa_erase(&hierarchy->shapers, index);
	return ret;
}
393 
/* Commit the tentative insert with the actual values.
 * Must be called only after a successful net_shaper_pre_insert().
 * Clears the 'tentative' mark, making the shapers visible to lookups.
 */
static void net_shaper_commit(struct net_shaper_binding *binding,
			      int nr_shapers, const struct net_shaper *shapers)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	int index;
	int i;

	xa_lock(&hierarchy->shapers);
	for (i = 0; i < nr_shapers; ++i) {
		index = net_shaper_handle_to_index(&shapers[i].handle);

		/* Pre-insert guarantees the entry exists. */
		cur = xa_load(&hierarchy->shapers, index);
		if (WARN_ON_ONCE(!cur))
			continue;

		/* Successful update: drop the tentative mark
		 * and update the hierarchy container.
		 */
		__xa_clear_mark(&hierarchy->shapers, index,
				NET_SHAPER_NOT_VALID);
		*cur = shapers[i];
	}
	xa_unlock(&hierarchy->shapers);
}
422 
/* Rollback all the tentative inserts from the hierarchy.
 * Erases and frees every entry still carrying the 'tentative' mark.
 */
static void net_shaper_rollback(struct net_shaper_binding *binding)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper *cur;
	unsigned long index;

	if (!hierarchy)
		return;

	xa_lock(&hierarchy->shapers);
	xa_for_each_marked(&hierarchy->shapers, index, cur,
			   NET_SHAPER_NOT_VALID) {
		__xa_erase(&hierarchy->shapers, index);
		kfree(cur);
	}
	xa_unlock(&hierarchy->shapers);
}
441 
/* Parse the nested handle attribute @attr into @handle.
 * The scope sub-attribute is mandatory; a missing id defaults to 0,
 * except for NODE scope where it means "allocate a new id".
 */
static int net_shaper_parse_handle(const struct nlattr *attr,
				   const struct genl_info *info,
				   struct net_shaper_handle *handle)
{
	struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1];
	struct nlattr *id_attr;
	u32 id = 0;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr,
			       net_shaper_handle_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	if (NL_REQ_ATTR_CHECK(info->extack, attr, tb,
			      NET_SHAPER_A_HANDLE_SCOPE))
		return -EINVAL;

	handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]);

	/* The default id for NODE scope shapers is an invalid one
	 * to help the 'group' operation discriminate between new
	 * NODE shaper creation (ID_UNSPEC) and reuse of existing
	 * shaper (any other value).
	 */
	id_attr = tb[NET_SHAPER_A_HANDLE_ID];
	if (id_attr)
		id = nla_get_u32(id_attr);
	else if (handle->scope == NET_SHAPER_SCOPE_NODE)
		id = NET_SHAPER_ID_UNSPEC;

	handle->id = id;
	return 0;
}
476 
/* Validate the user-provided attributes in @tb against the capabilities
 * the device advertises for the shaper's scope, and sanity-check the
 * queue id and the rate metric.
 */
static int net_shaper_validate_caps(struct net_shaper_binding *binding,
				    struct nlattr **tb,
				    const struct genl_info *info,
				    struct net_shaper *shaper)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct nlattr *bad = NULL;
	unsigned long caps = 0;

	ops->capabilities(binding, shaper->handle.scope, &caps);

	/* Each attribute is allowed only if the matching capability bit
	 * is set; remember the last offending attribute, if any.
	 */
	if (tb[NET_SHAPER_A_PRIORITY] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY)))
		bad = tb[NET_SHAPER_A_PRIORITY];
	if (tb[NET_SHAPER_A_WEIGHT] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT)))
		bad = tb[NET_SHAPER_A_WEIGHT];
	if (tb[NET_SHAPER_A_BW_MIN] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN)))
		bad = tb[NET_SHAPER_A_BW_MIN];
	if (tb[NET_SHAPER_A_BW_MAX] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX)))
		bad = tb[NET_SHAPER_A_BW_MAX];
	if (tb[NET_SHAPER_A_BURST] &&
	    !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST)))
		bad = tb[NET_SHAPER_A_BURST];

	/* No capability at all for this scope: blame the handle itself. */
	if (!caps)
		bad = tb[NET_SHAPER_A_HANDLE];

	if (bad) {
		NL_SET_BAD_ATTR(info->extack, bad);
		return -EOPNOTSUPP;
	}

	/* QUEUE scope ids must map to an existing tx queue. */
	if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE &&
	    binding->type == NET_SHAPER_BINDING_TYPE_NETDEV &&
	    shaper->handle.id >= binding->netdev->real_num_tx_queues) {
		NL_SET_ERR_MSG_FMT(info->extack,
				   "Not existing queue id %d max %d",
				   shaper->handle.id,
				   binding->netdev->real_num_tx_queues);
		return -ENOENT;
	}

	/* The metric is really used only if there is *any* rate-related
	 * setting, either in current attributes set or in pre-existing
	 * values.
	 */
	if (shaper->burst || shaper->bw_min || shaper->bw_max) {
		u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS +
				 shaper->metric;

		/* The metric test can fail even when the user did not
		 * specify the METRIC attribute. Pointing to rate related
		 * attribute will be confusing, as the attribute itself
		 * could be indeed supported, with a different metric.
		 * Be more specific.
		 */
		if (!(caps & BIT(metric_cap))) {
			NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d",
					   shaper->metric);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
544 
/* Parse the shaper attributes in @tb into @shaper. Any pre-existing
 * shaper with the same handle is used as the starting point, so the
 * user-provided attributes incrementally update it; @exists reports
 * whether such a pre-existing shaper was found.
 */
static int net_shaper_parse_info(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper,
				 bool *exists)
{
	struct net_shaper *old;
	int ret;

	/* The shaper handle is the only mandatory attribute. */
	if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info,
				      &shaper->handle);
	if (ret)
		return ret;

	/* UNSPEC scope is not a valid target. */
	if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	/* Fetch existing hierarchy, if any, so that user provide info will
	 * incrementally update the existing shaper configuration.
	 */
	old = net_shaper_lookup(binding, &shaper->handle);
	if (old)
		*shaper = *old;
	*exists = !!old;

	if (tb[NET_SHAPER_A_METRIC])
		shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]);

	if (tb[NET_SHAPER_A_BW_MIN])
		shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]);

	if (tb[NET_SHAPER_A_BW_MAX])
		shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]);

	if (tb[NET_SHAPER_A_BURST])
		shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]);

	if (tb[NET_SHAPER_A_PRIORITY])
		shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]);

	if (tb[NET_SHAPER_A_WEIGHT])
		shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]);

	/* Validate the resulting configuration against device caps. */
	ret = net_shaper_validate_caps(binding, tb, info, shaper);
	if (ret < 0)
		return ret;

	return 0;
}
600 
net_shaper_validate_nesting(struct net_shaper_binding * binding,const struct net_shaper * shaper,struct netlink_ext_ack * extack)601 static int net_shaper_validate_nesting(struct net_shaper_binding *binding,
602 				       const struct net_shaper *shaper,
603 				       struct netlink_ext_ack *extack)
604 {
605 	const struct net_shaper_ops *ops = net_shaper_ops(binding);
606 	unsigned long caps = 0;
607 
608 	ops->capabilities(binding, shaper->handle.scope, &caps);
609 	if (!(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_NESTING))) {
610 		NL_SET_ERR_MSG_FMT(extack,
611 				   "Nesting not supported for scope %d",
612 				   shaper->handle.scope);
613 		return -EOPNOTSUPP;
614 	}
615 	return 0;
616 }
617 
/* Fetch the existing leaf and update it with the user-provided
 * attributes.
 * Leaves must be QUEUE scope; nesting under a NODE scope parent is
 * additionally validated against the device capabilities.
 */
static int net_shaper_parse_leaf(struct net_shaper_binding *binding,
				 const struct nlattr *attr,
				 const struct genl_info *info,
				 const struct net_shaper *node,
				 struct net_shaper *shaper)
{
	struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1];
	bool exists;
	int ret;

	ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr,
			       net_shaper_leaf_info_nl_policy, info->extack);
	if (ret < 0)
		return ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret < 0)
		return ret;

	/* Only queues can be leaves. */
	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		ret = net_shaper_validate_nesting(binding, shaper,
						  info->extack);
		if (ret < 0)
			return ret;
	}

	/* Newly-created leaves get the default parent. */
	if (!exists)
		net_shaper_default_parent(&shaper->handle, &shaper->parent);
	return 0;
}
656 
/* Alike net_parse_shaper_info(), but additionally allow the user specifying
 * the shaper's parent handle.
 * Both the node and its parent, when provided, must be NODE or NETDEV
 * scope.
 */
static int net_shaper_parse_node(struct net_shaper_binding *binding,
				 struct nlattr **tb,
				 const struct genl_info *info,
				 struct net_shaper *shaper)
{
	bool exists;
	int ret;

	ret = net_shaper_parse_info(binding, tb, info, shaper, &exists);
	if (ret)
		return ret;

	if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE &&
	    shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
		NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]);
		return -EINVAL;
	}

	if (tb[NET_SHAPER_A_PARENT]) {
		ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info,
					      &shaper->parent);
		if (ret)
			return ret;

		if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE &&
		    shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) {
			NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]);
			return -EINVAL;
		}
	}
	return 0;
}
692 
net_shaper_generic_pre(struct genl_info * info,int type)693 static int net_shaper_generic_pre(struct genl_info *info, int type)
694 {
695 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;
696 
697 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));
698 
699 	return net_shaper_ctx_setup(info, type, ctx);
700 }
701 
/* Netlink pre-doit hook for the shaper "read" operations. */
int net_shaper_nl_pre_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX);
}
707 
net_shaper_generic_post(struct genl_info * info)708 static void net_shaper_generic_post(struct genl_info *info)
709 {
710 	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx);
711 }
712 
/* Netlink post-doit hook paired with net_shaper_nl_pre_doit(). */
void net_shaper_nl_post_doit(const struct genl_split_ops *ops,
			     struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}
718 
net_shaper_nl_pre_doit_write(const struct genl_split_ops * ops,struct sk_buff * skb,struct genl_info * info)719 int net_shaper_nl_pre_doit_write(const struct genl_split_ops *ops,
720 				struct sk_buff *skb, struct genl_info *info)
721 {
722 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx;
723 
724 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));
725 
726 	return net_shaper_ctx_setup_lock(info, NET_SHAPER_A_IFINDEX, ctx);
727 }
728 
net_shaper_nl_post_doit_write(const struct genl_split_ops * ops,struct sk_buff * skb,struct genl_info * info)729 void net_shaper_nl_post_doit_write(const struct genl_split_ops *ops,
730 				   struct sk_buff *skb, struct genl_info *info)
731 {
732 	net_shaper_ctx_cleanup_unlock((struct net_shaper_nl_ctx *)info->ctx);
733 }
734 
net_shaper_nl_pre_dumpit(struct netlink_callback * cb)735 int net_shaper_nl_pre_dumpit(struct netlink_callback *cb)
736 {
737 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
738 	const struct genl_info *info = genl_info_dump(cb);
739 
740 	return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx);
741 }
742 
net_shaper_nl_post_dumpit(struct netlink_callback * cb)743 int net_shaper_nl_post_dumpit(struct netlink_callback *cb)
744 {
745 	net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx);
746 	return 0;
747 }
748 
/* Pre-doit hook for the capabilities operations; same as the generic
 * one, but the ifindex lives in a different attribute.
 */
int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops,
			       struct sk_buff *skb, struct genl_info *info)
{
	return net_shaper_generic_pre(info, NET_SHAPER_A_CAPS_IFINDEX);
}
754 
/* Post-doit hook paired with net_shaper_nl_cap_pre_doit(). */
void net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops,
				 struct sk_buff *skb, struct genl_info *info)
{
	net_shaper_generic_post(info);
}
760 
net_shaper_nl_cap_pre_dumpit(struct netlink_callback * cb)761 int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb)
762 {
763 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
764 
765 	return net_shaper_ctx_setup(genl_info_dump(cb),
766 				    NET_SHAPER_A_CAPS_IFINDEX, ctx);
767 }
768 
net_shaper_nl_cap_post_dumpit(struct netlink_callback * cb)769 int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb)
770 {
771 	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
772 
773 	net_shaper_ctx_cleanup(ctx);
774 	return 0;
775 }
776 
/* Netlink 'get' handler: reply with a single shaper dump for the
 * requested handle. Lookup and fill run under the RCU read lock, as
 * only a device reference - not the instance lock - is held here.
 */
int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_binding *binding;
	struct net_shaper_handle handle;
	struct net_shaper *shaper;
	struct sk_buff *msg;
	int ret;

	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
		return -EINVAL;

	binding = net_shaper_binding_from_ctx(info->ctx);
	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
				      &handle);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();
	shaper = net_shaper_lookup(binding, &handle);
	if (!shaper) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NET_SHAPER_A_HANDLE]);
		rcu_read_unlock();
		ret = -ENOENT;
		goto free_msg;
	}

	ret = net_shaper_fill_one(msg, binding, shaper, info);
	rcu_read_unlock();
	if (ret)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return ret;
}
819 
/* Netlink 'get' dump handler: emit every shaper in the hierarchy,
 * resuming from ctx->start_index across successive dump invocations.
 */
int net_shaper_nl_get_dumpit(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	struct net_shaper *shaper;
	int ret = 0;

	/* Don't error out dumps performed before any set operation. */
	binding = net_shaper_binding_from_ctx(ctx);

	rcu_read_lock();
	hierarchy = net_shaper_hierarchy_rcu(binding);
	if (!hierarchy)
		goto out_unlock;

	/* On a full message, break out and let the next invocation
	 * resume from the current start_index.
	 */
	for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index,
				 U32_MAX, XA_PRESENT)); ctx->start_index++) {
		ret = net_shaper_fill_one(skb, binding, shaper, info);
		if (ret)
			break;
	}
out_unlock:
	rcu_read_unlock();

	return ret;
}
849 
/* Netlink 'set' handler: parse and validate the shaper, tentatively
 * insert it, then invoke the driver; commit the new values on success,
 * rollback the tentative entry on failure.
 */
int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding *binding;
	const struct net_shaper_ops *ops;
	struct net_shaper_handle handle;
	struct net_shaper shaper = {};
	bool exists;
	int ret;

	binding = net_shaper_binding_from_ctx(info->ctx);

	ret = net_shaper_parse_info(binding, info->attrs, info, &shaper,
				    &exists);
	if (ret)
		return ret;

	if (!exists)
		net_shaper_default_parent(&shaper.handle, &shaper.parent);

	hierarchy = net_shaper_hierarchy_setup(binding);
	if (!hierarchy)
		return -ENOMEM;

	/* The 'set' operation can't create node-scope shapers. */
	handle = shaper.handle;
	if (handle.scope == NET_SHAPER_SCOPE_NODE &&
	    !net_shaper_lookup(binding, &handle))
		return -ENOENT;

	ret = net_shaper_pre_insert(binding, &handle, info->extack);
	if (ret)
		return ret;

	ops = net_shaper_ops(binding);
	ret = ops->set(binding, &shaper, info->extack);
	if (ret) {
		/* Driver rejected the change: drop the tentative entry. */
		net_shaper_rollback(binding);
		return ret;
	}

	net_shaper_commit(binding, 1, &shaper);

	return 0;
}
895 
/* Delete the given shaper from the driver and the hierarchy, then
 * iteratively delete any NODE scope parent left over with no leaves.
 */
static int __net_shaper_delete(struct net_shaper_binding *binding,
			       struct net_shaper *shaper,
			       struct netlink_ext_ack *extack)
{
	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
	struct net_shaper_handle parent_handle, handle = shaper->handle;
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	int ret;

again:
	/* Save the parent before freeing the current shaper. */
	parent_handle = shaper->parent;

	ret = ops->delete(binding, &handle, extack);
	if (ret < 0)
		return ret;

	xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle));
	kfree_rcu(shaper, rcu);

	/* Eventually delete the parent, if it is left over with no leaves. */
	if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) {
		shaper = net_shaper_lookup(binding, &parent_handle);
		if (shaper && !--shaper->leaves) {
			handle = parent_handle;
			goto again;
		}
	}
	return 0;
}
925 
net_shaper_handle_cmp(const struct net_shaper_handle * a,const struct net_shaper_handle * b)926 static int net_shaper_handle_cmp(const struct net_shaper_handle *a,
927 				 const struct net_shaper_handle *b)
928 {
929 	/* Must avoid holes in struct net_shaper_handle. */
930 	BUILD_BUG_ON(sizeof(*a) != 8);
931 
932 	return memcmp(a, b, sizeof(*a));
933 }
934 
net_shaper_parent_from_leaves(int leaves_count,const struct net_shaper * leaves,struct net_shaper * node,struct netlink_ext_ack * extack)935 static int net_shaper_parent_from_leaves(int leaves_count,
936 					 const struct net_shaper *leaves,
937 					 struct net_shaper *node,
938 					 struct netlink_ext_ack *extack)
939 {
940 	struct net_shaper_handle parent = leaves[0].parent;
941 	int i;
942 
943 	for (i = 1; i < leaves_count; ++i) {
944 		if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) {
945 			NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent");
946 			return -EINVAL;
947 		}
948 	}
949 
950 	node->parent = parent;
951 	return 0;
952 }
953 
/* Core of the group operation: nest @leaves_count @leaves under @node,
 * resolving/validating the node's own parent, then commit the whole set
 * atomically with respect to the hierarchy (pre-insert, driver op,
 * commit-or-rollback).
 *
 * @update_node: when true, @node itself is staged and committed too;
 *	callers re-linking to an existing NETDEV-scope shaper pass false.
 *
 * Returns 0 on success or a negative errno; on failure every staged
 * insertion is rolled back.
 */
static int __net_shaper_group(struct net_shaper_binding *binding,
			      bool update_node, int leaves_count,
			      struct net_shaper *leaves,
			      struct net_shaper *node,
			      struct netlink_ext_ack *extack)
{
	const struct net_shaper_ops *ops = net_shaper_ops(binding);
	struct net_shaper_handle leaf_handle;
	struct net_shaper *parent = NULL;
	bool new_node = false;
	int i, ret;

	if (node->handle.scope == NET_SHAPER_SCOPE_NODE) {
		/* An unspecified id means the node must be created here. */
		new_node = node->handle.id == NET_SHAPER_ID_UNSPEC;

		if (!new_node && !net_shaper_lookup(binding, &node->handle)) {
			/* The related attribute is not available when
			 * reaching here from the delete() op.
			 */
			NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists",
					   node->handle.scope, node->handle.id);
			return -ENOENT;
		}

		/* When unspecified, the node parent scope is inherited from
		 * the leaves.
		 */
		if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) {
			ret = net_shaper_parent_from_leaves(leaves_count,
							    leaves, node,
							    extack);
			if (ret)
				return ret;
		}

	} else {
		net_shaper_default_parent(&node->handle, &node->parent);
	}

	if (node->parent.scope == NET_SHAPER_SCOPE_NODE) {
		/* NODE-scope parents must pre-exist; they are never created
		 * implicitly by this operation.
		 */
		parent = net_shaper_lookup(binding, &node->parent);
		if (!parent) {
			NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exists",
					   node->parent.scope, node->parent.id);
			return -ENOENT;
		}

		ret = net_shaper_validate_nesting(binding, node, extack);
		if (ret < 0)
			return ret;
	}

	if (update_node) {
		/* For newly created node scope shaper, the following will
		 * update the handle, due to id allocation.
		 */
		ret = net_shaper_pre_insert(binding, &node->handle, extack);
		if (ret)
			return ret;
	}

	for (i = 0; i < leaves_count; ++i) {
		leaf_handle = leaves[i].handle;

		ret = net_shaper_pre_insert(binding, &leaf_handle, extack);
		if (ret)
			goto rollback;

		/* Skip the leaves already nested to this node. */
		if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle))
			continue;

		/* The leaves shapers will be nested to the node, update the
		 * linking accordingly.
		 */
		leaves[i].parent = node->handle;
		node->leaves++;
	}

	/* Single driver call covering the node and all its leaves. */
	ret = ops->group(binding, leaves_count, leaves, node, extack);
	if (ret < 0)
		goto rollback;

	/* The node's parent gains a new leaf only when the node itself
	 * is created by this group operation
	 */
	if (new_node && parent)
		parent->leaves++;
	if (update_node)
		net_shaper_commit(binding, 1, node);
	net_shaper_commit(binding, leaves_count, leaves);
	return 0;

rollback:
	net_shaper_rollback(binding);
	return ret;
}
1050 
net_shaper_pre_del_node(struct net_shaper_binding * binding,const struct net_shaper * shaper,struct netlink_ext_ack * extack)1051 static int net_shaper_pre_del_node(struct net_shaper_binding *binding,
1052 				   const struct net_shaper *shaper,
1053 				   struct netlink_ext_ack *extack)
1054 {
1055 	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
1056 	struct net_shaper *cur, *leaves, node = {};
1057 	int ret, leaves_count = 0;
1058 	unsigned long index;
1059 	bool update_node;
1060 
1061 	if (!shaper->leaves)
1062 		return 0;
1063 
1064 	/* Fetch the new node information. */
1065 	node.handle = shaper->parent;
1066 	cur = net_shaper_lookup(binding, &node.handle);
1067 	if (cur) {
1068 		node = *cur;
1069 	} else {
1070 		/* A scope NODE shaper can be nested only to the NETDEV scope
1071 		 * shaper without creating the latter, this check may fail only
1072 		 * if the data is in inconsistent status.
1073 		 */
1074 		if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV))
1075 			return -EINVAL;
1076 	}
1077 
1078 	leaves = kzalloc_objs(struct net_shaper, shaper->leaves);
1079 	if (!leaves)
1080 		return -ENOMEM;
1081 
1082 	/* Build the leaves arrays. */
1083 	xa_for_each(&hierarchy->shapers, index, cur) {
1084 		if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
1085 			continue;
1086 
1087 		if (WARN_ON_ONCE(leaves_count == shaper->leaves)) {
1088 			ret = -EINVAL;
1089 			goto free;
1090 		}
1091 
1092 		leaves[leaves_count++] = *cur;
1093 	}
1094 
1095 	/* When re-linking to the netdev shaper, avoid the eventual, implicit,
1096 	 * creation of the new node, would be surprising since the user is
1097 	 * doing a delete operation.
1098 	 */
1099 	update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV;
1100 	ret = __net_shaper_group(binding, update_node, leaves_count,
1101 				 leaves, &node, extack);
1102 
1103 free:
1104 	kfree(leaves);
1105 	return ret;
1106 }
1107 
net_shaper_nl_delete_doit(struct sk_buff * skb,struct genl_info * info)1108 int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info)
1109 {
1110 	struct net_shaper_hierarchy *hierarchy;
1111 	struct net_shaper_binding *binding;
1112 	struct net_shaper_handle handle;
1113 	struct net_shaper *shaper;
1114 	int ret;
1115 
1116 	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE))
1117 		return -EINVAL;
1118 
1119 	binding = net_shaper_binding_from_ctx(info->ctx);
1120 
1121 	ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info,
1122 				      &handle);
1123 	if (ret)
1124 		return ret;
1125 
1126 	hierarchy = net_shaper_hierarchy(binding);
1127 	if (!hierarchy)
1128 		return -ENOENT;
1129 
1130 	shaper = net_shaper_lookup(binding, &handle);
1131 	if (!shaper)
1132 		return -ENOENT;
1133 
1134 	if (handle.scope == NET_SHAPER_SCOPE_NODE) {
1135 		ret = net_shaper_pre_del_node(binding, shaper, info->extack);
1136 		if (ret)
1137 			return ret;
1138 	}
1139 
1140 	return __net_shaper_delete(binding, shaper, info->extack);
1141 }
1142 
net_shaper_group_send_reply(struct net_shaper_binding * binding,const struct net_shaper_handle * handle,struct genl_info * info,struct sk_buff * msg)1143 static int net_shaper_group_send_reply(struct net_shaper_binding *binding,
1144 				       const struct net_shaper_handle *handle,
1145 				       struct genl_info *info,
1146 				       struct sk_buff *msg)
1147 {
1148 	void *hdr;
1149 
1150 	hdr = genlmsg_iput(msg, info);
1151 	if (!hdr)
1152 		goto free_msg;
1153 
1154 	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) ||
1155 	    net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE))
1156 		goto free_msg;
1157 
1158 	genlmsg_end(msg, hdr);
1159 
1160 	return genlmsg_reply(msg, info);
1161 
1162 free_msg:
1163 	/* Should never happen as msg is pre-allocated with enough space. */
1164 	WARN_ONCE(true, "calculated message payload length (%d)",
1165 		  net_shaper_handle_size());
1166 	nlmsg_free(msg);
1167 	return -EMSGSIZE;
1168 }
1169 
net_shaper_nl_group_doit(struct sk_buff * skb,struct genl_info * info)1170 int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info)
1171 {
1172 	struct net_shaper **old_nodes, *leaves, node = {};
1173 	struct net_shaper_hierarchy *hierarchy;
1174 	struct net_shaper_binding *binding;
1175 	int i, ret, rem, leaves_count;
1176 	int old_nodes_count = 0;
1177 	struct sk_buff *msg;
1178 	struct nlattr *attr;
1179 
1180 	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES))
1181 		return -EINVAL;
1182 
1183 	binding = net_shaper_binding_from_ctx(info->ctx);
1184 
1185 	/* The group operation is optional. */
1186 	if (!net_shaper_ops(binding)->group)
1187 		return -EOPNOTSUPP;
1188 
1189 	leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES);
1190 	if (!leaves_count) {
1191 		NL_SET_BAD_ATTR(info->extack,
1192 				info->attrs[NET_SHAPER_A_LEAVES]);
1193 		return -EINVAL;
1194 	}
1195 
1196 	leaves = kcalloc(leaves_count, sizeof(struct net_shaper) +
1197 			 sizeof(struct net_shaper *), GFP_KERNEL);
1198 	if (!leaves)
1199 		return -ENOMEM;
1200 	old_nodes = (void *)&leaves[leaves_count];
1201 
1202 	ret = net_shaper_parse_node(binding, info->attrs, info, &node);
1203 	if (ret)
1204 		goto free_leaves;
1205 
1206 	i = 0;
1207 	nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES,
1208 			       genlmsg_data(info->genlhdr),
1209 			       genlmsg_len(info->genlhdr), rem) {
1210 		if (WARN_ON_ONCE(i >= leaves_count))
1211 			goto free_leaves;
1212 
1213 		ret = net_shaper_parse_leaf(binding, attr, info,
1214 					    &node, &leaves[i]);
1215 		if (ret)
1216 			goto free_leaves;
1217 		i++;
1218 	}
1219 
1220 	/* Prepare the msg reply in advance, to avoid device operation
1221 	 * rollback on allocation failure.
1222 	 */
1223 	msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL);
1224 	if (!msg)
1225 		goto free_leaves;
1226 
1227 	hierarchy = net_shaper_hierarchy_setup(binding);
1228 	if (!hierarchy) {
1229 		ret = -ENOMEM;
1230 		goto free_msg;
1231 	}
1232 
1233 	/* Record the node shapers that this group() operation can make
1234 	 * childless for later cleanup.
1235 	 */
1236 	for (i = 0; i < leaves_count; i++) {
1237 		if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE &&
1238 		    net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) {
1239 			struct net_shaper *tmp;
1240 
1241 			tmp = net_shaper_lookup(binding, &leaves[i].parent);
1242 			if (!tmp)
1243 				continue;
1244 
1245 			old_nodes[old_nodes_count++] = tmp;
1246 		}
1247 	}
1248 
1249 	ret = __net_shaper_group(binding, true, leaves_count, leaves, &node,
1250 				 info->extack);
1251 	if (ret)
1252 		goto free_msg;
1253 
1254 	/* Check if we need to delete any node left alone by the new leaves
1255 	 * linkage.
1256 	 */
1257 	for (i = 0; i < old_nodes_count; ++i) {
1258 		struct net_shaper *tmp = old_nodes[i];
1259 
1260 		if (--tmp->leaves > 0)
1261 			continue;
1262 
1263 		/* Errors here are not fatal: the grouping operation is
1264 		 * completed, and user-space can still explicitly clean-up
1265 		 * left-over nodes.
1266 		 */
1267 		__net_shaper_delete(binding, tmp, info->extack);
1268 	}
1269 
1270 	ret = net_shaper_group_send_reply(binding, &node.handle, info, msg);
1271 	if (ret)
1272 		GENL_SET_ERR_MSG_FMT(info, "Can't send reply");
1273 
1274 free_leaves:
1275 	kfree(leaves);
1276 	return ret;
1277 
1278 free_msg:
1279 	kfree_skb(msg);
1280 	goto free_leaves;
1281 }
1282 
1283 static int
net_shaper_cap_fill_one(struct sk_buff * msg,struct net_shaper_binding * binding,enum net_shaper_scope scope,unsigned long flags,const struct genl_info * info)1284 net_shaper_cap_fill_one(struct sk_buff *msg,
1285 			struct net_shaper_binding *binding,
1286 			enum net_shaper_scope scope, unsigned long flags,
1287 			const struct genl_info *info)
1288 {
1289 	unsigned long cur;
1290 	void *hdr;
1291 
1292 	hdr = genlmsg_iput(msg, info);
1293 	if (!hdr)
1294 		return -EMSGSIZE;
1295 
1296 	if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_CAPS_IFINDEX) ||
1297 	    nla_put_u32(msg, NET_SHAPER_A_CAPS_SCOPE, scope))
1298 		goto nla_put_failure;
1299 
1300 	for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS;
1301 	     cur <= NET_SHAPER_A_CAPS_MAX; ++cur) {
1302 		if (flags & BIT(cur) && nla_put_flag(msg, cur))
1303 			goto nla_put_failure;
1304 	}
1305 
1306 	genlmsg_end(msg, hdr);
1307 
1308 	return 0;
1309 
1310 nla_put_failure:
1311 	genlmsg_cancel(msg, hdr);
1312 	return -EMSGSIZE;
1313 }
1314 
net_shaper_nl_cap_get_doit(struct sk_buff * skb,struct genl_info * info)1315 int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info)
1316 {
1317 	struct net_shaper_binding *binding;
1318 	const struct net_shaper_ops *ops;
1319 	enum net_shaper_scope scope;
1320 	unsigned long flags = 0;
1321 	struct sk_buff *msg;
1322 	int ret;
1323 
1324 	if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_CAPS_SCOPE))
1325 		return -EINVAL;
1326 
1327 	binding = net_shaper_binding_from_ctx(info->ctx);
1328 	scope = nla_get_u32(info->attrs[NET_SHAPER_A_CAPS_SCOPE]);
1329 	ops = net_shaper_ops(binding);
1330 	ops->capabilities(binding, scope, &flags);
1331 	if (!flags)
1332 		return -EOPNOTSUPP;
1333 
1334 	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1335 	if (!msg)
1336 		return -ENOMEM;
1337 
1338 	ret = net_shaper_cap_fill_one(msg, binding, scope, flags, info);
1339 	if (ret)
1340 		goto free_msg;
1341 
1342 	return genlmsg_reply(msg, info);
1343 
1344 free_msg:
1345 	nlmsg_free(msg);
1346 	return ret;
1347 }
1348 
net_shaper_nl_cap_get_dumpit(struct sk_buff * skb,struct netlink_callback * cb)1349 int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb,
1350 				 struct netlink_callback *cb)
1351 {
1352 	const struct genl_info *info = genl_info_dump(cb);
1353 	struct net_shaper_binding *binding;
1354 	const struct net_shaper_ops *ops;
1355 	enum net_shaper_scope scope;
1356 	int ret;
1357 
1358 	binding = net_shaper_binding_from_ctx(cb->ctx);
1359 	ops = net_shaper_ops(binding);
1360 	for (scope = 0; scope <= NET_SHAPER_SCOPE_MAX; ++scope) {
1361 		unsigned long flags = 0;
1362 
1363 		ops->capabilities(binding, scope, &flags);
1364 		if (!flags)
1365 			continue;
1366 
1367 		ret = net_shaper_cap_fill_one(skb, binding, scope, flags,
1368 					      info);
1369 		if (ret)
1370 			return ret;
1371 	}
1372 
1373 	return 0;
1374 }
1375 
net_shaper_flush(struct net_shaper_binding * binding)1376 static void net_shaper_flush(struct net_shaper_binding *binding)
1377 {
1378 	struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding);
1379 	struct net_shaper *cur;
1380 	unsigned long index;
1381 
1382 	if (!hierarchy)
1383 		return;
1384 
1385 	xa_lock(&hierarchy->shapers);
1386 	xa_for_each(&hierarchy->shapers, index, cur) {
1387 		__xa_erase(&hierarchy->shapers, index);
1388 		kfree(cur);
1389 	}
1390 	xa_unlock(&hierarchy->shapers);
1391 
1392 	kfree(hierarchy);
1393 }
1394 
net_shaper_flush_netdev(struct net_device * dev)1395 void net_shaper_flush_netdev(struct net_device *dev)
1396 {
1397 	struct net_shaper_binding binding = {
1398 		.type = NET_SHAPER_BINDING_TYPE_NETDEV,
1399 		.netdev = dev,
1400 	};
1401 
1402 	net_shaper_flush(&binding);
1403 }
1404 
/* Drop the queue-scope shapers for tx queues removed by a real_num_tx_queues
 * shrink, and garbage-collect any NODE-scope parent left without leaves.
 * No driver delete() is issued for the queue shapers themselves, as the
 * underlying queues (and their H/W resources) are already gone.
 */
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq)
{
	struct net_shaper_hierarchy *hierarchy;
	struct net_shaper_binding binding;
	int i;

	binding.type = NET_SHAPER_BINDING_TYPE_NETDEV;
	binding.netdev = dev;
	hierarchy = net_shaper_hierarchy(&binding);
	if (!hierarchy)
		return;

	/* Only drivers implementing shapers support ensure
	 * the lock is acquired in advance.
	 */
	netdev_assert_locked(dev);

	/* Take action only when decreasing the tx queue number. */
	for (i = txq; i < dev->real_num_tx_queues; ++i) {
		struct net_shaper_handle handle, parent_handle;
		struct net_shaper *shaper;
		u32 index;

		handle.scope = NET_SHAPER_SCOPE_QUEUE;
		handle.id = i;
		shaper = net_shaper_lookup(&binding, &handle);
		if (!shaper)
			continue;

		/* Don't touch the H/W for the queue shaper, the drivers already
		 * deleted the queue and related resources.
		 */
		parent_handle = shaper->parent;
		index = net_shaper_handle_to_index(&handle);
		xa_erase(&hierarchy->shapers, index);
		kfree_rcu(shaper, rcu);

		/* The recursion on parent does the full job. */
		if (parent_handle.scope != NET_SHAPER_SCOPE_NODE)
			continue;

		/* The parent lost a leaf; delete it (and recursively its own
		 * childless ancestors) once no leaves remain.
		 */
		shaper = net_shaper_lookup(&binding, &parent_handle);
		if (shaper && !--shaper->leaves)
			__net_shaper_delete(&binding, shaper, NULL);
	}
}
1452 
/* Register the net-shaper generic netlink family at boot. */
static int __init shaper_init(void)
{
	return genl_register_family(&net_shaper_nl_family);
}

subsys_initcall(shaper_init);
1459