// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_drr.c		Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
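/*
 * DRR gives each class a configurable byte quantum per round and serves
 * active classes round-robin, carrying unused quantum over as a per-class
 * deficit. Illustrative userspace configuration (device name, handles and
 * filter are examples only, not part of this file):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dport 80 0xffff classid 1:1
 */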

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

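/*
 * Per-class state. @quantum is the number of bytes added to @deficit each
 * time the class reaches the head of the active list; @alist links the
 * class into the scheduler's active list while its child qdisc is
 * backlogged.
 */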
struct drr_class {
	struct Qdisc_class_common	common;

	struct gnet_stats_basic_sync	bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu	*rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

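/*
 * Scheduler state: the round-robin list of active (backlogged) classes,
 * the attached classifier chain and the classid hash table.
 */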
struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static bool cl_is_active(struct drr_class *cl)
{
	return !list_empty(&cl->alist);
}

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

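/*
 * Create a new class or change an existing one. TCA_DRR_QUANTUM defaults
 * to the device MTU when the attribute is absent.
 */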
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL, true,
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	INIT_LIST_HEAD(&cl->alist);
	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid,
				      NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL, true, tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (qdisc_class_in_use(&cl->common)) {
		NL_SET_ERR_MSG(extack, "DRR class is in use");
		return -EBUSY;
	}

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl)
		qdisc_class_get(&cl->common);

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	qdisc_class_put(&cl->common);
}

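/*
 * Replace a class's child qdisc; if @new is NULL, fall back to a default
 * pfifo (or noop_qdisc if that allocation fails), mirroring class creation.
 */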
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

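/*
 * Called when the class's child qdisc is drained externally; remove the
 * class from the active round-robin list.
 */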
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del_init(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

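/*
 * Map a packet to a class: try a direct classid match on skb->priority
 * first, then fall back to the attached tc filters.
 */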
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

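/*
 * Enqueue into the matched class's child qdisc. A class that becomes
 * backlogged joins the tail of the active list with a fresh deficit of
 * one quantum.
 */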
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (!cl_is_active(cl)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

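/*
 * Core DRR dequeue: serve the class at the head of the active list while
 * its head packet fits within the remaining deficit. When a packet does
 * not fit, top the deficit up by one quantum and rotate the class to the
 * tail, so each class receives bandwidth proportional to its quantum.
 */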
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del_init(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

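/* Set up the filter block, the classid hash table and the active list. */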
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

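/*
 * Reset every class: backlogged classes leave the active list and their
 * child qdiscs are reset.
 */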
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del_init(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("drr");

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deficit Round Robin scheduler");