/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
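
/*
 * Illustrative configuration (hypothetical values, not taken from this
 * file): a 4-queue device split into two traffic classes, with
 * priorities 0-3 steered to TC 0 and all remaining priorities to TC 1.
 * TC 0 covers queues 0-1 and TC 1 covers queues 2-3:
 *
 *	struct tc_mqprio_qopt qopt = {
 *		.num_tc		= 2,
 *		.prio_tc_map	= { 0, 0, 0, 0, 1, 1, 1, 1,
 *				    1, 1, 1, 1, 1, 1, 1, 1 },
 *		.count		= { 2, 2 },
 *		.offset		= { 0, 2 },
 *		.hw		= 0,
 *	};
 *
 * From userspace this corresponds to something like:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 \
 *		queues 2@0 2@2 hw 0
 */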

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

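/*
 * Per-qdisc private data:
 *
 * @qdiscs:   one child qdisc per device tx queue, pre-allocated in
 *            mqprio_init() and handed over to the tx queues in
 *            mqprio_attach()
 * @hw_owned: non-zero when the queue-to-tc mapping is owned by the
 *            hardware (set up through ndo_setup_tc()) rather than by
 *            the stack
 */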
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	int hw_owned;
};

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, 0);
	else
		netdev_set_num_tc(dev, 0);
}

static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* net_device does not support requested operation */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	/* If hardware owned, qcount and qoffset are taken from the LLD
	 * (low-level driver), so there is no reason to verify them here.
	 */
	if (qopt->hw)
		return 0;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx range; being
		 * equal to real_num_tx_queues indicates the last queue
		 * is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

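/*
 * Worked example for the checks above (illustrative values): on a
 * device with real_num_tx_queues = 8, a request with num_tc = 3,
 * count = {2, 2, 4} and offset = {0, 2, 4} is accepted: every range is
 * non-empty, the last queue used (offset[2] + count[2] = 8) does not
 * exceed the device, and no two ranges overlap.  With count[0] = 4
 * instead, TC 0 would span queues 0-3 and overlap TC 1 at offset 2
 * (last = 4 > offset[1] = 2), so the request would be rejected with
 * -EINVAL.
 */
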
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdiscs; attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (qdisc == NULL) {
			err = -ENOMEM;
			goto err;
		}
		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping, then run ndo_setup_tc; otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			goto err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mqprio_destroy(sch);
	return err;
}

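/*
 * mqprio_init() pre-allocated one child qdisc per tx queue, so the
 * grafting done here cannot fail.  Once grafted, the children are
 * owned by their tx queues and the temporary priv->qdiscs array can
 * be released.
 */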
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (qdisc)
			qdisc_destroy(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

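/*
 * Class ids are laid out with the traffic classes first and the tx
 * queues after them: classes 1 .. num_tc are the traffic classes,
 * classes num_tc + 1 .. num_tc + num_tx_queues map 1:1 onto the
 * device tx queues.  For example (illustrative), with num_tc = 3,
 * cl = 5 selects tx queue 5 - 1 - 3 = 1.
 */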
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.qlen	+= qdisc->qstats.qlen;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_owned;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}

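/*
 * For a traffic-class class the parent is the root.  For a queue class
 * the parent is the traffic class whose queue range contains it, found
 * by scanning tc_to_txq[]; e.g. (illustrative) with num_tc = 3 and
 * TC 1 covering queues 2-3, the class for queue 2 (cl = 3 + 2 + 1 = 6)
 * reports the TC 1 class as its parent.
 */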
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop the lock here; it will be reacquired before
		 * touching statistics.  This is required because the
		 * d->lock we hold here is the lock on
		 * dev_queue->qdisc_sleeping, which is also acquired
		 * below.
		 */
		spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
			spin_lock_bh(qdisc_lock(qdisc));
			bstats.bytes      += qdisc->bstats.bytes;
			bstats.packets    += qdisc->bstats.packets;
			qstats.qlen       += qdisc->qstats.qlen;
			qstats.backlog    += qdisc->qstats.backlog;
			qstats.drops      += qdisc->qstats.drops;
			qstats.requeues   += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim root sleeping lock before completing stats */
		spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, &qstats) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		sch->qstats.qlen = sch->q.qlen;
		if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, &sch->qstats) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.get		= mqprio_get,
	.put		= mqprio_put,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");