1 /*
2  * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
3  *
4  * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * version 2 as published by the Free Software Foundation.
9  */
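
/* Example (user space; the device name eth0 below is illustrative only,
 * any multiqueue NIC works):
 *
 *	tc qdisc add dev eth0 root handle 100: mq
 *	tc qdisc show dev eth0
 *
 * One class per hardware TX queue appears as 100:1, 100:2, ..., each
 * holding that queue's child qdisc. A child can be swapped out with:
 *
 *	tc qdisc replace dev eth0 parent 100:1 fq_codel
 */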

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mq_sched {
	struct Qdisc		**qdiscs;
};

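/* Relay an mq create/destroy event to the device driver through
 * ndo_setup_tc() so it can mirror the qdisc state in hardware;
 * returns -EOPNOTSUPP when the device cannot offload tc.
 */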
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

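/* Have the driver add its hardware counters to the root qdisc's
 * bstats/qstats; failures are ignored, the software totals gathered
 * in mq_dump() stay valid on their own.
 */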
static void mq_offload_stats(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

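/* priv->qdiscs is only non-NULL between mq_init() and mq_attach();
 * once the children have been grafted, the TX queues own them, so all
 * that is torn down here is a partially initialized array.
 */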
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

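/* Allocate one default child qdisc per TX queue up front so that
 * mq_attach() cannot fail. mq is only valid as the root qdisc of a
 * multiqueue device. If a child allocation fails, the core invokes
 * mq_destroy(), which frees the partially filled array.
 */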
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}

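/* Graft the pre-allocated children onto their TX queues, destroying
 * whatever was attached there before, then drop the temporary array:
 * from here on the queues own the children.
 */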
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

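/* Fold every child's statistics into the root qdisc before the core
 * dumps it, so "tc -s qdisc show" reports device-wide totals.
 */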
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.qlen	+= qdisc->qstats.qlen;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}
	mq_offload_stats(sch);

	return 0;
}

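/* Class minor numbers are 1-based queue indices: class N maps to TX
 * queue N - 1. Returns NULL for out-of-range class ids.
 */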
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

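/* Replace one TX queue's child qdisc. The device is quiesced around
 * the swap so it cannot race with the transmit path.
 */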
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

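/* Per-class stats are simply the attached child qdisc's own counters. */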
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

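/* Walk all classes (one per TX queue) for dumps, honouring the
 * walker's skip/count bookkeeping.
 */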
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};
289