// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_priv.h>

static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

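/*
 * Example (illustrative, not part of this file): a driver that offloads mq
 * handles TC_SETUP_QDISC_MQ in its ndo_setup_tc callback and dispatches on
 * opt->command. The foo_* names below are hypothetical:
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		struct tc_mq_qopt_offload *opt = type_data;
 *
 *		if (type != TC_SETUP_QDISC_MQ)
 *			return -EOPNOTSUPP;
 *
 *		switch (opt->command) {
 *		case TC_MQ_CREATE:
 *			return foo_mq_create(dev, opt->handle);
 *		case TC_MQ_DESTROY:
 *			return foo_mq_destroy(dev, opt->handle);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */

/*
 * Pull any hardware counters into sch->bstats/qstats before a dump.
 * qdisc_offload_dump_helper() treats -EOPNOTSUPP from the driver as success
 * and sets TCQ_F_OFFLOADED only when the request succeeded, so devices
 * without mq offload still dump correct software totals.
 */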
static int mq_offload_stats(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

void mq_destroy_common(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
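	/* The array may be only partially populated if mq_init_common()
	 * failed midway; it is zero-filled past the failure point, so stop
	 * at the first NULL slot.
	 */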
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}
EXPORT_SYMBOL_NS_GPL(mq_destroy_common, "NET_SCHED_INTERNAL");

static void mq_destroy(struct Qdisc *sch)
{
	mq_offload(sch, TC_MQ_DESTROY);
	mq_destroy_common(sch);
}

int mq_init_common(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack,
		   const struct Qdisc_ops *qdisc_ops)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue,
					  qdisc_ops ?: get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(mq_init_common, "NET_SCHED_INTERNAL");
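
/*
 * Note: the loop above hands each child qdisc the parent classid
 * TC_H_MAKE(major, ntx + 1), i.e. TX queue ntx sits behind class minor
 * ntx + 1, so an mq root with handle 8001: exposes classes 8001:1 .. 8001:N.
 * Illustrative tc usage (device name and handle are arbitrary):
 *
 *	tc qdisc add dev eth0 root handle 8001: mq
 *	tc qdisc replace dev eth0 parent 8001:1 fq_codel
 */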

static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	int ret;

	ret = mq_init_common(sch, opt, extack, NULL);
	if (ret)
		return ret;

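	/* Offload creation is best-effort: the return value is ignored, so
	 * mq keeps working as a pure software qdisc on devices that cannot
	 * offload it.
	 */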
	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}

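/*
 * Second phase of setup: graft the qdiscs pre-allocated in mq_init_common()
 * onto their TX queues. Ownership moves to the per-queue qdisc_sleeping
 * pointers, so the temporary priv->qdiscs array can be freed afterwards.
 */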
void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
EXPORT_SYMBOL_NS_GPL(mq_attach, "NET_SCHED_INTERNAL");

void mq_dump_common(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}
EXPORT_SYMBOL_NS_GPL(mq_dump_common, "NET_SCHED_INTERNAL");

static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	mq_dump_common(sch, skb);
	return mq_offload_stats(sch);
}

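/*
 * Class minors are 1-based: class N covers TX queue N - 1. For the invalid
 * minor 0, "cl - 1" wraps to ULONG_MAX, fails the bounds check and yields
 * NULL.
 */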
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

struct netdev_queue *mq_select_queue(struct Qdisc *sch,
				     struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
EXPORT_SYMBOL_NS_GPL(mq_select_queue, "NET_SCHED_INTERNAL");

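/*
 * Swap the child qdisc on one TX queue. While the device is up it is
 * deactivated around the graft, so the TX path never sees a half-installed
 * qdisc.
 */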
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct tc_mq_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	graft_offload.handle = sch->handle;
	graft_offload.graft_params.queue = cl - 1;
	graft_offload.graft_params.child_handle = new ? new->handle : 0;
	graft_offload.command = TC_MQ_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
	return 0;
}

struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return rtnl_dereference(dev_queue->qdisc_sleeping);
}
EXPORT_SYMBOL_NS_GPL(mq_leaf, "NET_SCHED_INTERNAL");

unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}
EXPORT_SYMBOL_NS_GPL(mq_find, "NET_SCHED_INTERNAL");

int mq_dump_class(struct Qdisc *sch, unsigned long cl,
		  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(mq_dump_class, "NET_SCHED_INTERNAL");

int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

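	/* From here on, "sch" refers to the child qdisc on this TX queue. */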
	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
	if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(mq_dump_class_stats, "NET_SCHED_INTERNAL");

void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
			break;
	}
}
EXPORT_SYMBOL_NS_GPL(mq_walk, "NET_SCHED_INTERNAL");

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

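/*
 * Note that mq has no enqueue/dequeue ops: packets are steered to a TX queue
 * by the device layer (netdev_pick_tx()) and handed directly to that queue's
 * child qdisc, so this "dummy" root only manages its per-queue children.
 */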
struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};
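
/*
 * Besides explicit "tc qdisc add ... mq", this is also what the core attaches
 * by default as the root of a multiqueue device; see attach_default_qdiscs()
 * in net/sched/sch_generic.c, which picks the per-queue children via
 * get_default_qdisc_ops().
 */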