/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mq_sched {
	struct Qdisc		**qdiscs;
};

static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return 0;
}

static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};