// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on Linux by:
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit;
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
	qdisc_qstats_drop(sch);
}
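/* Dequeue path. codel_dequeue() (net/codel_impl.h) runs the CoDel control
 * law around the dequeue_func()/drop_func() callbacks above: once the
 * sojourn time of dequeued packets has stayed above 'target' for at least
 * 'interval', packets are dropped (or ECN-marked when 'ecn' is set) at a
 * rate that grows in inverse-square-root steps until the delay recovers.
 */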
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop_reason(skb, sch, to_free,
				 SKB_DROP_REASON_QDISC_OVERLIMIT);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};
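/* Netlink attributes carry times in microseconds, while codel_time_t
 * counts in units of (ns >> CODEL_SHIFT); CODEL_SHIFT is 10 in
 * net/codel.h. Hence the '* NSEC_PER_USEC >> CODEL_SHIFT' conversions
 * below. As a worked example: a 5 ms target arrives as 5000, and
 * 5000 * 1000 >> 10 == 4882 codel time units (codel_time_to_us()
 * performs the inverse conversion for dumps).
 */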
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit	= q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		= "codel",
	.priv_size	= sizeof(struct codel_sched_data),

	.enqueue	= codel_qdisc_enqueue,
	.dequeue	= codel_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= codel_init,
	.reset		= codel_reset,
	.change		= codel_change,
	.dump		= codel_dump,
	.dump_stats	= codel_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");
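/* Example configuration from userspace via iproute2 (illustrative only;
 * the device name is a placeholder). Each keyword maps to one of the
 * TCA_CODEL_* attributes handled in codel_change():
 *
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 */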