// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified on flows (by the internal classifier or an
 * external one).
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

struct fq_codel_flow {
	struct sk_buff *head;
	struct sk_buff *tail;
	struct list_head flowchain;
	int deficit;
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32 *backlogs;			/* backlog table [flows_cnt] */
	u32 flows_cnt;			/* number of flows */
	u32 quantum;			/* psched_mtu(qdisc_dev(sch)); */
	u32 drop_batch_size;
	u32 memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32 memory_usage;
	u32 drop_overmemory;
	u32 drop_overlimit;
	u32 new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* Helper functions: might change when/if skbs use a standard list_head. */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}
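
/* Note: dequeue_head() leaves flow->tail stale once a queue drains;
 * emptiness is tracked through flow->head alone, which is why
 * flow_queue_add() below re-seeds head (and then tail) when head is NULL.
 */
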
/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT);
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	/* Tell codel to increase its signal strength also */
	flow->cvars.count += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}
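
/* A worked example of the batch above: with the default drop_batch_size
 * of 64 and a fat flow holding, say, 60KB, the loop stops once it has
 * freed half the backlog (threshold = 30KB) or dropped 64 packets,
 * whichever comes first.
 */
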
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int ret;
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* Save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of that flow's
	 * backlog, with a 64-packet limit (drop_batch_size) so we do not add
	 * too big a CPU spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let the upper stack know this.
	 * If we dropped a packet from this very flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}
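
/* Note on the ret == idx case above: NET_XMIT_CN tells the caller that
 * its own packet was dropped, so parent qdiscs never account for it;
 * that is why one packet (and pkt_len bytes) is excluded from the
 * qdisc_tree_reduce_backlog() adjustment.
 */
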
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from a flow's queue. Note: the backlog is handled
 * in codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for the next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
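
/* The dequeue path above is classic DRR: a flow may send while its
 * deficit is positive, earning one quantum per round, before being
 * rotated to the tail of old_flows. new_flows is always served first,
 * which is what gives new (sparse) flows their latency advantage.
 */
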
static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
	[TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	u32 quantum = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	if (tb[TCA_FQ_CODEL_QUANTUM]) {
		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
		if (quantum > FQ_CODEL_QUANTUM_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid quantum");
			return -EINVAL;
		}
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		WRITE_ONCE(q->cparams.target,
			   (target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->cparams.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
		WRITE_ONCE(q->cparams.ce_threshold_selector,
			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]));
	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
		WRITE_ONCE(q->cparams.ce_threshold_mask,
			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]));

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		WRITE_ONCE(q->cparams.interval,
			   (interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]));

	if (tb[TCA_FQ_CODEL_ECN])
		WRITE_ONCE(q->cparams.ecn,
			   !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]));

	if (quantum)
		WRITE_ONCE(q->quantum, quantum);

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		WRITE_ONCE(q->drop_batch_size,
			   max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		WRITE_ONCE(q->memory_limit,
			   min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}
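
/* Unit-conversion note: TARGET, INTERVAL and CE_THRESHOLD arrive from
 * userspace in microseconds, while codel internally keeps time in units
 * of (1 << CODEL_SHIFT) ns, hence the (usec * NSEC_PER_USEC) >> CODEL_SHIFT
 * conversions above. With CODEL_SHIFT == 10, one unit is 1.024 us, so a
 * 5000 us target is stored as 5000000 >> 10 = 4882.
 */
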
static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->cparams.target))) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			READ_ONCE(q->cparams.ecn)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			READ_ONCE(q->drop_batch_size)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			READ_ONCE(q->memory_limit)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			READ_ONCE(q->flows_cnt)))
		goto nla_put_failure;

	ce_threshold = READ_ONCE(q->cparams.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD) {
		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
				codel_time_to_us(ce_threshold)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
			       READ_ONCE(q->cparams.ce_threshold_selector)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK,
			       READ_ONCE(q->cparams.ce_threshold_mask)))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type = TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
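
/* fq_codel cannot have child qdiscs grafted under it, but it exposes
 * each flow as a read-only pseudo class so that tc filters can be
 * attached and per-flow statistics dumped; hence the minimal class ops
 * below.
 */
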
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = 0;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain)) {
			arg->count++;
			continue;
		}
		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
			break;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		= fq_codel_leaf,
	.find		= fq_codel_find,
	.tcf_block	= fq_codel_tcf_block,
	.bind_tcf	= fq_codel_bind,
	.unbind_tcf	= fq_codel_unbind,
	.dump		= fq_codel_dump_class,
	.dump_stats	= fq_codel_dump_class_stats,
	.walk		= fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		= &fq_codel_class_ops,
	.id		= "fq_codel",
	.priv_size	= sizeof(struct fq_codel_sched_data),
	.enqueue	= fq_codel_enqueue,
	.dequeue	= fq_codel_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_codel_init,
	.reset		= fq_codel_reset,
	.destroy	= fq_codel_destroy,
	.change		= fq_codel_change,
	.dump		= fq_codel_dump,
	.dump_stats	= fq_codel_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq_codel");

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");