/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a Stochastic model (as we use a hash, several flows
 * might be hashed to the same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
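/* Illustrative sketch (not part of this module): fq_codel_hash() spreads
 * flows over flows_cnt buckets with reciprocal_scale(), i.e. a multiply
 * and shift instead of a modulus. A minimal userspace model, assuming the
 * standard kernel definition of reciprocal_scale():
 *
 *	#include <stdint.h>
 *
 *	static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
 *	{
 *		return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
 *	}
 *
 *	// hash = 0xdeadbeef, flows_cnt = 1024 -> bucket in [0, 1023]
 *	// uint32_t bucket = reciprocal_scale(0xdeadbeef, 1024);
 *
 * Because the mapping is stochastic, distinct flows can share a bucket;
 * the per-qdisc perturbation seed makes such collisions hard to predict
 * from the outside.
 */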
/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += skb->truesize;
		kfree_skb(skb);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	q->memory_usage += skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a batch limit (64 packets by default) to not add a too big
	 * cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}
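/* Illustrative sketch (not part of this module): the overlimit policy of
 * fq_codel_drop() in miniature. Scan the per-bucket byte counts for the
 * fattest bucket, then shed bytes until half its backlog is gone or a
 * batch cap is hit. Names below are hypothetical userspace stand-ins.
 *
 *	static unsigned int pick_fat_flow(const unsigned int *backlogs,
 *					  unsigned int n,
 *					  unsigned int *maxbacklog)
 *	{
 *		unsigned int i, idx = 0;
 *
 *		*maxbacklog = 0;
 *		for (i = 0; i < n; i++) {
 *			if (backlogs[i] > *maxbacklog) {
 *				*maxbacklog = backlogs[i];
 *				idx = i;
 *			}
 *		}
 *		return idx;
 *	}
 *
 * The caller then drops from bucket idx while
 * (++dropped < batch && freed_bytes < *maxbacklog / 2), which mirrors the
 * do/while loop above: one O(flows_cnt) scan amortized over up to a whole
 * batch of drops.
 */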
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= skb->truesize;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_drop(skb, sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
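/* Illustrative sketch (not part of this module): the deficit round-robin
 * rule used by fq_codel_dequeue(). A flow may send while its deficit is
 * positive; once it goes non-positive, the flow is recharged by quantum
 * and rotated to the old_flows list. Hypothetical userspace model:
 *
 *	struct drr_flow {
 *		int deficit;
 *	};
 *
 *	// Returns 1 if this flow may transmit right now, 0 if it must
 *	// first be recharged and rotated to the back of old_flows.
 *	static int drr_can_send(struct drr_flow *f, int quantum)
 *	{
 *		if (f->deficit <= 0) {
 *			f->deficit += quantum;	// recharge...
 *			return 0;		// ...and rotate
 *		}
 *		return 1;
 *	}
 *
 * Note the deficit is charged after dequeue (deficit -= pkt_len), so a
 * single oversized packet can drive it negative; fairness converges over
 * successive rounds rather than per packet.
 */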
static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		while (flow->head) {
			struct sk_buff *skb = dequeue_head(flow);

			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}

		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}
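/* Worked example (not part of this module): TCA_FQ_CODEL_TARGET, INTERVAL
 * and CE_THRESHOLD arrive from userspace in microseconds and are converted
 * above to codel's internal time unit of (1 << CODEL_SHIFT) nanoseconds.
 * With CODEL_SHIFT == 10 (per include/net/codel.h), one codel tick is
 * 1024ns, so a 5ms target becomes:
 *
 *	target_us          = 5000
 *	target_codel_ticks = (5000 * 1000) >> 10 = 4882
 *
 * codel_time_to_us() in fq_codel_dump() performs the inverse conversion
 * when reporting the parameters back to userspace.
 */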
static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = fq_codel_change(sch, opt);
		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
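/* Example (userspace): the attributes parsed by fq_codel_change() map
 * directly onto tc(8) options. The defaults set in fq_codel_init()
 * correspond roughly to:
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms ecn
 *
 * (target/interval defaults come from codel_params_init(); the quantum
 * default is the device MTU.) "tc -s qdisc show dev eth0" reads back the
 * parameters and stats filled in by fq_codel_dump() and
 * fq_codel_dump_stats().
 */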
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}
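/* Illustrative note (not part of this module): class handles are the flow
 * index shifted by one, since 0 is reserved to mean "drop/unclassified".
 * fq_codel_classify() returns idx + 1 and the class ops undo it, e.g.:
 *
 *	u32 cl  = fq_codel_classify(skb, sch, &ret);	// 1..flows_cnt, 0 = drop
 *	u32 idx = cl - 1;				// 0..flows_cnt-1 array index
 *
 * This is also why skb->priority minor IDs and external classifier results
 * are accepted only in the range [1, flows_cnt].
 */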
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
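/* Usage note: register_qdisc() makes the "fq_codel" identifier visible to
 * the tc netlink API. The module normally autoloads on first use, since
 * qdisc creation requests "sch_fq_codel" by name:
 *
 *	# modprobe sch_fq_codel		(usually implicit)
 *	# tc qdisc replace dev eth0 root fq_codel
 */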