Lines matching the identifier q (full matches) in the Linux kernel's net/sched/sch_fq_codel.c

Each entry below is the source line number followed by the matching line. Throughout the file, q is the qdisc's private fq_codel_sched_data, obtained with qdisc_priv(sch).

70 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
73 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
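
fq_codel_hash() (the two lines above) folds the skb flow hash into one of q->flows_cnt buckets with reciprocal_scale(), a multiply-and-shift that avoids a per-packet modulo. A minimal user-space sketch of that scaling step, with a made-up hash value standing in for skb_get_hash(skb):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's reciprocal_scale(): the high 32 bits of
 * hash * buckets give an index uniformly spread over [0, buckets). */
static uint32_t scale_sketch(uint32_t hash, uint32_t buckets)
{
        return (uint32_t)(((uint64_t)hash * buckets) >> 32);
}

int main(void)
{
        uint32_t flows_cnt = 1024;          /* default set in fq_codel_init() */
        uint32_t skb_hash = 0x9e3779b9;     /* stands in for skb_get_hash(skb) */

        printf("flow bucket: %u\n", scale_sketch(skb_hash, flows_cnt));
        return 0;
}

The "+ 1" on line 91 exists because a return value of 0 from the classifier means "drop"; the enqueue path subtracts it again before indexing q->flows[].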
79 struct fq_codel_sched_data *q = qdisc_priv(sch);
86 TC_H_MIN(skb->priority) <= q->flows_cnt)
89 filter = rcu_dereference_bh(q->filter_list);
91 return fq_codel_hash(q, skb) + 1;
107 if (TC_H_MIN(res.classid) <= q->flows_cnt)
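
fq_codel_classify() (the lines above) picks that flow index in three stages: a skb->priority whose minor number directly names a flow (when its major number matches the qdisc handle), then any tc filters attached through q->filter_list, then the hash as the fallback. A hedged sketch of the decision order; the parameter names are illustrative, the real function works on the skb and a struct tcf_result:

#include <stdio.h>

/* Returns a 1-based flow index.  The three stages mirror the lines above. */
static unsigned int classify_sketch(unsigned int prio_minor,
                                    unsigned int filter_minor,
                                    unsigned int hash_plus_one,
                                    unsigned int flows_cnt)
{
        if (prio_minor && prio_minor <= flows_cnt)
                return prio_minor;          /* TC_H_MIN(skb->priority) */
        if (filter_minor && filter_minor <= flows_cnt)
                return filter_minor;        /* TC_H_MIN(res.classid) */
        return hash_plus_one;               /* fq_codel_hash(q, skb) + 1 */
}

int main(void)
{
        printf("chosen flow: %u\n", classify_sketch(0, 7, 321, 1024));
        return 0;
}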
140 struct fq_codel_sched_data *q = qdisc_priv(sch);
154 for (i = 0; i < q->flows_cnt; i++) {
155 if (q->backlogs[i] > maxbacklog) {
156 maxbacklog = q->backlogs[i];
164 flow = &q->flows[idx];
177 q->backlogs[idx] -= len;
178 q->memory_usage -= mem;
181 sch->q.qlen -= i;
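
fq_codel_drop() (the lines above) handles overload by scanning q->backlogs[] for the flow with the largest byte backlog and dropping a batch of packets from its head, roughly half of that flow's backlog but no more than the max_packets argument (q->drop_batch_size at the call site). A small user-space sketch of the "find the fattest flow" scan; the backlog values are made up:

#include <stdint.h>
#include <stdio.h>

static unsigned int fattest_flow(const uint32_t *backlogs, unsigned int flows_cnt)
{
        unsigned int i, idx = 0;
        uint32_t maxbacklog = 0;

        /* Linear search, same shape as the loop over q->backlogs[] above. */
        for (i = 0; i < flows_cnt; i++) {
                if (backlogs[i] > maxbacklog) {
                        maxbacklog = backlogs[i];
                        idx = i;
                }
        }
        return idx;                         /* the flow the batched drop targets */
}

int main(void)
{
        uint32_t backlogs[4] = { 1500, 64000, 3000, 0 };

        printf("drop target: flow %u\n", fattest_flow(backlogs, 4));
        return 0;
}

This linear scan is why the enqueue path drops a whole batch at once rather than calling it for every packet, as the comment fragment at line 227 notes.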
188 struct fq_codel_sched_data *q = qdisc_priv(sch);
205 flow = &q->flows[idx];
207 q->backlogs[idx] += qdisc_pkt_len(skb);
211 list_add_tail(&flow->flowchain, &q->new_flows);
212 q->new_flow_count++;
213 flow->deficit = q->quantum;
216 q->memory_usage += get_codel_cb(skb)->mem_usage;
217 memory_limited = q->memory_usage > q->memory_limit;
218 if (++sch->q.qlen <= sch->limit && !memory_limited)
222 prev_qlen = sch->q.qlen;
227 * in q->backlogs[] to find a fat flow.
231 ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
233 prev_qlen -= sch->q.qlen;
235 q->drop_overlimit += prev_qlen;
237 q->drop_overmemory += prev_qlen;
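
fq_codel_enqueue() (the lines above) charges the packet against q->backlogs[idx] and q->memory_usage, puts a previously idle flow on new_flows with one quantum of deficit, and accepts the packet as long as both the packet limit and the memory limit still hold; otherwise it calls fq_codel_drop() for a batch of up to q->drop_batch_size packets and counts the result in drop_overlimit (and drop_overmemory when memory was the trigger). A simplified sketch of the accept-or-batch-drop check, with plain fields standing in for the qdisc state and the defaults from fq_codel_init():

#include <stdio.h>

struct limits_sketch {
        unsigned int qlen, limit;
        unsigned long mem_usage, mem_limit;
};

/* Returns 1 when the packet fits, 0 when the caller would drop a batch. */
static int enqueue_fits(struct limits_sketch *s, unsigned int pkt_mem)
{
        s->qlen++;                          /* ++sch->q.qlen */
        s->mem_usage += pkt_mem;            /* q->memory_usage += ... */
        return s->qlen <= s->limit && s->mem_usage <= s->mem_limit;
}

int main(void)
{
        struct limits_sketch s = {
                .qlen = 10239, .limit = 10240,          /* default packet limit */
                .mem_usage = 0, .mem_limit = 32UL << 20 /* 32 MBytes */
        };

        printf("fits: %d\n", enqueue_fits(&s, 1500)); /* exactly at the limit */
        printf("fits: %d\n", enqueue_fits(&s, 1500)); /* over: batch drop path */
        return 0;
}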
259 struct fq_codel_sched_data *q = qdisc_priv(sch);
266 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
267 q->memory_usage -= get_codel_cb(skb)->mem_usage;
268 sch->q.qlen--;
284 struct fq_codel_sched_data *q = qdisc_priv(sch);
290 head = &q->new_flows;
292 head = &q->old_flows;
299 flow->deficit += q->quantum;
300 list_move_tail(&flow->flowchain, &q->old_flows);
304 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
305 &flow->cvars, &q->cstats, qdisc_pkt_len,
310 if ((head == &q->new_flows) && !list_empty(&q->old_flows))
311 list_move_tail(&flow->flowchain, &q->old_flows);
319 if (q->cstats.drop_count) {
320 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
321 q->cstats.drop_len);
322 q->cstats.drop_count = 0;
323 q->cstats.drop_len = 0;
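
fq_codel_dequeue() (the lines above) implements the deficit round-robin scheduler: it serves the head of new_flows, falling back to old_flows, tops up and rotates a flow whose deficit is exhausted, and otherwise lets codel_dequeue() pull a packet from the flow and pay for it out of the deficit; an emptied new flow is moved to old_flows so it cannot starve older ones. A user-space sketch of just the deficit step; the quantum and packet length values are illustrative:

#include <stdio.h>

struct drr_flow {
        int deficit;
};

/* Returns 1 when the flow may send now; 0 when it is topped up and the
 * caller would rotate it to the tail of old_flows and try the next flow. */
static int drr_step(struct drr_flow *f, int quantum, int pkt_len)
{
        if (f->deficit <= 0) {
                f->deficit += quantum;      /* flow->deficit += q->quantum */
                return 0;
        }
        f->deficit -= pkt_len;              /* flow->deficit -= qdisc_pkt_len(skb) */
        return 1;
}

int main(void)
{
        struct drr_flow f = { .deficit = 0 };
        int sent;

        sent = drr_step(&f, 1514, 1000);
        printf("pass 1: send=%d deficit=%d\n", sent, f.deficit);
        sent = drr_step(&f, 1514, 1000);
        printf("pass 2: send=%d deficit=%d\n", sent, f.deficit);
        return 0;
}

The drop accounting at the bottom of the function (q->cstats.drop_count and drop_len) feeds qdisc_tree_reduce_backlog() so parent qdiscs see the packets CoDel dropped inside the dequeue path.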
336 struct fq_codel_sched_data *q = qdisc_priv(sch);
339 INIT_LIST_HEAD(&q->new_flows);
340 INIT_LIST_HEAD(&q->old_flows);
341 for (i = 0; i < q->flows_cnt; i++) {
342 struct fq_codel_flow *flow = q->flows + i;
348 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
349 q->memory_usage = 0;
370 struct fq_codel_sched_data *q = qdisc_priv(sch);
380 if (q->flows)
382 q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
383 if (!q->flows_cnt ||
384 q->flows_cnt > 65536)
399 WRITE_ONCE(q->cparams.target,
406 WRITE_ONCE(q->cparams.ce_threshold,
411 WRITE_ONCE(q->cparams.ce_threshold_selector,
414 WRITE_ONCE(q->cparams.ce_threshold_mask,
420 WRITE_ONCE(q->cparams.interval,
429 WRITE_ONCE(q->cparams.ecn,
433 WRITE_ONCE(q->quantum, quantum);
436 WRITE_ONCE(q->drop_batch_size,
440 WRITE_ONCE(q->memory_limit,
443 while (sch->q.qlen > sch->limit ||
444 q->memory_usage > q->memory_limit) {
462 struct fq_codel_sched_data *q = qdisc_priv(sch);
464 tcf_block_put(q->block);
465 kvfree(q->backlogs);
466 kvfree(q->flows);
472 struct fq_codel_sched_data *q = qdisc_priv(sch);
477 q->flows_cnt = 1024;
478 q->memory_limit = 32 << 20; /* 32 MBytes */
479 q->drop_batch_size = 64;
480 q->quantum = psched_mtu(qdisc_dev(sch));
481 INIT_LIST_HEAD(&q->new_flows);
482 INIT_LIST_HEAD(&q->old_flows);
483 codel_params_init(&q->cparams);
484 codel_stats_init(&q->cstats);
485 q->cparams.ecn = true;
486 q->cparams.mtu = psched_mtu(qdisc_dev(sch));
494 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
498 if (!q->flows) {
499 q->flows = kvcalloc(q->flows_cnt,
502 if (!q->flows) {
506 q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
507 if (!q->backlogs) {
511 for (i = 0; i < q->flows_cnt; i++) {
512 struct fq_codel_flow *flow = q->flows + i;
528 kvfree(q->flows);
529 q->flows = NULL;
531 q->flows_cnt = 0;
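
fq_codel_init() sets the defaults visible above (1024 flows, a 32 MByte memory limit, a drop batch of 64, one MTU of quantum) and then allocates two flows_cnt-sized arrays with kvcalloc(): q->flows and the u32 q->backlogs. A back-of-the-envelope sketch of those allocations; the per-flow struct size here is an assumption, since sizeof(struct fq_codel_flow) depends on the kernel configuration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int flows_cnt = 1024;      /* default from fq_codel_init() */
        size_t assumed_flow_size = 64;      /* hypothetical sizeof(struct fq_codel_flow) */
        size_t flows_bytes = (size_t)flows_cnt * assumed_flow_size;
        size_t backlog_bytes = (size_t)flows_cnt * sizeof(uint32_t);

        printf("q->flows:    %zu bytes\n", flows_bytes);
        printf("q->backlogs: %zu bytes\n", backlog_bytes);
        return 0;
}

On failure the error path frees q->flows and zeroes flows_cnt (lines 528-531), so a later destroy cannot walk a half-initialized flow table.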
537 struct fq_codel_sched_data *q = qdisc_priv(sch);
546 codel_time_to_us(READ_ONCE(q->cparams.target))) ||
550 codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
552 READ_ONCE(q->cparams.ecn)) ||
554 READ_ONCE(q->quantum)) ||
556 READ_ONCE(q->drop_batch_size)) ||
558 READ_ONCE(q->memory_limit)) ||
560 READ_ONCE(q->flows_cnt)))
563 ce_threshold = READ_ONCE(q->cparams.ce_threshold);
569 READ_ONCE(q->cparams.ce_threshold_selector)))
572 READ_ONCE(q->cparams.ce_threshold_mask)))
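
The WRITE_ONCE() stores in fq_codel_change() and the READ_ONCE() loads above pair up so the dump path can read each parameter as a single, tear-free access without taking the qdisc lock. The kernel macros are not C11 atomics, but a relaxed atomic store/load pair is a rough user-space analogue of the same single-writer, lockless-reader pattern (an illustration under that assumption, not the kernel's actual mechanism):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int quantum_param; /* stands in for q->quantum */

static void change_sketch(unsigned int new_quantum)
{
        atomic_store_explicit(&quantum_param, new_quantum,
                              memory_order_relaxed);    /* WRITE_ONCE(q->quantum, ...) */
}

static unsigned int dump_sketch(void)
{
        return atomic_load_explicit(&quantum_param,
                                    memory_order_relaxed); /* READ_ONCE(q->quantum) */
}

int main(void)
{
        change_sketch(1514);
        printf("dumped quantum: %u\n", dump_sketch());
        return 0;
}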
584 struct fq_codel_sched_data *q = qdisc_priv(sch);
590 st.qdisc_stats.maxpacket = q->cstats.maxpacket;
591 st.qdisc_stats.drop_overlimit = q->drop_overlimit;
592 st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
593 st.qdisc_stats.new_flow_count = q->new_flow_count;
594 st.qdisc_stats.ce_mark = q->cstats.ce_mark;
595 st.qdisc_stats.memory_usage = q->memory_usage;
596 st.qdisc_stats.drop_overmemory = q->drop_overmemory;
599 list_for_each(pos, &q->new_flows)
602 list_for_each(pos, &q->old_flows)
625 static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
632 struct fq_codel_sched_data *q = qdisc_priv(sch);
636 return q->block;
649 struct fq_codel_sched_data *q = qdisc_priv(sch);
654 if (idx < q->flows_cnt) {
655 const struct fq_codel_flow *flow = &q->flows[idx];
683 qs.backlog = q->backlogs[idx];
688 if (idx < q->flows_cnt)
695 struct fq_codel_sched_data *q = qdisc_priv(sch);
701 for (i = 0; i < q->flows_cnt; i++) {
702 if (list_empty(&q->flows[i].flowchain)) {