Lines matching the identifier "q" (query full:q) in net/sched/sch_fq.c, the Linux fq ("Fair Queue") packet scheduler. Each entry below is the source line number followed by the matching line.
79 /* Following field is only used for q->internal,
80 * because q->internal is not hashed in fq_root[]
93 struct rb_node rate_node; /* anchor in q->delayed tree */
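The fields matched at lines 79-93 belong to struct fq_flow: every flow carries one rb_node anchoring it in a fq_root[] hash bucket and a second one, rate_node, anchoring it in the q->delayed tree of throttled flows, while q->internal is a special flow that is never hashed. A minimal userspace sketch of that shape (struct and field names below are illustrative, not the kernel definition):

    #include <stdint.h>

    /* Hedged sketch, not the kernel struct fq_flow: just the shape suggested
     * by the matched fields.  Plain pointers stand in for struct rb_node.
     */
    struct flow_sketch {
        void     *fq_node;           /* anchor in a fq_root[] bucket tree */
        void     *rate_node;         /* anchor in the q->delayed tree     */
        void     *sk;                /* owning socket, if any             */
        int       qlen;              /* packets queued on this flow       */
        int       credit;            /* DRR credit                        */
        uint64_t  time_next_packet;  /* earliest departure time, in ns    */
    };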
197 static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
200 struct fq_perband_flows *pband = &q->band_flows[flow->band];
213 static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
215 rb_erase(&f->rate_node, &q->delayed);
216 q->throttled_flows--;
217 fq_flow_add_tail(q, f, OLD_FLOW);
220 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
222 struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
235 rb_insert_color(&f->rate_node, &q->delayed);
236 q->throttled_flows++;
237 q->stat_throttled++;
240 if (q->time_next_delayed_flow > f->time_next_packet)
241 q->time_next_delayed_flow = f->time_next_packet;
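fq_flow_set_throttled() (lines 220-241) inserts a flow into the q->delayed tree ordered by time_next_packet and keeps q->time_next_delayed_flow as the cached earliest deadline, which later arms the watchdog. A userspace sketch of the same walk, assuming an unbalanced binary search tree instead of the kernel's rbtree (all names below are illustrative):

    #include <stdint.h>

    /* Hedged sketch: order throttled flows by deadline and cache the earliest
     * one, mirroring the comparison used in fq_flow_set_throttled().
     */
    struct tflow {
        uint64_t time_next_packet;
        struct tflow *left, *right;
    };

    struct sched_sketch {
        struct tflow *delayed;              /* root of the deadline tree */
        uint64_t time_next_delayed_flow;    /* cached earliest deadline  */
        unsigned long throttled_flows;
    };

    static void set_throttled(struct sched_sketch *q, struct tflow *f)
    {
        struct tflow **p = &q->delayed;

        while (*p) {
            struct tflow *aux = *p;

            if (f->time_next_packet >= aux->time_next_packet)
                p = &aux->right;
            else
                p = &aux->left;
        }
        f->left = f->right = NULL;
        *p = f;
        q->throttled_flows++;
        if (q->time_next_delayed_flow > f->time_next_packet)
            q->time_next_delayed_flow = f->time_next_packet;
    }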
258 static void fq_gc(struct fq_sched_data *q,
295 q->flows -= fcnt;
296 q->inactive_flows -= fcnt;
297 q->stat_gc_flows += fcnt;
315 const struct fq_sched_data *q = qdisc_priv(sch);
318 if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
321 if (sch->q.qlen != 0) {
327 if (q->flows != q->inactive_flows + q->throttled_flows)
333 if (q->internal.qlen >= 8)
339 if (q->time_next_delayed_flow <= now + q->offload_horizon)
348 if (q->flow_max_rate != ~0UL)
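The matches at lines 315-348 are the fast-path eligibility test: the packet must not need pacing beyond offload_horizon, any existing backlog must consist only of inactive or throttled flows with nothing due soon and a small q->internal queue, and no global flow_max_rate may be set. A hedged sketch of that boolean check (struct and function names are illustrative, the conditions follow the listed lines):

    #include <stdbool.h>
    #include <stdint.h>

    struct fastpath_state {
        uint64_t time_to_send;            /* packet's earliest departure time */
        uint64_t offload_horizon;
        unsigned int qlen;                /* sch->q.qlen                      */
        unsigned long flows, inactive_flows, throttled_flows;
        unsigned int internal_qlen;       /* q->internal.qlen                 */
        uint64_t time_next_delayed_flow;
        unsigned long flow_max_rate;      /* ~0UL means "no limit"            */
    };

    static bool can_use_fastpath(const struct fastpath_state *s, uint64_t now)
    {
        if (s->time_to_send > now + s->offload_horizon)
            return false;                 /* needs pacing, so needs a flow     */
        if (s->qlen != 0) {
            if (s->flows != s->inactive_flows + s->throttled_flows)
                return false;             /* some flow still holds a backlog   */
            if (s->internal_qlen >= 8)
                return false;             /* fast-path queue already busy      */
            if (s->time_next_delayed_flow <= now + s->offload_horizon)
                return false;             /* a throttled flow is about due     */
        }
        if (s->flow_max_rate != ~0UL)
            return false;                 /* per-flow rate cap needs full path */
        return true;
    }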
357 struct fq_sched_data *q = qdisc_priv(sch);
374 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
382 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
395 q->internal.stat_fastpath_packets++;
396 if (skb->sk == sk && q->rate_enable &&
400 return &q->internal;
403 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
405 fq_gc(q, root, sk);
421 f->credit = q->initial_quantum;
423 if (q->rate_enable)
427 fq_flow_unset_throttled(q, f);
440 q->stat_allocation_errors++;
441 return &q->internal;
449 if (q->rate_enable)
453 f->credit = q->initial_quantum;
458 q->flows++;
459 q->inactive_flows++;
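Lines 357-459 are flow classification: packets without a usable socket get a synthetic key from skb_get_hash() masked with orphan_mask (the low bit forced to 1 so it cannot collide with a real, word-aligned socket pointer), the bucket in fq_root[] is chosen with hash_ptr(sk, fq_trees_log), and a newly created flow starts with initial_quantum of credit. A userspace sketch of the bucket choice, assuming a golden-ratio multiplicative hash in place of the kernel's hash_ptr() (constant and names are illustrative):

    #include <stdint.h>

    /* Hedged sketch: map a (possibly synthetic) socket key to one of
     * 1 << trees_log buckets.  Requires 1 <= trees_log <= 63.
     */
    static unsigned int bucket_of(const void *sk, unsigned int trees_log)
    {
        uint64_t h = (uint64_t)(uintptr_t)sk * 0x61C8864680B583EBULL;

        return (unsigned int)(h >> (64 - trees_log));
    }

    /* Orphaned packets: derive an odd-valued fake key from the skb hash,
     * so unrelated packets still spread across buckets.
     */
    static const void *orphan_key(uint32_t skb_hash, unsigned long orphan_mask)
    {
        return (const void *)(uintptr_t)(((skb_hash & orphan_mask) << 1) | 1UL);
    }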
502 sch->q.qlen--;
539 const struct fq_sched_data *q, u64 now)
541 return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
549 struct fq_sched_data *q = qdisc_priv(sch);
554 band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
555 if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
556 q->stat_band_drops[band]++;
566 if (fq_packet_beyond_horizon(skb, q, now)) {
567 if (q->horizon_drop) {
568 q->stat_horizon_drops++;
572 q->stat_horizon_caps++;
573 skb->tstamp = now + q->horizon;
580 if (f != &q->internal) {
581 if (unlikely(f->qlen >= q->flow_plimit)) {
582 q->stat_flows_plimit++;
588 fq_flow_add_tail(q, f, NEW_FLOW);
589 if (time_after(jiffies, f->age + q->flow_refill_delay))
590 f->credit = max_t(u32, f->credit, q->quantum);
594 q->band_pkt_count[band]++;
597 q->inactive_flows--;
605 sch->q.qlen++;
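The enqueue-side matches (lines 549-605) show per-band accounting against sch->limit and the EDT horizon policy: a packet stamped further than now + horizon into the future is either dropped (horizon_drop set) or has its timestamp capped to now + horizon. A small sketch of that decision (enum and function names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    enum horizon_action { H_ACCEPT, H_CAP, H_DROP };

    /* Hedged sketch of the policy around lines 566-573: capping means the
     * caller rewrites the timestamp to now + horizon before queueing.
     */
    static enum horizon_action horizon_policy(uint64_t tstamp, uint64_t now,
                                              uint64_t horizon, bool horizon_drop)
    {
        if ((int64_t)tstamp <= (int64_t)(now + horizon))
            return H_ACCEPT;
        return horizon_drop ? H_DROP : H_CAP;
    }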
611 static void fq_check_throttled(struct fq_sched_data *q, u64 now)
616 if (q->time_next_delayed_flow > now + q->offload_horizon)
622 sample = (unsigned long)(now - q->time_next_delayed_flow);
624 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
625 q->unthrottle_latency_ns += sample >> 3;
627 now += q->offload_horizon;
629 q->time_next_delayed_flow = ~0ULL;
630 while ((p = rb_first(&q->delayed)) != NULL) {
634 q->time_next_delayed_flow = f->time_next_packet;
637 fq_flow_unset_throttled(q, f);
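fq_check_throttled() (lines 611-637) first feeds the gap between now and the cached time_next_delayed_flow into an unthrottle-latency estimator, then walks q->delayed and moves every flow whose deadline has passed (within offload_horizon) back onto the old-flow list. The estimator at lines 624-625 is a shift-based EWMA with weight 1/8; a standalone sketch:

    #include <stdint.h>

    /* Hedged sketch of the estimator: ewma = 7/8 * ewma + 1/8 * sample,
     * computed with shifts exactly as in the listed lines.
     */
    static void update_unthrottle_latency(uint64_t *ewma_ns, uint64_t sample_ns)
    {
        *ewma_ns -= *ewma_ns >> 3;
        *ewma_ns += sample_ns >> 3;
    }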
654 struct fq_sched_data *q = qdisc_priv(sch);
664 if (!sch->q.qlen)
667 skb = fq_peek(&q->internal);
669 q->internal.qlen--;
670 fq_dequeue_skb(sch, &q->internal, skb);
675 fq_check_throttled(q, now);
677 pband = &q->band_flows[q->band_nr];
682 if (++q->band_nr == FQ_BANDS)
683 q->band_nr = 0;
684 pband = &q->band_flows[q->band_nr];
691 if (q->time_next_delayed_flow != ~0ULL)
692 qdisc_watchdog_schedule_range_ns(&q->watchdog,
693 q->time_next_delayed_flow,
694 q->timer_slack);
700 f->credit += q->quantum;
702 fq_flow_add_tail(q, f, OLD_FLOW);
711 if (now + q->offload_horizon < time_next_packet) {
714 fq_flow_set_throttled(q, f);
719 if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
721 q->stat_ce_mark++;
724 q->inactive_flows++;
725 q->band_pkt_count[fq_skb_cb(skb)->band]--;
730 fq_flow_add_tail(q, f, OLD_FLOW);
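The dequeue-side matches around lines 700-730 show the deficit-round-robin step: a flow with non-positive credit gets quantum added and is rotated to the tail of the old-flow list, while a flow whose next departure time is still further than offload_horizon in the future is parked on the delayed tree via fq_flow_set_throttled(). A hedged sketch of that decision (enum and names are illustrative; the kernel also folds the per-skb time_to_send into the deadline):

    #include <stdint.h>

    enum drr_verdict { DRR_SEND, DRR_REQUEUE, DRR_THROTTLE };

    /* Hedged sketch: decide what to do with the flow at the head of the
     * current band's flow list.
     */
    static enum drr_verdict drr_step(int *credit, uint32_t quantum,
                                     uint64_t time_next_packet,
                                     uint64_t now, uint64_t offload_horizon)
    {
        if (*credit <= 0) {
            *credit += quantum;
            return DRR_REQUEUE;          /* tail of old flows, pick next flow */
        }
        if (now + offload_horizon < time_next_packet)
            return DRR_THROTTLE;         /* park on the delayed tree          */
        return DRR_SEND;
    }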
740 if (!q->rate_enable)
743 rate = q->flow_max_rate;
753 if (rate <= q->low_rate_threshold) {
756 plen = max(plen, q->quantum);
772 q->stat_pkts_too_long++;
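Lines 740-772 are the pacing computation: unless rate_enable is off, the flow's next departure time advances by roughly plen * NSEC_PER_SEC / rate; below low_rate_threshold the remaining credit is dropped so a slow flow is paced packet by packet, otherwise plen is rounded up to at least quantum, and the per-packet delay is clamped to one second (counted in stat_pkts_too_long). A sketch of the core division and clamp (names are illustrative):

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Hedged sketch: how long a plen-byte packet occupies the link at
     * rate bytes/sec, clamped at one second (the pkts_too_long case).
     * The kernel additionally compensates for scheduling drift before
     * advancing f->time_next_packet.
     */
    static uint64_t pacing_delay_ns(uint32_t plen, unsigned long rate,
                                    unsigned long *pkts_too_long)
    {
        uint64_t len = (uint64_t)plen * NSEC_PER_SEC;

        if (rate)
            len /= rate;
        if (len > NSEC_PER_SEC) {
            len = NSEC_PER_SEC;
            (*pkts_too_long)++;
        }
        return len;
    }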
804 struct fq_sched_data *q = qdisc_priv(sch);
810 sch->q.qlen = 0;
813 fq_flow_purge(&q->internal);
815 if (!q->fq_root)
818 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
819 root = &q->fq_root[idx];
830 q->band_flows[idx].new_flows.first = NULL;
831 q->band_flows[idx].old_flows.first = NULL;
833 q->delayed = RB_ROOT;
834 q->flows = 0;
835 q->inactive_flows = 0;
836 q->throttled_flows = 0;
839 static void fq_rehash(struct fq_sched_data *q,
879 q->flows -= fcnt;
880 q->inactive_flows -= fcnt;
881 q->stat_gc_flows += fcnt;
891 struct fq_sched_data *q = qdisc_priv(sch);
896 if (q->fq_root && log == q->fq_trees_log)
910 old_fq_root = q->fq_root;
912 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
914 q->fq_root = array;
915 WRITE_ONCE(q->fq_trees_log, log);
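fq_resize() and fq_rehash() (lines 839-915) rebuild the flow hash table at a new fq_trees_log, move every surviving flow into the bucket it hashes to under the new size, then publish the new table and log. A userspace sketch of the same idea, assuming singly linked buckets and the same kind of multiplicative hash as the earlier sketch (all names illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    struct hflow {
        struct hflow *next;
        uintptr_t key;                      /* stands in for the socket pointer */
    };

    static unsigned int bucket_idx(uintptr_t key, unsigned int log)
    {
        return (unsigned int)((key * 0x61C8864680B583EBULL) >> (64 - log));
    }

    /* Hedged sketch: allocate 1 << new_log buckets and redistribute flows. */
    static struct hflow **rehash(struct hflow **old, unsigned int old_log,
                                 unsigned int new_log)
    {
        struct hflow **tbl = calloc(1UL << new_log, sizeof(*tbl));

        if (!tbl)
            return NULL;
        for (unsigned long i = 0; i < (1UL << old_log); i++) {
            while (old[i]) {
                struct hflow *f = old[i];
                unsigned int idx = bucket_idx(f->key, new_log);

                old[i] = f->next;
                f->next = tbl[idx];
                tbl[idx] = f;
            }
        }
        return tbl;
    }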
975 static int fq_load_weights(struct fq_sched_data *q,
990 WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
994 static int fq_load_priomap(struct fq_sched_data *q,
1012 fq_prio2band_compress_crumb(map->priomap, q->prio2band);
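fq_load_priomap() ends by calling fq_prio2band_compress_crumb() (line 1012): the 16-entry priority-to-band map is stored as 2-bit "crumbs", four priorities per byte, since a band index only needs the values 0-2. A userspace sketch of the packing and lookup (macro and function names are illustrative):

    #include <stdint.h>
    #include <string.h>

    #define PRIO_MAX    15                    /* TC_PRIO_MAX          */
    #define CRUMB_BYTES ((PRIO_MAX + 1) / 4)  /* 4 bytes for 16 prios */

    /* Hedged sketch of the crumb packing: 2 bits per priority. */
    static void compress_crumb(const uint8_t in[PRIO_MAX + 1],
                               uint8_t out[CRUMB_BYTES])
    {
        memset(out, 0, CRUMB_BYTES);
        for (int i = 0; i <= PRIO_MAX; i++)
            out[i >> 2] |= (in[i] & 0x3) << ((i & 0x3) * 2);
    }

    static uint8_t prio2band(const uint8_t crumbs[CRUMB_BYTES], unsigned int prio)
    {
        return (crumbs[prio >> 2] >> ((prio & 0x3) * 2)) & 0x3;
    }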
1020 struct fq_sched_data *q = qdisc_priv(sch);
1032 fq_log = q->fq_trees_log;
1047 WRITE_ONCE(q->flow_plimit,
1054 WRITE_ONCE(q->quantum, quantum);
1062 WRITE_ONCE(q->initial_quantum,
1072 WRITE_ONCE(q->flow_max_rate,
1076 WRITE_ONCE(q->low_rate_threshold,
1083 WRITE_ONCE(q->rate_enable,
1092 WRITE_ONCE(q->flow_refill_delay,
1097 err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);
1100 err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);
1103 WRITE_ONCE(q->orphan_mask,
1107 WRITE_ONCE(q->ce_threshold,
1112 WRITE_ONCE(q->timer_slack,
1116 WRITE_ONCE(q->horizon,
1121 WRITE_ONCE(q->horizon_drop,
1129 WRITE_ONCE(q->offload_horizon, offload_horizon);
1142 while (sch->q.qlen > sch->limit) {
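The change path (lines 1020-1142) updates each parameter with WRITE_ONCE() and, once a new limit is in place, drops packets until the backlog fits under it again (the loop at line 1142). A minimal sketch of that trimming loop (types and names are illustrative):

    #include <stdlib.h>

    struct pkt { struct pkt *next; };

    struct backlog {
        struct pkt *head;
        unsigned int qlen, limit;
    };

    /* Hedged sketch: after lowering limit, dequeue and drop until qlen fits. */
    static unsigned int trim_to_limit(struct backlog *q)
    {
        unsigned int dropped = 0;

        while (q->qlen > q->limit && q->head) {
            struct pkt *p = q->head;

            q->head = p->next;
            q->qlen--;
            free(p);
            dropped++;
        }
        return dropped;
    }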
1160 struct fq_sched_data *q = qdisc_priv(sch);
1163 fq_free(q->fq_root);
1164 qdisc_watchdog_cancel(&q->watchdog);
1170 struct fq_sched_data *q = qdisc_priv(sch);
1174 q->flow_plimit = 100;
1175 q->quantum = 2 * psched_mtu(qdisc_dev(sch));
1176 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
1177 q->flow_refill_delay = msecs_to_jiffies(40);
1178 q->flow_max_rate = ~0UL;
1179 q->time_next_delayed_flow = ~0ULL;
1180 q->rate_enable = 1;
1182 q->band_flows[i].new_flows.first = NULL;
1183 q->band_flows[i].old_flows.first = NULL;
1185 q->band_flows[0].quantum = 9 << 16;
1186 q->band_flows[1].quantum = 3 << 16;
1187 q->band_flows[2].quantum = 1 << 16;
1188 q->delayed = RB_ROOT;
1189 q->fq_root = NULL;
1190 q->fq_trees_log = ilog2(1024);
1191 q->orphan_mask = 1024 - 1;
1192 q->low_rate_threshold = 550000 / 8;
1194 q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */
1196 q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
1197 q->horizon_drop = 1; /* by default, drop packets beyond horizon */
1200 q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
1202 fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
1203 qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
1208 err = fq_resize(sch, q->fq_trees_log);
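The init matches (lines 1170-1208) show the defaults: a 100-packet flow limit, a quantum of two and an initial_quantum of ten device MTUs, a 40 ms refill delay, 1024 hash buckets, 9:3:1 band weights scaled by 1 << 16, a low_rate_threshold of 550 kbit/s, 10 µs of hrtimer slack, and a 10 s horizon with drop enabled. A small worked example of what that means on a typical Ethernet device (the 1514-byte figure assumes a 1500-byte MTU plus a 14-byte link-layer header, roughly what psched_mtu() accounts for):

    #include <stdio.h>

    /* Hedged sketch: turn the fq_init() defaults into concrete numbers for a
     * 1500-byte-MTU Ethernet device.  Values are illustrative arithmetic only.
     */
    int main(void)
    {
        unsigned int mtu = 1514;                  /* ~psched_mtu() on Ethernet  */
        unsigned int quantum = 2 * mtu;           /* ~3028 bytes per DRR round  */
        unsigned int initial_quantum = 10 * mtu;  /* ~15140 bytes for new flows */
        unsigned int w[3] = { 9 << 16, 3 << 16, 1 << 16 };
        unsigned int total = w[0] + w[1] + w[2];

        printf("quantum=%u initial_quantum=%u\n", quantum, initial_quantum);
        for (int i = 0; i < 3; i++)
            printf("band %d share ~%.1f%%\n", i, 100.0 * w[i] / total);
        return 0;
    }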
1215 struct fq_sched_data *q = qdisc_priv(sch);
1231 ce_threshold = READ_ONCE(q->ce_threshold);
1234 horizon = READ_ONCE(q->horizon);
1237 offload_horizon = READ_ONCE(q->offload_horizon);
1243 READ_ONCE(q->flow_plimit)) ||
1245 READ_ONCE(q->quantum)) ||
1247 READ_ONCE(q->initial_quantum)) ||
1249 READ_ONCE(q->rate_enable)) ||
1252 READ_ONCE(q->flow_max_rate), ~0U)) ||
1254 jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
1256 READ_ONCE(q->orphan_mask)) ||
1258 READ_ONCE(q->low_rate_threshold)) ||
1261 READ_ONCE(q->fq_trees_log)) ||
1263 READ_ONCE(q->timer_slack)) ||
1267 READ_ONCE(q->horizon_drop)))
1270 fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
1274 weights[0] = READ_ONCE(q->band_flows[0].quantum);
1275 weights[1] = READ_ONCE(q->band_flows[1].quantum);
1276 weights[2] = READ_ONCE(q->band_flows[2].quantum);
1288 struct fq_sched_data *q = qdisc_priv(sch);
1296 st.gc_flows = q->stat_gc_flows;
1298 st.fastpath_packets = q->internal.stat_fastpath_packets;
1300 st.throttled = q->stat_throttled;
1301 st.flows_plimit = q->stat_flows_plimit;
1302 st.pkts_too_long = q->stat_pkts_too_long;
1303 st.allocation_errors = q->stat_allocation_errors;
1304 st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
1306 st.flows = q->flows;
1307 st.inactive_flows = q->inactive_flows;
1308 st.throttled_flows = q->throttled_flows;
1310 q->unthrottle_latency_ns, ~0U);
1311 st.ce_mark = q->stat_ce_mark;
1312 st.horizon_drops = q->stat_horizon_drops;
1313 st.horizon_caps = q->stat_horizon_caps;
1315 st.band_drops[i] = q->stat_band_drops[i];
1316 st.band_pkt_count[i] = q->band_pkt_count[i];
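The stats dump (lines 1288-1316) copies the counters out as-is; the one derived value is time_next_delayed_flow at line 1304, reported relative to the current time with timer_slack folded in, i.e. how far away the next throttled flow's deadline is. A one-line sketch of that conversion (function name is illustrative):

    #include <stdint.h>

    /* Hedged sketch of the derived statistic: remaining time, in ns, until
     * the next throttled flow is due, including the watchdog's slack.
     */
    static int64_t next_delayed_flow_delta_ns(uint64_t time_next_delayed_flow,
                                              uint64_t timer_slack, uint64_t now)
    {
        return (int64_t)(time_next_delayed_flow + timer_slack - now);
    }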