Lines matching full:q in net/sched/sch_netem.c (the Linux netem qdisc); the number at the start of each entry is the line number in that file.

210 static bool loss_4state(struct netem_sched_data *q)
212 struct clgstate *clg = &q->clg;
213 u32 rnd = prandom_u32_state(&q->prng.prng_state);
275 static bool loss_gilb_ell(struct netem_sched_data *q)
277 struct clgstate *clg = &q->clg;
278 struct rnd_state *s = &q->prng.prng_state;
297 static bool loss_event(struct netem_sched_data *q)
299 switch (q->loss_model) {
302 return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
310 return loss_4state(q);
318 return loss_gilb_ell(q);
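
The three loss helpers above implement netem's packet-loss models: loss_4state() is a 4-state Markov chain, loss_gilb_ell() is the Gilbert-Elliott channel, and loss_event() dispatches on q->loss_model (the plain CLG_RANDOM case just compares q->loss against a correlated random draw). A minimal userspace sketch of the Gilbert-Elliott decision follows; struct ge_state, ge_loss() and rnd32() are illustrative names, probabilities are scaled to the full u32 range the way the netlink attributes are, and rand() stands in for the kernel's per-qdisc PRNG.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Gilbert-Elliott channel: a GOOD and a BAD state, transition
 * probabilities p (GOOD->BAD) and r (BAD->GOOD), and a per-state loss
 * check, mirroring the structure of loss_gilb_ell(). All probabilities
 * are fractions of UINT32_MAX. */
struct ge_state {
        bool bad;               /* current channel state */
        uint32_t p, r;          /* transition probabilities */
        uint32_t h, k1;         /* loss parameters for BAD and GOOD state */
};

static uint32_t rnd32(void)
{
        /* crude stand-in for prandom_u32_state(&q->prng.prng_state) */
        return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* return true if the current packet should be dropped */
static bool ge_loss(struct ge_state *s)
{
        if (!s->bad) {
                if (rnd32() < s->p)     /* GOOD -> BAD transition */
                        s->bad = true;
                if (rnd32() < s->k1)    /* loss check while in GOOD state */
                        return true;
        } else {
                if (rnd32() < s->r)     /* BAD -> GOOD transition */
                        s->bad = false;
                if (rnd32() > s->h)     /* loss check while in BAD state */
                        return true;
        }
        return false;
}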
357 static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
359 len += q->packet_overhead;
361 if (q->cell_size) {
362 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
364 if (len > cells * q->cell_size) /* extra cell needed for remainder */
366 len = cells * (q->cell_size + q->cell_overhead);
369 return div64_u64(len * NSEC_PER_SEC, q->rate);
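
packet_time_ns() converts a packet length into a transmission delay for the configured rate: add q->packet_overhead, round up to whole link-layer cells when q->cell_size is set (reciprocal_divide() is just a fast 32-bit division by a precomputed reciprocal), then scale bytes to nanoseconds. Below is a self-contained userspace sketch with the same arithmetic, assuming the rate is in bytes per second as the division by q->rate implies.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Transmission time of one packet at `rate` bytes/sec: add per-packet
 * overhead, round the length up to whole link-layer cells (e.g. ATM
 * cells), then convert bytes to nanoseconds. */
static uint64_t packet_time_ns(uint64_t len, uint64_t rate,
                               uint32_t packet_overhead,
                               uint32_t cell_size, uint32_t cell_overhead)
{
        len += packet_overhead;

        if (cell_size) {
                uint64_t cells = len / cell_size;

                if (len > cells * cell_size)    /* extra cell for the remainder */
                        cells++;
                len = cells * (cell_size + cell_overhead);
        }

        return len * NSEC_PER_SEC / rate;
}

int main(void)
{
        /* 1500-byte packet at 125000 bytes/s (1 Mbit/s), ATM-style
         * 48-byte cells with 5 bytes of cell overhead */
        printf("%llu ns\n",
               (unsigned long long)packet_time_ns(1500, 125000, 0, 48, 5));
        return 0;
}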
374 struct netem_sched_data *q = qdisc_priv(sch);
375 struct rb_node *p = rb_first(&q->t_root);
381 rb_erase(&skb->rbnode, &q->t_root);
385 rtnl_kfree_skbs(q->t_head, q->t_tail);
386 q->t_head = NULL;
387 q->t_tail = NULL;
388 q->t_len = 0;
393 struct netem_sched_data *q = qdisc_priv(sch);
396 if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
397 if (q->t_tail)
398 q->t_tail->next = nskb;
400 q->t_head = nskb;
401 q->t_tail = nskb;
403 struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
416 rb_insert_color(&nskb->rbnode, &q->t_root);
418 q->t_len++;
419 sch->q.qlen++;
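
tfifo_enqueue() shows netem's two-tier time queue: if the new packet's time_to_send is not earlier than the current tail's, it is appended to the plain singly linked list (q->t_head / q->t_tail); only out-of-order packets, produced by jitter or reordering, go into the rbtree q->t_root keyed by time_to_send. Below is a userspace sketch of that decision; struct pkt, struct tfifo and tfifo_push() are illustrative, and a sorted singly linked list stands in for the kernel's rbtree.

#include <stddef.h>
#include <stdint.h>

struct pkt {
        uint64_t time_to_send;  /* absolute release time in ns */
        struct pkt *next;
};

struct tfifo {
        struct pkt *head, *tail;        /* in-order fast path */
        struct pkt *ooo;                /* out-of-order packets, kept sorted
                                         * (stand-in for netem's rbtree) */
};

static void tfifo_push(struct tfifo *q, struct pkt *p)
{
        if (!q->tail || p->time_to_send >= q->tail->time_to_send) {
                /* common case: release times are monotonic, append in O(1) */
                if (q->tail)
                        q->tail->next = p;
                else
                        q->head = p;
                q->tail = p;
                p->next = NULL;
                return;
        }

        /* out of order: insert by time_to_send (the kernel uses an rbtree
         * here, giving O(log n) instead of this O(n) list walk) */
        struct pkt **link = &q->ooo;

        while (*link && (*link)->time_to_send <= p->time_to_send)
                link = &(*link)->next;
        p->next = *link;
        *link = p;
}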
452 struct netem_sched_data *q = qdisc_priv(sch);
464 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
468 if (loss_event(q)) {
469 if (q->ecn && INET_ECN_set_ce(skb))
483 if (q->latency || q->jitter || q->rate)
499 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
526 if (unlikely(q->t_len >= sch->limit)) {
542 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
544 q->duplicate = 0;
546 q->duplicate = dupsave;
553 if (q->gap == 0 || /* not doing reordering */
554 q->counter < q->gap - 1 || /* inside last reordering gap */
555 q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
559 delay = tabledist(q->latency, q->jitter,
560 &q->delay_cor, &q->prng, q->delay_dist);
564 if (q->rate) {
567 if (sch->q.tail)
568 last = netem_skb_cb(sch->q.tail);
569 if (q->t_root.rb_node) {
573 t_skb = skb_rb_last(&q->t_root);
579 if (q->t_tail) {
581 netem_skb_cb(q->t_tail);
599 delay += packet_time_ns(qdisc_pkt_len(skb), q);
603 ++q->counter;
611 q->counter = 0;
613 __qdisc_enqueue_head(skb, &sch->q);
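
The netem_enqueue() fragments above show the per-packet decision chain: an optional duplicate (q->duplicate), a loss event (possibly converted into ECN marking), optional corruption, and then either the normal delayed path through tabledist()/packet_time_ns() or, for every q->gap-th packet with probability q->reorder, immediate delivery at the head of the queue. A sketch of just that reordering decision, with the correlated random draw passed in by the caller as a plain uniform u32; send_immediately() is an illustrative name.

#include <stdbool.h>
#include <stdint.h>

/* Reordering decision from netem_enqueue(): with "reorder P gap N", every
 * N-th packet is a candidate to skip the delay queue, with probability P.
 * `reorder` is scaled to the u32 range and `counter` persists across
 * packets; `rnd` is a uniform 32-bit random value. */
static bool send_immediately(uint32_t gap, uint32_t reorder,
                             uint32_t *counter, uint32_t rnd)
{
        if (gap == 0 ||                 /* reordering disabled */
            *counter < gap - 1 ||       /* still inside the current gap */
            reorder < rnd) {            /* candidate, but probability says no */
                ++*counter;             /* take the normal delayed path */
                return false;
        }

        *counter = 0;                   /* start a new gap */
        return true;                    /* deliver ahead of delayed packets */
}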
655 static void get_slot_next(struct netem_sched_data *q, u64 now)
659 if (!q->slot_dist)
660 next_delay = q->slot_config.min_delay +
662 (q->slot_config.max_delay -
663 q->slot_config.min_delay) >> 32);
665 next_delay = tabledist(q->slot_config.dist_delay,
666 (s32)(q->slot_config.dist_jitter),
667 NULL, &q->prng, q->slot_dist);
669 q->slot.slot_next = now + next_delay;
670 q->slot.packets_left = q->slot_config.max_packets;
671 q->slot.bytes_left = q->slot_config.max_bytes;
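
get_slot_next() opens the next delivery slot: without a slot distribution table it draws the slot delay uniformly from [min_delay, max_delay] using the multiply-and-shift trick visible above (rnd * range >> 32), otherwise it samples tabledist(); either way it then refills the slot's packet and byte budgets. A sketch of the uniform case, with the random draw supplied by the caller; slot_open_next() and the structs are illustrative.

#include <stdint.h>

struct slot_config {
        int64_t min_delay, max_delay;   /* ns */
        int32_t max_packets;
        int32_t max_bytes;
};

struct slot_state {
        uint64_t slot_next;             /* absolute time the next slot opens */
        int32_t packets_left;
        int32_t bytes_left;
};

/* Open the next delivery slot: pick a delay uniformly in
 * [min_delay, max_delay] via (rnd * range) >> 32, then refill the
 * per-slot packet and byte budgets. `rnd` is a uniform 32-bit value. */
static void slot_open_next(struct slot_state *s, const struct slot_config *c,
                           uint64_t now, uint32_t rnd)
{
        int64_t next_delay = c->min_delay +
                (int64_t)(((uint64_t)rnd *
                           (uint64_t)(c->max_delay - c->min_delay)) >> 32);

        s->slot_next = now + next_delay;
        s->packets_left = c->max_packets;
        s->bytes_left = c->max_bytes;
}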
674 static struct sk_buff *netem_peek(struct netem_sched_data *q)
676 struct sk_buff *skb = skb_rb_first(&q->t_root);
680 return q->t_head;
681 if (!q->t_head)
685 t2 = netem_skb_cb(q->t_head)->time_to_send;
688 return q->t_head;
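
netem_peek() has to merge the two queues built by tfifo_enqueue(): the rbtree of out-of-order packets and the plain in-order list headed by q->t_head. Whichever head carries the smaller time_to_send is the next candidate. Continuing the illustrative struct pkt from the earlier sketch:

#include <stddef.h>
#include <stdint.h>

struct pkt {
        uint64_t time_to_send;
        struct pkt *next;
};

/* Peek the next packet to release: the earlier of the out-of-order queue's
 * first entry and the in-order list's head, either of which may be empty. */
static struct pkt *tfifo_peek(struct pkt *ooo_first, struct pkt *list_head)
{
        if (!ooo_first)
                return list_head;
        if (!list_head)
                return ooo_first;
        return ooo_first->time_to_send < list_head->time_to_send ?
               ooo_first : list_head;
}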
691 static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
693 if (skb == q->t_head) {
694 q->t_head = skb->next;
695 if (!q->t_head)
696 q->t_tail = NULL;
698 rb_erase(&skb->rbnode, &q->t_root);
704 struct netem_sched_data *q = qdisc_priv(sch);
708 skb = __qdisc_dequeue_head(&sch->q);
715 skb = netem_peek(q);
722 if (q->slot.slot_next && q->slot.slot_next < time_to_send)
723 get_slot_next(q, now);
725 if (time_to_send <= now && q->slot.slot_next <= now) {
726 netem_erase_head(q, skb);
727 q->t_len--;
735 if (q->slot.slot_next) {
736 q->slot.packets_left--;
737 q->slot.bytes_left -= qdisc_pkt_len(skb);
738 if (q->slot.packets_left <= 0 ||
739 q->slot.bytes_left <= 0)
740 get_slot_next(q, now);
743 if (q->qdisc) {
748 err = qdisc_enqueue(skb, q->qdisc, &to_free);
754 sch->q.qlen--;
759 sch->q.qlen--;
763 if (q->qdisc) {
764 skb = q->qdisc->ops->dequeue(q->qdisc);
766 sch->q.qlen--;
771 qdisc_watchdog_schedule_ns(&q->watchdog,
773 q->slot.slot_next));
776 if (q->qdisc) {
777 skb = q->qdisc->ops->dequeue(q->qdisc);
779 sch->q.qlen--;
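
The dequeue fragments above reduce to a simple gate: a delayed packet is released only once its own time_to_send has passed and, when slotting is configured, the current slot is open; otherwise the qdisc watchdog is armed for the later of the two times (the qdisc_watchdog_schedule_ns() call above). A sketch of that gate, where slot_next == 0 means slotting is disabled; can_release() is an illustrative name.

#include <stdbool.h>
#include <stdint.h>

/* Release gate at the heart of netem_dequeue(): deliver only if both the
 * packet's release time and the slot-open time are in the past; otherwise
 * tell the caller when to wake up. */
static bool can_release(uint64_t time_to_send, uint64_t slot_next,
                        uint64_t now, uint64_t *wake_at)
{
        if (time_to_send <= now && slot_next <= now)
                return true;

        *wake_at = time_to_send > slot_next ? time_to_send : slot_next;
        return false;
}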
788 struct netem_sched_data *q = qdisc_priv(sch);
792 if (q->qdisc)
793 qdisc_reset(q->qdisc);
794 qdisc_watchdog_cancel(&q->watchdog);
829 static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
833 q->slot_config = *c;
834 if (q->slot_config.max_packets == 0)
835 q->slot_config.max_packets = INT_MAX;
836 if (q->slot_config.max_bytes == 0)
837 q->slot_config.max_bytes = INT_MAX;
840 q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
842 q->slot.packets_left = q->slot_config.max_packets;
843 q->slot.bytes_left = q->slot_config.max_bytes;
844 if (q->slot_config.min_delay | q->slot_config.max_delay |
845 q->slot_config.dist_jitter)
846 q->slot.slot_next = ktime_get_ns();
848 q->slot.slot_next = 0;
851 static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
855 init_crandom(&q->delay_cor, c->delay_corr);
856 init_crandom(&q->loss_cor, c->loss_corr);
857 init_crandom(&q->dup_cor, c->dup_corr);
860 static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
864 q->reorder = r->probability;
865 init_crandom(&q->reorder_cor, r->correlation);
868 static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
872 q->corrupt = r->probability;
873 init_crandom(&q->corrupt_cor, r->correlation);
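
get_correlation(), get_reorder() and get_corrupt() only store a probability and seed a crndstate with the requested correlation; the interesting part is how later get_crandom() draws are made sticky. The sketch below, under the assumption that the kernel blends each fresh uniform draw with the previous output using the fixed-point weight rho, shows the idea; struct crnd and crnd_next() are illustrative rather than a copy of the kernel's exact formula.

#include <stdint.h>

/* Correlated pseudo-random source in the style of netem's crndstate:
 * each output is a weighted blend of a fresh uniform 32-bit value and
 * the previous output. rho is the correlation, scaled to the u32 range
 * (e.g. 25% correlation ~= 0x40000000); rho == 0 means independent draws. */
struct crnd {
        uint32_t rho;
        uint32_t last;
};

static uint32_t crnd_next(struct crnd *c, uint32_t fresh)
{
        uint64_t rho;
        uint32_t out;

        if (c->rho == 0)
                return fresh;

        rho = (uint64_t)c->rho + 1;
        out = (uint32_t)(((uint64_t)fresh * ((1ULL << 32) - rho) +
                          (uint64_t)c->last * rho) >> 32);
        c->last = out;
        return out;
}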
876 static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
880 q->rate = r->rate;
881 q->packet_overhead = r->packet_overhead;
882 q->cell_size = r->cell_size;
883 q->cell_overhead = r->cell_overhead;
884 if (q->cell_size)
885 q->cell_size_reciprocal = reciprocal_value(q->cell_size);
887 q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
890 static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
907 q->loss_model = CLG_4_STATES;
909 q->clg.state = TX_IN_GAP_PERIOD;
910 q->clg.a1 = gi->p13;
911 q->clg.a2 = gi->p31;
912 q->clg.a3 = gi->p32;
913 q->clg.a4 = gi->p14;
914 q->clg.a5 = gi->p23;
926 q->loss_model = CLG_GILB_ELL;
927 q->clg.state = GOOD_STATE;
928 q->clg.a1 = ge->p;
929 q->clg.a2 = ge->r;
930 q->clg.a3 = ge->h;
931 q->clg.a4 = ge->k1;
982 struct Qdisc *root, *q;
996 hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
997 if (sch != q && q->ops->cl_ops == &netem_class_ops) {
999 ((struct netem_sched_data *)qdisc_priv(q))->duplicate)
1016 struct netem_sched_data *q = qdisc_priv(sch);
1043 /* backup q->clg and q->loss_model */
1044 old_clg = q->clg;
1045 old_loss_model = q->loss_model;
1048 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
1050 q->loss_model = old_loss_model;
1051 q->clg = old_clg;
1055 q->loss_model = CLG_RANDOM;
1059 swap(q->delay_dist, delay_dist);
1061 swap(q->slot_dist, slot_dist);
1064 q->latency = PSCHED_TICKS2NS(qopt->latency);
1065 q->jitter = PSCHED_TICKS2NS(qopt->jitter);
1066 q->limit = qopt->limit;
1067 q->gap = qopt->gap;
1068 q->counter = 0;
1069 q->loss = qopt->loss;
1075 q->duplicate = qopt->duplicate;
1080 if (q->gap)
1081 q->reorder = ~0;
1084 get_correlation(q, tb[TCA_NETEM_CORR]);
1087 get_reorder(q, tb[TCA_NETEM_REORDER]);
1090 get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
1093 get_rate(q, tb[TCA_NETEM_RATE]);
1096 q->rate = max_t(u64, q->rate,
1100 q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
1103 q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
1106 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
1109 get_slot(q, tb[TCA_NETEM_SLOT]);
1112 q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
1115 q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
1117 q->prng.seed = get_random_u64();
1118 prandom_seed_state(&q->prng.prng_state, q->prng.seed);
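
The TCA_NETEM_PRNG_SEED handling above exists so that an entire emulation run can be reproduced: a user-supplied 64-bit seed (or a random one) is fed to prandom_seed_state(), and every later loss, corruption and reordering draw comes from that stream. The sketch below uses splitmix64 as a stand-in generator purely to demonstrate the reproducibility property; struct prng here is illustrative, not the kernel type.

#include <stdint.h>
#include <stdio.h>

struct prng { uint64_t state; };

static void prng_seed(struct prng *p, uint64_t seed) { p->state = seed; }

/* splitmix64: a small, well-known 64-bit generator used here only to show
 * that a fixed seed reproduces the same decision stream */
static uint64_t prng_next(struct prng *p)
{
        uint64_t z = (p->state += 0x9e3779b97f4a7c15ULL);

        z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
        z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
        return z ^ (z >> 31);
}

int main(void)
{
        struct prng a, b;

        prng_seed(&a, 42);
        prng_seed(&b, 42);
        /* identical seeds -> identical streams -> identical emulation */
        printf("%d\n", prng_next(&a) == prng_next(&b));        /* prints 1 */
        return 0;
}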
1132 struct netem_sched_data *q = qdisc_priv(sch);
1135 qdisc_watchdog_init(&q->watchdog, sch);
1140 q->loss_model = CLG_RANDOM;
1149 struct netem_sched_data *q = qdisc_priv(sch);
1151 qdisc_watchdog_cancel(&q->watchdog);
1152 if (q->qdisc)
1153 qdisc_put(q->qdisc);
1154 dist_free(q->delay_dist);
1155 dist_free(q->slot_dist);
1158 static int dump_loss_model(const struct netem_sched_data *q,
1167 switch (q->loss_model) {
1175 .p13 = q->clg.a1,
1176 .p31 = q->clg.a2,
1177 .p32 = q->clg.a3,
1178 .p14 = q->clg.a4,
1179 .p23 = q->clg.a5,
1188 .p = q->clg.a1,
1189 .r = q->clg.a2,
1190 .h = q->clg.a3,
1191 .k1 = q->clg.a4,
1210 const struct netem_sched_data *q = qdisc_priv(sch);
1219 qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
1221 qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
1223 qopt.limit = q->limit;
1224 qopt.loss = q->loss;
1225 qopt.gap = q->gap;
1226 qopt.duplicate = q->duplicate;
1230 if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
1233 if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
1236 cor.delay_corr = q->delay_cor.rho;
1237 cor.loss_corr = q->loss_cor.rho;
1238 cor.dup_corr = q->dup_cor.rho;
1242 reorder.probability = q->reorder;
1243 reorder.correlation = q->reorder_cor.rho;
1247 corrupt.probability = q->corrupt;
1248 corrupt.correlation = q->corrupt_cor.rho;
1252 if (q->rate >= (1ULL << 32)) {
1253 if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
1258 rate.rate = q->rate;
1260 rate.packet_overhead = q->packet_overhead;
1261 rate.cell_size = q->cell_size;
1262 rate.cell_overhead = q->cell_overhead;
1266 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1269 if (dump_loss_model(q, skb) != 0)
1272 if (q->slot_config.min_delay | q->slot_config.max_delay |
1273 q->slot_config.dist_jitter) {
1274 slot = q->slot_config;
1283 if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
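
The dump path above splits the configured rate between the legacy 32-bit tc_netem_rate field and a 64-bit TCA_NETEM_RATE64 attribute once the value no longer fits in 32 bits. A small sketch of that pattern; fill_rate() and struct legacy_rate are hypothetical helpers, and saturating the legacy field to ~0U follows what the q->rate >= (1ULL << 32) branch above implies.

#include <stdbool.h>
#include <stdint.h>

struct legacy_rate { uint32_t rate; };

/* Fill the legacy 32-bit field, and report whether a separate 64-bit
 * attribute is needed to carry the true rate. */
static void fill_rate(uint64_t rate, struct legacy_rate *legacy,
                      bool *need_rate64, uint64_t *rate64)
{
        if (rate >= (1ULL << 32)) {
                *need_rate64 = true;    /* receiver must read the 64-bit attr */
                *rate64 = rate;
                legacy->rate = ~0U;     /* saturate the legacy field */
        } else {
                *need_rate64 = false;
                legacy->rate = (uint32_t)rate;
        }
}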
1297 struct netem_sched_data *q = qdisc_priv(sch);
1299 if (cl != 1 || !q->qdisc) /* only one class */
1303 tcm->tcm_info = q->qdisc->handle;
1311 struct netem_sched_data *q = qdisc_priv(sch);
1313 *old = qdisc_replace(sch, new, &q->qdisc);
1319 struct netem_sched_data *q = qdisc_priv(sch);
1320 return q->qdisc;