Lines Matching +full:static +full:- +full:beta in net/sched/sch_pie.c (the PIE AQM qdisc)

1 // SPDX-License-Identifier: GPL-2.0-only
37 u64 local_prob = vars->prob; in pie_drop_early()
41 if (vars->burst_time > 0) in pie_drop_early()
47 if ((vars->qdelay < params->target / 2) && in pie_drop_early()
48 (vars->prob < MAX_PROB / 5)) in pie_drop_early()
51 /* If we have fewer than 2 mtu-sized packets, disable pie_drop_early, in pie_drop_early()
60 if (params->bytemode && packet_size <= mtu) in pie_drop_early()
63 local_prob = vars->prob; in pie_drop_early()
66 vars->accu_prob = 0; in pie_drop_early()
68 vars->accu_prob += local_prob; in pie_drop_early()
70 if (vars->accu_prob < (MAX_PROB / 100) * 85) in pie_drop_early()
72 if (vars->accu_prob >= (MAX_PROB / 2) * 17) in pie_drop_early()
77 vars->accu_prob = 0; in pie_drop_early()
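
The pie_drop_early() lines above implement PIE's derandomized drop decision: each packet adds the current probability to vars->accu_prob, drops are suppressed while the accumulator is below 0.85 of MAX_PROB, and a drop is forced once it reaches 8.5 times MAX_PROB; only in between is a random draw made. Below is a minimal userspace sketch of that logic, not the kernel code: MAX_PROB is restated as an assumption mirroring the kernel's fixed-point scale, rand() stands in for get_random_bytes(), and the burst-allowance and bytemode branches are omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PROB (UINT64_MAX >> 8)   /* assumed fixed-point "1.0" */

struct pie_sketch {
	uint64_t prob;       /* current drop probability */
	uint64_t accu_prob;  /* probability accumulated since the last drop */
};

static bool drop_early_sketch(struct pie_sketch *v)
{
	uint64_t local_prob = v->prob;

	if (local_prob == 0)
		v->accu_prob = 0;            /* nothing to accumulate */
	else
		v->accu_prob += local_prob;  /* one packet's worth of probability */

	if (v->accu_prob < (MAX_PROB / 100) * 85)  /* below 0.85: never drop */
		return false;
	if (v->accu_prob >= (MAX_PROB / 2) * 17)   /* at or above 8.5: force a drop */
		return true;

	/* In between, make a random draw; rand() is a crude stand-in here. */
	uint64_t rnd = ((uint64_t)rand() << 32) | (uint64_t)rand();
	if ((rnd >> 8) < local_prob) {
		v->accu_prob = 0;
		return true;
	}
	return false;
}

int main(void)
{
	struct pie_sketch v = { .prob = MAX_PROB / 50, .accu_prob = 0 };
	unsigned int drops = 0;

	for (int i = 0; i < 1000; i++)
		drops += drop_early_sketch(&v);
	printf("dropped %u of 1000 packets with prob set to MAX_PROB/50\n", drops);
	return 0;
}

The accumulator bounds the gap between drops on both sides, so the drop pattern stays close to the configured probability instead of clustering.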
85 static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, in pie_qdisc_enqueue()
91 if (unlikely(qdisc_qlen(sch) >= sch->limit)) { in pie_qdisc_enqueue()
92 q->stats.overlimit++; in pie_qdisc_enqueue()
96 if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog, in pie_qdisc_enqueue()
97 skb->len)) { in pie_qdisc_enqueue()
99 } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && in pie_qdisc_enqueue()
104 q->stats.ecn_mark++; in pie_qdisc_enqueue()
111 if (!q->params.dq_rate_estimator) in pie_qdisc_enqueue()
114 q->stats.packets_in++; in pie_qdisc_enqueue()
115 if (qdisc_qlen(sch) > q->stats.maxq) in pie_qdisc_enqueue()
116 q->stats.maxq = qdisc_qlen(sch); in pie_qdisc_enqueue()
122 q->stats.dropped++; in pie_qdisc_enqueue()
123 q->vars.accu_prob = 0; in pie_qdisc_enqueue()
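
Taken together, the pie_qdisc_enqueue() fragments above amount to a three-way verdict: a queue already at sch->limit counts as an overlimit drop, a packet that pie_drop_early() lets through is enqueued normally, and a packet selected for early drop is ECN-marked instead only while ecn is enabled and the drop probability is at most MAX_PROB / 10. A hedged userspace sketch of that decision ladder follows; every name in it is invented for illustration and is not the kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PROB (UINT64_MAX >> 8)   /* assumed fixed-point "1.0" */

enum verdict { PIE_ENQUEUE, PIE_ECN_MARK, PIE_DROP };

static enum verdict enqueue_verdict(unsigned int qlen, unsigned int limit,
				    bool drop_early, bool ecn_enabled,
				    bool can_mark, uint64_t prob)
{
	if (qlen >= limit)
		return PIE_DROP;                 /* counted as an overlimit drop */
	if (!drop_early)
		return PIE_ENQUEUE;              /* early drop not requested */
	if (ecn_enabled && prob <= MAX_PROB / 10 && can_mark)
		return PIE_ECN_MARK;             /* mark instead of dropping */
	return PIE_DROP;                         /* early drop */
}

int main(void)
{
	/* A lightly loaded queue with ECN on and a ~5% drop probability. */
	enum verdict v = enqueue_verdict(10, 1000, true, true, true, MAX_PROB / 20);

	printf("verdict: %s\n", v == PIE_ECN_MARK ? "ECN mark" :
	       v == PIE_DROP ? "drop" : "enqueue");
	return 0;
}

Capping ECN marking at 10% keeps misbehaving (non-responsive) ECN flows from riding out high drop probabilities unmarked and unpunished.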
127 static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
138 static int pie_change(struct Qdisc *sch, struct nlattr *opt, in pie_change()
159 WRITE_ONCE(q->params.target, in pie_change()
165 WRITE_ONCE(q->params.tupdate, in pie_change()
171 WRITE_ONCE(q->params.limit, limit); in pie_change()
172 WRITE_ONCE(sch->limit, limit); in pie_change()
176 WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA])); in pie_change()
179 WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA])); in pie_change()
182 WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN])); in pie_change()
185 WRITE_ONCE(q->params.bytemode, in pie_change()
189 WRITE_ONCE(q->params.dq_rate_estimator, in pie_change()
193 qlen = sch->q.qlen; in pie_change()
194 while (sch->q.qlen > sch->limit) { in pie_change()
195 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); in pie_change()
201 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); in pie_change()
216 if (!params->dq_rate_estimator) { in pie_process_dequeue()
217 vars->qdelay = now - pie_get_enqueue_time(skb); in pie_process_dequeue()
219 if (vars->dq_tstamp != DTIME_INVALID) in pie_process_dequeue()
220 dtime = now - vars->dq_tstamp; in pie_process_dequeue()
222 vars->dq_tstamp = now; in pie_process_dequeue()
225 vars->qdelay = 0; in pie_process_dequeue()
237 if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) { in pie_process_dequeue()
238 vars->dq_tstamp = psched_get_time(); in pie_process_dequeue()
239 vars->dq_count = 0; in pie_process_dequeue()
244 * the dq_count to -1 as we don't have enough packets to calculate the in pie_process_dequeue()
251 if (vars->dq_count != DQCOUNT_INVALID) { in pie_process_dequeue()
252 vars->dq_count += skb->len; in pie_process_dequeue()
254 if (vars->dq_count >= QUEUE_THRESHOLD) { in pie_process_dequeue()
255 u32 count = vars->dq_count << PIE_SCALE; in pie_process_dequeue()
257 dtime = now - vars->dq_tstamp; in pie_process_dequeue()
264 if (vars->avg_dq_rate == 0) in pie_process_dequeue()
265 vars->avg_dq_rate = count; in pie_process_dequeue()
267 vars->avg_dq_rate = in pie_process_dequeue()
268 (vars->avg_dq_rate - in pie_process_dequeue()
269 (vars->avg_dq_rate >> 3)) + (count >> 3); in pie_process_dequeue()
273 * dq_count to 0 to re-enter the if block when the next in pie_process_dequeue()
277 vars->dq_count = DQCOUNT_INVALID; in pie_process_dequeue()
279 vars->dq_count = 0; in pie_process_dequeue()
280 vars->dq_tstamp = psched_get_time(); in pie_process_dequeue()
290 if (vars->burst_time > 0) { in pie_process_dequeue()
291 if (vars->burst_time > dtime) in pie_process_dequeue()
292 vars->burst_time -= dtime; in pie_process_dequeue()
294 vars->burst_time = 0; in pie_process_dequeue()
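
The avg_dq_rate update above folds each new rate measurement into an exponential moving average, avg = avg - avg/8 + count/8, i.e. 7/8 of the old estimate plus 1/8 of the new sample, with the first sample seeding the average. A small standalone sketch of just that arithmetic, using made-up measurements; the dtime division and the QUEUE_THRESHOLD/PIE_SCALE bookkeeping from the surrounding lines are left out.

#include <stdint.h>
#include <stdio.h>

/* avg = avg - avg/8 + count/8: a 7/8 : 1/8 exponential moving average. */
static uint32_t ewma_update(uint32_t avg, uint32_t count)
{
	if (avg == 0)
		return count;                       /* first measurement seeds the average */
	return (avg - (avg >> 3)) + (count >> 3);   /* 7/8 old value + 1/8 new sample */
}

int main(void)
{
	/* Made-up per-interval rate measurements, in arbitrary units. */
	uint32_t samples[] = { 800, 1200, 1000, 400, 1000 };
	uint32_t avg = 0;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		avg = ewma_update(avg, samples[i]);
		printf("measured %4u -> avg_dq_rate %4u\n", samples[i], avg);
	}
	return 0;
}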
306 u64 alpha, beta; in pie_calculate_probability()
310 if (params->dq_rate_estimator) { in pie_calculate_probability()
311 qdelay_old = vars->qdelay; in pie_calculate_probability()
312 vars->qdelay_old = vars->qdelay; in pie_calculate_probability()
314 if (vars->avg_dq_rate > 0) in pie_calculate_probability()
315 qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate; in pie_calculate_probability()
319 qdelay = vars->qdelay; in pie_calculate_probability()
320 qdelay_old = vars->qdelay_old; in pie_calculate_probability()
329 /* In the algorithm, alpha and beta are between 0 and 2 with typical in pie_calculate_probability()
330 * value for alpha as 0.125. In this implementation, we use values 0-32 in pie_calculate_probability()
331 * passed from user space to represent this. Also, alpha and beta have in pie_calculate_probability()
333 * probability. alpha/beta are updated locally below by scaling down in pie_calculate_probability()
334 * by 16 to come to 0-2 range. in pie_calculate_probability()
336 alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4; in pie_calculate_probability()
337 beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4; in pie_calculate_probability()
339 /* We scale alpha and beta differently depending on how heavy the in pie_calculate_probability()
342 if (vars->prob < MAX_PROB / 10) { in pie_calculate_probability()
344 beta >>= 1; in pie_calculate_probability()
347 while (vars->prob < div_u64(MAX_PROB, power) && in pie_calculate_probability()
350 beta >>= 2; in pie_calculate_probability()
355 /* alpha and beta should be between 0 and 32, in multiples of 1/16 */ in pie_calculate_probability()
356 delta += alpha * (qdelay - params->target); in pie_calculate_probability()
357 delta += beta * (qdelay - qdelay_old); in pie_calculate_probability()
359 oldprob = vars->prob; in pie_calculate_probability()
363 vars->prob >= MAX_PROB / 10) in pie_calculate_probability()
366 /* Non-linear drop: in pie_calculate_probability()
374 vars->prob += delta; in pie_calculate_probability()
378 if (vars->prob < oldprob) { in pie_calculate_probability()
379 vars->prob = MAX_PROB; in pie_calculate_probability()
382 * skip the check to do a non-linear drop in the next in pie_calculate_probability()
389 if (vars->prob > oldprob) in pie_calculate_probability()
390 vars->prob = 0; in pie_calculate_probability()
393 /* Non-linear drop in probability: Reduce drop probability quickly if in pie_calculate_probability()
399 vars->prob -= vars->prob / 64; in pie_calculate_probability()
401 vars->qdelay = qdelay; in pie_calculate_probability()
402 vars->backlog_old = backlog; in pie_calculate_probability()
408 * estimate for the avg_dq_rate ie., is a non-zero value in pie_calculate_probability()
410 if ((vars->qdelay < params->target / 2) && in pie_calculate_probability()
411 (vars->qdelay_old < params->target / 2) && in pie_calculate_probability()
412 vars->prob == 0 && in pie_calculate_probability()
413 (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) { in pie_calculate_probability()
417 if (!params->dq_rate_estimator) in pie_calculate_probability()
418 vars->qdelay_old = qdelay; in pie_calculate_probability()
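
The probability update in pie_calculate_probability() scales the user-supplied alpha and beta (0-32, in multiples of 1/16) into fixed-point per-tick gains and then applies delta = alpha * (qdelay - target) + beta * (qdelay - qdelay_old) to vars->prob. The sketch below reproduces only that arithmetic in userspace: the tick resolution is assumed to be 1 microsecond for readability (the kernel's psched tick differs), and alpha_user = 2, beta_user = 20 are example values matching the qdisc's documented defaults.

#include <stdint.h>
#include <stdio.h>

#define MAX_PROB       (UINT64_MAX >> 8)   /* assumed fixed-point "1.0" */
#define TICKS_PER_SEC  1000000ull          /* assumption: 1 tick == 1 us */

int main(void)
{
	/* alpha/beta as configured from user space, in 1/16 units
	 * (so 2 -> 0.125 and 20 -> 1.25).
	 */
	uint64_t alpha_user = 2, beta_user = 20;
	uint64_t alpha = (alpha_user * (MAX_PROB / TICKS_PER_SEC)) >> 4;
	uint64_t beta  = (beta_user  * (MAX_PROB / TICKS_PER_SEC)) >> 4;

	/* Example queueing delays in ticks (here microseconds): 20 ms now,
	 * 18 ms at the previous update, against a 15 ms target.
	 */
	int64_t qdelay = 20000, qdelay_old = 18000, target = 15000;

	int64_t delta = 0;
	delta += (int64_t)alpha * (qdelay - target);      /* error vs. target */
	delta += (int64_t)beta  * (qdelay - qdelay_old);  /* change in delay since last update */

	printf("delta / MAX_PROB = %f\n", (double)delta / (double)MAX_PROB);
	return 0;
}

With these numbers delta comes out to roughly 0.003 * MAX_PROB, so the drop probability would rise by about 0.3% on this update, before the extra scaling the code applies while vars->prob is still small.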
422 static void pie_timer(struct timer_list *t) in pie_timer()
425 struct Qdisc *sch = q->sch; in pie_timer()
431 pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog); in pie_timer()
434 if (q->params.tupdate) in pie_timer()
435 mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); in pie_timer()
440 static int pie_init(struct Qdisc *sch, struct nlattr *opt, in pie_init()
445 pie_params_init(&q->params); in pie_init()
446 pie_vars_init(&q->vars); in pie_init()
447 sch->limit = q->params.limit; in pie_init()
449 q->sch = sch; in pie_init()
450 timer_setup(&q->adapt_timer, pie_timer, 0); in pie_init()
459 mod_timer(&q->adapt_timer, jiffies + HZ / 2); in pie_init()
463 static int pie_dump(struct Qdisc *sch, struct sk_buff *skb) in pie_dump()
474 ((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) / in pie_dump()
476 nla_put_u32(skb, TCA_PIE_LIMIT, READ_ONCE(sch->limit)) || in pie_dump()
478 jiffies_to_usecs(READ_ONCE(q->params.tupdate))) || in pie_dump()
479 nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) || in pie_dump()
480 nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) || in pie_dump()
481 nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) || in pie_dump()
483 READ_ONCE(q->params.bytemode)) || in pie_dump()
485 READ_ONCE(q->params.dq_rate_estimator))) in pie_dump()
492 return -1; in pie_dump()
495 static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) in pie_dump_stats()
499 .prob = q->vars.prob << BITS_PER_BYTE, in pie_dump_stats()
500 .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) / in pie_dump_stats()
502 .packets_in = q->stats.packets_in, in pie_dump_stats()
503 .overlimit = q->stats.overlimit, in pie_dump_stats()
504 .maxq = q->stats.maxq, in pie_dump_stats()
505 .dropped = q->stats.dropped, in pie_dump_stats()
506 .ecn_mark = q->stats.ecn_mark, in pie_dump_stats()
510 st.dq_rate_estimating = q->params.dq_rate_estimator; in pie_dump_stats()
513 if (q->params.dq_rate_estimator) in pie_dump_stats()
514 st.avg_dq_rate = q->vars.avg_dq_rate * in pie_dump_stats()
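
The xstats fields above are unit conversions for user space: vars->prob is shifted left by BITS_PER_BYTE so a probability of 1.0 lines up with U64_MAX, and the queueing delay is converted from psched ticks to microseconds (PSCHED_TICKS2NS() divided by NSEC_PER_USEC on the continuation line, which is not among the matches above). A small sketch of those two conversions, assuming a 64 ns psched tick; that tick size is an assumption of the sketch, not something taken from the listing.

#include <stdint.h>
#include <stdio.h>

#define MAX_PROB      (UINT64_MAX >> 8)   /* assumed fixed-point "1.0" */
#define TICK_SHIFT    6                   /* assumption: 1 tick == 64 ns */
#define NSEC_PER_USEC 1000ull

int main(void)
{
	uint64_t prob = MAX_PROB / 25;                          /* internal value, ~4% */
	uint64_t qdelay_ticks = 18 * 1000000ull >> TICK_SHIFT;  /* ~18 ms of delay */

	/* Probability is rescaled so that UINT64_MAX corresponds to 1.0 ... */
	uint64_t prob_dump = prob << 8;
	/* ... and the delay is reported to user space in microseconds. */
	uint64_t delay_us = (qdelay_ticks << TICK_SHIFT) / NSEC_PER_USEC;

	printf("prob ~= %.4f, delay ~= %llu us\n",
	       (double)prob_dump / (double)UINT64_MAX,
	       (unsigned long long)delay_us);
	return 0;
}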
520 static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch) in pie_qdisc_dequeue()
528 pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog); in pie_qdisc_dequeue()
532 static void pie_reset(struct Qdisc *sch) in pie_reset()
537 pie_vars_init(&q->vars); in pie_reset()
540 static void pie_destroy(struct Qdisc *sch) in pie_destroy()
544 q->params.tupdate = 0; in pie_destroy()
545 del_timer_sync(&q->adapt_timer); in pie_destroy()
548 static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
564 static int __init pie_module_init(void) in pie_module_init()
569 static void __exit pie_module_exit(void) in pie_module_exit()