// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/pie.h>

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_vars vars;
	struct pie_params params;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
		    struct pie_vars *vars, u32 backlog, u32 packet_size)
{
	u64 rnd;
	u64 local_prob = vars->prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (vars->burst_time > 0)
		return false;

	/* If the current delay is less than half of the target, and
	 * if the drop prob is low already, disable early_drop
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
	 * similar to min_th in RED
	 */
	if (backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use the packet size to compute the new
	 * probability. Smaller packets get a lower drop prob in this case.
	 */
	if (params->bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = vars->prob;

	if (local_prob == 0)
		vars->accu_prob = 0;
	else
		vars->accu_prob += local_prob;
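
	/* De-randomization, per RFC 8033, section 5.1: accu_prob accumulates
	 * the drop probability over consecutive packets. While it is below
	 * 0.85 we never drop (avoiding clusters of closely spaced drops);
	 * once it reaches 8.5 we always drop (avoiding overly long runs of
	 * enqueues). Note that (MAX_PROB / 2) * 17 equals 8.5 * MAX_PROB.
	 * In between, we drop with probability local_prob: prob values are
	 * on a 56-bit fixed-point scale (MAX_PROB is U64_MAX >> BITS_PER_BYTE
	 * in include/net/pie.h), so rnd >> BITS_PER_BYTE below yields a
	 * uniform random value on that same scale.
	 */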
	if (vars->accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (vars->accu_prob >= (MAX_PROB / 2) * 17)
		return true;

	get_random_bytes(&rnd, 8);
	if ((rnd >> BITS_PER_BYTE) < local_prob) {
		vars->accu_prob = 0;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(pie_drop_early);

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	reason = SKB_DROP_REASON_QDISC_CONGESTED;

	if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
			    skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If the packet is ECN capable, mark it rather than drop it,
		 * as long as the drop probability is no more than 10%.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	return qdisc_drop_reason(skb, sch, to_free, reason);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
	[TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* target is passed in us; convert to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		WRITE_ONCE(q->params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is passed in us; convert to jiffies */
	if (tb[TCA_PIE_TUPDATE])
		WRITE_ONCE(q->params.tupdate,
			   usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		WRITE_ONCE(q->params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}

	if (tb[TCA_PIE_ALPHA])
		WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA]));

	if (tb[TCA_PIE_BETA])
		WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA]));

	if (tb[TCA_PIE_ECN])
		WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN]));

	if (tb[TCA_PIE_BYTEMODE])
		WRITE_ONCE(q->params.bytemode,
			   nla_get_u32(tb[TCA_PIE_BYTEMODE]));

	if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if the new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
			 struct pie_vars *vars, u32 backlog)
{
	psched_time_t now = psched_get_time();
	u32 dtime = 0;

	/* If dq_rate_estimator is disabled, calculate qdelay using the
	 * packet timestamp.
	 */
	if (!params->dq_rate_estimator) {
		vars->qdelay = now - pie_get_enqueue_time(skb);

		if (vars->dq_tstamp != DTIME_INVALID)
			dtime = now - vars->dq_tstamp;

		vars->dq_tstamp = now;

		if (backlog == 0)
			vars->qdelay = 0;

		if (dtime == 0)
			return;

		goto burst_allowance_reduction;
	}

	/* If the current queue is about 10 packets or more and dq_count is
	 * unset, we have enough packets to calculate the drain rate. Save
	 * the current time as dq_tstamp and start a measurement cycle.
	 */
	if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
		vars->dq_tstamp = psched_get_time();
		vars->dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If the queue
	 * length has receded to a small value, viz., <= QUEUE_THRESHOLD
	 * bytes, reset dq_count to -1, as we no longer have enough packets
	 * to calculate the drain rate. The following if block is entered
	 * only when we have a substantial queue built up (QUEUE_THRESHOLD
	 * bytes or more), and the drain rate is computed over QUEUE_THRESHOLD
	 * bytes of dequeued data. dq_count is in bytes, the time difference
	 * in psched_time, hence the rate is in bytes/psched_time.
	 */
	if (vars->dq_count != DQCOUNT_INVALID) {
		vars->dq_count += skb->len;

		if (vars->dq_count >= QUEUE_THRESHOLD) {
			u32 count = vars->dq_count << PIE_SCALE;

			dtime = now - vars->dq_tstamp;

			if (dtime == 0)
				return;

			count = count / dtime;
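
			/* Exponentially weighted moving average with weight
			 * 1/8: avg_dq_rate = (7/8) * avg_dq_rate +
			 * (1/8) * count, both in units of
			 * (bytes << PIE_SCALE) per psched tick.
			 */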
			if (vars->avg_dq_rate == 0)
				vars->avg_dq_rate = count;
			else
				vars->avg_dq_rate =
					(vars->avg_dq_rate -
					 (vars->avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we
			 * hold on to the last drain rate calculated, else we
			 * reset dq_count to 0 to re-enter the if block when
			 * the next packet is dequeued
			 */
			if (backlog < QUEUE_THRESHOLD) {
				vars->dq_count = DQCOUNT_INVALID;
			} else {
				vars->dq_count = 0;
				vars->dq_tstamp = psched_get_time();
			}

			goto burst_allowance_reduction;
		}
	}

	return;

burst_allowance_reduction:
	if (vars->burst_time > 0) {
		if (vars->burst_time > dtime)
			vars->burst_time -= dtime;
		else
			vars->burst_time = 0;
	}
}
EXPORT_SYMBOL_GPL(pie_process_dequeue);

void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
			       u32 backlog)
{
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = 0;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	if (params->dq_rate_estimator) {
		qdelay_old = vars->qdelay;
		vars->qdelay_old = vars->qdelay;

		if (vars->avg_dq_rate > 0)
			qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
		else
			qdelay = 0;
	} else {
		qdelay = vars->qdelay;
		qdelay_old = vars->qdelay_old;
	}

	/* If qdelay is zero and backlog is not, it means the backlog is very
	 * small, so we do not update the probability in this round.
	 */
	if (qdelay == 0 && backlog != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with a typical
	 * value of 0.125 for alpha. In this implementation, we use values
	 * 0-32 passed from user space to represent this. Also, alpha and beta
	 * have a unit of HZ and need to be scaled before they can be used to
	 * update the probability. alpha/beta are updated locally below by
	 * scaling down by 16 to come to the 0-2 range.
	 */
	alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
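	/* This implements the auto-tuning table of RFC 8033, section 5.2:
	 * scale alpha/beta by 1/2 while prob is below 10%, plus a further
	 * 1/4 for each factor of ten below 1%. Net effect: 1/8 below 1%,
	 * 1/32 below 0.1%, 1/128 below 0.01%, 1/512 below 0.001%, and
	 * 1/2048 below 0.0001%.
	 */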
	if (vars->prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (vars->prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (qdelay - params->target);
	delta += beta * (qdelay - qdelay_old);

	oldprob = vars->prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    vars->prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250ms).
	 * 250ms is derived through experiments and provides error protection.
	 */
	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	vars->prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (vars->prob < oldprob) {
			vars->prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (vars->prob > oldprob)
			vars->prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * the delay has been 0 for 2 consecutive Tupdate periods.
	 */
	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Decay drop probability to 98.4% of its current value */
		vars->prob -= vars->prob / 64;

	vars->qdelay = qdelay;
	vars->backlog_old = backlog;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. The delay has been low for 2 consecutive Tupdate periods
	 * 2. The calculated drop probability is zero
	 * 3. If dq_rate_estimator is enabled, we have at least one estimate
	 *    for avg_dq_rate, i.e., it is a non-zero value
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->qdelay_old < params->target / 2) &&
	    vars->prob == 0 &&
	    (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
		pie_vars_init(vars);
	}

	if (!params->dq_rate_estimator)
		vars->qdelay_old = qdelay;
}
EXPORT_SYMBOL_GPL(pie_calculate_probability);

static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);
	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
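	/* pie_destroy() zeroes tupdate before calling del_timer_sync(), so a
	 * callback racing with teardown will not re-arm the timer here.
	 */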
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->params.tupdate))) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) ||
	    nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) ||
	    nla_put_u32(skb, TCA_PIE_ECN, READ_ONCE(q->params.ecn)) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE,
			READ_ONCE(q->params.bytemode)) ||
	    nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
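
	/* prob is kept internally on a 56-bit fixed-point scale (MAX_PROB is
	 * U64_MAX >> BITS_PER_BYTE); shifting it left by BITS_PER_BYTE
	 * rescales it to the full 64-bit scale reported to user space.
	 * delay is converted from psched ticks to microseconds.
	 */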
	struct tc_pie_xstats st = {
		.prob		= q->vars.prob << BITS_PER_BYTE,
		.delay		= ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
				  NSEC_PER_USEC,
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.maxq		= q->stats.maxq,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
	};

	/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
	st.dq_rate_estimating = q->params.dq_rate_estimator;

	/* unscale and return dq_rate in bytes per sec */
	if (q->params.dq_rate_estimator)
		st.avg_dq_rate = q->vars.avg_dq_rate *
				 (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("pie");

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");