// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
 *
 * Copyright (C) 2019 Mohit P. Tahiliani <tahiliani@nitk.edu.in>
 * Copyright (C) 2019 Sachin D. Patil <sdp.sachin@gmail.com>
 * Copyright (C) 2019 V. Saicharan <vsaicharan1998@gmail.com>
 * Copyright (C) 2019 Mohit Bhasi <mohitbhasi1998@gmail.com>
 * Copyright (C) 2019 Leslie Monis <lesliemonis@gmail.com>
 * Copyright (C) 2019 Gautam Ramakrishnan <gautamramk@gmail.com>
 */

#include <linux/jhash.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>

/* Flow Queue PIE
 *
 * Principles:
 * - Packets are classified into flows.
 * - This is a stochastic model (as we use a hash, several flows might
 *   be hashed to the same slot).
 * - Each flow has a PIE managed queue.
 * - Flows are linked onto two (Round Robin) lists,
 *   so that new flows have priority over old ones.
 * - For a given flow, packets are not reordered.
 * - Drops happen during enqueue only.
 * - ECN capability is off by default.
 * - The ECN threshold (if ECN is enabled) is at 10% by default.
 * - Uses timestamps to calculate queue delay by default.
 */

/**
 * struct fq_pie_flow - contains data for each flow
 * @vars: pie vars associated with the flow
 * @deficit: number of remaining byte credits
 * @backlog: size of data in the flow
 * @qlen: number of packets in the flow
 * @flowchain: flowchain for the flow
 * @head: first packet in the flow
 * @tail: last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};

struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 flows_cursor;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
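/* Illustrative usage sketch (assumed iproute2 syntax, not part of the
 * module): the two classification paths above can be exercised from
 * userspace. An skb->priority of the form <handle>:<1..flows_cnt>, or a
 * filter verdict carrying such a minor number, selects a flow directly;
 * all other packets fall back to the stochastic hash.
 *
 *	tc qdisc add dev eth0 root handle 1: fq_pie flows 1024
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dport 80 0xffff classid 1:5
 */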
/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct fq_pie_flow *sel_flow;
	int ret;
	u8 memory_limited = false;
	u8 enqueue = false;
	u32 pkt_len;
	u32 idx;

	/* Classifies packet into corresponding flow */
	idx = fq_pie_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	sel_flow = &q->flows[idx];
	/* Checks whether adding a new packet would exceed memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Checks if the qdisc is full */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than the parameter ecn_prob, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}
out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);
	return NET_XMIT_CN;
}

static const struct netlink_range_validation fq_pie_q_range = {
	.min = 1,
	.max = 1 << 20,
};

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM]		=
		NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]	= {.type = NLA_U32},
};

static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}
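/* fq_pie_qdisc_dequeue() below implements the deficit round-robin (DRR)
 * scheme also used by fq_codel; condensed, the loop is:
 *
 *	pick the first flow on new_flows, falling back to old_flows;
 *	if (flow->deficit <= 0)
 *		replenish by quantum, rotate the flow to old_flows, retry;
 *	if the flow turned out to be empty
 *		a new flow is rotated to old_flows once (starvation guard),
 *		an old flow is unlinked; retry;
 *	otherwise
 *		pop one skb and charge its length against the deficit.
 */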
static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}
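/* Runtime reconfiguration sketch (assumed iproute2 syntax):
 * fq_pie_change() below applies the attributes under the qdisc tree
 * lock. Note that the flow count is only settable while the flow table
 * is unallocated, i.e. at qdisc creation time:
 *
 *	tc qdisc change dev eth0 root fq_pie target 15ms tupdate 16ms \
 *		alpha 2 beta 20 memory_limit 64mb ecn
 */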
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		q->p_params.limit = limit;
		sch->limit = limit;
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must range in [1..65536]");
			goto flow_error;
		}
	}

	/* convert from microseconds to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		/* convert to pschedtime */
		q->p_params.target =
			PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		q->p_params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE]));

	if (tb[TCA_FQ_PIE_ALPHA])
		q->p_params.alpha = nla_get_u32(tb[TCA_FQ_PIE_ALPHA]);

	if (tb[TCA_FQ_PIE_BETA])
		q->p_params.beta = nla_get_u32(tb[TCA_FQ_PIE_BETA]);

	if (tb[TCA_FQ_PIE_QUANTUM])
		q->quantum = nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]);

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		q->memory_limit = nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]);

	if (tb[TCA_FQ_PIE_ECN_PROB])
		q->ecn_prob = nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]);

	if (tb[TCA_FQ_PIE_ECN])
		q->p_params.ecn = nla_get_u32(tb[TCA_FQ_PIE_ECN]);

	if (tb[TCA_FQ_PIE_BYTEMODE])
		q->p_params.bytemode = nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]);

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		q->p_params.dq_rate_estimator =
			nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]);

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}

static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	unsigned long next, tupdate;
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	int max_cnt, i;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* Limit this expensive loop to 2048 flows per round. */
	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
	for (i = 0; i < max_cnt; i++) {
		pie_calculate_probability(&q->p_params,
					  &q->flows[q->flows_cursor].vars,
					  q->flows[q->flows_cursor].backlog);
		q->flows_cursor++;
	}

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {
		q->flows_cursor = 0;
		next = tupdate;
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u32 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}
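/* fq_pie_init() above leaves a new qdisc with a limit of 10240 packets,
 * 1024 flows, quantum equal to the interface MTU, a 32 MB memory limit
 * and a 10% ECN marking threshold. fq_pie_dump() below reports these
 * attributes back to userspace; they can be inspected with, e.g.
 * (illustrative):
 *
 *	tc -s qdisc show dev eth0
 */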
static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, q->flows_cnt) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->p_params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(q->p_params.tupdate)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, q->p_params.alpha) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, q->p_params.beta) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT, q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, q->ecn_prob) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, q->p_params.ecn) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, q->p_params.bytemode) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			q->p_params.dq_rate_estimator))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.overmemory = q->overmemory,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage = q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u32 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}
}

static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->p_params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}

static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");
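/* Usage note (illustrative): built as a module, this file registers the
 * "fq_pie" qdisc and is normally auto-loaded by tc on first use:
 *
 *	modprobe sch_fq_pie
 *	tc qdisc add dev eth0 root fq_pie
 */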