// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
 *
 * Copyright (C) 2019 Mohit P. Tahiliani <tahiliani@nitk.edu.in>
 * Copyright (C) 2019 Sachin D. Patil <sdp.sachin@gmail.com>
 * Copyright (C) 2019 V. Saicharan <vsaicharan1998@gmail.com>
 * Copyright (C) 2019 Mohit Bhasi <mohitbhasi1998@gmail.com>
 * Copyright (C) 2019 Leslie Monis <lesliemonis@gmail.com>
 * Copyright (C) 2019 Gautam Ramakrishnan <gautamramk@gmail.com>
 */

#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>

/* Flow Queue PIE
 *
 * Principles:
 * - Packets are classified into flows.
 * - This is a stochastic model (as we use a hash, several flows might
 *   be hashed to the same slot).
 * - Each flow has a PIE managed queue.
 * - Flows are linked onto two (Round Robin) lists,
 *   so that new flows have priority over old ones.
 * - For a given flow, packets are not reordered.
 * - Drops during enqueue only.
 * - ECN capability is off by default.
 * - ECN threshold (if ECN is enabled) is at 10% by default.
 * - Uses timestamps to calculate queue delay by default.
 */
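
/* An illustrative iproute2 invocation (option names follow the tc fq_pie
 * syntax; the values are examples, not recommendations):
 *
 *   tc qdisc add dev eth0 root fq_pie flows 1024 target 15ms ecn
 */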

/**
 * struct fq_pie_flow - contains data for each flow
 * @vars: pie vars associated with the flow
 * @deficit: number of remaining byte credits
 * @backlog: size of data in the flow
 * @qlen: number of packets in the flow
 * @flowchain: flowchain for the flow
 * @head: first packet in the flow
 * @tail: last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};

struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 flows_cursor;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

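/* Map the skb's flow hash uniformly onto [0, flows_cnt) without a modulo,
 * using reciprocal_scale(); several flows may share a slot (stochastic
 * hashing).
 */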
static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

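/* Select a flow index in [1, flows_cnt]: honour a matching skb->priority
 * first, then any attached classifier, and fall back to hashing. A return
 * value of 0 means the packet should be dropped.
 */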
static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

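/* Enqueue path: classify the packet, account memory, enforce the packet
 * and memory limits, then let PIE decide whether to enqueue, ECN-mark or
 * drop. A flow that was idle is (re)attached to new_flows with a fresh
 * deficit of one quantum.
 */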
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct fq_pie_flow *sel_flow;
	int ret;
	u8 memory_limited = false;
	u8 enqueue = false;
	u32 pkt_len;
	u32 idx;

	/* Classifies packet into corresponding flow */
	idx = fq_pie_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	sel_flow = &q->flows[idx];
	/* Checks whether adding a new packet would exceed memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Checks if the qdisc is full */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	reason = SKB_DROP_REASON_QDISC_CONGESTED;

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* If the packet is ECN capable, mark it when the drop
		 * probability is below ecn_prob; otherwise drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}
out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	qdisc_drop_reason(skb, sch, to_free, reason);
	return NET_XMIT_CN;
}

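/* Netlink policy: quantum is range-checked here ([1, 1 << 20] bytes);
 * the remaining attributes are validated in fq_pie_change().
 */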
static const struct netlink_range_validation fq_pie_q_range = {
	.min = 1,
	.max = 1 << 20,
};

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM]		=
			NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]	= {.type = NLA_U32},
};

static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

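/* Dequeue path: classic DRR. Serve new_flows first; a flow whose deficit
 * is exhausted gets one quantum of credit and is rotated to old_flows. An
 * empty flow on new_flows is demoted to old_flows (so old flows are not
 * starved) or detached entirely.
 */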
static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}

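/* Apply a netlink configuration update under the qdisc tree lock. The
 * number of flows can only be set before the flow table is allocated;
 * everything else may change at runtime, with excess packets dropped if
 * the limit shrank.
 */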
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		WRITE_ONCE(q->p_params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must range in [1..65536]");
			goto flow_error;
		}
	}

	/* convert from microseconds to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		/* convert to pschedtime */
		WRITE_ONCE(q->p_params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is in jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		WRITE_ONCE(q->p_params.tupdate,
			   usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE])));

	if (tb[TCA_FQ_PIE_ALPHA])
		WRITE_ONCE(q->p_params.alpha,
			   nla_get_u32(tb[TCA_FQ_PIE_ALPHA]));

	if (tb[TCA_FQ_PIE_BETA])
		WRITE_ONCE(q->p_params.beta,
			   nla_get_u32(tb[TCA_FQ_PIE_BETA]));

	if (tb[TCA_FQ_PIE_QUANTUM])
		WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		WRITE_ONCE(q->memory_limit,
			   nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]));

	if (tb[TCA_FQ_PIE_ECN_PROB])
		WRITE_ONCE(q->ecn_prob,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]));

	if (tb[TCA_FQ_PIE_ECN])
		WRITE_ONCE(q->p_params.ecn,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN]));

	if (tb[TCA_FQ_PIE_BYTEMODE])
		WRITE_ONCE(q->p_params.bytemode,
			   nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]));

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->p_params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}

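/* Periodic timer: recompute each flow's PIE drop probability. The work is
 * spread across timer rounds (at most 2048 flows per round) to bound how
 * long the qdisc root lock is held; mid-sweep rounds re-arm immediately,
 * and a full tupdate interval is waited only once all flows are covered.
 */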
static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	unsigned long next, tupdate;
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	int max_cnt, i;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* Limit this expensive loop to 2048 flows per round. */
	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
	for (i = 0; i < max_cnt; i++) {
		pie_calculate_probability(&q->p_params,
					  &q->flows[q->flows_cursor].vars,
					  q->flows[q->flows_cursor].backlog);
		q->flows_cursor++;
	}

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {
		q->flows_cursor = 0;
		next = tupdate;
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

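/* Initialize defaults (10240 packet limit, 1024 flows, 32 MB memory cap,
 * 10% ECN marking threshold), apply any user-supplied attributes, then
 * allocate the flow table and kick off the adaptation timer.
 */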
static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u32 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}

static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, READ_ONCE(q->flows_cnt)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->p_params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->p_params.tupdate))) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, READ_ONCE(q->p_params.alpha)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, READ_ONCE(q->p_params.beta)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT,
			READ_ONCE(q->memory_limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, READ_ONCE(q->ecn_prob)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, READ_ONCE(q->p_params.ecn)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, READ_ONCE(q->p_params.bytemode)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->p_params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

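/* Export extended stats; the new/old flow list lengths are counted under
 * the tree lock so the snapshot is internally consistent.
 */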
static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.overmemory	= q->overmemory,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage	= q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u32 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}
}

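/* Teardown: clearing tupdate first stops fq_pie_timer() from re-arming
 * itself, so timer_delete_sync() can retire the timer for good before the
 * flow table is freed.
 */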
static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->p_params.tupdate = 0;
	timer_delete_sync(&q->adapt_timer);
	kvfree(q->flows);
}

static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq_pie");

static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");