// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/prandom.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated losses according to a 4-state
	 Markov chain (the GI model) and to the "Gilbert-Elliot" model,
	 a 2-state Markov chain.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
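
/* Illustrative tc usage (see tc-netem(8); these examples are editorial
 * additions, not part of the original file):
 *
 *   # 100ms delay, 10ms jitter, 25% correlation between successive delays:
 *   tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *   # add 0.3% random loss and 1% duplication on top:
 *   tc qdisc change dev eth0 root netem delay 100ms 10ms 25% loss 0.3% duplicate 1%
 */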

struct disttable {
	u32  size;
	s16 table[] __counted_by(size);
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	u32 t_len;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct prng {
		u64 seed;
		struct rnd_state prng_state;
	} prng;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = get_random_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state, struct prng *p)
{
	u64 value, rho;
	unsigned long answer;
	struct rnd_state *s = &p->prng_state;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32_state(s);

	value = prandom_u32_state(s);
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
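
/* A worked view of the fixed-point blend above (illustrative numbers,
 * not from the original source): reading 2^32 as 1.0, the update is
 *
 *   answer = (1 - rho) * value + rho * last
 *
 * so with rho = 0x80000000 (roughly how tc encodes a 50% correlation)
 * each output lands halfway between the fresh random value and the
 * previous output.
 */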

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32_state(&q->prng.prng_state);

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
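
/* Illustrative parameter scaling (editorial example, not from the
 * original source): a1..a5 use the same fixed-point encoding as
 * get_crandom(), ~0U ~ 100%, so p14 = 1% arrives here as
 * a4 ~ 0x028F5C28 (2^32 / 100).  From TX_IN_GAP_PERIOD, rnd < a4
 * emulates an isolated loss, rnd in (a4, a1 + a4) opens a loss burst,
 * and larger values keep the chain in the gap period.
 */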

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	struct rnd_state *s = &q->prng.prng_state;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32_state(s) < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32_state(s) < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32_state(s) < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32_state(s) > clg->a3)
			return true;
	}

	return false;
}
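
/* Special cases, for illustration: with a3 (h) = 0 and a4 (1-k) = 0 the
 * model degenerates to the Simple Gilbert model -- every packet seen in
 * BAD_STATE is lost and every packet in GOOD_STATE is delivered.  In tc
 * syntax that would be e.g. "loss gemodel 1% 10%" (p = 1%, r = 10%).
 */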

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     struct prng *prng,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state, prng);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
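
/* Sketch of the arithmetic above (illustrative): table entries are
 * samples of a zero-mean, unit-deviation distribution scaled by
 * NETEM_DIST_SCALE (8192), so the result approximates
 * mu + sigma * t / NETEM_DIST_SCALE.  sigma is split into quotient and
 * remainder parts so rounding is applied only to the small remainder
 * term; e.g. a table entry t = 8192 adds exactly one sigma to mu.
 */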

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
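
/* Illustrative figures (editorial example, not from the original
 * source): with rate = 125000000 bytes/sec (1 Gbit/s), a 1500 byte
 * packet costs 12 usec of transmit time.  With ATM-style framing,
 * cell_size = 48 and cell_overhead = 5, the same packet spans 32 cells
 * (ceil(1500 / 48)) and is charged for 32 * 53 = 1696 bytes on the
 * wire.
 */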

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
	q->t_len = 0;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	q->t_len++;
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so when we
 * statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	qdisc_skb_cb(skb)->pkt_segs = 1;
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2 = NULL;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then clone it before
	 * original is modified.
	 */
	if (count > 1)
		skb2 = skb_clone(skb, GFP_ATOMIC);

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				goto finish_segs;

			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		skb->data[get_random_u32_below(skb_headlen(skb))] ^=
			1<<get_random_u32_below(8);
	}

	if (unlikely(q->t_len >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		if (skb2)
			__qdisc_drop(skb2, to_free);
		return NET_XMIT_DROP;
	}

	/*
	 * If doing duplication then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (skb2) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		skb2 = NULL;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, &q->prng, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (skb2)
		__qdisc_drop(skb2, to_free);

	if (segs) {
		unsigned int len, last_len;
		int rc, nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(get_random_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, &q->prng, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
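
/* The matching tc syntax, for illustration (see tc-netem(8); exact
 * option spelling may vary by iproute2 version):
 *
 *   tc qdisc add dev eth0 root netem slot 800us 1ms packets 42 bytes 64k
 *
 * i.e. hold packets and release them in bursts every 0.8-1 ms, at most
 * 42 packets or 64 KB per slot.
 */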

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
deliver:
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* is it time to send this packet yet? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			q->t_len--;
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS) {
					if (net_xmit_drop_count(err))
						qdisc_qstats_drop(sch);
					sch->qstats.backlog -= pkt_len;
					sch->q.qlen--;
					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
				}
				goto tfifo_dequeue;
			}
			sch->q.qlen--;
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb) {
				sch->q.qlen--;
				goto deliver;
			}
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb) {
			sch->q.qlen--;
			goto deliver;
		}
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc_flex(*d, table, n);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	*tbl = d;
	return 0;
}
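
/* The tables themselves come from user space: iproute2 ships
 * precomputed normal, pareto and paretonormal curves (loaded by tc
 * from its lib directory, e.g. /usr/lib/tc/*.dist) and includes a
 * maketable tool for deriving custom tables from measured delays.
 */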

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
	[TCA_NETEM_PRNG_SEED]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

static const struct Qdisc_class_ops netem_class_ops;

static int check_netem_in_tree(struct Qdisc *sch, bool duplicates,
			       struct netlink_ext_ack *extack)
{
	struct Qdisc *root, *q;
	unsigned int i;

	root = qdisc_root_sleeping(sch);

	if (sch != root && root->ops->cl_ops == &netem_class_ops) {
		if (duplicates ||
		    ((struct netem_sched_data *)qdisc_priv(root))->duplicate)
			goto err;
	}

	if (!qdisc_dev(root))
		return 0;

	hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
		if (sch != q && q->ops->cl_ops == &netem_class_ops) {
			if (duplicates ||
			    ((struct netem_sched_data *)qdisc_priv(q))->duplicate)
				goto err;
		}
	}

	return 0;

err:
	NL_SET_ERR_MSG(extack,
		       "netem: cannot mix duplicating netems with other netems in tree");
	return -EINVAL;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct disttable *delay_dist = NULL;
	struct disttable *slot_dist = NULL;
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto table_free;
	}

	sch_tree_lock(sch);
	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			q->clg = old_clg;
			goto unlock;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (delay_dist)
		swap(q->delay_dist, delay_dist);
	if (slot_dist)
		swap(q->slot_dist, slot_dist);
	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;

	ret = check_netem_in_tree(sch, qopt->duplicate, extack);
	if (ret)
		goto unlock;

	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	if (tb[TCA_NETEM_PRNG_SEED])
		q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
	else
		q->prng.seed = get_random_u64();
	prandom_seed_state(&q->prng.prng_state, q->prng.seed);

unlock:
	sch_tree_unlock(sch);

table_free:
	dist_free(delay_dist);
	dist_free(slot_dist);
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
			      TCA_NETEM_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (!tc_qdisc_stats_dump(sch, 1, walker))
			return;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("netem");


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Network characteristics emulator qdisc");