// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/prandom.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
31 
32 /*	Network Emulation Queuing algorithm.
33 	====================================
34 
35 	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
36 		 Network Emulation Tool
37 		 [2] Luigi Rizzo, DummyNet for FreeBSD
38 
39 	 ----------------------------------------------------------------
40 
41 	 This started out as a simple way to delay outgoing packets to
42 	 test TCP but has grown to include most of the functionality
43 	 of a full blown network emulator like NISTnet. It can delay
44 	 packets and add random jitter (and correlation). The random
45 	 distribution can be loaded from a table as well to provide
46 	 normal, Pareto, or experimental curves. Packet loss,
47 	 duplication, and reordering can also be emulated.
48 
49 	 This qdisc does not do classification that can be handled in
50 	 layering other disciplines.  It does not need to do bandwidth
51 	 control either since that can be handled by using token
52 	 bucket or other rate control.
53 
54      Correlated Loss Generator models
55 
56 	Added generation of correlated loss according to the
57 	"Gilbert-Elliot" model, a 4-state markov model.
58 
59 	References:
60 	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
61 	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
62 	and intuitive loss model for packet networks and its implementation
63 	in the Netem module in the Linux kernel", available in [1]
64 
65 	Authors: Stefano Salsano <stefano.salsano at uniroma2.it
66 		 Fabio Ludovici <fabio.ludovici at yahoo.it>
67 */

struct disttable {
	u32  size;
	s16 table[] __counted_by(size);
};
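
/* Units and encodings used below (inferred from the conversion code in
 * this file, noted here for readability):
 *  - latency/jitter: nanoseconds (s64), converted from psched ticks or
 *    taken directly from TCA_NETEM_LATENCY64/TCA_NETEM_JITTER64
 *  - rate: bytes per second (u64), see packet_time_ns()
 *  - loss, duplicate, reorder, corrupt and the clg a1..a5 parameters:
 *    probabilities in 32-bit fixed point, i.e. p == value / 2^32
 */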

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	u32 t_len;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct prng  {
		u64 seed;
		struct rnd_state prng_state;
	} prng;

	struct disttable *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = get_random_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state, struct prng *p)
{
	u64 value, rho;
	unsigned long answer;
	struct rnd_state *s = &p->prng_state;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32_state(s);

	value = prandom_u32_state(s);
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
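
/* Illustration of the mixing above (example values, not part of the
 * algorithm): the output is a fixed-point convex combination
 * answer = value * (2^32 - rho)/2^32 + last * rho/2^32. With
 * rho = 0xc0000000 (~75% correlation), value = 0x20000000 and
 * last = 0x80000000, the result is roughly
 * 0x20000000/4 + 3 * 0x80000000/4 = 0x68000000, i.e. pulled three
 * quarters of the way toward the previous sample.
 */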

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32_state(&q->prng.prng_state);

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3) {
			clg->state = TX_IN_BURST_PERIOD;
		} else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
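
/* Note on units in the comparisons above: a1..a5 hold transition
 * probabilities in 32-bit fixed point, so e.g. "rnd < clg->a4" fires
 * with probability a4 / 2^32. The scaling from human-readable
 * percentages is expected to be done by userspace (e.g. tc).
 */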

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	struct rnd_state *s = &q->prng.prng_state;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32_state(s) < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32_state(s) < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32_state(s) < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32_state(s) > clg->a3)
			return true;
	}

	return false;
}
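
/* Reading the branches above in terms of the model parameters: from
 * GOOD_STATE the chain moves to BAD_STATE with probability a1/2^32 (p)
 * and loses the packet with probability a4/2^32 (1-k); from BAD_STATE
 * it returns to GOOD_STATE with probability a2/2^32 (r) and loses the
 * packet with probability 1 - a3/2^32 (a3 holds h).
 */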

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model).
		 * Drops a packet when the Markov 4-state loss generator
		 * signals a loss event.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm.
		 * Drops a packet when the Gilbert-Elliot loss generator
		 * signals a loss event.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma. Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     struct prng *prng,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state, prng);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
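
/* Worked example for the table path (illustrative numbers): table
 * entries are signed multipliers of sigma in units of
 * 1/NETEM_DIST_SCALE (8192). With mu = 0, sigma = 20000 and a table
 * entry t = 4096, the result is 20000 * 4096 / 8192 = 10000. The
 * quotient/remainder split of sigma merely adds round-to-nearest to
 * the division.
 */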

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
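
/* Example (illustrative numbers, assuming packet_overhead and cell_size
 * are both 0): q->rate is in bytes per second, so a 1500 byte packet at
 * rate = 125000 (i.e. 1 Mbit/s) costs
 * 1500 * NSEC_PER_SEC / 125000 = 12000000 ns = 12 ms of serialization
 * delay.
 */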

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
	q->t_len = 0;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	q->t_len++;
	sch->q.qlen++;
}
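
/* Design note: packets whose send times arrive in non-decreasing order
 * (the common case with zero or small jitter) are appended to the
 * t_head/t_tail linear list in O(1); only a send time earlier than the
 * current tail falls back to the O(log n) rbtree insertion above.
 * netem_peek() later picks whichever of the two structures holds the
 * earlier time_to_send.
 */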

/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the remaining
 * frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	qdisc_skb_cb(skb)->pkt_segs = 1;
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2 = NULL;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then clone it before
	 * original is modified.
	 */
	if (count > 1)
		skb2 = skb_clone(skb, GFP_ATOMIC);

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				goto finish_segs;

			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		if (skb_headlen(skb))
			skb->data[get_random_u32_below(skb_headlen(skb))] ^=
				1 << get_random_u32_below(8);
	}

	if (unlikely(q->t_len >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		if (skb2)
			__qdisc_drop(skb2, to_free);
		return NET_XMIT_DROP;
	}

	/*
	 * If doing duplication then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (skb2) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		skb2 = NULL;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, &q->prng, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (skb2)
		__qdisc_drop(skb2, to_free);

	if (segs) {
		unsigned int len, last_len;
		int rc, nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}

/* Schedule the start of the next slot: pick a delay for it and reset
 * the slot's packet and byte budgets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(get_random_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, &q->prng, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
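
/* The non-table branch above draws a uniform delay from
 * [min_delay, max_delay) in 32-bit fixed point: rnd * range >> 32 maps
 * rnd in [0, 2^32) onto [0, range). For example (illustrative values),
 * with min_delay = 1 ms and max_delay = 3 ms, rnd = 0x80000000 yields
 * 1 ms + (2 ms / 2) = 2 ms.
 */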

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
deliver:
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* is it time to send this packet yet? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			q->t_len--;
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS) {
					if (net_xmit_drop_count(err))
						qdisc_qstats_drop(sch);
					sch->qstats.backlog -= pkt_len;
					sch->q.qlen--;
					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
				}
				goto tfifo_dequeue;
			}
			sch->q.qlen--;
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb) {
				sch->q.qlen--;
				goto deliver;
			}
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb) {
			sch->q.qlen--;
			goto deliver;
		}
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable-size payload containing
 * signed 16-bit values.
 */
static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc_flex(*d, table, n);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	*tbl = d;
	return 0;
}
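
/* The table acts as a sampled inverse CDF: tabledist() indexes it with
 * a uniform random value, so the empirical distribution of the entries
 * shapes the resulting delay distribution. Userspace is expected to
 * supply the table (e.g. the normal/pareto/paretonormal dist files
 * shipped with iproute2); the kernel only bounds its size.
 */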

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
	[TCA_NETEM_PRNG_SEED]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
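
/* netem's TCA_OPTIONS payload is a struct tc_netem_qopt header followed
 * by ordinary netlink attributes, which is why the helper above skips
 * NLA_ALIGN(len) bytes of fixed header before parsing rather than using
 * a plain nested parse.
 */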

static const struct Qdisc_class_ops netem_class_ops;

static int check_netem_in_tree(struct Qdisc *sch, bool duplicates,
			       struct netlink_ext_ack *extack)
{
	struct Qdisc *root, *q;
	unsigned int i;

	root = qdisc_root_sleeping(sch);

	if (sch != root && root->ops->cl_ops == &netem_class_ops) {
		if (duplicates ||
		    ((struct netem_sched_data *)qdisc_priv(root))->duplicate)
			goto err;
	}

	if (!qdisc_dev(root))
		return 0;

	hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
		if (sch != q && q->ops->cl_ops == &netem_class_ops) {
			if (duplicates ||
			    ((struct netem_sched_data *)qdisc_priv(q))->duplicate)
				goto err;
		}
	}

	return 0;

err:
	NL_SET_ERR_MSG(extack,
		       "netem: cannot mix duplicating netems with other netems in tree");
	return -EINVAL;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct disttable *delay_dist = NULL;
	struct disttable *slot_dist = NULL;
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto table_free;
	}

	sch_tree_lock(sch);
	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			q->clg = old_clg;
			goto unlock;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (delay_dist)
		swap(q->delay_dist, delay_dist);
	if (slot_dist)
		swap(q->slot_dist, slot_dist);
	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;

	ret = check_netem_in_tree(sch, qopt->duplicate, extack);
	if (ret)
		goto unlock;

	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	if (tb[TCA_NETEM_PRNG_SEED])
		q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
	else
		q->prng.seed = get_random_u64();
	prandom_seed_state(&q->prng.prng_state, q->prng.seed);

unlock:
	sch_tree_unlock(sch);

table_free:
	dist_free(delay_dist);
	dist_free(slot_dist);
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
			      TCA_NETEM_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (!tc_qdisc_stats_dump(sch, 1, walker))
			return;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("netem");

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Network characteristics emulator qdisc");