xref: /linux/net/sched/sch_netem.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/prandom.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
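
/* Example usage (an illustrative sketch of the iproute2 "tc" front end;
 * option spellings follow the tc-netem(8) man page):
 *
 *   # 100ms delay, 10ms jitter with 25% correlation, 0.1% random loss
 *   tc qdisc add dev eth0 root handle 1:0 netem delay 100ms 10ms 25% loss 0.1%
 *
 *   # layer a token bucket under netem for rate control
 *   tc qdisc add dev eth0 parent 1:1 handle 10: tbf rate 256kbit buffer 1600 limit 3000
 */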

struct disttable {
	u32  size;
	s16 table[] __counted_by(size);
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	u32 t_len;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct prng  {
		u64 seed;
		struct rnd_state prng_state;
	} prng;

	struct disttable *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = get_random_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state, struct prng *p)
{
	u64 value, rho;
	unsigned long answer;
	struct rnd_state *s = &p->prng_state;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32_state(s);

	value = prandom_u32_state(s);
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
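
/* Worked example: with state->rho = 0x40000000 (rho/2^32 == 25%), each
 * output is roughly 0.75 * fresh_random + 0.25 * previous_output in
 * 32-bit fixed point; rho == 0 degenerates to plain prandom_u32_state().
 */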

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32_state(&q->prng.prng_state);

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
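
/* The a1..a5 thresholds are transition probabilities in 32-bit fixed
 * point, i.e. fractions of 2^32: for example a4 = 0.01 * 0xFFFFFFFF
 * gives a ~1% chance of TX_IN_GAP_PERIOD -> LOST_IN_GAP_PERIOD per
 * packet (the iproute2 front end performs this scaling from percent).
 */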

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases  (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	struct rnd_state *s = &q->prng.prng_state;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32_state(s) < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32_state(s) < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32_state(s) < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32_state(s) > clg->a3)
			return true;
	}

	return false;
}
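
/* Special cases: a4 (1-k) == 0 means the good state never loses, which
 * reduces this to the classical Gilbert model; additionally setting
 * a3 (h) == 0 loses every bad-state packet, the Simple Gilbert model.
 */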

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator;
		 * a true return value means this packet is lost.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator;
		 * a true return value means this packet is lost.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
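
/* Like the CLG thresholds above, q->loss is a u32 fixed point
 * probability: "loss 1%" from userspace arrives as ~0.01 * 0xFFFFFFFF.
 */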

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     struct prng *prng,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state, prng);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
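
/* tabledist() effectively computes mu + sigma * t / NETEM_DIST_SCALE with
 * rounding, t being a signed table entry scaled by NETEM_DIST_SCALE (8192).
 * Worked example: mu = 100ms, sigma = 10ms and t = 8192 (one standard
 * deviation above the mean) yield 100ms + 10ms = 110ms.
 */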

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
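
/* Worked example: q->rate is in bytes per second, so a 1500 byte packet
 * at rate 125000 (1 Mbit/s) with no overhead or cell framing costs
 * 1500 * NSEC_PER_SEC / 125000 = 12ms of transmission time.
 */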

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
	q->t_len = 0;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	q->t_len++;
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	qdisc_skb_cb(skb)->pkt_segs = 1;
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2 = NULL;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
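		/* Emulated loss: return NET_XMIT_SUCCESS so the local sender
		 * is not treated as congested, while __NET_XMIT_BYPASS tells
		 * parent qdiscs that nothing was actually queued.
		 */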
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then clone it before
	 * original is modified.
	 */
	if (count > 1)
		skb2 = skb_clone(skb, GFP_ATOMIC);

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the payload.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				goto finish_segs;

			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		if (skb_headlen(skb))
			skb->data[get_random_u32_below(skb_headlen(skb))] ^=
				1 << get_random_u32_below(8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		if (skb2)
			__qdisc_drop(skb2, to_free);
		return NET_XMIT_DROP;
	}

	/*
	 * If doing duplication then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (skb2) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		skb2 = NULL;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, &q->prng, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (skb2)
		__qdisc_drop(skb2, to_free);

	if (segs) {
		unsigned int len, last_len;
		int rc, nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round with a new future slot, refreshing the
 * byte and packet budgets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
			mul_u64_u32_shr(q->slot_config.max_delay - q->slot_config.min_delay,
					get_random_u32(), 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, &q->prng, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
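
/* Example: a slot config with min_delay == max_delay == 10ms and
 * max_packets == 32 releases packets only at 10ms slot boundaries, at
 * most 32 per slot, emulating bursty shared media such as WiFi
 * aggregation.
 */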

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
deliver:
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			q->t_len--;
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS) {
					if (net_xmit_drop_count(err))
						qdisc_qstats_drop(sch);
					sch->qstats.backlog -= pkt_len;
					sch->q.qlen--;
					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
				}
				goto tfifo_dequeue;
			}
			sch->q.qlen--;
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb) {
				sch->q.qlen--;
				goto deliver;
			}
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb) {
			sch->q.qlen--;
			goto deliver;
		}
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc_flex(*d, table, n);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	*tbl = d;
	return 0;
}
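
/* The table is typically produced offline, e.g. by the iproute2
 * "maketable" tool from measured delay data (normal, pareto and
 * paretonormal tables ship with iproute2); entries are inverse-CDF
 * samples in units of NETEM_DIST_SCALE, consumed by tabledist() above.
 */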

static int validate_time(const struct nlattr *attr, const char *name,
			 struct netlink_ext_ack *extack)
{
	if (nla_get_s64(attr) < 0) {
		NL_SET_ERR_MSG_ATTR_FMT(extack, attr, "negative %s", name);
		return -EINVAL;
	}
	return 0;
}

static int validate_slot(const struct nlattr *attr, struct netlink_ext_ack *extack)
{
	const struct tc_netem_slot *c = nla_data(attr);

	if (c->min_delay < 0 || c->max_delay < 0) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "negative slot delay");
		return -EINVAL;
	}
	if (c->min_delay > c->max_delay) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "slot min delay greater than max delay");
		return -EINVAL;
	}
	if (c->dist_delay < 0 || c->dist_jitter < 0) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "negative dist delay");
		return -EINVAL;
	}
	if (c->max_packets < 0 || c->max_bytes < 0) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "negative slot limit");
		return -EINVAL;
	}
	return 0;
}

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
	[TCA_NETEM_PRNG_SEED]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
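
/* netem uses the legacy option layout: TCA_OPTIONS carries a fixed
 * struct tc_netem_qopt header optionally followed by netlink attributes,
 *
 *   [ struct tc_netem_qopt ][ TCA_NETEM_CORR ][ TCA_NETEM_LOSS ]...
 *
 * which is why parse_attr() skips NLA_ALIGN(len) bytes by hand instead
 * of using plain nested-attribute parsing.
 */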

static const struct Qdisc_class_ops netem_class_ops;

static int check_netem_in_tree(struct Qdisc *sch, bool duplicates,
			       struct netlink_ext_ack *extack)
{
	struct Qdisc *root, *q;
	unsigned int i;

	root = qdisc_root_sleeping(sch);

	if (sch != root && root->ops->cl_ops == &netem_class_ops) {
		if (duplicates ||
		    ((struct netem_sched_data *)qdisc_priv(root))->duplicate)
			goto err;
	}

	if (!qdisc_dev(root))
		return 0;

	hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
		if (sch != q && q->ops->cl_ops == &netem_class_ops) {
			if (duplicates ||
			    ((struct netem_sched_data *)qdisc_priv(q))->duplicate)
				goto err;
		}
	}

	return 0;

err:
	NL_SET_ERR_MSG(extack,
		       "netem: cannot mix duplicating netems with other netems in tree");
	return -EINVAL;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct disttable *delay_dist = NULL;
	struct disttable *slot_dist = NULL;
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_SLOT]) {
		ret = validate_slot(tb[TCA_NETEM_SLOT], extack);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_LATENCY64]) {
		ret = validate_time(tb[TCA_NETEM_LATENCY64], "latency", extack);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_JITTER64]) {
		ret = validate_time(tb[TCA_NETEM_JITTER64], "jitter", extack);
		if (ret)
			goto table_free;
	}

	sch_tree_lock(sch);
	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			q->clg = old_clg;
			goto unlock;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (delay_dist)
		swap(q->delay_dist, delay_dist);
	if (slot_dist)
		swap(q->slot_dist, slot_dist);
	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;

	ret = check_netem_in_tree(sch, qopt->duplicate, extack);
	if (ret)
		goto unlock;

	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	if (tb[TCA_NETEM_PRNG_SEED]) {
		q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
		prandom_seed_state(&q->prng.prng_state, q->prng.seed);
	}

unlock:
	sch_tree_unlock(sch);

table_free:
	dist_free(delay_dist);
	dist_free(slot_dist);
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	q->prng.seed = get_random_u64();
	prandom_seed_state(&q->prng.prng_state, q->prng.seed);

	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
			      TCA_NETEM_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (!tc_qdisc_stats_dump(sch, 1, walker))
			return;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("netem");


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Network characteristics emulator qdisc");