/* xref: /linux/net/sched/sch_netem.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6) */
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full-blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 The emulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms at HZ=1000).
*/
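
/* Example userspace configuration, for illustration only (real iproute2
 * "tc" syntax, though exact option spellings depend on the iproute2
 * version in use):
 *
 *	# 100ms delay, 10ms jitter, 25% correlation between successive delays
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# 0.5% random loss plus 1% duplication
 *	tc qdisc change dev eth0 root netem loss 0.5% duplicate 1%
 *
 * Userspace converts each percentage into the u32 fixed-point
 * probabilities held in struct netem_sched_data below.
 */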

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct timer_list timer;

	u32 latency;
	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 jitter;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		unsigned long last;
		unsigned long rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static unsigned long get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
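
/* Worked example for get_crandom() (numbers are illustrative, not from
 * the source): state->rho is the correlation coefficient expressed as a
 * fraction of 2^32, so the update above is the fixed-point blend
 *
 *	answer = (1 - rho/2^32) * new_random + (rho/2^32) * last
 *
 * With rho = 0x80000000 (~50% correlation) each output is roughly the
 * average of a fresh net_random() value and the previous output; with
 * rho = 0 the branch above falls back to plain net_random().
 */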

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static long tabledist(unsigned long mu, long sigma,
		      struct crndstate *state, const struct disttable *dist)
{
	long t, x;
	unsigned long rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
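
/* Worked example for tabledist() (numbers are illustrative): a table
 * entry t is a distribution sample scaled by NETEM_DIST_SCALE (8192),
 * so the result approximates mu + sigma * t / NETEM_DIST_SCALE, with
 * sigma split into quotient and remainder so the multiply cannot
 * overflow a 32-bit long.  For sigma = 10000, t = 4096:
 *
 *	sigma / NETEM_DIST_SCALE = 1, sigma % NETEM_DIST_SCALE = 1808
 *	x = 1808 * 4096 + 8192/2 = 7409664;  x / 8192 = 904
 *	904 + 1 * 4096 = 5000  ==>  mu + 5000
 *
 * matching the exact value 10000 * 4096 / 8192 = 5000.
 */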

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert it at the top of
	 * the qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0 		/* not doing reordering */
	    || q->counter < q->gap 	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
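
/* Sketch of the userspace side (an assumption for illustration; the
 * conversion lives in iproute2, not in this file) showing how a
 * percentage becomes one of the u32 probabilities that netem_enqueue()
 * compares against get_crandom():
 *
 *	static u32 percent_to_prob(double percent)
 *	{
 *		return (u32)(percent / 100.0 * 0xFFFFFFFFu);
 *	}
 *
 * e.g. 1% loss maps to ~42949673, and a packet is dropped whenever the
 * correlated random draw falls at or below that threshold.
 */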

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc* sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* is it time to send this packet yet? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				qdisc_tree_decrease_qlen(q->qdisc, 1);
				sch->qstats.drops++;
				printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
				       q->qdisc->ops->id);
			}

			mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
			sch->flags |= TCQ_F_THROTTLED;
		}
	}

	return NULL;
}
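
/* Timing note (worked example; the exact rounding is an assumption about
 * PSCHED_US2JIFFIE's definition): the remaining delay is rounded to
 * jiffy granularity, so the achievable resolution is 1/HZ.  At HZ=1000 a
 * 100ms residual delay waits ~100 jiffies, but a 100us one still waits a
 * full 1ms tick; packets whose send times fall inside the same tick are
 * released together, producing the bursts noted in the header comment.
 */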

static void netem_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;

	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer_sync(&q->timer);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

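/* Sketch of where a delay distribution table comes from (the userspace
 * tooling is an assumption here, not part of this file): iproute2 ships
 * precomputed tables such as "normal" and "pareto", loaded with e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * Each __s16 entry is a sample of the desired distribution scaled by
 * NETEM_DIST_SCALE; tabledist() indexes the table with a uniform random
 * value, so the entries shape the jitter around the mean delay.
 */
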
static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reordering probability.
	 */
	q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}
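
/* Layout of the TCA_OPTIONS payload parsed by netem_change() above,
 * reconstructed from the parsing code:
 *
 *	+------------------------+  <- RTA_DATA(opt)
 *	| struct tc_netem_qopt   |  latency, limit, loss, gap, ...
 *	+------------------------+
 *	| rtattr TCA_NETEM_CORR  |  optional: struct tc_netem_corr
 *	+------------------------+
 *	| rtattr ..._DELAY_DIST  |  optional: array of __s16
 *	+------------------------+
 *	| rtattr ..._REORDER     |  optional: struct tc_netem_reorder
 *	+------------------------+
 *	| rtattr ..._CORRUPT     |  optional: struct tc_netem_corrupt
 *	+------------------------+
 */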

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of the time stamps in their control blocks.
 */
struct fifo_sched_data {
	u32 limit;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}

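/* Note on tfifo_enqueue() above: walking from the tail keeps the list
 * sorted by time_to_send.  New packets normally carry the latest send
 * time, so the reverse walk usually stops at the first comparison; the
 * common case is O(1), and only heavy reordering or jitter pushes the
 * insertion toward O(n).
 */
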
static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	init_timer(&q->timer);
	q->timer.function = netem_watchdog;
	q->timer.data = (unsigned long) sch;

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char	 *b = skb->tail;
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");