/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/pkt_sched.h>

#define VERSION "1.1"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines on top of it.  It does not need to
	 do bandwidth control either, since that can be handled by
	 using token bucket or other rate control.

	 The emulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
*/

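/*
 * Example usage (an illustrative iproute2 invocation, not part of
 * this file):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 *
 * adds 100ms of delay with +/-10ms of 25%-correlated jitter and
 * drops 0.1% of packets.
 */
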
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct timer_list timer;

	u32 latency;
	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 jitter;
	u32 duplicate;
	u32 reorder;

	struct crndstate {
		unsigned long last;
		unsigned long rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
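/*
 * Illustrative behaviour (assuming 32-bit values from net_random()):
 * rho == 0 yields plain net_random(); rho == ~0 repeats the previous
 * value forever.  In between, the result is a weighted blend:
 *
 *	answer = (value * (2^32 - rho) + last * rho) / 2^32
 */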
static unsigned long get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
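/*
 * A sketch of the fixed-point math below: the table entries t are
 * pre-scaled by NETEM_DIST_SCALE, so the desired result is roughly
 *
 *	mu + (t * sigma) / NETEM_DIST_SCALE
 *
 * sigma is split into quotient and remainder to avoid overflow, and
 * the remainder term is rounded to the nearest integer before the
 * final division.
 */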
static long tabledist(unsigned long mu, long sigma,
		      struct crndstate *state, const struct disttable *dist)
{
	long t, x;
	unsigned long rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/*
	 * If we need to duplicate the packet, re-insert the clone at the
	 * top of the qdisc tree, since the parent qdisc expects that only
	 * one skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

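	/*
	 * Reordering decision: packets normally take the delayed path
	 * below.  Once "gap" packets have passed, the next one may be
	 * sent ahead of them with probability q->reorder.  E.g. with
	 * gap 5 and reorder 25%, roughly every fifth packet has a one
	 * in four chance of jumping to the front of the queue.
	 */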
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc* sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* is it time to send this packet? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				sch->qstats.drops++;

				/* After this, qlen is no longer accurate */
				printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
				       q->qdisc->ops->id);

				sch->q.qlen--;
			}

			mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
			sch->flags |= TCQ_F_THROTTLED;
		}
	}

	return NULL;
}

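/*
 * Watchdog timer: fires once the earliest pending packet becomes due;
 * clear the throttle flag and kick the device so dequeue runs again.
 */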
static void netem_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;

	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer_sync(&q->timer);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
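/*
 * The tables themselves are generated from measured data in user
 * space (iproute2 ships e.g. normal and pareto tables) and are
 * passed down via the TCA_NETEM_DELAY_DIST attribute.
 */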
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions: if gap is set,
	 * assume 100% reorder probability (may be overridden below).
	 */
	q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of the time_to_send stamp in each
 * skb's control block.
 */
struct fifo_sched_data {
	u32 limit;
};

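/*
 * Enqueue by insertion sort: walk the queue from the tail and insert
 * the new skb behind the first packet with an earlier or equal
 * time_to_send, so the head is always the next packet due.
 */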
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	init_timer(&q->timer);
	q->timer.function = netem_watchdog;
	q->timer.data = (unsigned long) sch;

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops);
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

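/*
 * Dump mirrors the layout netem_change() parses: a tc_netem_qopt
 * struct at the start of TCA_OPTIONS, followed by the nested
 * correlation and reorder attributes.
 */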
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");
709