xref: /linux/drivers/net/ppp/ppp_generic.c (revision 0f29b46d49b0ca50536632c6a33986c3171f5ea1)
1 /*
2  * Generic PPP layer for Linux.
3  *
4  * Copyright 1999-2002 Paul Mackerras.
5  *
6  *  This program is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU General Public License
8  *  as published by the Free Software Foundation; either version
9  *  2 of the License, or (at your option) any later version.
10  *
11  * The generic PPP layer handles the PPP network interfaces, the
12  * /dev/ppp device, packet and VJ compression, and multilink.
13  * It talks to PPP `channels' via the interface defined in
14  * include/linux/ppp_channel.h.  Channels provide the basic means for
15  * sending and receiving PPP frames on some kind of communications
16  * channel.
17  *
18  * Part of the code in this driver was inspired by the old async-only
19  * PPP driver, written by Michael Callahan and Al Longyear, and
20  * subsequently hacked by Paul Mackerras.
21  *
22  * ==FILEVERSION 20041108==
23  */
24 
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/kmod.h>
28 #include <linux/init.h>
29 #include <linux/list.h>
30 #include <linux/idr.h>
31 #include <linux/netdevice.h>
32 #include <linux/poll.h>
33 #include <linux/ppp_defs.h>
34 #include <linux/filter.h>
35 #include <linux/ppp-ioctl.h>
36 #include <linux/ppp_channel.h>
37 #include <linux/ppp-comp.h>
38 #include <linux/skbuff.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/if_arp.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/spinlock.h>
44 #include <linux/rwsem.h>
45 #include <linux/stddef.h>
46 #include <linux/device.h>
47 #include <linux/mutex.h>
48 #include <linux/slab.h>
49 #include <asm/unaligned.h>
50 #include <net/slhc_vj.h>
51 #include <linux/atomic.h>
52 
53 #include <linux/nsproxy.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56 
57 #define PPP_VERSION	"2.4.2"
58 
59 /*
60  * Network protocols we support.
61  */
62 #define NP_IP	0		/* Internet Protocol V4 */
63 #define NP_IPV6	1		/* Internet Protocol V6 */
64 #define NP_IPX	2		/* IPX protocol */
65 #define NP_AT	3		/* Appletalk protocol */
66 #define NP_MPLS_UC 4		/* MPLS unicast */
67 #define NP_MPLS_MC 5		/* MPLS multicast */
68 #define NUM_NP	6		/* Number of NPs. */
69 
70 #define MPHDRLEN	6	/* multilink protocol header length */
71 #define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
72 
73 /*
74  * An instance of /dev/ppp can be associated with either a ppp
75  * interface unit or a ppp channel.  In both cases, file->private_data
76  * points to one of these.
77  */
78 struct ppp_file {
79 	enum {
80 		INTERFACE=1, CHANNEL
81 	}		kind;
82 	struct sk_buff_head xq;		/* pppd transmit queue */
83 	struct sk_buff_head rq;		/* receive queue for pppd */
84 	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
85 	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
86 	int		hdrlen;		/* space to leave for headers */
87 	int		index;		/* interface unit / channel number */
88 	int		dead;		/* unit/channel has been shut down */
89 };
90 
91 #define PF_TO_X(pf, X)		container_of(pf, X, file)
92 
93 #define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
94 #define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
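
/*
 * Illustrative sketch (not in the original source): since ppp_file is
 * embedded in its owner, container_of() recovers the owner from
 * file->private_data with no back-pointer needed; do_something() is
 * just a placeholder here:
 *
 *	struct ppp_file *pf = file->private_data;
 *	if (pf->kind == INTERFACE)
 *		do_something(PF_TO_PPP(pf));
 */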
95 
96 /*
97  * Data structure to hold primary network stats for which
98  * we want to use 64 bit storage.  Other network stats
99  * are stored in dev->stats of the ppp structure.
100  */
101 struct ppp_link_stats {
102 	u64 rx_packets;
103 	u64 tx_packets;
104 	u64 rx_bytes;
105 	u64 tx_bytes;
106 };
107 
108 /*
109  * Data structure describing one ppp unit.
110  * A ppp unit corresponds to a ppp network interface device
111  * and represents a multilink bundle.
112  * It can have 0 or more ppp channels connected to it.
113  */
114 struct ppp {
115 	struct ppp_file	file;		/* stuff for read/write/poll 0 */
116 	struct file	*owner;		/* file that owns this unit 48 */
117 	struct list_head channels;	/* list of attached channels 4c */
118 	int		n_channels;	/* how many channels are attached 54 */
119 	spinlock_t	rlock;		/* lock for receive side 58 */
120 	spinlock_t	wlock;		/* lock for transmit side 5c */
121 	int		mru;		/* max receive unit 60 */
122 	unsigned int	flags;		/* control bits 64 */
123 	unsigned int	xstate;		/* transmit state bits 68 */
124 	unsigned int	rstate;		/* receive state bits 6c */
125 	int		debug;		/* debug flags 70 */
126 	struct slcompress *vj;		/* state for VJ header compression */
127 	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
128 	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
129 	struct compressor *xcomp;	/* transmit packet compressor 8c */
130 	void		*xc_state;	/* its internal state 90 */
131 	struct compressor *rcomp;	/* receive decompressor 94 */
132 	void		*rc_state;	/* its internal state 98 */
133 	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
134 	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
135 	struct net_device *dev;		/* network interface device a4 */
136 	int		closing;	/* is device closing down? a8 */
137 #ifdef CONFIG_PPP_MULTILINK
138 	int		nxchan;		/* next channel to send something on */
139 	u32		nxseq;		/* next sequence number to send */
140 	int		mrru;		/* MP: max reconst. receive unit */
141 	u32		nextseq;	/* MP: seq no of next packet */
142 	u32		minseq;		/* MP: min of most recent seqnos */
143 	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
144 #endif /* CONFIG_PPP_MULTILINK */
145 #ifdef CONFIG_PPP_FILTER
146 	struct sk_filter *pass_filter;	/* filter for packets to pass */
147 	struct sk_filter *active_filter;/* filter for pkts to reset idle */
148 #endif /* CONFIG_PPP_FILTER */
149 	struct net	*ppp_net;	/* the net we belong to */
150 	struct ppp_link_stats stats64;	/* 64 bit network stats */
151 };
152 
153 /*
154  * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
155  * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
156  * SC_MUST_COMP
157  * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
158  * Bits in xstate: SC_COMP_RUN
159  */
160 #define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
161 			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
162 			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
163 
164 /*
165  * Private data structure for each channel.
166  * This includes the data structure used for multilink.
167  */
168 struct channel {
169 	struct ppp_file	file;		/* stuff for read/write/poll */
170 	struct list_head list;		/* link in all/new_channels list */
171 	struct ppp_channel *chan;	/* public channel data structure */
172 	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
173 	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
174 	struct ppp	*ppp;		/* ppp unit we're connected to */
175 	struct net	*chan_net;	/* the net channel belongs to */
176 	struct list_head clist;		/* link in list of channels per unit */
177 	rwlock_t	upl;		/* protects `ppp' */
178 #ifdef CONFIG_PPP_MULTILINK
179 	u8		avail;		/* flag used in multilink stuff */
180 	u8		had_frag;	/* >= 1 fragments have been sent */
181 	u32		lastseq;	/* MP: last sequence # received */
182 	int		speed;		/* speed of the corresponding ppp channel */
183 #endif /* CONFIG_PPP_MULTILINK */
184 };
185 
186 /*
187  * SMP locking issues:
188  * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
189  * list and the ppp.n_channels field; you need to take both locks
190  * before you modify them.
191  * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
192  * channel.downl.
193  */
194 
195 static DEFINE_MUTEX(ppp_mutex);
196 static atomic_t ppp_unit_count = ATOMIC_INIT(0);
197 static atomic_t channel_count = ATOMIC_INIT(0);
198 
199 /* per-net private data for this module */
200 static int ppp_net_id __read_mostly;
201 struct ppp_net {
202 	/* units to ppp mapping */
203 	struct idr units_idr;
204 
205 	/*
206 	 * all_ppp_mutex protects the units_idr mapping.
207 	 * It also ensures that finding a ppp unit in the units_idr
208 	 * map and updating its file.refcnt field is atomic.
209 	 */
210 	struct mutex all_ppp_mutex;
211 
212 	/* channels */
213 	struct list_head all_channels;
214 	struct list_head new_channels;
215 	int last_channel_index;
216 
217 	/*
218 	 * all_channels_lock protects all_channels and
219	 * last_channel_index, and the atomicity of finding
220 	 * a channel and updating its file.refcnt field.
221 	 */
222 	spinlock_t all_channels_lock;
223 };
224 
225 /* Get the PPP protocol number from a skb */
226 #define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
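/* For example, an IPv4 frame begins with protocol bytes 0x00 0x21,
   so PPP_PROTO(skb) yields 0x0021 == PPP_IP. */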
227 
228 /* We limit the length of ppp->file.rq to this (arbitrary) value */
229 #define PPP_MAX_RQLEN	32
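/* When the limit is exceeded, the oldest queued frames are dropped
   first; see ppp_input() and ppp_receive_nonmp_frame(). */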
230 
231 /*
232  * Maximum number of multilink fragments queued up.
233  * This has to be large enough to cope with the maximum latency of
234  * the slowest channel relative to the others.  Strictly it should
235  * depend on the number of channels and their characteristics.
236  */
237 #define PPP_MP_MAX_QLEN	128
238 
239 /* Multilink header bits. */
240 #define B	0x80		/* this fragment begins a packet */
241 #define E	0x40		/* this fragment ends a packet */
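/* A packet that fits in one fragment carries both bits: its single
   fragment both begins (B) and ends (E) the packet, as arranged in
   ppp_mp_explode() below. */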
242 
243 /* Compare multilink sequence numbers (assumed to be 32 bits wide) */
244 #define seq_before(a, b)	((s32)((a) - (b)) < 0)
245 #define seq_after(a, b)		((s32)((a) - (b)) > 0)
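/* The subtraction wraps modulo 2^32, so the comparisons stay valid
   across wraparound: e.g. seq_before(0xffffffff, 0) is true because
   (s32)(0xffffffff - 0) == -1. */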
246 
247 /* Prototypes. */
248 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
249 			struct file *file, unsigned int cmd, unsigned long arg);
250 static void ppp_xmit_process(struct ppp *ppp);
251 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
252 static void ppp_push(struct ppp *ppp);
253 static void ppp_channel_push(struct channel *pch);
254 static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
255 			      struct channel *pch);
256 static void ppp_receive_error(struct ppp *ppp);
257 static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
258 static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
259 					    struct sk_buff *skb);
260 #ifdef CONFIG_PPP_MULTILINK
261 static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
262 				struct channel *pch);
263 static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
264 static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
265 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
266 #endif /* CONFIG_PPP_MULTILINK */
267 static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
268 static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
269 static void ppp_ccp_closed(struct ppp *ppp);
270 static struct compressor *find_compressor(int type);
271 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
272 static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
273 static void init_ppp_file(struct ppp_file *pf, int kind);
274 static void ppp_shutdown_interface(struct ppp *ppp);
275 static void ppp_destroy_interface(struct ppp *ppp);
276 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
277 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
278 static int ppp_connect_channel(struct channel *pch, int unit);
279 static int ppp_disconnect_channel(struct channel *pch);
280 static void ppp_destroy_channel(struct channel *pch);
281 static int unit_get(struct idr *p, void *ptr);
282 static int unit_set(struct idr *p, void *ptr, int n);
283 static void unit_put(struct idr *p, int n);
284 static void *unit_find(struct idr *p, int n);
285 
286 static struct class *ppp_class;
287 
288 /* per net-namespace data */
289 static inline struct ppp_net *ppp_pernet(struct net *net)
290 {
291 	BUG_ON(!net);
292 
293 	return net_generic(net, ppp_net_id);
294 }
295 
296 /* Translates a PPP protocol number to a NP index (NP == network protocol) */
297 static inline int proto_to_npindex(int proto)
298 {
299 	switch (proto) {
300 	case PPP_IP:
301 		return NP_IP;
302 	case PPP_IPV6:
303 		return NP_IPV6;
304 	case PPP_IPX:
305 		return NP_IPX;
306 	case PPP_AT:
307 		return NP_AT;
308 	case PPP_MPLS_UC:
309 		return NP_MPLS_UC;
310 	case PPP_MPLS_MC:
311 		return NP_MPLS_MC;
312 	}
313 	return -EINVAL;
314 }
315 
316 /* Translates an NP index into a PPP protocol number */
317 static const int npindex_to_proto[NUM_NP] = {
318 	PPP_IP,
319 	PPP_IPV6,
320 	PPP_IPX,
321 	PPP_AT,
322 	PPP_MPLS_UC,
323 	PPP_MPLS_MC,
324 };
325 
326 /* Translates an ethertype into an NP index */
327 static inline int ethertype_to_npindex(int ethertype)
328 {
329 	switch (ethertype) {
330 	case ETH_P_IP:
331 		return NP_IP;
332 	case ETH_P_IPV6:
333 		return NP_IPV6;
334 	case ETH_P_IPX:
335 		return NP_IPX;
336 	case ETH_P_PPPTALK:
337 	case ETH_P_ATALK:
338 		return NP_AT;
339 	case ETH_P_MPLS_UC:
340 		return NP_MPLS_UC;
341 	case ETH_P_MPLS_MC:
342 		return NP_MPLS_MC;
343 	}
344 	return -1;
345 }
346 
347 /* Translates an NP index into an ethertype */
348 static const int npindex_to_ethertype[NUM_NP] = {
349 	ETH_P_IP,
350 	ETH_P_IPV6,
351 	ETH_P_IPX,
352 	ETH_P_PPPTALK,
353 	ETH_P_MPLS_UC,
354 	ETH_P_MPLS_MC,
355 };
356 
357 /*
358  * Locking shorthand.
359  */
360 #define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
361 #define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
362 #define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
363 #define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
364 #define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
365 				     ppp_recv_lock(ppp); } while (0)
366 #define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
367 				     ppp_xmit_unlock(ppp); } while (0)
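
/*
 * Typical usage (illustrative): take both sides around state that
 * both paths read, in the order given above (wlock, then rlock):
 *
 *	ppp_lock(ppp);
 *	ppp->flags = val & SC_FLAG_BITS;
 *	ppp_unlock(ppp);
 */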
368 
369 /*
370  * /dev/ppp device routines.
371  * The /dev/ppp device is used by pppd to control the ppp unit.
372  * It supports the read, write, ioctl and poll functions.
373  * Open instances of /dev/ppp can be in one of three states:
374  * unattached, attached to a ppp unit, or attached to a ppp channel.
375  */
376 static int ppp_open(struct inode *inode, struct file *file)
377 {
378 	/*
379 	 * This could (should?) be enforced by the permissions on /dev/ppp.
380 	 */
381 	if (!capable(CAP_NET_ADMIN))
382 		return -EPERM;
383 	return 0;
384 }
385 
386 static int ppp_release(struct inode *unused, struct file *file)
387 {
388 	struct ppp_file *pf = file->private_data;
389 	struct ppp *ppp;
390 
391 	if (pf) {
392 		file->private_data = NULL;
393 		if (pf->kind == INTERFACE) {
394 			ppp = PF_TO_PPP(pf);
395 			if (file == ppp->owner)
396 				ppp_shutdown_interface(ppp);
397 		}
398 		if (atomic_dec_and_test(&pf->refcnt)) {
399 			switch (pf->kind) {
400 			case INTERFACE:
401 				ppp_destroy_interface(PF_TO_PPP(pf));
402 				break;
403 			case CHANNEL:
404 				ppp_destroy_channel(PF_TO_CHANNEL(pf));
405 				break;
406 			}
407 		}
408 	}
409 	return 0;
410 }
411 
412 static ssize_t ppp_read(struct file *file, char __user *buf,
413 			size_t count, loff_t *ppos)
414 {
415 	struct ppp_file *pf = file->private_data;
416 	DECLARE_WAITQUEUE(wait, current);
417 	ssize_t ret;
418 	struct sk_buff *skb = NULL;
419 	struct iovec iov;
420 
421 	ret = count;
422 
423 	if (!pf)
424 		return -ENXIO;
425 	add_wait_queue(&pf->rwait, &wait);
426 	for (;;) {
427 		set_current_state(TASK_INTERRUPTIBLE);
428 		skb = skb_dequeue(&pf->rq);
429 		if (skb)
430 			break;
431 		ret = 0;
432 		if (pf->dead)
433 			break;
434 		if (pf->kind == INTERFACE) {
435 			/*
436 			 * Return 0 (EOF) on an interface that has no
437 			 * channels connected, unless it is looping
438 			 * network traffic (demand mode).
439 			 */
440 			struct ppp *ppp = PF_TO_PPP(pf);
441 			if (ppp->n_channels == 0 &&
442 			    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
443 				break;
444 		}
445 		ret = -EAGAIN;
446 		if (file->f_flags & O_NONBLOCK)
447 			break;
448 		ret = -ERESTARTSYS;
449 		if (signal_pending(current))
450 			break;
451 		schedule();
452 	}
453 	set_current_state(TASK_RUNNING);
454 	remove_wait_queue(&pf->rwait, &wait);
455 
456 	if (!skb)
457 		goto out;
458 
459 	ret = -EOVERFLOW;
460 	if (skb->len > count)
461 		goto outf;
462 	ret = -EFAULT;
463 	iov.iov_base = buf;
464 	iov.iov_len = count;
465 	if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
466 		goto outf;
467 	ret = skb->len;
468 
469  outf:
470 	kfree_skb(skb);
471  out:
472 	return ret;
473 }
474 
475 static ssize_t ppp_write(struct file *file, const char __user *buf,
476 			 size_t count, loff_t *ppos)
477 {
478 	struct ppp_file *pf = file->private_data;
479 	struct sk_buff *skb;
480 	ssize_t ret;
481 
482 	if (!pf)
483 		return -ENXIO;
484 	ret = -ENOMEM;
485 	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
486 	if (!skb)
487 		goto out;
488 	skb_reserve(skb, pf->hdrlen);
489 	ret = -EFAULT;
490 	if (copy_from_user(skb_put(skb, count), buf, count)) {
491 		kfree_skb(skb);
492 		goto out;
493 	}
494 
495 	skb_queue_tail(&pf->xq, skb);
496 
497 	switch (pf->kind) {
498 	case INTERFACE:
499 		ppp_xmit_process(PF_TO_PPP(pf));
500 		break;
501 	case CHANNEL:
502 		ppp_channel_push(PF_TO_CHANNEL(pf));
503 		break;
504 	}
505 
506 	ret = count;
507 
508  out:
509 	return ret;
510 }
511 
512 /* No kernel lock - fine */
513 static unsigned int ppp_poll(struct file *file, poll_table *wait)
514 {
515 	struct ppp_file *pf = file->private_data;
516 	unsigned int mask;
517 
518 	if (!pf)
519 		return 0;
520 	poll_wait(file, &pf->rwait, wait);
521 	mask = POLLOUT | POLLWRNORM;
522 	if (skb_peek(&pf->rq))
523 		mask |= POLLIN | POLLRDNORM;
524 	if (pf->dead)
525 		mask |= POLLHUP;
526 	else if (pf->kind == INTERFACE) {
527 		/* see comment in ppp_read */
528 		struct ppp *ppp = PF_TO_PPP(pf);
529 		if (ppp->n_channels == 0 &&
530 		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
531 			mask |= POLLIN | POLLRDNORM;
532 	}
533 
534 	return mask;
535 }
536 
537 #ifdef CONFIG_PPP_FILTER
538 static int get_filter(void __user *arg, struct sock_filter **p)
539 {
540 	struct sock_fprog uprog;
541 	struct sock_filter *code = NULL;
542 	int len;
543 
544 	if (copy_from_user(&uprog, arg, sizeof(uprog)))
545 		return -EFAULT;
546 
547 	if (!uprog.len) {
548 		*p = NULL;
549 		return 0;
550 	}
551 
552 	len = uprog.len * sizeof(struct sock_filter);
553 	code = memdup_user(uprog.filter, len);
554 	if (IS_ERR(code))
555 		return PTR_ERR(code);
556 
557 	*p = code;
558 	return uprog.len;
559 }
560 #endif /* CONFIG_PPP_FILTER */
561 
562 static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
563 {
564 	struct ppp_file *pf = file->private_data;
565 	struct ppp *ppp;
566 	int err = -EFAULT, val, val2, i;
567 	struct ppp_idle idle;
568 	struct npioctl npi;
569 	int unit, cflags;
570 	struct slcompress *vj;
571 	void __user *argp = (void __user *)arg;
572 	int __user *p = argp;
573 
574 	if (!pf)
575 		return ppp_unattached_ioctl(current->nsproxy->net_ns,
576 					pf, file, cmd, arg);
577 
578 	if (cmd == PPPIOCDETACH) {
579 		/*
580 		 * We have to be careful here... if the file descriptor
581 		 * has been dup'd, we could have another process in the
582 		 * middle of a poll using the same file *, so we had
583 		 * better not free the interface data structures -
584 		 * instead we fail the ioctl.  Even in this case, we
585 		 * shut down the interface if we are the owner of it.
586 		 * Actually, we should get rid of PPPIOCDETACH, userland
587 		 * (i.e. pppd) could achieve the same effect by closing
588 		 * this fd and reopening /dev/ppp.
589 		 */
590 		err = -EINVAL;
591 		mutex_lock(&ppp_mutex);
592 		if (pf->kind == INTERFACE) {
593 			ppp = PF_TO_PPP(pf);
594 			if (file == ppp->owner)
595 				ppp_shutdown_interface(ppp);
596 		}
597 		if (atomic_long_read(&file->f_count) <= 2) {
598 			ppp_release(NULL, file);
599 			err = 0;
600 		} else
601 			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
602 				atomic_long_read(&file->f_count));
603 		mutex_unlock(&ppp_mutex);
604 		return err;
605 	}
606 
607 	if (pf->kind == CHANNEL) {
608 		struct channel *pch;
609 		struct ppp_channel *chan;
610 
611 		mutex_lock(&ppp_mutex);
612 		pch = PF_TO_CHANNEL(pf);
613 
614 		switch (cmd) {
615 		case PPPIOCCONNECT:
616 			if (get_user(unit, p))
617 				break;
618 			err = ppp_connect_channel(pch, unit);
619 			break;
620 
621 		case PPPIOCDISCONN:
622 			err = ppp_disconnect_channel(pch);
623 			break;
624 
625 		default:
626 			down_read(&pch->chan_sem);
627 			chan = pch->chan;
628 			err = -ENOTTY;
629 			if (chan && chan->ops->ioctl)
630 				err = chan->ops->ioctl(chan, cmd, arg);
631 			up_read(&pch->chan_sem);
632 		}
633 		mutex_unlock(&ppp_mutex);
634 		return err;
635 	}
636 
637 	if (pf->kind != INTERFACE) {
638 		/* can't happen */
639 		pr_err("PPP: not interface or channel??\n");
640 		return -EINVAL;
641 	}
642 
643 	mutex_lock(&ppp_mutex);
644 	ppp = PF_TO_PPP(pf);
645 	switch (cmd) {
646 	case PPPIOCSMRU:
647 		if (get_user(val, p))
648 			break;
649 		ppp->mru = val;
650 		err = 0;
651 		break;
652 
653 	case PPPIOCSFLAGS:
654 		if (get_user(val, p))
655 			break;
656 		ppp_lock(ppp);
657 		cflags = ppp->flags & ~val;
658 		ppp->flags = val & SC_FLAG_BITS;
659 		ppp_unlock(ppp);
660 		if (cflags & SC_CCP_OPEN)
661 			ppp_ccp_closed(ppp);
662 		err = 0;
663 		break;
664 
665 	case PPPIOCGFLAGS:
666 		val = ppp->flags | ppp->xstate | ppp->rstate;
667 		if (put_user(val, p))
668 			break;
669 		err = 0;
670 		break;
671 
672 	case PPPIOCSCOMPRESS:
673 		err = ppp_set_compress(ppp, arg);
674 		break;
675 
676 	case PPPIOCGUNIT:
677 		if (put_user(ppp->file.index, p))
678 			break;
679 		err = 0;
680 		break;
681 
682 	case PPPIOCSDEBUG:
683 		if (get_user(val, p))
684 			break;
685 		ppp->debug = val;
686 		err = 0;
687 		break;
688 
689 	case PPPIOCGDEBUG:
690 		if (put_user(ppp->debug, p))
691 			break;
692 		err = 0;
693 		break;
694 
695 	case PPPIOCGIDLE:
696 		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
697 		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
698 		if (copy_to_user(argp, &idle, sizeof(idle)))
699 			break;
700 		err = 0;
701 		break;
702 
703 	case PPPIOCSMAXCID:
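		/*
		 * Clarifying note (inferred from the slhc_init(rslots,
		 * tslots) call below): the low 16 bits of the argument
		 * carry the max transmit slot id; the high 16 bits, if
		 * nonzero, carry the max receive slot id (default 15).
		 */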
704 		if (get_user(val, p))
705 			break;
706 		val2 = 15;
707 		if ((val >> 16) != 0) {
708 			val2 = val >> 16;
709 			val &= 0xffff;
710 		}
711 		vj = slhc_init(val2+1, val+1);
712 		if (!vj) {
713 			netdev_err(ppp->dev,
714 				   "PPP: no memory (VJ compressor)\n");
715 			err = -ENOMEM;
716 			break;
717 		}
718 		ppp_lock(ppp);
719 		if (ppp->vj)
720 			slhc_free(ppp->vj);
721 		ppp->vj = vj;
722 		ppp_unlock(ppp);
723 		err = 0;
724 		break;
725 
726 	case PPPIOCGNPMODE:
727 	case PPPIOCSNPMODE:
728 		if (copy_from_user(&npi, argp, sizeof(npi)))
729 			break;
730 		err = proto_to_npindex(npi.protocol);
731 		if (err < 0)
732 			break;
733 		i = err;
734 		if (cmd == PPPIOCGNPMODE) {
735 			err = -EFAULT;
736 			npi.mode = ppp->npmode[i];
737 			if (copy_to_user(argp, &npi, sizeof(npi)))
738 				break;
739 		} else {
740 			ppp->npmode[i] = npi.mode;
741 			/* we may be able to transmit more packets now (??) */
742 			netif_wake_queue(ppp->dev);
743 		}
744 		err = 0;
745 		break;
746 
747 #ifdef CONFIG_PPP_FILTER
748 	case PPPIOCSPASS:
749 	{
750 		struct sock_filter *code;
751 
752 		err = get_filter(argp, &code);
753 		if (err >= 0) {
754 			struct sock_fprog_kern fprog = {
755 				.len = err,
756 				.filter = code,
757 			};
758 
759 			ppp_lock(ppp);
760 			if (ppp->pass_filter) {
761 				sk_unattached_filter_destroy(ppp->pass_filter);
762 				ppp->pass_filter = NULL;
763 			}
764 			if (fprog.filter != NULL)
765 				err = sk_unattached_filter_create(&ppp->pass_filter,
766 								  &fprog);
767 			else
768 				err = 0;
769 			kfree(code);
770 			ppp_unlock(ppp);
771 		}
772 		break;
773 	}
774 	case PPPIOCSACTIVE:
775 	{
776 		struct sock_filter *code;
777 
778 		err = get_filter(argp, &code);
779 		if (err >= 0) {
780 			struct sock_fprog_kern fprog = {
781 				.len = err,
782 				.filter = code,
783 			};
784 
785 			ppp_lock(ppp);
786 			if (ppp->active_filter) {
787 				sk_unattached_filter_destroy(ppp->active_filter);
788 				ppp->active_filter = NULL;
789 			}
790 			if (fprog.filter != NULL)
791 				err = sk_unattached_filter_create(&ppp->active_filter,
792 								  &fprog);
793 			else
794 				err = 0;
795 			kfree(code);
796 			ppp_unlock(ppp);
797 		}
798 		break;
799 	}
800 #endif /* CONFIG_PPP_FILTER */
801 
802 #ifdef CONFIG_PPP_MULTILINK
803 	case PPPIOCSMRRU:
804 		if (get_user(val, p))
805 			break;
806 		ppp_recv_lock(ppp);
807 		ppp->mrru = val;
808 		ppp_recv_unlock(ppp);
809 		err = 0;
810 		break;
811 #endif /* CONFIG_PPP_MULTILINK */
812 
813 	default:
814 		err = -ENOTTY;
815 	}
816 	mutex_unlock(&ppp_mutex);
817 	return err;
818 }
819 
820 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
821 			struct file *file, unsigned int cmd, unsigned long arg)
822 {
823 	int unit, err = -EFAULT;
824 	struct ppp *ppp;
825 	struct channel *chan;
826 	struct ppp_net *pn;
827 	int __user *p = (int __user *)arg;
828 
829 	mutex_lock(&ppp_mutex);
830 	switch (cmd) {
831 	case PPPIOCNEWUNIT:
832 		/* Create a new ppp unit */
833 		if (get_user(unit, p))
834 			break;
835 		ppp = ppp_create_interface(net, unit, &err);
836 		if (!ppp)
837 			break;
838 		file->private_data = &ppp->file;
839 		ppp->owner = file;
840 		err = -EFAULT;
841 		if (put_user(ppp->file.index, p))
842 			break;
843 		err = 0;
844 		break;
845 
846 	case PPPIOCATTACH:
847 		/* Attach to an existing ppp unit */
848 		if (get_user(unit, p))
849 			break;
850 		err = -ENXIO;
851 		pn = ppp_pernet(net);
852 		mutex_lock(&pn->all_ppp_mutex);
853 		ppp = ppp_find_unit(pn, unit);
854 		if (ppp) {
855 			atomic_inc(&ppp->file.refcnt);
856 			file->private_data = &ppp->file;
857 			err = 0;
858 		}
859 		mutex_unlock(&pn->all_ppp_mutex);
860 		break;
861 
862 	case PPPIOCATTCHAN:
863 		if (get_user(unit, p))
864 			break;
865 		err = -ENXIO;
866 		pn = ppp_pernet(net);
867 		spin_lock_bh(&pn->all_channels_lock);
868 		chan = ppp_find_channel(pn, unit);
869 		if (chan) {
870 			atomic_inc(&chan->file.refcnt);
871 			file->private_data = &chan->file;
872 			err = 0;
873 		}
874 		spin_unlock_bh(&pn->all_channels_lock);
875 		break;
876 
877 	default:
878 		err = -ENOTTY;
879 	}
880 	mutex_unlock(&ppp_mutex);
881 	return err;
882 }
883 
884 static const struct file_operations ppp_device_fops = {
885 	.owner		= THIS_MODULE,
886 	.read		= ppp_read,
887 	.write		= ppp_write,
888 	.poll		= ppp_poll,
889 	.unlocked_ioctl	= ppp_ioctl,
890 	.open		= ppp_open,
891 	.release	= ppp_release,
892 	.llseek		= noop_llseek,
893 };
894 
895 static __net_init int ppp_init_net(struct net *net)
896 {
897 	struct ppp_net *pn = net_generic(net, ppp_net_id);
898 
899 	idr_init(&pn->units_idr);
900 	mutex_init(&pn->all_ppp_mutex);
901 
902 	INIT_LIST_HEAD(&pn->all_channels);
903 	INIT_LIST_HEAD(&pn->new_channels);
904 
905 	spin_lock_init(&pn->all_channels_lock);
906 
907 	return 0;
908 }
909 
910 static __net_exit void ppp_exit_net(struct net *net)
911 {
912 	struct ppp_net *pn = net_generic(net, ppp_net_id);
913 
914 	idr_destroy(&pn->units_idr);
915 }
916 
917 static struct pernet_operations ppp_net_ops = {
918 	.init = ppp_init_net,
919 	.exit = ppp_exit_net,
920 	.id   = &ppp_net_id,
921 	.size = sizeof(struct ppp_net),
922 };
923 
924 #define PPP_MAJOR	108
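
/*
 * For reference: the device node is character major 108, minor 0,
 * e.g. "mknod /dev/ppp c 108 0" if it is not created automatically
 * via the device_create() call below.
 */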
925 
926 /* Called at boot time if ppp is compiled into the kernel,
927    or at module load time (from init_module) if compiled as a module. */
928 static int __init ppp_init(void)
929 {
930 	int err;
931 
932 	pr_info("PPP generic driver version " PPP_VERSION "\n");
933 
934 	err = register_pernet_device(&ppp_net_ops);
935 	if (err) {
936 		pr_err("failed to register PPP pernet device (%d)\n", err);
937 		goto out;
938 	}
939 
940 	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
941 	if (err) {
942 		pr_err("failed to register PPP device (%d)\n", err);
943 		goto out_net;
944 	}
945 
946 	ppp_class = class_create(THIS_MODULE, "ppp");
947 	if (IS_ERR(ppp_class)) {
948 		err = PTR_ERR(ppp_class);
949 		goto out_chrdev;
950 	}
951 
952 	/* not a big deal if we fail here :-) */
953 	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
954 
955 	return 0;
956 
957 out_chrdev:
958 	unregister_chrdev(PPP_MAJOR, "ppp");
959 out_net:
960 	unregister_pernet_device(&ppp_net_ops);
961 out:
962 	return err;
963 }
964 
965 /*
966  * Network interface unit routines.
967  */
968 static netdev_tx_t
969 ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
970 {
971 	struct ppp *ppp = netdev_priv(dev);
972 	int npi, proto;
973 	unsigned char *pp;
974 
975 	npi = ethertype_to_npindex(ntohs(skb->protocol));
976 	if (npi < 0)
977 		goto outf;
978 
979 	/* Drop, accept or reject the packet */
980 	switch (ppp->npmode[npi]) {
981 	case NPMODE_PASS:
982 		break;
983 	case NPMODE_QUEUE:
984 		/* it would be nice to have a way to tell the network
985 		   system to queue this one up for later. */
986 		goto outf;
987 	case NPMODE_DROP:
988 	case NPMODE_ERROR:
989 		goto outf;
990 	}
991 
992 	/* Put the 2-byte PPP protocol number on the front,
993 	   making sure there is room for the address and control fields. */
994 	if (skb_cow_head(skb, PPP_HDRLEN))
995 		goto outf;
996 
997 	pp = skb_push(skb, 2);
998 	proto = npindex_to_proto[npi];
999 	put_unaligned_be16(proto, pp);
1000 
1001 	skb_queue_tail(&ppp->file.xq, skb);
1002 	ppp_xmit_process(ppp);
1003 	return NETDEV_TX_OK;
1004 
1005  outf:
1006 	kfree_skb(skb);
1007 	++dev->stats.tx_dropped;
1008 	return NETDEV_TX_OK;
1009 }
1010 
1011 static int
1012 ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1013 {
1014 	struct ppp *ppp = netdev_priv(dev);
1015 	int err = -EFAULT;
1016 	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
1017 	struct ppp_stats stats;
1018 	struct ppp_comp_stats cstats;
1019 	char *vers;
1020 
1021 	switch (cmd) {
1022 	case SIOCGPPPSTATS:
1023 		ppp_get_stats(ppp, &stats);
1024 		if (copy_to_user(addr, &stats, sizeof(stats)))
1025 			break;
1026 		err = 0;
1027 		break;
1028 
1029 	case SIOCGPPPCSTATS:
1030 		memset(&cstats, 0, sizeof(cstats));
1031 		if (ppp->xc_state)
1032 			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
1033 		if (ppp->rc_state)
1034 			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
1035 		if (copy_to_user(addr, &cstats, sizeof(cstats)))
1036 			break;
1037 		err = 0;
1038 		break;
1039 
1040 	case SIOCGPPPVER:
1041 		vers = PPP_VERSION;
1042 		if (copy_to_user(addr, vers, strlen(vers) + 1))
1043 			break;
1044 		err = 0;
1045 		break;
1046 
1047 	default:
1048 		err = -EINVAL;
1049 	}
1050 
1051 	return err;
1052 }
1053 
1054 static struct rtnl_link_stats64*
1055 ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1056 {
1057 	struct ppp *ppp = netdev_priv(dev);
1058 
1059 	ppp_recv_lock(ppp);
1060 	stats64->rx_packets = ppp->stats64.rx_packets;
1061 	stats64->rx_bytes   = ppp->stats64.rx_bytes;
1062 	ppp_recv_unlock(ppp);
1063 
1064 	ppp_xmit_lock(ppp);
1065 	stats64->tx_packets = ppp->stats64.tx_packets;
1066 	stats64->tx_bytes   = ppp->stats64.tx_bytes;
1067 	ppp_xmit_unlock(ppp);
1068 
1069 	stats64->rx_errors        = dev->stats.rx_errors;
1070 	stats64->tx_errors        = dev->stats.tx_errors;
1071 	stats64->rx_dropped       = dev->stats.rx_dropped;
1072 	stats64->tx_dropped       = dev->stats.tx_dropped;
1073 	stats64->rx_length_errors = dev->stats.rx_length_errors;
1074 
1075 	return stats64;
1076 }
1077 
1078 static struct lock_class_key ppp_tx_busylock;
1079 static int ppp_dev_init(struct net_device *dev)
1080 {
1081 	dev->qdisc_tx_busylock = &ppp_tx_busylock;
1082 	return 0;
1083 }
1084 
1085 static const struct net_device_ops ppp_netdev_ops = {
1086 	.ndo_init	 = ppp_dev_init,
1087 	.ndo_start_xmit  = ppp_start_xmit,
1088 	.ndo_do_ioctl    = ppp_net_ioctl,
1089 	.ndo_get_stats64 = ppp_get_stats64,
1090 };
1091 
1092 static void ppp_setup(struct net_device *dev)
1093 {
1094 	dev->netdev_ops = &ppp_netdev_ops;
1095 	dev->hard_header_len = PPP_HDRLEN;
1096 	dev->mtu = PPP_MRU;
1097 	dev->addr_len = 0;
1098 	dev->tx_queue_len = 3;
1099 	dev->type = ARPHRD_PPP;
1100 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1101 	dev->features |= NETIF_F_NETNS_LOCAL;
1102 	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1103 }
1104 
1105 /*
1106  * Transmit-side routines.
1107  */
1108 
1109 /*
1110  * Called to do any work queued up on the transmit side
1111  * that can now be done.
1112  */
1113 static void
1114 ppp_xmit_process(struct ppp *ppp)
1115 {
1116 	struct sk_buff *skb;
1117 
1118 	ppp_xmit_lock(ppp);
1119 	if (!ppp->closing) {
1120 		ppp_push(ppp);
1121 		while (!ppp->xmit_pending &&
1122 		       (skb = skb_dequeue(&ppp->file.xq)))
1123 			ppp_send_frame(ppp, skb);
1124 		/* If there's no work left to do, tell the core net
1125 		   code that we can accept some more. */
1126 		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1127 			netif_wake_queue(ppp->dev);
1128 		else
1129 			netif_stop_queue(ppp->dev);
1130 	}
1131 	ppp_xmit_unlock(ppp);
1132 }
1133 
1134 static inline struct sk_buff *
1135 pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1136 {
1137 	struct sk_buff *new_skb;
1138 	int len;
1139 	int new_skb_size = ppp->dev->mtu +
1140 		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1141 	int compressor_skb_size = ppp->dev->mtu +
1142 		ppp->xcomp->comp_extra + PPP_HDRLEN;
1143 	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1144 	if (!new_skb) {
1145 		if (net_ratelimit())
1146 			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1147 		return NULL;
1148 	}
1149 	if (ppp->dev->hard_header_len > PPP_HDRLEN)
1150 		skb_reserve(new_skb,
1151 			    ppp->dev->hard_header_len - PPP_HDRLEN);
1152 
1153 	/* compressor still expects A/C bytes in hdr */
1154 	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1155 				   new_skb->data, skb->len + 2,
1156 				   compressor_skb_size);
1157 	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1158 		consume_skb(skb);
1159 		skb = new_skb;
1160 		skb_put(skb, len);
1161 		skb_pull(skb, 2);	/* pull off A/C bytes */
1162 	} else if (len == 0) {
1163 		/* didn't compress, or CCP not up yet */
1164 		consume_skb(new_skb);
1165 		new_skb = skb;
1166 	} else {
1167 		/*
1168 		 * (len < 0)
1169 		 * MPPE requires that we do not send unencrypted
1170 		 * frames.  The compressor will return -1 if we
1171 		 * should drop the frame.  We cannot simply test
1172 		 * the compress_proto because MPPE and MPPC share
1173 		 * the same number.
1174 		 */
1175 		if (net_ratelimit())
1176 			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1177 		kfree_skb(skb);
1178 		consume_skb(new_skb);
1179 		new_skb = NULL;
1180 	}
1181 	return new_skb;
1182 }
1183 
1184 /*
1185  * Compress and send a frame.
1186  * The caller should have locked the xmit path,
1187  * and xmit_pending should be 0.
1188  */
1189 static void
1190 ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1191 {
1192 	int proto = PPP_PROTO(skb);
1193 	struct sk_buff *new_skb;
1194 	int len;
1195 	unsigned char *cp;
1196 
1197 	if (proto < 0x8000) {
1198 #ifdef CONFIG_PPP_FILTER
1199 		/* check if we should pass this packet */
1200 		/* the filter instructions are constructed assuming
1201 		   a four-byte PPP header on each packet */
1202 		*skb_push(skb, 2) = 1;
1203 		if (ppp->pass_filter &&
1204 		    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
1205 			if (ppp->debug & 1)
1206 				netdev_printk(KERN_DEBUG, ppp->dev,
1207 					      "PPP: outbound frame "
1208 					      "not passed\n");
1209 			kfree_skb(skb);
1210 			return;
1211 		}
1212 		/* if this packet passes the active filter, record the time */
1213 		if (!(ppp->active_filter &&
1214 		      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
1215 			ppp->last_xmit = jiffies;
1216 		skb_pull(skb, 2);
1217 #else
1218 		/* for data packets, record the time */
1219 		ppp->last_xmit = jiffies;
1220 #endif /* CONFIG_PPP_FILTER */
1221 	}
1222 
1223 	++ppp->stats64.tx_packets;
1224 	ppp->stats64.tx_bytes += skb->len - 2;
1225 
1226 	switch (proto) {
1227 	case PPP_IP:
1228 		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
1229 			break;
1230 		/* try to do VJ TCP header compression */
1231 		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1232 				    GFP_ATOMIC);
1233 		if (!new_skb) {
1234 			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1235 			goto drop;
1236 		}
1237 		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1238 		cp = skb->data + 2;
1239 		len = slhc_compress(ppp->vj, cp, skb->len - 2,
1240 				    new_skb->data + 2, &cp,
1241 				    !(ppp->flags & SC_NO_TCP_CCID));
1242 		if (cp == skb->data + 2) {
1243 			/* didn't compress */
1244 			consume_skb(new_skb);
1245 		} else {
1246 			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1247 				proto = PPP_VJC_COMP;
1248 				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1249 			} else {
1250 				proto = PPP_VJC_UNCOMP;
1251 				cp[0] = skb->data[2];
1252 			}
1253 			consume_skb(skb);
1254 			skb = new_skb;
1255 			cp = skb_put(skb, len + 2);
1256 			cp[0] = 0;
1257 			cp[1] = proto;
1258 		}
1259 		break;
1260 
1261 	case PPP_CCP:
1262 		/* peek at outbound CCP frames */
1263 		ppp_ccp_peek(ppp, skb, 0);
1264 		break;
1265 	}
1266 
1267 	/* try to do packet compression */
1268 	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
1269 	    proto != PPP_LCP && proto != PPP_CCP) {
1270 		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1271 			if (net_ratelimit())
1272 				netdev_err(ppp->dev,
1273 					   "ppp: compression required but "
1274 					   "down - pkt dropped.\n");
1275 			goto drop;
1276 		}
1277 		skb = pad_compress_skb(ppp, skb);
1278 		if (!skb)
1279 			goto drop;
1280 	}
1281 
1282 	/*
1283 	 * If we are waiting for traffic (demand dialling),
1284 	 * queue it up for pppd to receive.
1285 	 */
1286 	if (ppp->flags & SC_LOOP_TRAFFIC) {
1287 		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1288 			goto drop;
1289 		skb_queue_tail(&ppp->file.rq, skb);
1290 		wake_up_interruptible(&ppp->file.rwait);
1291 		return;
1292 	}
1293 
1294 	ppp->xmit_pending = skb;
1295 	ppp_push(ppp);
1296 	return;
1297 
1298  drop:
1299 	kfree_skb(skb);
1300 	++ppp->dev->stats.tx_errors;
1301 }
1302 
1303 /*
1304  * Try to send the frame in xmit_pending.
1305  * The caller should have the xmit path locked.
1306  */
1307 static void
1308 ppp_push(struct ppp *ppp)
1309 {
1310 	struct list_head *list;
1311 	struct channel *pch;
1312 	struct sk_buff *skb = ppp->xmit_pending;
1313 
1314 	if (!skb)
1315 		return;
1316 
1317 	list = &ppp->channels;
1318 	if (list_empty(list)) {
1319 		/* nowhere to send the packet, just drop it */
1320 		ppp->xmit_pending = NULL;
1321 		kfree_skb(skb);
1322 		return;
1323 	}
1324 
1325 	if ((ppp->flags & SC_MULTILINK) == 0) {
1326 		/* not doing multilink: send it down the first channel */
1327 		list = list->next;
1328 		pch = list_entry(list, struct channel, clist);
1329 
1330 		spin_lock_bh(&pch->downl);
1331 		if (pch->chan) {
1332 			if (pch->chan->ops->start_xmit(pch->chan, skb))
1333 				ppp->xmit_pending = NULL;
1334 		} else {
1335 			/* channel got unregistered */
1336 			kfree_skb(skb);
1337 			ppp->xmit_pending = NULL;
1338 		}
1339 		spin_unlock_bh(&pch->downl);
1340 		return;
1341 	}
1342 
1343 #ifdef CONFIG_PPP_MULTILINK
1344 	/* Multilink: fragment the packet over as many links
1345 	   as can take the packet at the moment. */
1346 	if (!ppp_mp_explode(ppp, skb))
1347 		return;
1348 #endif /* CONFIG_PPP_MULTILINK */
1349 
1350 	ppp->xmit_pending = NULL;
1351 	kfree_skb(skb);
1352 }
1353 
1354 #ifdef CONFIG_PPP_MULTILINK
1355 static bool mp_protocol_compress __read_mostly = true;
1356 module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
1357 MODULE_PARM_DESC(mp_protocol_compress,
1358 		 "compress protocol id in multilink fragments");
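
/*
 * Illustrative: since the parameter is S_IWUSR, it can be toggled at
 * run time, e.g. via
 * /sys/module/ppp_generic/parameters/mp_protocol_compress (assuming
 * the module is built as ppp_generic).
 */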
1359 
1360 /*
1361  * Divide a packet to be transmitted into fragments and
1362  * send them out the individual links.
1363  */
1364 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1365 {
1366 	int len, totlen;
1367 	int i, bits, hdrlen, mtu;
1368 	int flen;
1369 	int navail, nfree, nzero;
1370 	int nbigger;
1371 	int totspeed;
1372 	int totfree;
1373 	unsigned char *p, *q;
1374 	struct list_head *list;
1375 	struct channel *pch;
1376 	struct sk_buff *frag;
1377 	struct ppp_channel *chan;
1378 
1379 	totspeed = 0;	/* total bitrate of the bundle */
1380 	nfree = 0;	/* # channels which have no packet already queued */
1381 	navail = 0;	/* total # of usable channels (not deregistered) */
1382 	nzero = 0;	/* number of channels with zero speed associated */
1383 	totfree = 0;	/* total # of channels available and having no
1384 			 * queued packets before starting the
1385 			 * fragmentation */
1386 
1387 	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1388 	i = 0;
1389 	list_for_each_entry(pch, &ppp->channels, clist) {
1390 		if (pch->chan) {
1391 			pch->avail = 1;
1392 			navail++;
1393 			pch->speed = pch->chan->speed;
1394 		} else {
1395 			pch->avail = 0;
1396 		}
1397 		if (pch->avail) {
1398 			if (skb_queue_empty(&pch->file.xq) ||
1399 				!pch->had_frag) {
1400 					if (pch->speed == 0)
1401 						nzero++;
1402 					else
1403 						totspeed += pch->speed;
1404 
1405 					pch->avail = 2;
1406 					++nfree;
1407 					++totfree;
1408 				}
1409 			if (!pch->had_frag && i < ppp->nxchan)
1410 				ppp->nxchan = i;
1411 		}
1412 		++i;
1413 	}
1414 	/*
1415 	 * Don't start sending this packet unless at least half of
1416 	 * the channels are free.  This gives much better TCP
1417 	 * performance if we have a lot of channels.
1418 	 */
1419 	if (nfree == 0 || nfree < navail / 2)
1420 		return 0; /* can't take now, leave it in xmit_pending */
1421 
1422 	/* Do protocol field compression */
1423 	p = skb->data;
1424 	len = skb->len;
1425 	if (*p == 0 && mp_protocol_compress) {
1426 		++p;
1427 		--len;
1428 	}
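
	/*
	 * Illustrative: an IP packet starts with protocol bytes
	 * 0x00 0x21, so skipping the leading zero sends the one-byte
	 * compressed protocol id 0x21; the receiver restores the zero
	 * in ppp_receive_mp_frame().
	 */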
1429 
1430 	totlen = len;
1431 	nbigger = len % nfree;
1432 
1433 	/* skip to the channel after the one we last used
1434 	   and start at that one */
1435 	list = &ppp->channels;
1436 	for (i = 0; i < ppp->nxchan; ++i) {
1437 		list = list->next;
1438 		if (list == &ppp->channels) {
1439 			i = 0;
1440 			break;
1441 		}
1442 	}
1443 
1444 	/* create a fragment for each channel */
1445 	bits = B;
1446 	while (len > 0) {
1447 		list = list->next;
1448 		if (list == &ppp->channels) {
1449 			i = 0;
1450 			continue;
1451 		}
1452 		pch = list_entry(list, struct channel, clist);
1453 		++i;
1454 		if (!pch->avail)
1455 			continue;
1456 
1457 		/*
1458 		 * Skip this channel if it has a fragment pending already and
1459 		 * we haven't given a fragment to all of the free channels.
1460 		 */
1461 		if (pch->avail == 1) {
1462 			if (nfree > 0)
1463 				continue;
1464 		} else {
1465 			pch->avail = 1;
1466 		}
1467 
1468 		/* check the channel's mtu and whether it is still attached. */
1469 		spin_lock_bh(&pch->downl);
1470 		if (pch->chan == NULL) {
1471 			/* can't use this channel, it's being deregistered */
1472 			if (pch->speed == 0)
1473 				nzero--;
1474 			else
1475 				totspeed -= pch->speed;
1476 
1477 			spin_unlock_bh(&pch->downl);
1478 			pch->avail = 0;
1479 			totlen = len;
1480 			totfree--;
1481 			nfree--;
1482 			if (--navail == 0)
1483 				break;
1484 			continue;
1485 		}
1486 
1487 		/*
1488 		 * If the channel speed is not set, divide the packet
1489 		 * evenly among the free channels; otherwise divide it
1490 		 * according to the speed of the channel we are going
1491 		 * to transmit on.
1492 		 */
1493 		flen = len;
1494 		if (nfree > 0) {
1495 			if (pch->speed == 0) {
1496 				flen = len/nfree;
1497 				if (nbigger > 0) {
1498 					flen++;
1499 					nbigger--;
1500 				}
1501 			} else {
1502 				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
1503 					((totspeed*totfree)/pch->speed)) - hdrlen;
1504 				if (nbigger > 0) {
1505 					flen += ((totfree - nzero)*pch->speed)/totspeed;
1506 					nbigger -= ((totfree - nzero)*pch->speed)/
1507 							totspeed;
1508 				}
1509 			}
1510 			nfree--;
1511 		}
1512 
1513 		/*
1514 		 * Check if we are on the last channel or we
1515 		 * exceeded the length of the data to
1516 		 * fragment.
1517 		 */
1518 		if ((nfree <= 0) || (flen > len))
1519 			flen = len;
1520 		/*
1521 		 * It is not worth transmitting on slow channels:
1522 		 * in that case the flen resulting from the above
1523 		 * formula will be less than or equal to zero.
1524 		 * Skip the channel in this case.
1525 		 */
1526 		if (flen <= 0) {
1527 			pch->avail = 2;
1528 			spin_unlock_bh(&pch->downl);
1529 			continue;
1530 		}
1531 
1532 		/*
1533 		 * hdrlen includes the 2-byte PPP protocol field, but the
1534 		 * MTU counts only the payload excluding the protocol field.
1535 		 * (RFC1661 Section 2)
1536 		 */
1537 		mtu = pch->chan->mtu - (hdrlen - 2);
1538 		if (mtu < 4)
1539 			mtu = 4;
1540 		if (flen > mtu)
1541 			flen = mtu;
1542 		if (flen == len)
1543 			bits |= E;
1544 		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1545 		if (!frag)
1546 			goto noskb;
1547 		q = skb_put(frag, flen + hdrlen);
1548 
1549 		/* make the MP header */
1550 		put_unaligned_be16(PPP_MP, q);
1551 		if (ppp->flags & SC_MP_XSHORTSEQ) {
1552 			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1553 			q[3] = ppp->nxseq;
1554 		} else {
1555 			q[2] = bits;
1556 			q[3] = ppp->nxseq >> 16;
1557 			q[4] = ppp->nxseq >> 8;
1558 			q[5] = ppp->nxseq;
1559 		}
1560 
1561 		memcpy(q + hdrlen, p, flen);
1562 
1563 		/* try to send it down the channel */
1564 		chan = pch->chan;
1565 		if (!skb_queue_empty(&pch->file.xq) ||
1566 			!chan->ops->start_xmit(chan, frag))
1567 			skb_queue_tail(&pch->file.xq, frag);
1568 		pch->had_frag = 1;
1569 		p += flen;
1570 		len -= flen;
1571 		++ppp->nxseq;
1572 		bits = 0;
1573 		spin_unlock_bh(&pch->downl);
1574 	}
1575 	ppp->nxchan = i;
1576 
1577 	return 1;
1578 
1579  noskb:
1580 	spin_unlock_bh(&pch->downl);
1581 	if (ppp->debug & 1)
1582 		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1583 	++ppp->dev->stats.tx_errors;
1584 	++ppp->nxseq;
1585 	return 1;	/* abandon the frame */
1586 }
1587 #endif /* CONFIG_PPP_MULTILINK */
1588 
1589 /*
1590  * Try to send data out on a channel.
1591  */
1592 static void
1593 ppp_channel_push(struct channel *pch)
1594 {
1595 	struct sk_buff *skb;
1596 	struct ppp *ppp;
1597 
1598 	spin_lock_bh(&pch->downl);
1599 	if (pch->chan) {
1600 		while (!skb_queue_empty(&pch->file.xq)) {
1601 			skb = skb_dequeue(&pch->file.xq);
1602 			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
1603 				/* put the packet back and try again later */
1604 				skb_queue_head(&pch->file.xq, skb);
1605 				break;
1606 			}
1607 		}
1608 	} else {
1609 		/* channel got deregistered */
1610 		skb_queue_purge(&pch->file.xq);
1611 	}
1612 	spin_unlock_bh(&pch->downl);
1613 	/* see if there is anything from the attached unit to be sent */
1614 	if (skb_queue_empty(&pch->file.xq)) {
1615 		read_lock_bh(&pch->upl);
1616 		ppp = pch->ppp;
1617 		if (ppp)
1618 			ppp_xmit_process(ppp);
1619 		read_unlock_bh(&pch->upl);
1620 	}
1621 }
1622 
1623 /*
1624  * Receive-side routines.
1625  */
1626 
1627 struct ppp_mp_skb_parm {
1628 	u32		sequence;
1629 	u8		BEbits;
1630 };
1631 #define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))
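
/*
 * Illustrative use (mirrors ppp_receive_mp_frame() below): per-fragment
 * MP state lives in the skb control block, e.g.
 *
 *	PPP_MP_CB(skb)->sequence = seq;
 *	if (PPP_MP_CB(skb)->BEbits & E)
 *		... this fragment ends a packet ...
 */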
1632 
1633 static inline void
1634 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1635 {
1636 	ppp_recv_lock(ppp);
1637 	if (!ppp->closing)
1638 		ppp_receive_frame(ppp, skb, pch);
1639 	else
1640 		kfree_skb(skb);
1641 	ppp_recv_unlock(ppp);
1642 }
1643 
1644 void
1645 ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1646 {
1647 	struct channel *pch = chan->ppp;
1648 	int proto;
1649 
1650 	if (!pch) {
1651 		kfree_skb(skb);
1652 		return;
1653 	}
1654 
1655 	read_lock_bh(&pch->upl);
1656 	if (!pskb_may_pull(skb, 2)) {
1657 		kfree_skb(skb);
1658 		if (pch->ppp) {
1659 			++pch->ppp->dev->stats.rx_length_errors;
1660 			ppp_receive_error(pch->ppp);
1661 		}
1662 		goto done;
1663 	}
1664 
1665 	proto = PPP_PROTO(skb);
1666 	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
1667 		/* put it on the channel queue */
1668 		skb_queue_tail(&pch->file.rq, skb);
1669 		/* drop old frames if queue too long */
1670 		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
1671 		       (skb = skb_dequeue(&pch->file.rq)))
1672 			kfree_skb(skb);
1673 		wake_up_interruptible(&pch->file.rwait);
1674 	} else {
1675 		ppp_do_recv(pch->ppp, skb, pch);
1676 	}
1677 
1678 done:
1679 	read_unlock_bh(&pch->upl);
1680 }
1681 
1682 /* Put a 0-length skb in the receive queue as an error indication */
1683 void
1684 ppp_input_error(struct ppp_channel *chan, int code)
1685 {
1686 	struct channel *pch = chan->ppp;
1687 	struct sk_buff *skb;
1688 
1689 	if (!pch)
1690 		return;
1691 
1692 	read_lock_bh(&pch->upl);
1693 	if (pch->ppp) {
1694 		skb = alloc_skb(0, GFP_ATOMIC);
1695 		if (skb) {
1696 			skb->len = 0;		/* probably unnecessary */
1697 			skb->cb[0] = code;
1698 			ppp_do_recv(pch->ppp, skb, pch);
1699 		}
1700 	}
1701 	read_unlock_bh(&pch->upl);
1702 }
1703 
1704 /*
1705  * We come in here to process a received frame.
1706  * The receive side of the ppp unit is locked.
1707  */
1708 static void
1709 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1710 {
1711 	/* note: a 0-length skb is used as an error indication */
1712 	if (skb->len > 0) {
1713 #ifdef CONFIG_PPP_MULTILINK
1714 		/* XXX do channel-level decompression here */
1715 		if (PPP_PROTO(skb) == PPP_MP)
1716 			ppp_receive_mp_frame(ppp, skb, pch);
1717 		else
1718 #endif /* CONFIG_PPP_MULTILINK */
1719 			ppp_receive_nonmp_frame(ppp, skb);
1720 	} else {
1721 		kfree_skb(skb);
1722 		ppp_receive_error(ppp);
1723 	}
1724 }
1725 
1726 static void
1727 ppp_receive_error(struct ppp *ppp)
1728 {
1729 	++ppp->dev->stats.rx_errors;
1730 	if (ppp->vj)
1731 		slhc_toss(ppp->vj);
1732 }
1733 
1734 static void
1735 ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1736 {
1737 	struct sk_buff *ns;
1738 	int proto, len, npi;
1739 
1740 	/*
1741 	 * Decompress the frame, if compressed.
1742 	 * Note that some decompressors need to see uncompressed frames
1743 	 * that come in as well as compressed frames.
1744 	 */
1745 	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
1746 	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
1747 		skb = ppp_decompress_frame(ppp, skb);
1748 
1749 	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
1750 		goto err;
1751 
1752 	proto = PPP_PROTO(skb);
1753 	switch (proto) {
1754 	case PPP_VJC_COMP:
1755 		/* decompress VJ compressed packets */
1756 		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
1757 			goto err;
1758 
1759 		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
1760 			/* copy to a new sk_buff with more tailroom */
1761 			ns = dev_alloc_skb(skb->len + 128);
1762 			if (!ns) {
1763 				netdev_err(ppp->dev, "PPP: no memory "
1764 					   "(VJ decomp)\n");
1765 				goto err;
1766 			}
1767 			skb_reserve(ns, 2);
1768 			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
1769 			consume_skb(skb);
1770 			skb = ns;
1771 		}
1772 		else
1773 			skb->ip_summed = CHECKSUM_NONE;
1774 
1775 		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1776 		if (len <= 0) {
1777 			netdev_printk(KERN_DEBUG, ppp->dev,
1778 				      "PPP: VJ decompression error\n");
1779 			goto err;
1780 		}
1781 		len += 2;
1782 		if (len > skb->len)
1783 			skb_put(skb, len - skb->len);
1784 		else if (len < skb->len)
1785 			skb_trim(skb, len);
1786 		proto = PPP_IP;
1787 		break;
1788 
1789 	case PPP_VJC_UNCOMP:
1790 		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
1791 			goto err;
1792 
1793 		/* Until we fix the decompressor, we need to make
1794 		 * sure the data portion is linear.
1795 		 */
1796 		if (!pskb_may_pull(skb, skb->len))
1797 			goto err;
1798 
1799 		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
1800 			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
1801 			goto err;
1802 		}
1803 		proto = PPP_IP;
1804 		break;
1805 
1806 	case PPP_CCP:
1807 		ppp_ccp_peek(ppp, skb, 1);
1808 		break;
1809 	}
1810 
1811 	++ppp->stats64.rx_packets;
1812 	ppp->stats64.rx_bytes += skb->len - 2;
1813 
1814 	npi = proto_to_npindex(proto);
1815 	if (npi < 0) {
1816 		/* control or unknown frame - pass it to pppd */
1817 		skb_queue_tail(&ppp->file.rq, skb);
1818 		/* limit queue length by dropping old frames */
1819 		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
1820 		       (skb = skb_dequeue(&ppp->file.rq)))
1821 			kfree_skb(skb);
1822 		/* wake up any process polling or blocking on read */
1823 		wake_up_interruptible(&ppp->file.rwait);
1824 
1825 	} else {
1826 		/* network protocol frame - give it to the kernel */
1827 
1828 #ifdef CONFIG_PPP_FILTER
1829 		/* check if the packet passes the pass and active filters */
1830 		/* the filter instructions are constructed assuming
1831 		   a four-byte PPP header on each packet */
1832 		if (ppp->pass_filter || ppp->active_filter) {
1833 			if (skb_unclone(skb, GFP_ATOMIC))
1834 				goto err;
1835 
1836 			*skb_push(skb, 2) = 0;
1837 			if (ppp->pass_filter &&
1838 			    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
1839 				if (ppp->debug & 1)
1840 					netdev_printk(KERN_DEBUG, ppp->dev,
1841 						      "PPP: inbound frame "
1842 						      "not passed\n");
1843 				kfree_skb(skb);
1844 				return;
1845 			}
1846 			if (!(ppp->active_filter &&
1847 			      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
1848 				ppp->last_recv = jiffies;
1849 			__skb_pull(skb, 2);
1850 		} else
1851 #endif /* CONFIG_PPP_FILTER */
1852 			ppp->last_recv = jiffies;
1853 
1854 		if ((ppp->dev->flags & IFF_UP) == 0 ||
1855 		    ppp->npmode[npi] != NPMODE_PASS) {
1856 			kfree_skb(skb);
1857 		} else {
1858 			/* chop off protocol */
1859 			skb_pull_rcsum(skb, 2);
1860 			skb->dev = ppp->dev;
1861 			skb->protocol = htons(npindex_to_ethertype[npi]);
1862 			skb_reset_mac_header(skb);
1863 			netif_rx(skb);
1864 		}
1865 	}
1866 	return;
1867 
1868  err:
1869 	kfree_skb(skb);
1870 	ppp_receive_error(ppp);
1871 }
1872 
1873 static struct sk_buff *
1874 ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1875 {
1876 	int proto = PPP_PROTO(skb);
1877 	struct sk_buff *ns;
1878 	int len;
1879 
1880 	/* Until we fix all the decompressors, we need to make
1881 	 * sure the data portion is linear.
1882 	 */
1883 	if (!pskb_may_pull(skb, skb->len))
1884 		goto err;
1885 
1886 	if (proto == PPP_COMP) {
1887 		int obuff_size;
1888 
1889 		switch(ppp->rcomp->compress_proto) {
1890 		case CI_MPPE:
1891 			obuff_size = ppp->mru + PPP_HDRLEN + 1;
1892 			break;
1893 		default:
1894 			obuff_size = ppp->mru + PPP_HDRLEN;
1895 			break;
1896 		}
1897 
1898 		ns = dev_alloc_skb(obuff_size);
1899 		if (!ns) {
1900 			netdev_err(ppp->dev, "ppp_decompress_frame: "
1901 				   "no memory\n");
1902 			goto err;
1903 		}
1904 		/* the decompressor still expects the A/C bytes in the hdr */
1905 		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
1906 				skb->len + 2, ns->data, obuff_size);
1907 		if (len < 0) {
1908 			/* Pass the compressed frame to pppd as an
1909 			   error indication. */
1910 			if (len == DECOMP_FATALERROR)
1911 				ppp->rstate |= SC_DC_FERROR;
1912 			kfree_skb(ns);
1913 			goto err;
1914 		}
1915 
1916 		consume_skb(skb);
1917 		skb = ns;
1918 		skb_put(skb, len);
1919 		skb_pull(skb, 2);	/* pull off the A/C bytes */
1920 
1921 	} else {
1922 		/* Uncompressed frame - pass to decompressor so it
1923 		   can update its dictionary if necessary. */
1924 		if (ppp->rcomp->incomp)
1925 			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
1926 					   skb->len + 2);
1927 	}
1928 
1929 	return skb;
1930 
1931  err:
1932 	ppp->rstate |= SC_DC_ERROR;
1933 	ppp_receive_error(ppp);
1934 	return skb;
1935 }
1936 
1937 #ifdef CONFIG_PPP_MULTILINK
1938 /*
1939  * Receive a multilink frame.
1940  * We put it on the reconstruction queue and then pull off
1941  * as many completed frames as we can.
1942  */
1943 static void
1944 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1945 {
1946 	u32 mask, seq;
1947 	struct channel *ch;
1948 	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1949 
1950 	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
1951 		goto err;		/* no good, throw it away */
1952 
1953 	/* Decode sequence number and begin/end bits */
1954 	if (ppp->flags & SC_MP_SHORTSEQ) {
1955 		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
1956 		mask = 0xfff;
1957 	} else {
1958 		seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
1959 		mask = 0xffffff;
1960 	}
1961 	PPP_MP_CB(skb)->BEbits = skb->data[2];
1962 	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */
1963 
1964 	/*
1965 	 * Do protocol ID decompression on the first fragment of each packet.
1966 	 */
1967 	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
1968 		*skb_push(skb, 1) = 0;
1969 
1970 	/*
1971 	 * Expand sequence number to 32 bits, making it as close
1972 	 * as possible to ppp->minseq.
1973 	 */
1974 	seq |= ppp->minseq & ~mask;
1975 	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
1976 		seq += mask + 1;
1977 	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
1978 		seq -= mask + 1;	/* should never happen */
1979 	PPP_MP_CB(skb)->sequence = seq;
1980 	pch->lastseq = seq;
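	/*
	 * Worked example (hypothetical numbers): with short sequence
	 * numbers, mask = 0xfff.  If ppp->minseq = 0x12ffe and the
	 * received 12-bit value is 0x001, then seq = 0x12000 | 0x001 =
	 * 0x12001; since minseq - seq = 0xffd > mask >> 1, we add
	 * mask + 1 (0x1000), giving seq = 0x13001 -- the 32-bit
	 * expansion closest to minseq.
	 */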
1981 
1982 	/*
1983 	 * If this packet comes before the next one we were expecting,
1984 	 * drop it.
1985 	 */
1986 	if (seq_before(seq, ppp->nextseq)) {
1987 		kfree_skb(skb);
1988 		++ppp->dev->stats.rx_dropped;
1989 		ppp_receive_error(ppp);
1990 		return;
1991 	}
1992 
1993 	/*
1994 	 * Reevaluate minseq, the minimum over all channels of the
1995 	 * last sequence number received on each channel.  Because of
1996 	 * the increasing sequence number rule, we know that any fragment
1997 	 * before `minseq' which hasn't arrived is never going to arrive.
1998 	 * The list of channels can't change because we have the receive
1999 	 * side of the ppp unit locked.
2000 	 */
2001 	list_for_each_entry(ch, &ppp->channels, clist) {
2002 		if (seq_before(ch->lastseq, seq))
2003 			seq = ch->lastseq;
2004 	}
2005 	if (seq_before(ppp->minseq, seq))
2006 		ppp->minseq = seq;
2007 
2008 	/* Put the fragment on the reconstruction queue */
2009 	ppp_mp_insert(ppp, skb);
2010 
2011 	/* If the queue is getting long, don't wait any longer for packets
2012 	   before the start of the queue. */
2013 	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
2014 		struct sk_buff *mskb = skb_peek(&ppp->mrq);
2015 		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
2016 			ppp->minseq = PPP_MP_CB(mskb)->sequence;
2017 	}
2018 
2019 	/* Pull completed packets off the queue and receive them. */
2020 	while ((skb = ppp_mp_reconstruct(ppp))) {
2021 		if (pskb_may_pull(skb, 2))
2022 			ppp_receive_nonmp_frame(ppp, skb);
2023 		else {
2024 			++ppp->dev->stats.rx_length_errors;
2025 			kfree_skb(skb);
2026 			ppp_receive_error(ppp);
2027 		}
2028 	}
2029 
2030 	return;
2031 
2032  err:
2033 	kfree_skb(skb);
2034 	ppp_receive_error(ppp);
2035 }
2036 
2037 /*
2038  * Insert a fragment on the MP reconstruction queue.
2039  * The queue is ordered by increasing sequence number.
2040  */
2041 static void
2042 ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
2043 {
2044 	struct sk_buff *p;
2045 	struct sk_buff_head *list = &ppp->mrq;
2046 	u32 seq = PPP_MP_CB(skb)->sequence;
2047 
2048 	/* N.B. we don't need to lock the list lock because we have the
2049 	   ppp unit receive-side lock. */
2050 	skb_queue_walk(list, p) {
2051 		if (seq_before(seq, PPP_MP_CB(p)->sequence))
2052 			break;
2053 	}
2054 	__skb_queue_before(list, p, skb);
2055 }
2056 
2057 /*
2058  * Reconstruct a packet from the MP fragment queue.
2059  * We go through increasing sequence numbers until we find a
2060  * complete packet, or we get to the sequence number for a fragment
2061  * which hasn't arrived but might still do so.
2062  */
2063 static struct sk_buff *
2064 ppp_mp_reconstruct(struct ppp *ppp)
2065 {
2066 	u32 seq = ppp->nextseq;
2067 	u32 minseq = ppp->minseq;
2068 	struct sk_buff_head *list = &ppp->mrq;
2069 	struct sk_buff *p, *tmp;
2070 	struct sk_buff *head, *tail;
2071 	struct sk_buff *skb = NULL;
2072 	int lost = 0, len = 0;
2073 
2074 	if (ppp->mrru == 0)	/* do nothing until mrru is set */
2075 		return NULL;
2076 	head = list->next;
2077 	tail = NULL;
2078 	skb_queue_walk_safe(list, p, tmp) {
2079 	again:
2080 		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2081 			/* this can't happen; ignore the skb anyway */
2082 			netdev_err(ppp->dev,
2083 				   "ppp_mp_reconstruct bad seq %u < %u\n",
2084 				   PPP_MP_CB(p)->sequence, seq);
2085 			__skb_unlink(p, list);
2086 			kfree_skb(p);
2087 			continue;
2088 		}
2089 		if (PPP_MP_CB(p)->sequence != seq) {
2090 			u32 oldseq;
2091 			/* Fragment `seq' is missing.  If it is after
2092 			   minseq, it might arrive later, so stop here. */
2093 			if (seq_after(seq, minseq))
2094 				break;
2095 			/* Fragment `seq' is lost, keep going. */
2096 			lost = 1;
2097 			oldseq = seq;
2098 			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
2099 				minseq + 1 : PPP_MP_CB(p)->sequence;
2100 
2101 			if (ppp->debug & 1)
2102 				netdev_printk(KERN_DEBUG, ppp->dev,
2103 					      "lost frag %u..%u\n",
2104 					      oldseq, seq-1);
2105 
2106 			goto again;
2107 		}
2108 
2109 		/*
2110 		 * At this point we know that all the fragments from
2111 		 * ppp->nextseq to seq are either present or lost.
2112 		 * Also, the queue holds no packet that both ends
2113 		 * before this fragment and has every one of its
2114 		 * fragments present.
2115 		 */
2116 
2117 		/* B bit set indicates this fragment starts a packet */
2118 		if (PPP_MP_CB(p)->BEbits & B) {
2119 			head = p;
2120 			lost = 0;
2121 			len = 0;
2122 		}
2123 
2124 		len += p->len;
2125 
2126 		/* Got a complete packet yet? */
2127 		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2128 		    (PPP_MP_CB(head)->BEbits & B)) {
2129 			if (len > ppp->mrru + 2) {
2130 				++ppp->dev->stats.rx_length_errors;
2131 				netdev_printk(KERN_DEBUG, ppp->dev,
2132 					      "PPP: reconstructed packet"
2133 					      " is too long (%d)\n", len);
2134 			} else {
2135 				tail = p;
2136 				break;
2137 			}
2138 			ppp->nextseq = seq + 1;
2139 		}
2140 
2141 		/*
2142 		 * If this is the ending fragment of a packet,
2143 		 * and we haven't found a complete valid packet yet,
2144 		 * we can discard up to and including this fragment.
2145 		 */
2146 		if (PPP_MP_CB(p)->BEbits & E) {
2147 			struct sk_buff *tmp2;
2148 
2149 			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2150 				if (ppp->debug & 1)
2151 					netdev_printk(KERN_DEBUG, ppp->dev,
2152 						      "discarding frag %u\n",
2153 						      PPP_MP_CB(p)->sequence);
2154 				__skb_unlink(p, list);
2155 				kfree_skb(p);
2156 			}
2157 			head = skb_peek(list);
2158 			if (!head)
2159 				break;
2160 		}
2161 		++seq;
2162 	}
2163 
2164 	/* If we have a complete packet, copy it all into one skb. */
2165 	if (tail != NULL) {
2166 		/* If we have discarded any fragments,
2167 		   signal a receive error. */
2168 		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2169 			skb_queue_walk_safe(list, p, tmp) {
2170 				if (p == head)
2171 					break;
2172 				if (ppp->debug & 1)
2173 					netdev_printk(KERN_DEBUG, ppp->dev,
2174 						      "discarding frag %u\n",
2175 						      PPP_MP_CB(p)->sequence);
2176 				__skb_unlink(p, list);
2177 				kfree_skb(p);
2178 			}
2179 
2180 			if (ppp->debug & 1)
2181 				netdev_printk(KERN_DEBUG, ppp->dev,
2182 					      "  missed pkts %u..%u\n",
2183 					      ppp->nextseq,
2184 					      PPP_MP_CB(head)->sequence-1);
2185 			++ppp->dev->stats.rx_dropped;
2186 			ppp_receive_error(ppp);
2187 		}
2188 
2189 		skb = head;
2190 		if (head != tail) {
2191 			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2192 			p = skb_queue_next(list, head);
2193 			__skb_unlink(skb, list);
2194 			skb_queue_walk_from_safe(list, p, tmp) {
2195 				__skb_unlink(p, list);
2196 				*fragpp = p;
2197 				p->next = NULL;
2198 				fragpp = &p->next;
2199 
2200 				skb->len += p->len;
2201 				skb->data_len += p->len;
2202 				skb->truesize += p->truesize;
2203 
2204 				if (p == tail)
2205 					break;
2206 			}
2207 		} else {
2208 			__skb_unlink(skb, list);
2209 		}
2210 
2211 		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2212 	}
2213 
2214 	return skb;
2215 }
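/*
 * Illustrative walk-through of the loop above (hypothetical numbers):
 * with nextseq == 5 and queued fragments (sequence, BEbits) of
 * (5, B), (6, 0), (7, E), the loop accumulates len across 5..7, finds
 * the E bit on 7 with nothing lost, sets tail, chains fragments 6 and
 * 7 onto fragment 5's frag_list and returns the rebuilt skb; nextseq
 * becomes 8.  Were fragment 6 missing and minseq already at 6 or
 * beyond, the loss would be recorded and the E bit on fragment 7
 * would cause everything up to and including 7 to be discarded.
 */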
2216 #endif /* CONFIG_PPP_MULTILINK */
2217 
2218 /*
2219  * Channel interface.
2220  */
2221 
2222 /* Create a new, unattached ppp channel. */
2223 int ppp_register_channel(struct ppp_channel *chan)
2224 {
2225 	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2226 }
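/*
 * Sketch of how a channel driver might use this interface; the hooks
 * are assumed from linux/ppp_channel.h and the "foo" names are
 * hypothetical, not part of this driver.
 */
#if 0
struct foo_dev {
	struct ppp_channel chan;
	/* ... transport-specific state ... */
};

static int foo_start_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int foo_ioctl(struct ppp_channel *chan, unsigned int cmd,
		     unsigned long arg);

static const struct ppp_channel_ops foo_ops = {
	.start_xmit	= foo_start_xmit,
	.ioctl		= foo_ioctl,
};

static int foo_open(struct foo_dev *foo)
{
	foo->chan.private = foo;
	foo->chan.ops = &foo_ops;
	foo->chan.mtu = 1500;
	foo->chan.hdrlen = 0;	/* headroom the transport needs */
	/* received frames later come in via ppp_input(); a stalled
	 * transmit queue is restarted with ppp_output_wakeup();
	 * teardown goes through ppp_unregister_channel(). */
	return ppp_register_channel(&foo->chan);
}
#endif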
2227 
2228 /* Create a new, unattached ppp channel for specified net. */
2229 int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2230 {
2231 	struct channel *pch;
2232 	struct ppp_net *pn;
2233 
2234 	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2235 	if (!pch)
2236 		return -ENOMEM;
2237 
2238 	pn = ppp_pernet(net);
2239 
2240 	pch->ppp = NULL;
2241 	pch->chan = chan;
2242 	pch->chan_net = net;
2243 	chan->ppp = pch;
2244 	init_ppp_file(&pch->file, CHANNEL);
2245 	pch->file.hdrlen = chan->hdrlen;
2246 #ifdef CONFIG_PPP_MULTILINK
2247 	pch->lastseq = -1;
2248 #endif /* CONFIG_PPP_MULTILINK */
2249 	init_rwsem(&pch->chan_sem);
2250 	spin_lock_init(&pch->downl);
2251 	rwlock_init(&pch->upl);
2252 
2253 	spin_lock_bh(&pn->all_channels_lock);
2254 	pch->file.index = ++pn->last_channel_index;
2255 	list_add(&pch->list, &pn->new_channels);
2256 	atomic_inc(&channel_count);
2257 	spin_unlock_bh(&pn->all_channels_lock);
2258 
2259 	return 0;
2260 }
2261 
2262 /*
2263  * Return the index of a channel.
2264  */
2265 int ppp_channel_index(struct ppp_channel *chan)
2266 {
2267 	struct channel *pch = chan->ppp;
2268 
2269 	if (pch)
2270 		return pch->file.index;
2271 	return -1;
2272 }
2273 
2274 /*
2275  * Return the PPP unit number to which a channel is connected.
2276  */
2277 int ppp_unit_number(struct ppp_channel *chan)
2278 {
2279 	struct channel *pch = chan->ppp;
2280 	int unit = -1;
2281 
2282 	if (pch) {
2283 		read_lock_bh(&pch->upl);
2284 		if (pch->ppp)
2285 			unit = pch->ppp->file.index;
2286 		read_unlock_bh(&pch->upl);
2287 	}
2288 	return unit;
2289 }
2290 
2291 /*
2292  * Return the PPP device interface name of a channel.
2293  */
2294 char *ppp_dev_name(struct ppp_channel *chan)
2295 {
2296 	struct channel *pch = chan->ppp;
2297 	char *name = NULL;
2298 
2299 	if (pch) {
2300 		read_lock_bh(&pch->upl);
2301 		if (pch->ppp && pch->ppp->dev)
2302 			name = pch->ppp->dev->name;
2303 		read_unlock_bh(&pch->upl);
2304 	}
2305 	return name;
2306 }
2307 
2308 
2309 /*
2310  * Disconnect a channel from the generic layer.
2311  * This must be called in process context.
2312  */
2313 void
2314 ppp_unregister_channel(struct ppp_channel *chan)
2315 {
2316 	struct channel *pch = chan->ppp;
2317 	struct ppp_net *pn;
2318 
2319 	if (!pch)
2320 		return;		/* should never happen */
2321 
2322 	chan->ppp = NULL;
2323 
2324 	/*
2325 	 * This ensures that we have returned from any calls into
2326 	 * the channel's start_xmit or ioctl routine before we proceed.
2327 	 */
2328 	down_write(&pch->chan_sem);
2329 	spin_lock_bh(&pch->downl);
2330 	pch->chan = NULL;
2331 	spin_unlock_bh(&pch->downl);
2332 	up_write(&pch->chan_sem);
2333 	ppp_disconnect_channel(pch);
2334 
2335 	pn = ppp_pernet(pch->chan_net);
2336 	spin_lock_bh(&pn->all_channels_lock);
2337 	list_del(&pch->list);
2338 	spin_unlock_bh(&pn->all_channels_lock);
2339 
2340 	pch->file.dead = 1;
2341 	wake_up_interruptible(&pch->file.rwait);
2342 	if (atomic_dec_and_test(&pch->file.refcnt))
2343 		ppp_destroy_channel(pch);
2344 }
2345 
2346 /*
2347  * Callback from a channel when it can accept more to transmit.
2348  * This should be called at BH/softirq level, not interrupt level.
2349  */
2350 void
2351 ppp_output_wakeup(struct ppp_channel *chan)
2352 {
2353 	struct channel *pch = chan->ppp;
2354 
2355 	if (!pch)
2356 		return;
2357 	ppp_channel_push(pch);
2358 }
2359 
2360 /*
2361  * Compression control.
2362  */
2363 
2364 /* Process the PPPIOCSCOMPRESS ioctl. */
2365 static int
2366 ppp_set_compress(struct ppp *ppp, unsigned long arg)
2367 {
2368 	int err;
2369 	struct compressor *cp, *ocomp;
2370 	struct ppp_option_data data;
2371 	void *state, *ostate;
2372 	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2373 
2374 	err = -EFAULT;
2375 	if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
2376 	    (data.length <= CCP_MAX_OPTION_LENGTH &&
2377 	     copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
2378 		goto out;
2379 	err = -EINVAL;
2380 	if (data.length > CCP_MAX_OPTION_LENGTH ||
2381 	    ccp_option[1] < 2 || ccp_option[1] > data.length)
2382 		goto out;
2383 
2384 	cp = try_then_request_module(
2385 		find_compressor(ccp_option[0]),
2386 		"ppp-compress-%d", ccp_option[0]);
2387 	if (!cp)
2388 		goto out;
2389 
2390 	err = -ENOBUFS;
2391 	if (data.transmit) {
2392 		state = cp->comp_alloc(ccp_option, data.length);
2393 		if (state) {
2394 			ppp_xmit_lock(ppp);
2395 			ppp->xstate &= ~SC_COMP_RUN;
2396 			ocomp = ppp->xcomp;
2397 			ostate = ppp->xc_state;
2398 			ppp->xcomp = cp;
2399 			ppp->xc_state = state;
2400 			ppp_xmit_unlock(ppp);
2401 			if (ostate) {
2402 				ocomp->comp_free(ostate);
2403 				module_put(ocomp->owner);
2404 			}
2405 			err = 0;
2406 		} else
2407 			module_put(cp->owner);
2408 
2409 	} else {
2410 		state = cp->decomp_alloc(ccp_option, data.length);
2411 		if (state) {
2412 			ppp_recv_lock(ppp);
2413 			ppp->rstate &= ~SC_DECOMP_RUN;
2414 			ocomp = ppp->rcomp;
2415 			ostate = ppp->rc_state;
2416 			ppp->rcomp = cp;
2417 			ppp->rc_state = state;
2418 			ppp_recv_unlock(ppp);
2419 			if (ostate) {
2420 				ocomp->decomp_free(ostate);
2421 				module_put(ocomp->owner);
2422 			}
2423 			err = 0;
2424 		} else
2425 			module_put(cp->owner);
2426 	}
2427 
2428  out:
2429 	return err;
2430 }
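/*
 * Userspace view of PPPIOCSCOMPRESS (a sketch; ppp_fd is an open
 * /dev/ppp unit fd and the option bytes below are placeholders for
 * whatever CCP actually negotiated, not a real negotiation result):
 */
#if 0
	unsigned char opt[3];
	struct ppp_option_data data;

	opt[0] = CI_BSD_COMPRESS;	/* option type; selects the module */
	opt[1] = 3;			/* option length, must be >= 2 */
	opt[2] = 0x2c;			/* algorithm-specific parameter */
	data.ptr = opt;
	data.length = sizeof(opt);
	data.transmit = 1;		/* 1: transmit side, 0: receive side */
	if (ioctl(ppp_fd, PPPIOCSCOMPRESS, &data) < 0)
		perror("PPPIOCSCOMPRESS");
#endif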
2431 
2432 /*
2433  * Look at a CCP packet and update our state accordingly.
2434  * We assume the caller has the xmit or recv path locked.
2435  */
2436 static void
2437 ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2438 {
2439 	unsigned char *dp;
2440 	int len;
2441 
2442 	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2443 		return;	/* no header */
2444 	dp = skb->data + 2;
2445 
2446 	switch (CCP_CODE(dp)) {
2447 	case CCP_CONFREQ:
2448 
2449 		/* A ConfReq starts negotiation of compression
2450 		 * in one direction of transmission, and hence
2451 		 * shuts compression down... but which way?
2452 		 *
2453 		 * Remember: a ConfReq indicates what its sender
2454 		 * would like to receive.
2455 		 */
2456 		if (inbound)
2457 			/* the peer is proposing what we should send */
2458 			ppp->xstate &= ~SC_COMP_RUN;
2459 		else
2460 			/* we are proposing what the peer should send */
2461 			ppp->rstate &= ~SC_DECOMP_RUN;
2462 
2463 		break;
2464 
2465 	case CCP_TERMREQ:
2466 	case CCP_TERMACK:
2467 		/*
2468 		 * CCP is going down, both directions of transmission
2469 		 */
2470 		ppp->rstate &= ~SC_DECOMP_RUN;
2471 		ppp->xstate &= ~SC_COMP_RUN;
2472 		break;
2473 
2474 	case CCP_CONFACK:
2475 		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2476 			break;
2477 		len = CCP_LENGTH(dp);
2478 		if (!pskb_may_pull(skb, len + 2))
2479 			return;		/* too short */
2480 		dp += CCP_HDRLEN;
2481 		len -= CCP_HDRLEN;
2482 		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2483 			break;
2484 		if (inbound) {
2485 			/* we will start receiving compressed packets */
2486 			if (!ppp->rc_state)
2487 				break;
2488 			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2489 					ppp->file.index, 0, ppp->mru, ppp->debug)) {
2490 				ppp->rstate |= SC_DECOMP_RUN;
2491 				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2492 			}
2493 		} else {
2494 			/* we will soon start sending compressed packets */
2495 			if (!ppp->xc_state)
2496 				break;
2497 			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2498 					ppp->file.index, 0, ppp->debug))
2499 				ppp->xstate |= SC_COMP_RUN;
2500 		}
2501 		break;
2502 
2503 	case CCP_RESETACK:
2504 		/* reset the [de]compressor */
2505 		if ((ppp->flags & SC_CCP_UP) == 0)
2506 			break;
2507 		if (inbound) {
2508 			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2509 				ppp->rcomp->decomp_reset(ppp->rc_state);
2510 				ppp->rstate &= ~SC_DC_ERROR;
2511 			}
2512 		} else {
2513 			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2514 				ppp->xcomp->comp_reset(ppp->xc_state);
2515 		}
2516 		break;
2517 	}
2518 }
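/*
 * In short: a ConfReq stops the [de]compressor for the direction its
 * sender wants to receive on (inbound stops our transmit compressor,
 * outbound stops our receive decompressor); TermReq/TermAck stop both
 * directions; ConfAck (re)initializes and starts one direction; and
 * ResetAck resets a running [de]compressor after an error.
 */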
2519 
2520 /* Free up compression resources. */
2521 static void
2522 ppp_ccp_closed(struct ppp *ppp)
2523 {
2524 	void *xstate, *rstate;
2525 	struct compressor *xcomp, *rcomp;
2526 
2527 	ppp_lock(ppp);
2528 	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2529 	ppp->xstate = 0;
2530 	xcomp = ppp->xcomp;
2531 	xstate = ppp->xc_state;
2532 	ppp->xc_state = NULL;
2533 	ppp->rstate = 0;
2534 	rcomp = ppp->rcomp;
2535 	rstate = ppp->rc_state;
2536 	ppp->rc_state = NULL;
2537 	ppp_unlock(ppp);
2538 
2539 	if (xstate) {
2540 		xcomp->comp_free(xstate);
2541 		module_put(xcomp->owner);
2542 	}
2543 	if (rstate) {
2544 		rcomp->decomp_free(rstate);
2545 		module_put(rcomp->owner);
2546 	}
2547 }
2548 
2549 /* List of compressors. */
2550 static LIST_HEAD(compressor_list);
2551 static DEFINE_SPINLOCK(compressor_list_lock);
2552 
2553 struct compressor_entry {
2554 	struct list_head list;
2555 	struct compressor *comp;
2556 };
2557 
2558 static struct compressor_entry *
2559 find_comp_entry(int proto)
2560 {
2561 	struct compressor_entry *ce;
2562 
2563 	list_for_each_entry(ce, &compressor_list, list) {
2564 		if (ce->comp->compress_proto == proto)
2565 			return ce;
2566 	}
2567 	return NULL;
2568 }
2569 
2570 /* Register a compressor */
2571 int
2572 ppp_register_compressor(struct compressor *cp)
2573 {
2574 	struct compressor_entry *ce;
2575 	int ret;
2576 	spin_lock(&compressor_list_lock);
2577 	ret = -EEXIST;
2578 	if (find_comp_entry(cp->compress_proto))
2579 		goto out;
2580 	ret = -ENOMEM;
2581 	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
2582 	if (!ce)
2583 		goto out;
2584 	ret = 0;
2585 	ce->comp = cp;
2586 	list_add(&ce->list, &compressor_list);
2587  out:
2588 	spin_unlock(&compressor_list_lock);
2589 	return ret;
2590 }
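/*
 * A compressor module typically registers itself at init time; a
 * sketch, with field names assumed from struct compressor in
 * linux/ppp-comp.h and hypothetical "foo" hooks:
 */
#if 0
static struct compressor foo_compressor = {
	.compress_proto	= CI_BSD_COMPRESS,	/* CCP option type */
	.comp_alloc	= foo_comp_alloc,
	.comp_free	= foo_comp_free,
	.comp_init	= foo_comp_init,
	.comp_reset	= foo_comp_reset,
	.compress	= foo_compress,
	.comp_stat	= foo_comp_stats,
	.decomp_alloc	= foo_decomp_alloc,
	.decomp_free	= foo_decomp_free,
	.decomp_init	= foo_decomp_init,
	.decomp_reset	= foo_decomp_reset,
	.decompress	= foo_decompress,
	.incomp		= foo_incomp,
	.decomp_stat	= foo_decomp_stats,
	.owner		= THIS_MODULE,
};

static int __init foo_init(void)
{
	return ppp_register_compressor(&foo_compressor);
}

static void __exit foo_exit(void)
{
	ppp_unregister_compressor(&foo_compressor);
}
#endif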
2591 
2592 /* Unregister a compressor */
2593 void
2594 ppp_unregister_compressor(struct compressor *cp)
2595 {
2596 	struct compressor_entry *ce;
2597 
2598 	spin_lock(&compressor_list_lock);
2599 	ce = find_comp_entry(cp->compress_proto);
2600 	if (ce && ce->comp == cp) {
2601 		list_del(&ce->list);
2602 		kfree(ce);
2603 	}
2604 	spin_unlock(&compressor_list_lock);
2605 }
2606 
2607 /* Find a compressor. */
2608 static struct compressor *
2609 find_compressor(int type)
2610 {
2611 	struct compressor_entry *ce;
2612 	struct compressor *cp = NULL;
2613 
2614 	spin_lock(&compressor_list_lock);
2615 	ce = find_comp_entry(type);
2616 	if (ce) {
2617 		cp = ce->comp;
2618 		if (!try_module_get(cp->owner))
2619 			cp = NULL;
2620 	}
2621 	spin_unlock(&compressor_list_lock);
2622 	return cp;
2623 }
2624 
2625 /*
2626  * Miscellaneous stuff.
2627  */
2628 
2629 static void
2630 ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2631 {
2632 	struct slcompress *vj = ppp->vj;
2633 
2634 	memset(st, 0, sizeof(*st));
2635 	st->p.ppp_ipackets = ppp->stats64.rx_packets;
2636 	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
2637 	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
2638 	st->p.ppp_opackets = ppp->stats64.tx_packets;
2639 	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
2640 	st->p.ppp_obytes = ppp->stats64.tx_bytes;
2641 	if (!vj)
2642 		return;
2643 	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
2644 	st->vj.vjs_compressed = vj->sls_o_compressed;
2645 	st->vj.vjs_searches = vj->sls_o_searches;
2646 	st->vj.vjs_misses = vj->sls_o_misses;
2647 	st->vj.vjs_errorin = vj->sls_i_error;
2648 	st->vj.vjs_tossed = vj->sls_i_tossed;
2649 	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
2650 	st->vj.vjs_compressedin = vj->sls_i_compressed;
2651 }
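/*
 * These counters reach userspace through the SIOCGPPPSTATS device
 * ioctl; a sketch, assuming struct ifpppstatsreq from
 * linux/ppp-ioctl.h:
 */
#if 0
	struct ifpppstatsreq req;
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&req, 0, sizeof(req));
	strncpy(req.b.ifr_name, "ppp0", sizeof(req.b.ifr_name));
	if (ioctl(s, SIOCGPPPSTATS, &req) == 0)
		printf("rx %u tx %u\n",
		       req.stats.p.ppp_ipackets, req.stats.p.ppp_opackets);
#endif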
2652 
2653 /*
2654  * Stuff for handling the lists of ppp units and channels
2655  * and for initialization.
2656  */
2657 
2658 /*
2659  * Create a new ppp interface unit.  Fails if it can't allocate memory
2660  * or if there is already a unit with the requested number.
2661  * unit == -1 means allocate a new number.
2662  */
2663 static struct ppp *
2664 ppp_create_interface(struct net *net, int unit, int *retp)
2665 {
2666 	struct ppp *ppp;
2667 	struct ppp_net *pn;
2668 	struct net_device *dev = NULL;
2669 	int ret = -ENOMEM;
2670 	int i;
2671 
2672 	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
2673 	if (!dev)
2674 		goto out1;
2675 
2676 	pn = ppp_pernet(net);
2677 
2678 	ppp = netdev_priv(dev);
2679 	ppp->dev = dev;
2680 	ppp->mru = PPP_MRU;
2681 	init_ppp_file(&ppp->file, INTERFACE);
2682 	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
2683 	for (i = 0; i < NUM_NP; ++i)
2684 		ppp->npmode[i] = NPMODE_PASS;
2685 	INIT_LIST_HEAD(&ppp->channels);
2686 	spin_lock_init(&ppp->rlock);
2687 	spin_lock_init(&ppp->wlock);
2688 #ifdef CONFIG_PPP_MULTILINK
2689 	ppp->minseq = -1;
2690 	skb_queue_head_init(&ppp->mrq);
2691 #endif /* CONFIG_PPP_MULTILINK */
2692 #ifdef CONFIG_PPP_FILTER
2693 	ppp->pass_filter = NULL;
2694 	ppp->active_filter = NULL;
2695 #endif /* CONFIG_PPP_FILTER */
2696 
2697 	/*
2698 	 * drum roll: don't forget to set
2699 	 * the net namespace this device belongs to
2700 	 */
2701 	dev_net_set(dev, net);
2702 
2703 	mutex_lock(&pn->all_ppp_mutex);
2704 
2705 	if (unit < 0) {
2706 		unit = unit_get(&pn->units_idr, ppp);
2707 		if (unit < 0) {
2708 			ret = unit;
2709 			goto out2;
2710 		}
2711 	} else {
2712 		ret = -EEXIST;
2713 		if (unit_find(&pn->units_idr, unit))
2714 			goto out2; /* unit already exists */
2715 		/*
2716 		 * If the caller asked for a specific unit number,
2717 		 * try to satisfy the request; otherwise the caller
2718 		 * should ask us to pick a new unit number.
2719 		 *
2720 		 * NOTE: returning -EEXIST is not strictly accurate,
2721 		 * but it makes pppd ask us to allocate a new unit,
2722 		 * so the user still ends up happy. :)
2723 		 */
2724 		unit = unit_set(&pn->units_idr, ppp, unit);
2725 		if (unit < 0)
2726 			goto out2;
2727 	}
2728 
2729 	/* Initialize the new ppp unit */
2730 	ppp->file.index = unit;
2731 	sprintf(dev->name, "ppp%d", unit);
2732 
2733 	ret = register_netdev(dev);
2734 	if (ret != 0) {
2735 		unit_put(&pn->units_idr, unit);
2736 		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2737 			   dev->name, ret);
2738 		goto out2;
2739 	}
2740 
2741 	ppp->ppp_net = net;
2742 
2743 	atomic_inc(&ppp_unit_count);
2744 	mutex_unlock(&pn->all_ppp_mutex);
2745 
2746 	*retp = 0;
2747 	return ppp;
2748 
2749 out2:
2750 	mutex_unlock(&pn->all_ppp_mutex);
2751 	free_netdev(dev);
2752 out1:
2753 	*retp = ret;
2754 	return NULL;
2755 }
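/*
 * Userspace reaches this function by opening /dev/ppp and asking for a
 * unit (sketch):
 */
#if 0
	int unit = -1;	/* -1: let the kernel pick the next free number */
	int fd = open("/dev/ppp", O_RDWR);

	if (fd >= 0 && ioctl(fd, PPPIOCNEWUNIT, &unit) == 0)
		printf("created ppp%d\n", unit);
#endif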
2756 
2757 /*
2758  * Initialize a ppp_file structure.
2759  */
2760 static void
2761 init_ppp_file(struct ppp_file *pf, int kind)
2762 {
2763 	pf->kind = kind;
2764 	skb_queue_head_init(&pf->xq);
2765 	skb_queue_head_init(&pf->rq);
2766 	atomic_set(&pf->refcnt, 1);
2767 	init_waitqueue_head(&pf->rwait);
2768 }
2769 
2770 /*
2771  * Take down a ppp interface unit - called when the owning file
2772  * (the one that created the unit) is closed or detached.
2773  */
2774 static void ppp_shutdown_interface(struct ppp *ppp)
2775 {
2776 	struct ppp_net *pn;
2777 
2778 	pn = ppp_pernet(ppp->ppp_net);
2779 	mutex_lock(&pn->all_ppp_mutex);
2780 
2781 	/* This will call dev_close() for us. */
2782 	ppp_lock(ppp);
2783 	if (!ppp->closing) {
2784 		ppp->closing = 1;
2785 		ppp_unlock(ppp);
2786 		unregister_netdev(ppp->dev);
2787 		unit_put(&pn->units_idr, ppp->file.index);
2788 	} else
2789 		ppp_unlock(ppp);
2790 
2791 	ppp->file.dead = 1;
2792 	ppp->owner = NULL;
2793 	wake_up_interruptible(&ppp->file.rwait);
2794 
2795 	mutex_unlock(&pn->all_ppp_mutex);
2796 }
2797 
2798 /*
2799  * Free the memory used by a ppp unit.  This is only called once
2800  * there are no channels connected to the unit and no file structs
2801  * that reference the unit.
2802  */
2803 static void ppp_destroy_interface(struct ppp *ppp)
2804 {
2805 	atomic_dec(&ppp_unit_count);
2806 
2807 	if (!ppp->file.dead || ppp->n_channels) {
2808 		/* "can't happen" */
2809 		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2810 			   "but dead=%d n_channels=%d !\n",
2811 			   ppp, ppp->file.dead, ppp->n_channels);
2812 		return;
2813 	}
2814 
2815 	ppp_ccp_closed(ppp);
2816 	if (ppp->vj) {
2817 		slhc_free(ppp->vj);
2818 		ppp->vj = NULL;
2819 	}
2820 	skb_queue_purge(&ppp->file.xq);
2821 	skb_queue_purge(&ppp->file.rq);
2822 #ifdef CONFIG_PPP_MULTILINK
2823 	skb_queue_purge(&ppp->mrq);
2824 #endif /* CONFIG_PPP_MULTILINK */
2825 #ifdef CONFIG_PPP_FILTER
2826 	if (ppp->pass_filter) {
2827 		sk_unattached_filter_destroy(ppp->pass_filter);
2828 		ppp->pass_filter = NULL;
2829 	}
2830 
2831 	if (ppp->active_filter) {
2832 		sk_unattached_filter_destroy(ppp->active_filter);
2833 		ppp->active_filter = NULL;
2834 	}
2835 #endif /* CONFIG_PPP_FILTER */
2836 
2837 	kfree_skb(ppp->xmit_pending);
2838 
2839 	free_netdev(ppp->dev);
2840 }
2841 
2842 /*
2843  * Locate an existing ppp unit.
2844  * The caller should have locked the all_ppp_mutex.
2845  */
2846 static struct ppp *
2847 ppp_find_unit(struct ppp_net *pn, int unit)
2848 {
2849 	return unit_find(&pn->units_idr, unit);
2850 }
2851 
2852 /*
2853  * Locate an existing ppp channel.
2854  * The caller should have locked the all_channels_lock.
2855  * First we look in the new_channels list, then in the
2856  * all_channels list.  If found in the new_channels list,
2857  * we move it to the all_channels list.  This is for speed
2858  * when we have a lot of channels in use.
2859  */
2860 static struct channel *
2861 ppp_find_channel(struct ppp_net *pn, int unit)
2862 {
2863 	struct channel *pch;
2864 
2865 	list_for_each_entry(pch, &pn->new_channels, list) {
2866 		if (pch->file.index == unit) {
2867 			list_move(&pch->list, &pn->all_channels);
2868 			return pch;
2869 		}
2870 	}
2871 
2872 	list_for_each_entry(pch, &pn->all_channels, list) {
2873 		if (pch->file.index == unit)
2874 			return pch;
2875 	}
2876 
2877 	return NULL;
2878 }
2879 
2880 /*
2881  * Connect a PPP channel to a PPP interface unit.
2882  */
2883 static int
2884 ppp_connect_channel(struct channel *pch, int unit)
2885 {
2886 	struct ppp *ppp;
2887 	struct ppp_net *pn;
2888 	int ret = -ENXIO;
2889 	int hdrlen;
2890 
2891 	pn = ppp_pernet(pch->chan_net);
2892 
2893 	mutex_lock(&pn->all_ppp_mutex);
2894 	ppp = ppp_find_unit(pn, unit);
2895 	if (!ppp)
2896 		goto out;
2897 	write_lock_bh(&pch->upl);
2898 	ret = -EINVAL;
2899 	if (pch->ppp)
2900 		goto outl;
2901 
2902 	ppp_lock(ppp);
2903 	if (pch->file.hdrlen > ppp->file.hdrlen)
2904 		ppp->file.hdrlen = pch->file.hdrlen;
2905 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
2906 	if (hdrlen > ppp->dev->hard_header_len)
2907 		ppp->dev->hard_header_len = hdrlen;
2908 	list_add_tail(&pch->clist, &ppp->channels);
2909 	++ppp->n_channels;
2910 	pch->ppp = ppp;
2911 	atomic_inc(&ppp->file.refcnt);
2912 	ppp_unlock(ppp);
2913 	ret = 0;
2914 
2915  outl:
2916 	write_unlock_bh(&pch->upl);
2917  out:
2918 	mutex_unlock(&pn->all_ppp_mutex);
2919 	return ret;
2920 }
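/*
 * The corresponding userspace sequence (sketch; chindex is the channel
 * number obtained from the transport, e.g. after a pppox connect):
 */
#if 0
	int chan_fd = open("/dev/ppp", O_RDWR);
	int chindex = 0;	/* channel number from the transport */
	int unit = 0;		/* an existing ppp unit number */

	ioctl(chan_fd, PPPIOCATTCHAN, &chindex);	/* attach fd to channel */
	ioctl(chan_fd, PPPIOCCONNECT, &unit);	/* -> ppp_connect_channel() */
	/* PPPIOCDISCONN later undoes this via ppp_disconnect_channel() */
#endif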
2921 
2922 /*
2923  * Disconnect a channel from its ppp unit.
2924  */
2925 static int
2926 ppp_disconnect_channel(struct channel *pch)
2927 {
2928 	struct ppp *ppp;
2929 	int err = -EINVAL;
2930 
2931 	write_lock_bh(&pch->upl);
2932 	ppp = pch->ppp;
2933 	pch->ppp = NULL;
2934 	write_unlock_bh(&pch->upl);
2935 	if (ppp) {
2936 		/* remove it from the ppp unit's list */
2937 		ppp_lock(ppp);
2938 		list_del(&pch->clist);
2939 		if (--ppp->n_channels == 0)
2940 			wake_up_interruptible(&ppp->file.rwait);
2941 		ppp_unlock(ppp);
2942 		if (atomic_dec_and_test(&ppp->file.refcnt))
2943 			ppp_destroy_interface(ppp);
2944 		err = 0;
2945 	}
2946 	return err;
2947 }
2948 
2949 /*
2950  * Free up the resources used by a ppp channel.
2951  */
2952 static void ppp_destroy_channel(struct channel *pch)
2953 {
2954 	atomic_dec(&channel_count);
2955 
2956 	if (!pch->file.dead) {
2957 		/* "can't happen" */
2958 		pr_err("ppp: destroying undead channel %p !\n", pch);
2959 		return;
2960 	}
2961 	skb_queue_purge(&pch->file.xq);
2962 	skb_queue_purge(&pch->file.rq);
2963 	kfree(pch);
2964 }
2965 
2966 static void __exit ppp_cleanup(void)
2967 {
2968 	/* should never happen */
2969 	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2970 		pr_err("PPP: removing module but units remain!\n");
2971 	unregister_chrdev(PPP_MAJOR, "ppp");
2972 	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2973 	class_destroy(ppp_class);
2974 	unregister_pernet_device(&ppp_net_ops);
2975 }
2976 
2977 /*
2978  * Units handling. Caller must protect concurrent access
2979  * by holding all_ppp_mutex
2980  */
2981 
2982 /* associate pointer with specified number */
2983 static int unit_set(struct idr *p, void *ptr, int n)
2984 {
2985 	int unit;
2986 
2987 	unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
2988 	if (unit == -ENOSPC)
2989 		unit = -EINVAL;
2990 	return unit;
2991 }
2992 
2993 /* get new free unit number and associate pointer with it */
2994 static int unit_get(struct idr *p, void *ptr)
2995 {
2996 	return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
2997 }
2998 
2999 /* put unit number back to a pool */
3000 static void unit_put(struct idr *p, int n)
3001 {
3002 	idr_remove(p, n);
3003 }
3004 
3005 /* get pointer associated with the number */
3006 static void *unit_find(struct idr *p, int n)
3007 {
3008 	return idr_find(p, n);
3009 }
3010 
3011 /* Module/initialization stuff */
3012 
3013 module_init(ppp_init);
3014 module_exit(ppp_cleanup);
3015 
3016 EXPORT_SYMBOL(ppp_register_net_channel);
3017 EXPORT_SYMBOL(ppp_register_channel);
3018 EXPORT_SYMBOL(ppp_unregister_channel);
3019 EXPORT_SYMBOL(ppp_channel_index);
3020 EXPORT_SYMBOL(ppp_unit_number);
3021 EXPORT_SYMBOL(ppp_dev_name);
3022 EXPORT_SYMBOL(ppp_input);
3023 EXPORT_SYMBOL(ppp_input_error);
3024 EXPORT_SYMBOL(ppp_output_wakeup);
3025 EXPORT_SYMBOL(ppp_register_compressor);
3026 EXPORT_SYMBOL(ppp_unregister_compressor);
3027 MODULE_LICENSE("GPL");
3028 MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
3029 MODULE_ALIAS("devname:ppp");
3030