/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>

#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE = 1, CHANNEL
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

/*
 * Data structure to hold primary network stats for which
 * we want to use 64 bit storage.  Other network stats
 * are stored in dev->stats of the ppp structure.
 */
struct ppp_link_stats {
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
};

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;	/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
	struct ppp_link_stats stats64;	/* 64 bit network stats */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
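 * A channel is linked into two lists: `list' chains it on the per-net
 * all_channels/new_channels lists, while `clist' chains it on the list
 * of channels attached to a unit.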
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel */
#endif /* CONFIG_PPP_MULTILINK */
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */

static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and the atomicity of finding
	 * a channel and updating its file.refcnt field.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)

/* Prototypes.
 */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);

static struct class *ppp_class;

/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, ppp_net_id);
}

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}

/* Translates an NP index into an ethertype */
static const int
npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}

static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;
	struct iovec iov;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
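			 * (In demand mode, ppp_send_frame() loops outbound
			 * packets back onto file.rq for pppd to read, so
			 * there can be data here even with no channels
			 * attached.)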
440 */ 441 struct ppp *ppp = PF_TO_PPP(pf); 442 if (ppp->n_channels == 0 && 443 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 444 break; 445 } 446 ret = -EAGAIN; 447 if (file->f_flags & O_NONBLOCK) 448 break; 449 ret = -ERESTARTSYS; 450 if (signal_pending(current)) 451 break; 452 schedule(); 453 } 454 set_current_state(TASK_RUNNING); 455 remove_wait_queue(&pf->rwait, &wait); 456 457 if (!skb) 458 goto out; 459 460 ret = -EOVERFLOW; 461 if (skb->len > count) 462 goto outf; 463 ret = -EFAULT; 464 iov.iov_base = buf; 465 iov.iov_len = count; 466 if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len)) 467 goto outf; 468 ret = skb->len; 469 470 outf: 471 kfree_skb(skb); 472 out: 473 return ret; 474 } 475 476 static ssize_t ppp_write(struct file *file, const char __user *buf, 477 size_t count, loff_t *ppos) 478 { 479 struct ppp_file *pf = file->private_data; 480 struct sk_buff *skb; 481 ssize_t ret; 482 483 if (!pf) 484 return -ENXIO; 485 ret = -ENOMEM; 486 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); 487 if (!skb) 488 goto out; 489 skb_reserve(skb, pf->hdrlen); 490 ret = -EFAULT; 491 if (copy_from_user(skb_put(skb, count), buf, count)) { 492 kfree_skb(skb); 493 goto out; 494 } 495 496 skb_queue_tail(&pf->xq, skb); 497 498 switch (pf->kind) { 499 case INTERFACE: 500 ppp_xmit_process(PF_TO_PPP(pf)); 501 break; 502 case CHANNEL: 503 ppp_channel_push(PF_TO_CHANNEL(pf)); 504 break; 505 } 506 507 ret = count; 508 509 out: 510 return ret; 511 } 512 513 /* No kernel lock - fine */ 514 static unsigned int ppp_poll(struct file *file, poll_table *wait) 515 { 516 struct ppp_file *pf = file->private_data; 517 unsigned int mask; 518 519 if (!pf) 520 return 0; 521 poll_wait(file, &pf->rwait, wait); 522 mask = POLLOUT | POLLWRNORM; 523 if (skb_peek(&pf->rq)) 524 mask |= POLLIN | POLLRDNORM; 525 if (pf->dead) 526 mask |= POLLHUP; 527 else if (pf->kind == INTERFACE) { 528 /* see comment in ppp_read */ 529 struct ppp *ppp = PF_TO_PPP(pf); 530 if (ppp->n_channels == 0 && 531 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 532 mask |= POLLIN | POLLRDNORM; 533 } 534 535 return mask; 536 } 537 538 #ifdef CONFIG_PPP_FILTER 539 static int get_filter(void __user *arg, struct sock_filter **p) 540 { 541 struct sock_fprog uprog; 542 struct sock_filter *code = NULL; 543 int len, err; 544 545 if (copy_from_user(&uprog, arg, sizeof(uprog))) 546 return -EFAULT; 547 548 if (!uprog.len) { 549 *p = NULL; 550 return 0; 551 } 552 553 len = uprog.len * sizeof(struct sock_filter); 554 code = memdup_user(uprog.filter, len); 555 if (IS_ERR(code)) 556 return PTR_ERR(code); 557 558 err = sk_chk_filter(code, uprog.len); 559 if (err) { 560 kfree(code); 561 return err; 562 } 563 564 *p = code; 565 return uprog.len; 566 } 567 #endif /* CONFIG_PPP_FILTER */ 568 569 static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 570 { 571 struct ppp_file *pf = file->private_data; 572 struct ppp *ppp; 573 int err = -EFAULT, val, val2, i; 574 struct ppp_idle idle; 575 struct npioctl npi; 576 int unit, cflags; 577 struct slcompress *vj; 578 void __user *argp = (void __user *)arg; 579 int __user *p = argp; 580 581 if (!pf) 582 return ppp_unattached_ioctl(current->nsproxy->net_ns, 583 pf, file, cmd, arg); 584 585 if (cmd == PPPIOCDETACH) { 586 /* 587 * We have to be careful here... if the file descriptor 588 * has been dup'd, we could have another process in the 589 * middle of a poll using the same file *, so we had 590 * better not free the interface data structures - 591 * instead we fail the ioctl. 
		 * Even in this case, we shut down the interface if we
		 * are the owner of it.  Actually, we should get rid of
		 * PPPIOCDETACH; userland (i.e. pppd) could achieve the
		 * same effect by closing this fd and reopening /dev/ppp.
		 */
		err = -EINVAL;
		mutex_lock(&ppp_mutex);
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_long_read(&file->f_count) <= 2) {
			ppp_release(NULL, file);
			err = 0;
		} else
			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
				atomic_long_read(&file->f_count));
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch;
		struct ppp_channel *chan;

		mutex_lock(&ppp_mutex);
		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		pr_err("PPP: not interface or channel??\n");
		return -EINVAL;
	}

	mutex_lock(&ppp_mutex);
	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
		err = ppp_set_compress(ppp, arg);
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE:
		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle, sizeof(idle)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (!vj) {
			netdev_err(ppp->dev,
				   "PPP: no memory (VJ compressor)\n");
			err = -ENOMEM;
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->pass_filter);
			ppp->pass_filter = code;
			ppp->pass_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
	case PPPIOCSACTIVE:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->active_filter);
			ppp->active_filter = code;
			ppp->active_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}

static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	struct ppp_net *pn;
	int __user *p = (int __user *)arg;

	mutex_lock(&ppp_mutex);
	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		ppp = ppp_create_interface(net, unit, &err);
		if (!ppp)
			break;
		file->private_data = &ppp->file;
		ppp->owner = file;
		err = -EFAULT;
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		mutex_lock(&pn->all_ppp_mutex);
		ppp = ppp_find_unit(pn, unit);
		if (ppp) {
			atomic_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		mutex_unlock(&pn->all_ppp_mutex);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		spin_lock_bh(&pn->all_channels_lock);
		chan = ppp_find_channel(pn, unit);
		if (chan) {
			atomic_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&pn->all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}

static const struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.unlocked_ioctl	= ppp_ioctl,
	.open		= ppp_open,
	.release	= ppp_release,
	.llseek		= noop_llseek,
};

static __net_init int ppp_init_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_init(&pn->units_idr);
	mutex_init(&pn->all_ppp_mutex);

	INIT_LIST_HEAD(&pn->all_channels);
	INIT_LIST_HEAD(&pn->new_channels);

	spin_lock_init(&pn->all_channels_lock);

	return 0;
}

static __net_exit void ppp_exit_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_destroy(&pn->units_idr);
}

static struct pernet_operations ppp_net_ops = {
	.init = ppp_init_net,
	.exit = ppp_exit_net,
	.id   = &ppp_net_id,
	.size = sizeof(struct ppp_net),
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel, or at
   module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	pr_info("PPP generic driver version " PPP_VERSION "\n");

	err = register_pernet_device(&ppp_net_ops);
	if (err) {
		pr_err("failed to register PPP pernet device (%d)\n", err);
		goto out;
	}

	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (err) {
		pr_err("failed to register PPP device (%d)\n", err);
		goto out_net;
	}

	ppp_class = class_create(THIS_MODULE, "ppp");
	if (IS_ERR(ppp_class)) {
		err = PTR_ERR(ppp_class);
		goto out_chrdev;
	}

	/* not a big deal if we fail here :-) */
	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");

	return 0;

out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
	unregister_pernet_device(&ppp_net_ops);
out:
	return err;
}

/*
 * Network interface unit routines.
 */
static netdev_tx_t
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_cow_head(skb, PPP_HDRLEN))
		goto outf;

	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	put_unaligned_be16(proto, pp);

	skb_queue_tail(&ppp->file.xq, skb);
	ppp_xmit_process(ppp);
	return NETDEV_TX_OK;

 outf:
	kfree_skb(skb);
	++dev->stats.tx_dropped;
	return NETDEV_TX_OK;
}

static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ppp *ppp = netdev_priv(dev);
	int err = -EFAULT;
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static struct rtnl_link_stats64*
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
	struct ppp *ppp = netdev_priv(dev);

	ppp_recv_lock(ppp);
	stats64->rx_packets = ppp->stats64.rx_packets;
	stats64->rx_bytes   = ppp->stats64.rx_bytes;
	ppp_recv_unlock(ppp);

	ppp_xmit_lock(ppp);
	stats64->tx_packets = ppp->stats64.tx_packets;
	stats64->tx_bytes   = ppp->stats64.tx_bytes;
	ppp_xmit_unlock(ppp);

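	/* the remaining counters are 32-bit and kept in dev->stats */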
	stats64->rx_errors        = dev->stats.rx_errors;
	stats64->tx_errors        = dev->stats.tx_errors;
	stats64->rx_dropped       = dev->stats.rx_dropped;
	stats64->tx_dropped       = dev->stats.tx_dropped;
	stats64->rx_length_errors = dev->stats.rx_length_errors;

	return stats64;
}

static struct lock_class_key ppp_tx_busylock;
static int ppp_dev_init(struct net_device *dev)
{
	dev->qdisc_tx_busylock = &ppp_tx_busylock;
	return 0;
}

static const struct net_device_ops ppp_netdev_ops = {
	.ndo_init	 = ppp_dev_init,
	.ndo_start_xmit  = ppp_start_xmit,
	.ndo_do_ioctl    = ppp_net_ioctl,
	.ndo_get_stats64 = ppp_get_stats64,
};

static void ppp_setup(struct net_device *dev)
{
	dev->netdev_ops = &ppp_netdev_ops;
	dev->hard_header_len = PPP_HDRLEN;
	dev->mtu = PPP_MRU;
	dev->addr_len = 0;
	dev->tx_queue_len = 3;
	dev->type = ARPHRD_PPP;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/*
 * Transmit-side routines.
 */

/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
	struct sk_buff *skb;

	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		ppp_push(ppp);
		while (!ppp->xmit_pending &&
		       (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
		else
			netif_stop_queue(ppp->dev);
	}
	ppp_xmit_unlock(ppp);
}

static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	int len;
	int new_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
	int compressor_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + PPP_HDRLEN;
	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
	if (!new_skb) {
		if (net_ratelimit())
			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
		return NULL;
	}
	if (ppp->dev->hard_header_len > PPP_HDRLEN)
		skb_reserve(new_skb,
			    ppp->dev->hard_header_len - PPP_HDRLEN);

	/* compressor still expects A/C bytes in hdr */
	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
				   new_skb->data, skb->len + 2,
				   compressor_skb_size);
	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
		consume_skb(skb);
		skb = new_skb;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off A/C bytes */
	} else if (len == 0) {
		/* didn't compress, or CCP not up yet */
		consume_skb(new_skb);
		new_skb = skb;
	} else {
		/*
		 * (len < 0)
		 * MPPE requires that we do not send unencrypted
		 * frames.  The compressor will return -1 if we
		 * should drop the frame.  We cannot simply test
		 * the compress_proto because MPPE and MPPC share
		 * the same number.
		 */
		if (net_ratelimit())
			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
		kfree_skb(skb);
		consume_skb(new_skb);
		new_skb = NULL;
	}
	return new_skb;
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
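 * On return the skb has either been freed, queued on file.rq for
 * pppd (demand dialling), or left in xmit_pending for ppp_push().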
1171 */ 1172 static void 1173 ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) 1174 { 1175 int proto = PPP_PROTO(skb); 1176 struct sk_buff *new_skb; 1177 int len; 1178 unsigned char *cp; 1179 1180 if (proto < 0x8000) { 1181 #ifdef CONFIG_PPP_FILTER 1182 /* check if we should pass this packet */ 1183 /* the filter instructions are constructed assuming 1184 a four-byte PPP header on each packet */ 1185 *skb_push(skb, 2) = 1; 1186 if (ppp->pass_filter && 1187 sk_run_filter(skb, ppp->pass_filter) == 0) { 1188 if (ppp->debug & 1) 1189 netdev_printk(KERN_DEBUG, ppp->dev, 1190 "PPP: outbound frame " 1191 "not passed\n"); 1192 kfree_skb(skb); 1193 return; 1194 } 1195 /* if this packet passes the active filter, record the time */ 1196 if (!(ppp->active_filter && 1197 sk_run_filter(skb, ppp->active_filter) == 0)) 1198 ppp->last_xmit = jiffies; 1199 skb_pull(skb, 2); 1200 #else 1201 /* for data packets, record the time */ 1202 ppp->last_xmit = jiffies; 1203 #endif /* CONFIG_PPP_FILTER */ 1204 } 1205 1206 ++ppp->stats64.tx_packets; 1207 ppp->stats64.tx_bytes += skb->len - 2; 1208 1209 switch (proto) { 1210 case PPP_IP: 1211 if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0) 1212 break; 1213 /* try to do VJ TCP header compression */ 1214 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, 1215 GFP_ATOMIC); 1216 if (!new_skb) { 1217 netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n"); 1218 goto drop; 1219 } 1220 skb_reserve(new_skb, ppp->dev->hard_header_len - 2); 1221 cp = skb->data + 2; 1222 len = slhc_compress(ppp->vj, cp, skb->len - 2, 1223 new_skb->data + 2, &cp, 1224 !(ppp->flags & SC_NO_TCP_CCID)); 1225 if (cp == skb->data + 2) { 1226 /* didn't compress */ 1227 consume_skb(new_skb); 1228 } else { 1229 if (cp[0] & SL_TYPE_COMPRESSED_TCP) { 1230 proto = PPP_VJC_COMP; 1231 cp[0] &= ~SL_TYPE_COMPRESSED_TCP; 1232 } else { 1233 proto = PPP_VJC_UNCOMP; 1234 cp[0] = skb->data[2]; 1235 } 1236 consume_skb(skb); 1237 skb = new_skb; 1238 cp = skb_put(skb, len + 2); 1239 cp[0] = 0; 1240 cp[1] = proto; 1241 } 1242 break; 1243 1244 case PPP_CCP: 1245 /* peek at outbound CCP frames */ 1246 ppp_ccp_peek(ppp, skb, 0); 1247 break; 1248 } 1249 1250 /* try to do packet compression */ 1251 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state && 1252 proto != PPP_LCP && proto != PPP_CCP) { 1253 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { 1254 if (net_ratelimit()) 1255 netdev_err(ppp->dev, 1256 "ppp: compression required but " 1257 "down - pkt dropped.\n"); 1258 goto drop; 1259 } 1260 skb = pad_compress_skb(ppp, skb); 1261 if (!skb) 1262 goto drop; 1263 } 1264 1265 /* 1266 * If we are waiting for traffic (demand dialling), 1267 * queue it up for pppd to receive. 1268 */ 1269 if (ppp->flags & SC_LOOP_TRAFFIC) { 1270 if (ppp->file.rq.qlen > PPP_MAX_RQLEN) 1271 goto drop; 1272 skb_queue_tail(&ppp->file.rq, skb); 1273 wake_up_interruptible(&ppp->file.rwait); 1274 return; 1275 } 1276 1277 ppp->xmit_pending = skb; 1278 ppp_push(ppp); 1279 return; 1280 1281 drop: 1282 kfree_skb(skb); 1283 ++ppp->dev->stats.tx_errors; 1284 } 1285 1286 /* 1287 * Try to send the frame in xmit_pending. 1288 * The caller should have the xmit path locked. 
1289 */ 1290 static void 1291 ppp_push(struct ppp *ppp) 1292 { 1293 struct list_head *list; 1294 struct channel *pch; 1295 struct sk_buff *skb = ppp->xmit_pending; 1296 1297 if (!skb) 1298 return; 1299 1300 list = &ppp->channels; 1301 if (list_empty(list)) { 1302 /* nowhere to send the packet, just drop it */ 1303 ppp->xmit_pending = NULL; 1304 kfree_skb(skb); 1305 return; 1306 } 1307 1308 if ((ppp->flags & SC_MULTILINK) == 0) { 1309 /* not doing multilink: send it down the first channel */ 1310 list = list->next; 1311 pch = list_entry(list, struct channel, clist); 1312 1313 spin_lock_bh(&pch->downl); 1314 if (pch->chan) { 1315 if (pch->chan->ops->start_xmit(pch->chan, skb)) 1316 ppp->xmit_pending = NULL; 1317 } else { 1318 /* channel got unregistered */ 1319 kfree_skb(skb); 1320 ppp->xmit_pending = NULL; 1321 } 1322 spin_unlock_bh(&pch->downl); 1323 return; 1324 } 1325 1326 #ifdef CONFIG_PPP_MULTILINK 1327 /* Multilink: fragment the packet over as many links 1328 as can take the packet at the moment. */ 1329 if (!ppp_mp_explode(ppp, skb)) 1330 return; 1331 #endif /* CONFIG_PPP_MULTILINK */ 1332 1333 ppp->xmit_pending = NULL; 1334 kfree_skb(skb); 1335 } 1336 1337 #ifdef CONFIG_PPP_MULTILINK 1338 static bool mp_protocol_compress __read_mostly = true; 1339 module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR); 1340 MODULE_PARM_DESC(mp_protocol_compress, 1341 "compress protocol id in multilink fragments"); 1342 1343 /* 1344 * Divide a packet to be transmitted into fragments and 1345 * send them out the individual links. 1346 */ 1347 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1348 { 1349 int len, totlen; 1350 int i, bits, hdrlen, mtu; 1351 int flen; 1352 int navail, nfree, nzero; 1353 int nbigger; 1354 int totspeed; 1355 int totfree; 1356 unsigned char *p, *q; 1357 struct list_head *list; 1358 struct channel *pch; 1359 struct sk_buff *frag; 1360 struct ppp_channel *chan; 1361 1362 totspeed = 0; /*total bitrate of the bundle*/ 1363 nfree = 0; /* # channels which have no packet already queued */ 1364 navail = 0; /* total # of usable channels (not deregistered) */ 1365 nzero = 0; /* number of channels with zero speed associated*/ 1366 totfree = 0; /*total # of channels available and 1367 *having no queued packets before 1368 *starting the fragmentation*/ 1369 1370 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1371 i = 0; 1372 list_for_each_entry(pch, &ppp->channels, clist) { 1373 if (pch->chan) { 1374 pch->avail = 1; 1375 navail++; 1376 pch->speed = pch->chan->speed; 1377 } else { 1378 pch->avail = 0; 1379 } 1380 if (pch->avail) { 1381 if (skb_queue_empty(&pch->file.xq) || 1382 !pch->had_frag) { 1383 if (pch->speed == 0) 1384 nzero++; 1385 else 1386 totspeed += pch->speed; 1387 1388 pch->avail = 2; 1389 ++nfree; 1390 ++totfree; 1391 } 1392 if (!pch->had_frag && i < ppp->nxchan) 1393 ppp->nxchan = i; 1394 } 1395 ++i; 1396 } 1397 /* 1398 * Don't start sending this packet unless at least half of 1399 * the channels are free. This gives much better TCP 1400 * performance if we have a lot of channels. 
1401 */ 1402 if (nfree == 0 || nfree < navail / 2) 1403 return 0; /* can't take now, leave it in xmit_pending */ 1404 1405 /* Do protocol field compression */ 1406 p = skb->data; 1407 len = skb->len; 1408 if (*p == 0 && mp_protocol_compress) { 1409 ++p; 1410 --len; 1411 } 1412 1413 totlen = len; 1414 nbigger = len % nfree; 1415 1416 /* skip to the channel after the one we last used 1417 and start at that one */ 1418 list = &ppp->channels; 1419 for (i = 0; i < ppp->nxchan; ++i) { 1420 list = list->next; 1421 if (list == &ppp->channels) { 1422 i = 0; 1423 break; 1424 } 1425 } 1426 1427 /* create a fragment for each channel */ 1428 bits = B; 1429 while (len > 0) { 1430 list = list->next; 1431 if (list == &ppp->channels) { 1432 i = 0; 1433 continue; 1434 } 1435 pch = list_entry(list, struct channel, clist); 1436 ++i; 1437 if (!pch->avail) 1438 continue; 1439 1440 /* 1441 * Skip this channel if it has a fragment pending already and 1442 * we haven't given a fragment to all of the free channels. 1443 */ 1444 if (pch->avail == 1) { 1445 if (nfree > 0) 1446 continue; 1447 } else { 1448 pch->avail = 1; 1449 } 1450 1451 /* check the channel's mtu and whether it is still attached. */ 1452 spin_lock_bh(&pch->downl); 1453 if (pch->chan == NULL) { 1454 /* can't use this channel, it's being deregistered */ 1455 if (pch->speed == 0) 1456 nzero--; 1457 else 1458 totspeed -= pch->speed; 1459 1460 spin_unlock_bh(&pch->downl); 1461 pch->avail = 0; 1462 totlen = len; 1463 totfree--; 1464 nfree--; 1465 if (--navail == 0) 1466 break; 1467 continue; 1468 } 1469 1470 /* 1471 *if the channel speed is not set divide 1472 *the packet evenly among the free channels; 1473 *otherwise divide it according to the speed 1474 *of the channel we are going to transmit on 1475 */ 1476 flen = len; 1477 if (nfree > 0) { 1478 if (pch->speed == 0) { 1479 flen = len/nfree; 1480 if (nbigger > 0) { 1481 flen++; 1482 nbigger--; 1483 } 1484 } else { 1485 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / 1486 ((totspeed*totfree)/pch->speed)) - hdrlen; 1487 if (nbigger > 0) { 1488 flen += ((totfree - nzero)*pch->speed)/totspeed; 1489 nbigger -= ((totfree - nzero)*pch->speed)/ 1490 totspeed; 1491 } 1492 } 1493 nfree--; 1494 } 1495 1496 /* 1497 *check if we are on the last channel or 1498 *we exceded the length of the data to 1499 *fragment 1500 */ 1501 if ((nfree <= 0) || (flen > len)) 1502 flen = len; 1503 /* 1504 *it is not worth to tx on slow channels: 1505 *in that case from the resulting flen according to the 1506 *above formula will be equal or less than zero. 1507 *Skip the channel in this case 1508 */ 1509 if (flen <= 0) { 1510 pch->avail = 2; 1511 spin_unlock_bh(&pch->downl); 1512 continue; 1513 } 1514 1515 /* 1516 * hdrlen includes the 2-byte PPP protocol field, but the 1517 * MTU counts only the payload excluding the protocol field. 
		 * (RFC1661 Section 2)
		 */
		mtu = pch->chan->mtu - (hdrlen - 2);
		if (mtu < 4)
			mtu = 4;
		if (flen > mtu)
			flen = mtu;
		if (flen == len)
			bits |= E;
		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
		if (!frag)
			goto noskb;
		q = skb_put(frag, flen + hdrlen);

		/* make the MP header */
		put_unaligned_be16(PPP_MP, q);
		if (ppp->flags & SC_MP_XSHORTSEQ) {
			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
			q[3] = ppp->nxseq;
		} else {
			q[2] = bits;
			q[3] = ppp->nxseq >> 16;
			q[4] = ppp->nxseq >> 8;
			q[5] = ppp->nxseq;
		}

		memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock_bh(&pch->downl);
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
	++ppp->dev->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}

/*
 * Receive-side routines.
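 * Channel drivers hand incoming frames to ppp_input(); frames are
 * either queued on the channel's own receive queue (for pppd) or
 * passed to the attached unit via ppp_do_recv().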
1608 */ 1609 1610 struct ppp_mp_skb_parm { 1611 u32 sequence; 1612 u8 BEbits; 1613 }; 1614 #define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb)) 1615 1616 static inline void 1617 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1618 { 1619 ppp_recv_lock(ppp); 1620 if (!ppp->closing) 1621 ppp_receive_frame(ppp, skb, pch); 1622 else 1623 kfree_skb(skb); 1624 ppp_recv_unlock(ppp); 1625 } 1626 1627 void 1628 ppp_input(struct ppp_channel *chan, struct sk_buff *skb) 1629 { 1630 struct channel *pch = chan->ppp; 1631 int proto; 1632 1633 if (!pch) { 1634 kfree_skb(skb); 1635 return; 1636 } 1637 1638 read_lock_bh(&pch->upl); 1639 if (!pskb_may_pull(skb, 2)) { 1640 kfree_skb(skb); 1641 if (pch->ppp) { 1642 ++pch->ppp->dev->stats.rx_length_errors; 1643 ppp_receive_error(pch->ppp); 1644 } 1645 goto done; 1646 } 1647 1648 proto = PPP_PROTO(skb); 1649 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { 1650 /* put it on the channel queue */ 1651 skb_queue_tail(&pch->file.rq, skb); 1652 /* drop old frames if queue too long */ 1653 while (pch->file.rq.qlen > PPP_MAX_RQLEN && 1654 (skb = skb_dequeue(&pch->file.rq))) 1655 kfree_skb(skb); 1656 wake_up_interruptible(&pch->file.rwait); 1657 } else { 1658 ppp_do_recv(pch->ppp, skb, pch); 1659 } 1660 1661 done: 1662 read_unlock_bh(&pch->upl); 1663 } 1664 1665 /* Put a 0-length skb in the receive queue as an error indication */ 1666 void 1667 ppp_input_error(struct ppp_channel *chan, int code) 1668 { 1669 struct channel *pch = chan->ppp; 1670 struct sk_buff *skb; 1671 1672 if (!pch) 1673 return; 1674 1675 read_lock_bh(&pch->upl); 1676 if (pch->ppp) { 1677 skb = alloc_skb(0, GFP_ATOMIC); 1678 if (skb) { 1679 skb->len = 0; /* probably unnecessary */ 1680 skb->cb[0] = code; 1681 ppp_do_recv(pch->ppp, skb, pch); 1682 } 1683 } 1684 read_unlock_bh(&pch->upl); 1685 } 1686 1687 /* 1688 * We come in here to process a received frame. 1689 * The receive side of the ppp unit is locked. 1690 */ 1691 static void 1692 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1693 { 1694 /* note: a 0-length skb is used as an error indication */ 1695 if (skb->len > 0) { 1696 #ifdef CONFIG_PPP_MULTILINK 1697 /* XXX do channel-level decompression here */ 1698 if (PPP_PROTO(skb) == PPP_MP) 1699 ppp_receive_mp_frame(ppp, skb, pch); 1700 else 1701 #endif /* CONFIG_PPP_MULTILINK */ 1702 ppp_receive_nonmp_frame(ppp, skb); 1703 } else { 1704 kfree_skb(skb); 1705 ppp_receive_error(ppp); 1706 } 1707 } 1708 1709 static void 1710 ppp_receive_error(struct ppp *ppp) 1711 { 1712 ++ppp->dev->stats.rx_errors; 1713 if (ppp->vj) 1714 slhc_toss(ppp->vj); 1715 } 1716 1717 static void 1718 ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) 1719 { 1720 struct sk_buff *ns; 1721 int proto, len, npi; 1722 1723 /* 1724 * Decompress the frame, if compressed. 1725 * Note that some decompressors need to see uncompressed frames 1726 * that come in as well as compressed frames. 
1727 */ 1728 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) && 1729 (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0) 1730 skb = ppp_decompress_frame(ppp, skb); 1731 1732 if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR) 1733 goto err; 1734 1735 proto = PPP_PROTO(skb); 1736 switch (proto) { 1737 case PPP_VJC_COMP: 1738 /* decompress VJ compressed packets */ 1739 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) 1740 goto err; 1741 1742 if (skb_tailroom(skb) < 124 || skb_cloned(skb)) { 1743 /* copy to a new sk_buff with more tailroom */ 1744 ns = dev_alloc_skb(skb->len + 128); 1745 if (!ns) { 1746 netdev_err(ppp->dev, "PPP: no memory " 1747 "(VJ decomp)\n"); 1748 goto err; 1749 } 1750 skb_reserve(ns, 2); 1751 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); 1752 consume_skb(skb); 1753 skb = ns; 1754 } 1755 else 1756 skb->ip_summed = CHECKSUM_NONE; 1757 1758 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 1759 if (len <= 0) { 1760 netdev_printk(KERN_DEBUG, ppp->dev, 1761 "PPP: VJ decompression error\n"); 1762 goto err; 1763 } 1764 len += 2; 1765 if (len > skb->len) 1766 skb_put(skb, len - skb->len); 1767 else if (len < skb->len) 1768 skb_trim(skb, len); 1769 proto = PPP_IP; 1770 break; 1771 1772 case PPP_VJC_UNCOMP: 1773 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) 1774 goto err; 1775 1776 /* Until we fix the decompressor need to make sure 1777 * data portion is linear. 1778 */ 1779 if (!pskb_may_pull(skb, skb->len)) 1780 goto err; 1781 1782 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { 1783 netdev_err(ppp->dev, "PPP: VJ uncompressed error\n"); 1784 goto err; 1785 } 1786 proto = PPP_IP; 1787 break; 1788 1789 case PPP_CCP: 1790 ppp_ccp_peek(ppp, skb, 1); 1791 break; 1792 } 1793 1794 ++ppp->stats64.rx_packets; 1795 ppp->stats64.rx_bytes += skb->len - 2; 1796 1797 npi = proto_to_npindex(proto); 1798 if (npi < 0) { 1799 /* control or unknown frame - pass it to pppd */ 1800 skb_queue_tail(&ppp->file.rq, skb); 1801 /* limit queue length by dropping old frames */ 1802 while (ppp->file.rq.qlen > PPP_MAX_RQLEN && 1803 (skb = skb_dequeue(&ppp->file.rq))) 1804 kfree_skb(skb); 1805 /* wake up any process polling or blocking on read */ 1806 wake_up_interruptible(&ppp->file.rwait); 1807 1808 } else { 1809 /* network protocol frame - give it to the kernel */ 1810 1811 #ifdef CONFIG_PPP_FILTER 1812 /* check if the packet passes the pass and active filters */ 1813 /* the filter instructions are constructed assuming 1814 a four-byte PPP header on each packet */ 1815 if (ppp->pass_filter || ppp->active_filter) { 1816 if (skb_unclone(skb, GFP_ATOMIC)) 1817 goto err; 1818 1819 *skb_push(skb, 2) = 0; 1820 if (ppp->pass_filter && 1821 sk_run_filter(skb, ppp->pass_filter) == 0) { 1822 if (ppp->debug & 1) 1823 netdev_printk(KERN_DEBUG, ppp->dev, 1824 "PPP: inbound frame " 1825 "not passed\n"); 1826 kfree_skb(skb); 1827 return; 1828 } 1829 if (!(ppp->active_filter && 1830 sk_run_filter(skb, ppp->active_filter) == 0)) 1831 ppp->last_recv = jiffies; 1832 __skb_pull(skb, 2); 1833 } else 1834 #endif /* CONFIG_PPP_FILTER */ 1835 ppp->last_recv = jiffies; 1836 1837 if ((ppp->dev->flags & IFF_UP) == 0 || 1838 ppp->npmode[npi] != NPMODE_PASS) { 1839 kfree_skb(skb); 1840 } else { 1841 /* chop off protocol */ 1842 skb_pull_rcsum(skb, 2); 1843 skb->dev = ppp->dev; 1844 skb->protocol = htons(npindex_to_ethertype[npi]); 1845 skb_reset_mac_header(skb); 1846 netif_rx(skb); 1847 } 1848 } 1849 return; 1850 1851 err: 1852 kfree_skb(skb); 1853 ppp_receive_error(ppp); 1854 } 1855 
static struct sk_buff *
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *ns;
	int len;

	/* Until we fix all the decompressors, we need to make sure
	 * the data portion is linear.
	 */
	if (!pskb_may_pull(skb, skb->len))
		goto err;

	if (proto == PPP_COMP) {
		int obuff_size;

		switch (ppp->rcomp->compress_proto) {
		case CI_MPPE:
			obuff_size = ppp->mru + PPP_HDRLEN + 1;
			break;
		default:
			obuff_size = ppp->mru + PPP_HDRLEN;
			break;
		}

		ns = dev_alloc_skb(obuff_size);
		if (!ns) {
			netdev_err(ppp->dev, "ppp_decompress_frame: "
				   "no memory\n");
			goto err;
		}
		/* the decompressor still expects the A/C bytes in the hdr */
		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
				skb->len + 2, ns->data, obuff_size);
		if (len < 0) {
			/* Pass the compressed frame to pppd as an
			   error indication. */
			if (len == DECOMP_FATALERROR)
				ppp->rstate |= SC_DC_FERROR;
			kfree_skb(ns);
			goto err;
		}

		consume_skb(skb);
		skb = ns;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off the A/C bytes */

	} else {
		/* Uncompressed frame - pass to decompressor so it
		   can update its dictionary if necessary. */
		if (ppp->rcomp->incomp)
			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
					   skb->len + 2);
	}

	return skb;

 err:
	ppp->rstate |= SC_DC_ERROR;
	ppp_receive_error(ppp);
	return skb;
}

#ifdef CONFIG_PPP_MULTILINK
/*
 * Receive a multilink frame.
 * We put it on the reconstruction queue and then pull off
 * as many completed frames as we can.
 */
static void
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	u32 mask, seq;
	struct channel *ch;
	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;

	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
		goto err;	/* no good, throw it away */

	/* Decode sequence number and begin/end bits */
	if (ppp->flags & SC_MP_SHORTSEQ) {
		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
		mask = 0xfff;
	} else {
		seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
		mask = 0xffffff;
	}
	PPP_MP_CB(skb)->BEbits = skb->data[2];
	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */

	/*
	 * Do protocol ID decompression on the first fragment of each packet.
	 */
	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
		*skb_push(skb, 1) = 0;

	/*
	 * Expand sequence number to 32 bits, making it as close
	 * as possible to ppp->minseq.
	 */
	seq |= ppp->minseq & ~mask;
	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
		seq += mask + 1;
	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
		seq -= mask + 1;	/* should never happen */
	PPP_MP_CB(skb)->sequence = seq;
	pch->lastseq = seq;

	/*
	 * If this packet comes before the next one we were expecting,
	 * drop it.
	 */
	if (seq_before(seq, ppp->nextseq)) {
		kfree_skb(skb);
		++ppp->dev->stats.rx_dropped;
		ppp_receive_error(ppp);
		return;
	}

	/*
	 * Reevaluate minseq, the minimum over all channels of the
	 * last sequence number received on each channel.
	 * Because of the increasing sequence number rule, we know
	 * that any fragment before `minseq' which hasn't arrived is
	 * never going to arrive.
	 * The list of channels can't change because we have the receive
	 * side of the ppp unit locked.
	 */
	list_for_each_entry(ch, &ppp->channels, clist) {
		if (seq_before(ch->lastseq, seq))
			seq = ch->lastseq;
	}
	if (seq_before(ppp->minseq, seq))
		ppp->minseq = seq;

	/* Put the fragment on the reconstruction queue */
	ppp_mp_insert(ppp, skb);

	/* If the queue is getting long, don't wait any longer for packets
	   before the start of the queue. */
	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
		struct sk_buff *mskb = skb_peek(&ppp->mrq);
		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
			ppp->minseq = PPP_MP_CB(mskb)->sequence;
	}

	/* Pull completed packets off the queue and receive them. */
	while ((skb = ppp_mp_reconstruct(ppp))) {
		if (pskb_may_pull(skb, 2))
			ppp_receive_nonmp_frame(ppp, skb);
		else {
			++ppp->dev->stats.rx_length_errors;
			kfree_skb(skb);
			ppp_receive_error(ppp);
		}
	}

	return;

 err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}

/*
 * Insert a fragment on the MP reconstruction queue.
 * The queue is ordered by increasing sequence number.
 */
static void
ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct sk_buff_head *list = &ppp->mrq;
	u32 seq = PPP_MP_CB(skb)->sequence;

	/* N.B. we don't need to lock the list lock because we have the
	   ppp unit receive-side lock. */
	skb_queue_walk(list, p) {
		if (seq_before(seq, PPP_MP_CB(p)->sequence))
			break;
	}
	__skb_queue_before(list, p, skb);
}

/*
 * Reconstruct a packet from the MP fragment queue.
 * We go through increasing sequence numbers until we find a
 * complete packet, or we get to the sequence number for a fragment
 * which hasn't arrived but might still do so.
 */
static struct sk_buff *
ppp_mp_reconstruct(struct ppp *ppp)
{
	u32 seq = ppp->nextseq;
	u32 minseq = ppp->minseq;
	struct sk_buff_head *list = &ppp->mrq;
	struct sk_buff *p, *tmp;
	struct sk_buff *head, *tail;
	struct sk_buff *skb = NULL;
	int lost = 0, len = 0;

	if (ppp->mrru == 0)	/* do nothing until mrru is set */
		return NULL;
	head = list->next;
	tail = NULL;
	skb_queue_walk_safe(list, p, tmp) {
	again:
		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
			/* this can't happen; ignore the skb anyway */
			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
				   "seq %u < %u\n",
				   PPP_MP_CB(p)->sequence, seq);
			__skb_unlink(p, list);
			kfree_skb(p);
			continue;
		}
		if (PPP_MP_CB(p)->sequence != seq) {
			u32 oldseq;
			/* Fragment `seq' is missing.  If it is after
			   minseq, it might arrive later, so stop here. */
			if (seq_after(seq, minseq))
				break;
			/* Fragment `seq' is lost, keep going. */
			lost = 1;
			oldseq = seq;
			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
				minseq + 1 : PPP_MP_CB(p)->sequence;

			if (ppp->debug & 1)
				netdev_printk(KERN_DEBUG, ppp->dev,
					      "lost frag %u..%u\n",
					      oldseq, seq - 1);

			goto again;
		}

		/*
		 * At this point we know that all the fragments from
		 * ppp->nextseq to seq are either present or lost.
/*
 * Insert a fragment on the MP reconstruction queue.
 * The queue is ordered by increasing sequence number.
 */
static void
ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct sk_buff_head *list = &ppp->mrq;
	u32 seq = PPP_MP_CB(skb)->sequence;

	/* N.B. we don't need to lock the list lock because we have the
	   ppp unit receive-side lock. */
	skb_queue_walk(list, p) {
		if (seq_before(seq, PPP_MP_CB(p)->sequence))
			break;
	}
	__skb_queue_before(list, p, skb);
}

/*
 * Reconstruct a packet from the MP fragment queue.
 * We go through increasing sequence numbers until we find a
 * complete packet, or we get to the sequence number for a fragment
 * which hasn't arrived but might still do so.
 */
static struct sk_buff *
ppp_mp_reconstruct(struct ppp *ppp)
{
	u32 seq = ppp->nextseq;
	u32 minseq = ppp->minseq;
	struct sk_buff_head *list = &ppp->mrq;
	struct sk_buff *p, *tmp;
	struct sk_buff *head, *tail;
	struct sk_buff *skb = NULL;
	int lost = 0, len = 0;

	if (ppp->mrru == 0)	/* do nothing until mrru is set */
		return NULL;
	head = list->next;
	tail = NULL;
	skb_queue_walk_safe(list, p, tmp) {
	again:
		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
			/* this can't happen, anyway ignore the skb */
			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
				   "seq %u < %u\n",
				   PPP_MP_CB(p)->sequence, seq);
			__skb_unlink(p, list);
			kfree_skb(p);
			continue;
		}
		if (PPP_MP_CB(p)->sequence != seq) {
			u32 oldseq;
			/* Fragment `seq' is missing.  If it is after
			   minseq, it might arrive later, so stop here. */
			if (seq_after(seq, minseq))
				break;
			/* Fragment `seq' is lost, keep going. */
			lost = 1;
			oldseq = seq;
			seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
				minseq + 1: PPP_MP_CB(p)->sequence;

			if (ppp->debug & 1)
				netdev_printk(KERN_DEBUG, ppp->dev,
					      "lost frag %u..%u\n",
					      oldseq, seq-1);

			goto again;
		}

		/*
		 * At this point we know that all the fragments from
		 * ppp->nextseq to seq are either present or lost.
		 * Also, there are no complete packets in the queue
		 * that have no missing fragments and end before this
		 * fragment.
		 */

		/* B bit set indicates this fragment starts a packet */
		if (PPP_MP_CB(p)->BEbits & B) {
			head = p;
			lost = 0;
			len = 0;
		}

		len += p->len;

		/* Got a complete packet yet? */
		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
		    (PPP_MP_CB(head)->BEbits & B)) {
			if (len > ppp->mrru + 2) {
				++ppp->dev->stats.rx_length_errors;
				netdev_printk(KERN_DEBUG, ppp->dev,
					      "PPP: reconstructed packet"
					      " is too long (%d)\n", len);
			} else {
				tail = p;
				break;
			}
			ppp->nextseq = seq + 1;
		}

		/*
		 * If this is the ending fragment of a packet,
		 * and we haven't found a complete valid packet yet,
		 * we can discard up to and including this fragment.
		 */
		if (PPP_MP_CB(p)->BEbits & E) {
			struct sk_buff *tmp2;

			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
				if (ppp->debug & 1)
					netdev_printk(KERN_DEBUG, ppp->dev,
						      "discarding frag %u\n",
						      PPP_MP_CB(p)->sequence);
				__skb_unlink(p, list);
				kfree_skb(p);
			}
			head = skb_peek(list);
			if (!head)
				break;
		}
		++seq;
	}

	/* If we have a complete packet, copy it all into one skb. */
	if (tail != NULL) {
		/* If we have discarded any fragments,
		   signal a receive error. */
		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
			skb_queue_walk_safe(list, p, tmp) {
				if (p == head)
					break;
				if (ppp->debug & 1)
					netdev_printk(KERN_DEBUG, ppp->dev,
						      "discarding frag %u\n",
						      PPP_MP_CB(p)->sequence);
				__skb_unlink(p, list);
				kfree_skb(p);
			}

			if (ppp->debug & 1)
				netdev_printk(KERN_DEBUG, ppp->dev,
					      "  missed pkts %u..%u\n",
					      ppp->nextseq,
					      PPP_MP_CB(head)->sequence-1);
			++ppp->dev->stats.rx_dropped;
			ppp_receive_error(ppp);
		}

		skb = head;
		if (head != tail) {
			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
			p = skb_queue_next(list, head);
			__skb_unlink(skb, list);
			skb_queue_walk_from_safe(list, p, tmp) {
				__skb_unlink(p, list);
				*fragpp = p;
				p->next = NULL;
				fragpp = &p->next;

				skb->len += p->len;
				skb->data_len += p->len;
				skb->truesize += p->truesize;

				if (p == tail)
					break;
			}
		} else {
			__skb_unlink(skb, list);
		}

		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
	}

	return skb;
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Channel interface.
 */

/* Create a new, unattached ppp channel. */
int ppp_register_channel(struct ppp_channel *chan)
{
	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
}
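/*
 * Typical usage from a channel driver (a sketch under assumed names --
 * mychan_ops, mychan_start_xmit and mc are hypothetical; see pppoe.c
 * or ppp_async.c for real callers):
 *
 *	static const struct ppp_channel_ops mychan_ops = {
 *		.start_xmit = mychan_start_xmit,
 *	};
 *
 *	mc->chan.private = mc;
 *	mc->chan.ops     = &mychan_ops;
 *	mc->chan.mtu     = 1500;
 *	mc->chan.hdrlen  = 2;
 *	err = ppp_register_channel(&mc->chan);
 *
 * After this the channel has an index (ppp_channel_index()) but is not
 * yet attached to any unit; that happens via ppp_connect_channel().
 */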
/* Create a new, unattached ppp channel for the specified net. */
int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
{
	struct channel *pch;
	struct ppp_net *pn;

	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
	if (!pch)
		return -ENOMEM;

	pn = ppp_pernet(net);

	pch->ppp = NULL;
	pch->chan = chan;
	pch->chan_net = net;
	chan->ppp = pch;
	init_ppp_file(&pch->file, CHANNEL);
	pch->file.hdrlen = chan->hdrlen;
#ifdef CONFIG_PPP_MULTILINK
	pch->lastseq = -1;
#endif /* CONFIG_PPP_MULTILINK */
	init_rwsem(&pch->chan_sem);
	spin_lock_init(&pch->downl);
	rwlock_init(&pch->upl);

	spin_lock_bh(&pn->all_channels_lock);
	pch->file.index = ++pn->last_channel_index;
	list_add(&pch->list, &pn->new_channels);
	atomic_inc(&channel_count);
	spin_unlock_bh(&pn->all_channels_lock);

	return 0;
}

/*
 * Return the index of a channel.
 */
int ppp_channel_index(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;

	if (pch)
		return pch->file.index;
	return -1;
}

/*
 * Return the PPP unit number to which a channel is connected.
 */
int ppp_unit_number(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	int unit = -1;

	if (pch) {
		read_lock_bh(&pch->upl);
		if (pch->ppp)
			unit = pch->ppp->file.index;
		read_unlock_bh(&pch->upl);
	}
	return unit;
}

/*
 * Return the PPP device interface name of a channel.
 */
char *ppp_dev_name(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	char *name = NULL;

	if (pch) {
		read_lock_bh(&pch->upl);
		if (pch->ppp && pch->ppp->dev)
			name = pch->ppp->dev->name;
		read_unlock_bh(&pch->upl);
	}
	return name;
}


/*
 * Disconnect a channel from the generic layer.
 * This must be called in process context.
 */
void
ppp_unregister_channel(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	struct ppp_net *pn;

	if (!pch)
		return;		/* should never happen */

	chan->ppp = NULL;

	/*
	 * This ensures that we have returned from any calls into
	 * the channel's start_xmit or ioctl routine before we proceed.
	 */
	down_write(&pch->chan_sem);
	spin_lock_bh(&pch->downl);
	pch->chan = NULL;
	spin_unlock_bh(&pch->downl);
	up_write(&pch->chan_sem);
	ppp_disconnect_channel(pch);

	pn = ppp_pernet(pch->chan_net);
	spin_lock_bh(&pn->all_channels_lock);
	list_del(&pch->list);
	spin_unlock_bh(&pn->all_channels_lock);

	pch->file.dead = 1;
	wake_up_interruptible(&pch->file.rwait);
	if (atomic_dec_and_test(&pch->file.refcnt))
		ppp_destroy_channel(pch);
}

/*
 * Callback from a channel when it can accept more to transmit.
 * This should be called at BH/softirq level, not interrupt level.
 */
void
ppp_output_wakeup(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;

	if (!pch)
		return;
	ppp_channel_push(pch);
}
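/*
 * The flow-control handshake, seen from the driver's side (sketch; the
 * mychan_* names and the TX_BUSY bit are hypothetical): if the
 * channel's start_xmit returns 0, the generic layer holds on to the
 * skb and stops pushing.  Once the hardware drains, the driver
 * re-opens the tap from BH context:
 *
 *	static void mychan_tx_done(struct mychan *mc)
 *	{
 *		clear_bit(TX_BUSY, &mc->flags);
 *		ppp_output_wakeup(&mc->chan);
 *	}
 */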
/*
 * Compression control.
 */

/* Process the PPPIOCSCOMPRESS ioctl. */
static int
ppp_set_compress(struct ppp *ppp, unsigned long arg)
{
	int err;
	struct compressor *cp, *ocomp;
	struct ppp_option_data data;
	void *state, *ostate;
	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];

	err = -EFAULT;
	if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
	    (data.length <= CCP_MAX_OPTION_LENGTH &&
	     copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
		goto out;
	err = -EINVAL;
	if (data.length > CCP_MAX_OPTION_LENGTH ||
	    ccp_option[1] < 2 || ccp_option[1] > data.length)
		goto out;

	cp = try_then_request_module(
		find_compressor(ccp_option[0]),
		"ppp-compress-%d", ccp_option[0]);
	if (!cp)
		goto out;

	err = -ENOBUFS;
	if (data.transmit) {
		state = cp->comp_alloc(ccp_option, data.length);
		if (state) {
			ppp_xmit_lock(ppp);
			ppp->xstate &= ~SC_COMP_RUN;
			ocomp = ppp->xcomp;
			ostate = ppp->xc_state;
			ppp->xcomp = cp;
			ppp->xc_state = state;
			ppp_xmit_unlock(ppp);
			if (ostate) {
				ocomp->comp_free(ostate);
				module_put(ocomp->owner);
			}
			err = 0;
		} else
			module_put(cp->owner);

	} else {
		state = cp->decomp_alloc(ccp_option, data.length);
		if (state) {
			ppp_recv_lock(ppp);
			ppp->rstate &= ~SC_DECOMP_RUN;
			ocomp = ppp->rcomp;
			ostate = ppp->rc_state;
			ppp->rcomp = cp;
			ppp->rc_state = state;
			ppp_recv_unlock(ppp);
			if (ostate) {
				ocomp->decomp_free(ostate);
				module_put(ocomp->owner);
			}
			err = 0;
		} else
			module_put(cp->owner);
	}

 out:
	return err;
}
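/*
 * From userspace this is reached through the PPPIOCSCOMPRESS ioctl on
 * the unit fd; a sketch of how pppd-like code fills in struct
 * ppp_option_data (the option bytes are placeholders, not a real CCP
 * negotiation -- opt[0] is the CCP option type, opt[1] its length):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/ppp-ioctl.h>
 *
 *	unsigned char opt[2] = { 0x1a, 2 };
 *	struct ppp_option_data od = {
 *		.ptr      = opt,
 *		.length   = sizeof(opt),
 *		.transmit = 1,
 *	};
 *	if (ioctl(unit_fd, PPPIOCSCOMPRESS, &od) < 0)
 *		perror("PPPIOCSCOMPRESS");
 *
 * transmit == 1 installs the compressor, transmit == 0 the
 * decompressor; the kernel may modprobe ppp-compress-<type> if the
 * compressor is not yet registered.
 */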
2435 * 2436 * Remember: 2437 * A ConfReq indicates what the sender would like to receive 2438 */ 2439 if(inbound) 2440 /* He is proposing what I should send */ 2441 ppp->xstate &= ~SC_COMP_RUN; 2442 else 2443 /* I am proposing to what he should send */ 2444 ppp->rstate &= ~SC_DECOMP_RUN; 2445 2446 break; 2447 2448 case CCP_TERMREQ: 2449 case CCP_TERMACK: 2450 /* 2451 * CCP is going down, both directions of transmission 2452 */ 2453 ppp->rstate &= ~SC_DECOMP_RUN; 2454 ppp->xstate &= ~SC_COMP_RUN; 2455 break; 2456 2457 case CCP_CONFACK: 2458 if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN) 2459 break; 2460 len = CCP_LENGTH(dp); 2461 if (!pskb_may_pull(skb, len + 2)) 2462 return; /* too short */ 2463 dp += CCP_HDRLEN; 2464 len -= CCP_HDRLEN; 2465 if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp)) 2466 break; 2467 if (inbound) { 2468 /* we will start receiving compressed packets */ 2469 if (!ppp->rc_state) 2470 break; 2471 if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len, 2472 ppp->file.index, 0, ppp->mru, ppp->debug)) { 2473 ppp->rstate |= SC_DECOMP_RUN; 2474 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR); 2475 } 2476 } else { 2477 /* we will soon start sending compressed packets */ 2478 if (!ppp->xc_state) 2479 break; 2480 if (ppp->xcomp->comp_init(ppp->xc_state, dp, len, 2481 ppp->file.index, 0, ppp->debug)) 2482 ppp->xstate |= SC_COMP_RUN; 2483 } 2484 break; 2485 2486 case CCP_RESETACK: 2487 /* reset the [de]compressor */ 2488 if ((ppp->flags & SC_CCP_UP) == 0) 2489 break; 2490 if (inbound) { 2491 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) { 2492 ppp->rcomp->decomp_reset(ppp->rc_state); 2493 ppp->rstate &= ~SC_DC_ERROR; 2494 } 2495 } else { 2496 if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN)) 2497 ppp->xcomp->comp_reset(ppp->xc_state); 2498 } 2499 break; 2500 } 2501 } 2502 2503 /* Free up compression resources. */ 2504 static void 2505 ppp_ccp_closed(struct ppp *ppp) 2506 { 2507 void *xstate, *rstate; 2508 struct compressor *xcomp, *rcomp; 2509 2510 ppp_lock(ppp); 2511 ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP); 2512 ppp->xstate = 0; 2513 xcomp = ppp->xcomp; 2514 xstate = ppp->xc_state; 2515 ppp->xc_state = NULL; 2516 ppp->rstate = 0; 2517 rcomp = ppp->rcomp; 2518 rstate = ppp->rc_state; 2519 ppp->rc_state = NULL; 2520 ppp_unlock(ppp); 2521 2522 if (xstate) { 2523 xcomp->comp_free(xstate); 2524 module_put(xcomp->owner); 2525 } 2526 if (rstate) { 2527 rcomp->decomp_free(rstate); 2528 module_put(rcomp->owner); 2529 } 2530 } 2531 2532 /* List of compressors. 
/* List of compressors. */
static LIST_HEAD(compressor_list);
static DEFINE_SPINLOCK(compressor_list_lock);

struct compressor_entry {
	struct list_head list;
	struct compressor *comp;
};

static struct compressor_entry *
find_comp_entry(int proto)
{
	struct compressor_entry *ce;

	list_for_each_entry(ce, &compressor_list, list) {
		if (ce->comp->compress_proto == proto)
			return ce;
	}
	return NULL;
}

/* Register a compressor */
int
ppp_register_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;
	int ret;

	spin_lock(&compressor_list_lock);
	ret = -EEXIST;
	if (find_comp_entry(cp->compress_proto))
		goto out;
	ret = -ENOMEM;
	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
	if (!ce)
		goto out;
	ret = 0;
	ce->comp = cp;
	list_add(&ce->list, &compressor_list);
 out:
	spin_unlock(&compressor_list_lock);
	return ret;
}

/* Unregister a compressor */
void
ppp_unregister_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(cp->compress_proto);
	if (ce && ce->comp == cp) {
		list_del(&ce->list);
		kfree(ce);
	}
	spin_unlock(&compressor_list_lock);
}

/* Find a compressor. */
static struct compressor *
find_compressor(int type)
{
	struct compressor_entry *ce;
	struct compressor *cp = NULL;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(type);
	if (ce) {
		cp = ce->comp;
		if (!try_module_get(cp->owner))
			cp = NULL;
	}
	spin_unlock(&compressor_list_lock);
	return cp;
}
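/*
 * A compressor module ties into this list from its init/exit hooks.
 * A sketch under assumed names (my_comp and the my_* callbacks are
 * hypothetical; bsd_comp.c and ppp_deflate.c are the in-tree models;
 * comp_init/comp_reset, incomp and the stats hooks are omitted here
 * for brevity):
 *
 *	static struct compressor my_comp = {
 *		.compress_proto = 0x1a,
 *		.comp_alloc     = my_comp_alloc,
 *		.comp_free      = my_comp_free,
 *		.compress       = my_compress,
 *		.decomp_alloc   = my_decomp_alloc,
 *		.decomp_free    = my_decomp_free,
 *		.decompress     = my_decompress,
 *		.owner          = THIS_MODULE,
 *	};
 *
 *	static int __init my_comp_init_module(void)
 *	{
 *		return ppp_register_compressor(&my_comp);
 *	}
 *
 *	static void __exit my_comp_cleanup_module(void)
 *	{
 *		ppp_unregister_compressor(&my_comp);
 *	}
 *
 * Adding MODULE_ALIAS("ppp-compress-26") (for option type 26, say)
 * lets the try_then_request_module() call in ppp_set_compress()
 * autoload the module on demand.
 */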
/*
 * Miscellaneous stuff.
 */

static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
	struct slcompress *vj = ppp->vj;

	memset(st, 0, sizeof(*st));
	st->p.ppp_ipackets = ppp->stats64.rx_packets;
	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
	st->p.ppp_opackets = ppp->stats64.tx_packets;
	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
	st->p.ppp_obytes = ppp->stats64.tx_bytes;
	if (!vj)
		return;
	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
	st->vj.vjs_compressed = vj->sls_o_compressed;
	st->vj.vjs_searches = vj->sls_o_searches;
	st->vj.vjs_misses = vj->sls_o_misses;
	st->vj.vjs_errorin = vj->sls_i_error;
	st->vj.vjs_tossed = vj->sls_i_tossed;
	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
	st->vj.vjs_compressedin = vj->sls_i_compressed;
}

/*
 * Stuff for handling the lists of ppp units and channels
 * and for initialization.
 */

/*
 * Create a new ppp interface unit.  Fails if it can't allocate memory
 * or if there is already a unit with the requested number.
 * unit == -1 means allocate a new number.
 */
static struct ppp *
ppp_create_interface(struct net *net, int unit, int *retp)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	struct net_device *dev = NULL;
	int ret = -ENOMEM;
	int i;

	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
	if (!dev)
		goto out1;

	pn = ppp_pernet(net);

	ppp = netdev_priv(dev);
	ppp->dev = dev;
	ppp->mru = PPP_MRU;
	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
	for (i = 0; i < NUM_NP; ++i)
		ppp->npmode[i] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);
#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */

	/*
	 * Don't forget to set the net namespace this
	 * device belongs to.
	 */
	dev_net_set(dev, net);

	mutex_lock(&pn->all_ppp_mutex);

	if (unit < 0) {
		unit = unit_get(&pn->units_idr, ppp);
		if (unit < 0) {
			ret = unit;
			goto out2;
		}
	} else {
		ret = -EEXIST;
		if (unit_find(&pn->units_idr, unit))
			goto out2;	/* unit already exists */
		/*
		 * If the caller asked for a specific unit number,
		 * try to satisfy the request; otherwise the caller
		 * should ask us to pick a new one.
		 *
		 * NOTE: returning -EEXIST is not entirely fair, but
		 * at least pppd will then ask us to allocate a new
		 * unit, so the user still ends up happy.
		 */
		unit = unit_set(&pn->units_idr, ppp, unit);
		if (unit < 0)
			goto out2;
	}

	/* Initialize the new ppp unit */
	ppp->file.index = unit;
	sprintf(dev->name, "ppp%d", unit);

	ret = register_netdev(dev);
	if (ret != 0) {
		unit_put(&pn->units_idr, unit);
		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
			   dev->name, ret);
		goto out2;
	}

	ppp->ppp_net = net;

	atomic_inc(&ppp_unit_count);
	mutex_unlock(&pn->all_ppp_mutex);

	*retp = 0;
	return ppp;

out2:
	mutex_unlock(&pn->all_ppp_mutex);
	free_netdev(dev);
out1:
	*retp = ret;
	return NULL;
}

/*
 * Initialize a ppp_file structure.
 */
static void
init_ppp_file(struct ppp_file *pf, int kind)
{
	pf->kind = kind;
	skb_queue_head_init(&pf->xq);
	skb_queue_head_init(&pf->rq);
	atomic_set(&pf->refcnt, 1);
	init_waitqueue_head(&pf->rwait);
}

/*
 * Take down a ppp interface unit - called when the owning file
 * (the one that created the unit) is closed or detached.
 */
static void ppp_shutdown_interface(struct ppp *ppp)
{
	struct ppp_net *pn;

	pn = ppp_pernet(ppp->ppp_net);
	mutex_lock(&pn->all_ppp_mutex);

	/* This will call dev_close() for us. */
	ppp_lock(ppp);
	if (!ppp->closing) {
		ppp->closing = 1;
		ppp_unlock(ppp);
		unregister_netdev(ppp->dev);
		unit_put(&pn->units_idr, ppp->file.index);
	} else
		ppp_unlock(ppp);

	ppp->file.dead = 1;
	ppp->owner = NULL;
	wake_up_interruptible(&ppp->file.rwait);

	mutex_unlock(&pn->all_ppp_mutex);
}
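/*
 * How a unit normally comes into existence, seen from userspace
 * (sketch; error handling trimmed): passing -1 lets the kernel pick
 * the unit number, and the variable is updated with the number
 * actually allocated.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ppp-ioctl.h>
 *
 *	int unit = -1;
 *	int fd = open("/dev/ppp", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, PPPIOCNEWUNIT, &unit) == 0)
 *		printf("got ppp%d\n", unit);
 *
 * Requesting a specific number that is already taken fails with
 * -EEXIST, after which pppd retries with -1 as noted above.
 */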
/*
 * Free the memory used by a ppp unit.  This is only called once
 * there are no channels connected to the unit and no file structs
 * that reference the unit.
 */
static void ppp_destroy_interface(struct ppp *ppp)
{
	atomic_dec(&ppp_unit_count);

	if (!ppp->file.dead || ppp->n_channels) {
		/* "can't happen" */
		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
			   "but dead=%d n_channels=%d !\n",
			   ppp, ppp->file.dead, ppp->n_channels);
		return;
	}

	ppp_ccp_closed(ppp);
	if (ppp->vj) {
		slhc_free(ppp->vj);
		ppp->vj = NULL;
	}
	skb_queue_purge(&ppp->file.xq);
	skb_queue_purge(&ppp->file.rq);
#ifdef CONFIG_PPP_MULTILINK
	skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	kfree(ppp->pass_filter);
	ppp->pass_filter = NULL;
	kfree(ppp->active_filter);
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	kfree_skb(ppp->xmit_pending);

	free_netdev(ppp->dev);
}

/*
 * Locate an existing ppp unit.
 * The caller should have locked the all_ppp_mutex.
 */
static struct ppp *
ppp_find_unit(struct ppp_net *pn, int unit)
{
	return unit_find(&pn->units_idr, unit);
}

/*
 * Locate an existing ppp channel.
 * The caller should have locked the all_channels_lock.
 * First we look in the new_channels list, then in the
 * all_channels list.  If found in the new_channels list,
 * we move it to the all_channels list.  This is for speed
 * when we have a lot of channels in use.
 */
static struct channel *
ppp_find_channel(struct ppp_net *pn, int unit)
{
	struct channel *pch;

	list_for_each_entry(pch, &pn->new_channels, list) {
		if (pch->file.index == unit) {
			list_move(&pch->list, &pn->all_channels);
			return pch;
		}
	}

	list_for_each_entry(pch, &pn->all_channels, list) {
		if (pch->file.index == unit)
			return pch;
	}

	return NULL;
}

/*
 * Connect a PPP channel to a PPP interface unit.
 */
static int
ppp_connect_channel(struct channel *pch, int unit)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	int ret = -ENXIO;
	int hdrlen;

	pn = ppp_pernet(pch->chan_net);

	mutex_lock(&pn->all_ppp_mutex);
	ppp = ppp_find_unit(pn, unit);
	if (!ppp)
		goto out;
	write_lock_bh(&pch->upl);
	ret = -EINVAL;
	if (pch->ppp)
		goto outl;

	ppp_lock(ppp);
	if (pch->file.hdrlen > ppp->file.hdrlen)
		ppp->file.hdrlen = pch->file.hdrlen;
	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
	if (hdrlen > ppp->dev->hard_header_len)
		ppp->dev->hard_header_len = hdrlen;
	list_add_tail(&pch->clist, &ppp->channels);
	++ppp->n_channels;
	pch->ppp = ppp;
	atomic_inc(&ppp->file.refcnt);
	ppp_unlock(ppp);
	ret = 0;

 outl:
	write_unlock_bh(&pch->upl);
 out:
	mutex_unlock(&pn->all_ppp_mutex);
	return ret;
}
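/*
 * Userspace reaches ppp_connect_channel() with a pair of ioctls on a
 * second /dev/ppp fd (sketch; chan_idx comes from the channel driver,
 * e.g. via PPPIOCGCHAN on the tty for async channels):
 *
 *	int cfd = open("/dev/ppp", O_RDWR);
 *	ioctl(cfd, PPPIOCATTCHAN, &chan_idx);
 *	ioctl(cfd, PPPIOCCONNECT, &unit);
 *
 * PPPIOCATTCHAN binds the fd to the channel; PPPIOCCONNECT then
 * bridges that channel onto the given unit, which is what bumps
 * n_channels and the unit's refcount above.
 */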
/*
 * Disconnect a channel from its ppp unit.
 */
static int
ppp_disconnect_channel(struct channel *pch)
{
	struct ppp *ppp;
	int err = -EINVAL;

	write_lock_bh(&pch->upl);
	ppp = pch->ppp;
	pch->ppp = NULL;
	write_unlock_bh(&pch->upl);
	if (ppp) {
		/* remove it from the ppp unit's list */
		ppp_lock(ppp);
		list_del(&pch->clist);
		if (--ppp->n_channels == 0)
			wake_up_interruptible(&ppp->file.rwait);
		ppp_unlock(ppp);
		if (atomic_dec_and_test(&ppp->file.refcnt))
			ppp_destroy_interface(ppp);
		err = 0;
	}
	return err;
}

/*
 * Free up the resources used by a ppp channel.
 */
static void ppp_destroy_channel(struct channel *pch)
{
	atomic_dec(&channel_count);

	if (!pch->file.dead) {
		/* "can't happen" */
		pr_err("ppp: destroying undead channel %p !\n", pch);
		return;
	}
	skb_queue_purge(&pch->file.xq);
	skb_queue_purge(&pch->file.rq);
	kfree(pch);
}

static void __exit ppp_cleanup(void)
{
	/* should never happen */
	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
		pr_err("PPP: removing module but units remain!\n");
	unregister_chrdev(PPP_MAJOR, "ppp");
	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
	class_destroy(ppp_class);
	unregister_pernet_device(&ppp_net_ops);
}

/*
 * Units handling.  Caller must protect concurrent access
 * by holding the all_ppp_mutex.
 */

/* associate pointer with specified number */
static int unit_set(struct idr *p, void *ptr, int n)
{
	int unit;

	unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
	if (unit == -ENOSPC)
		unit = -EINVAL;
	return unit;
}

/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
	return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
}

/* put unit number back to a pool */
static void unit_put(struct idr *p, int n)
{
	idr_remove(p, n);
}

/* get pointer associated with the number */
static void *unit_find(struct idr *p, int n)
{
	return idr_find(p, n);
}

/* Module/initialization stuff */

module_init(ppp_init);
module_exit(ppp_cleanup);

EXPORT_SYMBOL(ppp_register_net_channel);
EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS("devname:ppp");