1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Generic PPP layer for Linux. 4 * 5 * Copyright 1999-2002 Paul Mackerras. 6 * 7 * The generic PPP layer handles the PPP network interfaces, the 8 * /dev/ppp device, packet and VJ compression, and multilink. 9 * It talks to PPP `channels' via the interface defined in 10 * include/linux/ppp_channel.h. Channels provide the basic means for 11 * sending and receiving PPP frames on some kind of communications 12 * channel. 13 * 14 * Part of the code in this driver was inspired by the old async-only 15 * PPP driver, written by Michael Callahan and Al Longyear, and 16 * subsequently hacked by Paul Mackerras. 17 * 18 * ==FILEVERSION 20041108== 19 */ 20 21 #include <linux/module.h> 22 #include <linux/kernel.h> 23 #include <linux/sched/signal.h> 24 #include <linux/kmod.h> 25 #include <linux/init.h> 26 #include <linux/list.h> 27 #include <linux/idr.h> 28 #include <linux/netdevice.h> 29 #include <linux/poll.h> 30 #include <linux/ppp_defs.h> 31 #include <linux/filter.h> 32 #include <linux/ppp-ioctl.h> 33 #include <linux/ppp_channel.h> 34 #include <linux/ppp-comp.h> 35 #include <linux/skbuff.h> 36 #include <linux/rtnetlink.h> 37 #include <linux/if_arp.h> 38 #include <linux/ip.h> 39 #include <linux/tcp.h> 40 #include <linux/spinlock.h> 41 #include <linux/rwsem.h> 42 #include <linux/stddef.h> 43 #include <linux/device.h> 44 #include <linux/mutex.h> 45 #include <linux/slab.h> 46 #include <linux/file.h> 47 #include <linux/unaligned.h> 48 #include <net/netdev_lock.h> 49 #include <net/slhc_vj.h> 50 #include <linux/atomic.h> 51 #include <linux/refcount.h> 52 53 #include <linux/nsproxy.h> 54 #include <net/net_namespace.h> 55 #include <net/netns/generic.h> 56 57 #define PPP_VERSION "2.4.2" 58 59 /* 60 * Network protocols we support. 61 */ 62 #define NP_IP 0 /* Internet Protocol V4 */ 63 #define NP_IPV6 1 /* Internet Protocol V6 */ 64 #define NP_IPX 2 /* IPX protocol */ 65 #define NP_AT 3 /* Appletalk protocol */ 66 #define NP_MPLS_UC 4 /* MPLS unicast */ 67 #define NP_MPLS_MC 5 /* MPLS multicast */ 68 #define NUM_NP 6 /* Number of NPs. */ 69 70 #define MPHDRLEN 6 /* multilink protocol header length */ 71 #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ 72 73 #define PPP_PROTO_LEN 2 74 #define PPP_LCP_HDRLEN 4 75 76 /* The filter instructions generated by libpcap are constructed 77 * assuming a four-byte PPP header on each packet, where the last 78 * 2 bytes are the protocol field defined in the RFC and the first 79 * byte of the first 2 bytes indicates the direction. 80 * The second byte is currently unused, but we still need to initialize 81 * it to prevent crafted BPF programs from reading them which would 82 * cause reading of uninitialized data. 83 */ 84 #define PPP_FILTER_OUTBOUND_TAG 0x0100 85 #define PPP_FILTER_INBOUND_TAG 0x0000 86 87 /* 88 * An instance of /dev/ppp can be associated with either a ppp 89 * interface unit or a ppp channel. In both cases, file->private_data 90 * points to one of these. 
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	refcount_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

/*
 * Data structure to hold primary network stats for which
 * we want to use 64 bit storage. Other network stats
 * are stored in dev->stats of the ppp structure.
 */
struct ppp_link_stats {
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
};

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int __percpu	*xmit_recursion; /* xmit recursion detect */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct bpf_prog *pass_filter;	/* filter for packets to pass */
	struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
	struct ppp_link_stats stats64;	/* 64 bit network stats */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	netns_tracker	ns_tracker;
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' and 'bridge' */
	struct channel __rcu *bridge;	/* "bridged" ppp channel */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};

struct ppp_config {
	struct file *file;
	s32	unit;
	bool	ifname_is_set;
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */

static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static unsigned int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and the atomicity of finding
	 * a channel and updating its file.refcnt field.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others. Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)

/* Prototypes.
*/ 271 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 272 struct file *file, unsigned int cmd, unsigned long arg); 273 static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb); 274 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 275 static void ppp_push(struct ppp *ppp); 276 static void ppp_channel_push(struct channel *pch); 277 static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, 278 struct channel *pch); 279 static void ppp_receive_error(struct ppp *ppp); 280 static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb); 281 static struct sk_buff *ppp_decompress_frame(struct ppp *ppp, 282 struct sk_buff *skb); 283 #ifdef CONFIG_PPP_MULTILINK 284 static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, 285 struct channel *pch); 286 static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb); 287 static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp); 288 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb); 289 #endif /* CONFIG_PPP_MULTILINK */ 290 static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data); 291 static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); 292 static void ppp_ccp_closed(struct ppp *ppp); 293 static struct compressor *find_compressor(int type); 294 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 295 static int ppp_create_interface(struct net *net, struct file *file, int *unit); 296 static void init_ppp_file(struct ppp_file *pf, int kind); 297 static void ppp_destroy_interface(struct ppp *ppp); 298 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); 299 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); 300 static int ppp_connect_channel(struct channel *pch, int unit); 301 static int ppp_disconnect_channel(struct channel *pch); 302 static void ppp_destroy_channel(struct channel *pch); 303 static int unit_get(struct idr *p, void *ptr, int min); 304 static int unit_set(struct idr *p, void *ptr, int n); 305 static void unit_put(struct idr *p, int n); 306 static void *unit_find(struct idr *p, int n); 307 static void ppp_setup(struct net_device *dev); 308 309 static const struct net_device_ops ppp_netdev_ops; 310 311 static const struct class ppp_class = { 312 .name = "ppp", 313 }; 314 315 /* per net-namespace data */ 316 static inline struct ppp_net *ppp_pernet(struct net *net) 317 { 318 return net_generic(net, ppp_net_id); 319 } 320 321 /* Translates a PPP protocol number to a NP index (NP == network protocol) */ 322 static inline int proto_to_npindex(int proto) 323 { 324 switch (proto) { 325 case PPP_IP: 326 return NP_IP; 327 case PPP_IPV6: 328 return NP_IPV6; 329 case PPP_IPX: 330 return NP_IPX; 331 case PPP_AT: 332 return NP_AT; 333 case PPP_MPLS_UC: 334 return NP_MPLS_UC; 335 case PPP_MPLS_MC: 336 return NP_MPLS_MC; 337 } 338 return -EINVAL; 339 } 340 341 /* Translates an NP index into a PPP protocol number */ 342 static const int npindex_to_proto[NUM_NP] = { 343 PPP_IP, 344 PPP_IPV6, 345 PPP_IPX, 346 PPP_AT, 347 PPP_MPLS_UC, 348 PPP_MPLS_MC, 349 }; 350 351 /* Translates an ethertype into an NP index */ 352 static inline int ethertype_to_npindex(int ethertype) 353 { 354 switch (ethertype) { 355 case ETH_P_IP: 356 return NP_IP; 357 case ETH_P_IPV6: 358 return NP_IPV6; 359 case ETH_P_IPX: 360 return NP_IPX; 361 case ETH_P_PPPTALK: 362 case ETH_P_ATALK: 363 return NP_AT; 364 case ETH_P_MPLS_UC: 365 return NP_MPLS_UC; 366 case ETH_P_MPLS_MC: 367 return NP_MPLS_MC; 368 } 
369 return -1; 370 } 371 372 /* Translates an NP index into an ethertype */ 373 static const int npindex_to_ethertype[NUM_NP] = { 374 ETH_P_IP, 375 ETH_P_IPV6, 376 ETH_P_IPX, 377 ETH_P_PPPTALK, 378 ETH_P_MPLS_UC, 379 ETH_P_MPLS_MC, 380 }; 381 382 /* 383 * Locking shorthand. 384 */ 385 #define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock) 386 #define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock) 387 #define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock) 388 #define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock) 389 #define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \ 390 ppp_recv_lock(ppp); } while (0) 391 #define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \ 392 ppp_xmit_unlock(ppp); } while (0) 393 394 /* 395 * /dev/ppp device routines. 396 * The /dev/ppp device is used by pppd to control the ppp unit. 397 * It supports the read, write, ioctl and poll functions. 398 * Open instances of /dev/ppp can be in one of three states: 399 * unattached, attached to a ppp unit, or attached to a ppp channel. 400 */ 401 static int ppp_open(struct inode *inode, struct file *file) 402 { 403 /* 404 * This could (should?) be enforced by the permissions on /dev/ppp. 405 */ 406 if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN)) 407 return -EPERM; 408 return 0; 409 } 410 411 static int ppp_release(struct inode *unused, struct file *file) 412 { 413 struct ppp_file *pf = file->private_data; 414 struct ppp *ppp; 415 416 if (pf) { 417 file->private_data = NULL; 418 if (pf->kind == INTERFACE) { 419 ppp = PF_TO_PPP(pf); 420 rtnl_lock(); 421 if (file == ppp->owner) 422 unregister_netdevice(ppp->dev); 423 rtnl_unlock(); 424 } 425 if (refcount_dec_and_test(&pf->refcnt)) { 426 switch (pf->kind) { 427 case INTERFACE: 428 ppp_destroy_interface(PF_TO_PPP(pf)); 429 break; 430 case CHANNEL: 431 ppp_destroy_channel(PF_TO_CHANNEL(pf)); 432 break; 433 } 434 } 435 } 436 return 0; 437 } 438 439 static ssize_t ppp_read(struct file *file, char __user *buf, 440 size_t count, loff_t *ppos) 441 { 442 struct ppp_file *pf = file->private_data; 443 DECLARE_WAITQUEUE(wait, current); 444 ssize_t ret; 445 struct sk_buff *skb = NULL; 446 struct iovec iov; 447 struct iov_iter to; 448 449 ret = count; 450 451 if (!pf) 452 return -ENXIO; 453 add_wait_queue(&pf->rwait, &wait); 454 for (;;) { 455 set_current_state(TASK_INTERRUPTIBLE); 456 skb = skb_dequeue(&pf->rq); 457 if (skb) 458 break; 459 ret = 0; 460 if (pf->dead) 461 break; 462 if (pf->kind == INTERFACE) { 463 /* 464 * Return 0 (EOF) on an interface that has no 465 * channels connected, unless it is looping 466 * network traffic (demand mode). 
			 */
			struct ppp *ppp = PF_TO_PPP(pf);

			ppp_recv_lock(ppp);
			if (ppp->n_channels == 0 &&
			    (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
				ppp_recv_unlock(ppp);
				break;
			}
			ppp_recv_unlock(ppp);
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	iov.iov_base = buf;
	iov.iov_len = count;
	iov_iter_init(&to, ITER_DEST, &iov, 1, count);
	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static bool ppp_check_packet(struct sk_buff *skb, size_t count)
{
	/* LCP packets must include the LCP header, which is 4 bytes long:
	 * 1-byte code, 1-byte identifier, and 2-byte length.
	 */
	return get_unaligned_be16(skb->data) != PPP_LCP ||
		count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (!pf)
		return -ENXIO;
	/* All PPP packets should start with the 2-byte protocol */
	if (count < PPP_PROTO_LEN)
		return -EINVAL;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}
	ret = -EINVAL;
	if (unlikely(!ppp_check_packet(skb, count))) {
		kfree_skb(skb);
		goto out;
	}

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf), skb);
		break;
	case CHANNEL:
		skb_queue_tail(&pf->xq, skb);
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	__poll_t mask;

	if (!pf)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = EPOLLOUT | EPOLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (pf->dead)
		mask |= EPOLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);

		ppp_recv_lock(ppp);
		if (ppp->n_channels == 0 &&
		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= EPOLLIN | EPOLLRDNORM;
		ppp_recv_unlock(ppp);
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static struct bpf_prog *get_filter(struct sock_fprog *uprog)
{
	struct sock_fprog_kern fprog;
	struct bpf_prog *res = NULL;
	int err;

	if (!uprog->len)
		return NULL;

	/* uprog->len is unsigned short, so no overflow here */
	fprog.len = uprog->len;
	fprog.filter = memdup_array_user(uprog->filter,
					 uprog->len, sizeof(struct sock_filter));
	if (IS_ERR(fprog.filter))
		return ERR_CAST(fprog.filter);

	err = bpf_prog_create(&res, &fprog);
	kfree(fprog.filter);

	return err ?
		ERR_PTR(err) : res;
}

static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
{
	struct sock_fprog uprog;

	if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
		return ERR_PTR(-EFAULT);
	return get_filter(&uprog);
}

#ifdef CONFIG_COMPAT
struct sock_fprog32 {
	unsigned short len;
	compat_caddr_t filter;
};

#define PPPIOCSPASS32		_IOW('t', 71, struct sock_fprog32)
#define PPPIOCSACTIVE32		_IOW('t', 70, struct sock_fprog32)

static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
{
	struct sock_fprog32 uprog32;
	struct sock_fprog uprog;

	if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
		return ERR_PTR(-EFAULT);
	uprog.len = uprog32.len;
	uprog.filter = compat_ptr(uprog32.filter);
	return get_filter(&uprog);
}
#endif
#endif

/* Bridge one PPP channel to another.
 * When two channels are bridged, ppp_input on one channel is redirected to
 * the other's ops->start_xmit handler.
 * In order to safely bridge channels we must reject channels which are already
 * part of a bridge instance, or which form part of an existing unit.
 * Once successfully bridged, each channel holds a reference on the other
 * to prevent it from being freed while the bridge is extant.
 */
static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
{
	write_lock_bh(&pch->upl);
	if (pch->ppp ||
	    rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
		write_unlock_bh(&pch->upl);
		return -EALREADY;
	}
	refcount_inc(&pchb->file.refcnt);
	rcu_assign_pointer(pch->bridge, pchb);
	write_unlock_bh(&pch->upl);

	write_lock_bh(&pchb->upl);
	if (pchb->ppp ||
	    rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) {
		write_unlock_bh(&pchb->upl);
		goto err_unset;
	}
	refcount_inc(&pch->file.refcnt);
	rcu_assign_pointer(pchb->bridge, pch);
	write_unlock_bh(&pchb->upl);

	return 0;

err_unset:
	write_lock_bh(&pch->upl);
	/* Re-read pch->bridge with upl held in case it was modified concurrently */
	pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
	RCU_INIT_POINTER(pch->bridge, NULL);
	write_unlock_bh(&pch->upl);
	synchronize_rcu();

	if (pchb)
		if (refcount_dec_and_test(&pchb->file.refcnt))
			ppp_destroy_channel(pchb);

	return -EALREADY;
}

static int ppp_unbridge_channels(struct channel *pch)
{
	struct channel *pchb, *pchbb;

	write_lock_bh(&pch->upl);
	pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
	if (!pchb) {
		write_unlock_bh(&pch->upl);
		return -EINVAL;
	}
	RCU_INIT_POINTER(pch->bridge, NULL);
	write_unlock_bh(&pch->upl);

	/* Only modify pchb if pchb->bridge points back to pch.
	 * If not, it implies that there has been a race unbridging (and possibly
	 * even rebridging) pchb. We should leave pchb alone to avoid either a
	 * refcount underflow, or breaking another established bridge instance.
709 */ 710 write_lock_bh(&pchb->upl); 711 pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl)); 712 if (pchbb == pch) 713 RCU_INIT_POINTER(pchb->bridge, NULL); 714 write_unlock_bh(&pchb->upl); 715 716 synchronize_rcu(); 717 718 if (pchbb == pch) 719 if (refcount_dec_and_test(&pch->file.refcnt)) 720 ppp_destroy_channel(pch); 721 722 if (refcount_dec_and_test(&pchb->file.refcnt)) 723 ppp_destroy_channel(pchb); 724 725 return 0; 726 } 727 728 static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 729 { 730 struct ppp_file *pf; 731 struct ppp *ppp; 732 int err = -EFAULT, val, val2, i; 733 struct ppp_idle32 idle32; 734 struct ppp_idle64 idle64; 735 struct npioctl npi; 736 int unit, cflags; 737 struct slcompress *vj; 738 void __user *argp = (void __user *)arg; 739 int __user *p = argp; 740 741 mutex_lock(&ppp_mutex); 742 743 pf = file->private_data; 744 if (!pf) { 745 err = ppp_unattached_ioctl(current->nsproxy->net_ns, 746 pf, file, cmd, arg); 747 goto out; 748 } 749 750 if (cmd == PPPIOCDETACH) { 751 /* 752 * PPPIOCDETACH is no longer supported as it was heavily broken, 753 * and is only known to have been used by pppd older than 754 * ppp-2.4.2 (released November 2003). 755 */ 756 pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n", 757 current->comm, current->pid); 758 err = -EINVAL; 759 goto out; 760 } 761 762 if (pf->kind == CHANNEL) { 763 struct channel *pch, *pchb; 764 struct ppp_channel *chan; 765 struct ppp_net *pn; 766 767 pch = PF_TO_CHANNEL(pf); 768 769 switch (cmd) { 770 case PPPIOCCONNECT: 771 if (get_user(unit, p)) 772 break; 773 err = ppp_connect_channel(pch, unit); 774 break; 775 776 case PPPIOCDISCONN: 777 err = ppp_disconnect_channel(pch); 778 break; 779 780 case PPPIOCBRIDGECHAN: 781 if (get_user(unit, p)) 782 break; 783 err = -ENXIO; 784 pn = ppp_pernet(current->nsproxy->net_ns); 785 spin_lock_bh(&pn->all_channels_lock); 786 pchb = ppp_find_channel(pn, unit); 787 /* Hold a reference to prevent pchb being freed while 788 * we establish the bridge. 
789 */ 790 if (pchb) 791 refcount_inc(&pchb->file.refcnt); 792 spin_unlock_bh(&pn->all_channels_lock); 793 if (!pchb) 794 break; 795 err = ppp_bridge_channels(pch, pchb); 796 /* Drop earlier refcount now bridge establishment is complete */ 797 if (refcount_dec_and_test(&pchb->file.refcnt)) 798 ppp_destroy_channel(pchb); 799 break; 800 801 case PPPIOCUNBRIDGECHAN: 802 err = ppp_unbridge_channels(pch); 803 break; 804 805 default: 806 down_read(&pch->chan_sem); 807 chan = pch->chan; 808 err = -ENOTTY; 809 if (chan && chan->ops->ioctl) 810 err = chan->ops->ioctl(chan, cmd, arg); 811 up_read(&pch->chan_sem); 812 } 813 goto out; 814 } 815 816 if (pf->kind != INTERFACE) { 817 /* can't happen */ 818 pr_err("PPP: not interface or channel??\n"); 819 err = -EINVAL; 820 goto out; 821 } 822 823 ppp = PF_TO_PPP(pf); 824 switch (cmd) { 825 case PPPIOCSMRU: 826 if (get_user(val, p)) 827 break; 828 ppp->mru = val; 829 err = 0; 830 break; 831 832 case PPPIOCSFLAGS: 833 if (get_user(val, p)) 834 break; 835 ppp_lock(ppp); 836 cflags = ppp->flags & ~val; 837 #ifdef CONFIG_PPP_MULTILINK 838 if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK)) 839 ppp->nextseq = 0; 840 #endif 841 ppp->flags = val & SC_FLAG_BITS; 842 ppp_unlock(ppp); 843 if (cflags & SC_CCP_OPEN) 844 ppp_ccp_closed(ppp); 845 err = 0; 846 break; 847 848 case PPPIOCGFLAGS: 849 val = ppp->flags | ppp->xstate | ppp->rstate; 850 if (put_user(val, p)) 851 break; 852 err = 0; 853 break; 854 855 case PPPIOCSCOMPRESS: 856 { 857 struct ppp_option_data data; 858 if (copy_from_user(&data, argp, sizeof(data))) 859 err = -EFAULT; 860 else 861 err = ppp_set_compress(ppp, &data); 862 break; 863 } 864 case PPPIOCGUNIT: 865 if (put_user(ppp->file.index, p)) 866 break; 867 err = 0; 868 break; 869 870 case PPPIOCSDEBUG: 871 if (get_user(val, p)) 872 break; 873 ppp->debug = val; 874 err = 0; 875 break; 876 877 case PPPIOCGDEBUG: 878 if (put_user(ppp->debug, p)) 879 break; 880 err = 0; 881 break; 882 883 case PPPIOCGIDLE32: 884 idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ; 885 idle32.recv_idle = (jiffies - ppp->last_recv) / HZ; 886 if (copy_to_user(argp, &idle32, sizeof(idle32))) 887 break; 888 err = 0; 889 break; 890 891 case PPPIOCGIDLE64: 892 idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ; 893 idle64.recv_idle = (jiffies - ppp->last_recv) / HZ; 894 if (copy_to_user(argp, &idle64, sizeof(idle64))) 895 break; 896 err = 0; 897 break; 898 899 case PPPIOCSMAXCID: 900 if (get_user(val, p)) 901 break; 902 val2 = 15; 903 if ((val >> 16) != 0) { 904 val2 = val >> 16; 905 val &= 0xffff; 906 } 907 vj = slhc_init(val2+1, val+1); 908 if (IS_ERR(vj)) { 909 err = PTR_ERR(vj); 910 break; 911 } 912 ppp_lock(ppp); 913 if (ppp->vj) 914 slhc_free(ppp->vj); 915 ppp->vj = vj; 916 ppp_unlock(ppp); 917 err = 0; 918 break; 919 920 case PPPIOCGNPMODE: 921 case PPPIOCSNPMODE: 922 if (copy_from_user(&npi, argp, sizeof(npi))) 923 break; 924 err = proto_to_npindex(npi.protocol); 925 if (err < 0) 926 break; 927 i = err; 928 if (cmd == PPPIOCGNPMODE) { 929 err = -EFAULT; 930 npi.mode = ppp->npmode[i]; 931 if (copy_to_user(argp, &npi, sizeof(npi))) 932 break; 933 } else { 934 ppp->npmode[i] = npi.mode; 935 /* we may be able to transmit more packets now (??) 
*/ 936 netif_wake_queue(ppp->dev); 937 } 938 err = 0; 939 break; 940 941 #ifdef CONFIG_PPP_FILTER 942 case PPPIOCSPASS: 943 case PPPIOCSACTIVE: 944 { 945 struct bpf_prog *filter = ppp_get_filter(argp); 946 struct bpf_prog **which; 947 948 if (IS_ERR(filter)) { 949 err = PTR_ERR(filter); 950 break; 951 } 952 if (cmd == PPPIOCSPASS) 953 which = &ppp->pass_filter; 954 else 955 which = &ppp->active_filter; 956 ppp_lock(ppp); 957 if (*which) 958 bpf_prog_destroy(*which); 959 *which = filter; 960 ppp_unlock(ppp); 961 err = 0; 962 break; 963 } 964 #endif /* CONFIG_PPP_FILTER */ 965 966 #ifdef CONFIG_PPP_MULTILINK 967 case PPPIOCSMRRU: 968 if (get_user(val, p)) 969 break; 970 ppp_recv_lock(ppp); 971 ppp->mrru = val; 972 ppp_recv_unlock(ppp); 973 err = 0; 974 break; 975 #endif /* CONFIG_PPP_MULTILINK */ 976 977 default: 978 err = -ENOTTY; 979 } 980 981 out: 982 mutex_unlock(&ppp_mutex); 983 984 return err; 985 } 986 987 #ifdef CONFIG_COMPAT 988 struct ppp_option_data32 { 989 compat_uptr_t ptr; 990 u32 length; 991 compat_int_t transmit; 992 }; 993 #define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32) 994 995 static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 996 { 997 struct ppp_file *pf; 998 int err = -ENOIOCTLCMD; 999 void __user *argp = (void __user *)arg; 1000 1001 mutex_lock(&ppp_mutex); 1002 1003 pf = file->private_data; 1004 if (pf && pf->kind == INTERFACE) { 1005 struct ppp *ppp = PF_TO_PPP(pf); 1006 switch (cmd) { 1007 #ifdef CONFIG_PPP_FILTER 1008 case PPPIOCSPASS32: 1009 case PPPIOCSACTIVE32: 1010 { 1011 struct bpf_prog *filter = compat_ppp_get_filter(argp); 1012 struct bpf_prog **which; 1013 1014 if (IS_ERR(filter)) { 1015 err = PTR_ERR(filter); 1016 break; 1017 } 1018 if (cmd == PPPIOCSPASS32) 1019 which = &ppp->pass_filter; 1020 else 1021 which = &ppp->active_filter; 1022 ppp_lock(ppp); 1023 if (*which) 1024 bpf_prog_destroy(*which); 1025 *which = filter; 1026 ppp_unlock(ppp); 1027 err = 0; 1028 break; 1029 } 1030 #endif /* CONFIG_PPP_FILTER */ 1031 case PPPIOCSCOMPRESS32: 1032 { 1033 struct ppp_option_data32 data32; 1034 if (copy_from_user(&data32, argp, sizeof(data32))) { 1035 err = -EFAULT; 1036 } else { 1037 struct ppp_option_data data = { 1038 .ptr = compat_ptr(data32.ptr), 1039 .length = data32.length, 1040 .transmit = data32.transmit 1041 }; 1042 err = ppp_set_compress(ppp, &data); 1043 } 1044 break; 1045 } 1046 } 1047 } 1048 mutex_unlock(&ppp_mutex); 1049 1050 /* all other commands have compatible arguments */ 1051 if (err == -ENOIOCTLCMD) 1052 err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 1053 1054 return err; 1055 } 1056 #endif 1057 1058 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 1059 struct file *file, unsigned int cmd, unsigned long arg) 1060 { 1061 int unit, err = -EFAULT; 1062 struct ppp *ppp; 1063 struct channel *chan; 1064 struct ppp_net *pn; 1065 int __user *p = (int __user *)arg; 1066 1067 switch (cmd) { 1068 case PPPIOCNEWUNIT: 1069 /* Create a new ppp unit */ 1070 if (get_user(unit, p)) 1071 break; 1072 err = ppp_create_interface(net, file, &unit); 1073 if (err < 0) 1074 break; 1075 1076 err = -EFAULT; 1077 if (put_user(unit, p)) 1078 break; 1079 err = 0; 1080 break; 1081 1082 case PPPIOCATTACH: 1083 /* Attach to an existing ppp unit */ 1084 if (get_user(unit, p)) 1085 break; 1086 err = -ENXIO; 1087 pn = ppp_pernet(net); 1088 mutex_lock(&pn->all_ppp_mutex); 1089 ppp = ppp_find_unit(pn, unit); 1090 if (ppp) { 1091 refcount_inc(&ppp->file.refcnt); 1092 file->private_data = 
&ppp->file; 1093 err = 0; 1094 } 1095 mutex_unlock(&pn->all_ppp_mutex); 1096 break; 1097 1098 case PPPIOCATTCHAN: 1099 if (get_user(unit, p)) 1100 break; 1101 err = -ENXIO; 1102 pn = ppp_pernet(net); 1103 spin_lock_bh(&pn->all_channels_lock); 1104 chan = ppp_find_channel(pn, unit); 1105 if (chan) { 1106 refcount_inc(&chan->file.refcnt); 1107 file->private_data = &chan->file; 1108 err = 0; 1109 } 1110 spin_unlock_bh(&pn->all_channels_lock); 1111 break; 1112 1113 default: 1114 err = -ENOTTY; 1115 } 1116 1117 return err; 1118 } 1119 1120 static const struct file_operations ppp_device_fops = { 1121 .owner = THIS_MODULE, 1122 .read = ppp_read, 1123 .write = ppp_write, 1124 .poll = ppp_poll, 1125 .unlocked_ioctl = ppp_ioctl, 1126 #ifdef CONFIG_COMPAT 1127 .compat_ioctl = ppp_compat_ioctl, 1128 #endif 1129 .open = ppp_open, 1130 .release = ppp_release, 1131 .llseek = noop_llseek, 1132 }; 1133 1134 static __net_init int ppp_init_net(struct net *net) 1135 { 1136 struct ppp_net *pn = net_generic(net, ppp_net_id); 1137 1138 idr_init(&pn->units_idr); 1139 mutex_init(&pn->all_ppp_mutex); 1140 1141 INIT_LIST_HEAD(&pn->all_channels); 1142 INIT_LIST_HEAD(&pn->new_channels); 1143 1144 spin_lock_init(&pn->all_channels_lock); 1145 1146 return 0; 1147 } 1148 1149 static __net_exit void ppp_exit_net(struct net *net) 1150 { 1151 struct ppp_net *pn = net_generic(net, ppp_net_id); 1152 struct net_device *dev; 1153 struct net_device *aux; 1154 struct ppp *ppp; 1155 LIST_HEAD(list); 1156 int id; 1157 1158 rtnl_lock(); 1159 for_each_netdev_safe(net, dev, aux) { 1160 if (dev->netdev_ops == &ppp_netdev_ops) 1161 unregister_netdevice_queue(dev, &list); 1162 } 1163 1164 idr_for_each_entry(&pn->units_idr, ppp, id) 1165 /* Skip devices already unregistered by previous loop */ 1166 if (!net_eq(dev_net(ppp->dev), net)) 1167 unregister_netdevice_queue(ppp->dev, &list); 1168 1169 unregister_netdevice_many(&list); 1170 rtnl_unlock(); 1171 1172 mutex_destroy(&pn->all_ppp_mutex); 1173 idr_destroy(&pn->units_idr); 1174 WARN_ON_ONCE(!list_empty(&pn->all_channels)); 1175 WARN_ON_ONCE(!list_empty(&pn->new_channels)); 1176 } 1177 1178 static struct pernet_operations ppp_net_ops = { 1179 .init = ppp_init_net, 1180 .exit = ppp_exit_net, 1181 .id = &ppp_net_id, 1182 .size = sizeof(struct ppp_net), 1183 }; 1184 1185 static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) 1186 { 1187 struct ppp_net *pn = ppp_pernet(ppp->ppp_net); 1188 int ret; 1189 1190 mutex_lock(&pn->all_ppp_mutex); 1191 1192 if (unit < 0) { 1193 ret = unit_get(&pn->units_idr, ppp, 0); 1194 if (ret < 0) 1195 goto err; 1196 if (!ifname_is_set) { 1197 while (1) { 1198 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret); 1199 if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name)) 1200 break; 1201 unit_put(&pn->units_idr, ret); 1202 ret = unit_get(&pn->units_idr, ppp, ret + 1); 1203 if (ret < 0) 1204 goto err; 1205 } 1206 } 1207 } else { 1208 /* Caller asked for a specific unit number. Fail with -EEXIST 1209 * if unavailable. For backward compatibility, return -EEXIST 1210 * too if idr allocation fails; this makes pppd retry without 1211 * requesting a specific unit number. 
1212 */ 1213 if (unit_find(&pn->units_idr, unit)) { 1214 ret = -EEXIST; 1215 goto err; 1216 } 1217 ret = unit_set(&pn->units_idr, ppp, unit); 1218 if (ret < 0) { 1219 /* Rewrite error for backward compatibility */ 1220 ret = -EEXIST; 1221 goto err; 1222 } 1223 } 1224 ppp->file.index = ret; 1225 1226 if (!ifname_is_set) 1227 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); 1228 1229 mutex_unlock(&pn->all_ppp_mutex); 1230 1231 ret = register_netdevice(ppp->dev); 1232 if (ret < 0) 1233 goto err_unit; 1234 1235 atomic_inc(&ppp_unit_count); 1236 1237 return 0; 1238 1239 err_unit: 1240 mutex_lock(&pn->all_ppp_mutex); 1241 unit_put(&pn->units_idr, ppp->file.index); 1242 err: 1243 mutex_unlock(&pn->all_ppp_mutex); 1244 1245 return ret; 1246 } 1247 1248 static int ppp_dev_configure(struct net *src_net, struct net_device *dev, 1249 const struct ppp_config *conf) 1250 { 1251 struct ppp *ppp = netdev_priv(dev); 1252 int indx; 1253 int err; 1254 int cpu; 1255 1256 ppp->dev = dev; 1257 ppp->ppp_net = src_net; 1258 ppp->mru = PPP_MRU; 1259 ppp->owner = conf->file; 1260 1261 init_ppp_file(&ppp->file, INTERFACE); 1262 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ 1263 1264 for (indx = 0; indx < NUM_NP; ++indx) 1265 ppp->npmode[indx] = NPMODE_PASS; 1266 INIT_LIST_HEAD(&ppp->channels); 1267 spin_lock_init(&ppp->rlock); 1268 spin_lock_init(&ppp->wlock); 1269 1270 ppp->xmit_recursion = alloc_percpu(int); 1271 if (!ppp->xmit_recursion) { 1272 err = -ENOMEM; 1273 goto err1; 1274 } 1275 for_each_possible_cpu(cpu) 1276 (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0; 1277 1278 #ifdef CONFIG_PPP_MULTILINK 1279 ppp->minseq = -1; 1280 skb_queue_head_init(&ppp->mrq); 1281 #endif /* CONFIG_PPP_MULTILINK */ 1282 #ifdef CONFIG_PPP_FILTER 1283 ppp->pass_filter = NULL; 1284 ppp->active_filter = NULL; 1285 #endif /* CONFIG_PPP_FILTER */ 1286 1287 err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set); 1288 if (err < 0) 1289 goto err2; 1290 1291 conf->file->private_data = &ppp->file; 1292 1293 return 0; 1294 err2: 1295 free_percpu(ppp->xmit_recursion); 1296 err1: 1297 return err; 1298 } 1299 1300 static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = { 1301 [IFLA_PPP_DEV_FD] = { .type = NLA_S32 }, 1302 }; 1303 1304 static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[], 1305 struct netlink_ext_ack *extack) 1306 { 1307 if (!data) 1308 return -EINVAL; 1309 1310 if (!data[IFLA_PPP_DEV_FD]) 1311 return -EINVAL; 1312 if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0) 1313 return -EBADF; 1314 1315 return 0; 1316 } 1317 1318 static int ppp_nl_newlink(struct net_device *dev, 1319 struct rtnl_newlink_params *params, 1320 struct netlink_ext_ack *extack) 1321 { 1322 struct net *link_net = rtnl_newlink_link_net(params); 1323 struct nlattr **data = params->data; 1324 struct nlattr **tb = params->tb; 1325 struct ppp_config conf = { 1326 .unit = -1, 1327 .ifname_is_set = true, 1328 }; 1329 struct file *file; 1330 int err; 1331 1332 file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD])); 1333 if (!file) 1334 return -EBADF; 1335 1336 /* rtnl_lock is already held here, but ppp_create_interface() locks 1337 * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids 1338 * possible deadlock due to lock order inversion, at the cost of 1339 * pushing the problem back to userspace. 
1340 */ 1341 if (!mutex_trylock(&ppp_mutex)) { 1342 err = -EBUSY; 1343 goto out; 1344 } 1345 1346 if (file->f_op != &ppp_device_fops || file->private_data) { 1347 err = -EBADF; 1348 goto out_unlock; 1349 } 1350 1351 conf.file = file; 1352 1353 /* Don't use device name generated by the rtnetlink layer when ifname 1354 * isn't specified. Let ppp_dev_configure() set the device name using 1355 * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows 1356 * userspace to infer the device name using to the PPPIOCGUNIT ioctl. 1357 */ 1358 if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME])) 1359 conf.ifname_is_set = false; 1360 1361 err = ppp_dev_configure(link_net, dev, &conf); 1362 1363 out_unlock: 1364 mutex_unlock(&ppp_mutex); 1365 out: 1366 fput(file); 1367 1368 return err; 1369 } 1370 1371 static void ppp_nl_dellink(struct net_device *dev, struct list_head *head) 1372 { 1373 unregister_netdevice_queue(dev, head); 1374 } 1375 1376 static size_t ppp_nl_get_size(const struct net_device *dev) 1377 { 1378 return 0; 1379 } 1380 1381 static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev) 1382 { 1383 return 0; 1384 } 1385 1386 static struct net *ppp_nl_get_link_net(const struct net_device *dev) 1387 { 1388 struct ppp *ppp = netdev_priv(dev); 1389 1390 return READ_ONCE(ppp->ppp_net); 1391 } 1392 1393 static struct rtnl_link_ops ppp_link_ops __read_mostly = { 1394 .kind = "ppp", 1395 .maxtype = IFLA_PPP_MAX, 1396 .policy = ppp_nl_policy, 1397 .priv_size = sizeof(struct ppp), 1398 .setup = ppp_setup, 1399 .validate = ppp_nl_validate, 1400 .newlink = ppp_nl_newlink, 1401 .dellink = ppp_nl_dellink, 1402 .get_size = ppp_nl_get_size, 1403 .fill_info = ppp_nl_fill_info, 1404 .get_link_net = ppp_nl_get_link_net, 1405 }; 1406 1407 #define PPP_MAJOR 108 1408 1409 /* Called at boot time if ppp is compiled into the kernel, 1410 or at module load time (from init_module) if compiled as a module. */ 1411 static int __init ppp_init(void) 1412 { 1413 int err; 1414 1415 pr_info("PPP generic driver version " PPP_VERSION "\n"); 1416 1417 err = register_pernet_device(&ppp_net_ops); 1418 if (err) { 1419 pr_err("failed to register PPP pernet device (%d)\n", err); 1420 goto out; 1421 } 1422 1423 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 1424 if (err) { 1425 pr_err("failed to register PPP device (%d)\n", err); 1426 goto out_net; 1427 } 1428 1429 err = class_register(&ppp_class); 1430 if (err) 1431 goto out_chrdev; 1432 1433 err = rtnl_link_register(&ppp_link_ops); 1434 if (err) { 1435 pr_err("failed to register rtnetlink PPP handler\n"); 1436 goto out_class; 1437 } 1438 1439 /* not a big deal if we fail here :-) */ 1440 device_create(&ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); 1441 1442 return 0; 1443 1444 out_class: 1445 class_unregister(&ppp_class); 1446 out_chrdev: 1447 unregister_chrdev(PPP_MAJOR, "ppp"); 1448 out_net: 1449 unregister_pernet_device(&ppp_net_ops); 1450 out: 1451 return err; 1452 } 1453 1454 /* 1455 * Network interface unit routines. 
1456 */ 1457 static netdev_tx_t 1458 ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) 1459 { 1460 struct ppp *ppp = netdev_priv(dev); 1461 int npi, proto; 1462 unsigned char *pp; 1463 1464 npi = ethertype_to_npindex(ntohs(skb->protocol)); 1465 if (npi < 0) 1466 goto outf; 1467 1468 /* Drop, accept or reject the packet */ 1469 switch (ppp->npmode[npi]) { 1470 case NPMODE_PASS: 1471 break; 1472 case NPMODE_QUEUE: 1473 /* it would be nice to have a way to tell the network 1474 system to queue this one up for later. */ 1475 goto outf; 1476 case NPMODE_DROP: 1477 case NPMODE_ERROR: 1478 goto outf; 1479 } 1480 1481 /* Put the 2-byte PPP protocol number on the front, 1482 making sure there is room for the address and control fields. */ 1483 if (skb_cow_head(skb, PPP_HDRLEN)) 1484 goto outf; 1485 1486 pp = skb_push(skb, 2); 1487 proto = npindex_to_proto[npi]; 1488 put_unaligned_be16(proto, pp); 1489 1490 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); 1491 ppp_xmit_process(ppp, skb); 1492 1493 return NETDEV_TX_OK; 1494 1495 outf: 1496 kfree_skb(skb); 1497 ++dev->stats.tx_dropped; 1498 return NETDEV_TX_OK; 1499 } 1500 1501 static int 1502 ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr, 1503 void __user *addr, int cmd) 1504 { 1505 struct ppp *ppp = netdev_priv(dev); 1506 int err = -EFAULT; 1507 struct ppp_stats stats; 1508 struct ppp_comp_stats cstats; 1509 char *vers; 1510 1511 switch (cmd) { 1512 case SIOCGPPPSTATS: 1513 ppp_get_stats(ppp, &stats); 1514 if (copy_to_user(addr, &stats, sizeof(stats))) 1515 break; 1516 err = 0; 1517 break; 1518 1519 case SIOCGPPPCSTATS: 1520 memset(&cstats, 0, sizeof(cstats)); 1521 if (ppp->xc_state) 1522 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c); 1523 if (ppp->rc_state) 1524 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d); 1525 if (copy_to_user(addr, &cstats, sizeof(cstats))) 1526 break; 1527 err = 0; 1528 break; 1529 1530 case SIOCGPPPVER: 1531 vers = PPP_VERSION; 1532 if (copy_to_user(addr, vers, strlen(vers) + 1)) 1533 break; 1534 err = 0; 1535 break; 1536 1537 default: 1538 err = -EINVAL; 1539 } 1540 1541 return err; 1542 } 1543 1544 static void 1545 ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) 1546 { 1547 struct ppp *ppp = netdev_priv(dev); 1548 1549 ppp_recv_lock(ppp); 1550 stats64->rx_packets = ppp->stats64.rx_packets; 1551 stats64->rx_bytes = ppp->stats64.rx_bytes; 1552 ppp_recv_unlock(ppp); 1553 1554 ppp_xmit_lock(ppp); 1555 stats64->tx_packets = ppp->stats64.tx_packets; 1556 stats64->tx_bytes = ppp->stats64.tx_bytes; 1557 ppp_xmit_unlock(ppp); 1558 1559 stats64->rx_errors = dev->stats.rx_errors; 1560 stats64->tx_errors = dev->stats.tx_errors; 1561 stats64->rx_dropped = dev->stats.rx_dropped; 1562 stats64->tx_dropped = dev->stats.tx_dropped; 1563 stats64->rx_length_errors = dev->stats.rx_length_errors; 1564 } 1565 1566 static int ppp_dev_init(struct net_device *dev) 1567 { 1568 struct ppp *ppp; 1569 1570 netdev_lockdep_set_classes(dev); 1571 1572 ppp = netdev_priv(dev); 1573 /* Let the netdevice take a reference on the ppp file. This ensures 1574 * that ppp_destroy_interface() won't run before the device gets 1575 * unregistered. 
1576 */ 1577 refcount_inc(&ppp->file.refcnt); 1578 1579 return 0; 1580 } 1581 1582 static void ppp_dev_uninit(struct net_device *dev) 1583 { 1584 struct ppp *ppp = netdev_priv(dev); 1585 struct ppp_net *pn = ppp_pernet(ppp->ppp_net); 1586 1587 ppp_lock(ppp); 1588 ppp->closing = 1; 1589 ppp_unlock(ppp); 1590 1591 mutex_lock(&pn->all_ppp_mutex); 1592 unit_put(&pn->units_idr, ppp->file.index); 1593 mutex_unlock(&pn->all_ppp_mutex); 1594 1595 ppp->owner = NULL; 1596 1597 ppp->file.dead = 1; 1598 wake_up_interruptible(&ppp->file.rwait); 1599 } 1600 1601 static void ppp_dev_priv_destructor(struct net_device *dev) 1602 { 1603 struct ppp *ppp; 1604 1605 ppp = netdev_priv(dev); 1606 if (refcount_dec_and_test(&ppp->file.refcnt)) 1607 ppp_destroy_interface(ppp); 1608 } 1609 1610 static int ppp_fill_forward_path(struct net_device_path_ctx *ctx, 1611 struct net_device_path *path) 1612 { 1613 struct ppp *ppp = netdev_priv(ctx->dev); 1614 struct ppp_channel *chan; 1615 struct channel *pch; 1616 1617 if (ppp->flags & SC_MULTILINK) 1618 return -EOPNOTSUPP; 1619 1620 if (list_empty(&ppp->channels)) 1621 return -ENODEV; 1622 1623 pch = list_first_entry(&ppp->channels, struct channel, clist); 1624 chan = pch->chan; 1625 if (!chan->ops->fill_forward_path) 1626 return -EOPNOTSUPP; 1627 1628 return chan->ops->fill_forward_path(ctx, path, chan); 1629 } 1630 1631 static const struct net_device_ops ppp_netdev_ops = { 1632 .ndo_init = ppp_dev_init, 1633 .ndo_uninit = ppp_dev_uninit, 1634 .ndo_start_xmit = ppp_start_xmit, 1635 .ndo_siocdevprivate = ppp_net_siocdevprivate, 1636 .ndo_get_stats64 = ppp_get_stats64, 1637 .ndo_fill_forward_path = ppp_fill_forward_path, 1638 }; 1639 1640 static const struct device_type ppp_type = { 1641 .name = "ppp", 1642 }; 1643 1644 static void ppp_setup(struct net_device *dev) 1645 { 1646 dev->netdev_ops = &ppp_netdev_ops; 1647 SET_NETDEV_DEVTYPE(dev, &ppp_type); 1648 1649 dev->lltx = true; 1650 1651 dev->hard_header_len = PPP_HDRLEN; 1652 dev->mtu = PPP_MRU; 1653 dev->addr_len = 0; 1654 dev->tx_queue_len = 3; 1655 dev->type = ARPHRD_PPP; 1656 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1657 dev->priv_destructor = ppp_dev_priv_destructor; 1658 netif_keep_dst(dev); 1659 } 1660 1661 /* 1662 * Transmit-side routines. 1663 */ 1664 1665 /* Called to do any work queued up on the transmit side that can now be done */ 1666 static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) 1667 { 1668 ppp_xmit_lock(ppp); 1669 if (!ppp->closing) { 1670 ppp_push(ppp); 1671 1672 if (skb) 1673 skb_queue_tail(&ppp->file.xq, skb); 1674 while (!ppp->xmit_pending && 1675 (skb = skb_dequeue(&ppp->file.xq))) 1676 ppp_send_frame(ppp, skb); 1677 /* If there's no work left to do, tell the core net 1678 code that we can accept some more. 
*/ 1679 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) 1680 netif_wake_queue(ppp->dev); 1681 else 1682 netif_stop_queue(ppp->dev); 1683 } else { 1684 kfree_skb(skb); 1685 } 1686 ppp_xmit_unlock(ppp); 1687 } 1688 1689 static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) 1690 { 1691 local_bh_disable(); 1692 1693 if (unlikely(*this_cpu_ptr(ppp->xmit_recursion))) 1694 goto err; 1695 1696 (*this_cpu_ptr(ppp->xmit_recursion))++; 1697 __ppp_xmit_process(ppp, skb); 1698 (*this_cpu_ptr(ppp->xmit_recursion))--; 1699 1700 local_bh_enable(); 1701 1702 return; 1703 1704 err: 1705 local_bh_enable(); 1706 1707 kfree_skb(skb); 1708 1709 if (net_ratelimit()) 1710 netdev_err(ppp->dev, "recursion detected\n"); 1711 } 1712 1713 static inline struct sk_buff * 1714 pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) 1715 { 1716 struct sk_buff *new_skb; 1717 int len; 1718 int new_skb_size = ppp->dev->mtu + 1719 ppp->xcomp->comp_extra + ppp->dev->hard_header_len; 1720 int compressor_skb_size = ppp->dev->mtu + 1721 ppp->xcomp->comp_extra + PPP_HDRLEN; 1722 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); 1723 if (!new_skb) { 1724 if (net_ratelimit()) 1725 netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n"); 1726 return NULL; 1727 } 1728 if (ppp->dev->hard_header_len > PPP_HDRLEN) 1729 skb_reserve(new_skb, 1730 ppp->dev->hard_header_len - PPP_HDRLEN); 1731 1732 /* compressor still expects A/C bytes in hdr */ 1733 len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2, 1734 new_skb->data, skb->len + 2, 1735 compressor_skb_size); 1736 if (len > 0 && (ppp->flags & SC_CCP_UP)) { 1737 consume_skb(skb); 1738 skb = new_skb; 1739 skb_put(skb, len); 1740 skb_pull(skb, 2); /* pull off A/C bytes */ 1741 } else if (len == 0) { 1742 /* didn't compress, or CCP not up yet */ 1743 consume_skb(new_skb); 1744 new_skb = skb; 1745 } else { 1746 /* 1747 * (len < 0) 1748 * MPPE requires that we do not send unencrypted 1749 * frames. The compressor will return -1 if we 1750 * should drop the frame. We cannot simply test 1751 * the compress_proto because MPPE and MPPC share 1752 * the same number. 1753 */ 1754 if (net_ratelimit()) 1755 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n"); 1756 kfree_skb(skb); 1757 consume_skb(new_skb); 1758 new_skb = NULL; 1759 } 1760 return new_skb; 1761 } 1762 1763 /* 1764 * Compress and send a frame. 1765 * The caller should have locked the xmit path, 1766 * and xmit_pending should be 0. 1767 */ 1768 static void 1769 ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) 1770 { 1771 int proto = PPP_PROTO(skb); 1772 struct sk_buff *new_skb; 1773 int len; 1774 unsigned char *cp; 1775 1776 skb->dev = ppp->dev; 1777 1778 if (proto < 0x8000) { 1779 #ifdef CONFIG_PPP_FILTER 1780 /* check if the packet passes the pass and active filters. 1781 * See comment for PPP_FILTER_OUTBOUND_TAG above. 
1782 */ 1783 *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG); 1784 if (ppp->pass_filter && 1785 bpf_prog_run(ppp->pass_filter, skb) == 0) { 1786 if (ppp->debug & 1) 1787 netdev_printk(KERN_DEBUG, ppp->dev, 1788 "PPP: outbound frame " 1789 "not passed\n"); 1790 kfree_skb(skb); 1791 return; 1792 } 1793 /* if this packet passes the active filter, record the time */ 1794 if (!(ppp->active_filter && 1795 bpf_prog_run(ppp->active_filter, skb) == 0)) 1796 ppp->last_xmit = jiffies; 1797 skb_pull(skb, 2); 1798 #else 1799 /* for data packets, record the time */ 1800 ppp->last_xmit = jiffies; 1801 #endif /* CONFIG_PPP_FILTER */ 1802 } 1803 1804 ++ppp->stats64.tx_packets; 1805 ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN; 1806 1807 switch (proto) { 1808 case PPP_IP: 1809 if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0) 1810 break; 1811 /* try to do VJ TCP header compression */ 1812 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, 1813 GFP_ATOMIC); 1814 if (!new_skb) { 1815 netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n"); 1816 goto drop; 1817 } 1818 skb_reserve(new_skb, ppp->dev->hard_header_len - 2); 1819 cp = skb->data + 2; 1820 len = slhc_compress(ppp->vj, cp, skb->len - 2, 1821 new_skb->data + 2, &cp, 1822 !(ppp->flags & SC_NO_TCP_CCID)); 1823 if (cp == skb->data + 2) { 1824 /* didn't compress */ 1825 consume_skb(new_skb); 1826 } else { 1827 if (cp[0] & SL_TYPE_COMPRESSED_TCP) { 1828 proto = PPP_VJC_COMP; 1829 cp[0] &= ~SL_TYPE_COMPRESSED_TCP; 1830 } else { 1831 proto = PPP_VJC_UNCOMP; 1832 cp[0] = skb->data[2]; 1833 } 1834 consume_skb(skb); 1835 skb = new_skb; 1836 cp = skb_put(skb, len + 2); 1837 cp[0] = 0; 1838 cp[1] = proto; 1839 } 1840 break; 1841 1842 case PPP_CCP: 1843 /* peek at outbound CCP frames */ 1844 ppp_ccp_peek(ppp, skb, 0); 1845 break; 1846 } 1847 1848 /* try to do packet compression */ 1849 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state && 1850 proto != PPP_LCP && proto != PPP_CCP) { 1851 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { 1852 if (net_ratelimit()) 1853 netdev_err(ppp->dev, 1854 "ppp: compression required but " 1855 "down - pkt dropped.\n"); 1856 goto drop; 1857 } 1858 skb = pad_compress_skb(ppp, skb); 1859 if (!skb) 1860 goto drop; 1861 } 1862 1863 /* 1864 * If we are waiting for traffic (demand dialling), 1865 * queue it up for pppd to receive. 1866 */ 1867 if (ppp->flags & SC_LOOP_TRAFFIC) { 1868 if (ppp->file.rq.qlen > PPP_MAX_RQLEN) 1869 goto drop; 1870 skb_queue_tail(&ppp->file.rq, skb); 1871 wake_up_interruptible(&ppp->file.rwait); 1872 return; 1873 } 1874 1875 ppp->xmit_pending = skb; 1876 ppp_push(ppp); 1877 return; 1878 1879 drop: 1880 kfree_skb(skb); 1881 ++ppp->dev->stats.tx_errors; 1882 } 1883 1884 /* 1885 * Try to send the frame in xmit_pending. 1886 * The caller should have the xmit path locked. 
1887 */ 1888 static void 1889 ppp_push(struct ppp *ppp) 1890 { 1891 struct list_head *list; 1892 struct channel *pch; 1893 struct sk_buff *skb = ppp->xmit_pending; 1894 1895 if (!skb) 1896 return; 1897 1898 list = &ppp->channels; 1899 if (list_empty(list)) { 1900 /* nowhere to send the packet, just drop it */ 1901 ppp->xmit_pending = NULL; 1902 kfree_skb(skb); 1903 return; 1904 } 1905 1906 if ((ppp->flags & SC_MULTILINK) == 0) { 1907 /* not doing multilink: send it down the first channel */ 1908 list = list->next; 1909 pch = list_entry(list, struct channel, clist); 1910 1911 spin_lock(&pch->downl); 1912 if (pch->chan) { 1913 if (pch->chan->ops->start_xmit(pch->chan, skb)) 1914 ppp->xmit_pending = NULL; 1915 } else { 1916 /* channel got unregistered */ 1917 kfree_skb(skb); 1918 ppp->xmit_pending = NULL; 1919 } 1920 spin_unlock(&pch->downl); 1921 return; 1922 } 1923 1924 #ifdef CONFIG_PPP_MULTILINK 1925 /* Multilink: fragment the packet over as many links 1926 as can take the packet at the moment. */ 1927 if (!ppp_mp_explode(ppp, skb)) 1928 return; 1929 #endif /* CONFIG_PPP_MULTILINK */ 1930 1931 ppp->xmit_pending = NULL; 1932 kfree_skb(skb); 1933 } 1934 1935 #ifdef CONFIG_PPP_MULTILINK 1936 static bool mp_protocol_compress __read_mostly = true; 1937 module_param(mp_protocol_compress, bool, 0644); 1938 MODULE_PARM_DESC(mp_protocol_compress, 1939 "compress protocol id in multilink fragments"); 1940 1941 /* 1942 * Divide a packet to be transmitted into fragments and 1943 * send them out the individual links. 1944 */ 1945 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1946 { 1947 int len, totlen; 1948 int i, bits, hdrlen, mtu; 1949 int flen; 1950 int navail, nfree, nzero; 1951 int nbigger; 1952 int totspeed; 1953 int totfree; 1954 unsigned char *p, *q; 1955 struct list_head *list; 1956 struct channel *pch; 1957 struct sk_buff *frag; 1958 struct ppp_channel *chan; 1959 1960 totspeed = 0; /*total bitrate of the bundle*/ 1961 nfree = 0; /* # channels which have no packet already queued */ 1962 navail = 0; /* total # of usable channels (not deregistered) */ 1963 nzero = 0; /* number of channels with zero speed associated*/ 1964 totfree = 0; /*total # of channels available and 1965 *having no queued packets before 1966 *starting the fragmentation*/ 1967 1968 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1969 i = 0; 1970 list_for_each_entry(pch, &ppp->channels, clist) { 1971 if (pch->chan) { 1972 pch->avail = 1; 1973 navail++; 1974 pch->speed = pch->chan->speed; 1975 } else { 1976 pch->avail = 0; 1977 } 1978 if (pch->avail) { 1979 if (skb_queue_empty(&pch->file.xq) || 1980 !pch->had_frag) { 1981 if (pch->speed == 0) 1982 nzero++; 1983 else 1984 totspeed += pch->speed; 1985 1986 pch->avail = 2; 1987 ++nfree; 1988 ++totfree; 1989 } 1990 if (!pch->had_frag && i < ppp->nxchan) 1991 ppp->nxchan = i; 1992 } 1993 ++i; 1994 } 1995 /* 1996 * Don't start sending this packet unless at least half of 1997 * the channels are free. This gives much better TCP 1998 * performance if we have a lot of channels. 
1999 */ 2000 if (nfree == 0 || nfree < navail / 2) 2001 return 0; /* can't take now, leave it in xmit_pending */ 2002 2003 /* Do protocol field compression */ 2004 p = skb->data; 2005 len = skb->len; 2006 if (*p == 0 && mp_protocol_compress) { 2007 ++p; 2008 --len; 2009 } 2010 2011 totlen = len; 2012 nbigger = len % nfree; 2013 2014 /* skip to the channel after the one we last used 2015 and start at that one */ 2016 list = &ppp->channels; 2017 for (i = 0; i < ppp->nxchan; ++i) { 2018 list = list->next; 2019 if (list == &ppp->channels) { 2020 i = 0; 2021 break; 2022 } 2023 } 2024 2025 /* create a fragment for each channel */ 2026 bits = B; 2027 while (len > 0) { 2028 list = list->next; 2029 if (list == &ppp->channels) { 2030 i = 0; 2031 continue; 2032 } 2033 pch = list_entry(list, struct channel, clist); 2034 ++i; 2035 if (!pch->avail) 2036 continue; 2037 2038 /* 2039 * Skip this channel if it has a fragment pending already and 2040 * we haven't given a fragment to all of the free channels. 2041 */ 2042 if (pch->avail == 1) { 2043 if (nfree > 0) 2044 continue; 2045 } else { 2046 pch->avail = 1; 2047 } 2048 2049 /* check the channel's mtu and whether it is still attached. */ 2050 spin_lock(&pch->downl); 2051 if (pch->chan == NULL) { 2052 /* can't use this channel, it's being deregistered */ 2053 if (pch->speed == 0) 2054 nzero--; 2055 else 2056 totspeed -= pch->speed; 2057 2058 spin_unlock(&pch->downl); 2059 pch->avail = 0; 2060 totlen = len; 2061 totfree--; 2062 nfree--; 2063 if (--navail == 0) 2064 break; 2065 continue; 2066 } 2067 2068 /* 2069 *if the channel speed is not set divide 2070 *the packet evenly among the free channels; 2071 *otherwise divide it according to the speed 2072 *of the channel we are going to transmit on 2073 */ 2074 flen = len; 2075 if (nfree > 0) { 2076 if (pch->speed == 0) { 2077 flen = len/nfree; 2078 if (nbigger > 0) { 2079 flen++; 2080 nbigger--; 2081 } 2082 } else { 2083 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / 2084 ((totspeed*totfree)/pch->speed)) - hdrlen; 2085 if (nbigger > 0) { 2086 flen += ((totfree - nzero)*pch->speed)/totspeed; 2087 nbigger -= ((totfree - nzero)*pch->speed)/ 2088 totspeed; 2089 } 2090 } 2091 nfree--; 2092 } 2093 2094 /* 2095 *check if we are on the last channel or 2096 *we exceded the length of the data to 2097 *fragment 2098 */ 2099 if ((nfree <= 0) || (flen > len)) 2100 flen = len; 2101 /* 2102 *it is not worth to tx on slow channels: 2103 *in that case from the resulting flen according to the 2104 *above formula will be equal or less than zero. 2105 *Skip the channel in this case 2106 */ 2107 if (flen <= 0) { 2108 pch->avail = 2; 2109 spin_unlock(&pch->downl); 2110 continue; 2111 } 2112 2113 /* 2114 * hdrlen includes the 2-byte PPP protocol field, but the 2115 * MTU counts only the payload excluding the protocol field. 
2116 * (RFC1661 Section 2) 2117 */ 2118 mtu = pch->chan->mtu - (hdrlen - 2); 2119 if (mtu < 4) 2120 mtu = 4; 2121 if (flen > mtu) 2122 flen = mtu; 2123 if (flen == len) 2124 bits |= E; 2125 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC); 2126 if (!frag) 2127 goto noskb; 2128 q = skb_put(frag, flen + hdrlen); 2129 2130 /* make the MP header */ 2131 put_unaligned_be16(PPP_MP, q); 2132 if (ppp->flags & SC_MP_XSHORTSEQ) { 2133 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 2134 q[3] = ppp->nxseq; 2135 } else { 2136 q[2] = bits; 2137 q[3] = ppp->nxseq >> 16; 2138 q[4] = ppp->nxseq >> 8; 2139 q[5] = ppp->nxseq; 2140 } 2141 2142 memcpy(q + hdrlen, p, flen); 2143 2144 /* try to send it down the channel */ 2145 chan = pch->chan; 2146 if (!skb_queue_empty(&pch->file.xq) || 2147 !chan->ops->start_xmit(chan, frag)) 2148 skb_queue_tail(&pch->file.xq, frag); 2149 pch->had_frag = 1; 2150 p += flen; 2151 len -= flen; 2152 ++ppp->nxseq; 2153 bits = 0; 2154 spin_unlock(&pch->downl); 2155 } 2156 ppp->nxchan = i; 2157 2158 return 1; 2159 2160 noskb: 2161 spin_unlock(&pch->downl); 2162 if (ppp->debug & 1) 2163 netdev_err(ppp->dev, "PPP: no memory (fragment)\n"); 2164 ++ppp->dev->stats.tx_errors; 2165 ++ppp->nxseq; 2166 return 1; /* abandon the frame */ 2167 } 2168 #endif /* CONFIG_PPP_MULTILINK */ 2169 2170 /* Try to send data out on a channel */ 2171 static void __ppp_channel_push(struct channel *pch) 2172 { 2173 struct sk_buff *skb; 2174 struct ppp *ppp; 2175 2176 spin_lock(&pch->downl); 2177 if (pch->chan) { 2178 while (!skb_queue_empty(&pch->file.xq)) { 2179 skb = skb_dequeue(&pch->file.xq); 2180 if (!pch->chan->ops->start_xmit(pch->chan, skb)) { 2181 /* put the packet back and try again later */ 2182 skb_queue_head(&pch->file.xq, skb); 2183 break; 2184 } 2185 } 2186 } else { 2187 /* channel got deregistered */ 2188 skb_queue_purge(&pch->file.xq); 2189 } 2190 spin_unlock(&pch->downl); 2191 /* see if there is anything from the attached unit to be sent */ 2192 if (skb_queue_empty(&pch->file.xq)) { 2193 ppp = pch->ppp; 2194 if (ppp) 2195 __ppp_xmit_process(ppp, NULL); 2196 } 2197 } 2198 2199 static void ppp_channel_push(struct channel *pch) 2200 { 2201 read_lock_bh(&pch->upl); 2202 if (pch->ppp) { 2203 (*this_cpu_ptr(pch->ppp->xmit_recursion))++; 2204 __ppp_channel_push(pch); 2205 (*this_cpu_ptr(pch->ppp->xmit_recursion))--; 2206 } else { 2207 __ppp_channel_push(pch); 2208 } 2209 read_unlock_bh(&pch->upl); 2210 } 2211 2212 /* 2213 * Receive-side routines. 2214 */ 2215 2216 struct ppp_mp_skb_parm { 2217 u32 sequence; 2218 u8 BEbits; 2219 }; 2220 #define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb)) 2221 2222 static inline void 2223 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 2224 { 2225 ppp_recv_lock(ppp); 2226 if (!ppp->closing) 2227 ppp_receive_frame(ppp, skb, pch); 2228 else 2229 kfree_skb(skb); 2230 ppp_recv_unlock(ppp); 2231 } 2232 2233 /** 2234 * __ppp_decompress_proto - Decompress protocol field, slim version. 2235 * @skb: Socket buffer where protocol field should be decompressed. It must have 2236 * at least 1 byte of head room and 1 byte of linear data. First byte of 2237 * data must be a protocol field byte. 2238 * 2239 * Decompress protocol field in PPP header if it's compressed, e.g. when 2240 * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data 2241 * length are done in this function. 
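 *
 * Example (illustrative): with Protocol-Field-Compression an IPv4
 * frame may arrive with the single protocol byte 0x21; the low bit
 * being set marks it as compressed, so one byte is pushed and zeroed
 * and the header reads 0x00 0x21 (PPP_IP) again.  A frame that
 * already starts with an even byte such as 0x00 is left untouched.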
2242 */ 2243 static void __ppp_decompress_proto(struct sk_buff *skb) 2244 { 2245 if (skb->data[0] & 0x01) 2246 *(u8 *)skb_push(skb, 1) = 0x00; 2247 } 2248 2249 /** 2250 * ppp_decompress_proto - Check skb data room and decompress protocol field. 2251 * @skb: Socket buffer where protocol field should be decompressed. First byte 2252 * of data must be a protocol field byte. 2253 * 2254 * Decompress protocol field in PPP header if it's compressed, e.g. when 2255 * Protocol-Field-Compression (PFC) was negotiated. This function also makes 2256 * sure that skb data room is sufficient for Protocol field, before and after 2257 * decompression. 2258 * 2259 * Return: true - decompressed successfully, false - not enough room in skb. 2260 */ 2261 static bool ppp_decompress_proto(struct sk_buff *skb) 2262 { 2263 /* At least one byte should be present (if protocol is compressed) */ 2264 if (!pskb_may_pull(skb, 1)) 2265 return false; 2266 2267 __ppp_decompress_proto(skb); 2268 2269 /* Protocol field should occupy 2 bytes when not compressed */ 2270 return pskb_may_pull(skb, 2); 2271 } 2272 2273 /* Attempt to handle a frame via. a bridged channel, if one exists. 2274 * If the channel is bridged, the frame is consumed by the bridge. 2275 * If not, the caller must handle the frame by normal recv mechanisms. 2276 * Returns true if the frame is consumed, false otherwise. 2277 */ 2278 static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb) 2279 { 2280 struct channel *pchb; 2281 2282 rcu_read_lock(); 2283 pchb = rcu_dereference(pch->bridge); 2284 if (!pchb) 2285 goto out_rcu; 2286 2287 spin_lock_bh(&pchb->downl); 2288 if (!pchb->chan) { 2289 /* channel got unregistered */ 2290 kfree_skb(skb); 2291 goto outl; 2292 } 2293 2294 skb_scrub_packet(skb, !net_eq(pch->chan_net, pchb->chan_net)); 2295 if (!pchb->chan->ops->start_xmit(pchb->chan, skb)) 2296 kfree_skb(skb); 2297 2298 outl: 2299 spin_unlock_bh(&pchb->downl); 2300 out_rcu: 2301 rcu_read_unlock(); 2302 2303 /* If pchb is set then we've consumed the packet */ 2304 return !!pchb; 2305 } 2306 2307 void 2308 ppp_input(struct ppp_channel *chan, struct sk_buff *skb) 2309 { 2310 struct channel *pch = chan->ppp; 2311 int proto; 2312 2313 if (!pch) { 2314 kfree_skb(skb); 2315 return; 2316 } 2317 2318 /* If the channel is bridged, transmit via. 
bridge */ 2319 if (ppp_channel_bridge_input(pch, skb)) 2320 return; 2321 2322 read_lock_bh(&pch->upl); 2323 if (!ppp_decompress_proto(skb)) { 2324 kfree_skb(skb); 2325 if (pch->ppp) { 2326 ++pch->ppp->dev->stats.rx_length_errors; 2327 ppp_receive_error(pch->ppp); 2328 } 2329 goto done; 2330 } 2331 2332 proto = PPP_PROTO(skb); 2333 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { 2334 /* put it on the channel queue */ 2335 skb_queue_tail(&pch->file.rq, skb); 2336 /* drop old frames if queue too long */ 2337 while (pch->file.rq.qlen > PPP_MAX_RQLEN && 2338 (skb = skb_dequeue(&pch->file.rq))) 2339 kfree_skb(skb); 2340 wake_up_interruptible(&pch->file.rwait); 2341 } else { 2342 ppp_do_recv(pch->ppp, skb, pch); 2343 } 2344 2345 done: 2346 read_unlock_bh(&pch->upl); 2347 } 2348 2349 /* Put a 0-length skb in the receive queue as an error indication */ 2350 void 2351 ppp_input_error(struct ppp_channel *chan, int code) 2352 { 2353 struct channel *pch = chan->ppp; 2354 struct sk_buff *skb; 2355 2356 if (!pch) 2357 return; 2358 2359 read_lock_bh(&pch->upl); 2360 if (pch->ppp) { 2361 skb = alloc_skb(0, GFP_ATOMIC); 2362 if (skb) { 2363 skb->len = 0; /* probably unnecessary */ 2364 skb->cb[0] = code; 2365 ppp_do_recv(pch->ppp, skb, pch); 2366 } 2367 } 2368 read_unlock_bh(&pch->upl); 2369 } 2370 2371 /* 2372 * We come in here to process a received frame. 2373 * The receive side of the ppp unit is locked. 2374 */ 2375 static void 2376 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 2377 { 2378 /* note: a 0-length skb is used as an error indication */ 2379 if (skb->len > 0) { 2380 skb_checksum_complete_unset(skb); 2381 #ifdef CONFIG_PPP_MULTILINK 2382 /* XXX do channel-level decompression here */ 2383 if (PPP_PROTO(skb) == PPP_MP) 2384 ppp_receive_mp_frame(ppp, skb, pch); 2385 else 2386 #endif /* CONFIG_PPP_MULTILINK */ 2387 ppp_receive_nonmp_frame(ppp, skb); 2388 } else { 2389 kfree_skb(skb); 2390 ppp_receive_error(ppp); 2391 } 2392 } 2393 2394 static void 2395 ppp_receive_error(struct ppp *ppp) 2396 { 2397 ++ppp->dev->stats.rx_errors; 2398 if (ppp->vj) 2399 slhc_toss(ppp->vj); 2400 } 2401 2402 static void 2403 ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) 2404 { 2405 struct sk_buff *ns; 2406 int proto, len, npi; 2407 2408 /* 2409 * Decompress the frame, if compressed. 2410 * Note that some decompressors need to see uncompressed frames 2411 * that come in as well as compressed frames. 2412 */ 2413 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) && 2414 (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0) 2415 skb = ppp_decompress_frame(ppp, skb); 2416 2417 if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR) 2418 goto err; 2419 2420 /* At this point the "Protocol" field MUST be decompressed, either in 2421 * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame(). 
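 *
 * For reference (illustrative values from ppp_defs.h): 0x0021 is IPv4,
 * 0x0057 is IPv6 and 0x002d is a VJ-compressed TCP/IP packet handled
 * by the switch below.  Anything proto_to_npindex() later fails to map
 * to a network protocol is queued for pppd instead, and control
 * traffic (proto >= 0xc000) never reaches this point because
 * ppp_input() already diverted it to the channel's read queue.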
2422 */ 2423 proto = PPP_PROTO(skb); 2424 switch (proto) { 2425 case PPP_VJC_COMP: 2426 /* decompress VJ compressed packets */ 2427 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) 2428 goto err; 2429 2430 if (skb_tailroom(skb) < 124 || skb_cloned(skb)) { 2431 /* copy to a new sk_buff with more tailroom */ 2432 ns = dev_alloc_skb(skb->len + 128); 2433 if (!ns) { 2434 netdev_err(ppp->dev, "PPP: no memory " 2435 "(VJ decomp)\n"); 2436 goto err; 2437 } 2438 skb_reserve(ns, 2); 2439 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); 2440 consume_skb(skb); 2441 skb = ns; 2442 } 2443 else 2444 skb->ip_summed = CHECKSUM_NONE; 2445 2446 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 2447 if (len <= 0) { 2448 netdev_printk(KERN_DEBUG, ppp->dev, 2449 "PPP: VJ decompression error\n"); 2450 goto err; 2451 } 2452 len += 2; 2453 if (len > skb->len) 2454 skb_put(skb, len - skb->len); 2455 else if (len < skb->len) 2456 skb_trim(skb, len); 2457 proto = PPP_IP; 2458 break; 2459 2460 case PPP_VJC_UNCOMP: 2461 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) 2462 goto err; 2463 2464 /* Until we fix the decompressor need to make sure 2465 * data portion is linear. 2466 */ 2467 if (!pskb_may_pull(skb, skb->len)) 2468 goto err; 2469 2470 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { 2471 netdev_err(ppp->dev, "PPP: VJ uncompressed error\n"); 2472 goto err; 2473 } 2474 proto = PPP_IP; 2475 break; 2476 2477 case PPP_CCP: 2478 ppp_ccp_peek(ppp, skb, 1); 2479 break; 2480 } 2481 2482 ++ppp->stats64.rx_packets; 2483 ppp->stats64.rx_bytes += skb->len - 2; 2484 2485 npi = proto_to_npindex(proto); 2486 if (npi < 0) { 2487 /* control or unknown frame - pass it to pppd */ 2488 skb_queue_tail(&ppp->file.rq, skb); 2489 /* limit queue length by dropping old frames */ 2490 while (ppp->file.rq.qlen > PPP_MAX_RQLEN && 2491 (skb = skb_dequeue(&ppp->file.rq))) 2492 kfree_skb(skb); 2493 /* wake up any process polling or blocking on read */ 2494 wake_up_interruptible(&ppp->file.rwait); 2495 2496 } else { 2497 /* network protocol frame - give it to the kernel */ 2498 2499 #ifdef CONFIG_PPP_FILTER 2500 if (ppp->pass_filter || ppp->active_filter) { 2501 if (skb_unclone(skb, GFP_ATOMIC)) 2502 goto err; 2503 /* Check if the packet passes the pass and active filters. 2504 * See comment for PPP_FILTER_INBOUND_TAG above. 
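 *
 * Illustrative layout of what the BPF programs see for an inbound
 * IPv4 frame at this point:
 *	00 00  00 21  45 00 ...	(direction tag, protocol field, IP header)
 * The two tag bytes are pushed just below and pulled off again once
 * the filters have run.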
2505 */ 2506 *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG); 2507 if (ppp->pass_filter && 2508 bpf_prog_run(ppp->pass_filter, skb) == 0) { 2509 if (ppp->debug & 1) 2510 netdev_printk(KERN_DEBUG, ppp->dev, 2511 "PPP: inbound frame " 2512 "not passed\n"); 2513 kfree_skb(skb); 2514 return; 2515 } 2516 if (!(ppp->active_filter && 2517 bpf_prog_run(ppp->active_filter, skb) == 0)) 2518 ppp->last_recv = jiffies; 2519 __skb_pull(skb, 2); 2520 } else 2521 #endif /* CONFIG_PPP_FILTER */ 2522 ppp->last_recv = jiffies; 2523 2524 if ((ppp->dev->flags & IFF_UP) == 0 || 2525 ppp->npmode[npi] != NPMODE_PASS) { 2526 kfree_skb(skb); 2527 } else { 2528 /* chop off protocol */ 2529 skb_pull_rcsum(skb, 2); 2530 skb->dev = ppp->dev; 2531 skb->protocol = htons(npindex_to_ethertype[npi]); 2532 skb_reset_mac_header(skb); 2533 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, 2534 dev_net(ppp->dev))); 2535 netif_rx(skb); 2536 } 2537 } 2538 return; 2539 2540 err: 2541 kfree_skb(skb); 2542 ppp_receive_error(ppp); 2543 } 2544 2545 static struct sk_buff * 2546 ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) 2547 { 2548 int proto = PPP_PROTO(skb); 2549 struct sk_buff *ns; 2550 int len; 2551 2552 /* Until we fix all the decompressor's need to make sure 2553 * data portion is linear. 2554 */ 2555 if (!pskb_may_pull(skb, skb->len)) 2556 goto err; 2557 2558 if (proto == PPP_COMP) { 2559 int obuff_size; 2560 2561 switch(ppp->rcomp->compress_proto) { 2562 case CI_MPPE: 2563 obuff_size = ppp->mru + PPP_HDRLEN + 1; 2564 break; 2565 default: 2566 obuff_size = ppp->mru + PPP_HDRLEN; 2567 break; 2568 } 2569 2570 ns = dev_alloc_skb(obuff_size); 2571 if (!ns) { 2572 netdev_err(ppp->dev, "ppp_decompress_frame: " 2573 "no memory\n"); 2574 goto err; 2575 } 2576 /* the decompressor still expects the A/C bytes in the hdr */ 2577 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, 2578 skb->len + 2, ns->data, obuff_size); 2579 if (len < 0) { 2580 /* Pass the compressed frame to pppd as an 2581 error indication. */ 2582 if (len == DECOMP_FATALERROR) 2583 ppp->rstate |= SC_DC_FERROR; 2584 kfree_skb(ns); 2585 goto err; 2586 } 2587 2588 consume_skb(skb); 2589 skb = ns; 2590 skb_put(skb, len); 2591 skb_pull(skb, 2); /* pull off the A/C bytes */ 2592 2593 /* Don't call __ppp_decompress_proto() here, but instead rely on 2594 * corresponding algo (mppe/bsd/deflate) to decompress it. 2595 */ 2596 } else { 2597 /* Uncompressed frame - pass to decompressor so it 2598 can update its dictionary if necessary. */ 2599 if (ppp->rcomp->incomp) 2600 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2, 2601 skb->len + 2); 2602 } 2603 2604 return skb; 2605 2606 err: 2607 ppp->rstate |= SC_DC_ERROR; 2608 ppp_receive_error(ppp); 2609 return skb; 2610 } 2611 2612 #ifdef CONFIG_PPP_MULTILINK 2613 /* 2614 * Receive a multilink frame. 2615 * We put it on the reconstruction queue and then pull off 2616 * as many completed frames as we can. 2617 */ 2618 static void 2619 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 2620 { 2621 u32 mask, seq; 2622 struct channel *ch; 2623 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? 
MPHDRLEN_SSN: MPHDRLEN; 2624 2625 if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) 2626 goto err; /* no good, throw it away */ 2627 2628 /* Decode sequence number and begin/end bits */ 2629 if (ppp->flags & SC_MP_SHORTSEQ) { 2630 seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3]; 2631 mask = 0xfff; 2632 } else { 2633 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5]; 2634 mask = 0xffffff; 2635 } 2636 PPP_MP_CB(skb)->BEbits = skb->data[2]; 2637 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ 2638 2639 /* 2640 * Do protocol ID decompression on the first fragment of each packet. 2641 * We have to do that here, because ppp_receive_nonmp_frame() expects 2642 * decompressed protocol field. 2643 */ 2644 if (PPP_MP_CB(skb)->BEbits & B) 2645 __ppp_decompress_proto(skb); 2646 2647 /* 2648 * Expand sequence number to 32 bits, making it as close 2649 * as possible to ppp->minseq. 2650 */ 2651 seq |= ppp->minseq & ~mask; 2652 if ((int)(ppp->minseq - seq) > (int)(mask >> 1)) 2653 seq += mask + 1; 2654 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) 2655 seq -= mask + 1; /* should never happen */ 2656 PPP_MP_CB(skb)->sequence = seq; 2657 pch->lastseq = seq; 2658 2659 /* 2660 * If this packet comes before the next one we were expecting, 2661 * drop it. 2662 */ 2663 if (seq_before(seq, ppp->nextseq)) { 2664 kfree_skb(skb); 2665 ++ppp->dev->stats.rx_dropped; 2666 ppp_receive_error(ppp); 2667 return; 2668 } 2669 2670 /* 2671 * Reevaluate minseq, the minimum over all channels of the 2672 * last sequence number received on each channel. Because of 2673 * the increasing sequence number rule, we know that any fragment 2674 * before `minseq' which hasn't arrived is never going to arrive. 2675 * The list of channels can't change because we have the receive 2676 * side of the ppp unit locked. 2677 */ 2678 list_for_each_entry(ch, &ppp->channels, clist) { 2679 if (seq_before(ch->lastseq, seq)) 2680 seq = ch->lastseq; 2681 } 2682 if (seq_before(ppp->minseq, seq)) 2683 ppp->minseq = seq; 2684 2685 /* Put the fragment on the reconstruction queue */ 2686 ppp_mp_insert(ppp, skb); 2687 2688 /* If the queue is getting long, don't wait any longer for packets 2689 before the start of the queue. */ 2690 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { 2691 struct sk_buff *mskb = skb_peek(&ppp->mrq); 2692 if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence)) 2693 ppp->minseq = PPP_MP_CB(mskb)->sequence; 2694 } 2695 2696 /* Pull completed packets off the queue and receive them. */ 2697 while ((skb = ppp_mp_reconstruct(ppp))) { 2698 if (pskb_may_pull(skb, 2)) 2699 ppp_receive_nonmp_frame(ppp, skb); 2700 else { 2701 ++ppp->dev->stats.rx_length_errors; 2702 kfree_skb(skb); 2703 ppp_receive_error(ppp); 2704 } 2705 } 2706 2707 return; 2708 2709 err: 2710 kfree_skb(skb); 2711 ppp_receive_error(ppp); 2712 } 2713 2714 /* 2715 * Insert a fragment on the MP reconstruction queue. 2716 * The queue is ordered by increasing sequence number. 2717 */ 2718 static void 2719 ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb) 2720 { 2721 struct sk_buff *p; 2722 struct sk_buff_head *list = &ppp->mrq; 2723 u32 seq = PPP_MP_CB(skb)->sequence; 2724 2725 /* N.B. we don't need to lock the list lock because we have the 2726 ppp unit receive-side lock. */ 2727 skb_queue_walk(list, p) { 2728 if (seq_before(seq, PPP_MP_CB(p)->sequence)) 2729 break; 2730 } 2731 __skb_queue_before(list, p, skb); 2732 } 2733 2734 /* 2735 * Reconstruct a packet from the MP fragment queue. 
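 *
 * (Fragments carry B/E flags: B marks the first fragment of a packet,
 *  E marks the last, middle fragments carry neither, and an
 *  unfragmented packet carries both.  Illustrative example: with
 *  fragments 5(B), 6 and 7(E) queued and nextseq == 5 we can rebuild
 *  one packet here and nextseq advances to 8.)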
2736 * We go through increasing sequence numbers until we find a 2737 * complete packet, or we get to the sequence number for a fragment 2738 * which hasn't arrived but might still do so. 2739 */ 2740 static struct sk_buff * 2741 ppp_mp_reconstruct(struct ppp *ppp) 2742 { 2743 u32 seq = ppp->nextseq; 2744 u32 minseq = ppp->minseq; 2745 struct sk_buff_head *list = &ppp->mrq; 2746 struct sk_buff *p, *tmp; 2747 struct sk_buff *head, *tail; 2748 struct sk_buff *skb = NULL; 2749 int lost = 0, len = 0; 2750 2751 if (ppp->mrru == 0) /* do nothing until mrru is set */ 2752 return NULL; 2753 head = __skb_peek(list); 2754 tail = NULL; 2755 skb_queue_walk_safe(list, p, tmp) { 2756 again: 2757 if (seq_before(PPP_MP_CB(p)->sequence, seq)) { 2758 /* this can't happen, anyway ignore the skb */ 2759 netdev_err(ppp->dev, "ppp_mp_reconstruct bad " 2760 "seq %u < %u\n", 2761 PPP_MP_CB(p)->sequence, seq); 2762 __skb_unlink(p, list); 2763 kfree_skb(p); 2764 continue; 2765 } 2766 if (PPP_MP_CB(p)->sequence != seq) { 2767 u32 oldseq; 2768 /* Fragment `seq' is missing. If it is after 2769 minseq, it might arrive later, so stop here. */ 2770 if (seq_after(seq, minseq)) 2771 break; 2772 /* Fragment `seq' is lost, keep going. */ 2773 lost = 1; 2774 oldseq = seq; 2775 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? 2776 minseq + 1: PPP_MP_CB(p)->sequence; 2777 2778 if (ppp->debug & 1) 2779 netdev_printk(KERN_DEBUG, ppp->dev, 2780 "lost frag %u..%u\n", 2781 oldseq, seq-1); 2782 2783 goto again; 2784 } 2785 2786 /* 2787 * At this point we know that all the fragments from 2788 * ppp->nextseq to seq are either present or lost. 2789 * Also, there are no complete packets in the queue 2790 * that have no missing fragments and end before this 2791 * fragment. 2792 */ 2793 2794 /* B bit set indicates this fragment starts a packet */ 2795 if (PPP_MP_CB(p)->BEbits & B) { 2796 head = p; 2797 lost = 0; 2798 len = 0; 2799 } 2800 2801 len += p->len; 2802 2803 /* Got a complete packet yet? */ 2804 if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) && 2805 (PPP_MP_CB(head)->BEbits & B)) { 2806 if (len > ppp->mrru + 2) { 2807 ++ppp->dev->stats.rx_length_errors; 2808 netdev_printk(KERN_DEBUG, ppp->dev, 2809 "PPP: reconstructed packet" 2810 " is too long (%d)\n", len); 2811 } else { 2812 tail = p; 2813 break; 2814 } 2815 ppp->nextseq = seq + 1; 2816 } 2817 2818 /* 2819 * If this is the ending fragment of a packet, 2820 * and we haven't found a complete valid packet yet, 2821 * we can discard up to and including this fragment. 2822 */ 2823 if (PPP_MP_CB(p)->BEbits & E) { 2824 struct sk_buff *tmp2; 2825 2826 skb_queue_reverse_walk_from_safe(list, p, tmp2) { 2827 if (ppp->debug & 1) 2828 netdev_printk(KERN_DEBUG, ppp->dev, 2829 "discarding frag %u\n", 2830 PPP_MP_CB(p)->sequence); 2831 __skb_unlink(p, list); 2832 kfree_skb(p); 2833 } 2834 head = skb_peek(list); 2835 if (!head) 2836 break; 2837 } 2838 ++seq; 2839 } 2840 2841 /* If we have a complete packet, copy it all into one skb. */ 2842 if (tail != NULL) { 2843 /* If we have discarded any fragments, 2844 signal a receive error. 
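 *
 * Illustrative case: nextseq is 5, fragments 5(B) and 6 are queued,
 * fragment 7(E) is lost (minseq says it can no longer arrive) and
 * 8(B) plus 9(E) form a complete packet.  Fragments 5 and 6 are
 * discarded here, rx_dropped is bumped, and the packet rebuilt from
 * 8 and 9 is still delivered, with nextseq moving on to 10.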
*/ 2845 if (PPP_MP_CB(head)->sequence != ppp->nextseq) { 2846 skb_queue_walk_safe(list, p, tmp) { 2847 if (p == head) 2848 break; 2849 if (ppp->debug & 1) 2850 netdev_printk(KERN_DEBUG, ppp->dev, 2851 "discarding frag %u\n", 2852 PPP_MP_CB(p)->sequence); 2853 __skb_unlink(p, list); 2854 kfree_skb(p); 2855 } 2856 2857 if (ppp->debug & 1) 2858 netdev_printk(KERN_DEBUG, ppp->dev, 2859 " missed pkts %u..%u\n", 2860 ppp->nextseq, 2861 PPP_MP_CB(head)->sequence-1); 2862 ++ppp->dev->stats.rx_dropped; 2863 ppp_receive_error(ppp); 2864 } 2865 2866 skb = head; 2867 if (head != tail) { 2868 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list; 2869 p = skb_queue_next(list, head); 2870 __skb_unlink(skb, list); 2871 skb_queue_walk_from_safe(list, p, tmp) { 2872 __skb_unlink(p, list); 2873 *fragpp = p; 2874 p->next = NULL; 2875 fragpp = &p->next; 2876 2877 skb->len += p->len; 2878 skb->data_len += p->len; 2879 skb->truesize += p->truesize; 2880 2881 if (p == tail) 2882 break; 2883 } 2884 } else { 2885 __skb_unlink(skb, list); 2886 } 2887 2888 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; 2889 } 2890 2891 return skb; 2892 } 2893 #endif /* CONFIG_PPP_MULTILINK */ 2894 2895 /* 2896 * Channel interface. 2897 */ 2898 2899 /* Create a new, unattached ppp channel. */ 2900 int ppp_register_channel(struct ppp_channel *chan) 2901 { 2902 return ppp_register_net_channel(current->nsproxy->net_ns, chan); 2903 } 2904 2905 /* Create a new, unattached ppp channel for specified net. */ 2906 int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) 2907 { 2908 struct channel *pch; 2909 struct ppp_net *pn; 2910 2911 pch = kzalloc(sizeof(struct channel), GFP_KERNEL); 2912 if (!pch) 2913 return -ENOMEM; 2914 2915 pn = ppp_pernet(net); 2916 2917 pch->ppp = NULL; 2918 pch->chan = chan; 2919 pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL); 2920 chan->ppp = pch; 2921 init_ppp_file(&pch->file, CHANNEL); 2922 pch->file.hdrlen = chan->hdrlen; 2923 #ifdef CONFIG_PPP_MULTILINK 2924 pch->lastseq = -1; 2925 #endif /* CONFIG_PPP_MULTILINK */ 2926 init_rwsem(&pch->chan_sem); 2927 spin_lock_init(&pch->downl); 2928 rwlock_init(&pch->upl); 2929 2930 spin_lock_bh(&pn->all_channels_lock); 2931 pch->file.index = ++pn->last_channel_index; 2932 list_add(&pch->list, &pn->new_channels); 2933 atomic_inc(&channel_count); 2934 spin_unlock_bh(&pn->all_channels_lock); 2935 2936 return 0; 2937 } 2938 2939 /* 2940 * Return the index of a channel. 2941 */ 2942 int ppp_channel_index(struct ppp_channel *chan) 2943 { 2944 struct channel *pch = chan->ppp; 2945 2946 if (pch) 2947 return pch->file.index; 2948 return -1; 2949 } 2950 2951 /* 2952 * Return the PPP unit number to which a channel is connected. 2953 */ 2954 int ppp_unit_number(struct ppp_channel *chan) 2955 { 2956 struct channel *pch = chan->ppp; 2957 int unit = -1; 2958 2959 if (pch) { 2960 read_lock_bh(&pch->upl); 2961 if (pch->ppp) 2962 unit = pch->ppp->file.index; 2963 read_unlock_bh(&pch->upl); 2964 } 2965 return unit; 2966 } 2967 2968 /* 2969 * Return the PPP device interface name of a channel. 2970 */ 2971 char *ppp_dev_name(struct ppp_channel *chan) 2972 { 2973 struct channel *pch = chan->ppp; 2974 char *name = NULL; 2975 2976 if (pch) { 2977 read_lock_bh(&pch->upl); 2978 if (pch->ppp && pch->ppp->dev) 2979 name = pch->ppp->dev->name; 2980 read_unlock_bh(&pch->upl); 2981 } 2982 return name; 2983 } 2984 2985 2986 /* 2987 * Disconnect a channel from the generic layer. 2988 * This must be called in process context. 
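 *
 * For context, a sketch of the whole channel lifecycle as a channel
 * driver might implement it (illustrative only; the example_* names
 * and the drv/dev variables are made up, see
 * include/linux/ppp_channel.h for the real API):
 *
 *	static int example_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 *	{
 *		// return 1 if the frame was consumed, 0 to make the
 *		// generic layer keep it queued until ppp_output_wakeup()
 *		return example_hw_send(chan->private, skb);
 *	}
 *
 *	static const struct ppp_channel_ops example_ops = {
 *		.start_xmit = example_xmit,
 *	};
 *
 *	chan->private = drv;
 *	chan->ops     = &example_ops;
 *	chan->mtu     = 1500;
 *	chan->hdrlen  = 0;
 *	ppp_register_net_channel(dev_net(dev), chan);
 *	...
 *	ppp_input(chan, skb);		// pass a received frame up to us
 *	ppp_output_wakeup(chan);	// channel can take more to transmit
 *	ppp_unregister_channel(chan);	// tear-down, process context only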
2989 */ 2990 void 2991 ppp_unregister_channel(struct ppp_channel *chan) 2992 { 2993 struct channel *pch = chan->ppp; 2994 struct ppp_net *pn; 2995 2996 if (!pch) 2997 return; /* should never happen */ 2998 2999 chan->ppp = NULL; 3000 3001 /* 3002 * This ensures that we have returned from any calls into 3003 * the channel's start_xmit or ioctl routine before we proceed. 3004 */ 3005 down_write(&pch->chan_sem); 3006 spin_lock_bh(&pch->downl); 3007 pch->chan = NULL; 3008 spin_unlock_bh(&pch->downl); 3009 up_write(&pch->chan_sem); 3010 ppp_disconnect_channel(pch); 3011 3012 pn = ppp_pernet(pch->chan_net); 3013 spin_lock_bh(&pn->all_channels_lock); 3014 list_del(&pch->list); 3015 spin_unlock_bh(&pn->all_channels_lock); 3016 3017 ppp_unbridge_channels(pch); 3018 3019 pch->file.dead = 1; 3020 wake_up_interruptible(&pch->file.rwait); 3021 3022 if (refcount_dec_and_test(&pch->file.refcnt)) 3023 ppp_destroy_channel(pch); 3024 } 3025 3026 /* 3027 * Callback from a channel when it can accept more to transmit. 3028 * This should be called at BH/softirq level, not interrupt level. 3029 */ 3030 void 3031 ppp_output_wakeup(struct ppp_channel *chan) 3032 { 3033 struct channel *pch = chan->ppp; 3034 3035 if (!pch) 3036 return; 3037 ppp_channel_push(pch); 3038 } 3039 3040 /* 3041 * Compression control. 3042 */ 3043 3044 /* Process the PPPIOCSCOMPRESS ioctl. */ 3045 static int 3046 ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data) 3047 { 3048 int err = -EFAULT; 3049 struct compressor *cp, *ocomp; 3050 void *state, *ostate; 3051 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH]; 3052 3053 if (data->length > CCP_MAX_OPTION_LENGTH) 3054 goto out; 3055 if (copy_from_user(ccp_option, data->ptr, data->length)) 3056 goto out; 3057 3058 err = -EINVAL; 3059 if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length) 3060 goto out; 3061 3062 cp = try_then_request_module( 3063 find_compressor(ccp_option[0]), 3064 "ppp-compress-%d", ccp_option[0]); 3065 if (!cp) 3066 goto out; 3067 3068 err = -ENOBUFS; 3069 if (data->transmit) { 3070 state = cp->comp_alloc(ccp_option, data->length); 3071 if (state) { 3072 ppp_xmit_lock(ppp); 3073 ppp->xstate &= ~SC_COMP_RUN; 3074 ocomp = ppp->xcomp; 3075 ostate = ppp->xc_state; 3076 ppp->xcomp = cp; 3077 ppp->xc_state = state; 3078 ppp_xmit_unlock(ppp); 3079 if (ostate) { 3080 ocomp->comp_free(ostate); 3081 module_put(ocomp->owner); 3082 } 3083 err = 0; 3084 } else 3085 module_put(cp->owner); 3086 3087 } else { 3088 state = cp->decomp_alloc(ccp_option, data->length); 3089 if (state) { 3090 ppp_recv_lock(ppp); 3091 ppp->rstate &= ~SC_DECOMP_RUN; 3092 ocomp = ppp->rcomp; 3093 ostate = ppp->rc_state; 3094 ppp->rcomp = cp; 3095 ppp->rc_state = state; 3096 ppp_recv_unlock(ppp); 3097 if (ostate) { 3098 ocomp->decomp_free(ostate); 3099 module_put(ocomp->owner); 3100 } 3101 err = 0; 3102 } else 3103 module_put(cp->owner); 3104 } 3105 3106 out: 3107 return err; 3108 } 3109 3110 /* 3111 * Look at a CCP packet and update our state accordingly. 3112 * We assume the caller has the xmit or recv path locked. 3113 */ 3114 static void 3115 ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound) 3116 { 3117 unsigned char *dp; 3118 int len; 3119 3120 if (!pskb_may_pull(skb, CCP_HDRLEN + 2)) 3121 return; /* no header */ 3122 dp = skb->data + 2; 3123 3124 switch (CCP_CODE(dp)) { 3125 case CCP_CONFREQ: 3126 3127 /* A ConfReq starts negotiation of compression 3128 * in one direction of transmission, 3129 * and hence brings it down...but which way? 
3130 * 3131 * Remember: 3132 * A ConfReq indicates what the sender would like to receive 3133 */ 3134 if(inbound) 3135 /* He is proposing what I should send */ 3136 ppp->xstate &= ~SC_COMP_RUN; 3137 else 3138 /* I am proposing to what he should send */ 3139 ppp->rstate &= ~SC_DECOMP_RUN; 3140 3141 break; 3142 3143 case CCP_TERMREQ: 3144 case CCP_TERMACK: 3145 /* 3146 * CCP is going down, both directions of transmission 3147 */ 3148 ppp->rstate &= ~SC_DECOMP_RUN; 3149 ppp->xstate &= ~SC_COMP_RUN; 3150 break; 3151 3152 case CCP_CONFACK: 3153 if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN) 3154 break; 3155 len = CCP_LENGTH(dp); 3156 if (!pskb_may_pull(skb, len + 2)) 3157 return; /* too short */ 3158 dp += CCP_HDRLEN; 3159 len -= CCP_HDRLEN; 3160 if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp)) 3161 break; 3162 if (inbound) { 3163 /* we will start receiving compressed packets */ 3164 if (!ppp->rc_state) 3165 break; 3166 if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len, 3167 ppp->file.index, 0, ppp->mru, ppp->debug)) { 3168 ppp->rstate |= SC_DECOMP_RUN; 3169 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR); 3170 } 3171 } else { 3172 /* we will soon start sending compressed packets */ 3173 if (!ppp->xc_state) 3174 break; 3175 if (ppp->xcomp->comp_init(ppp->xc_state, dp, len, 3176 ppp->file.index, 0, ppp->debug)) 3177 ppp->xstate |= SC_COMP_RUN; 3178 } 3179 break; 3180 3181 case CCP_RESETACK: 3182 /* reset the [de]compressor */ 3183 if ((ppp->flags & SC_CCP_UP) == 0) 3184 break; 3185 if (inbound) { 3186 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) { 3187 ppp->rcomp->decomp_reset(ppp->rc_state); 3188 ppp->rstate &= ~SC_DC_ERROR; 3189 } 3190 } else { 3191 if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN)) 3192 ppp->xcomp->comp_reset(ppp->xc_state); 3193 } 3194 break; 3195 } 3196 } 3197 3198 /* Free up compression resources. */ 3199 static void 3200 ppp_ccp_closed(struct ppp *ppp) 3201 { 3202 void *xstate, *rstate; 3203 struct compressor *xcomp, *rcomp; 3204 3205 ppp_lock(ppp); 3206 ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP); 3207 ppp->xstate = 0; 3208 xcomp = ppp->xcomp; 3209 xstate = ppp->xc_state; 3210 ppp->xc_state = NULL; 3211 ppp->rstate = 0; 3212 rcomp = ppp->rcomp; 3213 rstate = ppp->rc_state; 3214 ppp->rc_state = NULL; 3215 ppp_unlock(ppp); 3216 3217 if (xstate) { 3218 xcomp->comp_free(xstate); 3219 module_put(xcomp->owner); 3220 } 3221 if (rstate) { 3222 rcomp->decomp_free(rstate); 3223 module_put(rcomp->owner); 3224 } 3225 } 3226 3227 /* List of compressors. 
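 *
 * Compression modules (ppp_deflate, bsd_comp, ppp_mppe) hook themselves
 * in here from their module init code, roughly like this (sketch only,
 * the my_* names are placeholders and most handler fields are omitted):
 *
 *	static struct compressor my_comp = {
 *		.compress_proto	= CI_DEFLATE,
 *		.comp_alloc	= my_comp_alloc,
 *		.decomp_alloc	= my_decomp_alloc,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = ppp_register_compressor(&my_comp);	// -EEXIST if taken
 *	...
 *	ppp_unregister_compressor(&my_comp);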
*/ 3228 static LIST_HEAD(compressor_list); 3229 static DEFINE_SPINLOCK(compressor_list_lock); 3230 3231 struct compressor_entry { 3232 struct list_head list; 3233 struct compressor *comp; 3234 }; 3235 3236 static struct compressor_entry * 3237 find_comp_entry(int proto) 3238 { 3239 struct compressor_entry *ce; 3240 3241 list_for_each_entry(ce, &compressor_list, list) { 3242 if (ce->comp->compress_proto == proto) 3243 return ce; 3244 } 3245 return NULL; 3246 } 3247 3248 /* Register a compressor */ 3249 int 3250 ppp_register_compressor(struct compressor *cp) 3251 { 3252 struct compressor_entry *ce; 3253 int ret; 3254 spin_lock(&compressor_list_lock); 3255 ret = -EEXIST; 3256 if (find_comp_entry(cp->compress_proto)) 3257 goto out; 3258 ret = -ENOMEM; 3259 ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC); 3260 if (!ce) 3261 goto out; 3262 ret = 0; 3263 ce->comp = cp; 3264 list_add(&ce->list, &compressor_list); 3265 out: 3266 spin_unlock(&compressor_list_lock); 3267 return ret; 3268 } 3269 3270 /* Unregister a compressor */ 3271 void 3272 ppp_unregister_compressor(struct compressor *cp) 3273 { 3274 struct compressor_entry *ce; 3275 3276 spin_lock(&compressor_list_lock); 3277 ce = find_comp_entry(cp->compress_proto); 3278 if (ce && ce->comp == cp) { 3279 list_del(&ce->list); 3280 kfree(ce); 3281 } 3282 spin_unlock(&compressor_list_lock); 3283 } 3284 3285 /* Find a compressor. */ 3286 static struct compressor * 3287 find_compressor(int type) 3288 { 3289 struct compressor_entry *ce; 3290 struct compressor *cp = NULL; 3291 3292 spin_lock(&compressor_list_lock); 3293 ce = find_comp_entry(type); 3294 if (ce) { 3295 cp = ce->comp; 3296 if (!try_module_get(cp->owner)) 3297 cp = NULL; 3298 } 3299 spin_unlock(&compressor_list_lock); 3300 return cp; 3301 } 3302 3303 /* 3304 * Miscelleneous stuff. 3305 */ 3306 3307 static void 3308 ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) 3309 { 3310 struct slcompress *vj = ppp->vj; 3311 3312 memset(st, 0, sizeof(*st)); 3313 st->p.ppp_ipackets = ppp->stats64.rx_packets; 3314 st->p.ppp_ierrors = ppp->dev->stats.rx_errors; 3315 st->p.ppp_ibytes = ppp->stats64.rx_bytes; 3316 st->p.ppp_opackets = ppp->stats64.tx_packets; 3317 st->p.ppp_oerrors = ppp->dev->stats.tx_errors; 3318 st->p.ppp_obytes = ppp->stats64.tx_bytes; 3319 if (!vj) 3320 return; 3321 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; 3322 st->vj.vjs_compressed = vj->sls_o_compressed; 3323 st->vj.vjs_searches = vj->sls_o_searches; 3324 st->vj.vjs_misses = vj->sls_o_misses; 3325 st->vj.vjs_errorin = vj->sls_i_error; 3326 st->vj.vjs_tossed = vj->sls_i_tossed; 3327 st->vj.vjs_uncompressedin = vj->sls_i_uncompressed; 3328 st->vj.vjs_compressedin = vj->sls_i_compressed; 3329 } 3330 3331 /* 3332 * Stuff for handling the lists of ppp units and channels 3333 * and for initialization. 3334 */ 3335 3336 /* 3337 * Create a new ppp interface unit. Fails if it can't allocate memory 3338 * or if there is already a unit with the requested number. 3339 * unit == -1 means allocate a new number. 
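 *
 * Typically pppd passes unit == -1 and gets the lowest free number, so
 * successive units show up as ppp0, ppp1, ...; a specific number can be
 * requested (PPPIOCNEWUNIT) and the request fails if that unit already
 * exists.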
3340 */ 3341 static int ppp_create_interface(struct net *net, struct file *file, int *unit) 3342 { 3343 struct ppp_config conf = { 3344 .file = file, 3345 .unit = *unit, 3346 .ifname_is_set = false, 3347 }; 3348 struct net_device *dev; 3349 struct ppp *ppp; 3350 int err; 3351 3352 dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup); 3353 if (!dev) { 3354 err = -ENOMEM; 3355 goto err; 3356 } 3357 dev_net_set(dev, net); 3358 dev->rtnl_link_ops = &ppp_link_ops; 3359 3360 rtnl_lock(); 3361 3362 err = ppp_dev_configure(net, dev, &conf); 3363 if (err < 0) 3364 goto err_dev; 3365 ppp = netdev_priv(dev); 3366 *unit = ppp->file.index; 3367 3368 rtnl_unlock(); 3369 3370 return 0; 3371 3372 err_dev: 3373 rtnl_unlock(); 3374 free_netdev(dev); 3375 err: 3376 return err; 3377 } 3378 3379 /* 3380 * Initialize a ppp_file structure. 3381 */ 3382 static void 3383 init_ppp_file(struct ppp_file *pf, int kind) 3384 { 3385 pf->kind = kind; 3386 skb_queue_head_init(&pf->xq); 3387 skb_queue_head_init(&pf->rq); 3388 refcount_set(&pf->refcnt, 1); 3389 init_waitqueue_head(&pf->rwait); 3390 } 3391 3392 /* 3393 * Free the memory used by a ppp unit. This is only called once 3394 * there are no channels connected to the unit and no file structs 3395 * that reference the unit. 3396 */ 3397 static void ppp_destroy_interface(struct ppp *ppp) 3398 { 3399 atomic_dec(&ppp_unit_count); 3400 3401 if (!ppp->file.dead || ppp->n_channels) { 3402 /* "can't happen" */ 3403 netdev_err(ppp->dev, "ppp: destroying ppp struct %p " 3404 "but dead=%d n_channels=%d !\n", 3405 ppp, ppp->file.dead, ppp->n_channels); 3406 return; 3407 } 3408 3409 ppp_ccp_closed(ppp); 3410 if (ppp->vj) { 3411 slhc_free(ppp->vj); 3412 ppp->vj = NULL; 3413 } 3414 skb_queue_purge(&ppp->file.xq); 3415 skb_queue_purge(&ppp->file.rq); 3416 #ifdef CONFIG_PPP_MULTILINK 3417 skb_queue_purge(&ppp->mrq); 3418 #endif /* CONFIG_PPP_MULTILINK */ 3419 #ifdef CONFIG_PPP_FILTER 3420 if (ppp->pass_filter) { 3421 bpf_prog_destroy(ppp->pass_filter); 3422 ppp->pass_filter = NULL; 3423 } 3424 3425 if (ppp->active_filter) { 3426 bpf_prog_destroy(ppp->active_filter); 3427 ppp->active_filter = NULL; 3428 } 3429 #endif /* CONFIG_PPP_FILTER */ 3430 3431 kfree_skb(ppp->xmit_pending); 3432 free_percpu(ppp->xmit_recursion); 3433 3434 free_netdev(ppp->dev); 3435 } 3436 3437 /* 3438 * Locate an existing ppp unit. 3439 * The caller should have locked the all_ppp_mutex. 3440 */ 3441 static struct ppp * 3442 ppp_find_unit(struct ppp_net *pn, int unit) 3443 { 3444 return unit_find(&pn->units_idr, unit); 3445 } 3446 3447 /* 3448 * Locate an existing ppp channel. 3449 * The caller should have locked the all_channels_lock. 3450 * First we look in the new_channels list, then in the 3451 * all_channels list. If found in the new_channels list, 3452 * we move it to the all_channels list. This is for speed 3453 * when we have a lot of channels in use. 3454 */ 3455 static struct channel * 3456 ppp_find_channel(struct ppp_net *pn, int unit) 3457 { 3458 struct channel *pch; 3459 3460 list_for_each_entry(pch, &pn->new_channels, list) { 3461 if (pch->file.index == unit) { 3462 list_move(&pch->list, &pn->all_channels); 3463 return pch; 3464 } 3465 } 3466 3467 list_for_each_entry(pch, &pn->all_channels, list) { 3468 if (pch->file.index == unit) 3469 return pch; 3470 } 3471 3472 return NULL; 3473 } 3474 3475 /* 3476 * Connect a PPP channel to a PPP interface unit. 
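 *
 * Userspace reaches this via /dev/ppp: pppd opens one fd per link,
 * attaches it to the channel (PPPIOCATTCHAN, using the index obtained
 * from the channel driver with PPPIOCGCHAN) and then issues
 * PPPIOCCONNECT with the unit number, which lands here; PPPIOCDISCONN
 * undoes the connection.  (Illustrative description of the usual pppd
 * sequence; the ioctl handling itself lives earlier in this file.)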
3477 */ 3478 static int 3479 ppp_connect_channel(struct channel *pch, int unit) 3480 { 3481 struct ppp *ppp; 3482 struct ppp_net *pn; 3483 int ret = -ENXIO; 3484 int hdrlen; 3485 3486 pn = ppp_pernet(pch->chan_net); 3487 3488 mutex_lock(&pn->all_ppp_mutex); 3489 ppp = ppp_find_unit(pn, unit); 3490 if (!ppp) 3491 goto out; 3492 write_lock_bh(&pch->upl); 3493 ret = -EINVAL; 3494 if (pch->ppp || 3495 rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) 3496 goto outl; 3497 3498 ppp_lock(ppp); 3499 spin_lock_bh(&pch->downl); 3500 if (!pch->chan) { 3501 /* Don't connect unregistered channels */ 3502 spin_unlock_bh(&pch->downl); 3503 ppp_unlock(ppp); 3504 ret = -ENOTCONN; 3505 goto outl; 3506 } 3507 if (pch->chan->direct_xmit) 3508 ppp->dev->priv_flags |= IFF_NO_QUEUE; 3509 else 3510 ppp->dev->priv_flags &= ~IFF_NO_QUEUE; 3511 spin_unlock_bh(&pch->downl); 3512 if (pch->file.hdrlen > ppp->file.hdrlen) 3513 ppp->file.hdrlen = pch->file.hdrlen; 3514 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ 3515 if (hdrlen > ppp->dev->hard_header_len) 3516 ppp->dev->hard_header_len = hdrlen; 3517 list_add_tail(&pch->clist, &ppp->channels); 3518 ++ppp->n_channels; 3519 pch->ppp = ppp; 3520 refcount_inc(&ppp->file.refcnt); 3521 ppp_unlock(ppp); 3522 ret = 0; 3523 3524 outl: 3525 write_unlock_bh(&pch->upl); 3526 out: 3527 mutex_unlock(&pn->all_ppp_mutex); 3528 return ret; 3529 } 3530 3531 /* 3532 * Disconnect a channel from its ppp unit. 3533 */ 3534 static int 3535 ppp_disconnect_channel(struct channel *pch) 3536 { 3537 struct ppp *ppp; 3538 int err = -EINVAL; 3539 3540 write_lock_bh(&pch->upl); 3541 ppp = pch->ppp; 3542 pch->ppp = NULL; 3543 write_unlock_bh(&pch->upl); 3544 if (ppp) { 3545 /* remove it from the ppp unit's list */ 3546 ppp_lock(ppp); 3547 list_del(&pch->clist); 3548 if (--ppp->n_channels == 0) 3549 wake_up_interruptible(&ppp->file.rwait); 3550 ppp_unlock(ppp); 3551 if (refcount_dec_and_test(&ppp->file.refcnt)) 3552 ppp_destroy_interface(ppp); 3553 err = 0; 3554 } 3555 return err; 3556 } 3557 3558 /* 3559 * Free up the resources used by a ppp channel. 3560 */ 3561 static void ppp_destroy_channel(struct channel *pch) 3562 { 3563 put_net_track(pch->chan_net, &pch->ns_tracker); 3564 pch->chan_net = NULL; 3565 3566 atomic_dec(&channel_count); 3567 3568 if (!pch->file.dead) { 3569 /* "can't happen" */ 3570 pr_err("ppp: destroying undead channel %p !\n", pch); 3571 return; 3572 } 3573 skb_queue_purge(&pch->file.xq); 3574 skb_queue_purge(&pch->file.rq); 3575 kfree(pch); 3576 } 3577 3578 static void __exit ppp_cleanup(void) 3579 { 3580 /* should never happen */ 3581 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 3582 pr_err("PPP: removing module but units remain!\n"); 3583 rtnl_link_unregister(&ppp_link_ops); 3584 unregister_chrdev(PPP_MAJOR, "ppp"); 3585 device_destroy(&ppp_class, MKDEV(PPP_MAJOR, 0)); 3586 class_unregister(&ppp_class); 3587 unregister_pernet_device(&ppp_net_ops); 3588 } 3589 3590 /* 3591 * Units handling. 
Caller must protect concurrent access 3592 * by holding all_ppp_mutex 3593 */ 3594 3595 /* associate pointer with specified number */ 3596 static int unit_set(struct idr *p, void *ptr, int n) 3597 { 3598 int unit; 3599 3600 unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL); 3601 if (unit == -ENOSPC) 3602 unit = -EINVAL; 3603 return unit; 3604 } 3605 3606 /* get new free unit number and associate pointer with it */ 3607 static int unit_get(struct idr *p, void *ptr, int min) 3608 { 3609 return idr_alloc(p, ptr, min, 0, GFP_KERNEL); 3610 } 3611 3612 /* put unit number back to a pool */ 3613 static void unit_put(struct idr *p, int n) 3614 { 3615 idr_remove(p, n); 3616 } 3617 3618 /* get pointer associated with the number */ 3619 static void *unit_find(struct idr *p, int n) 3620 { 3621 return idr_find(p, n); 3622 } 3623 3624 /* Module/initialization stuff */ 3625 3626 module_init(ppp_init); 3627 module_exit(ppp_cleanup); 3628 3629 EXPORT_SYMBOL(ppp_register_net_channel); 3630 EXPORT_SYMBOL(ppp_register_channel); 3631 EXPORT_SYMBOL(ppp_unregister_channel); 3632 EXPORT_SYMBOL(ppp_channel_index); 3633 EXPORT_SYMBOL(ppp_unit_number); 3634 EXPORT_SYMBOL(ppp_dev_name); 3635 EXPORT_SYMBOL(ppp_input); 3636 EXPORT_SYMBOL(ppp_input_error); 3637 EXPORT_SYMBOL(ppp_output_wakeup); 3638 EXPORT_SYMBOL(ppp_register_compressor); 3639 EXPORT_SYMBOL(ppp_unregister_compressor); 3640 MODULE_DESCRIPTION("Generic PPP layer driver"); 3641 MODULE_LICENSE("GPL"); 3642 MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); 3643 MODULE_ALIAS_RTNL_LINK("ppp"); 3644 MODULE_ALIAS("devname:ppp"); 3645
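
/*
 * Usage sketch for the unit-number helpers above (illustrative only):
 * with all_ppp_mutex held, unit_get(&pn->units_idr, ppp, 0) returns the
 * lowest free unit number, unit_set(&pn->units_idr, ppp, 5) claims
 * exactly unit 5 (or fails if it is taken), unit_find() maps a number
 * back to its struct ppp and unit_put() releases the number again.
 */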