// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Point-to-Point Tunneling Protocol for Linux
 *
 * Authors: Dmitry Kozlov <xeb@mail.ru>
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_pppox.h>
#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

#include <net/sock.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/gre.h>
#include <net/pptp.h>

#include <linux/uaccess.h>

#define PPTP_DRIVER_VERSION "0.8.5"

#define MAX_CALLID 65535

static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
static struct pppox_sock __rcu **callid_sock;

static DEFINE_SPINLOCK(chan_lock);

static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;

static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;

	rcu_read_lock();
	sock = rcu_dereference(callid_sock[call_id]);
	if (sock) {
		opt = &sock->proto.pptp;
		if (opt->dst_addr.sin_addr.s_addr != s_addr)
			sock = NULL;
		else
			sock_hold(sk_pppox(sock));
	}
	rcu_read_unlock();

	return sock;
}

static int lookup_chan_dst(u16 call_id, __be32 d_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;
	int i;

	rcu_read_lock();
	i = 1;
	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
		sock = rcu_dereference(callid_sock[i]);
		if (!sock)
			continue;
		opt = &sock->proto.pptp;
		if (opt->dst_addr.call_id == call_id &&
		    opt->dst_addr.sin_addr.s_addr == d_addr)
			break;
	}
	rcu_read_unlock();

	return i < MAX_CALLID;
}

static int add_chan(struct pppox_sock *sock,
		    struct pptp_addr *sa)
{
	static int call_id;

	spin_lock(&chan_lock);
	if (!sa->call_id) {
		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
		if (call_id == MAX_CALLID) {
			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
			if (call_id == MAX_CALLID)
				goto out_err;
		}
		sa->call_id = call_id;
	} else if (test_bit(sa->call_id, callid_bitmap)) {
		goto out_err;
	}

	sock->proto.pptp.src_addr = *sa;
	set_bit(sa->call_id, callid_bitmap);
	rcu_assign_pointer(callid_sock[sa->call_id], sock);
	spin_unlock(&chan_lock);

	return 0;

out_err:
	spin_unlock(&chan_lock);
	return -1;
}

static void del_chan(struct pppox_sock *sock)
{
	spin_lock(&chan_lock);
	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
	spin_unlock(&chan_lock);
}
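
/*
 * Call-ID bookkeeping: callid_sock[] is an RCU-protected table of sockets
 * indexed by the local (source) call ID, and callid_bitmap tracks which IDs
 * are in use.  Writers (add_chan()/del_chan()) serialize on chan_lock, while
 * the receive path looks a socket up locklessly in lookup_chan() and takes a
 * reference with sock_hold() before returning it; pptp_release() therefore
 * calls synchronize_rcu() after del_chan() before the socket can go away.
 */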

static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct net *net = sock_net(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct pptp_gre_header *hdr;
	unsigned int header_len = sizeof(*hdr);
	struct flowi4 fl4;
	int islcp;
	int len;
	unsigned char *data;
	__u32 seq_recv;

	struct rtable *rt;
	struct net_device *tdev;
	struct iphdr *iph;
	int max_headroom;

	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
		goto tx_error;

	rt = ip_route_output_ports(net, &fl4, NULL,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0, IPPROTO_GRE,
				   RT_TOS(0), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		goto tx_error;

	tdev = rt->dst.dev;

	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	data = skb->data;
	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field */
	if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
		skb_pull(skb, 1);

	/* Put in the address/control bytes if necessary */
	if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
		data = skb_push(skb, 2);
		data[0] = PPP_ALLSTATIONS;
		data[1] = PPP_UI;
	}

	len = skb->len;

	seq_recv = opt->seq_recv;

	if (opt->ack_sent == seq_recv)
		header_len -= sizeof(hdr->ack);

	/* Push down and install GRE header */
	skb_push(skb, header_len);
	hdr = (struct pptp_gre_header *)(skb->data);

	hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
	hdr->gre_hd.protocol = GRE_PROTO_PPP;
	hdr->call_id = htons(opt->dst_addr.call_id);

	hdr->seq = htonl(++opt->seq_sent);
	if (opt->ack_sent != seq_recv) {
		/* send ack with this message */
		hdr->gre_hd.flags |= GRE_ACK;
		hdr->ack = htonl(seq_recv);
		opt->ack_sent = seq_recv;
	}
	hdr->payload_len = htons(len);

	/* Push down and install the IP header. */

	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->protocol = IPPROTO_GRE;
	iph->tos = 0;
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->tot_len = htons(skb->len);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	nf_reset_ct(skb);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	ip_local_out(net, skb->sk, skb);
	return 1;

tx_error:
	kfree_skb(skb);
	return 1;
}
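
/*
 * Note on the GRE header built in pptp_xmit(): PPTP uses "enhanced GRE"
 * (GRE version 1, RFC 2637), so every data packet carries the peer's call ID
 * in the key field plus a transmit sequence number.  The acknowledgment word
 * is optional and is only included (GRE_ACK set, header_len left at the full
 * sizeof(struct pptp_gre_header)) when a newly received sequence number has
 * not been acknowledged yet, i.e. when ack_sent != seq_recv.
 */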

static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
{
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	int headersize, payload_len, seq;
	__u8 *payload;
	struct pptp_gre_header *header;

	if (!(sk->sk_state & PPPOX_CONNECTED)) {
		if (sock_queue_rcv_skb(sk, skb))
			goto drop;
		return NET_RX_SUCCESS;
	}

	header = (struct pptp_gre_header *)(skb->data);
	headersize = sizeof(*header);

	/* test if acknowledgement present */
	if (GRE_IS_ACK(header->gre_hd.flags)) {
		__u32 ack;

		if (!pskb_may_pull(skb, headersize))
			goto drop;
		header = (struct pptp_gre_header *)(skb->data);

		/* ack in different place if S = 0 */
		ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;

		ack = ntohl(ack);

		if (ack > opt->ack_recv)
			opt->ack_recv = ack;
		/* also handle sequence number wrap-around */
		if (WRAPPED(ack, opt->ack_recv))
			opt->ack_recv = ack;
	} else {
		headersize -= sizeof(header->ack);
	}
	/* test if payload present */
	if (!GRE_IS_SEQ(header->gre_hd.flags))
		goto drop;

	payload_len = ntohs(header->payload_len);
	seq = ntohl(header->seq);

	/* check for incomplete packet (length smaller than expected) */
	if (!pskb_may_pull(skb, headersize + payload_len))
		goto drop;

	payload = skb->data + headersize;
	/* check for expected sequence number */
	if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
		if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
		    (PPP_PROTOCOL(payload) == PPP_LCP) &&
		    ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
			goto allow_packet;
	} else {
		opt->seq_recv = seq;
allow_packet:
		skb_pull(skb, headersize);

		if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
			/* chop off address/control */
			if (skb->len < 3)
				goto drop;
			skb_pull(skb, 2);
		}

		skb->ip_summed = CHECKSUM_NONE;
		skb_set_network_header(skb, skb->head - skb->data);
		ppp_input(&po->chan, skb);

		return NET_RX_SUCCESS;
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
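
/*
 * Receive-side sequencing (pptp_rcv_core() above): data packets are expected
 * to arrive with increasing sequence numbers.  A packet whose sequence number
 * is not newer than seq_recv is dropped, with one exception: LCP Echo-Request
 * and Echo-Reply frames are always passed up, so LCP keepalives survive
 * reordering.  The WRAPPED() helper (pulled in via <net/pptp.h>) covers the
 * 32-bit wrap-around in both the seq and ack comparisons.
 */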

static int pptp_rcv(struct sk_buff *skb)
{
	struct pppox_sock *po;
	struct pptp_gre_header *header;
	struct iphdr *iph;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (!pskb_may_pull(skb, 12))
		goto drop;

	iph = ip_hdr(skb);

	header = (struct pptp_gre_header *)skb->data;

	if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
	    GRE_IS_CSUM(header->gre_hd.flags) ||        /* flag CSUM should be clear */
	    GRE_IS_ROUTING(header->gre_hd.flags) ||     /* flag ROUTING should be clear */
	    !GRE_IS_KEY(header->gre_hd.flags) ||        /* flag KEY should be set */
	    (header->gre_hd.flags & GRE_FLAGS))         /* flag Recursion Ctrl should be clear */
		/* if invalid, discard this packet */
		goto drop;

	po = lookup_chan(htons(header->call_id), iph->saddr);
	if (po) {
		skb_dst_drop(skb);
		nf_reset_ct(skb);
		return sk_receive_skb(sk_pppox(po), skb, 0);
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
		     int sockaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto out;
	}

	if (sk->sk_state & PPPOX_BOUND) {
		error = -EBUSY;
		goto out;
	}

	if (add_chan(po, &sp->sa_addr.pptp))
		error = -EBUSY;
	else
		sk->sk_state |= PPPOX_BOUND;

out:
	release_sock(sk);
	return error;
}

static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
			int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct rtable *rt;
	struct flowi4 fl4;
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
		return -EALREADY;

	lock_sock(sk);
	/* Check for already bound sockets */
	if (sk->sk_state & PPPOX_CONNECTED) {
		error = -EBUSY;
		goto end;
	}

	/* Check for already disconnected sockets, on attempts to disconnect */
	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto end;
	}

	if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
		error = -EINVAL;
		goto end;
	}

	po->chan.private = sk;
	po->chan.ops = &pptp_chan_ops;

	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0,
				   IPPROTO_GRE, RT_CONN_FLAGS(sk),
				   sk->sk_bound_dev_if);
	if (IS_ERR(rt)) {
		error = -EHOSTUNREACH;
		goto end;
	}
	sk_setup_caps(sk, &rt->dst);

	po->chan.mtu = dst_mtu(&rt->dst);
	if (!po->chan.mtu)
		po->chan.mtu = PPP_MRU;
	po->chan.mtu -= PPTP_HEADER_OVERHEAD;

	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
	error = ppp_register_channel(&po->chan);
	if (error) {
		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
		goto end;
	}

	opt->dst_addr = sp->sa_addr.pptp;
	sk->sk_state |= PPPOX_CONNECTED;

end:
	release_sock(sk);
	return error;
}
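
/*
 * Rough sketch of the expected user-space sequence for the data channel
 * (illustrative only, error handling omitted; local_ip, peer_ip and
 * peer_call_id are placeholders).  The TCP/1723 control connection and
 * handing the channel to pppd are entirely a user-space matter, e.g. in a
 * pppd plugin:
 *
 *	struct sockaddr_pppox src = {
 *		.sa_family   = AF_PPPOX,
 *		.sa_protocol = PX_PROTO_PPTP,
 *	};
 *	struct sockaddr_pppox dst = src;
 *	int chindex;
 *	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);
 *
 *	src.sa_addr.pptp.call_id = 0;            // 0: let add_chan() pick one
 *	src.sa_addr.pptp.sin_addr = local_ip;    // must be non-zero, see pptp_connect()
 *	bind(fd, (struct sockaddr *)&src, sizeof(src));
 *	getsockname(fd, ...);                    // read back the assigned call ID
 *
 *	dst.sa_addr.pptp.call_id = peer_call_id; // learned over the control connection
 *	dst.sa_addr.pptp.sin_addr = peer_ip;
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 *	ioctl(fd, PPPIOCGCHAN, &chindex);        // attach the channel to pppd
 */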

static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	int len = sizeof(struct sockaddr_pppox);
	struct sockaddr_pppox sp;

	memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));

	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_PPTP;
	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;

	memcpy(uaddr, &sp, len);

	return len;
}

static int pptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct pppox_sock *po;
	int error = 0;

	if (!sk)
		return 0;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_DEAD)) {
		release_sock(sk);
		return -EBADF;
	}

	po = pppox_sk(sk);
	del_chan(po);
	synchronize_rcu();

	pppox_unbind_sock(sk);
	sk->sk_state = PPPOX_DEAD;

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return error;
}

static void pptp_sock_destruct(struct sock *sk)
{
	if (!(sk->sk_state & PPPOX_DEAD)) {
		del_chan(pppox_sk(sk));
		pppox_unbind_sock(sk);
	}
	skb_queue_purge(&sk->sk_receive_queue);
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}

static int pptp_create(struct net *net, struct socket *sock, int kern)
{
	int error = -ENOMEM;
	struct sock *sk;
	struct pppox_sock *po;
	struct pptp_opt *opt;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sock->state = SS_UNCONNECTED;
	sock->ops = &pptp_ops;

	sk->sk_backlog_rcv = pptp_rcv_core;
	sk->sk_state = PPPOX_NONE;
	sk->sk_type = SOCK_STREAM;
	sk->sk_family = PF_PPPOX;
	sk->sk_protocol = PX_PROTO_PPTP;
	sk->sk_destruct = pptp_sock_destruct;

	po = pppox_sk(sk);
	opt = &po->proto.pptp;

	opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
	opt->ack_recv = 0; opt->ack_sent = 0xffffffff;

	error = 0;
out:
	return error;
}

static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = opt->ppp_flags;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		opt->ppp_flags = val & ~SC_RCV_BITS;
		err = 0;
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct ppp_channel_ops pptp_chan_ops = {
	.start_xmit = pptp_xmit,
	.ioctl      = pptp_ppp_ioctl,
};

static struct proto pptp_sk_proto __read_mostly = {
	.name     = "PPTP",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

static const struct proto_ops pptp_ops = {
	.family     = AF_PPPOX,
	.owner      = THIS_MODULE,
	.release    = pptp_release,
	.bind       = pptp_bind,
	.connect    = pptp_connect,
	.socketpair = sock_no_socketpair,
	.accept     = sock_no_accept,
	.getname    = pptp_getname,
	.listen     = sock_no_listen,
	.shutdown   = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg    = sock_no_sendmsg,
	.recvmsg    = sock_no_recvmsg,
	.mmap       = sock_no_mmap,
	.ioctl      = pppox_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = pppox_compat_ioctl,
#endif
};

static const struct pppox_proto pppox_pptp_proto = {
	.create = pptp_create,
	.owner  = THIS_MODULE,
};

static const struct gre_protocol gre_pptp_protocol = {
	.handler = pptp_rcv,
};
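
/*
 * Module init/exit: gre_add_protocol() hooks pptp_rcv() into the GRE
 * demultiplexer (GREPROTO_PPTP), and register_pppox_proto() routes
 * socket(AF_PPPOX, ..., PX_PROTO_PPTP) to pptp_create().  Registration is
 * ordered so each step can be unwound on failure, and pptp_exit_module()
 * tears everything down in the reverse order.
 */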

static int __init pptp_init_module(void)
{
	int err = 0;
	pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");

	callid_sock = vzalloc(array_size(sizeof(void *), (MAX_CALLID + 1)));
	if (!callid_sock)
		return -ENOMEM;

	err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	if (err) {
		pr_err("PPTP: can't add gre protocol\n");
		goto out_mem_free;
	}

	err = proto_register(&pptp_sk_proto, 0);
	if (err) {
		pr_err("PPTP: can't register sk_proto\n");
		goto out_gre_del_protocol;
	}

	err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
	if (err) {
		pr_err("PPTP: can't register pppox_proto\n");
		goto out_unregister_sk_proto;
	}

	return 0;

out_unregister_sk_proto:
	proto_unregister(&pptp_sk_proto);
out_gre_del_protocol:
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
out_mem_free:
	vfree(callid_sock);

	return err;
}

static void __exit pptp_exit_module(void)
{
	unregister_pppox_proto(PX_PROTO_PPTP);
	proto_unregister(&pptp_sk_proto);
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	vfree(callid_sock);
}

module_init(pptp_init_module);
module_exit(pptp_exit_module);

MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);