// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Point-to-Point Tunneling Protocol for Linux
 *
 *	Authors: Dmitry Kozlov <xeb@mail.ru>
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_pppox.h>
#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

#include <net/sock.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/gre.h>
#include <net/pptp.h>

#include <linux/uaccess.h>

#define PPTP_DRIVER_VERSION "0.8.5"

#define MAX_CALLID 65535

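/*
 * Each PPTP session is keyed by its local GRE call ID.  callid_sock maps
 * call ID -> pppox socket and is read under RCU on the packet paths;
 * callid_bitmap tracks which call IDs are in use; chan_lock serializes
 * writers (add_chan()/del_chan()).
 */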
static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
static struct pppox_sock __rcu **callid_sock;

static DEFINE_SPINLOCK(chan_lock);

static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;

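/*
 * RX-path lookup: find the socket that owns @call_id and verify that the
 * packet really came from that session's peer (@s_addr).  A reference is
 * taken on the matching socket before it is returned.
 */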
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;

	rcu_read_lock();
	sock = rcu_dereference(callid_sock[call_id]);
	if (sock) {
		opt = &sock->proto.pptp;
		if (opt->dst_addr.sin_addr.s_addr != s_addr)
			sock = NULL;
		else
			sock_hold(sk_pppox(sock));
	}
	rcu_read_unlock();

	return sock;
}

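/*
 * Return non-zero if some existing session already targets the remote
 * @call_id/@d_addr pair; used by connect() to refuse duplicates.
 */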
static int lookup_chan_dst(u16 call_id, __be32 d_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;
	int i;

	rcu_read_lock();
	i = 1;
	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
		sock = rcu_dereference(callid_sock[i]);
		if (!sock)
			continue;
		opt = &sock->proto.pptp;
		if (opt->dst_addr.call_id == call_id &&
			  opt->dst_addr.sin_addr.s_addr == d_addr)
			break;
	}
	rcu_read_unlock();

	return i < MAX_CALLID;
}

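/*
 * Bind @sock into the call ID table.  A zero sa->call_id means "pick a
 * free one": scan the bitmap starting after the last ID handed out and
 * wrap around once.  A non-zero ID must still be unused.
 */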
static int add_chan(struct pppox_sock *sock,
		    struct pptp_addr *sa)
{
	static int call_id;

	spin_lock(&chan_lock);
	if (!sa->call_id) {
		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
		if (call_id == MAX_CALLID) {
			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
			if (call_id == MAX_CALLID)
				goto out_err;
		}
		sa->call_id = call_id;
	} else if (test_bit(sa->call_id, callid_bitmap)) {
		goto out_err;
	}

	sock->proto.pptp.src_addr = *sa;
	set_bit(sa->call_id, callid_bitmap);
	rcu_assign_pointer(callid_sock[sa->call_id], sock);
	spin_unlock(&chan_lock);

	return 0;

out_err:
	spin_unlock(&chan_lock);
	return -1;
}

static void del_chan(struct pppox_sock *sock)
{
	spin_lock(&chan_lock);
	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
	spin_unlock(&chan_lock);
}

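/*
 * Transmit path, called by the generic PPP layer.  The PPP frame is
 * wrapped in an enhanced GRE (PPTP) header carrying our send sequence
 * number (plus an ack for the peer if one is outstanding), then an
 * outer IPv4/GRE header is built and the packet is routed to the peer.
 */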
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct net *net = sock_net(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct pptp_gre_header *hdr;
	unsigned int header_len = sizeof(*hdr);
	struct flowi4 fl4;
	int islcp;
	int len;
	unsigned char *data;
	__u32 seq_recv;

	struct rtable *rt;
	struct net_device *tdev;
	struct iphdr *iph;
	int max_headroom;

	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
		goto tx_error;

	rt = ip_route_output_ports(net, &fl4, NULL,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0, IPPROTO_GRE,
				   RT_TOS(0), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		goto tx_error;

	tdev = rt->dst.dev;

	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	data = skb->data;
	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field */
	if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
		skb_pull(skb, 1);

	/* Put in the address/control bytes if necessary */
	if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
		data = skb_push(skb, 2);
		data[0] = PPP_ALLSTATIONS;
		data[1] = PPP_UI;
	}

	len = skb->len;

	seq_recv = opt->seq_recv;

	if (opt->ack_sent == seq_recv)
		header_len -= sizeof(hdr->ack);

	/* Push down and install GRE header */
	skb_push(skb, header_len);
	hdr = (struct pptp_gre_header *)(skb->data);

	hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
	hdr->gre_hd.protocol = GRE_PROTO_PPP;
	hdr->call_id = htons(opt->dst_addr.call_id);

	hdr->seq = htonl(++opt->seq_sent);
	if (opt->ack_sent != seq_recv) {
		/* send ack with this message */
		hdr->gre_hd.flags |= GRE_ACK;
		hdr->ack  = htonl(seq_recv);
		opt->ack_sent = seq_recv;
	}
	hdr->payload_len = htons(len);

	/* Push down and install the IP header. */
	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->protocol = IPPROTO_GRE;
	iph->tos      = 0;
	iph->daddr    = fl4.daddr;
	iph->saddr    = fl4.saddr;
	iph->ttl      = ip4_dst_hoplimit(&rt->dst);
	iph->tot_len  = htons(skb->len);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	nf_reset_ct(skb);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	ip_local_out(net, skb->sk, skb);
	return 1;

tx_error:
	kfree_skb(skb);
	return 1;
}

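/*
 * Per-socket receive processing (runs under the socket lock; also used
 * as the backlog handler).  Consumes any piggybacked ack, enforces the
 * GRE sequence number, strips the GRE and optional address/control
 * bytes and hands the PPP frame to ppp_input().  Out-of-sequence data
 * is dropped, except LCP echo request/reply which is always let through.
 */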
static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
{
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	int headersize, payload_len, seq;
	__u8 *payload;
	struct pptp_gre_header *header;

	if (!(sk->sk_state & PPPOX_CONNECTED)) {
		if (sock_queue_rcv_skb(sk, skb))
			goto drop;
		return NET_RX_SUCCESS;
	}

	header = (struct pptp_gre_header *)(skb->data);
	headersize  = sizeof(*header);

	/* test if acknowledgement present */
	if (GRE_IS_ACK(header->gre_hd.flags)) {
		__u32 ack;

		if (!pskb_may_pull(skb, headersize))
			goto drop;
		header = (struct pptp_gre_header *)(skb->data);

		/* ack in different place if S = 0 */
		ack = GRE_IS_SEQ(header->gre_hd.flags) ? ntohl(header->ack) :
							 ntohl(header->seq);
		if (ack > opt->ack_recv)
			opt->ack_recv = ack;
		/* also handle sequence number wrap-around */
		if (WRAPPED(ack, opt->ack_recv))
			opt->ack_recv = ack;
	} else {
		headersize -= sizeof(header->ack);
	}
	/* test if payload present */
	if (!GRE_IS_SEQ(header->gre_hd.flags))
		goto drop;

	payload_len = ntohs(header->payload_len);
	seq         = ntohl(header->seq);

	/* check for incomplete packet (length smaller than expected) */
	if (!pskb_may_pull(skb, headersize + payload_len))
		goto drop;

	payload = skb->data + headersize;
	/* check for expected sequence number */
	if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
		if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
				(PPP_PROTOCOL(payload) == PPP_LCP) &&
				((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
			goto allow_packet;
	} else {
		opt->seq_recv = seq;
allow_packet:
		skb_pull(skb, headersize);

		if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
			/* chop off address/control */
			if (skb->len < 3)
				goto drop;
			skb_pull(skb, 2);
		}

		skb->ip_summed = CHECKSUM_NONE;
		skb_set_network_header(skb, skb->head - skb->data);
		ppp_input(&po->chan, skb);

		return NET_RX_SUCCESS;
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

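/*
 * GRE protocol handler (GREPROTO_PPTP).  Sanity-checks the GRE flags
 * required by PPTP, demuxes on the call ID and peer address, and queues
 * the packet to the owning socket via sk_receive_skb().
 */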
static int pptp_rcv(struct sk_buff *skb)
{
	struct pppox_sock *po;
	struct pptp_gre_header *header;
	struct iphdr *iph;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (!pskb_may_pull(skb, 12))
		goto drop;

	iph = ip_hdr(skb);

	header = (struct pptp_gre_header *)skb->data;

	if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
		GRE_IS_CSUM(header->gre_hd.flags) ||    /* flag CSUM should be clear */
		GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */
		!GRE_IS_KEY(header->gre_hd.flags) ||    /* flag KEY should be set */
		(header->gre_hd.flags & GRE_FLAGS))     /* flag Recursion Ctrl should be clear */
		/* if invalid, discard this packet */
		goto drop;

	po = lookup_chan(ntohs(header->call_id), iph->saddr);
	if (po) {
		skb_dst_drop(skb);
		nf_reset_ct(skb);
		return sk_receive_skb(sk_pppox(po), skb, 0);
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

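/*
 * bind(2): claim a local call ID (or have one allocated) and publish
 * the socket in the call ID table so incoming GRE packets can find it.
 */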
static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto out;
	}

	if (sk->sk_state & PPPOX_BOUND) {
		error = -EBUSY;
		goto out;
	}

	if (add_chan(po, &sp->sa_addr.pptp))
		error = -EBUSY;
	else
		sk->sk_state |= PPPOX_BOUND;

out:
	release_sock(sk);
	return error;
}

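/*
 * connect(2): record the peer address/call ID, resolve a route to the
 * peer and register the socket as a PPP channel.  From this point on
 * the PPP core drives transmission through pptp_xmit().
 */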
static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct rtable *rt;
	struct flowi4 fl4;
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
		return -EALREADY;

	lock_sock(sk);
	/* Check for already bound sockets */
	if (sk->sk_state & PPPOX_CONNECTED) {
		error = -EBUSY;
		goto end;
	}

	/* Check for already disconnected sockets, on attempts to disconnect */
	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto end;
	}

	if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
		error = -EINVAL;
		goto end;
	}

	po->chan.private = sk;
	po->chan.ops = &pptp_chan_ops;

	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0,
				   IPPROTO_GRE, RT_CONN_FLAGS(sk),
				   sk->sk_bound_dev_if);
	if (IS_ERR(rt)) {
		error = -EHOSTUNREACH;
		goto end;
	}
	sk_setup_caps(sk, &rt->dst);

	po->chan.mtu = dst_mtu(&rt->dst);
	if (!po->chan.mtu)
		po->chan.mtu = PPP_MRU;
	po->chan.mtu -= PPTP_HEADER_OVERHEAD;

	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
	error = ppp_register_channel(&po->chan);
	if (error) {
		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
		goto end;
	}

	opt->dst_addr = sp->sa_addr.pptp;
	sk->sk_state |= PPPOX_CONNECTED;

 end:
	release_sock(sk);
	return error;
}

static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
	int peer)
{
	int len = sizeof(struct sockaddr_pppox);
	struct sockaddr_pppox sp;

	memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));

	sp.sa_family    = AF_PPPOX;
	sp.sa_protocol  = PX_PROTO_PPTP;
	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;

	memcpy(uaddr, &sp, len);

	return len;
}

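/*
 * release(2): unpublish the call ID (waiting out RCU readers), detach
 * from the PPP core and mark the socket dead.
 */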
static int pptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct pppox_sock *po;
	int error = 0;

	if (!sk)
		return 0;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_DEAD)) {
		release_sock(sk);
		return -EBADF;
	}

	po = pppox_sk(sk);
	del_chan(po);
	synchronize_rcu();

	pppox_unbind_sock(sk);
	sk->sk_state = PPPOX_DEAD;

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return error;
}

static void pptp_sock_destruct(struct sock *sk)
{
	if (!(sk->sk_state & PPPOX_DEAD)) {
		del_chan(pppox_sk(sk));
		pppox_unbind_sock(sk);
	}
	skb_queue_purge(&sk->sk_receive_queue);
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}

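/*
 * Socket creation hook invoked by the PPPoX core for PX_PROTO_PPTP
 * sockets.  The send sequence counter starts at zero; the receive and
 * ack trackers start at 0xffffffff, i.e. nothing received or acked yet.
 */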
static int pptp_create(struct net *net, struct socket *sock, int kern)
{
	int error = -ENOMEM;
	struct sock *sk;
	struct pppox_sock *po;
	struct pptp_opt *opt;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sock->state = SS_UNCONNECTED;
	sock->ops   = &pptp_ops;

	sk->sk_backlog_rcv = pptp_rcv_core;
	sk->sk_state       = PPPOX_NONE;
	sk->sk_type        = SOCK_STREAM;
	sk->sk_family      = PF_PPPOX;
	sk->sk_protocol    = PX_PROTO_PPTP;
	sk->sk_destruct    = pptp_sock_destruct;

	po = pppox_sk(sk);
	opt = &po->proto.pptp;

	opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
	opt->ack_recv = 0; opt->ack_sent = 0xffffffff;

	error = 0;
out:
	return error;
}

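/*
 * Channel ioctl()s from the PPP core: only the PPP flag word is
 * supported (get/set); the receive-side flag bits are masked off on set.
 */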
static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
	unsigned long arg)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = opt->ppp_flags;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		opt->ppp_flags = val & ~SC_RCV_BITS;
		err = 0;
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct ppp_channel_ops pptp_chan_ops = {
	.start_xmit = pptp_xmit,
	.ioctl      = pptp_ppp_ioctl,
};

static struct proto pptp_sk_proto __read_mostly = {
	.name     = "PPTP",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

static const struct proto_ops pptp_ops = {
	.family     = AF_PPPOX,
	.owner      = THIS_MODULE,
	.release    = pptp_release,
	.bind       = pptp_bind,
	.connect    = pptp_connect,
	.socketpair = sock_no_socketpair,
	.accept     = sock_no_accept,
	.getname    = pptp_getname,
	.listen     = sock_no_listen,
	.shutdown   = sock_no_shutdown,
	.sendmsg    = sock_no_sendmsg,
	.recvmsg    = sock_no_recvmsg,
	.mmap       = sock_no_mmap,
	.ioctl      = pppox_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = pppox_compat_ioctl,
#endif
};

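/*
 * For orientation only: a rough sketch of how userspace (e.g. a pppd
 * plugin) is expected to drive the proto_ops above.  The identifiers
 * local_ip, peer_ip and peer_call_id are placeholders, not part of this
 * file:
 *
 *	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);
 *
 *	struct sockaddr_pppox src = {
 *		.sa_family   = AF_PPPOX,
 *		.sa_protocol = PX_PROTO_PPTP,
 *	};
 *	src.sa_addr.pptp.call_id = 0;	/- 0: let add_chan() allocate one -/
 *	src.sa_addr.pptp.sin_addr.s_addr = local_ip;
 *	bind(fd, (struct sockaddr *)&src, sizeof(src));
 *
 *	struct sockaddr_pppox dst = src;
 *	dst.sa_addr.pptp.call_id = peer_call_id;	/- from the PPTP control channel -/
 *	dst.sa_addr.pptp.sin_addr.s_addr = peer_ip;
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * (The inner comment markers are written as /- ... -/ only so they can
 * live inside this block comment.)  After connect() the socket is a
 * registered PPP channel and can be attached to a PPP unit, e.g. via
 * PPPIOCGCHAN, which pppox_ioctl() handles.
 */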
static const struct pppox_proto pppox_pptp_proto = {
	.create = pptp_create,
	.owner  = THIS_MODULE,
};

static const struct gre_protocol gre_pptp_protocol = {
	.handler = pptp_rcv,
};

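/*
 * Module init: allocate the call ID table, then hook into the GRE
 * demultiplexer and register the socket protocol with the PPPoX core.
 */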
static int __init pptp_init_module(void)
{
	int err = 0;
	pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");

	callid_sock = vzalloc(array_size(sizeof(void *), (MAX_CALLID + 1)));
	if (!callid_sock)
		return -ENOMEM;

	err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	if (err) {
		pr_err("PPTP: can't add gre protocol\n");
		goto out_mem_free;
	}

	err = proto_register(&pptp_sk_proto, 0);
	if (err) {
		pr_err("PPTP: can't register sk_proto\n");
		goto out_gre_del_protocol;
	}

	err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
	if (err) {
		pr_err("PPTP: can't register pppox_proto\n");
		goto out_unregister_sk_proto;
	}

	return 0;

out_unregister_sk_proto:
	proto_unregister(&pptp_sk_proto);
out_gre_del_protocol:
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
out_mem_free:
	vfree(callid_sock);

	return err;
}

static void __exit pptp_exit_module(void)
{
	unregister_pppox_proto(PX_PROTO_PPTP);
	proto_unregister(&pptp_sk_proto);
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	vfree(callid_sock);
}

module_init(pptp_init_module);
module_exit(pptp_exit_module);

MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);
691