xref: /linux/net/l2tp/l2tp_core.c (revision eb3ab13d997a2f12ec9d557b6ae2aea4e28e2bc3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:	Martijn van Oosterhout <kleptog@svana.org>
10  *		James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *		Michal Ostrowski <mostrows@speakeasy.net>
13  *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *		David S. Miller (davem@redhat.com)
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/list.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/spinlock.h>
27 #include <linux/kthread.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/errno.h>
31 #include <linux/jiffies.h>
32 
33 #include <linux/netdevice.h>
34 #include <linux/net.h>
35 #include <linux/inetdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/init.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <linux/udp.h>
41 #include <linux/l2tp.h>
42 #include <linux/sort.h>
43 #include <linux/file.h>
44 #include <linux/nsproxy.h>
45 #include <net/net_namespace.h>
46 #include <net/netns/generic.h>
47 #include <net/dst.h>
48 #include <net/ip.h>
49 #include <net/udp.h>
50 #include <net/udp_tunnel.h>
51 #include <net/inet_common.h>
52 #include <net/xfrm.h>
53 #include <net/protocol.h>
54 #include <net/inet6_connection_sock.h>
55 #include <net/inet_ecn.h>
56 #include <net/ip6_route.h>
57 #include <net/ip6_checksum.h>
58 
59 #include <asm/byteorder.h>
60 #include <linux/atomic.h>
61 
62 #include "l2tp_core.h"
63 
64 #define CREATE_TRACE_POINTS
65 #include "trace.h"
66 
67 #define L2TP_DRV_VERSION	"V2.0"
68 
69 /* L2TP header constants */
70 #define L2TP_HDRFLAG_T	   0x8000
71 #define L2TP_HDRFLAG_L	   0x4000
72 #define L2TP_HDRFLAG_S	   0x0800
73 #define L2TP_HDRFLAG_O	   0x0200
74 #define L2TP_HDRFLAG_P	   0x0100
75 
76 #define L2TP_HDR_VER_MASK  0x000F
77 #define L2TP_HDR_VER_2	   0x0002
78 #define L2TP_HDR_VER_3	   0x0003
79 
80 /* L2TPv3 default L2-specific sublayer */
81 #define L2TP_SLFLAG_S	   0x40000000
82 #define L2TP_SL_SEQ_MASK   0x00ffffff
83 
84 #define L2TP_HDR_SIZE_MAX		14
85 
86 /* Default trace flags */
87 #define L2TP_DEFAULT_DEBUG_FLAGS	0
88 
89 #define L2TP_DEPTH_NESTING		2
90 #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
91 #error "L2TP requires its own lockdep subclass"
92 #endif
93 
/* Private data stored for received packets in the skb.
 * Lives in skb->cb[] after the inet_skb_parm area (see L2TP_SKB_CB).
 */
struct l2tp_skb_cb {
	u32			ns;		/* L2TP sequence number of this packet (valid if has_seq) */
	u16			has_seq;	/* non-zero if the packet carried a sequence number */
	u16			length;		/* payload length, used for rx stats accounting */
	unsigned long		expires;	/* jiffies deadline; queued packets past this are dropped */
};
102 
103 #define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
104 
105 static struct workqueue_struct *l2tp_wq;
106 
107 /* per-net private data for this module */
108 static unsigned int l2tp_net_id;
109 struct l2tp_net {
110 	/* Lock for write access to l2tp_tunnel_idr */
111 	spinlock_t l2tp_tunnel_idr_lock;
112 	struct idr l2tp_tunnel_idr;
113 	/* Lock for write access to l2tp_v[23]_session_idr/htable */
114 	spinlock_t l2tp_session_idr_lock;
115 	struct idr l2tp_v2_session_idr;
116 	struct idr l2tp_v3_session_idr;
117 	struct hlist_head l2tp_v3_session_htable[16];
118 };
119 
120 static inline u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
121 {
122 	return ((u32)tunnel_id) << 16 | session_id;
123 }
124 
125 static inline unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
126 {
127 	return ((unsigned long)sk) + session_id;
128 }
129 
130 #if IS_ENABLED(CONFIG_IPV6)
131 static bool l2tp_sk_is_v6(struct sock *sk)
132 {
133 	return sk->sk_family == PF_INET6 &&
134 	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
135 }
136 #endif
137 
/* Return this module's per-net private data for @net. */
static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
	return net_generic(net, l2tp_net_id);
}
142 
/* Final destruction of a tunnel, called when its refcount drops to zero.
 * Unhooks UDP encapsulation from the tunnel socket (if any), drops the
 * tunnel's socket reference, and frees the tunnel after an RCU grace
 * period so lockless readers remain safe.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	struct sock *sk = tunnel->sock;

	trace_free_tunnel(tunnel);

	if (sk) {
		/* Disable udp encapsulation */
		switch (tunnel->encap) {
		case L2TP_ENCAPTYPE_UDP:
			/* No longer an encapsulation socket. See net/ipv4/udp.c */
			WRITE_ONCE(udp_sk(sk)->encap_type, 0);
			udp_sk(sk)->encap_rcv = NULL;
			udp_sk(sk)->encap_destroy = NULL;
			break;
		case L2TP_ENCAPTYPE_IP:
			/* No socket-level hooks to undo for IP encap */
			break;
		}

		/* Drop the reference the tunnel held on its socket */
		tunnel->sock = NULL;
		sock_put(sk);
	}

	kfree_rcu(tunnel, rcu);
}
168 
/* Final destruction of a session, called when its refcount drops to
 * zero. Releases the session's reference on its tunnel (if registered)
 * and frees the session after an RCU grace period.
 */
static void l2tp_session_free(struct l2tp_session *session)
{
	trace_free_session(session);
	/* session->tunnel is NULL if l2tp_session_register never ran */
	if (session->tunnel)
		l2tp_tunnel_dec_refcount(session->tunnel);
	kfree_rcu(session, rcu);
}
176 
/* Find the tunnel that owns socket @sk by scanning the per-net tunnel
 * IDR. Returns the tunnel with a new reference held, or NULL if no live
 * tunnel uses the socket. Linear scan; not intended for fast paths.
 */
struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk)
{
	const struct net *net = sock_net(sk);
	unsigned long tunnel_id, tmp;
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn;

	rcu_read_lock_bh();
	pn = l2tp_pernet(net);
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		/* Skip tunnels whose refcount already hit zero: they are
		 * being freed and must not be revived.
		 */
		if (tunnel &&
		    tunnel->sock == sk &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
199 
/* Take a reference on a tunnel. Pairs with l2tp_tunnel_dec_refcount. */
void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
{
	refcount_inc(&tunnel->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
205 
/* Drop a reference on a tunnel; frees the tunnel when the last
 * reference is released.
 */
void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
{
	if (refcount_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
212 
/* Take a reference on a session. Pairs with l2tp_session_dec_refcount. */
void l2tp_session_inc_refcount(struct l2tp_session *session)
{
	refcount_inc(&session->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
218 
/* Drop a reference on a session; frees the session when the last
 * reference is released.
 */
void l2tp_session_dec_refcount(struct l2tp_session *session)
{
	if (refcount_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
225 
/* Lookup a tunnel. A new reference is held on the returned tunnel.
 * Returns NULL if the tunnel does not exist or is already being freed
 * (refcount has reached zero).
 */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
		rcu_read_unlock_bh();
		return tunnel;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
243 
/* Return the nth live tunnel (zero-based) in the per-net IDR, with a
 * new reference held, or NULL if there are not that many. Linear walk;
 * used for management dump operations. Tunnels whose refcount already
 * dropped to zero are skipped.
 */
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	unsigned long tunnel_id, tmp;
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel && ++count > nth &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
264 
/* Lookup an L2TPv3 session by session ID, using the tunnel socket @sk
 * to disambiguate session IDs that collide across tunnels. Returns the
 * session with a new reference held, or NULL if not found.
 */
struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session;

	rcu_read_lock_bh();
	session = idr_find(&pn->l2tp_v3_session_idr, session_id);
	/* Fast path: the ID is unique (the session is not hashed into the
	 * collision table), so the IDR entry is authoritative.
	 */
	if (session && !hash_hashed(&session->hlist) &&
	    refcount_inc_not_zero(&session->ref_count)) {
		rcu_read_unlock_bh();
		return session;
	}

	/* If we get here and session is non-NULL, the session_id
	 * collides with one in another tunnel. If sk is non-NULL,
	 * find the session matching sk.
	 */
	if (session && sk) {
		unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);

		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
					   hlist, key) {
			/* session->tunnel may be NULL if another thread is in
			 * l2tp_session_register and has added an item to
			 * l2tp_v3_session_htable but hasn't yet added the
			 * session to its tunnel's session_list.
			 */
			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

			if (tunnel && tunnel->sock == sk &&
			    refcount_inc_not_zero(&session->ref_count)) {
				rcu_read_unlock_bh();
				return session;
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v3_session_get);
306 
/* Lookup an L2TPv2 session by tunnel ID + session ID. Returns the
 * session with a new reference held, or NULL if not found or already
 * being freed.
 */
struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
{
	u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session;

	rcu_read_lock_bh();
	session = idr_find(&pn->l2tp_v2_session_idr, session_key);
	if (session && refcount_inc_not_zero(&session->ref_count)) {
		rcu_read_unlock_bh();
		return session;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v2_session_get);
324 
325 struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
326 				      u32 tunnel_id, u32 session_id)
327 {
328 	if (pver == L2TP_HDR_VER_2)
329 		return l2tp_v2_session_get(net, tunnel_id, session_id);
330 	else
331 		return l2tp_v3_session_get(net, sk, session_id);
332 }
333 EXPORT_SYMBOL_GPL(l2tp_session_get);
334 
/* Return the nth session (zero-based) on the tunnel's session list,
 * with a new reference held, or NULL if there are not that many.
 * Linear walk; used for management dump operations.
 */
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
{
	struct l2tp_session *session;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(session, &tunnel->session_list, list) {
		if (++count > nth) {
			l2tp_session_inc_refcount(session);
			rcu_read_unlock_bh();
			return session;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
353 
/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 * Scans every session of every tunnel in the netns; returns the first
 * match with a new reference held, or NULL.
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	unsigned long tunnel_id, tmp;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel) {
			list_for_each_entry_rcu(session, &tunnel->session_list, list) {
				if (!strcmp(session->ifname, ifname)) {
					l2tp_session_inc_refcount(session);
					rcu_read_unlock_bh();

					return session;
				}
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
383 
/* Link @session onto collision list @clist, taking a session reference
 * on behalf of the list. The session must not already be on a list.
 */
static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
				       struct l2tp_session *session)
{
	l2tp_session_inc_refcount(session);
	WARN_ON_ONCE(session->coll_list);
	session->coll_list = clist;
	spin_lock(&clist->lock);
	list_add(&session->clist, &clist->list);
	spin_unlock(&clist->lock);
}
394 
/* Handle an L2TPv3 session ID collision: new session @session1 has the
 * same ID as existing session @session2 (which may be NULL). Duplicate
 * IDs are only tolerated for UDP-encap tunnels; both sessions are then
 * hashed into the per-net collision table and linked on a shared
 * collision list so lookups can disambiguate by tunnel socket.
 * Returns 0 on success, -EEXIST or -ENOMEM on failure.
 * Caller must hold the per-net session IDR lock.
 */
static int l2tp_session_collision_add(struct l2tp_net *pn,
				      struct l2tp_session *session1,
				      struct l2tp_session *session2)
{
	struct l2tp_session_coll_list *clist;

	lockdep_assert_held(&pn->l2tp_session_idr_lock);

	if (!session2)
		return -EEXIST;

	/* If existing session is in IP-encap tunnel, refuse new session */
	if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
		return -EEXIST;

	clist = session2->coll_list;
	if (!clist) {
		/* First collision. Allocate list to manage the collided sessions
		 * and add the existing session to the list.
		 */
		clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
		if (!clist)
			return -ENOMEM;

		spin_lock_init(&clist->lock);
		INIT_LIST_HEAD(&clist->list);
		refcount_set(&clist->ref_count, 1);
		l2tp_session_coll_list_add(clist, session2);
	}

	/* If existing session isn't already in the session hlist, add it. */
	if (!hash_hashed(&session2->hlist))
		hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
			     session2->hlist_key);

	/* Add new session to the hlist and collision list */
	hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
		     session1->hlist_key);
	refcount_inc(&clist->ref_count);
	l2tp_session_coll_list_add(clist, session1);

	return 0;
}
438 
/* Remove a colliding L2TPv3 session from the hash table and from its
 * collision list, fixing up the IDR entry for the shared session ID.
 * Caller must hold the per-net session IDR lock.
 */
static void l2tp_session_collision_del(struct l2tp_net *pn,
				       struct l2tp_session *session)
{
	struct l2tp_session_coll_list *clist = session->coll_list;
	unsigned long session_key = session->session_id;
	struct l2tp_session *session2;

	lockdep_assert_held(&pn->l2tp_session_idr_lock);

	hash_del_rcu(&session->hlist);

	if (clist) {
		/* Remove session from its collision list. If there
		 * are other sessions with the same ID, replace this
		 * session's IDR entry with that session, otherwise
		 * remove the IDR entry. If this is the last session,
		 * the collision list data is freed.
		 */
		spin_lock(&clist->lock);
		list_del_init(&session->clist);
		session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
		if (session2) {
			void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);

			WARN_ON_ONCE(IS_ERR_VALUE(old));
		} else {
			void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);

			WARN_ON_ONCE(removed != session);
		}
		session->coll_list = NULL;
		spin_unlock(&clist->lock);
		/* Drop the list's reference on the session taken in
		 * l2tp_session_coll_list_add, and free the list itself
		 * if this was its last member.
		 */
		if (refcount_dec_and_test(&clist->ref_count))
			kfree(clist);
		l2tp_session_dec_refcount(session);
	}
}
476 
/* Register a session with its tunnel, making it visible to lookups.
 * The session is keyed into the per-net v2 or v3 IDR (v2 keys combine
 * tunnel ID and session ID); for v3 UDP tunnels a duplicate session ID
 * is tolerated via the collision list. The IDR slot is first reserved
 * with a NULL pointer and only pointed at the session once it is fully
 * linked, so lockless getters never see a half-initialized session.
 * Returns 0 on success, -EEXIST if the ID is taken, -ENODEV if the
 * tunnel is no longer accepting sessions, or -ENOMEM.
 */
int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
	struct l2tp_session *other_session = NULL;
	void *old = NULL;
	u32 session_key;
	int err;

	spin_lock_bh(&tunnel->list_lock);
	spin_lock_bh(&pn->l2tp_session_idr_lock);

	if (!tunnel->acpt_newsess) {
		err = -ENODEV;
		goto out;
	}

	if (tunnel->version == L2TP_HDR_VER_3) {
		session_key = session->session_id;
		err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
				    &session_key, session_key, GFP_ATOMIC);
		/* IP encap expects session IDs to be globally unique, while
		 * UDP encap doesn't. This isn't per the RFC, which says that
		 * sessions are identified only by the session ID, but is to
		 * support existing userspace which depends on it.
		 */
		if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
			other_session = idr_find(&pn->l2tp_v3_session_idr,
						 session_key);
			err = l2tp_session_collision_add(pn, session,
							 other_session);
		}
	} else {
		session_key = l2tp_v2_session_key(tunnel->tunnel_id,
						  session->session_id);
		err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
				    &session_key, session_key, GFP_ATOMIC);
	}

	if (err) {
		if (err == -ENOSPC)
			err = -EEXIST;
		goto out;
	}

	l2tp_tunnel_inc_refcount(tunnel);
	WRITE_ONCE(session->tunnel, tunnel);
	list_add_rcu(&session->list, &tunnel->session_list);

	/* this makes session available to lockless getters */
	if (tunnel->version == L2TP_HDR_VER_3) {
		if (!other_session)
			old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
	} else {
		old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
	}

	/* old should be NULL, unless something removed or modified
	 * the IDR entry after our idr_alloc_32 above (which shouldn't
	 * happen).
	 */
	WARN_ON_ONCE(old);
out:
	spin_unlock_bh(&pn->l2tp_session_idr_lock);
	spin_unlock_bh(&tunnel->list_lock);

	if (!err)
		trace_register_session(session);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
549 
550 /*****************************************************************************
551  * Receive data handling
552  *****************************************************************************/
553 
/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number. The reorder queue is kept sorted by ns: insert before the
 * first queued packet with a higher ns, else append at the tail.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			/* Out-of-order arrival: count it and slot the
			 * packet into its sequence position.
			 */
			__skb_queue_before(&session->reorder_q, skbp, skb);
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
577 
/* Dequeue a single skb: account rx stats, advance the session's
 * expected sequence number, and hand the packet to the session's
 * private receive handler (or free it if there is none).
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr, wrapping at the session's maximum */
		session->nr++;
		session->nr &= session->nr_max;
		trace_session_seqnum_update(session);
	}

	/* call private receive handler */
	if (session->recv_skb)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}
608 
/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 * The queue lock is dropped around each delivery, so the walk restarts
 * from the head after every dequeued packet.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

		/* If the packet has been pending on the queue for too long, discard it */
		if (time_after(jiffies, cb->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			trace_session_pkt_expired(session, cb->ns);
			/* A gap was created: resynchronize nr on the
			 * next sequenced packet we process.
			 */
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (cb->has_seq) {
			if (session->reorder_skip) {
				session->reorder_skip = 0;
				session->nr = cb->ns;
				trace_session_seqnum_reset(session);
			}
			/* Stop at the first packet that isn't next in
			 * sequence; later packets can't be delivered yet.
			 */
			if (cb->ns != session->nr)
				goto out;
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
659 
660 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
661 {
662 	u32 nws;
663 
664 	if (nr >= session->nr)
665 		nws = nr - session->nr;
666 	else
667 		nws = (session->nr_max + 1) - (session->nr - nr);
668 
669 	return nws < session->nr_window_size;
670 }
671 
/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
 * acceptable, else non-zero (caller discards the packet).
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
		/* Packet sequence number is outside allowed window.
		 * Discard it.
		 */
		trace_session_pkt_outside_rx_window(session, cb->ns);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Packet reordering enabled. Add skb to session's
		 * reorder queue, in order of ns.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* Packet reordering disabled. Discard out-of-sequence packets, while
	 * tracking the number if in-sequence packets after the first OOS packet
	 * is seen. After nr_oos_count_max in-sequence packets, reset the
	 * sequence number to re-enable packet reception.
	 */
	if (cb->ns == session->nr) {
		/* Exactly the expected packet: queue for delivery */
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = cb->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		/* Count consecutive arrivals following the last OOS
		 * packet; any gap resets the count.
		 */
		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			session->reorder_skip = 1;
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			trace_session_pkt_oos(session, cb->ns);
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}
729 
730 /* Do receive processing of L2TP data frames. We handle both L2TPv2
731  * and L2TPv3 data frames here.
732  *
733  * L2TPv2 Data Message Header
734  *
735  *  0                   1                   2                   3
736  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
737  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
738  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
739  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
740  * |           Tunnel ID           |           Session ID          |
741  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
742  * |             Ns (opt)          |             Nr (opt)          |
743  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
744  * |      Offset Size (opt)        |    Offset pad... (opt)
745  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
746  *
747  * Data frames are marked by T=0. All other fields are the same as
748  * those in L2TP control frames.
749  *
750  * L2TPv3 Data Message Header
751  *
752  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
753  * |                      L2TP Session Header                      |
754  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
755  * |                      L2-Specific Sublayer                     |
756  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
757  * |                        Tunnel Payload                      ...
758  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
759  *
760  * L2TPv3 Session Header Over IP
761  *
762  *  0                   1                   2                   3
763  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
764  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
765  * |                           Session ID                          |
766  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
767  * |               Cookie (optional, maximum 64 bits)...
768  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
769  *                                                                 |
770  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
771  *
772  * L2TPv3 L2-Specific Sublayer Format
773  *
774  *  0                   1                   2                   3
775  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
776  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
777  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
778  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
779  *
780  * Cookie value and sublayer format are negotiated with the peer when
781  * the session is set up. Unlike L2TPv2, we do not need to parse the
782  * packet header to determine if optional fields are present.
783  *
784  * Caller must already have parsed the frame and determined that it is
785  * a data (not control) frame before coming here. Fields up to the
786  * session-id have already been parsed and ptr points to the data
787  * after the session-id.
788  */
/* Common receive path for L2TPv2 and L2TPv3 data frames: validate the
 * optional cookie, parse optional sequence numbers, strip the header,
 * and queue the payload for (possibly reordered) delivery. On any
 * failure the skb is counted against rx_errors and freed.
 * @ptr points just past the session ID; @optr points at the start of
 * the L2TP header; @hdrflags are the v2 header flags.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
					     tunnel->name, tunnel->tunnel_id,
					     session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS.  If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
			L2TP_SKB_CB(skb)->has_seq = 1;
			ptr += 2;
			/* Skip past nr in the header */
			ptr += 2;

		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *)ptr);

		/* S bit of the default L2-specific sublayer */
		if (l2h & 0x40000000) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
			L2TP_SKB_CB(skb)->has_seq = 1;
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LAC,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if (!session->lns_mode && !session->send_seq) {
			trace_session_seqnum_lns_enable(session);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if (!session->lns_mode && session->send_seq) {
			trace_session_seqnum_lns_disable(session);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		} else if (session->send_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* Session data offset is defined only for L2TPv2 and is
	 * indicated by an optional 16-bit value in the header.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Prepare skb for adding to the session's reorder_q.  Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(l2tp_recv_common);
926 
927 /* Drop skbs from the session's reorder_q
928  */
929 static void l2tp_session_queue_purge(struct l2tp_session *session)
930 {
931 	struct sk_buff *skb = NULL;
932 
933 	while ((skb = skb_dequeue(&session->reorder_q))) {
934 		atomic_long_inc(&session->stats.rx_errors);
935 		kfree_skb(skb);
936 	}
937 }
938 
/* UDP encapsulation receive handler. See net/ipv4/udp.c for details.
 * Returns 0 if the packet was consumed here, or 1 (with the UDP header
 * restored) to hand it back to the UDP stack for normal socket
 * delivery — used for control packets and unknown sessions.
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_session *session = NULL;
	struct l2tp_tunnel *tunnel = NULL;
	struct net *net = sock_net(sk);
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u16 version;
	int length;

	/* UDP has verified checksum */

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
		goto pass;

	/* Point to L2TP header */
	optr = skb->data;
	ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *)ptr);

	/* Get protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T)
		goto pass;

	/* Skip flags */
	ptr += 2;

	if (version == L2TP_HDR_VER_2) {
		u16 tunnel_id, session_id;

		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *)ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *)ptr);
		ptr += 2;

		session = l2tp_v2_session_get(net, tunnel_id, session_id);
	} else {
		u32 session_id;

		ptr += 2;	/* skip reserved bits */
		session_id = ntohl(*(__be32 *)ptr);
		ptr += 4;

		/* v3 lookup uses sk to disambiguate colliding session IDs */
		session = l2tp_v3_session_get(net, sk, session_id);
	}

	if (!session || !session->recv_skb) {
		if (session)
			l2tp_session_dec_refcount(session);

		/* Not found? Pass to userspace to deal with */
		goto pass;
	}

	tunnel = session->tunnel;

	/* Check protocol version */
	if (version != tunnel->version)
		goto invalid;

	if (version == L2TP_HDR_VER_3 &&
	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
		l2tp_session_dec_refcount(session);
		goto invalid;
	}

	/* l2tp_recv_common consumes the skb on all paths */
	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
	l2tp_session_dec_refcount(session);

	return 0;

invalid:
	atomic_long_inc(&tunnel->stats.rx_invalid);

pass:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
1038 
/* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
				    __be16 port, u32 info, u8 *payload)
{
	/* Surface the error on the tunnel socket */
	sk->sk_err = err;
	sk_error_report(sk);

	/* Queue the detailed ICMP error for userspace only if it opted
	 * in via IP_RECVERR / IPV6_RECVERR.
	 */
	if (ip_hdr(skb)->version == IPVERSION) {
		if (inet_test_bit(RECVERR, sk))
			return ip_icmp_error(sk, skb, err, port, info, payload);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (inet6_test_bit(RECVERR6, sk))
			return ipv6_icmp_error(sk, skb, err, port, info, payload);
#endif
	}
}
1056 
1057 /************************************************************************
1058  * Transmit handling
1059  ***********************************************************************/
1060 
1061 /* Build an L2TP header for the session into the buffer provided.
1062  */
1063 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1064 {
1065 	struct l2tp_tunnel *tunnel = session->tunnel;
1066 	__be16 *bufp = buf;
1067 	__be16 *optr = buf;
1068 	u16 flags = L2TP_HDR_VER_2;
1069 	u32 tunnel_id = tunnel->peer_tunnel_id;
1070 	u32 session_id = session->peer_session_id;
1071 
1072 	if (session->send_seq)
1073 		flags |= L2TP_HDRFLAG_S;
1074 
1075 	/* Setup L2TP header. */
1076 	*bufp++ = htons(flags);
1077 	*bufp++ = htons(tunnel_id);
1078 	*bufp++ = htons(session_id);
1079 	if (session->send_seq) {
1080 		*bufp++ = htons(session->ns);
1081 		*bufp++ = 0;
1082 		session->ns++;
1083 		session->ns &= 0xffff;
1084 		trace_session_seqnum_update(session);
1085 	}
1086 
1087 	return bufp - optr;
1088 }
1089 
/* Serialize an L2TPv3 data header for the session into buf.
 * Returns the number of bytes written. Advances the session tx
 * sequence number when the default L2-specific sublayer carries it.
 */
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *)bufp) = htons(flags);
		bufp += 2;
		/* reserved half of the 4-byte flags word */
		*((__be16 *)bufp) = 0;
		bufp += 2;
	}

	*((__be32 *)bufp) = htonl(session->peer_session_id);
	bufp += 4;
	/* optional cookie follows the session id */
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = 0;

		if (session->send_seq) {
			/* S-bit plus 24-bit ns counter */
			l2h = 0x40000000 | session->ns;
			session->ns++;
			session->ns &= 0xffffff;
			trace_session_seqnum_update(session);
		}

		*((__be32 *)bufp) = htonl(l2h);
		bufp += 4;
	}

	return bufp - optr;
}
1129 
/* Queue the packet to IP for output: tunnel socket lock must be held.
 * Maps the stack's return code to NET_XMIT_SUCCESS / NET_XMIT_DROP.
 */
static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{
	int err;

	/* allow fragmentation decisions to be made by the IP layer */
	skb->ignore_df = 1;
	skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
	if (l2tp_sk_is_v6(tunnel->sock))
		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		err = ip_queue_xmit(tunnel->sock, skb, fl);

	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
1146 
/* Add the L2TP (and, for UDP encap, UDP) headers to skb and queue it
 * on the tunnel socket. Consumes the skb in all cases. On success,
 * *len is set to the packet length including the L2TP header but
 * before the UDP/IP encap headers are added.
 */
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int data_len = skb->len;
	struct sock *sk = tunnel->sock;
	int headroom, uhlen, udp_len;
	int ret = NET_XMIT_SUCCESS;
	struct inet_sock *inet;
	struct udphdr *uh;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	if (tunnel->version == L2TP_HDR_VER_2)
		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
	else
		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
	nf_reset_ct(skb);

	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
	 * nested socket calls on the same lockdep socket class. This can
	 * happen when data from a user socket is routed over l2tp, which uses
	 * another userspace socket.
	 */
	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);

	/* socket busy in process context: drop rather than block */
	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* The user-space may change the connection status for the user-space
	 * provided socket at run time: we must check it under the socket lock
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Report transmitted length before we add encap header, which keeps
	 * statistics consistent for both UDP and IP encap tx/rx paths.
	 */
	*len = skb->len;

	inet = inet_sk(sk);
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + session->hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		/* no extra encap header; IP header added by the stack */
		break;
	}

	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);

out_unlock:
	spin_unlock(&sk->sk_lock.slock);

	return ret;
}
1241 
1242 /* If caller requires the skb to have a ppp header, the header must be
1243  * inserted in the skb data before calling this function.
1244  */
1245 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1246 {
1247 	unsigned int len = 0;
1248 	int ret;
1249 
1250 	ret = l2tp_xmit_core(session, skb, &len);
1251 	if (ret == NET_XMIT_SUCCESS) {
1252 		atomic_long_inc(&session->tunnel->stats.tx_packets);
1253 		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1254 		atomic_long_inc(&session->stats.tx_packets);
1255 		atomic_long_add(len, &session->stats.tx_bytes);
1256 	} else {
1257 		atomic_long_inc(&session->tunnel->stats.tx_errors);
1258 		atomic_long_inc(&session->stats.tx_errors);
1259 	}
1260 	return ret;
1261 }
1262 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1263 
1264 /*****************************************************************************
 * Tunnel and session create/destroy.
1266  *****************************************************************************/
1267 
/* Remove an l2tp session from l2tp_core's lists. */
static void l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	if (tunnel) {
		struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
		struct l2tp_session *removed = session;

		/* take list_lock before the IDR lock: list_lock guards
		 * the per-tunnel session list, idr_lock the per-net IDRs
		 */
		spin_lock_bh(&tunnel->list_lock);
		spin_lock_bh(&pn->l2tp_session_idr_lock);

		/* Remove from the per-tunnel list */
		list_del_init(&session->list);

		/* Remove from per-net IDR */
		if (tunnel->version == L2TP_HDR_VER_3) {
			/* hashed sessions sit on the collision list, not
			 * directly in the v3 IDR
			 */
			if (hash_hashed(&session->hlist))
				l2tp_session_collision_del(pn, session);
			else
				removed = idr_remove(&pn->l2tp_v3_session_idr,
						     session->session_id);
		} else {
			u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
							      session->session_id);
			removed = idr_remove(&pn->l2tp_v2_session_idr,
					     session_key);
		}
		/* the removed IDR entry should have been this session */
		WARN_ON_ONCE(removed && removed != session);

		spin_unlock_bh(&pn->l2tp_session_idr_lock);
		spin_unlock_bh(&tunnel->list_lock);
	}
}
1302 
/* When the tunnel is closed, all the attached sessions need to go too.
 * Safe under list_lock because l2tp_session_delete() only queues the
 * real teardown onto the workqueue.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session;

	spin_lock_bh(&tunnel->list_lock);
	/* refuse any further session registrations on this tunnel */
	tunnel->acpt_newsess = false;
	list_for_each_entry(session, &tunnel->session_list, list)
		l2tp_session_delete(session);
	spin_unlock_bh(&tunnel->list_lock);
}
1315 
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

	if (!tunnel)
		return;

	l2tp_tunnel_delete(tunnel);
	/* drop the reference taken by l2tp_sk_to_tunnel() */
	l2tp_tunnel_dec_refcount(tunnel);
}
1327 
1328 static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
1329 {
1330 	struct l2tp_net *pn = l2tp_pernet(net);
1331 
1332 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1333 	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
1334 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1335 }
1336 
/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;

	/* tear down all sessions first */
	l2tp_tunnel_closeall(tunnel);

	/* If the tunnel socket was created within the kernel, use
	 * the sk API to release it here.
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	/* unpublish from the per-net IDR before dropping references */
	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
	/* drop initial ref */
	l2tp_tunnel_dec_refcount(tunnel);

	/* drop workqueue ref */
	l2tp_tunnel_dec_refcount(tunnel);
}
1364 
/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 *
 * Since we don't want these sockets to keep a namespace alive by
 * themselves, we drop the socket's namespace refcount after creation.
 * These sockets are freed when the namespace exits using the pernet
 * exit hook.
 *
 * On success, *sockp holds the new bound+connected socket; on failure
 * any partially set up socket is released and *sockp is NULL.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
			  !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			  !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		/* udp_sock_create() binds and connects for us */
		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* bind to the local address and tunnel id... */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			/* ...then connect to the peer */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *)&ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* bind to the local address and tunnel id... */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			/* ...then connect to the peer */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	/* release any partially set up socket on error */
	if (err < 0 && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
1487 
1488 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1489 		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1490 {
1491 	struct l2tp_tunnel *tunnel = NULL;
1492 	int err;
1493 	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1494 
1495 	if (cfg)
1496 		encap = cfg->encap;
1497 
1498 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1499 	if (!tunnel) {
1500 		err = -ENOMEM;
1501 		goto err;
1502 	}
1503 
1504 	tunnel->version = version;
1505 	tunnel->tunnel_id = tunnel_id;
1506 	tunnel->peer_tunnel_id = peer_tunnel_id;
1507 
1508 	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1509 	spin_lock_init(&tunnel->list_lock);
1510 	tunnel->acpt_newsess = true;
1511 	INIT_LIST_HEAD(&tunnel->session_list);
1512 
1513 	tunnel->encap = encap;
1514 
1515 	refcount_set(&tunnel->ref_count, 1);
1516 	tunnel->fd = fd;
1517 
1518 	/* Init delete workqueue struct */
1519 	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1520 
1521 	err = 0;
1522 err:
1523 	if (tunnelp)
1524 		*tunnelp = tunnel;
1525 
1526 	return err;
1527 }
1528 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1529 
1530 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1531 				enum l2tp_encap_type encap)
1532 {
1533 	struct l2tp_tunnel *tunnel;
1534 
1535 	if (!net_eq(sock_net(sk), net))
1536 		return -EINVAL;
1537 
1538 	if (sk->sk_type != SOCK_DGRAM)
1539 		return -EPROTONOSUPPORT;
1540 
1541 	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1542 		return -EPROTONOSUPPORT;
1543 
1544 	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1545 	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1546 		return -EPROTONOSUPPORT;
1547 
1548 	tunnel = l2tp_sk_to_tunnel(sk);
1549 	if (tunnel) {
1550 		l2tp_tunnel_dec_refcount(tunnel);
1551 		return -EBUSY;
1552 	}
1553 
1554 	return 0;
1555 }
1556 
/* Publish the tunnel in the per-net IDR and attach its socket, either
 * a kernel-created one (tunnel->fd < 0) or the userspace fd. On error
 * the IDR slot is released and the socket reference dropped.
 */
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
			 struct l2tp_tunnel_cfg *cfg)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	u32 tunnel_id = tunnel->tunnel_id;
	struct socket *sock;
	struct sock *sk;
	int ret;

	/* reserve the id with a NULL entry; replaced with the tunnel
	 * pointer below once setup succeeds
	 */
	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
			    GFP_ATOMIC);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
	if (ret)
		return ret == -ENOSPC ? -EEXIST : ret;

	if (tunnel->fd < 0) {
		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
					      tunnel->peer_tunnel_id, cfg,
					      &sock);
		if (ret < 0)
			goto err;
	} else {
		sock = sockfd_lookup(tunnel->fd, &ret);
		if (!sock)
			goto err;
	}

	sk = sock->sk;
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	/* validated under sk_callback_lock so the socket cannot be
	 * claimed by another tunnel concurrently
	 */
	ret = l2tp_validate_socket(sk, net, tunnel->encap);
	if (ret < 0)
		goto err_inval_sock;
	write_unlock_bh(&sk->sk_callback_lock);

	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		struct udp_tunnel_sock_cfg udp_cfg = {
			.encap_type = UDP_ENCAP_L2TPINUDP,
			.encap_rcv = l2tp_udp_encap_recv,
			.encap_err_rcv = l2tp_udp_encap_err_recv,
			.encap_destroy = l2tp_udp_encap_destroy,
		};

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	}

	sk->sk_allocation = GFP_ATOMIC;
	release_sock(sk);

	/* hold the sock for the lifetime of the tunnel */
	sock_hold(sk);
	tunnel->sock = sk;
	tunnel->l2tp_net = net;

	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);

	trace_register_tunnel(tunnel);

	if (tunnel->fd >= 0)
		sockfd_put(sock);

	return 0;

err_inval_sock:
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	if (tunnel->fd < 0)
		sock_release(sock);
	else
		sockfd_put(sock);
err:
	l2tp_tunnel_remove(net, tunnel);
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1635 
1636 /* This function is used by the netlink TUNNEL_DELETE command.
1637  */
1638 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1639 {
1640 	if (!test_and_set_bit(0, &tunnel->dead)) {
1641 		trace_delete_tunnel(tunnel);
1642 		l2tp_tunnel_inc_refcount(tunnel);
1643 		queue_work(l2tp_wq, &tunnel->del_work);
1644 	}
1645 }
1646 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1647 
1648 void l2tp_session_delete(struct l2tp_session *session)
1649 {
1650 	if (!test_and_set_bit(0, &session->dead)) {
1651 		trace_delete_session(session);
1652 		l2tp_session_inc_refcount(session);
1653 		queue_work(l2tp_wq, &session->del_work);
1654 	}
1655 }
1656 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1657 
/* Workqueue session deletion function */
static void l2tp_session_del_work(struct work_struct *work)
{
	struct l2tp_session *session = container_of(work, struct l2tp_session,
						    del_work);

	/* unpublish, then discard any queued rx packets */
	l2tp_session_unhash(session);
	l2tp_session_queue_purge(session);
	/* notify the pseudowire driver, if it installed a hook */
	if (session->session_close)
		(*session->session_close)(session);

	/* drop initial ref */
	l2tp_session_dec_refcount(session);

	/* drop workqueue ref */
	l2tp_session_dec_refcount(session);
}
1675 
1676 /* We come here whenever a session's send_seq, cookie_len or
1677  * l2specific_type parameters are set.
1678  */
1679 void l2tp_session_set_header_len(struct l2tp_session *session, int version,
1680 				 enum l2tp_encap_type encap)
1681 {
1682 	if (version == L2TP_HDR_VER_2) {
1683 		session->hdr_len = 6;
1684 		if (session->send_seq)
1685 			session->hdr_len += 4;
1686 	} else {
1687 		session->hdr_len = 4 + session->cookie_len;
1688 		session->hdr_len += l2tp_get_l2specific_len(session);
1689 		if (encap == L2TP_ENCAPTYPE_UDP)
1690 			session->hdr_len += 4;
1691 	}
1692 }
1693 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1694 
/* Allocate and initialise a session (plus priv_size bytes of
 * pseudowire-private data). The session is not registered with the
 * tunnel here. Returns the session with one reference held, or
 * ERR_PTR(-ENOMEM).
 */
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
	if (session) {
		session->magic = L2TP_SESSION_MAGIC;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;
		/* sequence numbers are 16-bit in v2, 24-bit in v3 */
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr_max = 0xffff;
		else
			session->nr_max = 0xffffff;
		session->nr_window_size = session->nr_max / 2;
		session->nr_oos_count_max = 4;

		/* Use NR of first received packet */
		session->reorder_skip = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
		INIT_HLIST_NODE(&session->hlist);
		INIT_LIST_HEAD(&session->clist);
		INIT_LIST_HEAD(&session->list);
		INIT_WORK(&session->del_work, l2tp_session_del_work);

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->l2specific_type = cfg->l2specific_type;
			/* NOTE(review): cookie lengths are copied unchecked
			 * here; presumably validated by callers against the
			 * cookie[] array size — confirm at call sites.
			 */
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);

		/* initial reference, dropped in l2tp_session_del_work() */
		refcount_set(&session->ref_count, 1);

		return session;
	}

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
1751 
1752 /*****************************************************************************
1753  * Init and cleanup
1754  *****************************************************************************/
1755 
1756 static __net_init int l2tp_init_net(struct net *net)
1757 {
1758 	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1759 
1760 	idr_init(&pn->l2tp_tunnel_idr);
1761 	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1762 
1763 	idr_init(&pn->l2tp_v2_session_idr);
1764 	idr_init(&pn->l2tp_v3_session_idr);
1765 	spin_lock_init(&pn->l2tp_session_idr_lock);
1766 
1767 	return 0;
1768 }
1769 
/* Per-netns pre-exit: queue deletion of every tunnel, then wait for
 * all queued tunnel/session delete work to finish before the exit
 * hook destroys the IDRs.
 */
static __net_exit void l2tp_pre_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	unsigned long tunnel_id, tmp;

	/* l2tp_tunnel_delete() only queues work, so it is safe to call
	 * while walking the IDR under RCU
	 */
	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel)
			l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	if (l2tp_wq)
		drain_workqueue(l2tp_wq);
}
1786 
1787 static __net_exit void l2tp_exit_net(struct net *net)
1788 {
1789 	struct l2tp_net *pn = l2tp_pernet(net);
1790 
1791 	idr_destroy(&pn->l2tp_v2_session_idr);
1792 	idr_destroy(&pn->l2tp_v3_session_idr);
1793 	idr_destroy(&pn->l2tp_tunnel_idr);
1794 }
1795 
/* Per-network-namespace hooks: pre_exit queues tunnel teardown and
 * drains the workqueue before exit destroys the IDRs.
 */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.pre_exit = l2tp_pre_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1803 
1804 static int __init l2tp_init(void)
1805 {
1806 	int rc = 0;
1807 
1808 	rc = register_pernet_device(&l2tp_net_ops);
1809 	if (rc)
1810 		goto out;
1811 
1812 	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1813 	if (!l2tp_wq) {
1814 		pr_err("alloc_workqueue failed\n");
1815 		unregister_pernet_device(&l2tp_net_ops);
1816 		rc = -ENOMEM;
1817 		goto out;
1818 	}
1819 
1820 	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1821 
1822 out:
1823 	return rc;
1824 }
1825 
1826 static void __exit l2tp_exit(void)
1827 {
1828 	unregister_pernet_device(&l2tp_net_ops);
1829 	if (l2tp_wq) {
1830 		destroy_workqueue(l2tp_wq);
1831 		l2tp_wq = NULL;
1832 	}
1833 }
1834 
/* Module entry points and metadata */
module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);
1842