xref: /linux/net/l2tp/l2tp_core.c (revision ebe560ea5f54134279356703e73b7f867c89db13)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:	Martijn van Oosterhout <kleptog@svana.org>
10  *		James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *		Michal Ostrowski <mostrows@speakeasy.net>
13  *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *		David S. Miller (davem@redhat.com)
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/list.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/spinlock.h>
27 #include <linux/kthread.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/errno.h>
31 #include <linux/jiffies.h>
32 
33 #include <linux/netdevice.h>
34 #include <linux/net.h>
35 #include <linux/inetdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/init.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <linux/udp.h>
41 #include <linux/l2tp.h>
42 #include <linux/sort.h>
43 #include <linux/file.h>
44 #include <linux/nsproxy.h>
45 #include <net/net_namespace.h>
46 #include <net/netns/generic.h>
47 #include <net/dst.h>
48 #include <net/ip.h>
49 #include <net/udp.h>
50 #include <net/udp_tunnel.h>
51 #include <net/inet_common.h>
52 #include <net/xfrm.h>
53 #include <net/protocol.h>
54 #include <net/inet6_connection_sock.h>
55 #include <net/inet_ecn.h>
56 #include <net/ip6_route.h>
57 #include <net/ip6_checksum.h>
58 
59 #include <asm/byteorder.h>
60 #include <linux/atomic.h>
61 
62 #include "l2tp_core.h"
63 
64 #define CREATE_TRACE_POINTS
65 #include "trace.h"
66 
67 #define L2TP_DRV_VERSION	"V2.0"
68 
69 /* L2TP header constants */
70 #define L2TP_HDRFLAG_T	   0x8000
71 #define L2TP_HDRFLAG_L	   0x4000
72 #define L2TP_HDRFLAG_S	   0x0800
73 #define L2TP_HDRFLAG_O	   0x0200
74 #define L2TP_HDRFLAG_P	   0x0100
75 
76 #define L2TP_HDR_VER_MASK  0x000F
77 #define L2TP_HDR_VER_2	   0x0002
78 #define L2TP_HDR_VER_3	   0x0003
79 
80 /* L2TPv3 default L2-specific sublayer */
81 #define L2TP_SLFLAG_S	   0x40000000
82 #define L2TP_SL_SEQ_MASK   0x00ffffff
83 
84 #define L2TP_HDR_SIZE_MAX		14
85 
86 /* Default trace flags */
87 #define L2TP_DEFAULT_DEBUG_FLAGS	0
88 
89 #define L2TP_DEPTH_NESTING		2
90 #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
91 #error "L2TP requires its own lockdep subclass"
92 #endif
93 
94 /* Private data stored for received packets in the skb.
95  */
96 struct l2tp_skb_cb {
97 	u32			ns;
98 	u16			has_seq;
99 	u16			length;
100 	unsigned long		expires;
101 };
102 
103 #define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
104 
105 static struct workqueue_struct *l2tp_wq;
106 
107 /* per-net private data for this module */
108 static unsigned int l2tp_net_id;
109 struct l2tp_net {
110 	/* Lock for write access to l2tp_tunnel_idr */
111 	spinlock_t l2tp_tunnel_idr_lock;
112 	struct idr l2tp_tunnel_idr;
113 	/* Lock for write access to l2tp_v[23]_session_idr/htable */
114 	spinlock_t l2tp_session_idr_lock;
115 	struct idr l2tp_v2_session_idr;
116 	struct idr l2tp_v3_session_idr;
117 	struct hlist_head l2tp_v3_session_htable[16];
118 };
119 
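/* L2TPv2 sessions are scoped by tunnel, so the per-net IDR key packs the
 * 16-bit tunnel ID into the upper half and the 16-bit session ID into the
 * lower half, e.g. tunnel 1, session 2 -> key 0x00010002.
 */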
120 static u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
121 {
122 	return ((u32)tunnel_id) << 16 | session_id;
123 }
124 
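/* L2TPv3 session IDs may collide across UDP-encap tunnels. Colliding
 * sessions are tracked in a hash table keyed on tunnel socket pointer plus
 * session ID so that receive can demultiplex on the socket.
 */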
125 static unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
126 {
127 	return ((unsigned long)sk) + session_id;
128 }
129 
130 #if IS_ENABLED(CONFIG_IPV6)
131 static bool l2tp_sk_is_v6(struct sock *sk)
132 {
133 	return sk->sk_family == PF_INET6 &&
134 	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
135 }
136 #endif
137 
138 static struct l2tp_net *l2tp_pernet(const struct net *net)
139 {
140 	return net_generic(net, l2tp_net_id);
141 }
142 
143 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
144 {
145 	struct sock *sk = tunnel->sock;
146 
147 	trace_free_tunnel(tunnel);
148 
149 	if (sk) {
150 		/* Disable udp encapsulation */
151 		switch (tunnel->encap) {
152 		case L2TP_ENCAPTYPE_UDP:
153 			/* No longer an encapsulation socket. See net/ipv4/udp.c */
154 			WRITE_ONCE(udp_sk(sk)->encap_type, 0);
155 			udp_sk(sk)->encap_rcv = NULL;
156 			udp_sk(sk)->encap_destroy = NULL;
157 			break;
158 		case L2TP_ENCAPTYPE_IP:
159 			break;
160 		}
161 
162 		tunnel->sock = NULL;
163 		sock_put(sk);
164 	}
165 
166 	kfree_rcu(tunnel, rcu);
167 }
168 
169 static void l2tp_session_free(struct l2tp_session *session)
170 {
171 	trace_free_session(session);
172 	if (session->tunnel)
173 		l2tp_tunnel_put(session->tunnel);
174 	kfree_rcu(session, rcu);
175 }
176 
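/* Return the tunnel using the given socket, if any, holding a new reference
 * on it. Walks the per-net tunnel IDR under RCU.
 */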
177 struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk)
178 {
179 	const struct net *net = sock_net(sk);
180 	unsigned long tunnel_id, tmp;
181 	struct l2tp_tunnel *tunnel;
182 	struct l2tp_net *pn;
183 
184 	rcu_read_lock_bh();
185 	pn = l2tp_pernet(net);
186 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
187 		if (tunnel &&
188 		    tunnel->sock == sk &&
189 		    refcount_inc_not_zero(&tunnel->ref_count)) {
190 			rcu_read_unlock_bh();
191 			return tunnel;
192 		}
193 	}
194 	rcu_read_unlock_bh();
195 
196 	return NULL;
197 }
198 EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
199 
200 void l2tp_tunnel_put(struct l2tp_tunnel *tunnel)
201 {
202 	if (refcount_dec_and_test(&tunnel->ref_count))
203 		l2tp_tunnel_free(tunnel);
204 }
205 EXPORT_SYMBOL_GPL(l2tp_tunnel_put);
206 
207 void l2tp_session_put(struct l2tp_session *session)
208 {
209 	if (refcount_dec_and_test(&session->ref_count))
210 		l2tp_session_free(session);
211 }
212 EXPORT_SYMBOL_GPL(l2tp_session_put);
213 
214 /* Lookup a tunnel. A new reference is held on the returned tunnel. */
215 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
216 {
217 	const struct l2tp_net *pn = l2tp_pernet(net);
218 	struct l2tp_tunnel *tunnel;
219 
220 	rcu_read_lock_bh();
221 	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
222 	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
223 		rcu_read_unlock_bh();
224 		return tunnel;
225 	}
226 	rcu_read_unlock_bh();
227 
228 	return NULL;
229 }
230 EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
231 
232 struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key)
233 {
234 	struct l2tp_net *pn = l2tp_pernet(net);
235 	struct l2tp_tunnel *tunnel = NULL;
236 
237 	rcu_read_lock_bh();
238 again:
239 	tunnel = idr_get_next_ul(&pn->l2tp_tunnel_idr, key);
240 	if (tunnel) {
241 		if (refcount_inc_not_zero(&tunnel->ref_count)) {
242 			rcu_read_unlock_bh();
243 			return tunnel;
244 		}
245 		(*key)++;
246 		goto again;
247 	}
248 	rcu_read_unlock_bh();
249 
250 	return NULL;
251 }
252 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_next);
253 
254 struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
255 {
256 	const struct l2tp_net *pn = l2tp_pernet(net);
257 	struct l2tp_session *session;
258 
259 	rcu_read_lock_bh();
260 	session = idr_find(&pn->l2tp_v3_session_idr, session_id);
261 	if (session && !hash_hashed(&session->hlist) &&
262 	    refcount_inc_not_zero(&session->ref_count)) {
263 		rcu_read_unlock_bh();
264 		return session;
265 	}
266 
267 	/* If we get here and session is non-NULL, the session_id
268 	 * collides with one in another tunnel. If sk is non-NULL,
269 	 * find the session matching sk.
270 	 */
271 	if (session && sk) {
272 		unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);
273 
274 		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
275 					   hlist, key) {
276 			/* session->tunnel may be NULL if another thread is in
277 			 * l2tp_session_register and has added an item to
278 			 * l2tp_v3_session_htable but hasn't yet added the
279 			 * session to its tunnel's session_list.
280 			 */
281 			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
282 
283 			if (session->session_id == session_id &&
284 			    tunnel && tunnel->sock == sk &&
285 			    refcount_inc_not_zero(&session->ref_count)) {
286 				rcu_read_unlock_bh();
287 				return session;
288 			}
289 		}
290 	}
291 	rcu_read_unlock_bh();
292 
293 	return NULL;
294 }
295 EXPORT_SYMBOL_GPL(l2tp_v3_session_get);
296 
297 struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
298 {
299 	u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
300 	const struct l2tp_net *pn = l2tp_pernet(net);
301 	struct l2tp_session *session;
302 
303 	rcu_read_lock_bh();
304 	session = idr_find(&pn->l2tp_v2_session_idr, session_key);
305 	if (session && refcount_inc_not_zero(&session->ref_count)) {
306 		rcu_read_unlock_bh();
307 		return session;
308 	}
309 	rcu_read_unlock_bh();
310 
311 	return NULL;
312 }
313 EXPORT_SYMBOL_GPL(l2tp_v2_session_get);
314 
315 struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
316 				      u32 tunnel_id, u32 session_id)
317 {
318 	if (pver == L2TP_HDR_VER_2)
319 		return l2tp_v2_session_get(net, tunnel_id, session_id);
320 	else
321 		return l2tp_v3_session_get(net, sk, session_id);
322 }
323 EXPORT_SYMBOL_GPL(l2tp_session_get);
324 
325 static struct l2tp_session *l2tp_v2_session_get_next(const struct net *net,
326 						     u16 tid,
327 						     unsigned long *key)
328 {
329 	struct l2tp_net *pn = l2tp_pernet(net);
330 	struct l2tp_session *session = NULL;
331 
332 	/* Start searching within the range of the tid */
333 	if (*key == 0)
334 		*key = l2tp_v2_session_key(tid, 0);
335 
336 	rcu_read_lock_bh();
337 again:
338 	session = idr_get_next_ul(&pn->l2tp_v2_session_idr, key);
339 	if (session) {
340 		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
341 
342 		/* ignore sessions with id 0 as they are internal for pppol2tp */
343 		if (session->session_id == 0) {
344 			(*key)++;
345 			goto again;
346 		}
347 
348 		if (tunnel->tunnel_id == tid &&
349 		    refcount_inc_not_zero(&session->ref_count)) {
350 			rcu_read_unlock_bh();
351 			return session;
352 		}
353 
354 		(*key)++;
355 		if (tunnel->tunnel_id == tid)
356 			goto again;
357 	}
358 	rcu_read_unlock_bh();
359 
360 	return NULL;
361 }
362 
363 static struct l2tp_session *l2tp_v3_session_get_next(const struct net *net,
364 						     u32 tid, struct sock *sk,
365 						     unsigned long *key)
366 {
367 	struct l2tp_net *pn = l2tp_pernet(net);
368 	struct l2tp_session *session = NULL;
369 
370 	rcu_read_lock_bh();
371 again:
372 	session = idr_get_next_ul(&pn->l2tp_v3_session_idr, key);
373 	if (session && !hash_hashed(&session->hlist)) {
374 		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
375 
376 		if (tunnel && tunnel->tunnel_id == tid &&
377 		    refcount_inc_not_zero(&session->ref_count)) {
378 			rcu_read_unlock_bh();
379 			return session;
380 		}
381 
382 		(*key)++;
383 		goto again;
384 	}
385 
386 	/* If we get here and session is non-NULL, the IDR entry may be one
387 	 * where the session_id collides with one in another tunnel. Check
388 	 * session_htable for a match. There can only be one session of a given
389 	 * ID per tunnel so we can return as soon as a match is found.
390 	 */
391 	if (session && hash_hashed(&session->hlist)) {
392 		unsigned long hkey = l2tp_v3_session_hashkey(sk, session->session_id);
393 		u32 sid = session->session_id;
394 
395 		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
396 					   hlist, hkey) {
397 			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
398 
399 			if (session->session_id == sid &&
400 			    tunnel && tunnel->tunnel_id == tid &&
401 			    refcount_inc_not_zero(&session->ref_count)) {
402 				rcu_read_unlock_bh();
403 				return session;
404 			}
405 		}
406 
407 		/* If no match found, the colliding session ID isn't in our
408 		 * tunnel so try the next session ID.
409 		 */
410 		(*key)++;
411 		goto again;
412 	}
413 
414 	rcu_read_unlock_bh();
415 
416 	return NULL;
417 }
418 
419 struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
420 					   u32 tunnel_id, unsigned long *key)
421 {
422 	if (pver == L2TP_HDR_VER_2)
423 		return l2tp_v2_session_get_next(net, tunnel_id, key);
424 	else
425 		return l2tp_v3_session_get_next(net, tunnel_id, sk, key);
426 }
427 EXPORT_SYMBOL_GPL(l2tp_session_get_next);
428 
429 /* Lookup a session by interface name.
430  * This is very inefficient but is only used by management interfaces.
431  */
432 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
433 						const char *ifname)
434 {
435 	struct l2tp_net *pn = l2tp_pernet(net);
436 	unsigned long tunnel_id, tmp;
437 	struct l2tp_session *session;
438 	struct l2tp_tunnel *tunnel;
439 
440 	rcu_read_lock_bh();
441 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
442 		if (tunnel) {
443 			list_for_each_entry_rcu(session, &tunnel->session_list, list) {
444 				if (!strcmp(session->ifname, ifname)) {
445 					refcount_inc(&session->ref_count);
446 					rcu_read_unlock_bh();
447 
448 					return session;
449 				}
450 			}
451 		}
452 	}
453 	rcu_read_unlock_bh();
454 
455 	return NULL;
456 }
457 EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
458 
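/* Add a session to a collision list, taking a reference on the session.
 * The reference is dropped in l2tp_session_collision_del when the session
 * leaves the list.
 */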
459 static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
460 				       struct l2tp_session *session)
461 {
462 	refcount_inc(&session->ref_count);
463 	WARN_ON_ONCE(session->coll_list);
464 	session->coll_list = clist;
465 	spin_lock(&clist->lock);
466 	list_add(&session->clist, &clist->list);
467 	spin_unlock(&clist->lock);
468 }
469 
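/* Handle registration of an L2TPv3 session whose ID is already in use by a
 * session in another UDP-encap tunnel. Both sessions are added to
 * l2tp_v3_session_htable and linked on a shared collision list so that
 * lookups can resolve the ID using the tunnel socket.
 */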
470 static int l2tp_session_collision_add(struct l2tp_net *pn,
471 				      struct l2tp_session *session1,
472 				      struct l2tp_session *session2)
473 {
474 	struct l2tp_session_coll_list *clist;
475 
476 	lockdep_assert_held(&pn->l2tp_session_idr_lock);
477 
478 	if (!session2)
479 		return -EEXIST;
480 
481 	/* If existing session is in IP-encap tunnel, refuse new session */
482 	if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
483 		return -EEXIST;
484 
485 	clist = session2->coll_list;
486 	if (!clist) {
487 		/* First collision. Allocate list to manage the collided sessions
488 		 * and add the existing session to the list.
489 		 */
490 		clist = kmalloc_obj(*clist, GFP_ATOMIC);
491 		if (!clist)
492 			return -ENOMEM;
493 
494 		spin_lock_init(&clist->lock);
495 		INIT_LIST_HEAD(&clist->list);
496 		refcount_set(&clist->ref_count, 1);
497 		l2tp_session_coll_list_add(clist, session2);
498 	}
499 
500 	/* If existing session isn't already in the session hlist, add it. */
501 	if (!hash_hashed(&session2->hlist))
502 		hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
503 			     session2->hlist_key);
504 
505 	/* Add new session to the hlist and collision list */
506 	hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
507 		     session1->hlist_key);
508 	refcount_inc(&clist->ref_count);
509 	l2tp_session_coll_list_add(clist, session1);
510 
511 	return 0;
512 }
513 
514 static void l2tp_session_collision_del(struct l2tp_net *pn,
515 				       struct l2tp_session *session)
516 {
517 	struct l2tp_session_coll_list *clist = session->coll_list;
518 	unsigned long session_key = session->session_id;
519 	struct l2tp_session *session2;
520 
521 	lockdep_assert_held(&pn->l2tp_session_idr_lock);
522 
523 	hash_del_rcu(&session->hlist);
524 
525 	if (clist) {
526 		/* Remove session from its collision list. If there
527 		 * are other sessions with the same ID, replace this
528 		 * session's IDR entry with that session, otherwise
529 		 * remove the IDR entry. If this is the last session,
530 		 * the collision list data is freed.
531 		 */
532 		spin_lock(&clist->lock);
533 		list_del_init(&session->clist);
534 		session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
535 		if (session2) {
536 			void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);
537 
538 			WARN_ON_ONCE(IS_ERR_VALUE(old));
539 		} else {
540 			void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);
541 
542 			WARN_ON_ONCE(removed != session);
543 		}
544 		session->coll_list = NULL;
545 		spin_unlock(&clist->lock);
546 		if (refcount_dec_and_test(&clist->ref_count))
547 			kfree(clist);
548 		l2tp_session_put(session);
549 	}
550 }
551 
552 int l2tp_session_register(struct l2tp_session *session,
553 			  struct l2tp_tunnel *tunnel)
554 {
555 	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
556 	struct l2tp_session *other_session = NULL;
557 	void *old = NULL;
558 	u32 session_key;
559 	int err;
560 
561 	spin_lock_bh(&tunnel->list_lock);
562 	spin_lock_bh(&pn->l2tp_session_idr_lock);
563 
564 	if (!tunnel->acpt_newsess) {
565 		err = -ENODEV;
566 		goto out;
567 	}
568 
569 	if (tunnel->version == L2TP_HDR_VER_3) {
570 		session_key = session->session_id;
571 		err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
572 				    &session_key, session_key, GFP_ATOMIC);
573 		/* IP encap expects session IDs to be globally unique, while
574 		 * UDP encap doesn't. This isn't per the RFC, which says that
575 		 * sessions are identified only by the session ID, but is to
576 		 * support existing userspace which depends on it.
577 		 */
578 		if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
579 			other_session = idr_find(&pn->l2tp_v3_session_idr,
580 						 session_key);
581 			err = l2tp_session_collision_add(pn, session,
582 							 other_session);
583 		}
584 	} else {
585 		session_key = l2tp_v2_session_key(tunnel->tunnel_id,
586 						  session->session_id);
587 		err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
588 				    &session_key, session_key, GFP_ATOMIC);
589 	}
590 
591 	if (err) {
592 		if (err == -ENOSPC)
593 			err = -EEXIST;
594 		goto out;
595 	}
596 
597 	refcount_inc(&tunnel->ref_count);
598 	WRITE_ONCE(session->tunnel, tunnel);
599 	list_add_rcu(&session->list, &tunnel->session_list);
600 
601 	/* this makes session available to lockless getters */
602 	if (tunnel->version == L2TP_HDR_VER_3) {
603 		if (!other_session)
604 			old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
605 	} else {
606 		old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
607 	}
608 
609 	/* old should be NULL, unless something removed or modified
610 	 * the IDR entry after our idr_alloc_u32 above (which shouldn't
611 	 * happen).
612 	 */
613 	WARN_ON_ONCE(old);
614 out:
615 	spin_unlock_bh(&pn->l2tp_session_idr_lock);
616 	spin_unlock_bh(&tunnel->list_lock);
617 
618 	if (!err)
619 		trace_register_session(session);
620 
621 	return err;
622 }
623 EXPORT_SYMBOL_GPL(l2tp_session_register);
624 
625 /*****************************************************************************
626  * Receive data handling
627  *****************************************************************************/
628 
629 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
630  * number.
631  */
632 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
633 {
634 	struct sk_buff *skbp;
635 	struct sk_buff *tmp;
636 	u32 ns = L2TP_SKB_CB(skb)->ns;
637 
638 	spin_lock_bh(&session->reorder_q.lock);
639 	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
640 		if (L2TP_SKB_CB(skbp)->ns > ns) {
641 			__skb_queue_before(&session->reorder_q, skbp, skb);
642 			atomic_long_inc(&session->stats.rx_oos_packets);
643 			goto out;
644 		}
645 	}
646 
647 	__skb_queue_tail(&session->reorder_q, skb);
648 
649 out:
650 	spin_unlock_bh(&session->reorder_q.lock);
651 }
652 
653 /* Dequeue a single skb.
654  */
655 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
656 {
657 	struct l2tp_tunnel *tunnel = session->tunnel;
658 	int length = L2TP_SKB_CB(skb)->length;
659 
660 	/* We're about to requeue the skb, so return resources
661 	 * to its current owner (a socket receive buffer).
662 	 */
663 	skb_orphan(skb);
664 
665 	atomic_long_inc(&tunnel->stats.rx_packets);
666 	atomic_long_add(length, &tunnel->stats.rx_bytes);
667 	atomic_long_inc(&session->stats.rx_packets);
668 	atomic_long_add(length, &session->stats.rx_bytes);
669 
670 	if (L2TP_SKB_CB(skb)->has_seq) {
671 		/* Bump our Nr */
672 		session->nr++;
673 		session->nr &= session->nr_max;
674 		trace_session_seqnum_update(session);
675 	}
676 
677 	/* call private receive handler */
678 	if (session->recv_skb)
679 		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
680 	else
681 		kfree_skb(skb);
682 }
683 
684 /* Dequeue skbs from the session's reorder_q, subject to packet order.
685  * Skbs that have been in the queue for too long are simply discarded.
686  */
687 static void l2tp_recv_dequeue(struct l2tp_session *session)
688 {
689 	struct sk_buff *skb;
690 	struct sk_buff *tmp;
691 
692 	/* If the pkt at the head of the queue has the nr that we
693 	 * expect to send up next, dequeue it and any other
694 	 * in-sequence packets behind it.
695 	 */
696 start:
697 	spin_lock_bh(&session->reorder_q.lock);
698 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
699 		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
700 
701 		/* If the packet has been pending on the queue for too long, discard it */
702 		if (time_after(jiffies, cb->expires)) {
703 			atomic_long_inc(&session->stats.rx_seq_discards);
704 			atomic_long_inc(&session->stats.rx_errors);
705 			trace_session_pkt_expired(session, cb->ns);
706 			session->reorder_skip = 1;
707 			__skb_unlink(skb, &session->reorder_q);
708 			kfree_skb(skb);
709 			continue;
710 		}
711 
712 		if (cb->has_seq) {
713 			if (session->reorder_skip) {
714 				session->reorder_skip = 0;
715 				session->nr = cb->ns;
716 				trace_session_seqnum_reset(session);
717 			}
718 			if (cb->ns != session->nr)
719 				goto out;
720 		}
721 		__skb_unlink(skb, &session->reorder_q);
722 
723 		/* Process the skb. We release the queue lock while we
724 		 * do so to let other contexts process the queue.
725 		 */
726 		spin_unlock_bh(&session->reorder_q.lock);
727 		l2tp_recv_dequeue_skb(session, skb);
728 		goto start;
729 	}
730 
731 out:
732 	spin_unlock_bh(&session->reorder_q.lock);
733 }
734 
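/* Return true if sequence number nr is within the receive window, allowing
 * for wrap at nr_max.
 */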
735 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
736 {
737 	u32 nws;
738 
739 	if (nr >= session->nr)
740 		nws = nr - session->nr;
741 	else
742 		nws = (session->nr_max + 1) - (session->nr - nr);
743 
744 	return nws < session->nr_window_size;
745 }
746 
747 /* If packet has sequence numbers, queue it if acceptable. Returns 0 if
748  * acceptable, else non-zero.
749  */
750 static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
751 {
752 	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
753 
754 	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
755 		/* Packet sequence number is outside allowed window.
756 		 * Discard it.
757 		 */
758 		trace_session_pkt_outside_rx_window(session, cb->ns);
759 		goto discard;
760 	}
761 
762 	if (session->reorder_timeout != 0) {
763 		/* Packet reordering enabled. Add skb to session's
764 		 * reorder queue, in order of ns.
765 		 */
766 		l2tp_recv_queue_skb(session, skb);
767 		goto out;
768 	}
769 
770 	/* Packet reordering disabled. Discard out-of-sequence packets, while
771 	 * tracking the number of in-sequence packets after the first OOS packet
772 	 * is seen. After nr_oos_count_max in-sequence packets, reset the
773 	 * sequence number to re-enable packet reception.
774 	 */
775 	if (cb->ns == session->nr) {
776 		skb_queue_tail(&session->reorder_q, skb);
777 	} else {
778 		u32 nr_oos = cb->ns;
779 		u32 nr_next = (session->nr_oos + 1) & session->nr_max;
780 
781 		if (nr_oos == nr_next)
782 			session->nr_oos_count++;
783 		else
784 			session->nr_oos_count = 0;
785 
786 		session->nr_oos = nr_oos;
787 		if (session->nr_oos_count > session->nr_oos_count_max) {
788 			session->reorder_skip = 1;
789 		}
790 		if (!session->reorder_skip) {
791 			atomic_long_inc(&session->stats.rx_seq_discards);
792 			trace_session_pkt_oos(session, cb->ns);
793 			goto discard;
794 		}
795 		skb_queue_tail(&session->reorder_q, skb);
796 	}
797 
798 out:
799 	return 0;
800 
801 discard:
802 	return 1;
803 }
804 
805 /* Do receive processing of L2TP data frames. We handle both L2TPv2
806  * and L2TPv3 data frames here.
807  *
808  * L2TPv2 Data Message Header
809  *
810  *  0                   1                   2                   3
811  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
812  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
813  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
814  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
815  * |           Tunnel ID           |           Session ID          |
816  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
817  * |             Ns (opt)          |             Nr (opt)          |
818  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
819  * |      Offset Size (opt)        |    Offset pad... (opt)
820  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
821  *
822  * Data frames are marked by T=0. All other fields are the same as
823  * those in L2TP control frames.
824  *
825  * L2TPv3 Data Message Header
826  *
827  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
828  * |                      L2TP Session Header                      |
829  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
830  * |                      L2-Specific Sublayer                     |
831  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
832  * |                        Tunnel Payload                      ...
833  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
834  *
835  * L2TPv3 Session Header Over IP
836  *
837  *  0                   1                   2                   3
838  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
839  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
840  * |                           Session ID                          |
841  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
842  * |               Cookie (optional, maximum 64 bits)...
843  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
844  *                                                                 |
845  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
846  *
847  * L2TPv3 L2-Specific Sublayer Format
848  *
849  *  0                   1                   2                   3
850  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
851  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
852  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
853  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
854  *
855  * Cookie value and sublayer format are negotiated with the peer when
856  * the session is set up. Unlike L2TPv2, we do not need to parse the
857  * packet header to determine if optional fields are present.
858  *
859  * Caller must already have parsed the frame and determined that it is
860  * a data (not control) frame before coming here. Fields up to the
861  * session-id have already been parsed and ptr points to the data
862  * after the session-id.
863  */
864 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
865 		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
866 		      int length)
867 {
868 	struct l2tp_tunnel *tunnel = session->tunnel;
869 	int offset;
870 
871 	/* Parse and check optional cookie */
872 	if (session->peer_cookie_len > 0) {
873 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
874 			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
875 					     tunnel->name, tunnel->tunnel_id,
876 					     session->session_id);
877 			atomic_long_inc(&session->stats.rx_cookie_discards);
878 			goto discard;
879 		}
880 		ptr += session->peer_cookie_len;
881 	}
882 
883 	/* Handle the optional sequence numbers. Sequence numbers are
884 	 * in different places for L2TPv2 and L2TPv3.
885 	 *
886 	 * If we are the LAC, enable/disable sequence numbers under
887 	 * the control of the LNS.  If no sequence numbers present but
888 	 * we were expecting them, discard frame.
889 	 */
890 	L2TP_SKB_CB(skb)->has_seq = 0;
891 	if (tunnel->version == L2TP_HDR_VER_2) {
892 		if (hdrflags & L2TP_HDRFLAG_S) {
893 			/* Store L2TP info in the skb */
894 			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
895 			L2TP_SKB_CB(skb)->has_seq = 1;
896 			ptr += 2;
897 			/* Skip past nr in the header */
898 			ptr += 2;
899 
900 		}
901 	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
902 		u32 l2h = ntohl(*(__be32 *)ptr);
903 
904 		if (l2h & 0x40000000) {
905 			/* Store L2TP info in the skb */
906 			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
907 			L2TP_SKB_CB(skb)->has_seq = 1;
908 		}
909 		ptr += 4;
910 	}
911 
912 	if (L2TP_SKB_CB(skb)->has_seq) {
913 		/* Received a packet with sequence numbers. If we're the LAC,
914 		 * check if we are sending sequence numbers and if not,
915 		 * configure it so.
916 		 */
917 		if (!session->lns_mode && !session->send_seq) {
918 			trace_session_seqnum_lns_enable(session);
919 			session->send_seq = 1;
920 			l2tp_session_set_header_len(session, tunnel->version,
921 						    tunnel->encap);
922 		}
923 	} else {
924 		/* No sequence numbers.
925 		 * If user has configured mandatory sequence numbers, discard.
926 		 */
927 		if (session->recv_seq) {
928 			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
929 					     session->name);
930 			atomic_long_inc(&session->stats.rx_seq_discards);
931 			goto discard;
932 		}
933 
934 		/* If we're the LAC and we're sending sequence numbers, the
935 		 * LNS has requested that we no longer send sequence numbers.
936 		 * If we're the LNS and we're sending sequence numbers, the
937 		 * LAC is broken. Discard the frame.
938 		 */
939 		if (!session->lns_mode && session->send_seq) {
940 			trace_session_seqnum_lns_disable(session);
941 			session->send_seq = 0;
942 			l2tp_session_set_header_len(session, tunnel->version,
943 						    tunnel->encap);
944 		} else if (session->send_seq) {
945 			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
946 					     session->name);
947 			atomic_long_inc(&session->stats.rx_seq_discards);
948 			goto discard;
949 		}
950 	}
951 
952 	/* Session data offset is defined only for L2TPv2 and is
953 	 * indicated by an optional 16-bit value in the header.
954 	 */
955 	if (tunnel->version == L2TP_HDR_VER_2) {
956 		/* If offset bit set, skip it. */
957 		if (hdrflags & L2TP_HDRFLAG_O) {
958 			offset = ntohs(*(__be16 *)ptr);
959 			ptr += 2 + offset;
960 		}
961 	}
962 
963 	offset = ptr - optr;
964 	if (!pskb_may_pull(skb, offset))
965 		goto discard;
966 
967 	__skb_pull(skb, offset);
968 
969 	/* Prepare skb for adding to the session's reorder_q.  Hold
970 	 * packets for max reorder_timeout or 1 second if not
971 	 * reordering.
972 	 */
973 	L2TP_SKB_CB(skb)->length = length;
974 	L2TP_SKB_CB(skb)->expires = jiffies +
975 		(session->reorder_timeout ? session->reorder_timeout : HZ);
976 
977 	/* Add packet to the session's receive queue. Reordering is done here, if
978 	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
979 	 */
980 	if (L2TP_SKB_CB(skb)->has_seq) {
981 		if (l2tp_recv_data_seq(session, skb))
982 			goto discard;
983 	} else {
984 		/* No sequence numbers. Add the skb to the tail of the
985 		 * reorder queue. This ensures that it will be
986 		 * delivered after all previous sequenced skbs.
987 		 */
988 		skb_queue_tail(&session->reorder_q, skb);
989 	}
990 
991 	/* Try to dequeue as many skbs from reorder_q as we can. */
992 	l2tp_recv_dequeue(session);
993 
994 	return;
995 
996 discard:
997 	atomic_long_inc(&session->stats.rx_errors);
998 	kfree_skb(skb);
999 }
1000 EXPORT_SYMBOL_GPL(l2tp_recv_common);
1001 
1002 /* Drop skbs from the session's reorder_q
1003  */
1004 static void l2tp_session_queue_purge(struct l2tp_session *session)
1005 {
1006 	struct sk_buff *skb = NULL;
1007 
1008 	while ((skb = skb_dequeue(&session->reorder_q))) {
1009 		atomic_long_inc(&session->stats.rx_errors);
1010 		kfree_skb(skb);
1011 	}
1012 }
1013 
1014 /* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
1015 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1016 {
1017 	struct l2tp_session *session = NULL;
1018 	struct l2tp_tunnel *tunnel = NULL;
1019 	struct net *net = sock_net(sk);
1020 	unsigned char *ptr, *optr;
1021 	u16 hdrflags;
1022 	u16 version;
1023 	int length;
1024 
1025 	/* UDP has verified checksum */
1026 
1027 	/* UDP always verifies the packet length. */
1028 	__skb_pull(skb, sizeof(struct udphdr));
1029 
1030 	/* Short packet? */
1031 	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
1032 		goto pass;
1033 
1034 	/* Point to L2TP header */
1035 	optr = skb->data;
1036 	ptr = skb->data;
1037 
1038 	/* Get L2TP header flags */
1039 	hdrflags = ntohs(*(__be16 *)ptr);
1040 
1041 	/* Get protocol version */
1042 	version = hdrflags & L2TP_HDR_VER_MASK;
1043 
1044 	/* Get length of L2TP packet */
1045 	length = skb->len;
1046 
1047 	/* If type is control packet, it is handled by userspace. */
1048 	if (hdrflags & L2TP_HDRFLAG_T)
1049 		goto pass;
1050 
1051 	/* Skip flags */
1052 	ptr += 2;
1053 
1054 	if (version == L2TP_HDR_VER_2) {
1055 		u16 tunnel_id, session_id;
1056 
1057 		/* If length is present, skip it */
1058 		if (hdrflags & L2TP_HDRFLAG_L)
1059 			ptr += 2;
1060 
1061 		/* Extract tunnel and session ID */
1062 		tunnel_id = ntohs(*(__be16 *)ptr);
1063 		ptr += 2;
1064 		session_id = ntohs(*(__be16 *)ptr);
1065 		ptr += 2;
1066 
1067 		session = l2tp_v2_session_get(net, tunnel_id, session_id);
1068 	} else {
1069 		u32 session_id;
1070 
1071 		ptr += 2;	/* skip reserved bits */
1072 		session_id = ntohl(*(__be32 *)ptr);
1073 		ptr += 4;
1074 
1075 		session = l2tp_v3_session_get(net, sk, session_id);
1076 	}
1077 
1078 	if (!session || !session->recv_skb) {
1079 		if (session)
1080 			l2tp_session_put(session);
1081 
1082 		/* Not found? Pass to userspace to deal with */
1083 		goto pass;
1084 	}
1085 
1086 	tunnel = session->tunnel;
1087 
1088 	/* Check protocol version */
1089 	if (version != tunnel->version) {
1090 		l2tp_session_put(session);
1091 		goto invalid;
1092 	}
1093 
1094 	if (version == L2TP_HDR_VER_3 &&
1095 	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
1096 		l2tp_session_put(session);
1097 		goto invalid;
1098 	}
1099 
1100 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
1101 	l2tp_session_put(session);
1102 
1103 	return 0;
1104 
1105 invalid:
1106 	atomic_long_inc(&tunnel->stats.rx_invalid);
1107 
1108 pass:
1109 	/* Put UDP header back */
1110 	__skb_push(skb, sizeof(struct udphdr));
1111 
1112 	return 1;
1113 }
1114 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
1115 
1116 /* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
1117 static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
1118 				    __be16 port, u32 info, u8 *payload)
1119 {
1120 	sk->sk_err = err;
1121 	sk_error_report(sk);
1122 
1123 	if (ip_hdr(skb)->version == IPVERSION) {
1124 		if (inet_test_bit(RECVERR, sk))
1125 			return ip_icmp_error(sk, skb, err, port, info, payload);
1126 #if IS_ENABLED(CONFIG_IPV6)
1127 	} else {
1128 		if (inet6_test_bit(RECVERR6, sk))
1129 			return ipv6_icmp_error(sk, skb, err, port, info, payload);
1130 #endif
1131 	}
1132 }
1133 
1134 /************************************************************************
1135  * Transmit handling
1136  ***********************************************************************/
1137 
1138 /* Build an L2TP header for the session into the buffer provided.
1139  */
1140 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1141 {
1142 	struct l2tp_tunnel *tunnel = session->tunnel;
1143 	__be16 *bufp = buf;
1144 	__be16 *optr = buf;
1145 	u16 flags = L2TP_HDR_VER_2;
1146 	u32 tunnel_id = tunnel->peer_tunnel_id;
1147 	u32 session_id = session->peer_session_id;
1148 
1149 	if (session->send_seq)
1150 		flags |= L2TP_HDRFLAG_S;
1151 
1152 	/* Setup L2TP header. */
1153 	*bufp++ = htons(flags);
1154 	*bufp++ = htons(tunnel_id);
1155 	*bufp++ = htons(session_id);
1156 	if (session->send_seq) {
1157 		*bufp++ = htons(session->ns);
1158 		*bufp++ = 0;
1159 		session->ns++;
1160 		session->ns &= 0xffff;
1161 		trace_session_seqnum_update(session);
1162 	}
1163 
1164 	return bufp - optr;
1165 }
1166 
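/* Build an L2TPv3 header (flags for UDP encap, session ID, optional cookie
 * and default L2-specific sublayer) into the buffer provided. Returns the
 * number of bytes written.
 */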
1167 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1168 {
1169 	struct l2tp_tunnel *tunnel = session->tunnel;
1170 	char *bufp = buf;
1171 	char *optr = bufp;
1172 
1173 	/* Setup L2TP header. The header differs slightly for UDP and
1174 	 * IP encapsulations. For UDP, there are 4 bytes of flags.
1175 	 */
1176 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1177 		u16 flags = L2TP_HDR_VER_3;
1178 		*((__be16 *)bufp) = htons(flags);
1179 		bufp += 2;
1180 		*((__be16 *)bufp) = 0;
1181 		bufp += 2;
1182 	}
1183 
1184 	*((__be32 *)bufp) = htonl(session->peer_session_id);
1185 	bufp += 4;
1186 	if (session->cookie_len) {
1187 		memcpy(bufp, &session->cookie[0], session->cookie_len);
1188 		bufp += session->cookie_len;
1189 	}
1190 	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1191 		u32 l2h = 0;
1192 
1193 		if (session->send_seq) {
1194 			l2h = 0x40000000 | session->ns;
1195 			session->ns++;
1196 			session->ns &= 0xffffff;
1197 			trace_session_seqnum_update(session);
1198 		}
1199 
1200 		*((__be32 *)bufp) = htonl(l2h);
1201 		bufp += 4;
1202 	}
1203 
1204 	return bufp - optr;
1205 }
1206 
1207 /* Queue the packet to IP for output: tunnel socket lock must be held */
1208 static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
1209 {
1210 	int err;
1211 
1212 	skb->ignore_df = 1;
1213 	skb_dst_drop(skb);
1214 #if IS_ENABLED(CONFIG_IPV6)
1215 	if (l2tp_sk_is_v6(tunnel->sock))
1216 		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
1217 	else
1218 #endif
1219 		err = ip_queue_xmit(tunnel->sock, skb, fl);
1220 
1221 	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
1222 }
1223 
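/* Add the session's L2TP header (and UDP header for UDP encap) and queue
 * the skb on the tunnel socket. On success, *len is updated with the length
 * used for tx byte statistics.
 */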
1224 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
1225 {
1226 	struct l2tp_tunnel *tunnel = session->tunnel;
1227 	unsigned int data_len = skb->len;
1228 	struct sock *sk = tunnel->sock;
1229 	int headroom, uhlen, udp_len;
1230 	int ret = NET_XMIT_SUCCESS;
1231 	struct inet_sock *inet;
1232 	struct udphdr *uh;
1233 
1234 	/* Check that there's enough headroom in the skb to insert IP,
1235 	 * UDP and L2TP headers. If not enough, expand it to
1236 	 * make room. Adjust truesize.
1237 	 */
1238 	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
1239 	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
1240 	if (skb_cow_head(skb, headroom)) {
1241 		kfree_skb(skb);
1242 		return NET_XMIT_DROP;
1243 	}
1244 
1245 	/* Setup L2TP header */
1246 	if (tunnel->version == L2TP_HDR_VER_2)
1247 		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
1248 	else
1249 		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
1250 
1251 	/* Reset control buffer */
1252 	memset(skb->cb, 0, sizeof(skb->cb));
1253 
1254 	nf_reset_ct(skb);
1255 
1256 	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
1257 	 * nested socket calls on the same lockdep socket class. This can
1258 	 * happen when data from a user socket is routed over l2tp, which uses
1259 	 * another userspace socket.
1260 	 */
1261 	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
1262 
1263 	if (sock_owned_by_user(sk)) {
1264 		kfree_skb(skb);
1265 		ret = NET_XMIT_DROP;
1266 		goto out_unlock;
1267 	}
1268 
1269 	/* The user-space may change the connection status for the user-space
1270 	 * provided socket at run time: we must check it under the socket lock
1271 	 */
1272 	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1273 		kfree_skb(skb);
1274 		ret = NET_XMIT_DROP;
1275 		goto out_unlock;
1276 	}
1277 
1278 	/* Report transmitted length before we add encap header, which keeps
1279 	 * statistics consistent for both UDP and IP encap tx/rx paths.
1280 	 */
1281 	*len = skb->len;
1282 
1283 	inet = inet_sk(sk);
1284 	switch (tunnel->encap) {
1285 	case L2TP_ENCAPTYPE_UDP:
1286 		/* Setup UDP header */
1287 		__skb_push(skb, sizeof(*uh));
1288 		skb_reset_transport_header(skb);
1289 		uh = udp_hdr(skb);
1290 		uh->source = inet->inet_sport;
1291 		uh->dest = inet->inet_dport;
1292 		udp_len = uhlen + session->hdr_len + data_len;
1293 		if (udp_len > U16_MAX) {
1294 			kfree_skb(skb);
1295 			ret = NET_XMIT_DROP;
1296 			goto out_unlock;
1297 		}
1298 		uh->len = htons(udp_len);
1299 
1300 		/* Calculate UDP checksum if configured to do so */
1301 #if IS_ENABLED(CONFIG_IPV6)
1302 		if (l2tp_sk_is_v6(sk))
1303 			udp6_set_csum(udp_get_no_check6_tx(sk),
1304 				      skb, &inet6_sk(sk)->saddr,
1305 				      &sk->sk_v6_daddr, udp_len);
1306 		else
1307 #endif
1308 			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1309 				     inet->inet_daddr, udp_len);
1310 		break;
1311 
1312 	case L2TP_ENCAPTYPE_IP:
1313 		break;
1314 	}
1315 
1316 	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
1317 
1318 out_unlock:
1319 	spin_unlock(&sk->sk_lock.slock);
1320 
1321 	return ret;
1322 }
1323 
1324 /* If caller requires the skb to have a ppp header, the header must be
1325  * inserted in the skb data before calling this function.
1326  */
1327 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1328 {
1329 	unsigned int len = 0;
1330 	int ret;
1331 
1332 	ret = l2tp_xmit_core(session, skb, &len);
1333 	if (ret == NET_XMIT_SUCCESS) {
1334 		atomic_long_inc(&session->tunnel->stats.tx_packets);
1335 		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1336 		atomic_long_inc(&session->stats.tx_packets);
1337 		atomic_long_add(len, &session->stats.tx_bytes);
1338 	} else {
1339 		atomic_long_inc(&session->tunnel->stats.tx_errors);
1340 		atomic_long_inc(&session->stats.tx_errors);
1341 	}
1342 	return ret;
1343 }
1344 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1345 
1346 /*****************************************************************************
1347  * Tunnel and session create/destroy.
1348  *****************************************************************************/
1349 
1350 /* Remove an l2tp session from l2tp_core's lists. */
1351 static void l2tp_session_unhash(struct l2tp_session *session)
1352 {
1353 	struct l2tp_tunnel *tunnel = session->tunnel;
1354 
1355 	if (tunnel) {
1356 		struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1357 		struct l2tp_session *removed = session;
1358 
1359 		spin_lock_bh(&tunnel->list_lock);
1360 		spin_lock_bh(&pn->l2tp_session_idr_lock);
1361 
1362 		/* Remove from the per-tunnel list */
1363 		list_del_init(&session->list);
1364 
1365 		/* Remove from per-net IDR */
1366 		if (tunnel->version == L2TP_HDR_VER_3) {
1367 			if (hash_hashed(&session->hlist))
1368 				l2tp_session_collision_del(pn, session);
1369 			else
1370 				removed = idr_remove(&pn->l2tp_v3_session_idr,
1371 						     session->session_id);
1372 		} else {
1373 			u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
1374 							      session->session_id);
1375 			removed = idr_remove(&pn->l2tp_v2_session_idr,
1376 					     session_key);
1377 		}
1378 		WARN_ON_ONCE(removed && removed != session);
1379 
1380 		spin_unlock_bh(&pn->l2tp_session_idr_lock);
1381 		spin_unlock_bh(&tunnel->list_lock);
1382 	}
1383 }
1384 
1385 /* When the tunnel is closed, all the attached sessions need to go too.
1386  */
1387 static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1388 {
1389 	struct l2tp_session *session;
1390 
1391 	spin_lock_bh(&tunnel->list_lock);
1392 	tunnel->acpt_newsess = false;
1393 	list_for_each_entry(session, &tunnel->session_list, list)
1394 		l2tp_session_delete(session);
1395 	spin_unlock_bh(&tunnel->list_lock);
1396 }
1397 
1398 /* Tunnel socket destroy hook for UDP encapsulation */
1399 static void l2tp_udp_encap_destroy(struct sock *sk)
1400 {
1401 	struct l2tp_tunnel *tunnel;
1402 
1403 	tunnel = l2tp_sk_to_tunnel(sk);
1404 	if (tunnel) {
1405 		l2tp_tunnel_delete(tunnel);
1406 		l2tp_tunnel_put(tunnel);
1407 	}
1408 }
1409 
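/* Remove the tunnel from the per-net tunnel IDR. */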
1410 static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
1411 {
1412 	struct l2tp_net *pn = l2tp_pernet(net);
1413 
1414 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1415 	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
1416 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1417 }
1418 
1419 /* Workqueue tunnel deletion function */
1420 static void l2tp_tunnel_del_work(struct work_struct *work)
1421 {
1422 	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1423 						  del_work);
1424 
1425 	l2tp_tunnel_closeall(tunnel);
1426 
1427 	/* If the tunnel socket was created within the kernel, use
1428 	 * the sk API to release it here.
1429 	 */
1430 	if (tunnel->fd < 0) {
1431 		struct socket *sock = tunnel->sock->sk_socket;
1432 
1433 		if (sock) {
1434 			kernel_sock_shutdown(sock, SHUT_RDWR);
1435 			sock_release(sock);
1436 		}
1437 	}
1438 
1439 	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
1440 	/* drop initial ref */
1441 	l2tp_tunnel_put(tunnel);
1442 
1443 	/* drop workqueue ref */
1444 	l2tp_tunnel_put(tunnel);
1445 }
1446 
1447 /* Create a socket for the tunnel, if one isn't set up by
1448  * userspace. This is used for static tunnels where there is no
1449  * managing L2TP daemon.
1450  *
1451  * Since we don't want these sockets to keep a namespace alive by
1452  * themselves, we drop the socket's namespace refcount after creation.
1453  * These sockets are freed when the namespace exits using the pernet
1454  * exit hook.
1455  */
1456 static int l2tp_tunnel_sock_create(struct net *net,
1457 				   u32 tunnel_id,
1458 				   u32 peer_tunnel_id,
1459 				   struct l2tp_tunnel_cfg *cfg,
1460 				   struct socket **sockp)
1461 {
1462 	int err = -EINVAL;
1463 	struct socket *sock = NULL;
1464 	struct udp_port_cfg udp_conf;
1465 
1466 	switch (cfg->encap) {
1467 	case L2TP_ENCAPTYPE_UDP:
1468 		memset(&udp_conf, 0, sizeof(udp_conf));
1469 
1470 #if IS_ENABLED(CONFIG_IPV6)
1471 		if (cfg->local_ip6 && cfg->peer_ip6) {
1472 			udp_conf.family = AF_INET6;
1473 			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1474 			       sizeof(udp_conf.local_ip6));
1475 			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1476 			       sizeof(udp_conf.peer_ip6));
1477 			udp_conf.use_udp6_tx_checksums =
1478 			  !cfg->udp6_zero_tx_checksums;
1479 			udp_conf.use_udp6_rx_checksums =
1480 			  !cfg->udp6_zero_rx_checksums;
1481 		} else
1482 #endif
1483 		{
1484 			udp_conf.family = AF_INET;
1485 			udp_conf.local_ip = cfg->local_ip;
1486 			udp_conf.peer_ip = cfg->peer_ip;
1487 			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1488 		}
1489 
1490 		udp_conf.local_udp_port = htons(cfg->local_udp_port);
1491 		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1492 
1493 		err = udp_sock_create(net, &udp_conf, &sock);
1494 		if (err < 0)
1495 			goto out;
1496 
1497 		break;
1498 
1499 	case L2TP_ENCAPTYPE_IP:
1500 #if IS_ENABLED(CONFIG_IPV6)
1501 		if (cfg->local_ip6 && cfg->peer_ip6) {
1502 			struct sockaddr_l2tpip6 ip6_addr = {0};
1503 
1504 			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1505 					       IPPROTO_L2TP, &sock);
1506 			if (err < 0)
1507 				goto out;
1508 
1509 			ip6_addr.l2tp_family = AF_INET6;
1510 			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1511 			       sizeof(ip6_addr.l2tp_addr));
1512 			ip6_addr.l2tp_conn_id = tunnel_id;
1513 			err = kernel_bind(sock, (struct sockaddr_unsized *)&ip6_addr,
1514 					  sizeof(ip6_addr));
1515 			if (err < 0)
1516 				goto out;
1517 
1518 			ip6_addr.l2tp_family = AF_INET6;
1519 			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1520 			       sizeof(ip6_addr.l2tp_addr));
1521 			ip6_addr.l2tp_conn_id = peer_tunnel_id;
1522 			err = kernel_connect(sock,
1523 					     (struct sockaddr_unsized *)&ip6_addr,
1524 					     sizeof(ip6_addr), 0);
1525 			if (err < 0)
1526 				goto out;
1527 		} else
1528 #endif
1529 		{
1530 			struct sockaddr_l2tpip ip_addr = {0};
1531 
1532 			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1533 					       IPPROTO_L2TP, &sock);
1534 			if (err < 0)
1535 				goto out;
1536 
1537 			ip_addr.l2tp_family = AF_INET;
1538 			ip_addr.l2tp_addr = cfg->local_ip;
1539 			ip_addr.l2tp_conn_id = tunnel_id;
1540 			err = kernel_bind(sock, (struct sockaddr_unsized *)&ip_addr,
1541 					  sizeof(ip_addr));
1542 			if (err < 0)
1543 				goto out;
1544 
1545 			ip_addr.l2tp_family = AF_INET;
1546 			ip_addr.l2tp_addr = cfg->peer_ip;
1547 			ip_addr.l2tp_conn_id = peer_tunnel_id;
1548 			err = kernel_connect(sock, (struct sockaddr_unsized *)&ip_addr,
1549 					     sizeof(ip_addr), 0);
1550 			if (err < 0)
1551 				goto out;
1552 		}
1553 		break;
1554 
1555 	default:
1556 		goto out;
1557 	}
1558 
1559 out:
1560 	*sockp = sock;
1561 	if (err < 0 && sock) {
1562 		kernel_sock_shutdown(sock, SHUT_RDWR);
1563 		sock_release(sock);
1564 		*sockp = NULL;
1565 	}
1566 
1567 	return err;
1568 }
1569 
1570 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1571 		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1572 {
1573 	struct l2tp_tunnel *tunnel = NULL;
1574 	int err;
1575 	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1576 
1577 	if (cfg)
1578 		encap = cfg->encap;
1579 
1580 	tunnel = kzalloc_obj(*tunnel, GFP_KERNEL);
1581 	if (!tunnel) {
1582 		err = -ENOMEM;
1583 		goto err;
1584 	}
1585 
1586 	tunnel->version = version;
1587 	tunnel->tunnel_id = tunnel_id;
1588 	tunnel->peer_tunnel_id = peer_tunnel_id;
1589 
1590 	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1591 	spin_lock_init(&tunnel->list_lock);
1592 	tunnel->acpt_newsess = true;
1593 	INIT_LIST_HEAD(&tunnel->session_list);
1594 
1595 	tunnel->encap = encap;
1596 
1597 	refcount_set(&tunnel->ref_count, 1);
1598 	tunnel->fd = fd;
1599 
1600 	/* Init delete workqueue struct */
1601 	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1602 
1603 	err = 0;
1604 err:
1605 	if (tunnelp)
1606 		*tunnelp = tunnel;
1607 
1608 	return err;
1609 }
1610 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1611 
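/* Check that a socket is usable as a tunnel socket: it must be a UDP or
 * L2TPIP datagram socket in the right namespace which is not already in use
 * by another tunnel.
 */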
1612 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1613 				enum l2tp_encap_type encap)
1614 {
1615 	struct l2tp_tunnel *tunnel;
1616 
1617 	if (!net_eq(sock_net(sk), net))
1618 		return -EINVAL;
1619 
1620 	if (sk->sk_type != SOCK_DGRAM)
1621 		return -EPROTONOSUPPORT;
1622 
1623 	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1624 		return -EPROTONOSUPPORT;
1625 
1626 	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1627 	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1628 		return -EPROTONOSUPPORT;
1629 
1630 	if (encap == L2TP_ENCAPTYPE_UDP && sk->sk_user_data)
1631 		return -EBUSY;
1632 
1633 	tunnel = l2tp_sk_to_tunnel(sk);
1634 	if (tunnel) {
1635 		l2tp_tunnel_put(tunnel);
1636 		return -EBUSY;
1637 	}
1638 
1639 	return 0;
1640 }
1641 
1642 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1643 			 struct l2tp_tunnel_cfg *cfg)
1644 {
1645 	struct l2tp_net *pn = l2tp_pernet(net);
1646 	u32 tunnel_id = tunnel->tunnel_id;
1647 	struct socket *sock;
1648 	struct sock *sk;
1649 	int ret;
1650 
1651 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1652 	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
1653 			    GFP_ATOMIC);
1654 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1655 	if (ret)
1656 		return ret == -ENOSPC ? -EEXIST : ret;
1657 
1658 	if (tunnel->fd < 0) {
1659 		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1660 					      tunnel->peer_tunnel_id, cfg,
1661 					      &sock);
1662 		if (ret < 0)
1663 			goto err;
1664 	} else {
1665 		sock = sockfd_lookup(tunnel->fd, &ret);
1666 		if (!sock)
1667 			goto err;
1668 	}
1669 
1670 	sk = sock->sk;
1671 	lock_sock(sk);
1672 	write_lock_bh(&sk->sk_callback_lock);
1673 	ret = l2tp_validate_socket(sk, net, tunnel->encap);
1674 	if (ret < 0)
1675 		goto err_inval_sock;
1676 	write_unlock_bh(&sk->sk_callback_lock);
1677 
1678 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1679 		struct udp_tunnel_sock_cfg udp_cfg = {
1680 			.encap_type = UDP_ENCAP_L2TPINUDP,
1681 			.encap_rcv = l2tp_udp_encap_recv,
1682 			.encap_err_rcv = l2tp_udp_encap_err_recv,
1683 			.encap_destroy = l2tp_udp_encap_destroy,
1684 		};
1685 
1686 		setup_udp_tunnel_sock(net, sock, &udp_cfg);
1687 	}
1688 
1689 	sk->sk_allocation = GFP_ATOMIC;
1690 	release_sock(sk);
1691 
1692 	sock_hold(sk);
1693 	tunnel->sock = sk;
1694 	tunnel->l2tp_net = net;
1695 
1696 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1697 	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
1698 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1699 
1700 	trace_register_tunnel(tunnel);
1701 
1702 	if (tunnel->fd >= 0)
1703 		sockfd_put(sock);
1704 
1705 	return 0;
1706 
1707 err_inval_sock:
1708 	write_unlock_bh(&sk->sk_callback_lock);
1709 	release_sock(sk);
1710 
1711 	if (tunnel->fd < 0)
1712 		sock_release(sock);
1713 	else
1714 		sockfd_put(sock);
1715 err:
1716 	l2tp_tunnel_remove(net, tunnel);
1717 	return ret;
1718 }
1719 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1720 
1721 /* This function is used by the netlink TUNNEL_DELETE command.
1722  */
1723 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1724 {
1725 	if (!test_and_set_bit(0, &tunnel->dead)) {
1726 		trace_delete_tunnel(tunnel);
1727 		refcount_inc(&tunnel->ref_count);
1728 		queue_work(l2tp_wq, &tunnel->del_work);
1729 	}
1730 }
1731 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1732 
1733 void l2tp_session_delete(struct l2tp_session *session)
1734 {
1735 	if (!test_and_set_bit(0, &session->dead)) {
1736 		trace_delete_session(session);
1737 		refcount_inc(&session->ref_count);
1738 		queue_work(l2tp_wq, &session->del_work);
1739 	}
1740 }
1741 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1742 
1743 /* Workqueue session deletion function */
1744 static void l2tp_session_del_work(struct work_struct *work)
1745 {
1746 	struct l2tp_session *session = container_of(work, struct l2tp_session,
1747 						    del_work);
1748 
1749 	l2tp_session_unhash(session);
1750 	l2tp_session_queue_purge(session);
1751 	if (session->session_close)
1752 		(*session->session_close)(session);
1753 
1754 	/* drop initial ref */
1755 	l2tp_session_put(session);
1756 
1757 	/* drop workqueue ref */
1758 	l2tp_session_put(session);
1759 }
1760 
1761 /* We come here whenever a session's send_seq, cookie_len or
1762  * l2specific_type parameters are set.
1763  */
1764 void l2tp_session_set_header_len(struct l2tp_session *session, int version,
1765 				 enum l2tp_encap_type encap)
1766 {
1767 	if (version == L2TP_HDR_VER_2) {
1768 		session->hdr_len = 6;
1769 		if (session->send_seq)
1770 			session->hdr_len += 4;
1771 	} else {
1772 		session->hdr_len = 4 + session->cookie_len;
1773 		session->hdr_len += l2tp_get_l2specific_len(session);
1774 		if (encap == L2TP_ENCAPTYPE_UDP)
1775 			session->hdr_len += 4;
1776 	}
1777 }
1778 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1779 
1780 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1781 					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1782 {
1783 	struct l2tp_session *session;
1784 
1785 	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1786 	if (session) {
1787 		session->magic = L2TP_SESSION_MAGIC;
1788 
1789 		session->session_id = session_id;
1790 		session->peer_session_id = peer_session_id;
1791 		session->nr = 0;
1792 		if (tunnel->version == L2TP_HDR_VER_2)
1793 			session->nr_max = 0xffff;
1794 		else
1795 			session->nr_max = 0xffffff;
1796 		session->nr_window_size = session->nr_max / 2;
1797 		session->nr_oos_count_max = 4;
1798 
1799 		/* Use NR of first received packet */
1800 		session->reorder_skip = 1;
1801 
1802 		sprintf(&session->name[0], "sess %u/%u",
1803 			tunnel->tunnel_id, session->session_id);
1804 
1805 		skb_queue_head_init(&session->reorder_q);
1806 
1807 		session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
1808 		INIT_HLIST_NODE(&session->hlist);
1809 		INIT_LIST_HEAD(&session->clist);
1810 		INIT_LIST_HEAD(&session->list);
1811 		INIT_WORK(&session->del_work, l2tp_session_del_work);
1812 
1813 		if (cfg) {
1814 			session->pwtype = cfg->pw_type;
1815 			session->send_seq = cfg->send_seq;
1816 			session->recv_seq = cfg->recv_seq;
1817 			session->lns_mode = cfg->lns_mode;
1818 			session->reorder_timeout = cfg->reorder_timeout;
1819 			session->l2specific_type = cfg->l2specific_type;
1820 			session->cookie_len = cfg->cookie_len;
1821 			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1822 			session->peer_cookie_len = cfg->peer_cookie_len;
1823 			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1824 		}
1825 
1826 		l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
1827 
1828 		refcount_set(&session->ref_count, 1);
1829 
1830 		return session;
1831 	}
1832 
1833 	return ERR_PTR(-ENOMEM);
1834 }
1835 EXPORT_SYMBOL_GPL(l2tp_session_create);
1836 
1837 /*****************************************************************************
1838  * Init and cleanup
1839  *****************************************************************************/
1840 
1841 static __net_init int l2tp_init_net(struct net *net)
1842 {
1843 	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1844 
1845 	idr_init(&pn->l2tp_tunnel_idr);
1846 	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1847 
1848 	idr_init(&pn->l2tp_v2_session_idr);
1849 	idr_init(&pn->l2tp_v3_session_idr);
1850 	spin_lock_init(&pn->l2tp_session_idr_lock);
1851 
1852 	return 0;
1853 }
1854 
1855 static __net_exit void l2tp_pre_exit_net(struct net *net)
1856 {
1857 	struct l2tp_net *pn = l2tp_pernet(net);
1858 	struct l2tp_tunnel *tunnel = NULL;
1859 	unsigned long tunnel_id, tmp;
1860 
1861 	rcu_read_lock_bh();
1862 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
1863 		if (tunnel)
1864 			l2tp_tunnel_delete(tunnel);
1865 	}
1866 	rcu_read_unlock_bh();
1867 
1868 	if (l2tp_wq) {
1869 		/* Run all TUNNEL_DELETE work items just queued. */
1870 		__flush_workqueue(l2tp_wq);
1871 
1872 		/* Each TUNNEL_DELETE work item will queue a SESSION_DELETE
1873 		 * work item for each session in the tunnel. Flush the
1874 		 * workqueue again to process these.
1875 		 */
1876 		__flush_workqueue(l2tp_wq);
1877 	}
1878 }
1879 
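/* idr_for_each callback used at net exit to warn about entries which should
 * already have been removed.
 */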
1880 static int l2tp_idr_item_unexpected(int id, void *p, void *data)
1881 {
1882 	const char *idr_name = data;
1883 
1884 	pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id);
1885 	WARN_ON_ONCE(1);
1886 	return 1;
1887 }
1888 
1889 static __net_exit void l2tp_exit_net(struct net *net)
1890 {
1891 	struct l2tp_net *pn = l2tp_pernet(net);
1892 
1893 	/* Our per-net IDRs should be empty. Check that is so, to
1894 	 * help catch cleanup races or refcnt leaks.
1895 	 */
1896 	idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected,
1897 		     "v2_session");
1898 	idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected,
1899 		     "v3_session");
1900 	idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected,
1901 		     "tunnel");
1902 
1903 	idr_destroy(&pn->l2tp_v2_session_idr);
1904 	idr_destroy(&pn->l2tp_v3_session_idr);
1905 	idr_destroy(&pn->l2tp_tunnel_idr);
1906 }
1907 
1908 static struct pernet_operations l2tp_net_ops = {
1909 	.init = l2tp_init_net,
1910 	.exit = l2tp_exit_net,
1911 	.pre_exit = l2tp_pre_exit_net,
1912 	.id   = &l2tp_net_id,
1913 	.size = sizeof(struct l2tp_net),
1914 };
1915 
1916 static int __init l2tp_init(void)
1917 {
1918 	int rc = 0;
1919 
1920 	rc = register_pernet_device(&l2tp_net_ops);
1921 	if (rc)
1922 		goto out;
1923 
1924 	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1925 	if (!l2tp_wq) {
1926 		pr_err("alloc_workqueue failed\n");
1927 		unregister_pernet_device(&l2tp_net_ops);
1928 		rc = -ENOMEM;
1929 		goto out;
1930 	}
1931 
1932 	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1933 
1934 out:
1935 	return rc;
1936 }
1937 
1938 static void __exit l2tp_exit(void)
1939 {
1940 	unregister_pernet_device(&l2tp_net_ops);
1941 	if (l2tp_wq) {
1942 		destroy_workqueue(l2tp_wq);
1943 		l2tp_wq = NULL;
1944 	}
1945 }
1946 
1947 module_init(l2tp_init);
1948 module_exit(l2tp_exit);
1949 
1950 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1951 MODULE_DESCRIPTION("L2TP core");
1952 MODULE_LICENSE("GPL");
1953 MODULE_VERSION(L2TP_DRV_VERSION);
1954