xref: /linux/net/l2tp/l2tp_core.c (revision 2fe05e1139a555ae91f00a812cb9520e7d3022ab)
1 /*
2  * L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:	Martijn van Oosterhout <kleptog@svana.org>
10  *		James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *		Michal Ostrowski <mostrows@speakeasy.net>
13  *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *		David S. Miller (davem@redhat.com)
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License version 2 as
18  * published by the Free Software Foundation.
19  */
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 
23 #include <linux/module.h>
24 #include <linux/string.h>
25 #include <linux/list.h>
26 #include <linux/rculist.h>
27 #include <linux/uaccess.h>
28 
29 #include <linux/kernel.h>
30 #include <linux/spinlock.h>
31 #include <linux/kthread.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/errno.h>
35 #include <linux/jiffies.h>
36 
37 #include <linux/netdevice.h>
38 #include <linux/net.h>
39 #include <linux/inetdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/init.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/udp.h>
45 #include <linux/l2tp.h>
46 #include <linux/hash.h>
47 #include <linux/sort.h>
48 #include <linux/file.h>
49 #include <linux/nsproxy.h>
50 #include <net/net_namespace.h>
51 #include <net/netns/generic.h>
52 #include <net/dst.h>
53 #include <net/ip.h>
54 #include <net/udp.h>
55 #include <net/udp_tunnel.h>
56 #include <net/inet_common.h>
57 #include <net/xfrm.h>
58 #include <net/protocol.h>
59 #include <net/inet6_connection_sock.h>
60 #include <net/inet_ecn.h>
61 #include <net/ip6_route.h>
62 #include <net/ip6_checksum.h>
63 
64 #include <asm/byteorder.h>
65 #include <linux/atomic.h>
66 
67 #include "l2tp_core.h"
68 
69 #define L2TP_DRV_VERSION	"V2.0"
70 
71 /* L2TP header constants */
72 #define L2TP_HDRFLAG_T	   0x8000
73 #define L2TP_HDRFLAG_L	   0x4000
74 #define L2TP_HDRFLAG_S	   0x0800
75 #define L2TP_HDRFLAG_O	   0x0200
76 #define L2TP_HDRFLAG_P	   0x0100
77 
78 #define L2TP_HDR_VER_MASK  0x000F
79 #define L2TP_HDR_VER_2	   0x0002
80 #define L2TP_HDR_VER_3	   0x0003
81 
82 /* L2TPv3 default L2-specific sublayer */
83 #define L2TP_SLFLAG_S	   0x40000000
84 #define L2TP_SL_SEQ_MASK   0x00ffffff
85 
86 #define L2TP_HDR_SIZE_SEQ		10
87 #define L2TP_HDR_SIZE_NOSEQ		6
88 
89 /* Default trace flags */
90 #define L2TP_DEFAULT_DEBUG_FLAGS	0
91 
92 /* Private data stored for received packets in the skb.
93  */
94 struct l2tp_skb_cb {
95 	u32			ns;
96 	u16			has_seq;
97 	u16			length;
98 	unsigned long		expires;
99 };
100 
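/* L2TP per-packet state is stored in skb->cb[] beyond the inet control
 * block so that it does not overlap data the IP stack still uses while
 * the skb is queued.
 */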
101 #define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &(skb)->cb[sizeof(struct inet_skb_parm)])
102 
103 static atomic_t l2tp_tunnel_count;
104 static atomic_t l2tp_session_count;
105 static struct workqueue_struct *l2tp_wq;
106 
107 /* per-net private data for this module */
108 static unsigned int l2tp_net_id;
109 struct l2tp_net {
110 	struct list_head l2tp_tunnel_list;
111 	spinlock_t l2tp_tunnel_list_lock;
112 	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
113 	spinlock_t l2tp_session_hlist_lock;
114 };
115 
116 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117 
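/* Fetch the tunnel context stored in the socket's sk_user_data. */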
118 static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
119 {
120 	return sk->sk_user_data;
121 }
122 
123 static inline struct l2tp_net *l2tp_pernet(const struct net *net)
124 {
125 	BUG_ON(!net);
126 
127 	return net_generic(net, l2tp_net_id);
128 }
129 
130 /* Tunnel reference counts. Incremented per session that is added to
131  * the tunnel.
132  */
133 static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
134 {
135 	refcount_inc(&tunnel->ref_count);
136 }
137 
138 static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
139 {
140 	if (refcount_dec_and_test(&tunnel->ref_count))
141 		l2tp_tunnel_free(tunnel);
142 }
143 #ifdef L2TP_REFCNT_DEBUG
144 #define l2tp_tunnel_inc_refcount(_t)					\
145 do {									\
146 	pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",	\
147 		 __func__, __LINE__, (_t)->name,			\
148 		 refcount_read(&(_t)->ref_count));			\
149 	l2tp_tunnel_inc_refcount_1(_t);					\
150 } while (0)
151 #define l2tp_tunnel_dec_refcount(_t)					\
152 do {									\
153 	pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",	\
154 		 __func__, __LINE__, (_t)->name,			\
155 		 refcount_read(&(_t)->ref_count));			\
156 	l2tp_tunnel_dec_refcount_1(_t);					\
157 } while (0)
158 #else
159 #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
160 #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
161 #endif
162 
163 /* Session hash global list for L2TPv3.
164  * The session_id SHOULD be random according to RFC3931, but several
165  * L2TP implementations use incrementing session_ids.  So we do a real
166  * hash on the session_id, rather than a simple bitmask.
167  */
168 static inline struct hlist_head *
169 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
170 {
171 	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
172 
173 }
174 
175 /* Lookup the tunnel socket, possibly involving the fs code if the socket is
176  * owned by userspace.  A struct sock returned from this function must be
177  * released using l2tp_tunnel_sock_put once you're done with it.
178  */
179 static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
180 {
181 	int err = 0;
182 	struct socket *sock = NULL;
183 	struct sock *sk = NULL;
184 
185 	if (!tunnel)
186 		goto out;
187 
188 	if (tunnel->fd >= 0) {
189 		/* Socket is owned by userspace, who might be in the process
190 		 * of closing it.  Look the socket up using the fd to ensure
191 		 * consistency.
192 		 */
193 		sock = sockfd_lookup(tunnel->fd, &err);
194 		if (sock)
195 			sk = sock->sk;
196 	} else {
197 		/* Socket is owned by kernelspace */
198 		sk = tunnel->sock;
199 		sock_hold(sk);
200 	}
201 
202 out:
203 	return sk;
204 }
205 
206 /* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup */
207 static void l2tp_tunnel_sock_put(struct sock *sk)
208 {
209 	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
210 	if (tunnel) {
211 		if (tunnel->fd >= 0) {
212 			/* Socket is owned by userspace */
213 			sockfd_put(sk->sk_socket);
214 		}
215 		sock_put(sk);
216 	}
217 	sock_put(sk);
218 }
219 
220 /* Session hash list.
221  * The session_id SHOULD be random according to RFC2661, but several
222  * L2TP implementations (Cisco and Microsoft) use incrementing
223  * session_ids.  So we do a real hash on the session_id, rather than a
224  * simple bitmask.
225  */
226 static inline struct hlist_head *
227 l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
228 {
229 	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
230 }
231 
232 /* Lookup a session. A new reference is held on the returned session.
233  * Optionally calls session->ref() too if do_ref is true.
234  */
235 struct l2tp_session *l2tp_session_get(const struct net *net,
236 				      struct l2tp_tunnel *tunnel,
237 				      u32 session_id, bool do_ref)
238 {
239 	struct hlist_head *session_list;
240 	struct l2tp_session *session;
241 
242 	if (!tunnel) {
243 		struct l2tp_net *pn = l2tp_pernet(net);
244 
245 		session_list = l2tp_session_id_hash_2(pn, session_id);
246 
247 		rcu_read_lock_bh();
248 		hlist_for_each_entry_rcu(session, session_list, global_hlist) {
249 			if (session->session_id == session_id) {
250 				l2tp_session_inc_refcount(session);
251 				if (do_ref && session->ref)
252 					session->ref(session);
253 				rcu_read_unlock_bh();
254 
255 				return session;
256 			}
257 		}
258 		rcu_read_unlock_bh();
259 
260 		return NULL;
261 	}
262 
263 	session_list = l2tp_session_id_hash(tunnel, session_id);
264 	read_lock_bh(&tunnel->hlist_lock);
265 	hlist_for_each_entry(session, session_list, hlist) {
266 		if (session->session_id == session_id) {
267 			l2tp_session_inc_refcount(session);
268 			if (do_ref && session->ref)
269 				session->ref(session);
270 			read_unlock_bh(&tunnel->hlist_lock);
271 
272 			return session;
273 		}
274 	}
275 	read_unlock_bh(&tunnel->hlist_lock);
276 
277 	return NULL;
278 }
279 EXPORT_SYMBOL_GPL(l2tp_session_get);
280 
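/* Lookup the nth session (zero-based) in the tunnel's session hash.
 * A new reference is held on the returned session. Optionally calls
 * session->ref() too if do_ref is true.
 */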
281 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
282 					  bool do_ref)
283 {
284 	int hash;
285 	struct l2tp_session *session;
286 	int count = 0;
287 
288 	read_lock_bh(&tunnel->hlist_lock);
289 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
290 		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
291 			if (++count > nth) {
292 				l2tp_session_inc_refcount(session);
293 				if (do_ref && session->ref)
294 					session->ref(session);
295 				read_unlock_bh(&tunnel->hlist_lock);
296 				return session;
297 			}
298 		}
299 	}
300 
301 	read_unlock_bh(&tunnel->hlist_lock);
302 
303 	return NULL;
304 }
305 EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
306 
307 /* Lookup a session by interface name.
308  * This is very inefficient but is only used by management interfaces.
309  */
310 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
311 						const char *ifname,
312 						bool do_ref)
313 {
314 	struct l2tp_net *pn = l2tp_pernet(net);
315 	int hash;
316 	struct l2tp_session *session;
317 
318 	rcu_read_lock_bh();
319 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
320 		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
321 			if (!strcmp(session->ifname, ifname)) {
322 				l2tp_session_inc_refcount(session);
323 				if (do_ref && session->ref)
324 					session->ref(session);
325 				rcu_read_unlock_bh();
326 
327 				return session;
328 			}
329 		}
330 	}
331 
332 	rcu_read_unlock_bh();
333 
334 	return NULL;
335 }
336 EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
337 
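/* Add a session to the tunnel's per-tunnel hash and, for L2TPv3, to the
 * per-net session hash. Returns -EEXIST if a session with the same id is
 * already hashed.
 */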
338 static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
339 				      struct l2tp_session *session)
340 {
341 	struct l2tp_session *session_walk;
342 	struct hlist_head *g_head;
343 	struct hlist_head *head;
344 	struct l2tp_net *pn;
345 
346 	head = l2tp_session_id_hash(tunnel, session->session_id);
347 
348 	write_lock_bh(&tunnel->hlist_lock);
349 	hlist_for_each_entry(session_walk, head, hlist)
350 		if (session_walk->session_id == session->session_id)
351 			goto exist;
352 
353 	if (tunnel->version == L2TP_HDR_VER_3) {
354 		pn = l2tp_pernet(tunnel->l2tp_net);
355 		g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
356 						session->session_id);
357 
358 		spin_lock_bh(&pn->l2tp_session_hlist_lock);
359 		hlist_for_each_entry(session_walk, g_head, global_hlist)
360 			if (session_walk->session_id == session->session_id)
361 				goto exist_glob;
362 
363 		hlist_add_head_rcu(&session->global_hlist, g_head);
364 		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
365 	}
366 
367 	hlist_add_head(&session->hlist, head);
368 	write_unlock_bh(&tunnel->hlist_lock);
369 
370 	return 0;
371 
372 exist_glob:
373 	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
374 exist:
375 	write_unlock_bh(&tunnel->hlist_lock);
376 
377 	return -EEXIST;
378 }
379 
380 /* Lookup a tunnel by id
381  */
382 struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
383 {
384 	struct l2tp_tunnel *tunnel;
385 	struct l2tp_net *pn = l2tp_pernet(net);
386 
387 	rcu_read_lock_bh();
388 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
389 		if (tunnel->tunnel_id == tunnel_id) {
390 			rcu_read_unlock_bh();
391 			return tunnel;
392 		}
393 	}
394 	rcu_read_unlock_bh();
395 
396 	return NULL;
397 }
398 EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
399 
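/* Lookup the nth tunnel (zero-based) in the per-net tunnel list. No
 * reference is taken on the returned tunnel.
 */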
400 struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
401 {
402 	struct l2tp_net *pn = l2tp_pernet(net);
403 	struct l2tp_tunnel *tunnel;
404 	int count = 0;
405 
406 	rcu_read_lock_bh();
407 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
408 		if (++count > nth) {
409 			rcu_read_unlock_bh();
410 			return tunnel;
411 		}
412 	}
413 
414 	rcu_read_unlock_bh();
415 
416 	return NULL;
417 }
418 EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
419 
420 /*****************************************************************************
421  * Receive data handling
422  *****************************************************************************/
423 
424 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
425  * number.
426  */
427 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
428 {
429 	struct sk_buff *skbp;
430 	struct sk_buff *tmp;
431 	u32 ns = L2TP_SKB_CB(skb)->ns;
432 
433 	spin_lock_bh(&session->reorder_q.lock);
434 	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
435 		if (L2TP_SKB_CB(skbp)->ns > ns) {
436 			__skb_queue_before(&session->reorder_q, skbp, skb);
437 			l2tp_dbg(session, L2TP_MSG_SEQ,
438 				 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
439 				 session->name, ns, L2TP_SKB_CB(skbp)->ns,
440 				 skb_queue_len(&session->reorder_q));
441 			atomic_long_inc(&session->stats.rx_oos_packets);
442 			goto out;
443 		}
444 	}
445 
446 	__skb_queue_tail(&session->reorder_q, skb);
447 
448 out:
449 	spin_unlock_bh(&session->reorder_q.lock);
450 }
451 
452 /* Dequeue a single skb.
453  */
454 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
455 {
456 	struct l2tp_tunnel *tunnel = session->tunnel;
457 	int length = L2TP_SKB_CB(skb)->length;
458 
459 	/* We're about to requeue the skb, so return resources
460 	 * to its current owner (a socket receive buffer).
461 	 */
462 	skb_orphan(skb);
463 
464 	atomic_long_inc(&tunnel->stats.rx_packets);
465 	atomic_long_add(length, &tunnel->stats.rx_bytes);
466 	atomic_long_inc(&session->stats.rx_packets);
467 	atomic_long_add(length, &session->stats.rx_bytes);
468 
469 	if (L2TP_SKB_CB(skb)->has_seq) {
470 		/* Bump our Nr */
471 		session->nr++;
472 		session->nr &= session->nr_max;
473 
474 		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
475 			 session->name, session->nr);
476 	}
477 
478 	/* call private receive handler */
479 	if (session->recv_skb != NULL)
480 		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
481 	else
482 		kfree_skb(skb);
483 
484 	if (session->deref)
485 		(*session->deref)(session);
486 }
487 
488 /* Dequeue skbs from the session's reorder_q, subject to packet order.
489  * Skbs that have been in the queue for too long are simply discarded.
490  */
491 static void l2tp_recv_dequeue(struct l2tp_session *session)
492 {
493 	struct sk_buff *skb;
494 	struct sk_buff *tmp;
495 
496 	/* If the pkt at the head of the queue has the nr that we
497 	 * expect to send up next, dequeue it and any other
498 	 * in-sequence packets behind it.
499 	 */
500 start:
501 	spin_lock_bh(&session->reorder_q.lock);
502 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
503 		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
504 			atomic_long_inc(&session->stats.rx_seq_discards);
505 			atomic_long_inc(&session->stats.rx_errors);
506 			l2tp_dbg(session, L2TP_MSG_SEQ,
507 				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
508 				 session->name, L2TP_SKB_CB(skb)->ns,
509 				 L2TP_SKB_CB(skb)->length, session->nr,
510 				 skb_queue_len(&session->reorder_q));
511 			session->reorder_skip = 1;
512 			__skb_unlink(skb, &session->reorder_q);
513 			kfree_skb(skb);
514 			if (session->deref)
515 				(*session->deref)(session);
516 			continue;
517 		}
518 
519 		if (L2TP_SKB_CB(skb)->has_seq) {
520 			if (session->reorder_skip) {
521 				l2tp_dbg(session, L2TP_MSG_SEQ,
522 					 "%s: advancing nr to next pkt: %u -> %u",
523 					 session->name, session->nr,
524 					 L2TP_SKB_CB(skb)->ns);
525 				session->reorder_skip = 0;
526 				session->nr = L2TP_SKB_CB(skb)->ns;
527 			}
528 			if (L2TP_SKB_CB(skb)->ns != session->nr) {
529 				l2tp_dbg(session, L2TP_MSG_SEQ,
530 					 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
531 					 session->name, L2TP_SKB_CB(skb)->ns,
532 					 L2TP_SKB_CB(skb)->length, session->nr,
533 					 skb_queue_len(&session->reorder_q));
534 				goto out;
535 			}
536 		}
537 		__skb_unlink(skb, &session->reorder_q);
538 
539 		/* Process the skb. We release the queue lock while we
540 		 * do so to let other contexts process the queue.
541 		 */
542 		spin_unlock_bh(&session->reorder_q.lock);
543 		l2tp_recv_dequeue_skb(session, skb);
544 		goto start;
545 	}
546 
547 out:
548 	spin_unlock_bh(&session->reorder_q.lock);
549 }
550 
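/* Check whether sequence number nr lies within the session's receive
 * window, i.e. is fewer than nr_window_size packets ahead of the next
 * expected sequence number, modulo nr_max + 1.
 */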
551 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
552 {
553 	u32 nws;
554 
555 	if (nr >= session->nr)
556 		nws = nr - session->nr;
557 	else
558 		nws = (session->nr_max + 1) - (session->nr - nr);
559 
560 	return nws < session->nr_window_size;
561 }
562 
563 /* If packet has sequence numbers, queue it if acceptable. Returns 0 if
564  * acceptable, else non-zero.
565  */
566 static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
567 {
568 	if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
569 		/* Packet sequence number is outside allowed window.
570 		 * Discard it.
571 		 */
572 		l2tp_dbg(session, L2TP_MSG_SEQ,
573 			 "%s: pkt %u len %d discarded, outside window, nr=%u\n",
574 			 session->name, L2TP_SKB_CB(skb)->ns,
575 			 L2TP_SKB_CB(skb)->length, session->nr);
576 		goto discard;
577 	}
578 
579 	if (session->reorder_timeout != 0) {
580 		/* Packet reordering enabled. Add skb to session's
581 		 * reorder queue, in order of ns.
582 		 */
583 		l2tp_recv_queue_skb(session, skb);
584 		goto out;
585 	}
586 
587 	/* Packet reordering disabled. Discard out-of-sequence packets, while
588 	 * tracking the number of in-sequence packets after the first OOS packet
589 	 * is seen. After nr_oos_count_max in-sequence packets, reset the
590 	 * sequence number to re-enable packet reception.
591 	 */
592 	if (L2TP_SKB_CB(skb)->ns == session->nr) {
593 		skb_queue_tail(&session->reorder_q, skb);
594 	} else {
595 		u32 nr_oos = L2TP_SKB_CB(skb)->ns;
596 		u32 nr_next = (session->nr_oos + 1) & session->nr_max;
597 
598 		if (nr_oos == nr_next)
599 			session->nr_oos_count++;
600 		else
601 			session->nr_oos_count = 0;
602 
603 		session->nr_oos = nr_oos;
604 		if (session->nr_oos_count > session->nr_oos_count_max) {
605 			session->reorder_skip = 1;
606 			l2tp_dbg(session, L2TP_MSG_SEQ,
607 				 "%s: %d oos packets received. Resetting sequence numbers\n",
608 				 session->name, session->nr_oos_count);
609 		}
610 		if (!session->reorder_skip) {
611 			atomic_long_inc(&session->stats.rx_seq_discards);
612 			l2tp_dbg(session, L2TP_MSG_SEQ,
613 				 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
614 				 session->name, L2TP_SKB_CB(skb)->ns,
615 				 L2TP_SKB_CB(skb)->length, session->nr,
616 				 skb_queue_len(&session->reorder_q));
617 			goto discard;
618 		}
619 		skb_queue_tail(&session->reorder_q, skb);
620 	}
621 
622 out:
623 	return 0;
624 
625 discard:
626 	return 1;
627 }
628 
629 /* Do receive processing of L2TP data frames. We handle both L2TPv2
630  * and L2TPv3 data frames here.
631  *
632  * L2TPv2 Data Message Header
633  *
634  *  0                   1                   2                   3
635  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
636  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
637  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
638  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
639  * |           Tunnel ID           |           Session ID          |
640  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
641  * |             Ns (opt)          |             Nr (opt)          |
642  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
643  * |      Offset Size (opt)        |    Offset pad... (opt)
644  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
645  *
646  * Data frames are marked by T=0. All other fields are the same as
647  * those in L2TP control frames.
648  *
649  * L2TPv3 Data Message Header
650  *
651  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
652  * |                      L2TP Session Header                      |
653  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
654  * |                      L2-Specific Sublayer                     |
655  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
656  * |                        Tunnel Payload                      ...
657  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
658  *
659  * L2TPv3 Session Header Over IP
660  *
661  *  0                   1                   2                   3
662  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
663  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
664  * |                           Session ID                          |
665  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
666  * |               Cookie (optional, maximum 64 bits)...
667  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
668  *                                                                 |
669  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
670  *
671  * L2TPv3 L2-Specific Sublayer Format
672  *
673  *  0                   1                   2                   3
674  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
675  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
676  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
677  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
678  *
679  * Cookie value, sublayer format and offset (pad) are negotiated with
680  * the peer when the session is set up. Unlike L2TPv2, we do not need
681  * to parse the packet header to determine if optional fields are
682  * present.
683  *
684  * Caller must already have parsed the frame and determined that it is
685  * a data (not control) frame before coming here. Fields up to the
686  * session-id have already been parsed and ptr points to the data
687  * after the session-id.
688  *
689  * session->ref() must have been called prior to l2tp_recv_common().
690  * session->deref() will be called automatically after skb is processed.
691  */
692 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
693 		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
694 		      int length, int (*payload_hook)(struct sk_buff *skb))
695 {
696 	struct l2tp_tunnel *tunnel = session->tunnel;
697 	int offset;
698 	u32 ns, nr;
699 
700 	/* Parse and check optional cookie */
701 	if (session->peer_cookie_len > 0) {
702 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
703 			l2tp_info(tunnel, L2TP_MSG_DATA,
704 				  "%s: cookie mismatch (%u/%u). Discarding.\n",
705 				  tunnel->name, tunnel->tunnel_id,
706 				  session->session_id);
707 			atomic_long_inc(&session->stats.rx_cookie_discards);
708 			goto discard;
709 		}
710 		ptr += session->peer_cookie_len;
711 	}
712 
713 	/* Handle the optional sequence numbers. Sequence numbers are
714 	 * in different places for L2TPv2 and L2TPv3.
715 	 *
716 	 * If we are the LAC, enable/disable sequence numbers under
717 	 * the control of the LNS.  If no sequence numbers present but
718 	 * we were expecting them, discard frame.
719 	 */
720 	ns = nr = 0;
721 	L2TP_SKB_CB(skb)->has_seq = 0;
722 	if (tunnel->version == L2TP_HDR_VER_2) {
723 		if (hdrflags & L2TP_HDRFLAG_S) {
724 			ns = ntohs(*(__be16 *) ptr);
725 			ptr += 2;
726 			nr = ntohs(*(__be16 *) ptr);
727 			ptr += 2;
728 
729 			/* Store L2TP info in the skb */
730 			L2TP_SKB_CB(skb)->ns = ns;
731 			L2TP_SKB_CB(skb)->has_seq = 1;
732 
733 			l2tp_dbg(session, L2TP_MSG_SEQ,
734 				 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
735 				 session->name, ns, nr, session->nr);
736 		}
737 	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
738 		u32 l2h = ntohl(*(__be32 *) ptr);
739 
740 		if (l2h & L2TP_SLFLAG_S) {
741 			ns = l2h & L2TP_SL_SEQ_MASK;
742 
743 			/* Store L2TP info in the skb */
744 			L2TP_SKB_CB(skb)->ns = ns;
745 			L2TP_SKB_CB(skb)->has_seq = 1;
746 
747 			l2tp_dbg(session, L2TP_MSG_SEQ,
748 				 "%s: recv data ns=%u, session nr=%u\n",
749 				 session->name, ns, session->nr);
750 		}
751 	}
752 
753 	/* Advance past L2-specific header, if present */
754 	ptr += session->l2specific_len;
755 
756 	if (L2TP_SKB_CB(skb)->has_seq) {
757 		/* Received a packet with sequence numbers. If we're the LAC,
758 		 * check if we are sending sequence numbers and if not,
759 		 * enable them, since the LNS controls this.
760 		 */
761 		if ((!session->lns_mode) && (!session->send_seq)) {
762 			l2tp_info(session, L2TP_MSG_SEQ,
763 				  "%s: requested to enable seq numbers by LNS\n",
764 				  session->name);
765 			session->send_seq = 1;
766 			l2tp_session_set_header_len(session, tunnel->version);
767 		}
768 	} else {
769 		/* No sequence numbers.
770 		 * If user has configured mandatory sequence numbers, discard.
771 		 */
772 		if (session->recv_seq) {
773 			l2tp_warn(session, L2TP_MSG_SEQ,
774 				  "%s: recv data has no seq numbers when required. Discarding.\n",
775 				  session->name);
776 			atomic_long_inc(&session->stats.rx_seq_discards);
777 			goto discard;
778 		}
779 
780 		/* If we're the LAC and we're sending sequence numbers, the
781 		 * LNS has requested that we no longer send sequence numbers.
782 		 * If we're the LNS and we're sending sequence numbers, the
783 		 * LAC is broken. Discard the frame.
784 		 */
785 		if ((!session->lns_mode) && (session->send_seq)) {
786 			l2tp_info(session, L2TP_MSG_SEQ,
787 				  "%s: requested to disable seq numbers by LNS\n",
788 				  session->name);
789 			session->send_seq = 0;
790 			l2tp_session_set_header_len(session, tunnel->version);
791 		} else if (session->send_seq) {
792 			l2tp_warn(session, L2TP_MSG_SEQ,
793 				  "%s: recv data has no seq numbers when required. Discarding.\n",
794 				  session->name);
795 			atomic_long_inc(&session->stats.rx_seq_discards);
796 			goto discard;
797 		}
798 	}
799 
800 	/* Session data offset is handled differently for L2TPv2 and
801 	 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
802 	 * the header. For L2TPv3, the offset is negotiated using AVPs
803 	 * in the session setup control protocol.
804 	 */
805 	if (tunnel->version == L2TP_HDR_VER_2) {
806 		/* If offset bit set, skip it. */
807 		if (hdrflags & L2TP_HDRFLAG_O) {
808 			offset = ntohs(*(__be16 *)ptr);
809 			ptr += 2 + offset;
810 		}
811 	} else
812 		ptr += session->offset;
813 
814 	offset = ptr - optr;
815 	if (!pskb_may_pull(skb, offset))
816 		goto discard;
817 
818 	__skb_pull(skb, offset);
819 
820 	/* If caller wants to process the payload before we queue the
821 	 * packet, do so now.
822 	 */
823 	if (payload_hook)
824 		if ((*payload_hook)(skb))
825 			goto discard;
826 
827 	/* Prepare skb for adding to the session's reorder_q.  Hold
828 	 * packets for max reorder_timeout or 1 second if not
829 	 * reordering.
830 	 */
831 	L2TP_SKB_CB(skb)->length = length;
832 	L2TP_SKB_CB(skb)->expires = jiffies +
833 		(session->reorder_timeout ? session->reorder_timeout : HZ);
834 
835 	/* Add packet to the session's receive queue. Reordering is done here, if
836 	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
837 	 */
838 	if (L2TP_SKB_CB(skb)->has_seq) {
839 		if (l2tp_recv_data_seq(session, skb))
840 			goto discard;
841 	} else {
842 		/* No sequence numbers. Add the skb to the tail of the
843 		 * reorder queue. This ensures that it will be
844 		 * delivered after all previous sequenced skbs.
845 		 */
846 		skb_queue_tail(&session->reorder_q, skb);
847 	}
848 
849 	/* Try to dequeue as many skbs from reorder_q as we can. */
850 	l2tp_recv_dequeue(session);
851 
852 	return;
853 
854 discard:
855 	atomic_long_inc(&session->stats.rx_errors);
856 	kfree_skb(skb);
857 
858 	if (session->deref)
859 		(*session->deref)(session);
860 }
861 EXPORT_SYMBOL(l2tp_recv_common);
862 
863 /* Drop skbs from the session's reorder_q
864  */
865 int l2tp_session_queue_purge(struct l2tp_session *session)
866 {
867 	struct sk_buff *skb = NULL;
868 	BUG_ON(!session);
869 	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
870 	while ((skb = skb_dequeue(&session->reorder_q))) {
871 		atomic_long_inc(&session->stats.rx_errors);
872 		kfree_skb(skb);
873 		if (session->deref)
874 			(*session->deref)(session);
875 	}
876 	return 0;
877 }
878 EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
879 
880 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
881  * here. The skb is not on a list when we get here.
882  * Returns 0 if the packet was a data packet and was successfully passed on.
883  * Returns 1 if the packet was not a good data packet and could not be
884  * forwarded.  All such packets are passed up to userspace to deal with.
885  */
886 static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
887 			      int (*payload_hook)(struct sk_buff *skb))
888 {
889 	struct l2tp_session *session = NULL;
890 	unsigned char *ptr, *optr;
891 	u16 hdrflags;
892 	u32 tunnel_id, session_id;
893 	u16 version;
894 	int length;
895 
896 	/* UDP has verified the checksum */
897 
898 	/* UDP always verifies the packet length. */
899 	__skb_pull(skb, sizeof(struct udphdr));
900 
901 	/* Short packet? */
902 	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
903 		l2tp_info(tunnel, L2TP_MSG_DATA,
904 			  "%s: recv short packet (len=%d)\n",
905 			  tunnel->name, skb->len);
906 		goto error;
907 	}
908 
909 	/* Trace packet contents, if enabled */
910 	if (tunnel->debug & L2TP_MSG_DATA) {
911 		length = min(32u, skb->len);
912 		if (!pskb_may_pull(skb, length))
913 			goto error;
914 
915 		pr_debug("%s: recv\n", tunnel->name);
916 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
917 	}
918 
919 	/* Point to L2TP header */
920 	optr = ptr = skb->data;
921 
922 	/* Get L2TP header flags */
923 	hdrflags = ntohs(*(__be16 *) ptr);
924 
925 	/* Check protocol version */
926 	version = hdrflags & L2TP_HDR_VER_MASK;
927 	if (version != tunnel->version) {
928 		l2tp_info(tunnel, L2TP_MSG_DATA,
929 			  "%s: recv protocol version mismatch: got %d expected %d\n",
930 			  tunnel->name, version, tunnel->version);
931 		goto error;
932 	}
933 
934 	/* Get length of L2TP packet */
935 	length = skb->len;
936 
937 	/* If this is a control packet, it is handled by userspace. */
938 	if (hdrflags & L2TP_HDRFLAG_T) {
939 		l2tp_dbg(tunnel, L2TP_MSG_DATA,
940 			 "%s: recv control packet, len=%d\n",
941 			 tunnel->name, length);
942 		goto error;
943 	}
944 
945 	/* Skip flags */
946 	ptr += 2;
947 
948 	if (tunnel->version == L2TP_HDR_VER_2) {
949 		/* If length is present, skip it */
950 		if (hdrflags & L2TP_HDRFLAG_L)
951 			ptr += 2;
952 
953 		/* Extract tunnel and session ID */
954 		tunnel_id = ntohs(*(__be16 *) ptr);
955 		ptr += 2;
956 		session_id = ntohs(*(__be16 *) ptr);
957 		ptr += 2;
958 	} else {
959 		ptr += 2;	/* skip reserved bits */
960 		tunnel_id = tunnel->tunnel_id;
961 		session_id = ntohl(*(__be32 *) ptr);
962 		ptr += 4;
963 	}
964 
965 	/* Find the session context */
966 	session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
967 	if (!session || !session->recv_skb) {
968 		if (session) {
969 			if (session->deref)
970 				session->deref(session);
971 			l2tp_session_dec_refcount(session);
972 		}
973 
974 		/* Not found? Pass to userspace to deal with */
975 		l2tp_info(tunnel, L2TP_MSG_DATA,
976 			  "%s: no session found (%u/%u). Passing up.\n",
977 			  tunnel->name, tunnel_id, session_id);
978 		goto error;
979 	}
980 
981 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
982 	l2tp_session_dec_refcount(session);
983 
984 	return 0;
985 
986 error:
987 	/* Put UDP header back */
988 	__skb_push(skb, sizeof(struct udphdr));
989 
990 	return 1;
991 }
992 
993 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
994  * Return codes:
995  * 0 : success.
996  * <0: error
997  * >0: skb should be passed up to userspace as UDP.
998  */
999 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1000 {
1001 	struct l2tp_tunnel *tunnel;
1002 
1003 	tunnel = l2tp_sock_to_tunnel(sk);
1004 	if (tunnel == NULL)
1005 		goto pass_up;
1006 
1007 	l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
1008 		 tunnel->name, skb->len);
1009 
1010 	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
1011 		goto pass_up_put;
1012 
1013 	sock_put(sk);
1014 	return 0;
1015 
1016 pass_up_put:
1017 	sock_put(sk);
1018 pass_up:
1019 	return 1;
1020 }
1021 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
1022 
1023 /************************************************************************
1024  * Transmit handling
1025  ***********************************************************************/
1026 
1027 /* Build an L2TP header for the session into the buffer provided.
1028  */
1029 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1030 {
1031 	struct l2tp_tunnel *tunnel = session->tunnel;
1032 	__be16 *bufp = buf;
1033 	__be16 *optr = buf;
1034 	u16 flags = L2TP_HDR_VER_2;
1035 	u32 tunnel_id = tunnel->peer_tunnel_id;
1036 	u32 session_id = session->peer_session_id;
1037 
1038 	if (session->send_seq)
1039 		flags |= L2TP_HDRFLAG_S;
1040 
1041 	/* Setup L2TP header. */
1042 	*bufp++ = htons(flags);
1043 	*bufp++ = htons(tunnel_id);
1044 	*bufp++ = htons(session_id);
1045 	if (session->send_seq) {
1046 		*bufp++ = htons(session->ns);
1047 		*bufp++ = 0;
1048 		session->ns++;
1049 		session->ns &= 0xffff;
1050 		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
1051 			 session->name, session->ns);
1052 	}
1053 
1054 	return bufp - optr;
1055 }
1056 
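/* Build an L2TPv3 header: 4 bytes of flags (UDP encapsulation only),
 * session id, optional cookie, optional L2-specific sublayer carrying the
 * sequence number if send_seq is set, plus any configured offset.
 * Returns the number of bytes written.
 */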
1057 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1058 {
1059 	struct l2tp_tunnel *tunnel = session->tunnel;
1060 	char *bufp = buf;
1061 	char *optr = bufp;
1062 
1063 	/* Setup L2TP header. The header differs slightly for UDP and
1064 	 * IP encapsulations. For UDP, there are 4 bytes of flags.
1065 	 */
1066 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1067 		u16 flags = L2TP_HDR_VER_3;
1068 		*((__be16 *) bufp) = htons(flags);
1069 		bufp += 2;
1070 		*((__be16 *) bufp) = 0;
1071 		bufp += 2;
1072 	}
1073 
1074 	*((__be32 *) bufp) = htonl(session->peer_session_id);
1075 	bufp += 4;
1076 	if (session->cookie_len) {
1077 		memcpy(bufp, &session->cookie[0], session->cookie_len);
1078 		bufp += session->cookie_len;
1079 	}
1080 	if (session->l2specific_len) {
1081 		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1082 			u32 l2h = 0;
1083 			if (session->send_seq) {
1084 				l2h = L2TP_SLFLAG_S | session->ns;
1085 				session->ns++;
1086 				session->ns &= 0xffffff;
1087 				l2tp_dbg(session, L2TP_MSG_SEQ,
1088 					 "%s: updated ns to %u\n",
1089 					 session->name, session->ns);
1090 			}
1091 
1092 			*((__be32 *) bufp) = htonl(l2h);
1093 		}
1094 		bufp += session->l2specific_len;
1095 	}
1096 	if (session->offset)
1097 		bufp += session->offset;
1098 
1099 	return bufp - optr;
1100 }
1101 
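/* Transmit a fully built L2TP frame on the tunnel socket, using the IPv4
 * or IPv6 output path as appropriate, and update tunnel and session
 * transmit stats.
 */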
1102 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1103 			  struct flowi *fl, size_t data_len)
1104 {
1105 	struct l2tp_tunnel *tunnel = session->tunnel;
1106 	unsigned int len = skb->len;
1107 	int error;
1108 
1109 	/* Debug */
1110 	if (session->send_seq)
1111 		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
1112 			 session->name, data_len, session->ns - 1);
1113 	else
1114 		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
1115 			 session->name, data_len);
1116 
1117 	if (session->debug & L2TP_MSG_DATA) {
1118 		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1119 		unsigned char *datap = skb->data + uhlen;
1120 
1121 		pr_debug("%s: xmit\n", session->name);
1122 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
1123 				     datap, min_t(size_t, 32, len - uhlen));
1124 	}
1125 
1126 	/* Queue the packet to IP for output */
1127 	skb->ignore_df = 1;
1128 #if IS_ENABLED(CONFIG_IPV6)
1129 	if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
1130 		error = inet6_csk_xmit(tunnel->sock, skb, NULL);
1131 	else
1132 #endif
1133 		error = ip_queue_xmit(tunnel->sock, skb, fl);
1134 
1135 	/* Update stats */
1136 	if (error >= 0) {
1137 		atomic_long_inc(&tunnel->stats.tx_packets);
1138 		atomic_long_add(len, &tunnel->stats.tx_bytes);
1139 		atomic_long_inc(&session->stats.tx_packets);
1140 		atomic_long_add(len, &session->stats.tx_bytes);
1141 	} else {
1142 		atomic_long_inc(&tunnel->stats.tx_errors);
1143 		atomic_long_inc(&session->stats.tx_errors);
1144 	}
1145 
1146 	return 0;
1147 }
1148 
1149 /* If caller requires the skb to have a ppp header, the header must be
1150  * inserted in the skb data before calling this function.
1151  */
1152 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
1153 {
1154 	int data_len = skb->len;
1155 	struct l2tp_tunnel *tunnel = session->tunnel;
1156 	struct sock *sk = tunnel->sock;
1157 	struct flowi *fl;
1158 	struct udphdr *uh;
1159 	struct inet_sock *inet;
1160 	int headroom;
1161 	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1162 	int udp_len;
1163 	int ret = NET_XMIT_SUCCESS;
1164 
1165 	/* Check that there's enough headroom in the skb to insert IP,
1166 	 * UDP and L2TP headers. If not enough, expand it to
1167 	 * make room. Adjust truesize.
1168 	 */
1169 	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1170 		uhlen + hdr_len;
1171 	if (skb_cow_head(skb, headroom)) {
1172 		kfree_skb(skb);
1173 		return NET_XMIT_DROP;
1174 	}
1175 
1176 	/* Setup L2TP header */
1177 	session->build_header(session, __skb_push(skb, hdr_len));
1178 
1179 	/* Reset skb netfilter state */
1180 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1181 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1182 			      IPSKB_REROUTED);
1183 	nf_reset(skb);
1184 
1185 	bh_lock_sock(sk);
1186 	if (sock_owned_by_user(sk)) {
1187 		kfree_skb(skb);
1188 		ret = NET_XMIT_DROP;
1189 		goto out_unlock;
1190 	}
1191 
1192 	/* Get routing info from the tunnel socket */
1193 	skb_dst_drop(skb);
1194 	skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
1195 
1196 	inet = inet_sk(sk);
1197 	fl = &inet->cork.fl;
1198 	switch (tunnel->encap) {
1199 	case L2TP_ENCAPTYPE_UDP:
1200 		/* Setup UDP header */
1201 		__skb_push(skb, sizeof(*uh));
1202 		skb_reset_transport_header(skb);
1203 		uh = udp_hdr(skb);
1204 		uh->source = inet->inet_sport;
1205 		uh->dest = inet->inet_dport;
1206 		udp_len = uhlen + hdr_len + data_len;
1207 		uh->len = htons(udp_len);
1208 
1209 		/* Calculate UDP checksum if configured to do so */
1210 #if IS_ENABLED(CONFIG_IPV6)
1211 		if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1212 			udp6_set_csum(udp_get_no_check6_tx(sk),
1213 				      skb, &inet6_sk(sk)->saddr,
1214 				      &sk->sk_v6_daddr, udp_len);
1215 		else
1216 #endif
1217 		udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1218 			     inet->inet_daddr, udp_len);
1219 		break;
1220 
1221 	case L2TP_ENCAPTYPE_IP:
1222 		break;
1223 	}
1224 
1225 	l2tp_xmit_core(session, skb, fl, data_len);
1226 out_unlock:
1227 	bh_unlock_sock(sk);
1228 
1229 	return ret;
1230 }
1231 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1232 
1233 /*****************************************************************************
1234  * Tunnel and session create/destroy.
1235  *****************************************************************************/
1236 
1237 /* Tunnel socket destruct hook.
1238  * The tunnel context is deleted only when all session sockets have been
1239  * closed.
1240  */
1241 static void l2tp_tunnel_destruct(struct sock *sk)
1242 {
1243 	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1244 	struct l2tp_net *pn;
1245 
1246 	if (tunnel == NULL)
1247 		goto end;
1248 
1249 	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1250 
1251 
1252 	/* Disable udp encapsulation */
1253 	switch (tunnel->encap) {
1254 	case L2TP_ENCAPTYPE_UDP:
1255 		/* No longer an encapsulation socket. See net/ipv4/udp.c */
1256 		(udp_sk(sk))->encap_type = 0;
1257 		(udp_sk(sk))->encap_rcv = NULL;
1258 		(udp_sk(sk))->encap_destroy = NULL;
1259 		break;
1260 	case L2TP_ENCAPTYPE_IP:
1261 		break;
1262 	}
1263 
1264 	/* Remove hooks into tunnel socket */
1265 	sk->sk_destruct = tunnel->old_sk_destruct;
1266 	sk->sk_user_data = NULL;
1267 	tunnel->sock = NULL;
1268 
1269 	/* Remove the tunnel struct from the tunnel list */
1270 	pn = l2tp_pernet(tunnel->l2tp_net);
1271 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1272 	list_del_rcu(&tunnel->list);
1273 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1274 	atomic_dec(&l2tp_tunnel_count);
1275 
1276 	l2tp_tunnel_closeall(tunnel);
1277 	l2tp_tunnel_dec_refcount(tunnel);
1278 
1279 	/* Call the original destructor */
1280 	if (sk->sk_destruct)
1281 		(*sk->sk_destruct)(sk);
1282 end:
1283 	return;
1284 }
1285 
1286 /* When the tunnel is closed, all the attached sessions need to go too.
1287  */
1288 void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1289 {
1290 	int hash;
1291 	struct hlist_node *walk;
1292 	struct hlist_node *tmp;
1293 	struct l2tp_session *session;
1294 
1295 	BUG_ON(tunnel == NULL);
1296 
1297 	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
1298 		  tunnel->name);
1299 
1300 	write_lock_bh(&tunnel->hlist_lock);
1301 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1302 again:
1303 		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1304 			session = hlist_entry(walk, struct l2tp_session, hlist);
1305 
1306 			l2tp_info(session, L2TP_MSG_CONTROL,
1307 				  "%s: closing session\n", session->name);
1308 
1309 			hlist_del_init(&session->hlist);
1310 
1311 			if (session->ref != NULL)
1312 				(*session->ref)(session);
1313 
1314 			write_unlock_bh(&tunnel->hlist_lock);
1315 
1316 			__l2tp_session_unhash(session);
1317 			l2tp_session_queue_purge(session);
1318 
1319 			if (session->session_close != NULL)
1320 				(*session->session_close)(session);
1321 
1322 			if (session->deref != NULL)
1323 				(*session->deref)(session);
1324 
1325 			l2tp_session_dec_refcount(session);
1326 
1327 			write_lock_bh(&tunnel->hlist_lock);
1328 
1329 			/* Now restart from the beginning of this hash
1330 			 * chain.  We always remove a session from the
1331 			 * list so we are guaranteed to make forward
1332 			 * progress.
1333 			 */
1334 			goto again;
1335 		}
1336 	}
1337 	write_unlock_bh(&tunnel->hlist_lock);
1338 }
1339 EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1340 
1341 /* Tunnel socket destroy hook for UDP encapsulation */
1342 static void l2tp_udp_encap_destroy(struct sock *sk)
1343 {
1344 	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1345 	if (tunnel) {
1346 		l2tp_tunnel_closeall(tunnel);
1347 		sock_put(sk);
1348 	}
1349 }
1350 
1351 /* Really kill the tunnel.
1352  * Come here only when all sessions have been cleared from the tunnel.
1353  */
1354 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1355 {
1356 	BUG_ON(refcount_read(&tunnel->ref_count) != 0);
1357 	BUG_ON(tunnel->sock != NULL);
1358 	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1359 	kfree_rcu(tunnel, rcu);
1360 }
1361 
1362 /* Workqueue tunnel deletion function */
1363 static void l2tp_tunnel_del_work(struct work_struct *work)
1364 {
1365 	struct l2tp_tunnel *tunnel = NULL;
1366 	struct socket *sock = NULL;
1367 	struct sock *sk = NULL;
1368 
1369 	tunnel = container_of(work, struct l2tp_tunnel, del_work);
1370 
1371 	l2tp_tunnel_closeall(tunnel);
1372 
1373 	sk = l2tp_tunnel_sock_lookup(tunnel);
1374 	if (!sk)
1375 		goto out;
1376 
1377 	sock = sk->sk_socket;
1378 
1379 	/* If the tunnel socket was created by userspace, then go through the
1380 	 * inet layer to shut the socket down, and let userspace close it.
1381 	 * Otherwise, if we created the socket directly within the kernel, use
1382 	 * the sk API to release it here.
1383 	 * In either case the tunnel resources are freed in the socket
1384 	 * destructor when the tunnel socket goes away.
1385 	 */
1386 	if (tunnel->fd >= 0) {
1387 		if (sock)
1388 			inet_shutdown(sock, 2);
1389 	} else {
1390 		if (sock) {
1391 			kernel_sock_shutdown(sock, SHUT_RDWR);
1392 			sock_release(sock);
1393 		}
1394 	}
1395 
1396 	l2tp_tunnel_sock_put(sk);
1397 out:
1398 	l2tp_tunnel_dec_refcount(tunnel);
1399 }
1400 
1401 /* Create a socket for the tunnel, if one isn't set up by
1402  * userspace. This is used for static tunnels where there is no
1403  * managing L2TP daemon.
1404  *
1405  * Since we don't want these sockets to keep a namespace alive by
1406  * themselves, we drop the socket's namespace refcount after creation.
1407  * These sockets are freed when the namespace exits using the pernet
1408  * exit hook.
1409  */
1410 static int l2tp_tunnel_sock_create(struct net *net,
1411 				u32 tunnel_id,
1412 				u32 peer_tunnel_id,
1413 				struct l2tp_tunnel_cfg *cfg,
1414 				struct socket **sockp)
1415 {
1416 	int err = -EINVAL;
1417 	struct socket *sock = NULL;
1418 	struct udp_port_cfg udp_conf;
1419 
1420 	switch (cfg->encap) {
1421 	case L2TP_ENCAPTYPE_UDP:
1422 		memset(&udp_conf, 0, sizeof(udp_conf));
1423 
1424 #if IS_ENABLED(CONFIG_IPV6)
1425 		if (cfg->local_ip6 && cfg->peer_ip6) {
1426 			udp_conf.family = AF_INET6;
1427 			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1428 			       sizeof(udp_conf.local_ip6));
1429 			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1430 			       sizeof(udp_conf.peer_ip6));
1431 			udp_conf.use_udp6_tx_checksums =
1432 				!cfg->udp6_zero_tx_checksums;
1433 			udp_conf.use_udp6_rx_checksums =
1434 				!cfg->udp6_zero_rx_checksums;
1435 		} else
1436 #endif
1437 		{
1438 			udp_conf.family = AF_INET;
1439 			udp_conf.local_ip = cfg->local_ip;
1440 			udp_conf.peer_ip = cfg->peer_ip;
1441 			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1442 		}
1443 
1444 		udp_conf.local_udp_port = htons(cfg->local_udp_port);
1445 		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1446 
1447 		err = udp_sock_create(net, &udp_conf, &sock);
1448 		if (err < 0)
1449 			goto out;
1450 
1451 		break;
1452 
1453 	case L2TP_ENCAPTYPE_IP:
1454 #if IS_ENABLED(CONFIG_IPV6)
1455 		if (cfg->local_ip6 && cfg->peer_ip6) {
1456 			struct sockaddr_l2tpip6 ip6_addr = {0};
1457 
1458 			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1459 					  IPPROTO_L2TP, &sock);
1460 			if (err < 0)
1461 				goto out;
1462 
1463 			ip6_addr.l2tp_family = AF_INET6;
1464 			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1465 			       sizeof(ip6_addr.l2tp_addr));
1466 			ip6_addr.l2tp_conn_id = tunnel_id;
1467 			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1468 					  sizeof(ip6_addr));
1469 			if (err < 0)
1470 				goto out;
1471 
1472 			ip6_addr.l2tp_family = AF_INET6;
1473 			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1474 			       sizeof(ip6_addr.l2tp_addr));
1475 			ip6_addr.l2tp_conn_id = peer_tunnel_id;
1476 			err = kernel_connect(sock,
1477 					     (struct sockaddr *) &ip6_addr,
1478 					     sizeof(ip6_addr), 0);
1479 			if (err < 0)
1480 				goto out;
1481 		} else
1482 #endif
1483 		{
1484 			struct sockaddr_l2tpip ip_addr = {0};
1485 
1486 			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1487 					  IPPROTO_L2TP, &sock);
1488 			if (err < 0)
1489 				goto out;
1490 
1491 			ip_addr.l2tp_family = AF_INET;
1492 			ip_addr.l2tp_addr = cfg->local_ip;
1493 			ip_addr.l2tp_conn_id = tunnel_id;
1494 			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1495 					  sizeof(ip_addr));
1496 			if (err < 0)
1497 				goto out;
1498 
1499 			ip_addr.l2tp_family = AF_INET;
1500 			ip_addr.l2tp_addr = cfg->peer_ip;
1501 			ip_addr.l2tp_conn_id = peer_tunnel_id;
1502 			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1503 					     sizeof(ip_addr), 0);
1504 			if (err < 0)
1505 				goto out;
1506 		}
1507 		break;
1508 
1509 	default:
1510 		goto out;
1511 	}
1512 
1513 out:
1514 	*sockp = sock;
1515 	if ((err < 0) && sock) {
1516 		kernel_sock_shutdown(sock, SHUT_RDWR);
1517 		sock_release(sock);
1518 		*sockp = NULL;
1519 	}
1520 
1521 	return err;
1522 }
1523 
1524 static struct lock_class_key l2tp_socket_class;
1525 
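/* Create a tunnel context. If fd is negative, create a kernel tunnel
 * socket from cfg; otherwise use the userspace socket referenced by fd.
 * The socket's user data is set to the tunnel (for UDP encapsulation the
 * socket is also marked as an L2TP encap socket) and the tunnel is added
 * to the per-net tunnel list.
 */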
1526 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1527 {
1528 	struct l2tp_tunnel *tunnel = NULL;
1529 	int err;
1530 	struct socket *sock = NULL;
1531 	struct sock *sk = NULL;
1532 	struct l2tp_net *pn;
1533 	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1534 
1535 	/* Get the tunnel socket from the fd, which was opened by
1536 	 * the userspace L2TP daemon. If not specified, create a
1537 	 * kernel socket.
1538 	 */
1539 	if (fd < 0) {
1540 		err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1541 				cfg, &sock);
1542 		if (err < 0)
1543 			goto err;
1544 	} else {
1545 		sock = sockfd_lookup(fd, &err);
1546 		if (!sock) {
1547 			pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1548 			       tunnel_id, fd, err);
1549 			err = -EBADF;
1550 			goto err;
1551 		}
1552 
1553 		/* Reject namespace mismatches */
1554 		if (!net_eq(sock_net(sock->sk), net)) {
1555 			pr_err("tunl %u: netns mismatch\n", tunnel_id);
1556 			err = -EINVAL;
1557 			goto err;
1558 		}
1559 	}
1560 
1561 	sk = sock->sk;
1562 
1563 	if (cfg != NULL)
1564 		encap = cfg->encap;
1565 
1566 	/* Quick sanity checks */
1567 	switch (encap) {
1568 	case L2TP_ENCAPTYPE_UDP:
1569 		err = -EPROTONOSUPPORT;
1570 		if (sk->sk_protocol != IPPROTO_UDP) {
1571 			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1572 			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1573 			goto err;
1574 		}
1575 		break;
1576 	case L2TP_ENCAPTYPE_IP:
1577 		err = -EPROTONOSUPPORT;
1578 		if (sk->sk_protocol != IPPROTO_L2TP) {
1579 			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1580 			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1581 			goto err;
1582 		}
1583 		break;
1584 	}
1585 
1586 	/* Check if this socket has already been prepped */
1587 	tunnel = l2tp_tunnel(sk);
1588 	if (tunnel != NULL) {
1589 		/* This socket has already been prepped */
1590 		err = -EBUSY;
1591 		goto err;
1592 	}
1593 
1594 	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1595 	if (tunnel == NULL) {
1596 		err = -ENOMEM;
1597 		goto err;
1598 	}
1599 
1600 	tunnel->version = version;
1601 	tunnel->tunnel_id = tunnel_id;
1602 	tunnel->peer_tunnel_id = peer_tunnel_id;
1603 	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1604 
1605 	tunnel->magic = L2TP_TUNNEL_MAGIC;
1606 	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1607 	rwlock_init(&tunnel->hlist_lock);
1608 
1609 	/* The net we belong to */
1610 	tunnel->l2tp_net = net;
1611 	pn = l2tp_pernet(net);
1612 
1613 	if (cfg != NULL)
1614 		tunnel->debug = cfg->debug;
1615 
1616 #if IS_ENABLED(CONFIG_IPV6)
1617 	if (sk->sk_family == PF_INET6) {
1618 		struct ipv6_pinfo *np = inet6_sk(sk);
1619 
1620 		if (ipv6_addr_v4mapped(&np->saddr) &&
1621 		    ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
1622 			struct inet_sock *inet = inet_sk(sk);
1623 
1624 			tunnel->v4mapped = true;
1625 			inet->inet_saddr = np->saddr.s6_addr32[3];
1626 			inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
1627 			inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
1628 		} else {
1629 			tunnel->v4mapped = false;
1630 		}
1631 	}
1632 #endif
1633 
1634 	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1635 	tunnel->encap = encap;
1636 	if (encap == L2TP_ENCAPTYPE_UDP) {
1637 		struct udp_tunnel_sock_cfg udp_cfg = { };
1638 
1639 		udp_cfg.sk_user_data = tunnel;
1640 		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
1641 		udp_cfg.encap_rcv = l2tp_udp_encap_recv;
1642 		udp_cfg.encap_destroy = l2tp_udp_encap_destroy;
1643 
1644 		setup_udp_tunnel_sock(net, sock, &udp_cfg);
1645 	} else {
1646 		sk->sk_user_data = tunnel;
1647 	}
1648 
1649 	/* Hook on the tunnel socket destructor so that we can cleanup
1650 	 * if the tunnel socket goes away.
1651 	 */
1652 	tunnel->old_sk_destruct = sk->sk_destruct;
1653 	sk->sk_destruct = &l2tp_tunnel_destruct;
1654 	tunnel->sock = sk;
1655 	tunnel->fd = fd;
1656 	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1657 
1658 	sk->sk_allocation = GFP_ATOMIC;
1659 
1660 	/* Init delete workqueue struct */
1661 	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1662 
1663 	/* Add tunnel to our list */
1664 	INIT_LIST_HEAD(&tunnel->list);
1665 	atomic_inc(&l2tp_tunnel_count);
1666 
1667 	/* Bump the reference count. The tunnel context is deleted
1668 	 * only when this drops to zero. Must be done before list insertion
1669 	 */
1670 	refcount_set(&tunnel->ref_count, 1);
1671 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1672 	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1673 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1674 
1675 	err = 0;
1676 err:
1677 	if (tunnelp)
1678 		*tunnelp = tunnel;
1679 
1680 	/* If tunnel's socket was created by the kernel, it doesn't
1681 	 *  have a file.
1682 	 */
1683 	if (sock && sock->file)
1684 		sockfd_put(sock);
1685 
1686 	return err;
1687 }
1688 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1689 
1690 /* This function is used by the netlink TUNNEL_DELETE command.
1691  */
1692 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1693 {
1694 	l2tp_tunnel_inc_refcount(tunnel);
1695 	if (!queue_work(l2tp_wq, &tunnel->del_work)) {
1696 		l2tp_tunnel_dec_refcount(tunnel);
1697 		return 1;
1698 	}
1699 	return 0;
1700 }
1701 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1702 
1703 /* Really kill the session.
1704  */
1705 void l2tp_session_free(struct l2tp_session *session)
1706 {
1707 	struct l2tp_tunnel *tunnel = session->tunnel;
1708 
1709 	BUG_ON(refcount_read(&session->ref_count) != 0);
1710 
1711 	if (tunnel) {
1712 		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1713 		if (session->session_id != 0)
1714 			atomic_dec(&l2tp_session_count);
1715 		sock_put(tunnel->sock);
1716 		session->tunnel = NULL;
1717 		l2tp_tunnel_dec_refcount(tunnel);
1718 	}
1719 
1720 	kfree(session);
1721 }
1722 EXPORT_SYMBOL_GPL(l2tp_session_free);
1723 
1724 /* Remove an l2tp session from l2tp_core's hash lists.
1725  * Provides a tidyup interface for pseudowire code which can't just route all
1726  * shutdown via l2tp_session_delete and a pseudowire-specific session_close
1727  * callback.
1728  */
1729 void __l2tp_session_unhash(struct l2tp_session *session)
1730 {
1731 	struct l2tp_tunnel *tunnel = session->tunnel;
1732 
1733 	/* Remove the session from core hashes */
1734 	if (tunnel) {
1735 		/* Remove from the per-tunnel hash */
1736 		write_lock_bh(&tunnel->hlist_lock);
1737 		hlist_del_init(&session->hlist);
1738 		write_unlock_bh(&tunnel->hlist_lock);
1739 
1740 		/* For L2TPv3 we have a per-net hash: remove from there, too */
1741 		if (tunnel->version != L2TP_HDR_VER_2) {
1742 			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1743 			spin_lock_bh(&pn->l2tp_session_hlist_lock);
1744 			hlist_del_init_rcu(&session->global_hlist);
1745 			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1746 			synchronize_rcu();
1747 		}
1748 	}
1749 }
1750 EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
1751 
1752 /* This function is used by the netlink SESSION_DELETE command and by
1753  * pseudowire modules.
1754  */
1755 int l2tp_session_delete(struct l2tp_session *session)
1756 {
1757 	if (session->ref)
1758 		(*session->ref)(session);
1759 	__l2tp_session_unhash(session);
1760 	l2tp_session_queue_purge(session);
1761 	if (session->session_close != NULL)
1762 		(*session->session_close)(session);
1763 	if (session->deref)
1764 		(*session->deref)(session);
1765 	l2tp_session_dec_refcount(session);
1766 	return 0;
1767 }
1768 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1769 
1770 /* We come here whenever a session's send_seq, cookie_len or
1771  * l2specific_len parameters are set.
1772  */
1773 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1774 {
1775 	if (version == L2TP_HDR_VER_2) {
1776 		session->hdr_len = 6;
1777 		if (session->send_seq)
1778 			session->hdr_len += 4;
1779 	} else {
1780 		session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1781 		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1782 			session->hdr_len += 4;
1783 	}
1784 
1785 }
1786 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1787 
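/* Allocate and initialise a session context with priv_size bytes of
 * pseudowire-private data appended, then add it to the parent tunnel's
 * session hashes. Returns the new session or an ERR_PTR on failure.
 */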
1788 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1789 {
1790 	struct l2tp_session *session;
1791 	int err;
1792 
1793 	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1794 	if (session != NULL) {
1795 		session->magic = L2TP_SESSION_MAGIC;
1796 		session->tunnel = tunnel;
1797 
1798 		session->session_id = session_id;
1799 		session->peer_session_id = peer_session_id;
1800 		session->nr = 0;
1801 		if (tunnel->version == L2TP_HDR_VER_2)
1802 			session->nr_max = 0xffff;
1803 		else
1804 			session->nr_max = 0xffffff;
1805 		session->nr_window_size = session->nr_max / 2;
1806 		session->nr_oos_count_max = 4;
1807 
1808 		/* Use NR of first received packet */
1809 		session->reorder_skip = 1;
1810 
1811 		sprintf(&session->name[0], "sess %u/%u",
1812 			tunnel->tunnel_id, session->session_id);
1813 
1814 		skb_queue_head_init(&session->reorder_q);
1815 
1816 		INIT_HLIST_NODE(&session->hlist);
1817 		INIT_HLIST_NODE(&session->global_hlist);
1818 
1819 		/* Inherit debug options from tunnel */
1820 		session->debug = tunnel->debug;
1821 
1822 		if (cfg) {
1823 			session->pwtype = cfg->pw_type;
1824 			session->debug = cfg->debug;
1825 			session->mtu = cfg->mtu;
1826 			session->mru = cfg->mru;
1827 			session->send_seq = cfg->send_seq;
1828 			session->recv_seq = cfg->recv_seq;
1829 			session->lns_mode = cfg->lns_mode;
1830 			session->reorder_timeout = cfg->reorder_timeout;
1831 			session->offset = cfg->offset;
1832 			session->l2specific_type = cfg->l2specific_type;
1833 			session->l2specific_len = cfg->l2specific_len;
1834 			session->cookie_len = cfg->cookie_len;
1835 			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1836 			session->peer_cookie_len = cfg->peer_cookie_len;
1837 			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1838 		}
1839 
1840 		if (tunnel->version == L2TP_HDR_VER_2)
1841 			session->build_header = l2tp_build_l2tpv2_header;
1842 		else
1843 			session->build_header = l2tp_build_l2tpv3_header;
1844 
1845 		l2tp_session_set_header_len(session, tunnel->version);
1846 
1847 		err = l2tp_session_add_to_tunnel(tunnel, session);
1848 		if (err) {
1849 			kfree(session);
1850 
1851 			return ERR_PTR(err);
1852 		}
1853 
1854 		/* Bump the reference count. The session context is deleted
1855 		 * only when this drops to zero.
1856 		 */
1857 		refcount_set(&session->ref_count, 1);
1858 		l2tp_tunnel_inc_refcount(tunnel);
1859 
1860 		/* Ensure tunnel socket isn't deleted */
1861 		sock_hold(tunnel->sock);
1862 
1863 		/* Ignore management session in session count value */
1864 		if (session->session_id != 0)
1865 			atomic_inc(&l2tp_session_count);
1866 
1867 		return session;
1868 	}
1869 
1870 	return ERR_PTR(-ENOMEM);
1871 }
1872 EXPORT_SYMBOL_GPL(l2tp_session_create);
1873 
1874 /*****************************************************************************
1875  * Init and cleanup
1876  *****************************************************************************/
1877 
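/* Per-net init: initialise the tunnel list and the L2TPv3 per-net session
 * hash.
 */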
1878 static __net_init int l2tp_init_net(struct net *net)
1879 {
1880 	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1881 	int hash;
1882 
1883 	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1884 	spin_lock_init(&pn->l2tp_tunnel_list_lock);
1885 
1886 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1887 		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1888 
1889 	spin_lock_init(&pn->l2tp_session_hlist_lock);
1890 
1891 	return 0;
1892 }
1893 
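/* Per-net exit: delete all tunnels in the namespace and wait for their
 * deferred deletion work and RCU callbacks to complete.
 */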
1894 static __net_exit void l2tp_exit_net(struct net *net)
1895 {
1896 	struct l2tp_net *pn = l2tp_pernet(net);
1897 	struct l2tp_tunnel *tunnel = NULL;
1898 
1899 	rcu_read_lock_bh();
1900 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1901 		(void)l2tp_tunnel_delete(tunnel);
1902 	}
1903 	rcu_read_unlock_bh();
1904 
1905 	flush_workqueue(l2tp_wq);
1906 	rcu_barrier();
1907 }
1908 
1909 static struct pernet_operations l2tp_net_ops = {
1910 	.init = l2tp_init_net,
1911 	.exit = l2tp_exit_net,
1912 	.id   = &l2tp_net_id,
1913 	.size = sizeof(struct l2tp_net),
1914 };
1915 
1916 static int __init l2tp_init(void)
1917 {
1918 	int rc = 0;
1919 
1920 	rc = register_pernet_device(&l2tp_net_ops);
1921 	if (rc)
1922 		goto out;
1923 
1924 	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1925 	if (!l2tp_wq) {
1926 		pr_err("alloc_workqueue failed\n");
1927 		unregister_pernet_device(&l2tp_net_ops);
1928 		rc = -ENOMEM;
1929 		goto out;
1930 	}
1931 
1932 	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1933 
1934 out:
1935 	return rc;
1936 }
1937 
1938 static void __exit l2tp_exit(void)
1939 {
1940 	unregister_pernet_device(&l2tp_net_ops);
1941 	if (l2tp_wq) {
1942 		destroy_workqueue(l2tp_wq);
1943 		l2tp_wq = NULL;
1944 	}
1945 }
1946 
1947 module_init(l2tp_init);
1948 module_exit(l2tp_exit);
1949 
1950 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1951 MODULE_DESCRIPTION("L2TP core");
1952 MODULE_LICENSE("GPL");
1953 MODULE_VERSION(L2TP_DRV_VERSION);
1954 
1955