xref: /linux/net/l2tp/l2tp_core.c (revision 39daa09d34ada1bc7227d68def63e0a2105b5496)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:	Martijn van Oosterhout <kleptog@svana.org>
10  *		James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *		Michal Ostrowski <mostrows@speakeasy.net>
13  *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *		David S. Miller (davem@redhat.com)
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/list.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/spinlock.h>
27 #include <linux/kthread.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/errno.h>
31 #include <linux/jiffies.h>
32 
33 #include <linux/netdevice.h>
34 #include <linux/net.h>
35 #include <linux/inetdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/init.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <linux/udp.h>
41 #include <linux/l2tp.h>
42 #include <linux/sort.h>
43 #include <linux/file.h>
44 #include <linux/nsproxy.h>
45 #include <net/net_namespace.h>
46 #include <net/netns/generic.h>
47 #include <net/dst.h>
48 #include <net/ip.h>
49 #include <net/udp.h>
50 #include <net/udp_tunnel.h>
51 #include <net/inet_common.h>
52 #include <net/xfrm.h>
53 #include <net/protocol.h>
54 #include <net/inet6_connection_sock.h>
55 #include <net/inet_ecn.h>
56 #include <net/ip6_route.h>
57 #include <net/ip6_checksum.h>
58 
59 #include <asm/byteorder.h>
60 #include <linux/atomic.h>
61 
62 #include "l2tp_core.h"
63 
64 #define CREATE_TRACE_POINTS
65 #include "trace.h"
66 
67 #define L2TP_DRV_VERSION	"V2.0"
68 
69 /* L2TP header constants */
70 #define L2TP_HDRFLAG_T	   0x8000
71 #define L2TP_HDRFLAG_L	   0x4000
72 #define L2TP_HDRFLAG_S	   0x0800
73 #define L2TP_HDRFLAG_O	   0x0200
74 #define L2TP_HDRFLAG_P	   0x0100
75 
76 #define L2TP_HDR_VER_MASK  0x000F
77 #define L2TP_HDR_VER_2	   0x0002
78 #define L2TP_HDR_VER_3	   0x0003
79 
80 /* L2TPv3 default L2-specific sublayer */
81 #define L2TP_SLFLAG_S	   0x40000000
82 #define L2TP_SL_SEQ_MASK   0x00ffffff
83 
84 #define L2TP_HDR_SIZE_MAX		14
85 
86 /* Default debug flags */
87 #define L2TP_DEFAULT_DEBUG_FLAGS	0
88 
89 /* Private data stored for received packets in the skb.
90  */
91 struct l2tp_skb_cb {
92 	u32			ns;
93 	u16			has_seq;
94 	u16			length;
95 	unsigned long		expires;
96 };
97 
98 #define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
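/* For illustration: skb->cb[] is 48 bytes and struct inet_skb_parm owns the
 * front of it, so the L2TP private area starts immediately after it. A
 * hypothetical compile-time check (not part of this driver) would be:
 *
 *	BUILD_BUG_ON(sizeof(struct inet_skb_parm) + sizeof(struct l2tp_skb_cb) >
 *		     sizeof_field(struct sk_buff, cb));
 */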
99 
100 static struct workqueue_struct *l2tp_wq;
101 
102 /* per-net private data for this module */
103 static unsigned int l2tp_net_id;
104 struct l2tp_net {
105 	/* Lock for write access to l2tp_tunnel_idr */
106 	spinlock_t l2tp_tunnel_idr_lock;
107 	struct idr l2tp_tunnel_idr;
108 	/* Lock for write access to l2tp_v[23]_session_idr/htable */
109 	spinlock_t l2tp_session_idr_lock;
110 	struct idr l2tp_v2_session_idr;
111 	struct idr l2tp_v3_session_idr;
112 	struct hlist_head l2tp_v3_session_htable[16];
113 };
114 
115 static inline u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
116 {
117 	return ((u32)tunnel_id) << 16 | session_id;
118 }
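/* For example (illustrative): tunnel_id 0x1234 and session_id 0xabcd pack
 * into the single IDR key 0x1234abcd:
 *
 *	l2tp_v2_session_key(0x1234, 0xabcd) == 0x1234abcd
 */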
119 
120 static inline unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
121 {
122 	return ((unsigned long)sk) + session_id;
123 }
124 
125 #if IS_ENABLED(CONFIG_IPV6)
126 static bool l2tp_sk_is_v6(struct sock *sk)
127 {
128 	return sk->sk_family == PF_INET6 &&
129 	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
130 }
131 #endif
132 
133 static inline struct l2tp_net *l2tp_pernet(const struct net *net)
134 {
135 	return net_generic(net, l2tp_net_id);
136 }
137 
138 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
139 {
140 	trace_free_tunnel(tunnel);
141 	sock_put(tunnel->sock);
142 	/* the tunnel is freed in the socket destructor */
143 }
144 
145 static void l2tp_session_free(struct l2tp_session *session)
146 {
147 	trace_free_session(session);
148 	if (session->tunnel)
149 		l2tp_tunnel_dec_refcount(session->tunnel);
150 	kfree(session);
151 }
152 
153 struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
154 {
155 	struct l2tp_tunnel *tunnel = sk->sk_user_data;
156 
157 	if (tunnel)
158 		if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
159 			return NULL;
160 
161 	return tunnel;
162 }
163 EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
164 
165 void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
166 {
167 	refcount_inc(&tunnel->ref_count);
168 }
169 EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
170 
171 void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
172 {
173 	if (refcount_dec_and_test(&tunnel->ref_count))
174 		l2tp_tunnel_free(tunnel);
175 }
176 EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
177 
178 void l2tp_session_inc_refcount(struct l2tp_session *session)
179 {
180 	refcount_inc(&session->ref_count);
181 }
182 EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
183 
184 void l2tp_session_dec_refcount(struct l2tp_session *session)
185 {
186 	if (refcount_dec_and_test(&session->ref_count))
187 		l2tp_session_free(session);
188 }
189 EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
190 
191 /* Lookup a tunnel. A new reference is held on the returned tunnel. */
192 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
193 {
194 	const struct l2tp_net *pn = l2tp_pernet(net);
195 	struct l2tp_tunnel *tunnel;
196 
197 	rcu_read_lock_bh();
198 	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
199 	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
200 		rcu_read_unlock_bh();
201 		return tunnel;
202 	}
203 	rcu_read_unlock_bh();
204 
205 	return NULL;
206 }
207 EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
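/* Usage sketch (illustrative only): the caller owns the reference returned
 * by the lookup and must drop it when done:
 *
 *	tunnel = l2tp_tunnel_get(net, 42);
 *	if (tunnel) {
 *		... use tunnel ...
 *		l2tp_tunnel_dec_refcount(tunnel);
 *	}
 */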
208 
209 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
210 {
211 	struct l2tp_net *pn = l2tp_pernet(net);
212 	unsigned long tunnel_id, tmp;
213 	struct l2tp_tunnel *tunnel;
214 	int count = 0;
215 
216 	rcu_read_lock_bh();
217 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
218 		if (tunnel && ++count > nth &&
219 		    refcount_inc_not_zero(&tunnel->ref_count)) {
220 			rcu_read_unlock_bh();
221 			return tunnel;
222 		}
223 	}
224 	rcu_read_unlock_bh();
225 
226 	return NULL;
227 }
228 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
229 
230 struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
231 {
232 	const struct l2tp_net *pn = l2tp_pernet(net);
233 	struct l2tp_session *session;
234 
235 	rcu_read_lock_bh();
236 	session = idr_find(&pn->l2tp_v3_session_idr, session_id);
237 	if (session && !hash_hashed(&session->hlist) &&
238 	    refcount_inc_not_zero(&session->ref_count)) {
239 		rcu_read_unlock_bh();
240 		return session;
241 	}
242 
243 	/* If we get here and session is non-NULL, the session_id
244 	 * collides with one in another tunnel. If sk is non-NULL,
245 	 * find the session matching sk.
246 	 */
247 	if (session && sk) {
248 		unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);
249 
250 		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
251 					   hlist, key) {
252 			if (session->tunnel->sock == sk &&
253 			    refcount_inc_not_zero(&session->ref_count)) {
254 				rcu_read_unlock_bh();
255 				return session;
256 			}
257 		}
258 	}
259 	rcu_read_unlock_bh();
260 
261 	return NULL;
262 }
263 EXPORT_SYMBOL_GPL(l2tp_v3_session_get);
264 
265 struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
266 {
267 	u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
268 	const struct l2tp_net *pn = l2tp_pernet(net);
269 	struct l2tp_session *session;
270 
271 	rcu_read_lock_bh();
272 	session = idr_find(&pn->l2tp_v2_session_idr, session_key);
273 	if (session && refcount_inc_not_zero(&session->ref_count)) {
274 		rcu_read_unlock_bh();
275 		return session;
276 	}
277 	rcu_read_unlock_bh();
278 
279 	return NULL;
280 }
281 EXPORT_SYMBOL_GPL(l2tp_v2_session_get);
282 
283 struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
284 				      u32 tunnel_id, u32 session_id)
285 {
286 	if (pver == L2TP_HDR_VER_2)
287 		return l2tp_v2_session_get(net, tunnel_id, session_id);
288 	else
289 		return l2tp_v3_session_get(net, sk, session_id);
290 }
291 EXPORT_SYMBOL_GPL(l2tp_session_get);
292 
293 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
294 {
295 	struct l2tp_session *session;
296 	int count = 0;
297 
298 	rcu_read_lock_bh();
299 	list_for_each_entry_rcu(session, &tunnel->session_list, list) {
300 		if (++count > nth) {
301 			l2tp_session_inc_refcount(session);
302 			rcu_read_unlock_bh();
303 			return session;
304 		}
305 	}
306 	rcu_read_unlock_bh();
307 
308 	return NULL;
309 }
310 EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
311 
312 /* Lookup a session by interface name.
313  * This is very inefficient but is only used by management interfaces.
314  */
315 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
316 						const char *ifname)
317 {
318 	struct l2tp_net *pn = l2tp_pernet(net);
319 	unsigned long tunnel_id, tmp;
320 	struct l2tp_session *session;
321 	struct l2tp_tunnel *tunnel;
322 
323 	rcu_read_lock_bh();
324 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
325 		if (tunnel) {
326 			list_for_each_entry_rcu(session, &tunnel->session_list, list) {
327 				if (!strcmp(session->ifname, ifname)) {
328 					l2tp_session_inc_refcount(session);
329 					rcu_read_unlock_bh();
330 
331 					return session;
332 				}
333 			}
334 		}
335 	}
336 	rcu_read_unlock_bh();
337 
338 	return NULL;
339 }
340 EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
341 
342 static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
343 				       struct l2tp_session *session)
344 {
345 	l2tp_session_inc_refcount(session);
346 	WARN_ON_ONCE(session->coll_list);
347 	session->coll_list = clist;
348 	spin_lock(&clist->lock);
349 	list_add(&session->clist, &clist->list);
350 	spin_unlock(&clist->lock);
351 }
352 
353 static int l2tp_session_collision_add(struct l2tp_net *pn,
354 				      struct l2tp_session *session1,
355 				      struct l2tp_session *session2)
356 {
357 	struct l2tp_session_coll_list *clist;
358 
359 	lockdep_assert_held(&pn->l2tp_session_idr_lock);
360 
361 	if (!session2)
362 		return -EEXIST;
363 
364 	/* If existing session is in IP-encap tunnel, refuse new session */
365 	if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
366 		return -EEXIST;
367 
368 	clist = session2->coll_list;
369 	if (!clist) {
370 		/* First collision. Allocate list to manage the collided sessions
371 		 * and add the existing session to the list.
372 		 */
373 		clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
374 		if (!clist)
375 			return -ENOMEM;
376 
377 		spin_lock_init(&clist->lock);
378 		INIT_LIST_HEAD(&clist->list);
379 		refcount_set(&clist->ref_count, 1);
380 		l2tp_session_coll_list_add(clist, session2);
381 	}
382 
383 	/* If existing session isn't already in the session hlist, add it. */
384 	if (!hash_hashed(&session2->hlist))
385 		hash_add(pn->l2tp_v3_session_htable, &session2->hlist,
386 			 session2->hlist_key);
387 
388 	/* Add new session to the hlist and collision list */
389 	hash_add(pn->l2tp_v3_session_htable, &session1->hlist,
390 		 session1->hlist_key);
391 	refcount_inc(&clist->ref_count);
392 	l2tp_session_coll_list_add(clist, session1);
393 
394 	return 0;
395 }
396 
397 static void l2tp_session_collision_del(struct l2tp_net *pn,
398 				       struct l2tp_session *session)
399 {
400 	struct l2tp_session_coll_list *clist = session->coll_list;
401 	unsigned long session_key = session->session_id;
402 	struct l2tp_session *session2;
403 
404 	lockdep_assert_held(&pn->l2tp_session_idr_lock);
405 
406 	hash_del(&session->hlist);
407 
408 	if (clist) {
409 		/* Remove session from its collision list. If there
410 		 * are other sessions with the same ID, replace this
411 		 * session's IDR entry with that session, otherwise
412 		 * remove the IDR entry. If this is the last session,
413 		 * the collision list data is freed.
414 		 */
415 		spin_lock(&clist->lock);
416 		list_del_init(&session->clist);
417 		session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
418 		if (session2) {
419 			void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);
420 
421 			WARN_ON_ONCE(IS_ERR_VALUE(old));
422 		} else {
423 			void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);
424 
425 			WARN_ON_ONCE(removed != session);
426 		}
427 		session->coll_list = NULL;
428 		spin_unlock(&clist->lock);
429 		if (refcount_dec_and_test(&clist->ref_count))
430 			kfree(clist);
431 		l2tp_session_dec_refcount(session);
432 	}
433 }
434 
435 int l2tp_session_register(struct l2tp_session *session,
436 			  struct l2tp_tunnel *tunnel)
437 {
438 	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
439 	u32 session_key;
440 	int err;
441 
442 	spin_lock_bh(&tunnel->list_lock);
443 	if (!tunnel->acpt_newsess) {
444 		err = -ENODEV;
445 		goto err_tlock;
446 	}
447 
448 	if (tunnel->version == L2TP_HDR_VER_3) {
449 		session_key = session->session_id;
450 		spin_lock_bh(&pn->l2tp_session_idr_lock);
451 		err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
452 				    &session_key, session_key, GFP_ATOMIC);
453 		/* IP encap expects session IDs to be globally unique, while
454 		 * UDP encap doesn't. This isn't per the RFC, which says that
455 		 * sessions are identified only by the session ID, but is to
456 		 * support existing userspace which depends on it.
457 		 */
458 		if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
459 			struct l2tp_session *session2;
460 
461 			session2 = idr_find(&pn->l2tp_v3_session_idr,
462 					    session_key);
463 			err = l2tp_session_collision_add(pn, session, session2);
464 		}
465 		spin_unlock_bh(&pn->l2tp_session_idr_lock);
466 	} else {
467 		session_key = l2tp_v2_session_key(tunnel->tunnel_id,
468 						  session->session_id);
469 		spin_lock_bh(&pn->l2tp_session_idr_lock);
470 		err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
471 				    &session_key, session_key, GFP_ATOMIC);
472 		spin_unlock_bh(&pn->l2tp_session_idr_lock);
473 	}
474 
475 	if (err) {
476 		if (err == -ENOSPC)
477 			err = -EEXIST;
478 		goto err_tlock;
479 	}
480 
481 	l2tp_tunnel_inc_refcount(tunnel);
482 
483 	list_add(&session->list, &tunnel->session_list);
484 	spin_unlock_bh(&tunnel->list_lock);
485 
486 	spin_lock_bh(&pn->l2tp_session_idr_lock);
487 	if (tunnel->version == L2TP_HDR_VER_3)
488 		idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
489 	else
490 		idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
491 	spin_unlock_bh(&pn->l2tp_session_idr_lock);
492 
493 	trace_register_session(session);
494 
495 	return 0;
496 
497 err_tlock:
498 	spin_unlock_bh(&tunnel->list_lock);
499 
500 	return err;
501 }
502 EXPORT_SYMBOL_GPL(l2tp_session_register);
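/* Illustrative scenario for the v3 collision handling above: two UDP-encap
 * tunnels may each register a session with ID 5. The first registration
 * owns the IDR slot; the second is added to a collision list and both
 * sessions are hashed by (tunnel socket, session ID), letting
 * l2tp_v3_session_get() disambiguate by the sk a packet arrived on. Had
 * either tunnel used IP encap, the second register would fail with -EEXIST.
 */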
503 
504 /*****************************************************************************
505  * Receive data handling
506  *****************************************************************************/
507 
508 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
509  * number.
510  */
511 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
512 {
513 	struct sk_buff *skbp;
514 	struct sk_buff *tmp;
515 	u32 ns = L2TP_SKB_CB(skb)->ns;
516 
517 	spin_lock_bh(&session->reorder_q.lock);
518 	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
519 		if (L2TP_SKB_CB(skbp)->ns > ns) {
520 			__skb_queue_before(&session->reorder_q, skbp, skb);
521 			atomic_long_inc(&session->stats.rx_oos_packets);
522 			goto out;
523 		}
524 	}
525 
526 	__skb_queue_tail(&session->reorder_q, skb);
527 
528 out:
529 	spin_unlock_bh(&session->reorder_q.lock);
530 }
531 
532 /* Dequeue a single skb.
533  */
534 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
535 {
536 	struct l2tp_tunnel *tunnel = session->tunnel;
537 	int length = L2TP_SKB_CB(skb)->length;
538 
539 	/* We're about to requeue the skb, so return resources
540 	 * to its current owner (a socket receive buffer).
541 	 */
542 	skb_orphan(skb);
543 
544 	atomic_long_inc(&tunnel->stats.rx_packets);
545 	atomic_long_add(length, &tunnel->stats.rx_bytes);
546 	atomic_long_inc(&session->stats.rx_packets);
547 	atomic_long_add(length, &session->stats.rx_bytes);
548 
549 	if (L2TP_SKB_CB(skb)->has_seq) {
550 		/* Bump our Nr */
551 		session->nr++;
552 		session->nr &= session->nr_max;
553 		trace_session_seqnum_update(session);
554 	}
555 
556 	/* call private receive handler */
557 	if (session->recv_skb)
558 		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
559 	else
560 		kfree_skb(skb);
561 }
562 
563 /* Dequeue skbs from the session's reorder_q, subject to packet order.
564  * Skbs that have been in the queue for too long are simply discarded.
565  */
566 static void l2tp_recv_dequeue(struct l2tp_session *session)
567 {
568 	struct sk_buff *skb;
569 	struct sk_buff *tmp;
570 
571 	/* If the pkt at the head of the queue has the nr that we
572 	 * expect to send up next, dequeue it and any other
573 	 * in-sequence packets behind it.
574 	 */
575 start:
576 	spin_lock_bh(&session->reorder_q.lock);
577 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
578 		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
579 
580 		/* If the packet has been pending on the queue for too long, discard it */
581 		if (time_after(jiffies, cb->expires)) {
582 			atomic_long_inc(&session->stats.rx_seq_discards);
583 			atomic_long_inc(&session->stats.rx_errors);
584 			trace_session_pkt_expired(session, cb->ns);
585 			session->reorder_skip = 1;
586 			__skb_unlink(skb, &session->reorder_q);
587 			kfree_skb(skb);
588 			continue;
589 		}
590 
591 		if (cb->has_seq) {
592 			if (session->reorder_skip) {
593 				session->reorder_skip = 0;
594 				session->nr = cb->ns;
595 				trace_session_seqnum_reset(session);
596 			}
597 			if (cb->ns != session->nr)
598 				goto out;
599 		}
600 		__skb_unlink(skb, &session->reorder_q);
601 
602 		/* Process the skb. We release the queue lock while we
603 		 * do so to let other contexts process the queue.
604 		 */
605 		spin_unlock_bh(&session->reorder_q.lock);
606 		l2tp_recv_dequeue_skb(session, skb);
607 		goto start;
608 	}
609 
610 out:
611 	spin_unlock_bh(&session->reorder_q.lock);
612 }
613 
614 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
615 {
616 	u32 nws;
617 
618 	if (nr >= session->nr)
619 		nws = nr - session->nr;
620 	else
621 		nws = (session->nr_max + 1) - (session->nr - nr);
622 
623 	return nws < session->nr_window_size;
624 }
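/* Worked example (illustrative): with the 24-bit L2TPv3 sequence space
 * (nr_max = 0xffffff) and the default window of nr_max / 2, a sequence
 * number that has wrapped still lands inside the window:
 *
 *	session->nr = 0xfffffe, nr = 0x000001
 *	nws = (0xffffff + 1) - (0xfffffe - 0x000001) = 3
 *
 * and 3 < nr_window_size, so the packet is accepted.
 */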
625 
626 /* If packet has sequence numbers, queue it if acceptable. Returns 0 if
627  * acceptable, else non-zero.
628  */
629 static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
630 {
631 	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
632 
633 	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
634 		/* Packet sequence number is outside allowed window.
635 		 * Discard it.
636 		 */
637 		trace_session_pkt_outside_rx_window(session, cb->ns);
638 		goto discard;
639 	}
640 
641 	if (session->reorder_timeout != 0) {
642 		/* Packet reordering enabled. Add skb to session's
643 		 * reorder queue, in order of ns.
644 		 */
645 		l2tp_recv_queue_skb(session, skb);
646 		goto out;
647 	}
648 
649 	/* Packet reordering disabled. Discard out-of-sequence packets, while
650 	 * tracking the number of in-sequence packets after the first OOS packet
651 	 * is seen. After nr_oos_count_max in-sequence packets, reset the
652 	 * sequence number to re-enable packet reception.
653 	 */
654 	if (cb->ns == session->nr) {
655 		skb_queue_tail(&session->reorder_q, skb);
656 	} else {
657 		u32 nr_oos = cb->ns;
658 		u32 nr_next = (session->nr_oos + 1) & session->nr_max;
659 
660 		if (nr_oos == nr_next)
661 			session->nr_oos_count++;
662 		else
663 			session->nr_oos_count = 0;
664 
665 		session->nr_oos = nr_oos;
666 		if (session->nr_oos_count > session->nr_oos_count_max) {
667 			session->reorder_skip = 1;
668 		}
669 		if (!session->reorder_skip) {
670 			atomic_long_inc(&session->stats.rx_seq_discards);
671 			trace_session_pkt_oos(session, cb->ns);
672 			goto discard;
673 		}
674 		skb_queue_tail(&session->reorder_q, skb);
675 	}
676 
677 out:
678 	return 0;
679 
680 discard:
681 	return 1;
682 }
683 
684 /* Do receive processing of L2TP data frames. We handle both L2TPv2
685  * and L2TPv3 data frames here.
686  *
687  * L2TPv2 Data Message Header
688  *
689  *  0                   1                   2                   3
690  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
691  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
692  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
693  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
694  * |           Tunnel ID           |           Session ID          |
695  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
696  * |             Ns (opt)          |             Nr (opt)          |
697  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
698  * |      Offset Size (opt)        |    Offset pad... (opt)
699  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
700  *
701  * Data frames are marked by T=0. All other fields are the same as
702  * those in L2TP control frames.
703  *
704  * L2TPv3 Data Message Header
705  *
706  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
707  * |                      L2TP Session Header                      |
708  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
709  * |                      L2-Specific Sublayer                     |
710  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
711  * |                        Tunnel Payload                      ...
712  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
713  *
714  * L2TPv3 Session Header Over IP
715  *
716  *  0                   1                   2                   3
717  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
718  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
719  * |                           Session ID                          |
720  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
721  * |               Cookie (optional, maximum 64 bits)...
722  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
723  *                                                                 |
724  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
725  *
726  * L2TPv3 L2-Specific Sublayer Format
727  *
728  *  0                   1                   2                   3
729  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
730  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
731  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
732  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
733  *
734  * Cookie value and sublayer format are negotiated with the peer when
735  * the session is set up. Unlike L2TPv2, we do not need to parse the
736  * packet header to determine if optional fields are present.
737  *
738  * Caller must already have parsed the frame and determined that it is
739  * a data (not control) frame before coming here. Fields up to the
740  * session-id have already been parsed and ptr points to the data
741  * after the session-id.
742  * after the session-id.
 */
743 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
744 		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
745 		      int length)
746 {
747 	struct l2tp_tunnel *tunnel = session->tunnel;
748 	int offset;
749 
750 	/* Parse and check optional cookie */
751 	if (session->peer_cookie_len > 0) {
752 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
753 			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
754 					     tunnel->name, tunnel->tunnel_id,
755 					     session->session_id);
756 			atomic_long_inc(&session->stats.rx_cookie_discards);
757 			goto discard;
758 		}
759 		ptr += session->peer_cookie_len;
760 	}
761 
762 	/* Handle the optional sequence numbers. Sequence numbers are
763 	 * in different places for L2TPv2 and L2TPv3.
764 	 *
765 	 * If we are the LAC, enable/disable sequence numbers under
766 	 * the control of the LNS.  If no sequence numbers present but
767 	 * we were expecting them, discard frame.
768 	 */
769 	L2TP_SKB_CB(skb)->has_seq = 0;
770 	if (tunnel->version == L2TP_HDR_VER_2) {
771 		if (hdrflags & L2TP_HDRFLAG_S) {
772 			/* Store L2TP info in the skb */
773 			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
774 			L2TP_SKB_CB(skb)->has_seq = 1;
775 			ptr += 2;
776 			/* Skip past nr in the header */
777 			ptr += 2;
778 
779 		}
780 	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
781 		u32 l2h = ntohl(*(__be32 *)ptr);
782 
783 		if (l2h & L2TP_SLFLAG_S) {
784 			/* Store L2TP info in the skb */
785 			L2TP_SKB_CB(skb)->ns = l2h & L2TP_SL_SEQ_MASK;
786 			L2TP_SKB_CB(skb)->has_seq = 1;
787 		}
788 		ptr += 4;
789 	}
790 
791 	if (L2TP_SKB_CB(skb)->has_seq) {
792 		/* Received a packet with sequence numbers. If we're the LAC,
793 		 * check if we are sending sequence numbers and if not,
794 		 * enable them.
795 		 */
796 		if (!session->lns_mode && !session->send_seq) {
797 			trace_session_seqnum_lns_enable(session);
798 			session->send_seq = 1;
799 			l2tp_session_set_header_len(session, tunnel->version);
800 		}
801 	} else {
802 		/* No sequence numbers.
803 		 * If user has configured mandatory sequence numbers, discard.
804 		 */
805 		if (session->recv_seq) {
806 			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
807 					     session->name);
808 			atomic_long_inc(&session->stats.rx_seq_discards);
809 			goto discard;
810 		}
811 
812 		/* If we're the LAC and we're sending sequence numbers, the
813 		 * LNS has requested that we no longer send sequence numbers.
814 		 * If we're the LNS and we're sending sequence numbers, the
815 		 * LAC is broken. Discard the frame.
816 		 */
817 		if (!session->lns_mode && session->send_seq) {
818 			trace_session_seqnum_lns_disable(session);
819 			session->send_seq = 0;
820 			l2tp_session_set_header_len(session, tunnel->version);
821 		} else if (session->send_seq) {
822 			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
823 					     session->name);
824 			atomic_long_inc(&session->stats.rx_seq_discards);
825 			goto discard;
826 		}
827 	}
828 
829 	/* Session data offset is defined only for L2TPv2 and is
830 	 * indicated by an optional 16-bit value in the header.
831 	 */
832 	if (tunnel->version == L2TP_HDR_VER_2) {
833 		/* If offset bit set, skip it. */
834 		if (hdrflags & L2TP_HDRFLAG_O) {
835 			offset = ntohs(*(__be16 *)ptr);
836 			ptr += 2 + offset;
837 		}
838 	}
839 
840 	offset = ptr - optr;
841 	if (!pskb_may_pull(skb, offset))
842 		goto discard;
843 
844 	__skb_pull(skb, offset);
845 
846 	/* Prepare skb for adding to the session's reorder_q.  Hold
847 	 * packets for max reorder_timeout or 1 second if not
848 	 * reordering.
849 	 */
850 	L2TP_SKB_CB(skb)->length = length;
851 	L2TP_SKB_CB(skb)->expires = jiffies +
852 		(session->reorder_timeout ? session->reorder_timeout : HZ);
853 
854 	/* Add packet to the session's receive queue. Reordering is done here, if
855 	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
856 	 */
857 	if (L2TP_SKB_CB(skb)->has_seq) {
858 		if (l2tp_recv_data_seq(session, skb))
859 			goto discard;
860 	} else {
861 		/* No sequence numbers. Add the skb to the tail of the
862 		 * reorder queue. This ensures that it will be
863 		 * delivered after all previous sequenced skbs.
864 		 */
865 		skb_queue_tail(&session->reorder_q, skb);
866 	}
867 
868 	/* Try to dequeue as many skbs from reorder_q as we can. */
869 	l2tp_recv_dequeue(session);
870 
871 	return;
872 
873 discard:
874 	atomic_long_inc(&session->stats.rx_errors);
875 	kfree_skb(skb);
876 }
877 EXPORT_SYMBOL_GPL(l2tp_recv_common);
878 
879 /* Drop skbs from the session's reorder_q
880  */
881 static void l2tp_session_queue_purge(struct l2tp_session *session)
882 {
883 	struct sk_buff *skb = NULL;
884 
885 	while ((skb = skb_dequeue(&session->reorder_q))) {
886 		atomic_long_inc(&session->stats.rx_errors);
887 		kfree_skb(skb);
888 	}
889 }
890 
891 /* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
892 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
893 {
894 	struct l2tp_session *session = NULL;
895 	struct l2tp_tunnel *tunnel = NULL;
896 	struct net *net = sock_net(sk);
897 	unsigned char *ptr, *optr;
898 	u16 hdrflags;
899 	u16 version;
900 	int length;
901 
902 	/* UDP has verified checksum */
903 
904 	/* UDP always verifies the packet length. */
905 	__skb_pull(skb, sizeof(struct udphdr));
906 
907 	/* Short packet? */
908 	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
909 		goto pass;
910 
911 	/* Point to L2TP header */
912 	optr = skb->data;
913 	ptr = skb->data;
914 
915 	/* Get L2TP header flags */
916 	hdrflags = ntohs(*(__be16 *)ptr);
917 
918 	/* Get protocol version */
919 	version = hdrflags & L2TP_HDR_VER_MASK;
920 
921 	/* Get length of L2TP packet */
922 	length = skb->len;
923 
924 	/* If this is a control packet, pass it to userspace. */
925 	if (hdrflags & L2TP_HDRFLAG_T)
926 		goto pass;
927 
928 	/* Skip flags */
929 	ptr += 2;
930 
931 	if (version == L2TP_HDR_VER_2) {
932 		u16 tunnel_id, session_id;
933 
934 		/* If length is present, skip it */
935 		if (hdrflags & L2TP_HDRFLAG_L)
936 			ptr += 2;
937 
938 		/* Extract tunnel and session ID */
939 		tunnel_id = ntohs(*(__be16 *)ptr);
940 		ptr += 2;
941 		session_id = ntohs(*(__be16 *)ptr);
942 		ptr += 2;
943 
944 		session = l2tp_v2_session_get(net, tunnel_id, session_id);
945 	} else {
946 		u32 session_id;
947 
948 		ptr += 2;	/* skip reserved bits */
949 		session_id = ntohl(*(__be32 *)ptr);
950 		ptr += 4;
951 
952 		session = l2tp_v3_session_get(net, sk, session_id);
953 	}
954 
955 	if (!session || !session->recv_skb) {
956 		if (session)
957 			l2tp_session_dec_refcount(session);
958 
959 		/* Not found? Pass to userspace to deal with */
960 		goto pass;
961 	}
962 
963 	tunnel = session->tunnel;
964 
965 	/* Check protocol version */
966 	if (version != tunnel->version)
967 		goto invalid;
968 
969 	if (version == L2TP_HDR_VER_3 &&
970 	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
971 		l2tp_session_dec_refcount(session);
972 		goto invalid;
973 	}
974 
975 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
976 	l2tp_session_dec_refcount(session);
977 
978 	return 0;
979 
980 invalid:
981 	atomic_long_inc(&tunnel->stats.rx_invalid);
982 
983 pass:
984 	/* Put UDP header back */
985 	__skb_push(skb, sizeof(struct udphdr));
986 
987 	return 1;
988 }
989 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
990 
991 /* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
992 static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
993 				    __be16 port, u32 info, u8 *payload)
994 {
995 	sk->sk_err = err;
996 	sk_error_report(sk);
997 
998 	if (ip_hdr(skb)->version == IPVERSION) {
999 		if (inet_test_bit(RECVERR, sk))
1000 			return ip_icmp_error(sk, skb, err, port, info, payload);
1001 #if IS_ENABLED(CONFIG_IPV6)
1002 	} else {
1003 		if (inet6_test_bit(RECVERR6, sk))
1004 			return ipv6_icmp_error(sk, skb, err, port, info, payload);
1005 #endif
1006 	}
1007 }
1008 
1009 /************************************************************************
1010  * Transmit handling
1011  ***********************************************************************/
1012 
1013 /* Build an L2TP header for the session into the buffer provided.
1014  */
1015 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1016 {
1017 	struct l2tp_tunnel *tunnel = session->tunnel;
1018 	__be16 *bufp = buf;
1019 	__be16 *optr = buf;
1020 	u16 flags = L2TP_HDR_VER_2;
1021 	u32 tunnel_id = tunnel->peer_tunnel_id;
1022 	u32 session_id = session->peer_session_id;
1023 
1024 	if (session->send_seq)
1025 		flags |= L2TP_HDRFLAG_S;
1026 
1027 	/* Setup L2TP header. */
1028 	*bufp++ = htons(flags);
1029 	*bufp++ = htons(tunnel_id);
1030 	*bufp++ = htons(session_id);
1031 	if (session->send_seq) {
1032 		*bufp++ = htons(session->ns);
1033 		*bufp++ = 0;
1034 		session->ns++;
1035 		session->ns &= 0xffff;
1036 		trace_session_seqnum_update(session);
1037 	}
1038 
1039 	return bufp - optr;
1040 }
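/* Worked example (illustrative): with peer_tunnel_id 5, peer_session_id 7,
 * send_seq enabled and ns == 1, the bytes written above are
 *
 *	08 02  00 05  00 07  00 01  00 00
 *	flags  tid    sid    Ns     Nr
 *
 * where flags = L2TP_HDRFLAG_S | L2TP_HDR_VER_2 = 0x0802, and Nr is always
 * sent as zero in data packets.
 */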
1041 
1042 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1043 {
1044 	struct l2tp_tunnel *tunnel = session->tunnel;
1045 	char *bufp = buf;
1046 	char *optr = bufp;
1047 
1048 	/* Setup L2TP header. The header differs slightly for UDP and
1049 	 * IP encapsulations. For UDP, there are 4 bytes of flags.
1050 	 */
1051 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1052 		u16 flags = L2TP_HDR_VER_3;
1053 		*((__be16 *)bufp) = htons(flags);
1054 		bufp += 2;
1055 		*((__be16 *)bufp) = 0;
1056 		bufp += 2;
1057 	}
1058 
1059 	*((__be32 *)bufp) = htonl(session->peer_session_id);
1060 	bufp += 4;
1061 	if (session->cookie_len) {
1062 		memcpy(bufp, &session->cookie[0], session->cookie_len);
1063 		bufp += session->cookie_len;
1064 	}
1065 	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1066 		u32 l2h = 0;
1067 
1068 		if (session->send_seq) {
1069 			l2h = L2TP_SLFLAG_S | session->ns;
1070 			session->ns++;
1071 			session->ns &= L2TP_SL_SEQ_MASK;
1072 			trace_session_seqnum_update(session);
1073 		}
1074 
1075 		*((__be32 *)bufp) = htonl(l2h);
1076 		bufp += 4;
1077 	}
1078 
1079 	return bufp - optr;
1080 }
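/* Worked example (illustrative): for UDP encap with a 4-byte cookie and the
 * default L2-specific sublayer, the header laid out above is
 *
 *	00 03 00 00 | session id (4) | cookie (4) | l2h (4)
 *
 * 16 bytes in total, matching l2tp_session_set_header_len() for this setup.
 */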
1081 
1082 /* Queue the packet to IP for output: tunnel socket lock must be held */
1083 static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
1084 {
1085 	int err;
1086 
1087 	skb->ignore_df = 1;
1088 	skb_dst_drop(skb);
1089 #if IS_ENABLED(CONFIG_IPV6)
1090 	if (l2tp_sk_is_v6(tunnel->sock))
1091 		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
1092 	else
1093 #endif
1094 		err = ip_queue_xmit(tunnel->sock, skb, fl);
1095 
1096 	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
1097 }
1098 
1099 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
1100 {
1101 	struct l2tp_tunnel *tunnel = session->tunnel;
1102 	unsigned int data_len = skb->len;
1103 	struct sock *sk = tunnel->sock;
1104 	int headroom, uhlen, udp_len;
1105 	int ret = NET_XMIT_SUCCESS;
1106 	struct inet_sock *inet;
1107 	struct udphdr *uh;
1108 
1109 	/* Check that there's enough headroom in the skb to insert IP,
1110 	 * UDP and L2TP headers. If not enough, expand it to
1111 	 * make room. Adjust truesize.
1112 	 */
1113 	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
1114 	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
1115 	if (skb_cow_head(skb, headroom)) {
1116 		kfree_skb(skb);
1117 		return NET_XMIT_DROP;
1118 	}
1119 
1120 	/* Setup L2TP header */
1121 	if (tunnel->version == L2TP_HDR_VER_2)
1122 		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
1123 	else
1124 		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
1125 
1126 	/* Reset skb netfilter state */
1127 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1128 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
1129 	nf_reset_ct(skb);
1130 
1131 	bh_lock_sock_nested(sk);
1132 	if (sock_owned_by_user(sk)) {
1133 		kfree_skb(skb);
1134 		ret = NET_XMIT_DROP;
1135 		goto out_unlock;
1136 	}
1137 
1138 	/* Userspace may change the connection status of a userspace-provided
1139 	 * socket at run time, so we must check it under the socket lock.
1140 	 */
1141 	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1142 		kfree_skb(skb);
1143 		ret = NET_XMIT_DROP;
1144 		goto out_unlock;
1145 	}
1146 
1147 	/* Report transmitted length before we add encap header, which keeps
1148 	 * statistics consistent for both UDP and IP encap tx/rx paths.
1149 	 */
1150 	*len = skb->len;
1151 
1152 	inet = inet_sk(sk);
1153 	switch (tunnel->encap) {
1154 	case L2TP_ENCAPTYPE_UDP:
1155 		/* Setup UDP header */
1156 		__skb_push(skb, sizeof(*uh));
1157 		skb_reset_transport_header(skb);
1158 		uh = udp_hdr(skb);
1159 		uh->source = inet->inet_sport;
1160 		uh->dest = inet->inet_dport;
1161 		udp_len = uhlen + session->hdr_len + data_len;
1162 		uh->len = htons(udp_len);
1163 
1164 		/* Calculate UDP checksum if configured to do so */
1165 #if IS_ENABLED(CONFIG_IPV6)
1166 		if (l2tp_sk_is_v6(sk))
1167 			udp6_set_csum(udp_get_no_check6_tx(sk),
1168 				      skb, &inet6_sk(sk)->saddr,
1169 				      &sk->sk_v6_daddr, udp_len);
1170 		else
1171 #endif
1172 			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1173 				     inet->inet_daddr, udp_len);
1174 		break;
1175 
1176 	case L2TP_ENCAPTYPE_IP:
1177 		break;
1178 	}
1179 
1180 	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
1181 
1182 out_unlock:
1183 	bh_unlock_sock(sk);
1184 
1185 	return ret;
1186 }
1187 
1188 /* If the caller requires the skb to have a PPP header, the header must be
1189  * inserted into the skb data before calling this function.
1190  */
1191 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1192 {
1193 	unsigned int len = 0;
1194 	int ret;
1195 
1196 	ret = l2tp_xmit_core(session, skb, &len);
1197 	if (ret == NET_XMIT_SUCCESS) {
1198 		atomic_long_inc(&session->tunnel->stats.tx_packets);
1199 		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1200 		atomic_long_inc(&session->stats.tx_packets);
1201 		atomic_long_add(len, &session->stats.tx_bytes);
1202 	} else {
1203 		atomic_long_inc(&session->tunnel->stats.tx_errors);
1204 		atomic_long_inc(&session->stats.tx_errors);
1205 	}
1206 	return ret;
1207 }
1208 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
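/* Usage sketch (illustrative only): as noted above, the caller pushes any
 * pseudowire header first. A PPP pseudowire, for example, does roughly:
 *
 *	skb_push(skb, 2);
 *	skb->data[0] = PPP_ALLSTATIONS;	(0xff, from linux/ppp_defs.h)
 *	skb->data[1] = PPP_UI;		(0x03)
 *	l2tp_xmit_skb(session, skb);
 */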
1209 
1210 /*****************************************************************************
1211  * Tunnel and session create/destroy.
1212  *****************************************************************************/
1213 
1214 /* Tunnel socket destruct hook.
1215  * The tunnel context is deleted only when all session sockets have been
1216  * closed.
1217  */
1218 static void l2tp_tunnel_destruct(struct sock *sk)
1219 {
1220 	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1221 
1222 	if (!tunnel)
1223 		goto end;
1224 
1225 	/* Disable udp encapsulation */
1226 	switch (tunnel->encap) {
1227 	case L2TP_ENCAPTYPE_UDP:
1228 		/* No longer an encapsulation socket. See net/ipv4/udp.c */
1229 		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
1230 		udp_sk(sk)->encap_rcv = NULL;
1231 		udp_sk(sk)->encap_destroy = NULL;
1232 		break;
1233 	case L2TP_ENCAPTYPE_IP:
1234 		break;
1235 	}
1236 
1237 	/* Remove hooks into tunnel socket */
1238 	write_lock_bh(&sk->sk_callback_lock);
1239 	sk->sk_destruct = tunnel->old_sk_destruct;
1240 	sk->sk_user_data = NULL;
1241 	write_unlock_bh(&sk->sk_callback_lock);
1242 
1243 	/* Call the original destructor */
1244 	if (sk->sk_destruct)
1245 		(*sk->sk_destruct)(sk);
1246 
1247 	kfree_rcu(tunnel, rcu);
1248 end:
1249 	return;
1250 }
1251 
1252 /* Remove an l2tp session from l2tp_core's lists. */
1253 static void l2tp_session_unhash(struct l2tp_session *session)
1254 {
1255 	struct l2tp_tunnel *tunnel = session->tunnel;
1256 
1257 	if (tunnel) {
1258 		struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1259 		struct l2tp_session *removed = session;
1260 
1261 		/* Remove from the per-tunnel list */
1262 		spin_lock_bh(&tunnel->list_lock);
1263 		list_del_init(&session->list);
1264 		spin_unlock_bh(&tunnel->list_lock);
1265 
1266 		/* Remove from per-net IDR */
1267 		spin_lock_bh(&pn->l2tp_session_idr_lock);
1268 		if (tunnel->version == L2TP_HDR_VER_3) {
1269 			if (hash_hashed(&session->hlist))
1270 				l2tp_session_collision_del(pn, session);
1271 			else
1272 				removed = idr_remove(&pn->l2tp_v3_session_idr,
1273 						     session->session_id);
1274 		} else {
1275 			u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
1276 							      session->session_id);
1277 			removed = idr_remove(&pn->l2tp_v2_session_idr,
1278 					     session_key);
1279 		}
1280 		WARN_ON_ONCE(removed && removed != session);
1281 		spin_unlock_bh(&pn->l2tp_session_idr_lock);
1282 
1283 		synchronize_rcu();
1284 	}
1285 }
1286 
1287 /* When the tunnel is closed, all the attached sessions need to go too.
1288  */
1289 static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1290 {
1291 	struct l2tp_session *session;
1292 
1293 	spin_lock_bh(&tunnel->list_lock);
1294 	tunnel->acpt_newsess = false;
1295 	for (;;) {
1296 		session = list_first_entry_or_null(&tunnel->session_list,
1297 						   struct l2tp_session, list);
1298 		if (!session)
1299 			break;
1300 		l2tp_session_inc_refcount(session);
1301 		list_del_init(&session->list);
1302 		spin_unlock_bh(&tunnel->list_lock);
1303 		l2tp_session_delete(session);
1304 		spin_lock_bh(&tunnel->list_lock);
1305 		l2tp_session_dec_refcount(session);
1306 	}
1307 	spin_unlock_bh(&tunnel->list_lock);
1308 }
1309 
1310 /* Tunnel socket destroy hook for UDP encapsulation */
1311 static void l2tp_udp_encap_destroy(struct sock *sk)
1312 {
1313 	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1314 
1315 	if (tunnel)
1316 		l2tp_tunnel_delete(tunnel);
1317 }
1318 
1319 static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
1320 {
1321 	struct l2tp_net *pn = l2tp_pernet(net);
1322 
1323 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1324 	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
1325 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1326 }
1327 
1328 /* Workqueue tunnel deletion function */
1329 static void l2tp_tunnel_del_work(struct work_struct *work)
1330 {
1331 	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1332 						  del_work);
1333 	struct sock *sk = tunnel->sock;
1334 	struct socket *sock = sk->sk_socket;
1335 
1336 	l2tp_tunnel_closeall(tunnel);
1337 
1338 	/* If the tunnel socket was created within the kernel, use
1339 	 * the sk API to release it here.
1340 	 */
1341 	if (tunnel->fd < 0) {
1342 		if (sock) {
1343 			kernel_sock_shutdown(sock, SHUT_RDWR);
1344 			sock_release(sock);
1345 		}
1346 	}
1347 
1348 	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
1349 	/* drop initial ref */
1350 	l2tp_tunnel_dec_refcount(tunnel);
1351 
1352 	/* drop workqueue ref */
1353 	l2tp_tunnel_dec_refcount(tunnel);
1354 }
1355 
1356 /* Create a socket for the tunnel, if one isn't set up by
1357  * userspace. This is used for static tunnels where there is no
1358  * managing L2TP daemon.
1359  *
1360  * Since we don't want these sockets to keep a namespace alive by
1361  * themselves, we drop the socket's namespace refcount after creation.
1362  * These sockets are freed when the namespace exits using the pernet
1363  * exit hook.
1364  */
1365 static int l2tp_tunnel_sock_create(struct net *net,
1366 				   u32 tunnel_id,
1367 				   u32 peer_tunnel_id,
1368 				   struct l2tp_tunnel_cfg *cfg,
1369 				   struct socket **sockp)
1370 {
1371 	int err = -EINVAL;
1372 	struct socket *sock = NULL;
1373 	struct udp_port_cfg udp_conf;
1374 
1375 	switch (cfg->encap) {
1376 	case L2TP_ENCAPTYPE_UDP:
1377 		memset(&udp_conf, 0, sizeof(udp_conf));
1378 
1379 #if IS_ENABLED(CONFIG_IPV6)
1380 		if (cfg->local_ip6 && cfg->peer_ip6) {
1381 			udp_conf.family = AF_INET6;
1382 			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1383 			       sizeof(udp_conf.local_ip6));
1384 			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1385 			       sizeof(udp_conf.peer_ip6));
1386 			udp_conf.use_udp6_tx_checksums =
1387 			  !cfg->udp6_zero_tx_checksums;
1388 			udp_conf.use_udp6_rx_checksums =
1389 			  !cfg->udp6_zero_rx_checksums;
1390 		} else
1391 #endif
1392 		{
1393 			udp_conf.family = AF_INET;
1394 			udp_conf.local_ip = cfg->local_ip;
1395 			udp_conf.peer_ip = cfg->peer_ip;
1396 			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1397 		}
1398 
1399 		udp_conf.local_udp_port = htons(cfg->local_udp_port);
1400 		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1401 
1402 		err = udp_sock_create(net, &udp_conf, &sock);
1403 		if (err < 0)
1404 			goto out;
1405 
1406 		break;
1407 
1408 	case L2TP_ENCAPTYPE_IP:
1409 #if IS_ENABLED(CONFIG_IPV6)
1410 		if (cfg->local_ip6 && cfg->peer_ip6) {
1411 			struct sockaddr_l2tpip6 ip6_addr = {0};
1412 
1413 			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1414 					       IPPROTO_L2TP, &sock);
1415 			if (err < 0)
1416 				goto out;
1417 
1418 			ip6_addr.l2tp_family = AF_INET6;
1419 			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1420 			       sizeof(ip6_addr.l2tp_addr));
1421 			ip6_addr.l2tp_conn_id = tunnel_id;
1422 			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
1423 					  sizeof(ip6_addr));
1424 			if (err < 0)
1425 				goto out;
1426 
1427 			ip6_addr.l2tp_family = AF_INET6;
1428 			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1429 			       sizeof(ip6_addr.l2tp_addr));
1430 			ip6_addr.l2tp_conn_id = peer_tunnel_id;
1431 			err = kernel_connect(sock,
1432 					     (struct sockaddr *)&ip6_addr,
1433 					     sizeof(ip6_addr), 0);
1434 			if (err < 0)
1435 				goto out;
1436 		} else
1437 #endif
1438 		{
1439 			struct sockaddr_l2tpip ip_addr = {0};
1440 
1441 			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1442 					       IPPROTO_L2TP, &sock);
1443 			if (err < 0)
1444 				goto out;
1445 
1446 			ip_addr.l2tp_family = AF_INET;
1447 			ip_addr.l2tp_addr = cfg->local_ip;
1448 			ip_addr.l2tp_conn_id = tunnel_id;
1449 			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
1450 					  sizeof(ip_addr));
1451 			if (err < 0)
1452 				goto out;
1453 
1454 			ip_addr.l2tp_family = AF_INET;
1455 			ip_addr.l2tp_addr = cfg->peer_ip;
1456 			ip_addr.l2tp_conn_id = peer_tunnel_id;
1457 			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
1458 					     sizeof(ip_addr), 0);
1459 			if (err < 0)
1460 				goto out;
1461 		}
1462 		break;
1463 
1464 	default:
1465 		goto out;
1466 	}
1467 
1468 out:
1469 	*sockp = sock;
1470 	if (err < 0 && sock) {
1471 		kernel_sock_shutdown(sock, SHUT_RDWR);
1472 		sock_release(sock);
1473 		*sockp = NULL;
1474 	}
1475 
1476 	return err;
1477 }
1478 
1479 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1480 		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1481 {
1482 	struct l2tp_tunnel *tunnel = NULL;
1483 	int err;
1484 	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1485 
1486 	if (cfg)
1487 		encap = cfg->encap;
1488 
1489 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1490 	if (!tunnel) {
1491 		err = -ENOMEM;
1492 		goto err;
1493 	}
1494 
1495 	tunnel->version = version;
1496 	tunnel->tunnel_id = tunnel_id;
1497 	tunnel->peer_tunnel_id = peer_tunnel_id;
1498 
1499 	tunnel->magic = L2TP_TUNNEL_MAGIC;
1500 	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1501 	spin_lock_init(&tunnel->list_lock);
1502 	tunnel->acpt_newsess = true;
1503 	INIT_LIST_HEAD(&tunnel->session_list);
1504 
1505 	tunnel->encap = encap;
1506 
1507 	refcount_set(&tunnel->ref_count, 1);
1508 	tunnel->fd = fd;
1509 
1510 	/* Init delete workqueue struct */
1511 	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1512 
1513 	err = 0;
1514 err:
1515 	if (tunnelp)
1516 		*tunnelp = tunnel;
1517 
1518 	return err;
1519 }
1520 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1521 
1522 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1523 				enum l2tp_encap_type encap)
1524 {
1525 	if (!net_eq(sock_net(sk), net))
1526 		return -EINVAL;
1527 
1528 	if (sk->sk_type != SOCK_DGRAM)
1529 		return -EPROTONOSUPPORT;
1530 
1531 	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1532 		return -EPROTONOSUPPORT;
1533 
1534 	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1535 	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1536 		return -EPROTONOSUPPORT;
1537 
1538 	if (sk->sk_user_data)
1539 		return -EBUSY;
1540 
1541 	return 0;
1542 }
1543 
1544 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1545 			 struct l2tp_tunnel_cfg *cfg)
1546 {
1547 	struct l2tp_net *pn = l2tp_pernet(net);
1548 	u32 tunnel_id = tunnel->tunnel_id;
1549 	struct socket *sock;
1550 	struct sock *sk;
1551 	int ret;
1552 
1553 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1554 	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
1555 			    GFP_ATOMIC);
1556 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1557 	if (ret)
1558 		return ret == -ENOSPC ? -EEXIST : ret;
1559 
1560 	if (tunnel->fd < 0) {
1561 		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1562 					      tunnel->peer_tunnel_id, cfg,
1563 					      &sock);
1564 		if (ret < 0)
1565 			goto err;
1566 	} else {
1567 		sock = sockfd_lookup(tunnel->fd, &ret);
1568 		if (!sock)
1569 			goto err;
1570 	}
1571 
1572 	sk = sock->sk;
1573 	lock_sock(sk);
1574 	write_lock_bh(&sk->sk_callback_lock);
1575 	ret = l2tp_validate_socket(sk, net, tunnel->encap);
1576 	if (ret < 0)
1577 		goto err_inval_sock;
1578 	rcu_assign_sk_user_data(sk, tunnel);
1579 	write_unlock_bh(&sk->sk_callback_lock);
1580 
1581 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1582 		struct udp_tunnel_sock_cfg udp_cfg = {
1583 			.sk_user_data = tunnel,
1584 			.encap_type = UDP_ENCAP_L2TPINUDP,
1585 			.encap_rcv = l2tp_udp_encap_recv,
1586 			.encap_err_rcv = l2tp_udp_encap_err_recv,
1587 			.encap_destroy = l2tp_udp_encap_destroy,
1588 		};
1589 
1590 		setup_udp_tunnel_sock(net, sock, &udp_cfg);
1591 	}
1592 
1593 	tunnel->old_sk_destruct = sk->sk_destruct;
1594 	sk->sk_destruct = &l2tp_tunnel_destruct;
1595 	sk->sk_allocation = GFP_ATOMIC;
1596 	release_sock(sk);
1597 
1598 	sock_hold(sk);
1599 	tunnel->sock = sk;
1600 	tunnel->l2tp_net = net;
1601 
1602 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1603 	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
1604 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1605 
1606 	trace_register_tunnel(tunnel);
1607 
1608 	if (tunnel->fd >= 0)
1609 		sockfd_put(sock);
1610 
1611 	return 0;
1612 
1613 err_inval_sock:
1614 	write_unlock_bh(&sk->sk_callback_lock);
1615 	release_sock(sk);
1616 
1617 	if (tunnel->fd < 0)
1618 		sock_release(sock);
1619 	else
1620 		sockfd_put(sock);
1621 err:
1622 	l2tp_tunnel_remove(net, tunnel);
1623 	return ret;
1624 }
1625 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
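/* Usage sketch (illustrative only, error handling and addresses elided): an
 * unmanaged tunnel passes fd == -1 so that a kernel socket is created:
 *
 *	struct l2tp_tunnel_cfg cfg = {
 *		.encap = L2TP_ENCAPTYPE_UDP,
 *		.local_udp_port = 1701,
 *		.peer_udp_port = 1701,
 *	};
 *	struct l2tp_tunnel *tunnel;
 *
 *	err = l2tp_tunnel_create(-1, 3, 42, 43, &cfg, &tunnel);
 *	if (!err)
 *		err = l2tp_tunnel_register(tunnel, net, &cfg);
 *
 * This mirrors what the netlink interface does for L2TP_CMD_TUNNEL_CREATE.
 */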
1626 
1627 /* This function is used by the netlink TUNNEL_DELETE command.
1628  */
1629 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1630 {
1631 	if (!test_and_set_bit(0, &tunnel->dead)) {
1632 		trace_delete_tunnel(tunnel);
1633 		l2tp_tunnel_inc_refcount(tunnel);
1634 		queue_work(l2tp_wq, &tunnel->del_work);
1635 	}
1636 }
1637 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1638 
1639 void l2tp_session_delete(struct l2tp_session *session)
1640 {
1641 	if (test_and_set_bit(0, &session->dead))
1642 		return;
1643 
1644 	trace_delete_session(session);
1645 	l2tp_session_unhash(session);
1646 	l2tp_session_queue_purge(session);
1647 	if (session->session_close)
1648 		(*session->session_close)(session);
1649 
1650 	l2tp_session_dec_refcount(session);
1651 }
1652 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1653 
1654 /* We come here whenever a session's send_seq, cookie_len or
1655  * l2specific_type parameters are set.
1656  */
1657 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1658 {
1659 	if (version == L2TP_HDR_VER_2) {
1660 		session->hdr_len = 6;
1661 		if (session->send_seq)
1662 			session->hdr_len += 4;
1663 	} else {
1664 		session->hdr_len = 4 + session->cookie_len;
1665 		session->hdr_len += l2tp_get_l2specific_len(session);
1666 		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1667 			session->hdr_len += 4;
1668 	}
1669 }
1670 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
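/* Worked example (illustrative): an L2TPv3 session over UDP with a 4-byte
 * cookie and the default L2-specific sublayer gets
 *
 *	hdr_len = 4 (flags) + 4 (session id) + 4 (cookie) + 4 (sublayer) = 16
 *
 * while a plain L2TPv2 session without send_seq needs only 6 bytes.
 */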
1671 
1672 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1673 					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1674 {
1675 	struct l2tp_session *session;
1676 
1677 	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1678 	if (session) {
1679 		session->magic = L2TP_SESSION_MAGIC;
1680 		session->tunnel = tunnel;
1681 
1682 		session->session_id = session_id;
1683 		session->peer_session_id = peer_session_id;
1684 		session->nr = 0;
1685 		if (tunnel->version == L2TP_HDR_VER_2)
1686 			session->nr_max = 0xffff;
1687 		else
1688 			session->nr_max = 0xffffff;
1689 		session->nr_window_size = session->nr_max / 2;
1690 		session->nr_oos_count_max = 4;
1691 
1692 		/* Use NR of first received packet */
1693 		session->reorder_skip = 1;
1694 
1695 		sprintf(&session->name[0], "sess %u/%u",
1696 			tunnel->tunnel_id, session->session_id);
1697 
1698 		skb_queue_head_init(&session->reorder_q);
1699 
1700 		session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
1701 		INIT_HLIST_NODE(&session->hlist);
1702 		INIT_LIST_HEAD(&session->clist);
1703 		INIT_LIST_HEAD(&session->list);
1704 
1705 		if (cfg) {
1706 			session->pwtype = cfg->pw_type;
1707 			session->send_seq = cfg->send_seq;
1708 			session->recv_seq = cfg->recv_seq;
1709 			session->lns_mode = cfg->lns_mode;
1710 			session->reorder_timeout = cfg->reorder_timeout;
1711 			session->l2specific_type = cfg->l2specific_type;
1712 			session->cookie_len = cfg->cookie_len;
1713 			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1714 			session->peer_cookie_len = cfg->peer_cookie_len;
1715 			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1716 		}
1717 
1718 		l2tp_session_set_header_len(session, tunnel->version);
1719 
1720 		refcount_set(&session->ref_count, 1);
1721 
1722 		return session;
1723 	}
1724 
1725 	return ERR_PTR(-ENOMEM);
1726 }
1727 EXPORT_SYMBOL_GPL(l2tp_session_create);
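/* Usage sketch (illustrative only): pseudowire drivers pair the two calls,
 * dropping their reference if registration fails:
 *
 *	session = l2tp_session_create(sizeof(struct my_priv), tunnel,
 *				      session_id, peer_session_id, &cfg);
 *	if (!IS_ERR(session)) {
 *		err = l2tp_session_register(session, tunnel);
 *		if (err < 0)
 *			l2tp_session_dec_refcount(session);
 *	}
 *
 * where struct my_priv stands in for a driver's per-session private data.
 */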
1728 
1729 /*****************************************************************************
1730  * Init and cleanup
1731  *****************************************************************************/
1732 
1733 static __net_init int l2tp_init_net(struct net *net)
1734 {
1735 	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1736 
1737 	idr_init(&pn->l2tp_tunnel_idr);
1738 	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1739 
1740 	idr_init(&pn->l2tp_v2_session_idr);
1741 	idr_init(&pn->l2tp_v3_session_idr);
1742 	spin_lock_init(&pn->l2tp_session_idr_lock);
1743 
1744 	return 0;
1745 }
1746 
1747 static __net_exit void l2tp_exit_net(struct net *net)
1748 {
1749 	struct l2tp_net *pn = l2tp_pernet(net);
1750 	struct l2tp_tunnel *tunnel = NULL;
1751 	unsigned long tunnel_id, tmp;
1752 
1753 	rcu_read_lock_bh();
1754 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
1755 		if (tunnel)
1756 			l2tp_tunnel_delete(tunnel);
1757 	}
1758 	rcu_read_unlock_bh();
1759 
1760 	if (l2tp_wq)
1761 		flush_workqueue(l2tp_wq);
1762 	rcu_barrier();
1763 
1764 	idr_destroy(&pn->l2tp_v2_session_idr);
1765 	idr_destroy(&pn->l2tp_v3_session_idr);
1766 	idr_destroy(&pn->l2tp_tunnel_idr);
1767 }
1768 
1769 static struct pernet_operations l2tp_net_ops = {
1770 	.init = l2tp_init_net,
1771 	.exit = l2tp_exit_net,
1772 	.id   = &l2tp_net_id,
1773 	.size = sizeof(struct l2tp_net),
1774 };
1775 
1776 static int __init l2tp_init(void)
1777 {
1778 	int rc = 0;
1779 
1780 	rc = register_pernet_device(&l2tp_net_ops);
1781 	if (rc)
1782 		goto out;
1783 
1784 	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1785 	if (!l2tp_wq) {
1786 		pr_err("alloc_workqueue failed\n");
1787 		unregister_pernet_device(&l2tp_net_ops);
1788 		rc = -ENOMEM;
1789 		goto out;
1790 	}
1791 
1792 	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1793 
1794 out:
1795 	return rc;
1796 }
1797 
1798 static void __exit l2tp_exit(void)
1799 {
1800 	unregister_pernet_device(&l2tp_net_ops);
1801 	if (l2tp_wq) {
1802 		destroy_workqueue(l2tp_wq);
1803 		l2tp_wq = NULL;
1804 	}
1805 }
1806 
1807 module_init(l2tp_init);
1808 module_exit(l2tp_exit);
1809 
1810 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1811 MODULE_DESCRIPTION("L2TP core");
1812 MODULE_LICENSE("GPL");
1813 MODULE_VERSION(L2TP_DRV_VERSION);
1814