xref: /linux/net/ipv4/af_inet.c (revision 2c7e63d702f6c4209c5af833308e7fcbc7d4ab17)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		PF_INET protocol family socket handler.
8  *
9  * Authors:	Ross Biro
10  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *		Florian La Roche, <flla@stud.uni-sb.de>
12  *		Alan Cox, <A.Cox@swansea.ac.uk>
13  *
14  * Changes (see also sock.c)
15  *
16  *		piggy,
17  *		Karl Knutson	:	Socket protocol table
18  *		A.N.Kuznetsov	:	Socket death error in accept().
19  *		John Richardson :	Fix non blocking error in connect()
20  *					so sockets that fail to connect
21  *					don't return -EINPROGRESS.
22  *		Alan Cox	:	Asynchronous I/O support
23  *		Alan Cox	:	Keep correct socket pointer on sock
24  *					structures
25  *					when accept() ed
26  *		Alan Cox	:	Semantics of SO_LINGER aren't state
27  *					moved to close when you look carefully.
28  *					With this fixed and the accept bug fixed
29  *					some RPC stuff seems happier.
30  *		Niibe Yutaka	:	4.4BSD style write async I/O
31  *		Alan Cox,
32  *		Tony Gale 	:	Fixed reuse semantics.
33  *		Alan Cox	:	bind() shouldn't abort existing but dead
34  *					sockets. Stops FTP netin:.. I hope.
35  *		Alan Cox	:	bind() works correctly for RAW sockets.
36  *					Note that FreeBSD at least was broken
37  *					in this respect so be careful with
38  *					compatibility tests...
39  *		Alan Cox	:	routing cache support
40  *		Alan Cox	:	memzero the socket structure for
41  *					compactness.
42  *		Matt Day	:	nonblock connect error handler
43  *		Alan Cox	:	Allow large numbers of pending sockets
44  *					(eg for big web sites), but only if
45  *					specifically application requested.
46  *		Alan Cox	:	New buffering throughout IP. Used
47  *					dumbly.
48  *		Alan Cox	:	New buffering now used smartly.
49  *		Alan Cox	:	BSD rather than common sense
50  *					interpretation of listen.
51  *		Germano Caronni	:	Assorted small races.
52  *		Alan Cox	:	sendmsg/recvmsg basic support.
53  *		Alan Cox	:	Only sendmsg/recvmsg now supported.
54  *		Alan Cox	:	Locked down bind (see security list).
55  *		Alan Cox	:	Loosened bind a little.
56  *		Mike McLagan	:	ADD/DEL DLCI Ioctls
57  *	Willy Konynenberg	:	Transparent proxying support.
58  *		David S. Miller	:	New socket lookup architecture.
59  *					Some other random speedups.
60  *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
61  *		Andi Kleen	:	Fix inet_stream_connect TCP race.
62  */
63 
64 #define pr_fmt(fmt) "IPv4: " fmt
65 
66 #include <linux/err.h>
67 #include <linux/errno.h>
68 #include <linux/types.h>
69 #include <linux/socket.h>
70 #include <linux/in.h>
71 #include <linux/kernel.h>
72 #include <linux/kmod.h>
73 #include <linux/sched.h>
74 #include <linux/timer.h>
75 #include <linux/string.h>
76 #include <linux/sockios.h>
77 #include <linux/net.h>
78 #include <linux/capability.h>
79 #include <linux/fcntl.h>
80 #include <linux/mm.h>
81 #include <linux/interrupt.h>
82 #include <linux/stat.h>
83 #include <linux/init.h>
84 #include <linux/poll.h>
85 #include <linux/netfilter_ipv4.h>
86 #include <linux/random.h>
87 #include <linux/slab.h>
88 
89 #include <linux/uaccess.h>
90 
91 #include <linux/inet.h>
92 #include <linux/igmp.h>
93 #include <linux/inetdevice.h>
94 #include <linux/netdevice.h>
95 #include <net/checksum.h>
96 #include <net/ip.h>
97 #include <net/protocol.h>
98 #include <net/arp.h>
99 #include <net/route.h>
100 #include <net/ip_fib.h>
101 #include <net/inet_connection_sock.h>
102 #include <net/gro.h>
103 #include <net/gso.h>
104 #include <net/tcp.h>
105 #include <net/psp.h>
106 #include <net/udp.h>
107 #include <net/udplite.h>
108 #include <net/ping.h>
109 #include <linux/skbuff.h>
110 #include <net/sock.h>
111 #include <net/raw.h>
112 #include <net/icmp.h>
113 #include <net/inet_common.h>
114 #include <net/ip_tunnels.h>
115 #include <net/xfrm.h>
116 #include <net/net_namespace.h>
117 #include <net/secure_seq.h>
118 #ifdef CONFIG_IP_MROUTE
119 #include <linux/mroute.h>
120 #endif
121 #include <net/l3mdev.h>
122 #include <net/compat.h>
123 #include <net/rps.h>
124 
125 #include <trace/events/sock.h>
126 
127 /* Keep the definition of IPv6 disable here for now, to avoid annoying linker
128  * issues in case IPv6=m
129  */
130 int disable_ipv6_mod;
131 EXPORT_SYMBOL(disable_ipv6_mod);
132 
133 /* The inetsw table contains everything that inet_create needs to
134  * build a new socket.
135  */
136 static struct list_head inetsw[SOCK_MAX];
137 static DEFINE_SPINLOCK(inetsw_lock);
138 
139 /* New destruction routine */
140 
141 void inet_sock_destruct(struct sock *sk)
142 {
143 	struct inet_sock *inet = inet_sk(sk);
144 
145 	__skb_queue_purge(&sk->sk_receive_queue);
146 	__skb_queue_purge(&sk->sk_error_queue);
147 
148 	sk_mem_reclaim_final(sk);
149 
150 	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
151 		pr_err("Attempt to release TCP socket in state %d %p\n",
152 		       sk->sk_state, sk);
153 		return;
154 	}
155 	if (!sock_flag(sk, SOCK_DEAD)) {
156 		pr_err("Attempt to release alive inet socket %p\n", sk);
157 		return;
158 	}
159 
160 	WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
161 	WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
162 	WARN_ON_ONCE(sk->sk_wmem_queued);
163 	WARN_ON_ONCE(sk->sk_forward_alloc);
164 
165 	kfree(rcu_dereference_protected(inet->inet_opt, 1));
166 	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
167 	dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
168 	psp_sk_assoc_free(sk);
169 }
170 EXPORT_SYMBOL(inet_sock_destruct);
171 
172 /*
173  *	The routines beyond this point handle the behaviour of an AF_INET
174  *	socket object. Mostly it punts to the subprotocols of IP to do
175  *	the work.
176  */
177 
178 /*
179  *	Automatically bind an unbound socket.
180  */
181 
182 static int inet_autobind(struct sock *sk)
183 {
184 	struct inet_sock *inet;
185 	/* We may need to bind the socket. */
186 	lock_sock(sk);
187 	inet = inet_sk(sk);
188 	if (!inet->inet_num) {
189 		if (sk->sk_prot->get_port(sk, 0)) {
190 			release_sock(sk);
191 			return -EAGAIN;
192 		}
193 		inet->inet_sport = htons(inet->inet_num);
194 	}
195 	release_sock(sk);
196 	return 0;
197 }
198 
199 int __inet_listen_sk(struct sock *sk, int backlog)
200 {
201 	unsigned char old_state = sk->sk_state;
202 	int err, tcp_fastopen;
203 
204 	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
205 		return -EINVAL;
206 
207 	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
208 	/* Really, if the socket is already in listen state
209 	 * we can only allow the backlog to be adjusted.
210 	 */
211 	if (old_state != TCP_LISTEN) {
212 		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
213 		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
214 		 * Also fastopen backlog may already have been set via the option
215 		 * because the socket was in TCP_LISTEN state previously but
216 		 * was shutdown() rather than close().
217 		 */
218 		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
219 		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
220 		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
221 		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
222 			fastopen_queue_tune(sk, backlog);
223 			tcp_fastopen_init_key_once(sock_net(sk));
224 		}
225 
226 		err = inet_csk_listen_start(sk);
227 		if (err)
228 			return err;
229 
230 		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
231 	}
232 	return 0;
233 }
234 
235 /*
236  *	Move a socket into listening state.
237  */
238 int inet_listen(struct socket *sock, int backlog)
239 {
240 	struct sock *sk = sock->sk;
241 	int err = -EINVAL;
242 
243 	lock_sock(sk);
244 
245 	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
246 		goto out;
247 
248 	err = __inet_listen_sk(sk, backlog);
249 
250 out:
251 	release_sock(sk);
252 	return err;
253 }
254 EXPORT_SYMBOL(inet_listen);
255 
256 /*
257  *	Create an inet socket.
258  */
259 
260 static int inet_create(struct net *net, struct socket *sock, int protocol,
261 		       int kern)
262 {
263 	struct sock *sk;
264 	struct inet_protosw *answer;
265 	struct inet_sock *inet;
266 	struct proto *answer_prot;
267 	unsigned char answer_flags;
268 	int try_loading_module = 0;
269 	int err;
270 
271 	if (protocol < 0 || protocol >= IPPROTO_MAX)
272 		return -EINVAL;
273 
274 	sock->state = SS_UNCONNECTED;
275 
276 	/* Look for the requested type/protocol pair. */
277 lookup_protocol:
278 	err = -ESOCKTNOSUPPORT;
279 	rcu_read_lock();
280 	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
281 
282 		err = 0;
283 		/* Check the non-wild match. */
284 		if (protocol == answer->protocol) {
285 			if (protocol != IPPROTO_IP)
286 				break;
287 		} else {
288 			/* Check for the two wild cases. */
289 			if (IPPROTO_IP == protocol) {
290 				protocol = answer->protocol;
291 				break;
292 			}
293 			if (IPPROTO_IP == answer->protocol)
294 				break;
295 		}
296 		err = -EPROTONOSUPPORT;
297 	}
298 
299 	if (unlikely(err)) {
300 		if (try_loading_module < 2) {
301 			rcu_read_unlock();
302 			/*
303 			 * Be more specific, e.g. net-pf-2-proto-132-type-1
304 			 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
305 			 */
306 			if (++try_loading_module == 1)
307 				request_module("net-pf-%d-proto-%d-type-%d",
308 					       PF_INET, protocol, sock->type);
309 			/*
310 			 * Fall back to generic, e.g. net-pf-2-proto-132
311 			 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
312 			 */
313 			else
314 				request_module("net-pf-%d-proto-%d",
315 					       PF_INET, protocol);
316 			goto lookup_protocol;
317 		} else
318 			goto out_rcu_unlock;
319 	}
320 
321 	err = -EPERM;
322 	if (sock->type == SOCK_RAW && !kern &&
323 	    !ns_capable(net->user_ns, CAP_NET_RAW))
324 		goto out_rcu_unlock;
325 
326 	sock->ops = answer->ops;
327 	answer_prot = answer->prot;
328 	answer_flags = answer->flags;
329 	rcu_read_unlock();
330 
331 	WARN_ON(!answer_prot->slab);
332 
333 	err = -ENOMEM;
334 	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
335 	if (!sk)
336 		goto out;
337 
338 	err = 0;
339 	if (INET_PROTOSW_REUSE & answer_flags)
340 		sk->sk_reuse = SK_CAN_REUSE;
341 
342 	if (INET_PROTOSW_ICSK & answer_flags)
343 		inet_init_csk_locks(sk);
344 
345 	inet = inet_sk(sk);
346 	inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
347 
348 	inet_clear_bit(NODEFRAG, sk);
349 
350 	if (SOCK_RAW == sock->type) {
351 		inet->inet_num = protocol;
352 		if (IPPROTO_RAW == protocol)
353 			inet_set_bit(HDRINCL, sk);
354 	}
355 
356 	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
357 		inet->pmtudisc = IP_PMTUDISC_DONT;
358 	else
359 		inet->pmtudisc = IP_PMTUDISC_WANT;
360 
361 	atomic_set(&inet->inet_id, 0);
362 
363 	sock_init_data(sock, sk);
364 
365 	sk->sk_destruct	   = inet_sock_destruct;
366 	sk->sk_protocol	   = protocol;
367 	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
368 	sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
369 
370 	inet->uc_ttl	= -1;
371 	inet_set_bit(MC_LOOP, sk);
372 	inet->mc_ttl	= 1;
373 	inet_set_bit(MC_ALL, sk);
374 	inet->mc_index	= 0;
375 	inet->mc_list	= NULL;
376 	inet->rcv_tos	= 0;
377 
378 	if (inet->inet_num) {
379 		/* It assumes that any protocol which allows
380 		 * the user to assign a number at socket
381 		 * creation time automatically
382 		 * shares.
383 		 */
384 		inet->inet_sport = htons(inet->inet_num);
385 		/* Add to protocol hash chains. */
386 		err = sk->sk_prot->hash(sk);
387 		if (err)
388 			goto out_sk_release;
389 	}
390 
391 	if (sk->sk_prot->init) {
392 		err = sk->sk_prot->init(sk);
393 		if (err)
394 			goto out_sk_release;
395 	}
396 
397 	if (!kern) {
398 		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
399 		if (err)
400 			goto out_sk_release;
401 	}
402 out:
403 	return err;
404 out_rcu_unlock:
405 	rcu_read_unlock();
406 	goto out;
407 out_sk_release:
408 	sk_common_release(sk);
409 	sock->sk = NULL;
410 	goto out;
411 }
412 
413 
414 /*
415  *	The peer socket should always be NULL (or else). When we call this
416  *	function we are destroying the object and from then on nobody
417  *	should refer to it.
418  */
419 int inet_release(struct socket *sock)
420 {
421 	struct sock *sk = sock->sk;
422 
423 	if (sk) {
424 		long timeout;
425 
426 		if (!sk->sk_kern_sock)
427 			BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);
428 
429 		/* Applications forget to leave groups before exiting */
430 		ip_mc_drop_socket(sk);
431 
432 		/* If linger is set, we don't return until the close
433 		 * is complete.  Otherwise we return immediately. The
434 		 * actually closing is done the same either way.
435 		 *
436 		 * If the close is due to the process exiting, we never
437 		 * linger..
438 		 */
439 		timeout = 0;
440 		if (sock_flag(sk, SOCK_LINGER) &&
441 		    !(current->flags & PF_EXITING))
442 			timeout = sk->sk_lingertime;
443 		sk->sk_prot->close(sk, timeout);
444 		sock->sk = NULL;
445 	}
446 	return 0;
447 }
448 EXPORT_SYMBOL(inet_release);
449 
450 int inet_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len)
451 {
452 	u32 flags = BIND_WITH_LOCK;
453 	int err;
454 
455 	/* If the socket has its own bind function then use it. (RAW) */
456 	if (sk->sk_prot->bind) {
457 		return sk->sk_prot->bind(sk, uaddr, addr_len);
458 	}
459 	if (addr_len < sizeof(struct sockaddr_in))
460 		return -EINVAL;
461 
462 	/* BPF prog is run before any checks are done so that if the prog
463 	 * changes context in a wrong way it will be caught.
464 	 */
465 	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
466 						 CGROUP_INET4_BIND, &flags);
467 	if (err)
468 		return err;
469 
470 	return __inet_bind(sk, uaddr, addr_len, flags);
471 }
472 
473 int inet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
474 {
475 	return inet_bind_sk(sock->sk, uaddr, addr_len);
476 }
477 EXPORT_SYMBOL(inet_bind);
478 
479 int __inet_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
480 		u32 flags)
481 {
482 	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
483 	struct inet_sock *inet = inet_sk(sk);
484 	struct net *net = sock_net(sk);
485 	unsigned short snum;
486 	int chk_addr_ret;
487 	u32 tb_id = RT_TABLE_LOCAL;
488 	int err;
489 
490 	if (addr->sin_family != AF_INET) {
491 		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
492 		 * only if s_addr is INADDR_ANY.
493 		 */
494 		err = -EAFNOSUPPORT;
495 		if (addr->sin_family != AF_UNSPEC ||
496 		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
497 			goto out;
498 	}
499 
500 	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
501 	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
502 
503 	/* Not specified by any standard per se, however it breaks too
504 	 * many applications when removed.  It is unfortunate since
505 	 * allowing applications to make a non-local bind solves
506 	 * several problems with systems using dynamic addressing.
507 	 * (ie. your servers still start up even if your ISDN link
508 	 *  is temporarily down)
509 	 */
510 	err = -EADDRNOTAVAIL;
511 	if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
512 	                                 chk_addr_ret))
513 		goto out;
514 
515 	snum = ntohs(addr->sin_port);
516 	err = -EACCES;
517 	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
518 	    snum && inet_port_requires_bind_service(net, snum) &&
519 	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
520 		goto out;
521 
522 	/*      We keep a pair of addresses. rcv_saddr is the one
523 	 *      used by hash lookups, and saddr is used for transmit.
524 	 *
525 	 *      In the BSD API these are the same except where it
526 	 *      would be illegal to use them (multicast/broadcast) in
527 	 *      which case the sending device address is used.
528 	 */
529 	if (flags & BIND_WITH_LOCK)
530 		lock_sock(sk);
531 
532 	/* Check these errors (active socket, double bind). */
533 	err = -EINVAL;
534 	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
535 		goto out_release_sock;
536 
537 	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
538 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
539 		inet->inet_saddr = 0;  /* Use device */
540 
541 	/* Make sure we are allowed to bind here. */
542 	if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
543 		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
544 		err = sk->sk_prot->get_port(sk, snum);
545 		if (err) {
546 			inet->inet_saddr = inet->inet_rcv_saddr = 0;
547 			goto out_release_sock;
548 		}
549 		if (!(flags & BIND_FROM_BPF)) {
550 			err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
551 			if (err) {
552 				inet->inet_saddr = inet->inet_rcv_saddr = 0;
553 				if (sk->sk_prot->put_port)
554 					sk->sk_prot->put_port(sk);
555 				goto out_release_sock;
556 			}
557 		}
558 	}
559 
560 	if (inet->inet_rcv_saddr)
561 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
562 	if (snum)
563 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
564 	inet->inet_sport = htons(inet->inet_num);
565 	inet->inet_daddr = 0;
566 	inet->inet_dport = 0;
567 	sk_dst_reset(sk);
568 	err = 0;
569 out_release_sock:
570 	if (flags & BIND_WITH_LOCK)
571 		release_sock(sk);
572 out:
573 	return err;
574 }
575 
576 int inet_dgram_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
577 		       int addr_len, int flags)
578 {
579 	struct sock *sk = sock->sk;
580 	const struct proto *prot;
581 	int err;
582 
583 	if (addr_len < sizeof(uaddr->sa_family))
584 		return -EINVAL;
585 
586 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
587 	prot = READ_ONCE(sk->sk_prot);
588 
589 	if (uaddr->sa_family == AF_UNSPEC)
590 		return prot->disconnect(sk, flags);
591 
592 	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
593 		err = prot->pre_connect(sk, uaddr, addr_len);
594 		if (err)
595 			return err;
596 	}
597 
598 	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
599 		return -EAGAIN;
600 	return prot->connect(sk, uaddr, addr_len);
601 }
602 EXPORT_SYMBOL(inet_dgram_connect);
603 
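/* Wait for the handshake started by connect() to finish: sleep until the
 * socket leaves SYN_SENT/SYN_RECV, the timeout expires or a signal is
 * pending.  The socket lock is dropped around wait_woken() so the protocol
 * can update sk->sk_state while we sleep.
 */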
604 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
605 {
606 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
607 
608 	add_wait_queue(sk_sleep(sk), &wait);
609 	sk->sk_write_pending += writebias;
610 
611 	/* Basic assumption: if someone sets sk->sk_err, he _must_
612 	 * change state of the socket from TCP_SYN_*.
613 	 * Connect() does not allow getting error notifications
614 	 * without closing the socket.
615 	 */
616 	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
617 		release_sock(sk);
618 		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
619 		lock_sock(sk);
620 		if (signal_pending(current) || !timeo)
621 			break;
622 	}
623 	remove_wait_queue(sk_sleep(sk), &wait);
624 	sk->sk_write_pending -= writebias;
625 	return timeo;
626 }
627 
628 /*
629  *	Connect to a remote host. There is regrettably still a little
630  *	TCP 'magic' in here.
631  */
632 int __inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
633 			  int addr_len, int flags, int is_sendmsg)
634 {
635 	struct sock *sk = sock->sk;
636 	int err;
637 	long timeo;
638 
639 	/*
640 	 * uaddr can be NULL and addr_len can be 0 if:
641 	 * sk is a TCP fastopen active socket and
642 	 * TCP_FASTOPEN_CONNECT sockopt is set and
643 	 * we already have a valid cookie for this socket.
644 	 * In this case, user can call write() after connect().
645 	 * write() will invoke tcp_sendmsg_fastopen() which calls
646 	 * __inet_stream_connect().
647 	 */
648 	if (uaddr) {
649 		if (addr_len < sizeof(uaddr->sa_family))
650 			return -EINVAL;
651 
652 		if (uaddr->sa_family == AF_UNSPEC) {
653 			sk->sk_disconnects++;
654 			err = sk->sk_prot->disconnect(sk, flags);
655 			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
656 			goto out;
657 		}
658 	}
659 
660 	switch (sock->state) {
661 	default:
662 		err = -EINVAL;
663 		goto out;
664 	case SS_CONNECTED:
665 		err = -EISCONN;
666 		goto out;
667 	case SS_CONNECTING:
668 		if (inet_test_bit(DEFER_CONNECT, sk))
669 			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
670 		else
671 			err = -EALREADY;
672 		/* Fall out of switch with err, set for this state */
673 		break;
674 	case SS_UNCONNECTED:
675 		err = -EISCONN;
676 		if (sk->sk_state != TCP_CLOSE)
677 			goto out;
678 
679 		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
680 			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
681 			if (err)
682 				goto out;
683 		}
684 
685 		err = sk->sk_prot->connect(sk, uaddr, addr_len);
686 		if (err < 0)
687 			goto out;
688 
689 		sock->state = SS_CONNECTING;
690 
691 		if (!err && inet_test_bit(DEFER_CONNECT, sk))
692 			goto out;
693 
694 		/* Just entered SS_CONNECTING state; the only
695 		 * difference is that return value in non-blocking
696 		 * case is EINPROGRESS, rather than EALREADY.
697 		 */
698 		err = -EINPROGRESS;
699 		break;
700 	}
701 
702 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
703 
704 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
705 		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
706 				tcp_sk(sk)->fastopen_req &&
707 				tcp_sk(sk)->fastopen_req->data ? 1 : 0;
708 		int dis = sk->sk_disconnects;
709 
710 		/* Error code is set above */
711 		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
712 			goto out;
713 
714 		err = sock_intr_errno(timeo);
715 		if (signal_pending(current))
716 			goto out;
717 
718 		if (dis != sk->sk_disconnects) {
719 			err = -EPIPE;
720 			goto out;
721 		}
722 	}
723 
724 	/* Connection was closed by RST, timeout, ICMP error
725 	 * or another process disconnected us.
726 	 */
727 	if (sk->sk_state == TCP_CLOSE)
728 		goto sock_error;
729 
730 	/* sk->sk_err may be not zero now, if RECVERR was ordered by user
731 	 * and error was received after socket entered established state.
732 	 * Hence, it is handled normally after connect() return successfully.
733 	 */
734 
735 	sock->state = SS_CONNECTED;
736 	err = 0;
737 out:
738 	return err;
739 
740 sock_error:
741 	err = sock_error(sk) ? : -ECONNABORTED;
742 	sock->state = SS_UNCONNECTED;
743 	sk->sk_disconnects++;
744 	if (sk->sk_prot->disconnect(sk, flags))
745 		sock->state = SS_DISCONNECTING;
746 	goto out;
747 }
748 EXPORT_SYMBOL(__inet_stream_connect);
749 
750 int inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
751 			int addr_len, int flags)
752 {
753 	int err;
754 
755 	lock_sock(sock->sk);
756 	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
757 	release_sock(sock->sk);
758 	return err;
759 }
760 EXPORT_SYMBOL(inet_stream_connect);
761 
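/* Finish accepting a connection: charge the child socket to the memory
 * cgroup, record the RPS flow, graft it onto the new struct socket and mark
 * that socket connected.
 */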
762 void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
763 {
764 	if (mem_cgroup_sockets_enabled) {
765 		mem_cgroup_sk_alloc(newsk);
766 		__sk_charge(newsk, GFP_KERNEL);
767 	}
768 
769 	sock_rps_record_flow(newsk);
770 	WARN_ON(!((1 << newsk->sk_state) &
771 		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
772 		   TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
773 		   TCPF_CLOSING | TCPF_CLOSE_WAIT |
774 		   TCPF_CLOSE)));
775 
776 	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
777 		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
778 	sock_graft(newsk, newsock);
779 
780 	newsock->state = SS_CONNECTED;
781 }
782 EXPORT_SYMBOL_GPL(__inet_accept);
783 
784 /*
785  *	Accept a pending connection. The TCP layer now gives BSD semantics.
786  */
787 
788 int inet_accept(struct socket *sock, struct socket *newsock,
789 		struct proto_accept_arg *arg)
790 {
791 	struct sock *sk1 = sock->sk, *sk2;
792 
793 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
794 	arg->err = -EINVAL;
795 	sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg);
796 	if (!sk2)
797 		return arg->err;
798 
799 	lock_sock(sk2);
800 	__inet_accept(sock, newsock, sk2);
801 	release_sock(sk2);
802 	return 0;
803 }
804 EXPORT_SYMBOL(inet_accept);
805 
806 /*
807  *	This does both peername and sockname.
808  */
809 int inet_getname(struct socket *sock, struct sockaddr *uaddr,
810 		 int peer)
811 {
812 	struct sock *sk		= sock->sk;
813 	struct inet_sock *inet	= inet_sk(sk);
814 	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
815 	int sin_addr_len = sizeof(*sin);
816 
817 	sin->sin_family = AF_INET;
818 	lock_sock(sk);
819 	if (peer) {
820 		if (!inet->inet_dport ||
821 		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
822 		     peer == 1)) {
823 			release_sock(sk);
824 			return -ENOTCONN;
825 		}
826 		sin->sin_port = inet->inet_dport;
827 		sin->sin_addr.s_addr = inet->inet_daddr;
828 		BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
829 				       CGROUP_INET4_GETPEERNAME);
830 	} else {
831 		__be32 addr = inet->inet_rcv_saddr;
832 		if (!addr)
833 			addr = inet->inet_saddr;
834 		sin->sin_port = inet->inet_sport;
835 		sin->sin_addr.s_addr = addr;
836 		BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
837 				       CGROUP_INET4_GETSOCKNAME);
838 	}
839 	release_sock(sk);
840 	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
841 	return sin_addr_len;
842 }
843 EXPORT_SYMBOL(inet_getname);
844 
845 int inet_send_prepare(struct sock *sk)
846 {
847 	sock_rps_record_flow(sk);
848 
849 	/* We may need to bind the socket. */
850 	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
851 	    inet_autobind(sk))
852 		return -EAGAIN;
853 
854 	return 0;
855 }
856 EXPORT_SYMBOL_GPL(inet_send_prepare);
857 
858 int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
859 {
860 	struct sock *sk = sock->sk;
861 
862 	if (unlikely(inet_send_prepare(sk)))
863 		return -EAGAIN;
864 
865 	return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
866 			       sk, msg, size);
867 }
868 EXPORT_SYMBOL(inet_sendmsg);
869 
870 void inet_splice_eof(struct socket *sock)
871 {
872 	const struct proto *prot;
873 	struct sock *sk = sock->sk;
874 
875 	if (unlikely(inet_send_prepare(sk)))
876 		return;
877 
878 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
879 	prot = READ_ONCE(sk->sk_prot);
880 	if (prot->splice_eof)
881 		prot->splice_eof(sock);
882 }
883 EXPORT_SYMBOL_GPL(inet_splice_eof);
884 
885 INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
886 					  size_t, int, int *));
887 int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
888 		 int flags)
889 {
890 	struct sock *sk = sock->sk;
891 	int addr_len = 0;
892 	int err;
893 
894 	if (likely(!(flags & MSG_ERRQUEUE)))
895 		sock_rps_record_flow(sk);
896 
897 	err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
898 			      sk, msg, size, flags, &addr_len);
899 	if (err >= 0)
900 		msg->msg_namelen = addr_len;
901 	return err;
902 }
903 EXPORT_SYMBOL(inet_recvmsg);
904 
905 int inet_shutdown(struct socket *sock, int how)
906 {
907 	struct sock *sk = sock->sk;
908 	int err = 0;
909 
910 	/* This should really check to make sure
911 	 * the socket is a TCP socket. (WHY AC...)
912 	 */
913 	how++; /* maps SHUT_RD (0) -> RCV_SHUTDOWN (1),
914 		       SHUT_WR (1) -> SEND_SHUTDOWN (2),
915 		       SHUT_RDWR (2) -> SHUTDOWN_MASK (3) */
916 	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
917 		return -EINVAL;
918 
919 	lock_sock(sk);
920 	if (sock->state == SS_CONNECTING) {
921 		if ((1 << sk->sk_state) &
922 		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
923 			sock->state = SS_DISCONNECTING;
924 		else
925 			sock->state = SS_CONNECTED;
926 	}
927 
928 	switch (sk->sk_state) {
929 	case TCP_CLOSE:
930 		err = -ENOTCONN;
931 		/* Hack to wake up other listeners, who can poll for
932 		   EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
933 		fallthrough;
934 	default:
935 		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
936 		if (sk->sk_prot->shutdown)
937 			sk->sk_prot->shutdown(sk, how);
938 		break;
939 
940 	/* Remaining two branches are temporary solution for missing
941 	 * close() in multithreaded environment. It is _not_ a good idea,
942 	 * but we have no choice until close() is repaired at VFS level.
943 	 */
944 	case TCP_LISTEN:
945 		if (!(how & RCV_SHUTDOWN))
946 			break;
947 		fallthrough;
948 	case TCP_SYN_SENT:
949 		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
950 		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
951 		break;
952 	}
953 
954 	/* Wake up anyone sleeping in poll. */
955 	sk->sk_state_change(sk);
956 	release_sock(sk);
957 	return err;
958 }
959 EXPORT_SYMBOL(inet_shutdown);
960 
961 /*
962  *	ioctl() calls you can issue on an INET socket. Most of these are
963  *	device configuration and stuff and very rarely used. Some ioctls
964  *	pass on to the socket itself.
965  *
966  *	NOTE: I like the idea of a module for the config stuff. ie ifconfig
967  *	loads the devconfigure module does its configuring and unloads it.
968  *	There's a good 20K of config code hanging around the kernel.
969  */
970 
971 int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
972 {
973 	struct sock *sk = sock->sk;
974 	int err = 0;
975 	struct net *net = sock_net(sk);
976 	void __user *p = (void __user *)arg;
977 	struct ifreq ifr;
978 	struct rtentry rt;
979 
980 	switch (cmd) {
981 	case SIOCADDRT:
982 	case SIOCDELRT:
983 		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
984 			return -EFAULT;
985 		err = ip_rt_ioctl(net, cmd, &rt);
986 		break;
987 	case SIOCRTMSG:
988 		err = -EINVAL;
989 		break;
990 	case SIOCDARP:
991 	case SIOCGARP:
992 	case SIOCSARP:
993 		err = arp_ioctl(net, cmd, (void __user *)arg);
994 		break;
995 	case SIOCGIFADDR:
996 	case SIOCGIFBRDADDR:
997 	case SIOCGIFNETMASK:
998 	case SIOCGIFDSTADDR:
999 	case SIOCGIFPFLAGS:
1000 		if (get_user_ifreq(&ifr, NULL, p))
1001 			return -EFAULT;
1002 		err = devinet_ioctl(net, cmd, &ifr);
1003 		if (!err && put_user_ifreq(&ifr, p))
1004 			err = -EFAULT;
1005 		break;
1006 
1007 	case SIOCSIFADDR:
1008 	case SIOCSIFBRDADDR:
1009 	case SIOCSIFNETMASK:
1010 	case SIOCSIFDSTADDR:
1011 	case SIOCSIFPFLAGS:
1012 	case SIOCSIFFLAGS:
1013 		if (get_user_ifreq(&ifr, NULL, p))
1014 			return -EFAULT;
1015 		err = devinet_ioctl(net, cmd, &ifr);
1016 		break;
1017 	default:
1018 		if (sk->sk_prot->ioctl)
1019 			err = sk_ioctl(sk, cmd, (void __user *)arg);
1020 		else
1021 			err = -ENOIOCTLCMD;
1022 		break;
1023 	}
1024 	return err;
1025 }
1026 EXPORT_SYMBOL(inet_ioctl);
1027 
1028 #ifdef CONFIG_COMPAT
1029 static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
1030 		struct compat_rtentry __user *ur)
1031 {
1032 	compat_uptr_t rtdev;
1033 	struct rtentry rt;
1034 
1035 	if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
1036 			3 * sizeof(struct sockaddr)) ||
1037 	    get_user(rt.rt_flags, &ur->rt_flags) ||
1038 	    get_user(rt.rt_metric, &ur->rt_metric) ||
1039 	    get_user(rt.rt_mtu, &ur->rt_mtu) ||
1040 	    get_user(rt.rt_window, &ur->rt_window) ||
1041 	    get_user(rt.rt_irtt, &ur->rt_irtt) ||
1042 	    get_user(rtdev, &ur->rt_dev))
1043 		return -EFAULT;
1044 
1045 	rt.rt_dev = compat_ptr(rtdev);
1046 	return ip_rt_ioctl(sock_net(sk), cmd, &rt);
1047 }
1048 
1049 static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1050 {
1051 	void __user *argp = compat_ptr(arg);
1052 	struct sock *sk = sock->sk;
1053 
1054 	switch (cmd) {
1055 	case SIOCADDRT:
1056 	case SIOCDELRT:
1057 		return inet_compat_routing_ioctl(sk, cmd, argp);
1058 	default:
1059 		if (!sk->sk_prot->compat_ioctl)
1060 			return -ENOIOCTLCMD;
1061 		return sk->sk_prot->compat_ioctl(sk, cmd, arg);
1062 	}
1063 }
1064 #endif /* CONFIG_COMPAT */
1065 
1066 const struct proto_ops inet_stream_ops = {
1067 	.family		   = PF_INET,
1068 	.owner		   = THIS_MODULE,
1069 	.release	   = inet_release,
1070 	.bind		   = inet_bind,
1071 	.connect	   = inet_stream_connect,
1072 	.socketpair	   = sock_no_socketpair,
1073 	.accept		   = inet_accept,
1074 	.getname	   = inet_getname,
1075 	.poll		   = tcp_poll,
1076 	.ioctl		   = inet_ioctl,
1077 	.gettstamp	   = sock_gettstamp,
1078 	.listen		   = inet_listen,
1079 	.shutdown	   = inet_shutdown,
1080 	.setsockopt	   = sock_common_setsockopt,
1081 	.getsockopt	   = sock_common_getsockopt,
1082 	.sendmsg	   = inet_sendmsg,
1083 	.recvmsg	   = inet_recvmsg,
1084 #ifdef CONFIG_MMU
1085 	.mmap		   = tcp_mmap,
1086 #endif
1087 	.splice_eof	   = inet_splice_eof,
1088 	.splice_read	   = tcp_splice_read,
1089 	.set_peek_off      = sk_set_peek_off,
1090 	.read_sock	   = tcp_read_sock,
1091 	.read_skb	   = tcp_read_skb,
1092 	.sendmsg_locked    = tcp_sendmsg_locked,
1093 	.peek_len	   = tcp_peek_len,
1094 #ifdef CONFIG_COMPAT
1095 	.compat_ioctl	   = inet_compat_ioctl,
1096 #endif
1097 	.set_rcvlowat	   = tcp_set_rcvlowat,
1098 };
1099 EXPORT_SYMBOL(inet_stream_ops);
1100 
1101 const struct proto_ops inet_dgram_ops = {
1102 	.family		   = PF_INET,
1103 	.owner		   = THIS_MODULE,
1104 	.release	   = inet_release,
1105 	.bind		   = inet_bind,
1106 	.connect	   = inet_dgram_connect,
1107 	.socketpair	   = sock_no_socketpair,
1108 	.accept		   = sock_no_accept,
1109 	.getname	   = inet_getname,
1110 	.poll		   = udp_poll,
1111 	.ioctl		   = inet_ioctl,
1112 	.gettstamp	   = sock_gettstamp,
1113 	.listen		   = sock_no_listen,
1114 	.shutdown	   = inet_shutdown,
1115 	.setsockopt	   = sock_common_setsockopt,
1116 	.getsockopt	   = sock_common_getsockopt,
1117 	.sendmsg	   = inet_sendmsg,
1118 	.read_skb	   = udp_read_skb,
1119 	.recvmsg	   = inet_recvmsg,
1120 	.mmap		   = sock_no_mmap,
1121 	.splice_eof	   = inet_splice_eof,
1122 	.set_peek_off	   = udp_set_peek_off,
1123 #ifdef CONFIG_COMPAT
1124 	.compat_ioctl	   = inet_compat_ioctl,
1125 #endif
1126 };
1127 EXPORT_SYMBOL(inet_dgram_ops);
1128 
1129 /*
1130  * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
1131  * udp_poll
1132  */
1133 static const struct proto_ops inet_sockraw_ops = {
1134 	.family		   = PF_INET,
1135 	.owner		   = THIS_MODULE,
1136 	.release	   = inet_release,
1137 	.bind		   = inet_bind,
1138 	.connect	   = inet_dgram_connect,
1139 	.socketpair	   = sock_no_socketpair,
1140 	.accept		   = sock_no_accept,
1141 	.getname	   = inet_getname,
1142 	.poll		   = datagram_poll,
1143 	.ioctl		   = inet_ioctl,
1144 	.gettstamp	   = sock_gettstamp,
1145 	.listen		   = sock_no_listen,
1146 	.shutdown	   = inet_shutdown,
1147 	.setsockopt	   = sock_common_setsockopt,
1148 	.getsockopt	   = sock_common_getsockopt,
1149 	.sendmsg	   = inet_sendmsg,
1150 	.recvmsg	   = inet_recvmsg,
1151 	.mmap		   = sock_no_mmap,
1152 	.splice_eof	   = inet_splice_eof,
1153 #ifdef CONFIG_COMPAT
1154 	.compat_ioctl	   = inet_compat_ioctl,
1155 #endif
1156 };
1157 
1158 static const struct net_proto_family inet_family_ops = {
1159 	.family = PF_INET,
1160 	.create = inet_create,
1161 	.owner	= THIS_MODULE,
1162 };
1163 
1164 /* Upon startup we insert all the elements in inetsw_array[] into
1165  * the linked list inetsw.
1166  */
1167 static struct inet_protosw inetsw_array[] =
1168 {
1169 	{
1170 		.type =       SOCK_STREAM,
1171 		.protocol =   IPPROTO_TCP,
1172 		.prot =       &tcp_prot,
1173 		.ops =        &inet_stream_ops,
1174 		.flags =      INET_PROTOSW_PERMANENT |
1175 			      INET_PROTOSW_ICSK,
1176 	},
1177 
1178 	{
1179 		.type =       SOCK_DGRAM,
1180 		.protocol =   IPPROTO_UDP,
1181 		.prot =       &udp_prot,
1182 		.ops =        &inet_dgram_ops,
1183 		.flags =      INET_PROTOSW_PERMANENT,
1184        },
1185 
1186        {
1187 		.type =       SOCK_DGRAM,
1188 		.protocol =   IPPROTO_ICMP,
1189 		.prot =       &ping_prot,
1190 		.ops =        &inet_sockraw_ops,
1191 		.flags =      INET_PROTOSW_REUSE,
1192        },
1193 
1194        {
1195 	       .type =       SOCK_RAW,
1196 	       .protocol =   IPPROTO_IP,	/* wild card */
1197 	       .prot =       &raw_prot,
1198 	       .ops =        &inet_sockraw_ops,
1199 	       .flags =      INET_PROTOSW_REUSE,
1200        }
1201 };
1202 
1203 #define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
1204 
1205 void inet_register_protosw(struct inet_protosw *p)
1206 {
1207 	struct list_head *lh;
1208 	struct inet_protosw *answer;
1209 	int protocol = p->protocol;
1210 	struct list_head *last_perm;
1211 
1212 	spin_lock_bh(&inetsw_lock);
1213 
1214 	if (p->type >= SOCK_MAX)
1215 		goto out_illegal;
1216 
1217 	/* If we are trying to override a permanent protocol, bail. */
1218 	last_perm = &inetsw[p->type];
1219 	list_for_each(lh, &inetsw[p->type]) {
1220 		answer = list_entry(lh, struct inet_protosw, list);
1221 		/* Check only the non-wild match. */
1222 		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
1223 			break;
1224 		if (protocol == answer->protocol)
1225 			goto out_permanent;
1226 		last_perm = lh;
1227 	}
1228 
1229 	/* Add the new entry after the last permanent entry if any, so that
1230 	 * the new entry does not override a permanent entry when matched with
1231 	 * a wild-card protocol. But it is allowed to override any existing
1232 	 * non-permanent entry.  This means that when we remove this entry, the
1233 	 * system automatically returns to the old behavior.
1234 	 */
1235 	list_add_rcu(&p->list, last_perm);
1236 out:
1237 	spin_unlock_bh(&inetsw_lock);
1238 
1239 	return;
1240 
1241 out_permanent:
1242 	pr_err("Attempt to override permanent protocol %d\n", protocol);
1243 	goto out;
1244 
1245 out_illegal:
1246 	pr_err("Ignoring attempt to register invalid socket type %d\n",
1247 	       p->type);
1248 	goto out;
1249 }
1250 EXPORT_SYMBOL(inet_register_protosw);
1251 
1252 void inet_unregister_protosw(struct inet_protosw *p)
1253 {
1254 	if (INET_PROTOSW_PERMANENT & p->flags) {
1255 		pr_err("Attempt to unregister permanent protocol %d\n",
1256 		       p->protocol);
1257 	} else {
1258 		spin_lock_bh(&inetsw_lock);
1259 		list_del_rcu(&p->list);
1260 		spin_unlock_bh(&inetsw_lock);
1261 
1262 		synchronize_net();
1263 	}
1264 }
1265 EXPORT_SYMBOL(inet_unregister_protosw);
1266 
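/* Re-route an established flow and, if the preferred source address has
 * changed, move the socket to the new address in the bind hash and rehash
 * it.  Only used when ip_dynaddr allows changing addresses under a live
 * socket.
 */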
1267 static int inet_sk_reselect_saddr(struct sock *sk)
1268 {
1269 	struct inet_sock *inet = inet_sk(sk);
1270 	__be32 old_saddr = inet->inet_saddr;
1271 	__be32 daddr = inet->inet_daddr;
1272 	struct flowi4 *fl4;
1273 	struct rtable *rt;
1274 	__be32 new_saddr;
1275 	struct ip_options_rcu *inet_opt;
1276 	int err;
1277 
1278 	inet_opt = rcu_dereference_protected(inet->inet_opt,
1279 					     lockdep_sock_is_held(sk));
1280 	if (inet_opt && inet_opt->opt.srr)
1281 		daddr = inet_opt->opt.faddr;
1282 
1283 	/* Query new route. */
1284 	fl4 = &inet->cork.fl.u.ip4;
1285 	rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
1286 			      sk->sk_protocol, inet->inet_sport,
1287 			      inet->inet_dport, sk);
1288 	if (IS_ERR(rt))
1289 		return PTR_ERR(rt);
1290 
1291 	new_saddr = fl4->saddr;
1292 
1293 	if (new_saddr == old_saddr) {
1294 		sk_setup_caps(sk, &rt->dst);
1295 		return 0;
1296 	}
1297 
1298 	err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
1299 	if (err) {
1300 		ip_rt_put(rt);
1301 		return err;
1302 	}
1303 
1304 	sk_setup_caps(sk, &rt->dst);
1305 
1306 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
1307 		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1308 			__func__, &old_saddr, &new_saddr);
1309 	}
1310 
1311 	/*
1312 	 * XXX The only one ugly spot where we need to
1313 	 * XXX really change the sockets identity after
1314 	 * XXX it has entered the hashes. -DaveM
1315 	 *
1316 	 * Besides that, it does not check for connection
1317 	 * uniqueness. Wait for troubles.
1318 	 */
1319 	return __sk_prot_rehash(sk);
1320 }
1321 
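/* Revalidate the socket's cached route.  If it is gone, re-route; when that
 * fails and ip_dynaddr permits (and the local address is not locked by
 * bind()), fall back to selecting a new source address.
 */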
1322 int inet_sk_rebuild_header(struct sock *sk)
1323 {
1324 	struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
1325 	struct inet_sock *inet = inet_sk(sk);
1326 	struct flowi4 *fl4;
1327 	int err;
1328 
1329 	/* Route is OK, nothing to do. */
1330 	if (rt)
1331 		return 0;
1332 
1333 	/* Reroute. */
1334 	fl4 = &inet->cork.fl.u.ip4;
1335 	inet_sk_init_flowi4(inet, fl4);
1336 	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
1337 	if (!IS_ERR(rt)) {
1338 		err = 0;
1339 		sk_setup_caps(sk, &rt->dst);
1340 	} else {
1341 		err = PTR_ERR(rt);
1342 
1343 		/* Routing failed... */
1344 		sk->sk_route_caps = 0;
1345 
1346 		if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
1347 		    sk->sk_state != TCP_SYN_SENT ||
1348 		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1349 		    (err = inet_sk_reselect_saddr(sk)) != 0)
1350 			WRITE_ONCE(sk->sk_err_soft, -err);
1351 	}
1352 
1353 	return err;
1354 }
1355 EXPORT_SYMBOL(inet_sk_rebuild_header);
1356 
1357 void inet_sk_set_state(struct sock *sk, int state)
1358 {
1359 	trace_inet_sock_set_state(sk, sk->sk_state, state);
1360 	sk->sk_state = state;
1361 }
1362 EXPORT_SYMBOL(inet_sk_set_state);
1363 
1364 void inet_sk_state_store(struct sock *sk, int newstate)
1365 {
1366 	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
1367 	smp_store_release(&sk->sk_state, newstate);
1368 }
1369 
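/* IPv4 GSO callback: pull the IP header, let the inner protocol's
 * gso_segment callback split the payload, then fix up the IP id, length and
 * header checksum on every resulting segment (and the fragment offsets for
 * UDP fragmentation offload).
 */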
1370 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1371 				 netdev_features_t features)
1372 {
1373 	bool udpfrag = false, fixedid = false, gso_partial, encap;
1374 	struct sk_buff *segs = ERR_PTR(-EINVAL);
1375 	const struct net_offload *ops;
1376 	unsigned int offset = 0;
1377 	struct iphdr *iph;
1378 	int proto, tot_len;
1379 	int nhoff;
1380 	int ihl;
1381 	int id;
1382 
1383 	skb_reset_network_header(skb);
1384 	nhoff = skb_network_header(skb) - skb_mac_header(skb);
1385 	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1386 		goto out;
1387 
1388 	iph = ip_hdr(skb);
1389 	ihl = iph->ihl * 4;
1390 	if (ihl < sizeof(*iph))
1391 		goto out;
1392 
1393 	id = ntohs(iph->id);
1394 	proto = iph->protocol;
1395 
1396 	/* Warning: after this point, iph might be no longer valid */
1397 	if (unlikely(!pskb_may_pull(skb, ihl)))
1398 		goto out;
1399 	__skb_pull(skb, ihl);
1400 
1401 	encap = SKB_GSO_CB(skb)->encap_level > 0;
1402 	if (encap)
1403 		features &= skb->dev->hw_enc_features;
1404 	SKB_GSO_CB(skb)->encap_level += ihl;
1405 
1406 	skb_reset_transport_header(skb);
1407 
1408 	segs = ERR_PTR(-EPROTONOSUPPORT);
1409 
1410 	fixedid = !!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCP_FIXEDID << encap));
1411 
1412 	if (!skb->encapsulation || encap)
1413 		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
1414 
1415 	ops = rcu_dereference(inet_offloads[proto]);
1416 	if (likely(ops && ops->callbacks.gso_segment)) {
1417 		segs = ops->callbacks.gso_segment(skb, features);
1418 		if (!segs)
1419 			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
1420 	}
1421 
1422 	if (IS_ERR_OR_NULL(segs))
1423 		goto out;
1424 
1425 	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
1426 
1427 	skb = segs;
1428 	do {
1429 		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
1430 		if (udpfrag) {
1431 			iph->frag_off = htons(offset >> 3);
1432 			if (skb->next)
1433 				iph->frag_off |= htons(IP_MF);
1434 			offset += skb->len - nhoff - ihl;
1435 			tot_len = skb->len - nhoff;
1436 		} else if (skb_is_gso(skb)) {
1437 			if (!fixedid) {
1438 				iph->id = htons(id);
1439 				id += skb_shinfo(skb)->gso_segs;
1440 			}
1441 
1442 			if (gso_partial)
1443 				tot_len = skb_shinfo(skb)->gso_size +
1444 					  SKB_GSO_CB(skb)->data_offset +
1445 					  skb->head - (unsigned char *)iph;
1446 			else
1447 				tot_len = skb->len - nhoff;
1448 		} else {
1449 			if (!fixedid)
1450 				iph->id = htons(id++);
1451 			tot_len = skb->len - nhoff;
1452 		}
1453 		iph->tot_len = htons(tot_len);
1454 		ip_send_check(iph);
1455 		if (encap)
1456 			skb_reset_inner_headers(skb);
1457 		skb->network_header = (u8 *)iph - skb->head;
1458 		skb_reset_mac_len(skb);
1459 	} while ((skb = skb->next));
1460 
1461 out:
1462 	return segs;
1463 }
1464 
1465 static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
1466 					netdev_features_t features)
1467 {
1468 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
1469 		return ERR_PTR(-EINVAL);
1470 
1471 	return inet_gso_segment(skb, features);
1472 }
1473 
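/* IPv4 GRO receive callback: only plain 20-byte headers (no options, no
 * fragments, valid checksum) are merge candidates.  Held packets that differ
 * in protocol or addresses are marked as a different flow before the L4
 * gro_receive callback runs.
 */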
1474 struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
1475 {
1476 	const struct net_offload *ops;
1477 	struct sk_buff *pp = NULL;
1478 	const struct iphdr *iph;
1479 	struct sk_buff *p;
1480 	unsigned int hlen;
1481 	unsigned int off;
1482 	int flush = 1;
1483 	int proto;
1484 
1485 	off = skb_gro_offset(skb);
1486 	hlen = off + sizeof(*iph);
1487 	iph = skb_gro_header(skb, hlen, off);
1488 	if (unlikely(!iph))
1489 		goto out;
1490 
1491 	proto = iph->protocol;
1492 
1493 	ops = rcu_dereference(inet_offloads[proto]);
1494 	if (!ops || !ops->callbacks.gro_receive)
1495 		goto out;
1496 
1497 	if (*(u8 *)iph != 0x45)
1498 		goto out;
1499 
1500 	if (ip_is_fragment(iph))
1501 		goto out;
1502 
1503 	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1504 		goto out;
1505 
1506 	NAPI_GRO_CB(skb)->proto = proto;
1507 	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (ntohl(*(__be32 *)&iph->id) & ~IP_DF));
1508 
1509 	list_for_each_entry(p, head, list) {
1510 		struct iphdr *iph2;
1511 
1512 		if (!NAPI_GRO_CB(p)->same_flow)
1513 			continue;
1514 
1515 		iph2 = (struct iphdr *)(p->data + off);
1516 		/* The above works because, with the exception of the top
1517 		 * (inner most) layer, we only aggregate pkts with the same
1518 		 * hdr length so all the hdrs we'll need to verify will start
1519 		 * at the same offset.
1520 		 */
1521 		if ((iph->protocol ^ iph2->protocol) |
1522 		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1523 		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1524 			NAPI_GRO_CB(p)->same_flow = 0;
1525 			continue;
1526 		}
1527 	}
1528 
1529 	NAPI_GRO_CB(skb)->flush |= flush;
1530 	NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
1531 
1532 	/* Note : No need to call skb_gro_postpull_rcsum() here,
1533 	 * as we already checked checksum over ipv4 header was 0
1534 	 */
1535 	skb_gro_pull(skb, sizeof(*iph));
1536 	skb_set_transport_header(skb, skb_gro_offset(skb));
1537 
1538 	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
1539 				       ops->callbacks.gro_receive, head, skb);
1540 
1541 out:
1542 	skb_gro_flush_final(skb, pp, flush);
1543 
1544 	return pp;
1545 }
1546 
1547 static struct sk_buff *ipip_gro_receive(struct list_head *head,
1548 					struct sk_buff *skb)
1549 {
1550 	if (NAPI_GRO_CB(skb)->encap_mark) {
1551 		NAPI_GRO_CB(skb)->flush = 1;
1552 		return NULL;
1553 	}
1554 
1555 	NAPI_GRO_CB(skb)->encap_mark = 1;
1556 
1557 	return inet_gro_receive(head, skb);
1558 }
1559 
1560 #define SECONDS_PER_DAY	86400
1561 
1562 /* inet_current_timestamp - Return IP network timestamp
1563  *
1564  * Return milliseconds since midnight in network byte order.
1565  */
1566 __be32 inet_current_timestamp(void)
1567 {
1568 	u32 secs;
1569 	u32 msecs;
1570 	struct timespec64 ts;
1571 
1572 	ktime_get_real_ts64(&ts);
1573 
1574 	/* Get secs since midnight. */
1575 	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
1576 	/* Convert to msecs. */
1577 	msecs = secs * MSEC_PER_SEC;
1578 	/* Convert nsec to msec. */
1579 	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
1580 
1581 	/* Convert to network byte order. */
1582 	return htonl(msecs);
1583 }
1584 EXPORT_SYMBOL(inet_current_timestamp);
1585 
1586 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1587 {
1588 	unsigned int family = READ_ONCE(sk->sk_family);
1589 
1590 	if (family == AF_INET)
1591 		return ip_recv_error(sk, msg, len, addr_len);
1592 #if IS_ENABLED(CONFIG_IPV6)
1593 	if (family == AF_INET6)
1594 		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
1595 #endif
1596 	return -EINVAL;
1597 }
1598 EXPORT_SYMBOL(inet_recv_error);
1599 
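/* IPv4 GRO complete callback: restore tot_len and the header checksum of the
 * merged packet, then let the L4 gro_complete callback fix up its own header.
 */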
1600 int inet_gro_complete(struct sk_buff *skb, int nhoff)
1601 {
1602 	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
1603 	const struct net_offload *ops;
1604 	__be16 totlen = iph->tot_len;
1605 	int proto = iph->protocol;
1606 	int err = -ENOSYS;
1607 
1608 	if (skb->encapsulation) {
1609 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1610 		skb_set_inner_network_header(skb, nhoff);
1611 	}
1612 
1613 	iph_set_totlen(iph, skb->len - nhoff);
1614 	csum_replace2(&iph->check, totlen, iph->tot_len);
1615 
1616 	ops = rcu_dereference(inet_offloads[proto]);
1617 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1618 		goto out;
1619 
1620 	/* Only need to add sizeof(*iph) to get to the next hdr below
1621 	 * because any hdr with option will have been flushed in
1622 	 * inet_gro_receive().
1623 	 */
1624 	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
1625 			      tcp4_gro_complete, udp4_gro_complete,
1626 			      skb, nhoff + sizeof(*iph));
1627 
1628 out:
1629 	return err;
1630 }
1631 
1632 static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
1633 {
1634 	skb->encapsulation = 1;
1635 	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
1636 	return inet_gro_complete(skb, nhoff);
1637 }
1638 
1639 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1640 			 unsigned short type, unsigned char protocol,
1641 			 struct net *net)
1642 {
1643 	struct socket *sock;
1644 	int rc = sock_create_kern(net, family, type, protocol, &sock);
1645 
1646 	if (rc == 0) {
1647 		*sk = sock->sk;
1648 		(*sk)->sk_allocation = GFP_ATOMIC;
1649 		(*sk)->sk_use_task_frag = false;
1650 		/*
1651 		 * Unhash it so that IP input processing does not even see it,
1652 		 * we do not wish this socket to see incoming packets.
1653 		 */
1654 		(*sk)->sk_prot->unhash(*sk);
1655 	}
1656 	return rc;
1657 }
1658 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1659 
1660 unsigned long snmp_fold_field(void __percpu *mib, int offt)
1661 {
1662 	unsigned long res = 0;
1663 	int i;
1664 
1665 	for_each_possible_cpu(i)
1666 		res += snmp_get_cpu_field(mib, i, offt);
1667 	return res;
1668 }
1669 EXPORT_SYMBOL_GPL(snmp_fold_field);
1670 
1671 #if BITS_PER_LONG==32
1672 
1673 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
1674 			 size_t syncp_offset)
1675 {
1676 	void *bhptr;
1677 	struct u64_stats_sync *syncp;
1678 	u64 v;
1679 	unsigned int start;
1680 
1681 	bhptr = per_cpu_ptr(mib, cpu);
1682 	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1683 	do {
1684 		start = u64_stats_fetch_begin(syncp);
1685 		v = *(((u64 *)bhptr) + offt);
1686 	} while (u64_stats_fetch_retry(syncp, start));
1687 
1688 	return v;
1689 }
1690 EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
1691 
1692 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1693 {
1694 	u64 res = 0;
1695 	int cpu;
1696 
1697 	for_each_possible_cpu(cpu) {
1698 		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
1699 	}
1700 	return res;
1701 }
1702 EXPORT_SYMBOL_GPL(snmp_fold_field64);
1703 #endif
1704 
1705 #ifdef CONFIG_IP_MULTICAST
1706 static const struct net_protocol igmp_protocol = {
1707 	.handler =	igmp_rcv,
1708 };
1709 #endif
1710 
1711 static const struct net_protocol icmp_protocol = {
1712 	.handler =	icmp_rcv,
1713 	.err_handler =	icmp_err,
1714 	.no_policy =	1,
1715 };
1716 
1717 static __net_init int ipv4_mib_init_net(struct net *net)
1718 {
1719 	int i;
1720 
1721 	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1722 	if (!net->mib.tcp_statistics)
1723 		goto err_tcp_mib;
1724 	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1725 	if (!net->mib.ip_statistics)
1726 		goto err_ip_mib;
1727 
1728 	for_each_possible_cpu(i) {
1729 		struct ipstats_mib *af_inet_stats;
1730 		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1731 		u64_stats_init(&af_inet_stats->syncp);
1732 	}
1733 
1734 	net->mib.net_statistics = alloc_percpu(struct linux_mib);
1735 	if (!net->mib.net_statistics)
1736 		goto err_net_mib;
1737 	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1738 	if (!net->mib.udp_statistics)
1739 		goto err_udp_mib;
1740 	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
1741 	if (!net->mib.udplite_statistics)
1742 		goto err_udplite_mib;
1743 	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1744 	if (!net->mib.icmp_statistics)
1745 		goto err_icmp_mib;
1746 	net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib);
1747 	if (!net->mib.icmpmsg_statistics)
1748 		goto err_icmpmsg_mib;
1749 
1750 	tcp_mib_init(net);
1751 	return 0;
1752 
1753 err_icmpmsg_mib:
1754 	free_percpu(net->mib.icmp_statistics);
1755 err_icmp_mib:
1756 	free_percpu(net->mib.udplite_statistics);
1757 err_udplite_mib:
1758 	free_percpu(net->mib.udp_statistics);
1759 err_udp_mib:
1760 	free_percpu(net->mib.net_statistics);
1761 err_net_mib:
1762 	free_percpu(net->mib.ip_statistics);
1763 err_ip_mib:
1764 	free_percpu(net->mib.tcp_statistics);
1765 err_tcp_mib:
1766 	return -ENOMEM;
1767 }
1768 
1769 static __net_exit void ipv4_mib_exit_net(struct net *net)
1770 {
1771 	kfree(net->mib.icmpmsg_statistics);
1772 	free_percpu(net->mib.icmp_statistics);
1773 	free_percpu(net->mib.udplite_statistics);
1774 	free_percpu(net->mib.udp_statistics);
1775 	free_percpu(net->mib.net_statistics);
1776 	free_percpu(net->mib.ip_statistics);
1777 	free_percpu(net->mib.tcp_statistics);
1778 #ifdef CONFIG_MPTCP
1779 	/* allocated on demand, see mptcp_init_sock() */
1780 	free_percpu(net->mib.mptcp_statistics);
1781 #endif
1782 }
1783 
1784 static __net_initdata struct pernet_operations ipv4_mib_ops = {
1785 	.init = ipv4_mib_init_net,
1786 	.exit = ipv4_mib_exit_net,
1787 };
1788 
1789 static int __init init_ipv4_mibs(void)
1790 {
1791 	return register_pernet_subsys(&ipv4_mib_ops);
1792 }
1793 
1794 static __net_init int inet_init_net(struct net *net)
1795 {
1796 	/*
1797 	 * Set defaults for local port range
1798 	 */
1799 	net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u;
1800 
1801 	seqlock_init(&net->ipv4.ping_group_range.lock);
1802 	/*
1803 	 * Sane defaults - nobody may create ping sockets.
1804 	 * Boot scripts should set this to distro-specific group.
1805 	 */
1806 	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1807 	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1808 
1809 	/* Default values for sysctl-controlled parameters.
1810 	 * We set them here, in case sysctl is not compiled.
1811 	 */
1812 	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1813 	net->ipv4.sysctl_ip_fwd_update_priority = 1;
1814 	net->ipv4.sysctl_ip_dynaddr = 0;
1815 	net->ipv4.sysctl_ip_early_demux = 1;
1816 	net->ipv4.sysctl_udp_early_demux = 1;
1817 	net->ipv4.sysctl_tcp_early_demux = 1;
1818 	net->ipv4.sysctl_nexthop_compat_mode = 1;
1819 #ifdef CONFIG_SYSCTL
1820 	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
1821 #endif
1822 
1823 	/* Some igmp sysctl, whose values are always used */
1824 	net->ipv4.sysctl_igmp_max_memberships = 20;
1825 	net->ipv4.sysctl_igmp_max_msf = 10;
1826 	/* IGMP reports for link-local multicast groups are enabled by default */
1827 	net->ipv4.sysctl_igmp_llm_reports = 1;
1828 	net->ipv4.sysctl_igmp_qrv = 2;
1829 
1830 	net->ipv4.sysctl_fib_notify_on_flag_change = 0;
1831 
1832 	return 0;
1833 }
1834 
1835 static __net_initdata struct pernet_operations af_inet_ops = {
1836 	.init = inet_init_net,
1837 };
1838 
1839 static int __init init_inet_pernet_ops(void)
1840 {
1841 	return register_pernet_subsys(&af_inet_ops);
1842 }
1843 
1844 static int ipv4_proc_init(void);
1845 
1846 /*
1847  *	IP protocol layer initialiser
1848  */
1849 
1850 
1851 static const struct net_offload ipip_offload = {
1852 	.callbacks = {
1853 		.gso_segment	= ipip_gso_segment,
1854 		.gro_receive	= ipip_gro_receive,
1855 		.gro_complete	= ipip_gro_complete,
1856 	},
1857 };
1858 
1859 static int __init ipip_offload_init(void)
1860 {
1861 	return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
1862 }
1863 
1864 static int __init ipv4_offload_init(void)
1865 {
1866 	/*
1867 	 * Add offloads
1868 	 */
1869 	if (udpv4_offload_init() < 0)
1870 		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
1871 	if (tcpv4_offload_init() < 0)
1872 		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
1873 	if (ipip_offload_init() < 0)
1874 		pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
1875 
1876 	net_hotdata.ip_packet_offload = (struct packet_offload) {
1877 		.type = cpu_to_be16(ETH_P_IP),
1878 		.callbacks = {
1879 			.gso_segment = inet_gso_segment,
1880 			.gro_receive = inet_gro_receive,
1881 			.gro_complete = inet_gro_complete,
1882 		},
1883 	};
1884 	dev_add_offload(&net_hotdata.ip_packet_offload);
1885 	return 0;
1886 }
1887 
1888 fs_initcall(ipv4_offload_init);
1889 
1890 static struct packet_type ip_packet_type __read_mostly = {
1891 	.type = cpu_to_be16(ETH_P_IP),
1892 	.func = ip_rcv,
1893 	.list_func = ip_list_rcv,
1894 };
1895 
1896 static int __init inet_init(void)
1897 {
1898 	struct inet_protosw *q;
1899 	struct list_head *r;
1900 	int rc;
1901 
1902 	sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
1903 
1904 	raw_hashinfo_init(&raw_v4_hashinfo);
1905 
1906 	rc = proto_register(&tcp_prot, 1);
1907 	if (rc)
1908 		goto out;
1909 
1910 	rc = proto_register(&udp_prot, 1);
1911 	if (rc)
1912 		goto out_unregister_tcp_proto;
1913 
1914 	rc = proto_register(&raw_prot, 1);
1915 	if (rc)
1916 		goto out_unregister_udp_proto;
1917 
1918 	rc = proto_register(&ping_prot, 1);
1919 	if (rc)
1920 		goto out_unregister_raw_proto;
1921 
1922 	/*
1923 	 *	Tell SOCKET that we are alive...
1924 	 */
1925 
1926 	(void)sock_register(&inet_family_ops);
1927 
1928 #ifdef CONFIG_SYSCTL
1929 	ip_static_sysctl_init();
1930 #endif
1931 
1932 	/*
1933 	 *	Add all the base protocols.
1934 	 */
1935 
1936 	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1937 		pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1938 
1939 	net_hotdata.udp_protocol = (struct net_protocol) {
1940 		.handler =	udp_rcv,
1941 		.err_handler =	udp_err,
1942 		.no_policy =	1,
1943 	};
1944 	if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
1945 		pr_crit("%s: Cannot add UDP protocol\n", __func__);
1946 
1947 	net_hotdata.tcp_protocol = (struct net_protocol) {
1948 		.handler	=	tcp_v4_rcv,
1949 		.err_handler	=	tcp_v4_err,
1950 		.no_policy	=	1,
1951 		.icmp_strict_tag_validation = 1,
1952 	};
1953 	if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
1954 		pr_crit("%s: Cannot add TCP protocol\n", __func__);
1955 #ifdef CONFIG_IP_MULTICAST
1956 	if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1957 		pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1958 #endif
1959 
1960 	/* Register the socket-side information for inet_create. */
1961 	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
1962 		INIT_LIST_HEAD(r);
1963 
1964 	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
1965 		inet_register_protosw(q);
1966 
1967 	/*
1968 	 *	Set the ARP module up
1969 	 */
1970 
1971 	arp_init();
1972 
1973 	/*
1974 	 *	Set the IP module up
1975 	 */
1976 
1977 	ip_init();
1978 
1979 	/* Initialise per-cpu ipv4 mibs */
1980 	if (init_ipv4_mibs())
1981 		panic("%s: Cannot init ipv4 mibs\n", __func__);
1982 
1983 	/* Setup TCP slab cache for open requests. */
1984 	tcp_init();
1985 
1986 	/* Setup UDP memory threshold */
1987 	udp_init();
1988 
1989 	/* Add UDP-Lite (RFC 3828) */
1990 	udplite4_register();
1991 
1992 	raw_init();
1993 
1994 	ping_init();
1995 
1996 	/*
1997 	 *	Set the ICMP layer up
1998 	 */
1999 
2000 	if (icmp_init() < 0)
2001 		panic("Failed to create the ICMP control socket.\n");
2002 
2003 	/*
2004 	 *	Initialise the multicast router
2005 	 */
2006 #if defined(CONFIG_IP_MROUTE)
2007 	if (ip_mr_init())
2008 		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
2009 #endif
2010 
2011 	if (init_inet_pernet_ops())
2012 		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
2013 
2014 	ipv4_proc_init();
2015 
2016 	ipfrag_init();
2017 
2018 	dev_add_pack(&ip_packet_type);
2019 
2020 	ip_tunnel_core_init();
2021 
2022 	rc = 0;
2023 out:
2024 	return rc;
2025 out_unregister_raw_proto:
2026 	proto_unregister(&raw_prot);
2027 out_unregister_udp_proto:
2028 	proto_unregister(&udp_prot);
2029 out_unregister_tcp_proto:
2030 	proto_unregister(&tcp_prot);
2031 	goto out;
2032 }
2033 
2034 fs_initcall(inet_init);
2035 
2036 /* ------------------------------------------------------------------------ */
2037 
2038 #ifdef CONFIG_PROC_FS
2039 static int __init ipv4_proc_init(void)
2040 {
2041 	int rc = 0;
2042 
2043 	if (raw_proc_init())
2044 		goto out_raw;
2045 	if (tcp4_proc_init())
2046 		goto out_tcp;
2047 	if (udp4_proc_init())
2048 		goto out_udp;
2049 	if (ping_proc_init())
2050 		goto out_ping;
2051 	if (ip_misc_proc_init())
2052 		goto out_misc;
2053 out:
2054 	return rc;
2055 out_misc:
2056 	ping_proc_exit();
2057 out_ping:
2058 	udp4_proc_exit();
2059 out_udp:
2060 	tcp4_proc_exit();
2061 out_tcp:
2062 	raw_proc_exit();
2063 out_raw:
2064 	rc = -ENOMEM;
2065 	goto out;
2066 }
2067 
2068 #else /* CONFIG_PROC_FS */
2069 static int __init ipv4_proc_init(void)
2070 {
2071 	return 0;
2072 }
2073 #endif /* CONFIG_PROC_FS */
2074