xref: /linux/net/ipv4/af_inet.c (revision dfecb0c5af3b07ebfa84be63a7a21bfc9e29a872)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		PF_INET protocol family socket handler.
8  *
9  * Authors:	Ross Biro
10  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *		Florian La Roche, <flla@stud.uni-sb.de>
12  *		Alan Cox, <A.Cox@swansea.ac.uk>
13  *
14  * Changes (see also sock.c)
15  *
16  *		piggy,
17  *		Karl Knutson	:	Socket protocol table
18  *		A.N.Kuznetsov	:	Socket death error in accept().
19  *		John Richardson :	Fix non blocking error in connect()
20  *					so sockets that fail to connect
21  *					don't return -EINPROGRESS.
22  *		Alan Cox	:	Asynchronous I/O support
23  *		Alan Cox	:	Keep correct socket pointer on sock
24  *					structures
25  *					when accept() ed
26  *		Alan Cox	:	Semantics of SO_LINGER aren't state
27  *					moved to close when you look carefully.
28  *					With this fixed and the accept bug fixed
29  *					some RPC stuff seems happier.
30  *		Niibe Yutaka	:	4.4BSD style write async I/O
31  *		Alan Cox,
32  *		Tony Gale 	:	Fixed reuse semantics.
33  *		Alan Cox	:	bind() shouldn't abort existing but dead
34  *					sockets. Stops FTP netin:.. I hope.
35  *		Alan Cox	:	bind() works correctly for RAW sockets.
36  *					Note that FreeBSD at least was broken
37  *					in this respect so be careful with
38  *					compatibility tests...
39  *		Alan Cox	:	routing cache support
40  *		Alan Cox	:	memzero the socket structure for
41  *					compactness.
42  *		Matt Day	:	nonblock connect error handler
43  *		Alan Cox	:	Allow large numbers of pending sockets
44  *					(eg for big web sites), but only if
45  *					specifically application requested.
46  *		Alan Cox	:	New buffering throughout IP. Used
47  *					dumbly.
48  *		Alan Cox	:	New buffering now used smartly.
49  *		Alan Cox	:	BSD rather than common sense
50  *					interpretation of listen.
51  *		Germano Caronni	:	Assorted small races.
52  *		Alan Cox	:	sendmsg/recvmsg basic support.
53  *		Alan Cox	:	Only sendmsg/recvmsg now supported.
54  *		Alan Cox	:	Locked down bind (see security list).
55  *		Alan Cox	:	Loosened bind a little.
56  *		Mike McLagan	:	ADD/DEL DLCI Ioctls
57  *	Willy Konynenberg	:	Transparent proxying support.
58  *		David S. Miller	:	New socket lookup architecture.
59  *					Some other random speedups.
60  *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
61  *		Andi Kleen	:	Fix inet_stream_connect TCP race.
62  */
63 
64 #define pr_fmt(fmt) "IPv4: " fmt
65 
66 #include <linux/err.h>
67 #include <linux/errno.h>
68 #include <linux/types.h>
69 #include <linux/socket.h>
70 #include <linux/in.h>
71 #include <linux/kernel.h>
72 #include <linux/kmod.h>
73 #include <linux/sched.h>
74 #include <linux/timer.h>
75 #include <linux/string.h>
76 #include <linux/sockios.h>
77 #include <linux/net.h>
78 #include <linux/capability.h>
79 #include <linux/fcntl.h>
80 #include <linux/mm.h>
81 #include <linux/interrupt.h>
82 #include <linux/stat.h>
83 #include <linux/init.h>
84 #include <linux/poll.h>
85 #include <linux/netfilter_ipv4.h>
86 #include <linux/random.h>
87 #include <linux/slab.h>
88 
89 #include <linux/uaccess.h>
90 
91 #include <linux/inet.h>
92 #include <linux/igmp.h>
93 #include <linux/inetdevice.h>
94 #include <linux/netdevice.h>
95 #include <net/checksum.h>
96 #include <net/ip.h>
97 #include <net/protocol.h>
98 #include <net/arp.h>
99 #include <net/route.h>
100 #include <net/ip_fib.h>
101 #include <net/inet_connection_sock.h>
102 #include <net/gro.h>
103 #include <net/gso.h>
104 #include <net/tcp.h>
105 #include <net/psp.h>
106 #include <net/udp.h>
107 #include <net/ping.h>
108 #include <linux/skbuff.h>
109 #include <net/sock.h>
110 #include <net/raw.h>
111 #include <net/icmp.h>
112 #include <net/inet_common.h>
113 #include <net/ip_tunnels.h>
114 #include <net/xfrm.h>
115 #include <net/net_namespace.h>
116 #include <net/secure_seq.h>
117 #ifdef CONFIG_IP_MROUTE
118 #include <linux/mroute.h>
119 #endif
120 #include <net/l3mdev.h>
121 #include <net/compat.h>
122 #include <net/rps.h>
123 
124 #include <trace/events/sock.h>
125 
126 /* Keep the definition of the IPv6 disable flag here for now, to avoid annoying
127  * linker issues when IPv6 is built as a module (CONFIG_IPV6=m).
128  */
129 int disable_ipv6_mod;
130 EXPORT_SYMBOL(disable_ipv6_mod);
131 
132 /* The inetsw table contains everything that inet_create needs to
133  * build a new socket.
134  */
135 static struct list_head inetsw[SOCK_MAX];
136 static DEFINE_SPINLOCK(inetsw_lock);
137 
138 /* New destruction routine */
139 
140 void inet_sock_destruct(struct sock *sk)
141 {
142 	struct inet_sock *inet = inet_sk(sk);
143 
144 	__skb_queue_purge(&sk->sk_receive_queue);
145 	__skb_queue_purge(&sk->sk_error_queue);
146 
147 	sk_mem_reclaim_final(sk);
148 
149 	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
150 		pr_err("Attempt to release TCP socket in state %d %p\n",
151 		       sk->sk_state, sk);
152 		return;
153 	}
154 	if (!sock_flag(sk, SOCK_DEAD)) {
155 		pr_err("Attempt to release alive inet socket %p\n", sk);
156 		return;
157 	}
158 
159 	WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
160 	WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
161 	WARN_ON_ONCE(sk->sk_wmem_queued);
162 	WARN_ON_ONCE(sk->sk_forward_alloc);
163 
164 	kfree(rcu_dereference_protected(inet->inet_opt, 1));
165 	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
166 	dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
167 	psp_sk_assoc_free(sk);
168 }
169 EXPORT_SYMBOL(inet_sock_destruct);
170 
171 /*
172  *	The routines beyond this point handle the behaviour of an AF_INET
173  *	socket object. Mostly it punts to the subprotocols of IP to do
174  *	the work.
175  */
176 
177 /*
178  *	Automatically bind an unbound socket.
179  */
180 
181 static int inet_autobind(struct sock *sk)
182 {
183 	struct inet_sock *inet;
184 	/* We may need to bind the socket. */
185 	lock_sock(sk);
186 	inet = inet_sk(sk);
187 	if (!inet->inet_num) {
188 		if (sk->sk_prot->get_port(sk, 0)) {
189 			release_sock(sk);
190 			return -EAGAIN;
191 		}
192 		inet->inet_sport = htons(inet->inet_num);
193 	}
194 	release_sock(sk);
195 	return 0;
196 }
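
/*
 * In rough userspace terms: a datagram socket that was never bind()ed gets a
 * source port assigned lazily here, on its first connect() or send, via the
 * inet_dgram_connect()/inet_send_prepare() callers below; passing 0 to
 * get_port() asks the protocol for an ephemeral port from the local port
 * range.  Sketch only, error handling omitted:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	(getsockname() now reports a kernel-chosen source port)
 */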
197 
198 int __inet_listen_sk(struct sock *sk, int backlog)
199 {
200 	unsigned char old_state = sk->sk_state;
201 	int err, tcp_fastopen;
202 
203 	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
204 		return -EINVAL;
205 
206 	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
207 	/* Really, if the socket is already in listen state
208 	 * we can only allow the backlog to be adjusted.
209 	 */
210 	if (old_state != TCP_LISTEN) {
211 		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
212 		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
213 	 * Also, the fastopen backlog may already have been set via the option
214 		 * because the socket was in TCP_LISTEN state previously but
215 		 * was shutdown() rather than close().
216 		 */
217 		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
218 		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
219 		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
220 		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
221 			fastopen_queue_tune(sk, backlog);
222 			tcp_fastopen_init_key_once(sock_net(sk));
223 		}
224 
225 		err = inet_csk_listen_start(sk);
226 		if (err)
227 			return err;
228 
229 		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
230 	}
231 	return 0;
232 }
233 
234 /*
235  *	Move a socket into listening state.
236  */
237 int inet_listen(struct socket *sock, int backlog)
238 {
239 	struct sock *sk = sock->sk;
240 	int err = -EINVAL;
241 
242 	lock_sock(sk);
243 
244 	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
245 		goto out;
246 
247 	err = __inet_listen_sk(sk, backlog);
248 
249 out:
250 	release_sock(sk);
251 	return err;
252 }
253 EXPORT_SYMBOL(inet_listen);
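
/*
 * A minimal userspace sketch of the path above.  Calling listen() a second
 * time on a socket that is already in TCP_LISTEN only updates
 * sk_max_ack_backlog; it does not restart the listener:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 128);	(enters TCP_LISTEN)
 *	listen(fd, 4096);	(only the backlog is adjusted)
 *
 * Error handling is omitted for brevity.
 */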
254 
255 /*
256  *	Create an inet socket.
257  */
258 
259 static int inet_create(struct net *net, struct socket *sock, int protocol,
260 		       int kern)
261 {
262 	struct sock *sk;
263 	struct inet_protosw *answer;
264 	struct inet_sock *inet;
265 	struct proto *answer_prot;
266 	unsigned char answer_flags;
267 	int try_loading_module = 0;
268 	int err;
269 
270 	if (protocol < 0 || protocol >= IPPROTO_MAX)
271 		return -EINVAL;
272 
273 	sock->state = SS_UNCONNECTED;
274 
275 	/* Look for the requested type/protocol pair. */
276 lookup_protocol:
277 	err = -ESOCKTNOSUPPORT;
278 	rcu_read_lock();
279 	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
280 
281 		err = 0;
282 		/* Check the non-wild match. */
283 		if (protocol == answer->protocol) {
284 			if (protocol != IPPROTO_IP)
285 				break;
286 		} else {
287 			/* Check for the two wild cases. */
288 			if (IPPROTO_IP == protocol) {
289 				protocol = answer->protocol;
290 				break;
291 			}
292 			if (IPPROTO_IP == answer->protocol)
293 				break;
294 		}
295 		err = -EPROTONOSUPPORT;
296 	}
297 
298 	if (unlikely(err)) {
299 		if (try_loading_module < 2) {
300 			rcu_read_unlock();
301 			/*
302 			 * Be more specific, e.g. net-pf-2-proto-132-type-1
303 			 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
304 			 */
305 			if (++try_loading_module == 1)
306 				request_module("net-pf-%d-proto-%d-type-%d",
307 					       PF_INET, protocol, sock->type);
308 			/*
309 			 * Fall back to generic, e.g. net-pf-2-proto-132
310 			 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
311 			 */
312 			else
313 				request_module("net-pf-%d-proto-%d",
314 					       PF_INET, protocol);
315 			goto lookup_protocol;
316 		} else
317 			goto out_rcu_unlock;
318 	}
319 
320 	err = -EPERM;
321 	if (sock->type == SOCK_RAW && !kern &&
322 	    !ns_capable(net->user_ns, CAP_NET_RAW))
323 		goto out_rcu_unlock;
324 
325 	sock->ops = answer->ops;
326 	answer_prot = answer->prot;
327 	answer_flags = answer->flags;
328 	rcu_read_unlock();
329 
330 	WARN_ON(!answer_prot->slab);
331 
332 	err = -ENOMEM;
333 	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
334 	if (!sk)
335 		goto out;
336 
337 	err = 0;
338 	if (INET_PROTOSW_REUSE & answer_flags)
339 		sk->sk_reuse = SK_CAN_REUSE;
340 
341 	if (INET_PROTOSW_ICSK & answer_flags)
342 		inet_init_csk_locks(sk);
343 
344 	inet = inet_sk(sk);
345 	inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
346 
347 	inet_clear_bit(NODEFRAG, sk);
348 
349 	if (SOCK_RAW == sock->type) {
350 		inet->inet_num = protocol;
351 		if (IPPROTO_RAW == protocol)
352 			inet_set_bit(HDRINCL, sk);
353 	}
354 
355 	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
356 		inet->pmtudisc = IP_PMTUDISC_DONT;
357 	else
358 		inet->pmtudisc = IP_PMTUDISC_WANT;
359 
360 	atomic_set(&inet->inet_id, 0);
361 
362 	sock_init_data(sock, sk);
363 
364 	sk->sk_destruct	   = inet_sock_destruct;
365 	sk->sk_protocol	   = protocol;
366 	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
367 	sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
368 
369 	inet->uc_ttl	= -1;
370 	inet_set_bit(MC_LOOP, sk);
371 	inet->mc_ttl	= 1;
372 	inet_set_bit(MC_ALL, sk);
373 	inet->mc_index	= 0;
374 	inet->mc_list	= NULL;
375 	inet->rcv_tos	= 0;
376 
377 	if (inet->inet_num) {
378 		/* It assumes that any protocol which allows
379 		 * the user to assign a number at socket
380 		 * creation time automatically
381 		 * shares.
382 		 */
383 		inet->inet_sport = htons(inet->inet_num);
384 		/* Add to protocol hash chains. */
385 		err = sk->sk_prot->hash(sk);
386 		if (err)
387 			goto out_sk_release;
388 	}
389 
390 	if (sk->sk_prot->init) {
391 		err = sk->sk_prot->init(sk);
392 		if (err)
393 			goto out_sk_release;
394 	}
395 
396 	if (!kern) {
397 		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
398 		if (err)
399 			goto out_sk_release;
400 	}
401 out:
402 	return err;
403 out_rcu_unlock:
404 	rcu_read_unlock();
405 	goto out;
406 out_sk_release:
407 	sk_common_release(sk);
408 	sock->sk = NULL;
409 	goto out;
410 }
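
/*
 * For illustration, how the wildcard matching in the lookup loop above plays
 * out with the entries from inetsw_array[] further down in this file:
 *
 *	socket(AF_INET, SOCK_STREAM, 0);            protocol IPPROTO_IP (0) is
 *						    a wildcard, so it resolves
 *						    to the IPPROTO_TCP entry
 *	socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);  exact (non-wild) match
 *	socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);  unprivileged "ping" socket,
 *						    subject to the
 *						    ping_group_range sysctl
 *	socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);    matches the IPPROTO_IP
 *						    wildcard raw entry and
 *						    requires CAP_NET_RAW
 */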
411 
412 
413 /*
414  *	The peer socket should always be NULL (or else). When we call this
415  *	function we are destroying the object and from then on nobody
416  *	should refer to it.
417  */
418 int inet_release(struct socket *sock)
419 {
420 	struct sock *sk = sock->sk;
421 
422 	if (sk) {
423 		long timeout;
424 
425 		if (!sk->sk_kern_sock)
426 			BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);
427 
428 		/* Applications forget to leave groups before exiting */
429 		ip_mc_drop_socket(sk);
430 
431 		/* If linger is set, we don't return until the close
432 		 * is complete.  Otherwise we return immediately. The
433 		 * actually closing is done the same either way.
434 		 * actual closing is done the same either way.
435 		 * If the close is due to the process exiting, we never
436 		 * linger..
437 		 */
438 		timeout = 0;
439 		if (sock_flag(sk, SOCK_LINGER) &&
440 		    !(current->flags & PF_EXITING))
441 			timeout = sk->sk_lingertime;
442 		sk->sk_prot->close(sk, timeout);
443 		sock->sk = NULL;
444 	}
445 	return 0;
446 }
447 EXPORT_SYMBOL(inet_release);
448 
449 int inet_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len)
450 {
451 	u32 flags = BIND_WITH_LOCK;
452 	int err;
453 
454 	/* If the socket has its own bind function then use it. (RAW) */
455 	if (sk->sk_prot->bind) {
456 		return sk->sk_prot->bind(sk, uaddr, addr_len);
457 	}
458 	if (addr_len < sizeof(struct sockaddr_in))
459 		return -EINVAL;
460 
461 	/* BPF prog is run before any checks are done so that if the prog
462 	 * changes context in a wrong way it will be caught.
463 	 */
464 	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
465 						 CGROUP_INET4_BIND, &flags);
466 	if (err)
467 		return err;
468 
469 	return __inet_bind(sk, uaddr, addr_len, flags);
470 }
471 
472 int inet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
473 {
474 	return inet_bind_sk(sock->sk, uaddr, addr_len);
475 }
476 EXPORT_SYMBOL(inet_bind);
477 
478 int __inet_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
479 		u32 flags)
480 {
481 	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
482 	struct inet_sock *inet = inet_sk(sk);
483 	struct net *net = sock_net(sk);
484 	unsigned short snum;
485 	int chk_addr_ret;
486 	u32 tb_id = RT_TABLE_LOCAL;
487 	int err;
488 
489 	if (addr->sin_family != AF_INET) {
490 		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
491 		 * only if s_addr is INADDR_ANY.
492 		 */
493 		err = -EAFNOSUPPORT;
494 		if (addr->sin_family != AF_UNSPEC ||
495 		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
496 			goto out;
497 	}
498 
499 	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
500 	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
501 
502 	/* Not specified by any standard per se; however, it breaks too
503 	 * many applications when removed.  It is unfortunate since
504 	 * allowing applications to make a non-local bind solves
505 	 * several problems with systems using dynamic addressing.
506 	 * (i.e. your servers still start up even if your ISDN link
507 	 *  is temporarily down)
508 	 */
509 	err = -EADDRNOTAVAIL;
510 	if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
511 	                                 chk_addr_ret))
512 		goto out;
513 
514 	snum = ntohs(addr->sin_port);
515 	err = -EACCES;
516 	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
517 	    snum && inet_port_requires_bind_service(net, snum) &&
518 	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
519 		goto out;
520 
521 	/*      We keep a pair of addresses. rcv_saddr is the one
522 	 *      used by hash lookups, and saddr is used for transmit.
523 	 *
524 	 *      In the BSD API these are the same except where it
525 	 *      would be illegal to use them (multicast/broadcast) in
526 	 *      which case the sending device address is used.
527 	 */
528 	if (flags & BIND_WITH_LOCK)
529 		lock_sock(sk);
530 
531 	/* Check these errors (active socket, double bind). */
532 	err = -EINVAL;
533 	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
534 		goto out_release_sock;
535 
536 	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
537 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
538 		inet->inet_saddr = 0;  /* Use device */
539 
540 	/* Make sure we are allowed to bind here. */
541 	if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
542 		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
543 		err = sk->sk_prot->get_port(sk, snum);
544 		if (err) {
545 			inet->inet_saddr = inet->inet_rcv_saddr = 0;
546 			goto out_release_sock;
547 		}
548 		if (!(flags & BIND_FROM_BPF)) {
549 			err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
550 			if (err) {
551 				inet->inet_saddr = inet->inet_rcv_saddr = 0;
552 				if (sk->sk_prot->put_port)
553 					sk->sk_prot->put_port(sk);
554 				goto out_release_sock;
555 			}
556 		}
557 	}
558 
559 	if (inet->inet_rcv_saddr)
560 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
561 	if (snum)
562 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
563 	inet->inet_sport = htons(inet->inet_num);
564 	inet->inet_daddr = 0;
565 	inet->inet_dport = 0;
566 	sk_dst_reset(sk);
567 	err = 0;
568 out_release_sock:
569 	if (flags & BIND_WITH_LOCK)
570 		release_sock(sk);
571 out:
572 	return err;
573 }
574 
575 int inet_dgram_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
576 		       int addr_len, int flags)
577 {
578 	struct sock *sk = sock->sk;
579 	const struct proto *prot;
580 	int err;
581 
582 	if (addr_len < sizeof(uaddr->sa_family))
583 		return -EINVAL;
584 
585 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
586 	prot = READ_ONCE(sk->sk_prot);
587 
588 	if (uaddr->sa_family == AF_UNSPEC)
589 		return prot->disconnect(sk, flags);
590 
591 	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
592 		err = prot->pre_connect(sk, uaddr, addr_len);
593 		if (err)
594 			return err;
595 	}
596 
597 	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
598 		return -EAGAIN;
599 	return prot->connect(sk, uaddr, addr_len);
600 }
601 EXPORT_SYMBOL(inet_dgram_connect);
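
/*
 * Note on the AF_UNSPEC branch above: connecting a datagram socket to an
 * address with sa_family == AF_UNSPEC dissolves an existing association
 * rather than establishing one.  Roughly, from userspace (sketch, error
 * handling omitted):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));   associate
 *	connect(fd, &sa, sizeof(sa));                         disassociate
 */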
602 
603 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
604 {
605 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
606 
607 	add_wait_queue(sk_sleep(sk), &wait);
608 	sk->sk_write_pending += writebias;
609 
610 	/* Basic assumption: if someone sets sk->sk_err, he _must_
611 	 * change the state of the socket from TCP_SYN_*.
612 	 * Connect() does not allow one to get error notifications
613 	 * without closing the socket.
614 	 */
615 	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
616 		release_sock(sk);
617 		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
618 		lock_sock(sk);
619 		if (signal_pending(current) || !timeo)
620 			break;
621 	}
622 	remove_wait_queue(sk_sleep(sk), &wait);
623 	sk->sk_write_pending -= writebias;
624 	return timeo;
625 }
626 
627 /*
628  *	Connect to a remote host. There is regrettably still a little
629  *	TCP 'magic' in here.
630  */
631 int __inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
632 			  int addr_len, int flags, int is_sendmsg)
633 {
634 	struct sock *sk = sock->sk;
635 	int err;
636 	long timeo;
637 
638 	/*
639 	 * uaddr can be NULL and addr_len can be 0 if:
640 	 * sk is a TCP fastopen active socket and
641 	 * TCP_FASTOPEN_CONNECT sockopt is set and
642 	 * we already have a valid cookie for this socket.
643 	 * In this case, user can call write() after connect().
644 	 * write() will invoke tcp_sendmsg_fastopen() which calls
645 	 * __inet_stream_connect().
646 	 */
647 	if (uaddr) {
648 		if (addr_len < sizeof(uaddr->sa_family))
649 			return -EINVAL;
650 
651 		if (uaddr->sa_family == AF_UNSPEC) {
652 			sk->sk_disconnects++;
653 			err = sk->sk_prot->disconnect(sk, flags);
654 			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
655 			goto out;
656 		}
657 	}
658 
659 	switch (sock->state) {
660 	default:
661 		err = -EINVAL;
662 		goto out;
663 	case SS_CONNECTED:
664 		err = -EISCONN;
665 		goto out;
666 	case SS_CONNECTING:
667 		if (inet_test_bit(DEFER_CONNECT, sk))
668 			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
669 		else
670 			err = -EALREADY;
671 		/* Fall out of switch with err, set for this state */
672 		break;
673 	case SS_UNCONNECTED:
674 		err = -EISCONN;
675 		if (sk->sk_state != TCP_CLOSE)
676 			goto out;
677 
678 		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
679 			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
680 			if (err)
681 				goto out;
682 		}
683 
684 		err = sk->sk_prot->connect(sk, uaddr, addr_len);
685 		if (err < 0)
686 			goto out;
687 
688 		sock->state = SS_CONNECTING;
689 
690 		if (!err && inet_test_bit(DEFER_CONNECT, sk))
691 			goto out;
692 
693 		/* Just entered SS_CONNECTING state; the only
694 		 * difference is that return value in non-blocking
695 		 * case is EINPROGRESS, rather than EALREADY.
696 		 */
697 		err = -EINPROGRESS;
698 		break;
699 	}
700 
701 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
702 
703 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
704 		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
705 				tcp_sk(sk)->fastopen_req &&
706 				tcp_sk(sk)->fastopen_req->data ? 1 : 0;
707 		int dis = sk->sk_disconnects;
708 
709 		/* Error code is set above */
710 		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
711 			goto out;
712 
713 		err = sock_intr_errno(timeo);
714 		if (signal_pending(current))
715 			goto out;
716 
717 		if (dis != sk->sk_disconnects) {
718 			err = -EPIPE;
719 			goto out;
720 		}
721 	}
722 
723 	/* Connection was closed by RST, timeout, ICMP error
724 	 * or another process disconnected us.
725 	 */
726 	if (sk->sk_state == TCP_CLOSE)
727 		goto sock_error;
728 
729 	/* sk->sk_err may not be zero now, if RECVERR was ordered by user
730 	 * and an error was received after the socket entered established state.
731 	 * Hence, it is handled normally after connect() returns successfully.
732 	 */
733 
734 	sock->state = SS_CONNECTED;
735 	err = 0;
736 out:
737 	return err;
738 
739 sock_error:
740 	err = sock_error(sk) ? : -ECONNABORTED;
741 	sock->state = SS_UNCONNECTED;
742 	sk->sk_disconnects++;
743 	if (sk->sk_prot->disconnect(sk, flags))
744 		sock->state = SS_DISCONNECTING;
745 	goto out;
746 }
747 EXPORT_SYMBOL(__inet_stream_connect);
748 
749 int inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
750 			int addr_len, int flags)
751 {
752 	int err;
753 
754 	lock_sock(sock->sk);
755 	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
756 	release_sock(sock->sk);
757 	return err;
758 }
759 EXPORT_SYMBOL(inet_stream_connect);
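
/*
 * The EINPROGRESS/EALREADY/EISCONN handling above is what makes the usual
 * non-blocking connect pattern work.  Roughly, from userspace:
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		int err;
 *		socklen_t len = sizeof(err);
 *
 *		poll(&pfd, 1, timeout_ms);
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *		(err == 0 on success, otherwise the connect error)
 *	}
 *
 * A second connect() issued while the handshake is still in flight fails
 * with EALREADY, and one issued after completion fails with EISCONN.
 * timeout_ms is a placeholder; error handling is omitted.
 */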
760 
761 void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
762 {
763 	if (mem_cgroup_sockets_enabled) {
764 		mem_cgroup_sk_alloc(newsk);
765 		__sk_charge(newsk, GFP_KERNEL);
766 	}
767 
768 	sock_rps_record_flow(newsk);
769 	WARN_ON(!((1 << newsk->sk_state) &
770 		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
771 		   TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
772 		   TCPF_CLOSING | TCPF_CLOSE_WAIT |
773 		   TCPF_CLOSE)));
774 
775 	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
776 		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
777 	sock_graft(newsk, newsock);
778 
779 	newsock->state = SS_CONNECTED;
780 }
781 EXPORT_SYMBOL_GPL(__inet_accept);
782 
783 /*
784  *	Accept a pending connection. The TCP layer now gives BSD semantics.
785  */
786 
787 int inet_accept(struct socket *sock, struct socket *newsock,
788 		struct proto_accept_arg *arg)
789 {
790 	struct sock *sk1 = sock->sk, *sk2;
791 
792 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
793 	arg->err = -EINVAL;
794 	sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg);
795 	if (!sk2)
796 		return arg->err;
797 
798 	lock_sock(sk2);
799 	__inet_accept(sock, newsock, sk2);
800 	release_sock(sk2);
801 	return 0;
802 }
803 EXPORT_SYMBOL(inet_accept);
804 
805 /*
806  *	This does both peername and sockname.
807  */
808 int inet_getname(struct socket *sock, struct sockaddr *uaddr,
809 		 int peer)
810 {
811 	struct sock *sk		= sock->sk;
812 	struct inet_sock *inet	= inet_sk(sk);
813 	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
814 	int sin_addr_len = sizeof(*sin);
815 
816 	sin->sin_family = AF_INET;
817 	lock_sock(sk);
818 	if (peer) {
819 		if (!inet->inet_dport ||
820 		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
821 		     peer == 1)) {
822 			release_sock(sk);
823 			return -ENOTCONN;
824 		}
825 		sin->sin_port = inet->inet_dport;
826 		sin->sin_addr.s_addr = inet->inet_daddr;
827 		BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
828 				       CGROUP_INET4_GETPEERNAME);
829 	} else {
830 		__be32 addr = inet->inet_rcv_saddr;
831 		if (!addr)
832 			addr = inet->inet_saddr;
833 		sin->sin_port = inet->inet_sport;
834 		sin->sin_addr.s_addr = addr;
835 		BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
836 				       CGROUP_INET4_GETSOCKNAME);
837 	}
838 	release_sock(sk);
839 	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
840 	return sin_addr_len;
841 }
842 EXPORT_SYMBOL(inet_getname);
843 
844 int inet_send_prepare(struct sock *sk)
845 {
846 	sock_rps_record_flow(sk);
847 
848 	/* We may need to bind the socket. */
849 	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
850 	    inet_autobind(sk))
851 		return -EAGAIN;
852 
853 	return 0;
854 }
855 EXPORT_SYMBOL_GPL(inet_send_prepare);
856 
857 int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
858 {
859 	struct sock *sk = sock->sk;
860 	const struct proto *prot;
861 
862 	if (unlikely(inet_send_prepare(sk)))
863 		return -EAGAIN;
864 
865 	prot = READ_ONCE(sk->sk_prot);
866 	return INDIRECT_CALL_2(prot->sendmsg, tcp_sendmsg, udp_sendmsg,
867 			       sk, msg, size);
868 }
869 EXPORT_SYMBOL(inet_sendmsg);
870 
871 void inet_splice_eof(struct socket *sock)
872 {
873 	const struct proto *prot;
874 	struct sock *sk = sock->sk;
875 
876 	if (unlikely(inet_send_prepare(sk)))
877 		return;
878 
879 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
880 	prot = READ_ONCE(sk->sk_prot);
881 	if (prot->splice_eof)
882 		prot->splice_eof(sock);
883 }
884 EXPORT_SYMBOL_GPL(inet_splice_eof);
885 
886 int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
887 		 int flags)
888 {
889 	struct sock *sk = sock->sk;
890 	const struct proto *prot;
891 
892 	if (likely(!(flags & MSG_ERRQUEUE)))
893 		sock_rps_record_flow(sk);
894 
895 	prot = READ_ONCE(sk->sk_prot);
896 	return INDIRECT_CALL_2(prot->recvmsg, tcp_recvmsg, udp_recvmsg,
897 			       sk, msg, size, flags);
898 }
899 EXPORT_SYMBOL(inet_recvmsg);
900 
901 int inet_shutdown(struct socket *sock, int how)
902 {
903 	struct sock *sk = sock->sk;
904 	int err = 0;
905 
906 	/* This should really check to make sure
907 	 * the socket is a TCP socket. (WHY AC...)
908 	 */
909 	how++; /* maps 0->1 has the advantage of making bit 1 rcvs and
910 		       1->2 bit 2 snds.
911 		       2->3 */
912 	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
913 		return -EINVAL;
914 
915 	lock_sock(sk);
916 	if (sock->state == SS_CONNECTING) {
917 		if ((1 << sk->sk_state) &
918 		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
919 			sock->state = SS_DISCONNECTING;
920 		else
921 			sock->state = SS_CONNECTED;
922 	}
923 
924 	switch (sk->sk_state) {
925 	case TCP_CLOSE:
926 		err = -ENOTCONN;
927 		/* Hack to wake up other listeners, who can poll for
928 	   EPOLLHUP, even on e.g. unconnected UDP sockets -- RR */
929 		fallthrough;
930 	default:
931 		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
932 		if (sk->sk_prot->shutdown)
933 			sk->sk_prot->shutdown(sk, how);
934 		break;
935 
936 	/* Remaining two branches are temporary solution for missing
937 	 * close() in multithreaded environment. It is _not_ a good idea,
938 	 * but we have no choice until close() is repaired at VFS level.
939 	 */
940 	case TCP_LISTEN:
941 		if (!(how & RCV_SHUTDOWN))
942 			break;
943 		fallthrough;
944 	case TCP_SYN_SENT:
945 		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
946 		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
947 		break;
948 	}
949 
950 	/* Wake up anyone sleeping in poll. */
951 	sk->sk_state_change(sk);
952 	release_sock(sk);
953 	return err;
954 }
955 EXPORT_SYMBOL(inet_shutdown);
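
/*
 * The "how++" trick above relies on the userspace and in-kernel constants
 * lining up as follows (SHUT_* from the uapi, the shutdown bits from
 * include/net/sock.h):
 *
 *	SHUT_RD   (0) -> 1 == RCV_SHUTDOWN
 *	SHUT_WR   (1) -> 2 == SEND_SHUTDOWN
 *	SHUT_RDWR (2) -> 3 == RCV_SHUTDOWN | SEND_SHUTDOWN == SHUTDOWN_MASK
 *
 * so sk->sk_shutdown can simply be OR-ed with the incremented value.
 */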
956 
957 /*
958  *	ioctl() calls you can issue on an INET socket. Most of these are
959  *	device configuration and stuff and very rarely used. Some ioctls
960  *	pass on to the socket itself.
961  *
962  *	NOTE: I like the idea of a module for the config stuff, i.e. ifconfig
963  *	loads the devconfigure module, which does its configuring and unloads it.
964  *	There's a good 20K of config code hanging around the kernel.
965  */
966 
967 int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
968 {
969 	struct sock *sk = sock->sk;
970 	int err = 0;
971 	struct net *net = sock_net(sk);
972 	void __user *p = (void __user *)arg;
973 	struct ifreq ifr;
974 	struct rtentry rt;
975 
976 	switch (cmd) {
977 	case SIOCADDRT:
978 	case SIOCDELRT:
979 		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
980 			return -EFAULT;
981 		err = ip_rt_ioctl(net, cmd, &rt);
982 		break;
983 	case SIOCRTMSG:
984 		err = -EINVAL;
985 		break;
986 	case SIOCDARP:
987 	case SIOCGARP:
988 	case SIOCSARP:
989 		err = arp_ioctl(net, cmd, (void __user *)arg);
990 		break;
991 	case SIOCGIFADDR:
992 	case SIOCGIFBRDADDR:
993 	case SIOCGIFNETMASK:
994 	case SIOCGIFDSTADDR:
995 	case SIOCGIFPFLAGS:
996 		if (get_user_ifreq(&ifr, NULL, p))
997 			return -EFAULT;
998 		err = devinet_ioctl(net, cmd, &ifr);
999 		if (!err && put_user_ifreq(&ifr, p))
1000 			err = -EFAULT;
1001 		break;
1002 
1003 	case SIOCSIFADDR:
1004 	case SIOCSIFBRDADDR:
1005 	case SIOCSIFNETMASK:
1006 	case SIOCSIFDSTADDR:
1007 	case SIOCSIFPFLAGS:
1008 	case SIOCSIFFLAGS:
1009 		if (get_user_ifreq(&ifr, NULL, p))
1010 			return -EFAULT;
1011 		err = devinet_ioctl(net, cmd, &ifr);
1012 		break;
1013 	default:
1014 		if (sk->sk_prot->ioctl)
1015 			err = sk_ioctl(sk, cmd, (void __user *)arg);
1016 		else
1017 			err = -ENOIOCTLCMD;
1018 		break;
1019 	}
1020 	return err;
1021 }
1022 EXPORT_SYMBOL(inet_ioctl);
1023 
1024 #ifdef CONFIG_COMPAT
1025 static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
1026 		struct compat_rtentry __user *ur)
1027 {
1028 	compat_uptr_t rtdev;
1029 	struct rtentry rt;
1030 
1031 	if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
1032 			3 * sizeof(struct sockaddr)) ||
1033 	    get_user(rt.rt_flags, &ur->rt_flags) ||
1034 	    get_user(rt.rt_metric, &ur->rt_metric) ||
1035 	    get_user(rt.rt_mtu, &ur->rt_mtu) ||
1036 	    get_user(rt.rt_window, &ur->rt_window) ||
1037 	    get_user(rt.rt_irtt, &ur->rt_irtt) ||
1038 	    get_user(rtdev, &ur->rt_dev))
1039 		return -EFAULT;
1040 
1041 	rt.rt_dev = compat_ptr(rtdev);
1042 	return ip_rt_ioctl(sock_net(sk), cmd, &rt);
1043 }
1044 
1045 static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1046 {
1047 	void __user *argp = compat_ptr(arg);
1048 	struct sock *sk = sock->sk;
1049 
1050 	switch (cmd) {
1051 	case SIOCADDRT:
1052 	case SIOCDELRT:
1053 		return inet_compat_routing_ioctl(sk, cmd, argp);
1054 	default:
1055 		if (!sk->sk_prot->compat_ioctl)
1056 			return -ENOIOCTLCMD;
1057 		return sk->sk_prot->compat_ioctl(sk, cmd, arg);
1058 	}
1059 }
1060 #endif /* CONFIG_COMPAT */
1061 
1062 const struct proto_ops inet_stream_ops = {
1063 	.family		   = PF_INET,
1064 	.owner		   = THIS_MODULE,
1065 	.release	   = inet_release,
1066 	.bind		   = inet_bind,
1067 	.connect	   = inet_stream_connect,
1068 	.socketpair	   = sock_no_socketpair,
1069 	.accept		   = inet_accept,
1070 	.getname	   = inet_getname,
1071 	.poll		   = tcp_poll,
1072 	.ioctl		   = inet_ioctl,
1073 	.gettstamp	   = sock_gettstamp,
1074 	.listen		   = inet_listen,
1075 	.shutdown	   = inet_shutdown,
1076 	.setsockopt	   = sock_common_setsockopt,
1077 	.getsockopt	   = sock_common_getsockopt,
1078 	.sendmsg	   = inet_sendmsg,
1079 	.recvmsg	   = inet_recvmsg,
1080 #ifdef CONFIG_MMU
1081 	.mmap		   = tcp_mmap,
1082 #endif
1083 	.splice_eof	   = inet_splice_eof,
1084 	.splice_read	   = tcp_splice_read,
1085 	.set_peek_off      = sk_set_peek_off,
1086 	.read_sock	   = tcp_read_sock,
1087 	.read_skb	   = tcp_read_skb,
1088 	.sendmsg_locked    = tcp_sendmsg_locked,
1089 	.peek_len	   = tcp_peek_len,
1090 #ifdef CONFIG_COMPAT
1091 	.compat_ioctl	   = inet_compat_ioctl,
1092 #endif
1093 	.set_rcvlowat	   = tcp_set_rcvlowat,
1094 };
1095 EXPORT_SYMBOL(inet_stream_ops);
1096 
1097 const struct proto_ops inet_dgram_ops = {
1098 	.family		   = PF_INET,
1099 	.owner		   = THIS_MODULE,
1100 	.release	   = inet_release,
1101 	.bind		   = inet_bind,
1102 	.connect	   = inet_dgram_connect,
1103 	.socketpair	   = sock_no_socketpair,
1104 	.accept		   = sock_no_accept,
1105 	.getname	   = inet_getname,
1106 	.poll		   = udp_poll,
1107 	.ioctl		   = inet_ioctl,
1108 	.gettstamp	   = sock_gettstamp,
1109 	.listen		   = sock_no_listen,
1110 	.shutdown	   = inet_shutdown,
1111 	.setsockopt	   = sock_common_setsockopt,
1112 	.getsockopt	   = sock_common_getsockopt,
1113 	.sendmsg	   = inet_sendmsg,
1114 	.read_skb	   = udp_read_skb,
1115 	.recvmsg	   = inet_recvmsg,
1116 	.mmap		   = sock_no_mmap,
1117 	.splice_eof	   = inet_splice_eof,
1118 	.set_peek_off	   = udp_set_peek_off,
1119 #ifdef CONFIG_COMPAT
1120 	.compat_ioctl	   = inet_compat_ioctl,
1121 #endif
1122 };
1123 EXPORT_SYMBOL(inet_dgram_ops);
1124 
1125 /*
1126  * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
1127  * udp_poll
1128  */
1129 static const struct proto_ops inet_sockraw_ops = {
1130 	.family		   = PF_INET,
1131 	.owner		   = THIS_MODULE,
1132 	.release	   = inet_release,
1133 	.bind		   = inet_bind,
1134 	.connect	   = inet_dgram_connect,
1135 	.socketpair	   = sock_no_socketpair,
1136 	.accept		   = sock_no_accept,
1137 	.getname	   = inet_getname,
1138 	.poll		   = datagram_poll,
1139 	.ioctl		   = inet_ioctl,
1140 	.gettstamp	   = sock_gettstamp,
1141 	.listen		   = sock_no_listen,
1142 	.shutdown	   = inet_shutdown,
1143 	.setsockopt	   = sock_common_setsockopt,
1144 	.getsockopt	   = sock_common_getsockopt,
1145 	.sendmsg	   = inet_sendmsg,
1146 	.recvmsg	   = inet_recvmsg,
1147 	.mmap		   = sock_no_mmap,
1148 	.splice_eof	   = inet_splice_eof,
1149 #ifdef CONFIG_COMPAT
1150 	.compat_ioctl	   = inet_compat_ioctl,
1151 #endif
1152 };
1153 
1154 static const struct net_proto_family inet_family_ops = {
1155 	.family = PF_INET,
1156 	.create = inet_create,
1157 	.owner	= THIS_MODULE,
1158 };
1159 
1160 /* Upon startup we insert all the elements in inetsw_array[] into
1161  * the linked list inetsw.
1162  */
1163 static struct inet_protosw inetsw_array[] =
1164 {
1165 	{
1166 		.type =       SOCK_STREAM,
1167 		.protocol =   IPPROTO_TCP,
1168 		.prot =       &tcp_prot,
1169 		.ops =        &inet_stream_ops,
1170 		.flags =      INET_PROTOSW_PERMANENT |
1171 			      INET_PROTOSW_ICSK,
1172 	},
1173 
1174 	{
1175 		.type =       SOCK_DGRAM,
1176 		.protocol =   IPPROTO_UDP,
1177 		.prot =       &udp_prot,
1178 		.ops =        &inet_dgram_ops,
1179 		.flags =      INET_PROTOSW_PERMANENT,
1180 	},
1181 
1182 	{
1183 		.type =       SOCK_DGRAM,
1184 		.protocol =   IPPROTO_ICMP,
1185 		.prot =       &ping_prot,
1186 		.ops =        &inet_sockraw_ops,
1187 		.flags =      INET_PROTOSW_REUSE,
1188 	},
1189 
1190 	{
1191 		.type =       SOCK_RAW,
1192 		.protocol =   IPPROTO_IP,	/* wild card */
1193 		.prot =       &raw_prot,
1194 		.ops =        &inet_sockraw_ops,
1195 		.flags =      INET_PROTOSW_REUSE,
1196 	}
1197 };
1198 
1199 #define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
1200 
1201 void inet_register_protosw(struct inet_protosw *p)
1202 {
1203 	struct list_head *lh;
1204 	struct inet_protosw *answer;
1205 	int protocol = p->protocol;
1206 	struct list_head *last_perm;
1207 
1208 	spin_lock_bh(&inetsw_lock);
1209 
1210 	if (p->type >= SOCK_MAX)
1211 		goto out_illegal;
1212 
1213 	/* If we are trying to override a permanent protocol, bail. */
1214 	last_perm = &inetsw[p->type];
1215 	list_for_each(lh, &inetsw[p->type]) {
1216 		answer = list_entry(lh, struct inet_protosw, list);
1217 		/* Check only the non-wild match. */
1218 		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
1219 			break;
1220 		if (protocol == answer->protocol)
1221 			goto out_permanent;
1222 		last_perm = lh;
1223 	}
1224 
1225 	/* Add the new entry after the last permanent entry if any, so that
1226 	 * the new entry does not override a permanent entry when matched with
1227 	 * a wild-card protocol. But it is allowed to override any existing
1228 	 * non-permanent entry.  This means that when we remove this entry, the
1229 	 * system automatically returns to the old behavior.
1230 	 */
1231 	list_add_rcu(&p->list, last_perm);
1232 out:
1233 	spin_unlock_bh(&inetsw_lock);
1234 
1235 	return;
1236 
1237 out_permanent:
1238 	pr_err("Attempt to override permanent protocol %d\n", protocol);
1239 	goto out;
1240 
1241 out_illegal:
1242 	pr_err("Ignoring attempt to register invalid socket type %d\n",
1243 	       p->type);
1244 	goto out;
1245 }
1246 EXPORT_SYMBOL(inet_register_protosw);
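
/*
 * A rough sketch of how a (hypothetical) modular transport would hook in
 * here; the fields mirror the static inetsw_array[] entries above:
 *
 *	static struct inet_protosw foo_protosw = {
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_FOO,      (hypothetical protocol number)
 *		.prot     = &foo_prot,        (its struct proto)
 *		.ops      = &foo_stream_ops,  (its struct proto_ops)
 *		.flags    = 0,                (non-permanent: may be overridden)
 *	};
 *
 *	inet_register_protosw(&foo_protosw);    on module load
 *	inet_unregister_protosw(&foo_protosw);  on module unload
 *
 * IPPROTO_FOO, foo_prot and foo_stream_ops are placeholders, not real
 * kernel symbols.
 */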
1247 
1248 void inet_unregister_protosw(struct inet_protosw *p)
1249 {
1250 	if (INET_PROTOSW_PERMANENT & p->flags) {
1251 		pr_err("Attempt to unregister permanent protocol %d\n",
1252 		       p->protocol);
1253 	} else {
1254 		spin_lock_bh(&inetsw_lock);
1255 		list_del_rcu(&p->list);
1256 		spin_unlock_bh(&inetsw_lock);
1257 
1258 		synchronize_net();
1259 	}
1260 }
1261 EXPORT_SYMBOL(inet_unregister_protosw);
1262 
1263 static int inet_sk_reselect_saddr(struct sock *sk)
1264 {
1265 	struct inet_sock *inet = inet_sk(sk);
1266 	__be32 old_saddr = inet->inet_saddr;
1267 	__be32 daddr = inet->inet_daddr;
1268 	struct flowi4 *fl4;
1269 	struct rtable *rt;
1270 	__be32 new_saddr;
1271 	struct ip_options_rcu *inet_opt;
1272 	int err;
1273 
1274 	inet_opt = rcu_dereference_protected(inet->inet_opt,
1275 					     lockdep_sock_is_held(sk));
1276 	if (inet_opt && inet_opt->opt.srr)
1277 		daddr = inet_opt->opt.faddr;
1278 
1279 	/* Query new route. */
1280 	fl4 = &inet->cork.fl.u.ip4;
1281 	rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
1282 			      sk->sk_protocol, inet->inet_sport,
1283 			      inet->inet_dport, sk);
1284 	if (IS_ERR(rt))
1285 		return PTR_ERR(rt);
1286 
1287 	new_saddr = fl4->saddr;
1288 
1289 	if (new_saddr == old_saddr) {
1290 		sk_setup_caps(sk, &rt->dst);
1291 		return 0;
1292 	}
1293 
1294 	err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
1295 	if (err) {
1296 		ip_rt_put(rt);
1297 		return err;
1298 	}
1299 
1300 	sk_setup_caps(sk, &rt->dst);
1301 
1302 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
1303 		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1304 			__func__, &old_saddr, &new_saddr);
1305 	}
1306 
1307 	/*
1308 	 * XXX The only one ugly spot where we need to
1309 	 * XXX really change the sockets identity after
1310 	 * XXX it has entered the hashes. -DaveM
1311 	 *
1312 	 * Besides that, it does not check for connection
1313 	 * uniqueness. Wait for troubles.
1314 	 */
1315 	return __sk_prot_rehash(sk);
1316 }
1317 
1318 int inet_sk_rebuild_header(struct sock *sk)
1319 {
1320 	struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
1321 	struct inet_sock *inet = inet_sk(sk);
1322 	struct flowi4 *fl4;
1323 	int err;
1324 
1325 	/* Route is OK, nothing to do. */
1326 	if (rt)
1327 		return 0;
1328 
1329 	/* Reroute. */
1330 	fl4 = &inet->cork.fl.u.ip4;
1331 	inet_sk_init_flowi4(inet, fl4);
1332 	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
1333 	if (!IS_ERR(rt)) {
1334 		err = 0;
1335 		sk_setup_caps(sk, &rt->dst);
1336 	} else {
1337 		err = PTR_ERR(rt);
1338 
1339 		/* Routing failed... */
1340 		sk->sk_route_caps = 0;
1341 
1342 		if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
1343 		    sk->sk_state != TCP_SYN_SENT ||
1344 		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1345 		    (err = inet_sk_reselect_saddr(sk)) != 0)
1346 			WRITE_ONCE(sk->sk_err_soft, -err);
1347 	}
1348 
1349 	return err;
1350 }
1351 EXPORT_SYMBOL(inet_sk_rebuild_header);
1352 
1353 void inet_sk_set_state(struct sock *sk, int state)
1354 {
1355 	trace_inet_sock_set_state(sk, sk->sk_state, state);
1356 	sk->sk_state = state;
1357 }
1358 EXPORT_SYMBOL(inet_sk_set_state);
1359 
1360 void inet_sk_state_store(struct sock *sk, int newstate)
1361 {
1362 	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
1363 	smp_store_release(&sk->sk_state, newstate);
1364 }
1365 
1366 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1367 				 netdev_features_t features)
1368 {
1369 	bool udpfrag = false, fixedid = false, gso_partial, encap;
1370 	struct sk_buff *segs = ERR_PTR(-EINVAL);
1371 	const struct net_offload *ops;
1372 	unsigned int offset = 0;
1373 	struct iphdr *iph;
1374 	int proto, tot_len;
1375 	int nhoff;
1376 	int ihl;
1377 	int id;
1378 
1379 	skb_reset_network_header(skb);
1380 	nhoff = skb_network_header(skb) - skb_mac_header(skb);
1381 	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1382 		goto out;
1383 
1384 	iph = ip_hdr(skb);
1385 	ihl = iph->ihl * 4;
1386 	if (ihl < sizeof(*iph))
1387 		goto out;
1388 
1389 	id = ntohs(iph->id);
1390 	proto = iph->protocol;
1391 
1392 	/* Warning: after this point, iph might no longer be valid */
1393 	if (unlikely(!pskb_may_pull(skb, ihl)))
1394 		goto out;
1395 	__skb_pull(skb, ihl);
1396 
1397 	encap = SKB_GSO_CB(skb)->encap_level > 0;
1398 	if (encap)
1399 		features &= skb->dev->hw_enc_features;
1400 	SKB_GSO_CB(skb)->encap_level += ihl;
1401 
1402 	skb_reset_transport_header(skb);
1403 
1404 	segs = ERR_PTR(-EPROTONOSUPPORT);
1405 
1406 	fixedid = !!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCP_FIXEDID << encap));
1407 
1408 	if (!skb->encapsulation || encap)
1409 		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
1410 
1411 	ops = rcu_dereference(inet_offloads[proto]);
1412 	if (likely(ops && ops->callbacks.gso_segment)) {
1413 		segs = ops->callbacks.gso_segment(skb, features);
1414 		if (!segs)
1415 			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
1416 	}
1417 
1418 	if (IS_ERR_OR_NULL(segs))
1419 		goto out;
1420 
1421 	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
1422 
1423 	skb = segs;
1424 	do {
1425 		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
1426 		if (udpfrag) {
1427 			iph->frag_off = htons(offset >> 3);
1428 			if (skb->next)
1429 				iph->frag_off |= htons(IP_MF);
1430 			offset += skb->len - nhoff - ihl;
1431 			tot_len = skb->len - nhoff;
1432 		} else if (skb_is_gso(skb)) {
1433 			if (!fixedid) {
1434 				iph->id = htons(id);
1435 				id += skb_shinfo(skb)->gso_segs;
1436 			}
1437 
1438 			if (gso_partial)
1439 				tot_len = skb_shinfo(skb)->gso_size +
1440 					  SKB_GSO_CB(skb)->data_offset +
1441 					  skb->head - (unsigned char *)iph;
1442 			else
1443 				tot_len = skb->len - nhoff;
1444 		} else {
1445 			if (!fixedid)
1446 				iph->id = htons(id++);
1447 			tot_len = skb->len - nhoff;
1448 		}
1449 		iph->tot_len = htons(tot_len);
1450 		ip_send_check(iph);
1451 		if (encap)
1452 			skb_reset_inner_headers(skb);
1453 		skb->network_header = (u8 *)iph - skb->head;
1454 		skb_reset_mac_len(skb);
1455 	} while ((skb = skb->next));
1456 
1457 out:
1458 	return segs;
1459 }
1460 
1461 static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
1462 					netdev_features_t features)
1463 {
1464 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
1465 		return ERR_PTR(-EINVAL);
1466 
1467 	return inet_gso_segment(skb, features);
1468 }
1469 
1470 struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
1471 {
1472 	const struct net_offload *ops;
1473 	struct sk_buff *pp = NULL;
1474 	const struct iphdr *iph;
1475 	struct sk_buff *p;
1476 	unsigned int hlen;
1477 	unsigned int off;
1478 	int flush = 1;
1479 	int proto;
1480 
1481 	off = skb_gro_offset(skb);
1482 	hlen = off + sizeof(*iph);
1483 	iph = skb_gro_header(skb, hlen, off);
1484 	if (unlikely(!iph))
1485 		goto out;
1486 
1487 	proto = iph->protocol;
1488 
1489 	ops = rcu_dereference(inet_offloads[proto]);
1490 	if (!ops || !ops->callbacks.gro_receive)
1491 		goto out;
1492 
1493 	if (*(u8 *)iph != 0x45)
1494 		goto out;
1495 
1496 	if (ip_is_fragment(iph))
1497 		goto out;
1498 
1499 	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1500 		goto out;
1501 
1502 	NAPI_GRO_CB(skb)->proto = proto;
1503 	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (ntohl(*(__be32 *)&iph->id) & ~IP_DF));
1504 
1505 	list_for_each_entry(p, head, list) {
1506 		struct iphdr *iph2;
1507 
1508 		if (!NAPI_GRO_CB(p)->same_flow)
1509 			continue;
1510 
1511 		iph2 = (struct iphdr *)(p->data + off);
1512 		/* The above works because, with the exception of the top
1513 		 * (innermost) layer, we only aggregate pkts with the same
1514 		 * hdr length so all the hdrs we'll need to verify will start
1515 		 * at the same offset.
1516 		 */
1517 		if ((iph->protocol ^ iph2->protocol) |
1518 		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1519 		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1520 			NAPI_GRO_CB(p)->same_flow = 0;
1521 			continue;
1522 		}
1523 	}
1524 
1525 	NAPI_GRO_CB(skb)->flush |= flush;
1526 	NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
1527 
1528 	/* Note : No need to call skb_gro_postpull_rcsum() here,
1529 	 * as we already checked that the checksum over the IPv4 header was 0
1530 	 */
1531 	skb_gro_pull(skb, sizeof(*iph));
1532 	skb_set_transport_header(skb, skb_gro_offset(skb));
1533 
1534 	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
1535 				       ops->callbacks.gro_receive, head, skb);
1536 
1537 out:
1538 	skb_gro_flush_final(skb, pp, flush);
1539 
1540 	return pp;
1541 }
1542 
1543 static struct sk_buff *ipip_gro_receive(struct list_head *head,
1544 					struct sk_buff *skb)
1545 {
1546 	if (NAPI_GRO_CB(skb)->encap_mark) {
1547 		NAPI_GRO_CB(skb)->flush = 1;
1548 		return NULL;
1549 	}
1550 
1551 	NAPI_GRO_CB(skb)->encap_mark = 1;
1552 
1553 	return inet_gro_receive(head, skb);
1554 }
1555 
1556 #define SECONDS_PER_DAY	86400
1557 
1558 /* inet_current_timestamp - Return IP network timestamp
1559  *
1560  * Return milliseconds since midnight in network byte order.
1561  */
1562 __be32 inet_current_timestamp(void)
1563 {
1564 	u32 secs;
1565 	u32 msecs;
1566 	struct timespec64 ts;
1567 
1568 	ktime_get_real_ts64(&ts);
1569 
1570 	/* Get secs since midnight. */
1571 	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
1572 	/* Convert to msecs. */
1573 	msecs = secs * MSEC_PER_SEC;
1574 	/* Convert nsec to msec. */
1575 	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
1576 
1577 	/* Convert to network byte order. */
1578 	return htonl(msecs);
1579 }
1580 EXPORT_SYMBOL(inet_current_timestamp);
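
/*
 * Worked example for the conversion above: at 01:02:03.004 UTC,
 * secs-since-midnight = 1*3600 + 2*60 + 3 = 3723, so
 * msecs = 3723 * 1000 + 4 = 3723004, returned as htonl(3723004).
 * This is the format the IP timestamp option (RFC 791) expects.
 */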
1581 
1582 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len)
1583 {
1584 	unsigned int family = READ_ONCE(sk->sk_family);
1585 
1586 	if (family == AF_INET)
1587 		return ip_recv_error(sk, msg, len);
1588 #if IS_ENABLED(CONFIG_IPV6)
1589 	if (family == AF_INET6)
1590 		return pingv6_ops.ipv6_recv_error(sk, msg, len);
1591 #endif
1592 	return -EINVAL;
1593 }
1594 EXPORT_SYMBOL(inet_recv_error);
1595 
1596 int inet_gro_complete(struct sk_buff *skb, int nhoff)
1597 {
1598 	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
1599 	const struct net_offload *ops;
1600 	__be16 totlen = iph->tot_len;
1601 	int proto = iph->protocol;
1602 	int err = -ENOSYS;
1603 
1604 	if (skb->encapsulation) {
1605 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1606 		skb_set_inner_network_header(skb, nhoff);
1607 	}
1608 
1609 	iph_set_totlen(iph, skb->len - nhoff);
1610 	csum_replace2(&iph->check, totlen, iph->tot_len);
1611 
1612 	ops = rcu_dereference(inet_offloads[proto]);
1613 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1614 		goto out;
1615 
1616 	/* Only need to add sizeof(*iph) to get to the next hdr below
1617 	 * because any hdr with options will have been flushed in
1618 	 * inet_gro_receive().
1619 	 */
1620 	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
1621 			      tcp4_gro_complete, udp4_gro_complete,
1622 			      skb, nhoff + sizeof(*iph));
1623 
1624 out:
1625 	return err;
1626 }
1627 
1628 static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
1629 {
1630 	skb->encapsulation = 1;
1631 	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
1632 	return inet_gro_complete(skb, nhoff);
1633 }
1634 
1635 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1636 			 unsigned short type, unsigned char protocol,
1637 			 struct net *net)
1638 {
1639 	struct socket *sock;
1640 	int rc = sock_create_kern(net, family, type, protocol, &sock);
1641 
1642 	if (rc == 0) {
1643 		*sk = sock->sk;
1644 		(*sk)->sk_allocation = GFP_ATOMIC;
1645 		(*sk)->sk_use_task_frag = false;
1646 		/*
1647 		 * Unhash it so that IP input processing does not even see it;
1648 		 * we do not wish this socket to see incoming packets.
1649 		 */
1650 		(*sk)->sk_prot->unhash(*sk);
1651 	}
1652 	return rc;
1653 }
1654 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1655 
1656 unsigned long snmp_fold_field(void __percpu *mib, int offt)
1657 {
1658 	unsigned long res = 0;
1659 	int i;
1660 
1661 	for_each_possible_cpu(i)
1662 		res += snmp_get_cpu_field(mib, i, offt);
1663 	return res;
1664 }
1665 EXPORT_SYMBOL_GPL(snmp_fold_field);
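
/*
 * Typical use (e.g. from the /proc/net/snmp code): fold one per-cpu counter
 * into a single value, indexed by its enum from uapi/linux/snmp.h, roughly:
 *
 *	unsigned long opens;
 *
 *	opens = snmp_fold_field(net->mib.tcp_statistics, TCP_MIB_ACTIVEOPENS);
 *
 * Each field is folded independently, so a set of counters read this way is
 * not a consistent snapshot.
 */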
1666 
1667 #if BITS_PER_LONG==32
1668 
1669 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
1670 			 size_t syncp_offset)
1671 {
1672 	void *bhptr;
1673 	struct u64_stats_sync *syncp;
1674 	u64 v;
1675 	unsigned int start;
1676 
1677 	bhptr = per_cpu_ptr(mib, cpu);
1678 	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1679 	do {
1680 		start = u64_stats_fetch_begin(syncp);
1681 		v = *(((u64 *)bhptr) + offt);
1682 	} while (u64_stats_fetch_retry(syncp, start));
1683 
1684 	return v;
1685 }
1686 EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
1687 
1688 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1689 {
1690 	u64 res = 0;
1691 	int cpu;
1692 
1693 	for_each_possible_cpu(cpu) {
1694 		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
1695 	}
1696 	return res;
1697 }
1698 EXPORT_SYMBOL_GPL(snmp_fold_field64);
1699 #endif
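
/*
 * The 32-bit variants above need the u64_stats_sync retry loop because a
 * 64-bit MIB counter cannot be read atomically on a 32-bit CPU; the
 * begin/retry pair re-reads the value if a writer updated it concurrently.
 * On 64-bit kernels plain loads are sufficient, hence the BITS_PER_LONG
 * conditional.
 */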
1700 
1701 #ifdef CONFIG_IP_MULTICAST
1702 static const struct net_protocol igmp_protocol = {
1703 	.handler =	igmp_rcv,
1704 };
1705 #endif
1706 
1707 static const struct net_protocol icmp_protocol = {
1708 	.handler =	icmp_rcv,
1709 	.err_handler =	icmp_err,
1710 	.no_policy =	1,
1711 };
1712 
1713 static __net_init int ipv4_mib_init_net(struct net *net)
1714 {
1715 	int i;
1716 
1717 	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1718 	if (!net->mib.tcp_statistics)
1719 		goto err_tcp_mib;
1720 	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1721 	if (!net->mib.ip_statistics)
1722 		goto err_ip_mib;
1723 
1724 	for_each_possible_cpu(i) {
1725 		struct ipstats_mib *af_inet_stats;
1726 		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1727 		u64_stats_init(&af_inet_stats->syncp);
1728 	}
1729 
1730 	net->mib.net_statistics = alloc_percpu(struct linux_mib);
1731 	if (!net->mib.net_statistics)
1732 		goto err_net_mib;
1733 	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1734 	if (!net->mib.udp_statistics)
1735 		goto err_udp_mib;
1736 	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1737 	if (!net->mib.icmp_statistics)
1738 		goto err_icmp_mib;
1739 	net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib);
1740 	if (!net->mib.icmpmsg_statistics)
1741 		goto err_icmpmsg_mib;
1742 
1743 	tcp_mib_init(net);
1744 	return 0;
1745 
1746 err_icmpmsg_mib:
1747 	free_percpu(net->mib.icmp_statistics);
1748 err_icmp_mib:
1749 	free_percpu(net->mib.udp_statistics);
1750 err_udp_mib:
1751 	free_percpu(net->mib.net_statistics);
1752 err_net_mib:
1753 	free_percpu(net->mib.ip_statistics);
1754 err_ip_mib:
1755 	free_percpu(net->mib.tcp_statistics);
1756 err_tcp_mib:
1757 	return -ENOMEM;
1758 }
1759 
1760 static __net_exit void ipv4_mib_exit_net(struct net *net)
1761 {
1762 	kfree(net->mib.icmpmsg_statistics);
1763 	free_percpu(net->mib.icmp_statistics);
1764 	free_percpu(net->mib.udp_statistics);
1765 	free_percpu(net->mib.net_statistics);
1766 	free_percpu(net->mib.ip_statistics);
1767 	free_percpu(net->mib.tcp_statistics);
1768 #ifdef CONFIG_MPTCP
1769 	/* allocated on demand, see mptcp_init_sock() */
1770 	free_percpu(net->mib.mptcp_statistics);
1771 #endif
1772 }
1773 
1774 static __net_initdata struct pernet_operations ipv4_mib_ops = {
1775 	.init = ipv4_mib_init_net,
1776 	.exit = ipv4_mib_exit_net,
1777 };
1778 
1779 static int __init init_ipv4_mibs(void)
1780 {
1781 	return register_pernet_subsys(&ipv4_mib_ops);
1782 }
1783 
1784 static __net_init int inet_init_net(struct net *net)
1785 {
1786 	/*
1787 	 * Set defaults for local port range
1788 	 */
1789 	net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u;
1790 
1791 	seqlock_init(&net->ipv4.ping_group_range.lock);
1792 	/*
1793 	 * Sane defaults - nobody may create ping sockets.
1794 	 * Boot scripts should set this to a distro-specific group.
1795 	 */
1796 	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1797 	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1798 
1799 	/* Default values for sysctl-controlled parameters.
1800 	 * We set them here, in case sysctl is not compiled.
1801 	 * We set them here, in case sysctl support is not compiled in.
1802 	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1803 	net->ipv4.sysctl_ip_fwd_update_priority = 1;
1804 	net->ipv4.sysctl_ip_dynaddr = 0;
1805 	net->ipv4.sysctl_ip_early_demux = 1;
1806 	net->ipv4.sysctl_udp_early_demux = 1;
1807 	net->ipv4.sysctl_tcp_early_demux = 1;
1808 	net->ipv4.sysctl_nexthop_compat_mode = 1;
1809 #ifdef CONFIG_SYSCTL
1810 	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
1811 #endif
1812 
1813 	/* Some igmp sysctls, whose values are always used */
1814 	net->ipv4.sysctl_igmp_max_memberships = 20;
1815 	net->ipv4.sysctl_igmp_max_msf = 10;
1816 	/* IGMP reports for link-local multicast groups are enabled by default */
1817 	net->ipv4.sysctl_igmp_llm_reports = 1;
1818 	net->ipv4.sysctl_igmp_qrv = 2;
1819 
1820 	net->ipv4.sysctl_fib_notify_on_flag_change = 0;
1821 
1822 	return 0;
1823 }
1824 
1825 static __net_initdata struct pernet_operations af_inet_ops = {
1826 	.init = inet_init_net,
1827 };
1828 
1829 static int __init init_inet_pernet_ops(void)
1830 {
1831 	return register_pernet_subsys(&af_inet_ops);
1832 }
1833 
1834 static int ipv4_proc_init(void);
1835 
1836 /*
1837  *	IP protocol layer initialiser
1838  */
1839 
1840 
1841 static const struct net_offload ipip_offload = {
1842 	.callbacks = {
1843 		.gso_segment	= ipip_gso_segment,
1844 		.gro_receive	= ipip_gro_receive,
1845 		.gro_complete	= ipip_gro_complete,
1846 	},
1847 };
1848 
1849 static int __init ipip_offload_init(void)
1850 {
1851 	return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
1852 }
1853 
1854 static int __init ipv4_offload_init(void)
1855 {
1856 	/*
1857 	 * Add offloads
1858 	 */
1859 	if (udpv4_offload_init() < 0)
1860 		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
1861 	if (tcpv4_offload_init() < 0)
1862 		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
1863 	if (ipip_offload_init() < 0)
1864 		pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
1865 
1866 	net_hotdata.ip_packet_offload = (struct packet_offload) {
1867 		.type = cpu_to_be16(ETH_P_IP),
1868 		.callbacks = {
1869 			.gso_segment = inet_gso_segment,
1870 			.gro_receive = inet_gro_receive,
1871 			.gro_complete = inet_gro_complete,
1872 		},
1873 	};
1874 	dev_add_offload(&net_hotdata.ip_packet_offload);
1875 	return 0;
1876 }
1877 
1878 fs_initcall(ipv4_offload_init);
1879 
1880 static struct packet_type ip_packet_type __read_mostly = {
1881 	.type = cpu_to_be16(ETH_P_IP),
1882 	.func = ip_rcv,
1883 	.list_func = ip_list_rcv,
1884 };
1885 
1886 static int __init inet_init(void)
1887 {
1888 	struct inet_protosw *q;
1889 	struct list_head *r;
1890 	int rc;
1891 
1892 	sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
1893 
1894 	raw_hashinfo_init(&raw_v4_hashinfo);
1895 
1896 	rc = proto_register(&tcp_prot, 1);
1897 	if (rc)
1898 		goto out;
1899 
1900 	rc = proto_register(&udp_prot, 1);
1901 	if (rc)
1902 		goto out_unregister_tcp_proto;
1903 
1904 	rc = proto_register(&raw_prot, 1);
1905 	if (rc)
1906 		goto out_unregister_udp_proto;
1907 
1908 	rc = proto_register(&ping_prot, 1);
1909 	if (rc)
1910 		goto out_unregister_raw_proto;
1911 
1912 	/*
1913 	 *	Tell SOCKET that we are alive...
1914 	 */
1915 
1916 	(void)sock_register(&inet_family_ops);
1917 
1918 #ifdef CONFIG_SYSCTL
1919 	ip_static_sysctl_init();
1920 #endif
1921 
1922 	/*
1923 	 *	Add all the base protocols.
1924 	 */
1925 
1926 	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1927 		pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1928 
1929 	net_hotdata.udp_protocol = (struct net_protocol) {
1930 		.handler =	udp_rcv,
1931 		.err_handler =	udp_err,
1932 		.no_policy =	1,
1933 	};
1934 	if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
1935 		pr_crit("%s: Cannot add UDP protocol\n", __func__);
1936 
1937 	net_hotdata.tcp_protocol = (struct net_protocol) {
1938 		.handler	=	tcp_v4_rcv,
1939 		.err_handler	=	tcp_v4_err,
1940 		.no_policy	=	1,
1941 		.icmp_strict_tag_validation = 1,
1942 	};
1943 	if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
1944 		pr_crit("%s: Cannot add TCP protocol\n", __func__);
1945 #ifdef CONFIG_IP_MULTICAST
1946 	if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1947 		pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1948 #endif
1949 
1950 	/* Register the socket-side information for inet_create. */
1951 	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
1952 		INIT_LIST_HEAD(r);
1953 
1954 	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
1955 		inet_register_protosw(q);
1956 
1957 	/*
1958 	 *	Set the ARP module up
1959 	 */
1960 
1961 	arp_init();
1962 
1963 	/*
1964 	 *	Set the IP module up
1965 	 */
1966 
1967 	ip_init();
1968 
1969 	/* Initialise per-cpu ipv4 mibs */
1970 	if (init_ipv4_mibs())
1971 		panic("%s: Cannot init ipv4 mibs\n", __func__);
1972 
1973 	/* Setup TCP slab cache for open requests. */
1974 	tcp_init();
1975 
1976 	/* Setup UDP memory threshold */
1977 	udp_init();
1978 
1979 	raw_init();
1980 
1981 	ping_init();
1982 
1983 	/*
1984 	 *	Set the ICMP layer up
1985 	 */
1986 
1987 	if (icmp_init() < 0)
1988 		panic("Failed to create the ICMP control socket.\n");
1989 
1990 	/*
1991 	 *	Initialise the multicast router
1992 	 */
1993 #if defined(CONFIG_IP_MROUTE)
1994 	if (ip_mr_init())
1995 		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1996 #endif
1997 
1998 	if (init_inet_pernet_ops())
1999 		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
2000 
2001 	ipv4_proc_init();
2002 
2003 	ipfrag_init();
2004 
2005 	dev_add_pack(&ip_packet_type);
2006 
2007 	ip_tunnel_core_init();
2008 
2009 	rc = 0;
2010 out:
2011 	return rc;
2012 out_unregister_raw_proto:
2013 	proto_unregister(&raw_prot);
2014 out_unregister_udp_proto:
2015 	proto_unregister(&udp_prot);
2016 out_unregister_tcp_proto:
2017 	proto_unregister(&tcp_prot);
2018 	goto out;
2019 }
2020 
2021 fs_initcall(inet_init);
2022 
2023 /* ------------------------------------------------------------------------ */
2024 
2025 #ifdef CONFIG_PROC_FS
2026 static int __init ipv4_proc_init(void)
2027 {
2028 	int rc = 0;
2029 
2030 	if (raw_proc_init())
2031 		goto out_raw;
2032 	if (tcp4_proc_init())
2033 		goto out_tcp;
2034 	if (udp4_proc_init())
2035 		goto out_udp;
2036 	if (ping_proc_init())
2037 		goto out_ping;
2038 	if (ip_misc_proc_init())
2039 		goto out_misc;
2040 out:
2041 	return rc;
2042 out_misc:
2043 	ping_proc_exit();
2044 out_ping:
2045 	udp4_proc_exit();
2046 out_udp:
2047 	tcp4_proc_exit();
2048 out_tcp:
2049 	raw_proc_exit();
2050 out_raw:
2051 	rc = -ENOMEM;
2052 	goto out;
2053 }
2054 
2055 #else /* CONFIG_PROC_FS */
2056 static int __init ipv4_proc_init(void)
2057 {
2058 	return 0;
2059 }
2060 #endif /* CONFIG_PROC_FS */
2061