// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PF_INET protocol family socket handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Changes (see also sock.c)
 *
 *		piggy,
 *		Karl Knutson	:	Socket protocol table
 *		A.N.Kuznetsov	:	Socket death error in accept().
 *		John Richardson :	Fix non blocking error in connect()
 *					so sockets that fail to connect
 *					don't return -EINPROGRESS.
 *		Alan Cox	:	Asynchronous I/O support
 *		Alan Cox	:	Keep correct socket pointer on sock
 *					structures
 *					when accept() ed
 *		Alan Cox	:	Semantics of SO_LINGER aren't state
 *					moved to close when you look carefully.
 *					With this fixed and the accept bug fixed
 *					some RPC stuff seems happier.
 *		Niibe Yutaka	:	4.4BSD style write async I/O
 *		Alan Cox,
 *		Tony Gale	:	Fixed reuse semantics.
 *		Alan Cox	:	bind() shouldn't abort existing but dead
 *					sockets. Stops FTP netin:.. I hope.
 *		Alan Cox	:	bind() works correctly for RAW sockets.
 *					Note that FreeBSD at least was broken
 *					in this respect so be careful with
 *					compatibility tests...
 *		Alan Cox	:	routing cache support
 *		Alan Cox	:	memzero the socket structure for
 *					compactness.
 *		Matt Day	:	nonblock connect error handler
 *		Alan Cox	:	Allow large numbers of pending sockets
 *					(eg for big web sites), but only if
 *					specifically application requested.
 *		Alan Cox	:	New buffering throughout IP. Used
 *					dumbly.
 *		Alan Cox	:	New buffering now used smartly.
 *		Alan Cox	:	BSD rather than common sense
 *					interpretation of listen.
 *		Germano Caronni	:	Assorted small races.
 *		Alan Cox	:	sendmsg/recvmsg basic support.
 *		Alan Cox	:	Only sendmsg/recvmsg now supported.
 *		Alan Cox	:	Locked down bind (see security list).
 *		Alan Cox	:	Loosened bind a little.
 *		Mike McLagan	:	ADD/DEL DLCI Ioctls
 *	Willy Konynenberg	:	Transparent proxying support.
 *		David S. Miller	:	New socket lookup architecture.
 *					Some other random speedups.
 *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
 *		Andi Kleen	:	Fix inet_stream_connect TCP race.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/psp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
#include <net/rps.h>

#include <trace/events/sock.h>

/* Keep the definition of IPv6 disable here for now, to avoid annoying linker
 * issues in case IPv6=m
 */
int disable_ipv6_mod;
EXPORT_SYMBOL(disable_ipv6_mod);

/* The inetsw table contains everything that inet_create needs to
 * build a new socket.
 */
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);

/* New destruction routine */

void inet_sock_destruct(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim_final(sk);

	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
		pr_err("Attempt to release TCP socket in state %d %p\n",
		       sk->sk_state, sk);
		return;
	}
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive inet socket %p\n", sk);
		return;
	}

	WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON_ONCE(sk->sk_wmem_queued);
	WARN_ON_ONCE(sk->sk_forward_alloc);

	kfree(rcu_dereference_protected(inet->inet_opt, 1));
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
	dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
	psp_sk_assoc_free(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);

/*
 *	The routines beyond this point handle the behaviour of an AF_INET
 *	socket object. Mostly it punts to the subprotocols of IP to do
 *	the work.
 */

/*
 *	Automatically bind an unbound socket.
 */

static int inet_autobind(struct sock *sk)
{
	struct inet_sock *inet;

	/* We may need to bind the socket. */
	lock_sock(sk);
	inet = inet_sk(sk);
	if (!inet->inet_num) {
		if (sk->sk_prot->get_port(sk, 0)) {
			release_sock(sk);
			return -EAGAIN;
		}
		inet->inet_sport = htons(inet->inet_num);
	}
	release_sock(sk);
	return 0;
}
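
/* Userspace illustration (not kernel code): a datagram socket that sends
 * without an explicit bind() is autobound by the path above; the kernel
 * picks an ephemeral port via get_port(sk, 0), and getsockname() then
 * reports it:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *	getsockname(fd, (struct sockaddr *)&local, &llen); // port now nonzero
 */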

int __inet_listen_sk(struct sock *sk, int backlog)
{
	unsigned char old_state = sk->sk_state;
	int err, tcp_fastopen;

	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		return -EINVAL;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != TCP_LISTEN) {
		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
		 * Also the fastopen backlog may already have been set via the
		 * option because the socket was in TCP_LISTEN state previously
		 * but was shutdown() rather than close().
		 */
		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
			fastopen_queue_tune(sk, backlog);
			tcp_fastopen_init_key_once(sock_net(sk));
		}

		err = inet_csk_listen_start(sk);
		if (err)
			return err;

		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
	}
	return 0;
}

/*
 *	Move a socket into listening state.
 */
int inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);

	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto out;

	err = __inet_listen_sk(sk, backlog);

out:
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_listen);
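
/* Illustration: with the path above, a listener gets TCP Fast Open without
 * ever calling setsockopt(TCP_FASTOPEN), provided the sysctl enables both
 * TFO_SERVER_ENABLE (0x2) and TFO_SERVER_WO_SOCKOPT1 (0x400), e.g.:
 *
 *	# echo 0x402 > /proc/sys/net/ipv4/tcp_fastopen
 *
 * after which a plain listen() tunes the fastopen queue via
 * fastopen_queue_tune() above (flag values per include/net/tcp.h).
 */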

/*
 *	Create an inet socket.
 */

static int inet_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;
	struct inet_protosw *answer;
	struct inet_sock *inet;
	struct proto *answer_prot;
	unsigned char answer_flags;
	int try_loading_module = 0;
	int err;

	if (protocol < 0 || protocol >= IPPROTO_MAX)
		return -EINVAL;

	sock->state = SS_UNCONNECTED;

	/* Look for the requested type/protocol pair. */
lookup_protocol:
	err = -ESOCKTNOSUPPORT;
	rcu_read_lock();
	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {

		err = 0;
		/* Check the non-wild match. */
		if (protocol == answer->protocol) {
			if (protocol != IPPROTO_IP)
				break;
		} else {
			/* Check for the two wild cases. */
			if (IPPROTO_IP == protocol) {
				protocol = answer->protocol;
				break;
			}
			if (IPPROTO_IP == answer->protocol)
				break;
		}
		err = -EPROTONOSUPPORT;
	}

	if (unlikely(err)) {
		if (try_loading_module < 2) {
			rcu_read_unlock();
			/*
			 * Be more specific, e.g. net-pf-2-proto-132-type-1
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
			 */
			if (++try_loading_module == 1)
				request_module("net-pf-%d-proto-%d-type-%d",
					       PF_INET, protocol, sock->type);
			/*
			 * Fall back to generic, e.g. net-pf-2-proto-132
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
			 */
			else
				request_module("net-pf-%d-proto-%d",
					       PF_INET, protocol);
			goto lookup_protocol;
		} else
			goto out_rcu_unlock;
	}

	err = -EPERM;
	if (sock->type == SOCK_RAW && !kern &&
	    !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out_rcu_unlock;

	sock->ops = answer->ops;
	answer_prot = answer->prot;
	answer_flags = answer->flags;
	rcu_read_unlock();

	WARN_ON(!answer_prot->slab);

	err = -ENOMEM;
	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
	if (!sk)
		goto out;

	err = 0;
	if (INET_PROTOSW_REUSE & answer_flags)
		sk->sk_reuse = SK_CAN_REUSE;

	if (INET_PROTOSW_ICSK & answer_flags)
		inet_init_csk_locks(sk);

	inet = inet_sk(sk);
	inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);

	inet_clear_bit(NODEFRAG, sk);

	if (SOCK_RAW == sock->type) {
		inet->inet_num = protocol;
		if (IPPROTO_RAW == protocol)
			inet_set_bit(HDRINCL, sk);
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
		inet->pmtudisc = IP_PMTUDISC_DONT;
	else
		inet->pmtudisc = IP_PMTUDISC_WANT;

	atomic_set(&inet->inet_id, 0);

	sock_init_data(sock, sk);

	sk->sk_destruct	   = inet_sock_destruct;
	sk->sk_protocol	   = protocol;
	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	sk->sk_txrehash	   = READ_ONCE(net->core.sysctl_txrehash);

	inet->uc_ttl	= -1;
	inet_set_bit(MC_LOOP, sk);
	inet->mc_ttl	= 1;
	inet_set_bit(MC_ALL, sk);
	inet->mc_index	= 0;
	inet->mc_list	= NULL;
	inet->rcv_tos	= 0;

	if (inet->inet_num) {
		/* It assumes that any protocol which allows
		 * the user to assign a number at socket
		 * creation time automatically
		 * shares that number.
		 */
		inet->inet_sport = htons(inet->inet_num);
		/* Add to protocol hash chains. */
		err = sk->sk_prot->hash(sk);
		if (err)
			goto out_sk_release;
	}

	if (sk->sk_prot->init) {
		err = sk->sk_prot->init(sk);
		if (err)
			goto out_sk_release;
	}

	if (!kern) {
		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
		if (err)
			goto out_sk_release;
	}
out:
	return err;
out_rcu_unlock:
	rcu_read_unlock();
	goto out;
out_sk_release:
	sk_common_release(sk);
	sock->sk = NULL;
	goto out;
}
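
/* Illustration: the request_module() calls in inet_create() above resolve
 * through module aliases of the form "net-pf-2-proto-132-type-1".  A module
 * implementing such a protocol typically declares them with the helpers
 * from <linux/net.h>, e.g. (SCTP shown as the canonical example):
 *
 *	MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_SCTP);
 *	MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, IPPROTO_SCTP, SOCK_STREAM);
 */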

/*
 *	The peer socket should always be NULL (or else). When we call this
 *	function we are destroying the object and from then on nobody
 *	should refer to it.
 */
int inet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		long timeout;

		if (!sk->sk_kern_sock)
			BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);

		/* Applications forget to leave groups before exiting */
		ip_mc_drop_socket(sk);

		/* If linger is set, we don't return until the close
		 * is complete.  Otherwise we return immediately. The
		 * actual closing is done the same either way.
		 *
		 * If the close is due to the process exiting, we never
		 * linger..
		 */
		timeout = 0;
		if (sock_flag(sk, SOCK_LINGER) &&
		    !(current->flags & PF_EXITING))
			timeout = sk->sk_lingertime;
		sk->sk_prot->close(sk, timeout);
		sock->sk = NULL;
	}
	return 0;
}
EXPORT_SYMBOL(inet_release);

int inet_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len)
{
	u32 flags = BIND_WITH_LOCK;
	int err;

	/* If the socket has its own bind function then use it. (RAW) */
	if (sk->sk_prot->bind) {
		return sk->sk_prot->bind(sk, uaddr, addr_len);
	}
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	/* BPF prog is run before any checks are done so that if the prog
	 * changes context in a wrong way it will be caught.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
						 CGROUP_INET4_BIND, &flags);
	if (err)
		return err;

	return __inet_bind(sk, uaddr, addr_len, flags);
}

int inet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
{
	return inet_bind_sk(sock->sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_bind);
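
/* Userspace illustration (not kernel code): the common caller of the bind
 * path above:
 *
 *	struct sockaddr_in a = {
 *		.sin_family      = AF_INET,
 *		.sin_port        = htons(8080),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Note the compatibility quirk handled in __inet_bind() below: AF_UNSPEC is
 * accepted in place of AF_INET, but only together with INADDR_ANY.
 */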

int __inet_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
		u32 flags)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	unsigned short snum;
	int chk_addr_ret;
	u32 tb_id = RT_TABLE_LOCAL;
	int err;

	if (addr->sin_family != AF_INET) {
		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
		 * only if s_addr is INADDR_ANY.
		 */
		err = -EAFNOSUPPORT;
		if (addr->sin_family != AF_UNSPEC ||
		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
			goto out;
	}

	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);

	/* Not specified by any standard per-se, however it breaks too
	 * many applications when removed.  It is unfortunate since
	 * allowing applications to make a non-local bind solves
	 * several problems with systems using dynamic addressing.
	 * (ie. your servers still start up even if your ISDN link
	 *  is temporarily down)
	 */
	err = -EADDRNOTAVAIL;
	if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
					 chk_addr_ret))
		goto out;

	snum = ntohs(addr->sin_port);
	err = -EACCES;
	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
	    snum && inet_port_requires_bind_service(net, snum) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		goto out;

	/*      We keep a pair of addresses. rcv_saddr is the one
	 *      used by hash lookups, and saddr is used for transmit.
	 *
	 *      In the BSD API these are the same except where it
	 *      would be illegal to use them (multicast/broadcast) in
	 *      which case the sending device address is used.
	 */
	if (flags & BIND_WITH_LOCK)
		lock_sock(sk);

	/* Check these errors (active socket, double bind). */
	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
		goto out_release_sock;

	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	/* Make sure we are allowed to bind here. */
	if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
		err = sk->sk_prot->get_port(sk, snum);
		if (err) {
			inet->inet_saddr = inet->inet_rcv_saddr = 0;
			goto out_release_sock;
		}
		if (!(flags & BIND_FROM_BPF)) {
			err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
			if (err) {
				inet->inet_saddr = inet->inet_rcv_saddr = 0;
				if (sk->sk_prot->put_port)
					sk->sk_prot->put_port(sk);
				goto out_release_sock;
			}
		}
	}

	if (inet->inet_rcv_saddr)
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	inet->inet_sport = htons(inet->inet_num);
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sk_dst_reset(sk);
	err = 0;
out_release_sock:
	if (flags & BIND_WITH_LOCK)
		release_sock(sk);
out:
	return err;
}

int inet_dgram_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	const struct proto *prot;
	int err;

	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	prot = READ_ONCE(sk->sk_prot);

	if (uaddr->sa_family == AF_UNSPEC)
		return prot->disconnect(sk, flags);

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
		err = prot->pre_connect(sk, uaddr, addr_len);
		if (err)
			return err;
	}

	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
		return -EAGAIN;
	return prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
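
/* Userspace illustration (not kernel code): connect(AF_UNSPEC) on a
 * datagram socket takes the prot->disconnect() branch above, dissolving a
 * prior association rather than creating one:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));  // back to an unconnected socket
 */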

static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending += writebias;

	/* Basic assumption: if someone sets sk->sk_err, he _must_
	 * change state of the socket from TCP_SYN_*.
	 * Connect() does not allow getting error notifications
	 * without closing the socket.
	 */
	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (signal_pending(current) || !timeo)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending -= writebias;
	return timeo;
}

/*
 *	Connect to a remote host. There is regrettably still a little
 *	TCP 'magic' in here.
 */
int __inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
			  int addr_len, int flags, int is_sendmsg)
{
	struct sock *sk = sock->sk;
	int err;
	long timeo;

	/*
	 * uaddr can be NULL and addr_len can be 0 if:
	 * sk is a TCP fastopen active socket and
	 * TCP_FASTOPEN_CONNECT sockopt is set and
	 * we already have a valid cookie for this socket.
	 * In this case, user can call write() after connect().
	 * write() will invoke tcp_sendmsg_fastopen() which calls
	 * __inet_stream_connect().
	 */
	if (uaddr) {
		if (addr_len < sizeof(uaddr->sa_family))
			return -EINVAL;

		if (uaddr->sa_family == AF_UNSPEC) {
			sk->sk_disconnects++;
			err = sk->sk_prot->disconnect(sk, flags);
			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
			goto out;
		}
	}

	switch (sock->state) {
	default:
		err = -EINVAL;
		goto out;
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_CONNECTING:
		if (inet_test_bit(DEFER_CONNECT, sk))
			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
		else
			err = -EALREADY;
		/* Fall out of switch with err, set for this state */
		break;
	case SS_UNCONNECTED:
		err = -EISCONN;
		if (sk->sk_state != TCP_CLOSE)
			goto out;

		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
			if (err)
				goto out;
		}

		err = sk->sk_prot->connect(sk, uaddr, addr_len);
		if (err < 0)
			goto out;

		sock->state = SS_CONNECTING;

		if (!err && inet_test_bit(DEFER_CONNECT, sk))
			goto out;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		err = -EINPROGRESS;
		break;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
				tcp_sk(sk)->fastopen_req &&
				tcp_sk(sk)->fastopen_req->data ? 1 : 0;
		int dis = sk->sk_disconnects;

		/* Error code is set above */
		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
			goto out;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;

		if (dis != sk->sk_disconnects) {
			err = -EPIPE;
			goto out;
		}
	}

	/* Connection was closed by RST, timeout, ICMP error
	 * or another process disconnected us.
	 */
	if (sk->sk_state == TCP_CLOSE)
		goto sock_error;

	/* sk->sk_err may not be zero now, if RECVERR was ordered by user
	 * and error was received after socket entered established state.
	 * Hence, it is handled normally after connect() return successfully.
	 */

	sock->state = SS_CONNECTED;
	err = 0;
out:
	return err;

sock_error:
	err = sock_error(sk) ? : -ECONNABORTED;
	sock->state = SS_UNCONNECTED;
	sk->sk_disconnects++;
	if (sk->sk_prot->disconnect(sk, flags))
		sock->state = SS_DISCONNECTING;
	goto out;
}
EXPORT_SYMBOL(__inet_stream_connect);

int inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
			int addr_len, int flags)
{
	int err;

	lock_sock(sock->sk);
	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
	release_sock(sock->sk);
	return err;
}
EXPORT_SYMBOL(inet_stream_connect);
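
/* Userspace illustration (not kernel code): the -EINPROGRESS result above
 * is what a classic non-blocking connect sees; completion is harvested with
 * poll() plus SO_ERROR rather than by calling connect() again:
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		int soerr;
 *		socklen_t slen = sizeof(soerr);
 *
 *		poll(&pfd, 1, -1);
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &soerr, &slen);
 *	}
 */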

void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled) {
		mem_cgroup_sk_alloc(newsk);
		__sk_charge(newsk, GFP_KERNEL);
	}

	sock_rps_record_flow(newsk);
	WARN_ON(!((1 << newsk->sk_state) &
		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
		   TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
		   TCPF_CLOSING | TCPF_CLOSE_WAIT |
		   TCPF_CLOSE)));

	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
	sock_graft(newsk, newsock);

	newsock->state = SS_CONNECTED;
}
EXPORT_SYMBOL_GPL(__inet_accept);

/*
 *	Accept a pending connection. The TCP layer now gives BSD semantics.
 */

int inet_accept(struct socket *sock, struct socket *newsock,
		struct proto_accept_arg *arg)
{
	struct sock *sk1 = sock->sk, *sk2;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	arg->err = -EINVAL;
	sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg);
	if (!sk2)
		return arg->err;

	lock_sock(sk2);
	__inet_accept(sock, newsock, sk2);
	release_sock(sk2);
	return 0;
}
EXPORT_SYMBOL(inet_accept);

/*
 *	This does both peername and sockname.
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		 int peer)
{
	struct sock *sk		= sock->sk;
	struct inet_sock *inet	= inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
	int sin_addr_len = sizeof(*sin);

	sin->sin_family = AF_INET;
	lock_sock(sk);
	if (peer) {
		if (!inet->inet_dport ||
		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
		     peer == 1)) {
			release_sock(sk);
			return -ENOTCONN;
		}
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
		BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
				       CGROUP_INET4_GETPEERNAME);
	} else {
		__be32 addr = inet->inet_rcv_saddr;
		if (!addr)
			addr = inet->inet_saddr;
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = addr;
		BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
				       CGROUP_INET4_GETSOCKNAME);
	}
	release_sock(sk);
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	return sin_addr_len;
}
EXPORT_SYMBOL(inet_getname);

int inet_send_prepare(struct sock *sk)
{
	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL_GPL(inet_send_prepare);

int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	const struct proto *prot;

	if (unlikely(inet_send_prepare(sk)))
		return -EAGAIN;

	prot = READ_ONCE(sk->sk_prot);
	return INDIRECT_CALL_2(prot->sendmsg, tcp_sendmsg, udp_sendmsg,
			       sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);

void inet_splice_eof(struct socket *sock)
{
	const struct proto *prot;
	struct sock *sk = sock->sk;

	if (unlikely(inet_send_prepare(sk)))
		return;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	prot = READ_ONCE(sk->sk_prot);
	if (prot->splice_eof)
		prot->splice_eof(sock);
}
EXPORT_SYMBOL_GPL(inet_splice_eof);

INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
					  size_t, int));
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags)
{
	struct sock *sk = sock->sk;
	const struct proto *prot;

	if (likely(!(flags & MSG_ERRQUEUE)))
		sock_rps_record_flow(sk);

	prot = READ_ONCE(sk->sk_prot);
	return INDIRECT_CALL_2(prot->recvmsg, tcp_recvmsg, udp_recvmsg,
			       sk, msg, size, flags);
}
EXPORT_SYMBOL(inet_recvmsg);
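
/* Userspace illustration (not kernel code): the MSG_ERRQUEUE special case
 * above is the receive half of IP_RECVERR; queued ICMP errors are drained
 * with:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	...
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);  // see inet_recv_error() below
 */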

int inet_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	/* This should really check to make sure
	 * the socket is a TCP socket. (WHY AC...)
	 */
	how++; /* maps 0->1 has the advantage of making bit 1 rcvs and
		       1->2 bit 2 snds.
		       2->3 */
	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
		return -EINVAL;

	lock_sock(sk);
	if (sock->state == SS_CONNECTING) {
		if ((1 << sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	switch (sk->sk_state) {
	case TCP_CLOSE:
		err = -ENOTCONN;
		/* Hack to wake up other listeners, who can poll for
		   EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
		fallthrough;
	default:
		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
		if (sk->sk_prot->shutdown)
			sk->sk_prot->shutdown(sk, how);
		break;

	/* Remaining two branches are temporary solution for missing
	 * close() in multithreaded environment. It is _not_ a good idea,
	 * but we have no choice until close() is repaired at VFS level.
	 */
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
		break;
	}

	/* Wake up anyone sleeping in poll. */
	sk->sk_state_change(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_shutdown);
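
/* Worked example of the how++ mapping in inet_shutdown() above:
 * SHUT_RD (0) -> 1 (RCV_SHUTDOWN), SHUT_WR (1) -> 2 (SEND_SHUTDOWN),
 * SHUT_RDWR (2) -> 3 (both bits).  Anything else fails the
 * (how & ~SHUTDOWN_MASK) || !how test, including the wraparound case the
 * "MAXINT->0" note refers to, where the increment makes !how true.
 */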

/*
 *	ioctl() calls you can issue on an INET socket. Most of these are
 *	device configuration and stuff and very rarely used. Some ioctls
 *	pass on to the socket itself.
 *
 *	NOTE: I like the idea of a module for the config stuff. ie ifconfig
 *	loads the devconfigure module does its configuring and unloads it.
 *	There's a good 20K of config code hanging around the kernel.
 */

int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err = 0;
	struct net *net = sock_net(sk);
	void __user *p = (void __user *)arg;
	struct ifreq ifr;
	struct rtentry rt;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
			return -EFAULT;
		err = ip_rt_ioctl(net, cmd, &rt);
		break;
	case SIOCRTMSG:
		err = -EINVAL;
		break;
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
		err = arp_ioctl(net, cmd, (void __user *)arg);
		break;
	case SIOCGIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCGIFPFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		if (!err && put_user_ifreq(&ifr, p))
			err = -EFAULT;
		break;

	case SIOCSIFADDR:
	case SIOCSIFBRDADDR:
	case SIOCSIFNETMASK:
	case SIOCSIFDSTADDR:
	case SIOCSIFPFLAGS:
	case SIOCSIFFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		break;
	default:
		if (sk->sk_prot->ioctl)
			err = sk_ioctl(sk, cmd, (void __user *)arg);
		else
			err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
EXPORT_SYMBOL(inet_ioctl);

#ifdef CONFIG_COMPAT
static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
				     struct compat_rtentry __user *ur)
{
	compat_uptr_t rtdev;
	struct rtentry rt;

	if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
			   3 * sizeof(struct sockaddr)) ||
	    get_user(rt.rt_flags, &ur->rt_flags) ||
	    get_user(rt.rt_metric, &ur->rt_metric) ||
	    get_user(rt.rt_mtu, &ur->rt_mtu) ||
	    get_user(rt.rt_window, &ur->rt_window) ||
	    get_user(rt.rt_irtt, &ur->rt_irtt) ||
	    get_user(rtdev, &ur->rt_dev))
		return -EFAULT;

	rt.rt_dev = compat_ptr(rtdev);
	return ip_rt_ioctl(sock_net(sk), cmd, &rt);
}

static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		return inet_compat_routing_ioctl(sk, cmd, argp);
	default:
		if (!sk->sk_prot->compat_ioctl)
			return -ENOIOCTLCMD;
		return sk->sk_prot->compat_ioctl(sk, cmd, arg);
	}
}
#endif /* CONFIG_COMPAT */

const struct proto_ops inet_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet_getname,
	.poll		   = tcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = inet_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
#ifdef CONFIG_MMU
	.mmap		   = tcp_mmap,
#endif
	.splice_eof	   = inet_splice_eof,
	.splice_read	   = tcp_splice_read,
	.set_peek_off	   = sk_set_peek_off,
	.read_sock	   = tcp_read_sock,
	.read_skb	   = tcp_read_skb,
	.sendmsg_locked    = tcp_sendmsg_locked,
	.peek_len	   = tcp_peek_len,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
	.set_rcvlowat	   = tcp_set_rcvlowat,
};
EXPORT_SYMBOL(inet_stream_ops);

const struct proto_ops inet_dgram_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = udp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.read_skb	   = udp_read_skb,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.splice_eof	   = inet_splice_eof,
	.set_peek_off	   = udp_set_peek_off,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);

/*
 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
 * udp_poll
 */
static const struct proto_ops inet_sockraw_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.splice_eof	   = inet_splice_eof,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};

static const struct net_proto_family inet_family_ops = {
	.family = PF_INET,
	.create = inet_create,
	.owner	= THIS_MODULE,
};

/* Upon startup we insert all the elements in inetsw_array[] into
 * the linked list inetsw.
 */
static struct inet_protosw inetsw_array[] =
{
	{
		.type =       SOCK_STREAM,
		.protocol =   IPPROTO_TCP,
		.prot =       &tcp_prot,
		.ops =        &inet_stream_ops,
		.flags =      INET_PROTOSW_PERMANENT |
			      INET_PROTOSW_ICSK,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_UDP,
		.prot =       &udp_prot,
		.ops =        &inet_dgram_ops,
		.flags =      INET_PROTOSW_PERMANENT,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_ICMP,
		.prot =       &ping_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	},

	{
		.type =       SOCK_RAW,
		.protocol =   IPPROTO_IP,	/* wild card */
		.prot =       &raw_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	}
};

#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)

void inet_register_protosw(struct inet_protosw *p)
{
	struct list_head *lh;
	struct inet_protosw *answer;
	int protocol = p->protocol;
	struct list_head *last_perm;

	spin_lock_bh(&inetsw_lock);

	if (p->type >= SOCK_MAX)
		goto out_illegal;

	/* If we are trying to override a permanent protocol, bail. */
	last_perm = &inetsw[p->type];
	list_for_each(lh, &inetsw[p->type]) {
		answer = list_entry(lh, struct inet_protosw, list);
		/* Check only the non-wild match. */
		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
			break;
		if (protocol == answer->protocol)
			goto out_permanent;
		last_perm = lh;
	}

	/* Add the new entry after the last permanent entry if any, so that
	 * the new entry does not override a permanent entry when matched with
	 * a wild-card protocol. But it is allowed to override any existing
	 * non-permanent entry.  This means that when we remove this entry, the
	 * system automatically returns to the old behavior.
	 */
	list_add_rcu(&p->list, last_perm);
out:
	spin_unlock_bh(&inetsw_lock);

	return;

out_permanent:
	pr_err("Attempt to override permanent protocol %d\n", protocol);
	goto out;

out_illegal:
	pr_err("Ignoring attempt to register invalid socket type %d\n",
	       p->type);
	goto out;
}
EXPORT_SYMBOL(inet_register_protosw);

void inet_unregister_protosw(struct inet_protosw *p)
{
	if (INET_PROTOSW_PERMANENT & p->flags) {
		pr_err("Attempt to unregister permanent protocol %d\n",
		       p->protocol);
	} else {
		spin_lock_bh(&inetsw_lock);
		list_del_rcu(&p->list);
		spin_unlock_bh(&inetsw_lock);

		synchronize_net();
	}
}
EXPORT_SYMBOL(inet_unregister_protosw);
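
/* Illustrative sketch (names hypothetical): a modular protocol adds itself
 * with a non-permanent entry, which the loop above places after the
 * permanent TCP/UDP/ICMP/RAW ones so it can never shadow them on a
 * wild-card match:
 *
 *	static struct inet_protosw my_protosw = {
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_MY,		// assumed protocol number
 *		.prot     = &my_prot,		// assumed struct proto
 *		.ops      = &inet_stream_ops,
 *	};
 *	inet_register_protosw(&my_protosw);
 */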

static int inet_sk_reselect_saddr(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	__be32 old_saddr = inet->inet_saddr;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;
	__be32 new_saddr;
	struct ip_options_rcu *inet_opt;
	int err;

	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;

	/* Query new route. */
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
			      sk->sk_protocol, inet->inet_sport,
			      inet->inet_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	new_saddr = fl4->saddr;

	if (new_saddr == old_saddr) {
		sk_setup_caps(sk, &rt->dst);
		return 0;
	}

	err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
	if (err) {
		ip_rt_put(rt);
		return err;
	}

	sk_setup_caps(sk, &rt->dst);

	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
			__func__, &old_saddr, &new_saddr);
	}

	/*
	 * XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	return __sk_prot_rehash(sk);
}

int inet_sk_rebuild_header(struct sock *sk)
{
	struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4;
	int err;

	/* Route is OK, nothing to do. */
	if (rt)
		return 0;

	/* Reroute. */
	fl4 = &inet->cork.fl.u.ip4;
	inet_sk_init_flowi4(inet, fl4);
	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
	if (!IS_ERR(rt)) {
		err = 0;
		sk_setup_caps(sk, &rt->dst);
	} else {
		err = PTR_ERR(rt);

		/* Routing failed... */
		sk->sk_route_caps = 0;

		if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
		    sk->sk_state != TCP_SYN_SENT ||
		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
		    (err = inet_sk_reselect_saddr(sk)) != 0)
			WRITE_ONCE(sk->sk_err_soft, -err);
	}

	return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);

void inet_sk_set_state(struct sock *sk, int state)
{
	trace_inet_sock_set_state(sk, sk->sk_state, state);
	sk->sk_state = state;
}
EXPORT_SYMBOL(inet_sk_set_state);

void inet_sk_state_store(struct sock *sk, int newstate)
{
	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
	smp_store_release(&sk->sk_state, newstate);
}
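
/* The smp_store_release() above pairs with the smp_load_acquire() in
 * inet_sk_state_load(), so lockless readers observe a state transition
 * only after the writes that preceded it.
 */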

struct sk_buff *inet_gso_segment(struct sk_buff *skb,
				 netdev_features_t features)
{
	bool udpfrag = false, fixedid = false, gso_partial, encap;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	unsigned int offset = 0;
	struct iphdr *iph;
	int proto, tot_len;
	int nhoff;
	int ihl;
	int id;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;
	if (ihl < sizeof(*iph))
		goto out;

	id = ntohs(iph->id);
	proto = iph->protocol;

	/* Warning: after this point, iph might be no longer valid */
	if (unlikely(!pskb_may_pull(skb, ihl)))
		goto out;
	__skb_pull(skb, ihl);

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += ihl;

	skb_reset_transport_header(skb);

	segs = ERR_PTR(-EPROTONOSUPPORT);

	fixedid = !!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCP_FIXEDID << encap));

	if (!skb->encapsulation || encap)
		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	skb = segs;
	do {
		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
		if (udpfrag) {
			iph->frag_off = htons(offset >> 3);
			if (skb->next)
				iph->frag_off |= htons(IP_MF);
			offset += skb->len - nhoff - ihl;
			tot_len = skb->len - nhoff;
		} else if (skb_is_gso(skb)) {
			if (!fixedid) {
				iph->id = htons(id);
				id += skb_shinfo(skb)->gso_segs;
			}

			if (gso_partial)
				tot_len = skb_shinfo(skb)->gso_size +
					  SKB_GSO_CB(skb)->data_offset +
					  skb->head - (unsigned char *)iph;
			else
				tot_len = skb->len - nhoff;
		} else {
			if (!fixedid)
				iph->id = htons(id++);
			tot_len = skb->len - nhoff;
		}
		iph->tot_len = htons(tot_len);
		ip_send_check(iph);
		if (encap)
			skb_reset_inner_headers(skb);
		skb->network_header = (u8 *)iph - skb->head;
		skb_reset_mac_len(skb);
	} while ((skb = skb->next));

out:
	return segs;
}

static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	const struct iphdr *iph;
	struct sk_buff *p;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header(skb, hlen, off);
	if (unlikely(!iph))
		goto out;

	proto = iph->protocol;

	ops = rcu_dereference(inet_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out;

	if (*(u8 *)iph != 0x45)
		goto out;

	if (ip_is_fragment(iph))
		goto out;

	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
		goto out;

	NAPI_GRO_CB(skb)->proto = proto;
	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (ntohl(*(__be32 *)&iph->id) & ~IP_DF));
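	/* Decoded: the (u16) cast keeps only the low 16 bits of each word,
	 * so flush fires when the IPv4 tot_len disagrees with the length
	 * GRO computed, or when any fragmentation bits other than DF
	 * (offset, MF, reserved) are set.
	 */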

	list_for_each_entry(p, head, list) {
		struct iphdr *iph2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct iphdr *)(p->data + off);
		/* The above works because, with the exception of the top
		 * (inner most) layer, we only aggregate pkts with the same
		 * hdr length so all the hdrs we'll need to verify will start
		 * at the same offset.
		 */
		if ((iph->protocol ^ iph2->protocol) |
		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	NAPI_GRO_CB(skb)->flush |= flush;
	NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;

	/* Note : No need to call skb_gro_postpull_rcsum() here,
	 * as we already checked checksum over ipv4 header was 0
	 */
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
				       ops->callbacks.gro_receive, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static struct sk_buff *ipip_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

#define SECONDS_PER_DAY	86400

/* inet_current_timestamp - Return IP network timestamp
 *
 * Return milliseconds since midnight in network byte order.
 */
__be32 inet_current_timestamp(void)
{
	u32 secs;
	u32 msecs;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	/* Get secs since midnight. */
	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
	/* Convert to msecs. */
	msecs = secs * MSEC_PER_SEC;
	/* Convert nsec to msec. */
	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;

	/* Convert to network byte order. */
	return htonl(msecs);
}
EXPORT_SYMBOL(inet_current_timestamp);
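
/* Worked example: at 01:02:03.004 UTC, secs since midnight is 3723, so the
 * value returned above is htonl(3723 * 1000 + 4) = htonl(3723004), the
 * "milliseconds since midnight UT" format used by RFC 791 timestamps.
 */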

int inet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	unsigned int family = READ_ONCE(sk->sk_family);

	if (family == AF_INET)
		return ip_recv_error(sk, msg, len);
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		return pingv6_ops.ipv6_recv_error(sk, msg, len);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(inet_recv_error);

int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	__be16 totlen = iph->tot_len;
	int proto = iph->protocol;
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph_set_totlen(iph, skb->len - nhoff);
	csum_replace2(&iph->check, totlen, iph->tot_len);

	ops = rcu_dereference(inet_offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out;

	/* Only need to add sizeof(*iph) to get to the next hdr below
	 * because any hdr with option will have been flushed in
	 * inet_gro_receive().
	 */
	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
			      tcp4_gro_complete, udp4_gro_complete,
			      skb, nhoff + sizeof(*iph));

out:
	return err;
}

static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return inet_gro_complete(skb, nhoff);
}

int inet_ctl_sock_create(struct sock **sk, unsigned short family,
			 unsigned short type, unsigned char protocol,
			 struct net *net)
{
	struct socket *sock;
	int rc = sock_create_kern(net, family, type, protocol, &sock);

	if (rc == 0) {
		*sk = sock->sk;
		(*sk)->sk_allocation = GFP_ATOMIC;
		(*sk)->sk_use_task_frag = false;
		/*
		 * Unhash it so that IP input processing does not even see it,
		 * we do not wish this socket to see incoming packets.
		 */
		(*sk)->sk_prot->unhash(*sk);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);

unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i)
		res += snmp_get_cpu_field(mib, i, offt);
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);

#if BITS_PER_LONG==32

u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
			 size_t syncp_offset)
{
	void *bhptr;
	struct u64_stats_sync *syncp;
	u64 v;
	unsigned int start;

	bhptr = per_cpu_ptr(mib, cpu);
	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
	do {
		start = u64_stats_fetch_begin(syncp);
		v = *(((u64 *)bhptr) + offt);
	} while (u64_stats_fetch_retry(syncp, start));

	return v;
}
EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);

u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
	u64 res = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
	}
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif
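
/* The u64_stats retry loop above is only compiled on 32-bit, where a u64
 * counter is updated as two word-sized stores and a reader can race with a
 * writer.  On 64-bit the plain per-cpu load in snmp_get_cpu_field() is
 * already atomic, so snmp_fold_field() suffices.
 */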

#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
	.handler =	igmp_rcv,
};
#endif

static const struct net_protocol icmp_protocol = {
	.handler =	icmp_rcv,
	.err_handler =	icmp_err,
	.no_policy =	1,
};

static __net_init int ipv4_mib_init_net(struct net *net)
{
	int i;

	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
	if (!net->mib.tcp_statistics)
		goto err_tcp_mib;
	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
	if (!net->mib.ip_statistics)
		goto err_ip_mib;

	for_each_possible_cpu(i) {
		struct ipstats_mib *af_inet_stats;
		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
		u64_stats_init(&af_inet_stats->syncp);
	}

	net->mib.net_statistics = alloc_percpu(struct linux_mib);
	if (!net->mib.net_statistics)
		goto err_net_mib;
	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udp_statistics)
		goto err_udp_mib;
	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udplite_statistics)
		goto err_udplite_mib;
	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
	if (!net->mib.icmp_statistics)
		goto err_icmp_mib;
	net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib);
	if (!net->mib.icmpmsg_statistics)
		goto err_icmpmsg_mib;

	tcp_mib_init(net);
	return 0;

err_icmpmsg_mib:
	free_percpu(net->mib.icmp_statistics);
err_icmp_mib:
	free_percpu(net->mib.udplite_statistics);
err_udplite_mib:
	free_percpu(net->mib.udp_statistics);
err_udp_mib:
	free_percpu(net->mib.net_statistics);
err_net_mib:
	free_percpu(net->mib.ip_statistics);
err_ip_mib:
	free_percpu(net->mib.tcp_statistics);
err_tcp_mib:
	return -ENOMEM;
}

static __net_exit void ipv4_mib_exit_net(struct net *net)
{
	kfree(net->mib.icmpmsg_statistics);
	free_percpu(net->mib.icmp_statistics);
	free_percpu(net->mib.udplite_statistics);
	free_percpu(net->mib.udp_statistics);
	free_percpu(net->mib.net_statistics);
	free_percpu(net->mib.ip_statistics);
	free_percpu(net->mib.tcp_statistics);
#ifdef CONFIG_MPTCP
	/* allocated on demand, see mptcp_init_sock() */
	free_percpu(net->mib.mptcp_statistics);
#endif
}

static __net_initdata struct pernet_operations ipv4_mib_ops = {
	.init = ipv4_mib_init_net,
	.exit = ipv4_mib_exit_net,
};

static int __init init_ipv4_mibs(void)
{
	return register_pernet_subsys(&ipv4_mib_ops);
}

static __net_init int inet_init_net(struct net *net)
{
	/*
	 * Set defaults for local port range
	 */
	net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u;
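	/* Encoding: the low 16 bits hold the first local port and the high
	 * 16 bits the last one, i.e. the default "32768 60999" visible in
	 * /proc/sys/net/ipv4/ip_local_port_range.
	 */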

	seqlock_init(&net->ipv4.ping_group_range.lock);
	/*
	 * Sane defaults - nobody may create ping sockets.
	 * Boot scripts should set this to distro-specific group.
	 */
	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);

	/* Default values for sysctl-controlled parameters.
	 * We set them here, in case sysctl is not compiled.
	 */
	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
	net->ipv4.sysctl_ip_fwd_update_priority = 1;
	net->ipv4.sysctl_ip_dynaddr = 0;
	net->ipv4.sysctl_ip_early_demux = 1;
	net->ipv4.sysctl_udp_early_demux = 1;
	net->ipv4.sysctl_tcp_early_demux = 1;
	net->ipv4.sysctl_nexthop_compat_mode = 1;
#ifdef CONFIG_SYSCTL
	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
#endif

	/* Some igmp sysctl, whose values are always used */
	net->ipv4.sysctl_igmp_max_memberships = 20;
	net->ipv4.sysctl_igmp_max_msf = 10;
	/* IGMP reports for link-local multicast groups are enabled by default */
	net->ipv4.sysctl_igmp_llm_reports = 1;
	net->ipv4.sysctl_igmp_qrv = 2;

	net->ipv4.sysctl_fib_notify_on_flag_change = 0;

	return 0;
}

static __net_initdata struct pernet_operations af_inet_ops = {
	.init = inet_init_net,
};

static int __init init_inet_pernet_ops(void)
{
	return register_pernet_subsys(&af_inet_ops);
}

static int ipv4_proc_init(void);

/*
 *	IP protocol layer initialiser
 */


static const struct net_offload ipip_offload = {
	.callbacks = {
		.gso_segment	= ipip_gso_segment,
		.gro_receive	= ipip_gro_receive,
		.gro_complete	= ipip_gro_complete,
	},
};

static int __init ipip_offload_init(void)
{
	return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
}

static int __init ipv4_offload_init(void)
{
	/*
	 * Add offloads
	 */
	if (udpv4_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (tcpv4_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipip_offload_init() < 0)
		pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);

	net_hotdata.ip_packet_offload = (struct packet_offload) {
		.type = cpu_to_be16(ETH_P_IP),
		.callbacks = {
			.gso_segment = inet_gso_segment,
			.gro_receive = inet_gro_receive,
			.gro_complete = inet_gro_complete,
		},
	};
	dev_add_offload(&net_hotdata.ip_packet_offload);
	return 0;
}

fs_initcall(ipv4_offload_init);

static struct packet_type ip_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.func = ip_rcv,
	.list_func = ip_list_rcv,
};

static int __init inet_init(void)
{
	struct inet_protosw *q;
	struct list_head *r;
	int rc;

	sock_skb_cb_check_size(sizeof(struct inet_skb_parm));

	raw_hashinfo_init(&raw_v4_hashinfo);

	rc = proto_register(&tcp_prot, 1);
	if (rc)
		goto out;

	rc = proto_register(&udp_prot, 1);
	if (rc)
		goto out_unregister_tcp_proto;

	rc = proto_register(&raw_prot, 1);
	if (rc)
		goto out_unregister_udp_proto;

	rc = proto_register(&ping_prot, 1);
	if (rc)
		goto out_unregister_raw_proto;

	/*
	 *	Tell SOCKET that we are alive...
	 */

	(void)sock_register(&inet_family_ops);

#ifdef CONFIG_SYSCTL
	ip_static_sysctl_init();
#endif

	/*
	 *	Add all the base protocols.
	 */

	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
		pr_crit("%s: Cannot add ICMP protocol\n", __func__);

	net_hotdata.udp_protocol = (struct net_protocol) {
		.handler =	udp_rcv,
		.err_handler =	udp_err,
		.no_policy =	1,
	};
	if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
		pr_crit("%s: Cannot add UDP protocol\n", __func__);

	net_hotdata.tcp_protocol = (struct net_protocol) {
		.handler	=	tcp_v4_rcv,
		.err_handler	=	tcp_v4_err,
		.no_policy	=	1,
		.icmp_strict_tag_validation = 1,
	};
	if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
		pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
	if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
		pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif

	/* Register the socket-side information for inet_create. */
	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
		INIT_LIST_HEAD(r);

	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
		inet_register_protosw(q);

	/*
	 *	Set the ARP module up
	 */

	arp_init();

	/*
	 *	Set the IP module up
	 */

	ip_init();

	/* Initialise per-cpu ipv4 mibs */
	if (init_ipv4_mibs())
		panic("%s: Cannot init ipv4 mibs\n", __func__);

	/* Setup TCP slab cache for open requests. */
	tcp_init();

	/* Setup UDP memory threshold */
	udp_init();

	/* Add UDP-Lite (RFC 3828) */
	udplite4_register();

	raw_init();

	ping_init();

	/*
	 *	Set the ICMP layer up
	 */

	if (icmp_init() < 0)
		panic("Failed to create the ICMP control socket.\n");

	/*
	 *	Initialise the multicast router
	 */
#if defined(CONFIG_IP_MROUTE)
	if (ip_mr_init())
		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif

	if (init_inet_pernet_ops())
		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);

	ipv4_proc_init();

	ipfrag_init();

	dev_add_pack(&ip_packet_type);

	ip_tunnel_core_init();

	rc = 0;
out:
	return rc;
out_unregister_raw_proto:
	proto_unregister(&raw_prot);
out_unregister_udp_proto:
	proto_unregister(&udp_prot);
out_unregister_tcp_proto:
	proto_unregister(&tcp_prot);
	goto out;
}

fs_initcall(inet_init);

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
	int rc = 0;

	if (raw_proc_init())
		goto out_raw;
	if (tcp4_proc_init())
		goto out_tcp;
	if (udp4_proc_init())
		goto out_udp;
	if (ping_proc_init())
		goto out_ping;
	if (ip_misc_proc_init())
		goto out_misc;
out:
	return rc;
out_misc:
	ping_proc_exit();
out_ping:
	udp4_proc_exit();
out_udp:
	tcp4_proc_exit();
out_tcp:
	raw_proc_exit();
out_raw:
	rc = -ENOMEM;
	goto out;
}

#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */