/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
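/* A reading of the 65535 + 2 offset above (not spelled out in the original
 * comment): it moves write_seq at least one full unscaled window past
 * tw_snd_nxt, so no segment from the previous incarnation of the port pair
 * can fall inside the new connection's sequence space even before PAWS
 * can reject it.
 */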
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
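/* Summary of the steps above: tcp_v4_connect() resolves a route for the
 * (possibly source-routed) destination, picks a source address if the
 * socket is unbound, moves to SYN-SENT before inet_hash_connect() selects
 * an ephemeral port, re-resolves the route with the final port pair, picks
 * the initial sequence number, and finally tcp_connect() emits the SYN.
 */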
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC 1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	WARN_ON(req->sk);

	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC 1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC 1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in each dark corner sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
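/* In the CHECKSUM_PARTIAL branch above only the pseudo-header sum is
 * stored (inverted) in th->check, and csum_start/csum_offset tell the
 * device, or a software fallback, where to finish the checksum on
 * transmit; the else branch computes the complete checksum immediately.
 */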
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code following below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
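/* Option block layout used above: each option is encoded as
 * NOP, NOP, <kind>, <length> followed by the option payload, which keeps
 * the options 32-bit aligned; rep.th.doff is then recomputed as the header
 * length in 32-bit words so the peer can locate the end of the options.
 */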
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
						 tcp_sk(sk)->snd_nxt;

	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
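/* Lifetime note for ireq->opt: the IP options are saved by
 * tcp_v4_init_req() below, and tcp_v4_syn_recv_sock() hands them over to
 * the child socket and clears the pointer, so the destructor above only
 * frees them when no child was ever created.
 */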
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk) ||
					   lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
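/* Illustrative sketch, not kernel code: this setsockopt() path is reached
 * from userspace via the TCP_MD5SIG socket option. The identifiers fd and
 * peer below are placeholders, not anything defined in this file:
 *
 *	struct tcp_md5sig md5 = { };
 *	struct sockaddr_in peer = { .sin_family = AF_INET };
 *
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, as handled above.
 */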
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
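/* The two ops tables above are what make tcp_conn_request()
 * address-family independent: the generic SYN handling calls back through
 * .route_req, .init_req, .init_seq and .send_synack for everything that
 * is IPv4 specific.
 */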
/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
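/* On tcp_v4_cookie_check() above: with syncookies a listener keeps no
 * request_sock state, so the only segment that can complete a handshake
 * is a bare ACK (no SYN); cookie_v4_check() then either rebuilds a socket
 * from the cookie or returns the listener unchanged.
 */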
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
						POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
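/* A note on the wake-up path above: when the first skb lands on the
 * prequeue the waiting reader is woken, and if no ACK is scheduled yet a
 * delayed-ACK timer of 3/4 of the minimum RTO is armed, bounding how long
 * a slow reader can delay the ACK that deferred processing would send.
 */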
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur. If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
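/* Iterator state used throughout this dump: st->bucket is the current
 * hash bucket, st->offset the position inside that bucket, and st->num a
 * running count of sockets emitted; tcp_seek_last_pos() below uses these
 * to resume cheaply between reads of the seq file.
 */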
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
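/*
 * tcp_proc_register() is how each address family exposes its socket
 * list: fill in a struct tcp_seq_afinfo and call it from a pernet
 * init hook.  tcp4_seq_afinfo/tcp4_proc_init_net further below are
 * the IPv4 users; tcp_ipv6.c does the same for AF_INET6.  A sketch
 * with hypothetical names:
 *
 *	static struct tcp_seq_afinfo foo_seq_afinfo = {
 *		.name		= "tcp_foo",
 *		.family		= AF_INET,
 *		.seq_fops	= &tcp_afinfo_seq_fops,
 *		.seq_ops	= {
 *			.show	= foo_seq_show,
 *		},
 *	};
 *
 *	err = tcp_proc_register(net, &foo_seq_afinfo);
 *
 * start/next/stop are filled in by tcp_proc_register() itself, so a
 * caller only supplies ->show.
 */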
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0,  /* open_requests have no inode */
		0,
		req);
}
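/*
 * get_tcp4_sock() below emits one line per full socket.  For
 * orientation, a LISTEN socket line looks roughly like this
 * (illustrative values; on a little-endian machine, so 0100007F
 * is 127.0.0.1 and port 0016 is 22):
 *
 *	0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 12345 1 ffff88003d3af3c0 100 0 0 10 0
 *
 * Addresses and ports print as raw hex (%08X/%04X); the timer fields
 * are in clock_t units via jiffies_delta_to_clock_t().
 */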
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
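/*
 * tcp_prot below is the IPv4 instance of struct proto.  It is not
 * registered here: af_inet.c calls proto_register(&tcp_prot, 1) and
 * maps socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) to it through an
 * inetsw entry along these lines (abridged sketch of the af_inet.c
 * table entry):
 *
 *	{
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_TCP,
 *		.prot     = &tcp_prot,
 *		.ops      = &inet_stream_ops,
 *	}
 *
 * so the handlers named in the structure are what a TCP socket call
 * ultimately dispatches to.
 */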
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}