/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * tcp.c, Code implementing the TCP protocol.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <socket_impl.h>
#include <socket_inet.h>
#include <sys/sysmacros.h>
#include <sys/promif.h>
#include <sys/socket.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <net/if_types.h>
#include <sys/salib.h>

#include "ipv4.h"
#include "ipv4_impl.h"
#include "mac.h"
#include "mac_impl.h"
#include "v4_sum_impl.h"
#include <sys/bootdebug.h>
#include "tcp_inet.h"
#include "tcp_sack.h"
#include <inet/common.h>
#include <inet/mib2.h>

/*
 * We need to redefine BUMP_MIB/UPDATE_MIB to not have DTrace probes.
 */
#undef BUMP_MIB
#define BUMP_MIB(x) (x)++

#undef UPDATE_MIB
#define UPDATE_MIB(x, y) x += y

/*
 * MIB-2 stuff for SNMP
 */
mib2_tcp_t tcp_mib;  /* SNMP fixed size info */

/* The TCP mib does not include the following errors. */
static uint_t tcp_cksum_errors;
static uint_t tcp_drops;

/* Macros for timestamp comparisons */
#define TSTMP_GEQ(a, b) ((int32_t)((a)-(b)) >= 0)
#define TSTMP_LT(a, b)  ((int32_t)((a)-(b)) < 0)

/*
 * Parameters for TCP Initial Send Sequence number (ISS) generation.
 * The ISS is calculated by adding three components: a time component
 * which grows by 1 every 4096 nanoseconds (versus every 4 microseconds
 * suggested by RFC 793, page 27);
 * a per-connection component which grows by 125000 for every new connection;
 * and an "extra" component that grows by a random amount centered
 * approximately on 64000.  This causes the ISS generator to cycle every
 * 4.89 hours if no TCP connections are made, and faster if connections are
 * made.
 */
#define ISS_INCR     250000
#define ISS_NSEC_SHT 0

static uint32_t tcp_iss_incr_extra;  /* Incremented for each connection */

#define TCP_XMIT_LOWATER 4096
#define TCP_XMIT_HIWATER 49152
#define TCP_RECV_LOWATER 2048
#define TCP_RECV_HIWATER 49152

/*
 * PAWS needs a timer for 24 days.  This is the number of ms in 24 days.
 */
#define PAWS_TIMEOUT ((uint32_t)(24*24*60*60*1000))

/*
 * TCP options struct returned from tcp_parse_options.
 */
typedef struct tcp_opt_s {
    uint32_t  tcp_opt_mss;
    uint32_t  tcp_opt_wscale;
    uint32_t  tcp_opt_ts_val;
    uint32_t  tcp_opt_ts_ecr;
    tcp_t     *tcp;
} tcp_opt_t;

/*
 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
 */

#ifdef _BIG_ENDIAN
#define TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
    (TCPOPT_TSTAMP << 8) | 10)
#else
#define TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
    (TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif

/*
 * Flags returned from tcp_parse_options.
 */
#define TCP_OPT_MSS_PRESENT     1
#define TCP_OPT_WSCALE_PRESENT  2
#define TCP_OPT_TSTAMP_PRESENT  4
#define TCP_OPT_SACK_OK_PRESENT 8
#define TCP_OPT_SACK_PRESENT    16

/* TCP option length */
#define TCPOPT_NOP_LEN          1
#define TCPOPT_MAXSEG_LEN       4
#define TCPOPT_WS_LEN           3
#define TCPOPT_REAL_WS_LEN      (TCPOPT_WS_LEN+1)
#define TCPOPT_TSTAMP_LEN       10
#define TCPOPT_REAL_TS_LEN      (TCPOPT_TSTAMP_LEN+2)
#define TCPOPT_SACK_OK_LEN      2
#define TCPOPT_REAL_SACK_OK_LEN (TCPOPT_SACK_OK_LEN+2)
#define TCPOPT_REAL_SACK_LEN    4
#define TCPOPT_MAX_SACK_LEN     36
#define TCPOPT_HEADER_LEN       2

/* TCP cwnd burst factor. */
#define TCP_CWND_INFINITE 65535
#define TCP_CWND_SS       3
#define TCP_CWND_NORMAL   5

/* Named Dispatch Parameter Management Structure */
typedef struct tcpparam_s {
    uint32_t  tcp_param_min;
    uint32_t  tcp_param_max;
    uint32_t  tcp_param_val;
    char      *tcp_param_name;
} tcpparam_t;

/* Max size IP datagram is 64k - 1 */
#define TCP_MSS_MAX_IPV4 (IP_MAXPACKET - (sizeof (struct ip) + \
    sizeof (tcph_t)))

/* Max of the above */
#define TCP_MSS_MAX TCP_MSS_MAX_IPV4

/* Largest TCP port number */
#define TCP_MAX_PORT (64 * 1024 - 1)

/* Round up the value to the nearest mss. */
#define MSS_ROUNDUP(value, mss) ((((value) - 1) / (mss) + 1) * (mss))

#define MS      1L
#define SECONDS (1000 * MS)
#define MINUTES (60 * SECONDS)
#define HOURS   (60 * MINUTES)
#define DAYS    (24 * HOURS)

/* All NDD params in the core TCP became static variables. */
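/*
 * (Assumed context: the standalone boot environment has no ndd(1M)-style
 * tuning interface, so the defaults below are effectively fixed at build
 * time.)
 */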
static int tcp_time_wait_interval = 1 * MINUTES;
static int tcp_conn_req_max_q = 128;
static int tcp_conn_req_max_q0 = 1024;
static int tcp_conn_req_min = 1;
static int tcp_conn_grace_period = 0 * SECONDS;
static int tcp_cwnd_max_ = 1024 * 1024;
static int tcp_smallest_nonpriv_port = 1024;
static int tcp_ip_abort_cinterval = 3 * MINUTES;
static int tcp_ip_abort_linterval = 3 * MINUTES;
static int tcp_ip_abort_interval = 8 * MINUTES;
static int tcp_ip_notify_cinterval = 10 * SECONDS;
static int tcp_ip_notify_interval = 10 * SECONDS;
static int tcp_ipv4_ttl = 64;
static int tcp_mss_def_ipv4 = 536;
static int tcp_mss_max_ipv4 = TCP_MSS_MAX_IPV4;
static int tcp_mss_min = 108;
static int tcp_naglim_def = (4*1024)-1;
static int tcp_rexmit_interval_initial = 3 * SECONDS;
static int tcp_rexmit_interval_max = 60 * SECONDS;
static int tcp_rexmit_interval_min = 400 * MS;
static int tcp_dupack_fast_retransmit = 3;
static int tcp_smallest_anon_port = 32 * 1024;
static int tcp_largest_anon_port = TCP_MAX_PORT;
static int tcp_xmit_lowat = TCP_XMIT_LOWATER;
static int tcp_recv_hiwat_minmss = 4;
static int tcp_fin_wait_2_flush_interval = 1 * MINUTES;
static int tcp_max_buf = 1024 * 1024;
static int tcp_wscale_always = 1;
static int tcp_tstamp_always = 1;
static int tcp_tstamp_if_wscale = 1;
static int tcp_rexmit_interval_extra = 0;
static int tcp_slow_start_after_idle = 2;
static int tcp_slow_start_initial = 2;
static int tcp_sack_permitted = 2;
static int tcp_ecn_permitted = 2;

/* Extra room to fit in headers. */
static uint_t tcp_wroff_xtra;

/* Hint for next port to try. */
static in_port_t tcp_next_port_to_try = 32*1024;

/*
 * Figure out the value of the window scale option.  Note that the rwnd is
 * ASSUMED to be rounded up to the nearest MSS before the calculation.
 * We cannot find the scale value and then do a round up of tcp_rwnd
 * because the scale value may not be correct after that.
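 * Illustrative example (assuming the usual TCP_MAXWIN of 65535): an rwnd
 * of 49152 needs no scaling (shift 0), while an rwnd of 256K yields a
 * shift count of 3, since 256K must be halved three times before it fits
 * in a 16-bit window field.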
 */
#define SET_WS_VALUE(tcp) \
{ \
    int i; \
    uint32_t rwnd = (tcp)->tcp_rwnd; \
    for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT; \
        i++, rwnd >>= 1) \
        ; \
    (tcp)->tcp_rcv_ws = i; \
}

/*
 * Set ECN capable transport (ECT) code point in IP header.
 *
 * Note that there are 2 ECT code points '01' and '10', which are called
 * ECT(1) and ECT(0) respectively.  Here we follow the original ECT code
 * point ECT(0) for TCP as described in RFC 2481.
 */
#define SET_ECT(tcp, iph) \
    if ((tcp)->tcp_ipversion == IPV4_VERSION) { \
        /* We need to clear the code point first. */ \
        ((struct ip *)(iph))->ip_tos &= 0xFC; \
        ((struct ip *)(iph))->ip_tos |= IPH_ECN_ECT0; \
    }

/*
 * The format argument to pass to tcp_display().
 * DISP_PORT_ONLY means that the returned string has only port info.
 * DISP_ADDR_AND_PORT means that the returned string also contains the
 * remote and local IP address.
 */
#define DISP_PORT_ONLY      1
#define DISP_ADDR_AND_PORT  2

/*
 * TCP reassembly macros.  We hide starting and ending sequence numbers in
 * b_next and b_prev of messages on the reassembly queue.  The messages are
 * chained using b_cont.  These macros are used in tcp_reass() so we don't
 * have to see the ugly casts and assignments.
 * Note: use uintptr_t to suppress the gcc warning.
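 * For example, tcp_reass() can tag a queued segment with its sequence
 * range via TCP_REASS_SET_SEQ(mp, seq) and TCP_REASS_SET_END(mp, seq + len),
 * and read the range back later with TCP_REASS_SEQ(mp) and TCP_REASS_END(mp).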
 */
#define TCP_REASS_SEQ(mp)        ((uint32_t)(uintptr_t)((mp)->b_next))
#define TCP_REASS_SET_SEQ(mp, u) ((mp)->b_next = \
    (mblk_t *)((uintptr_t)(u)))
#define TCP_REASS_END(mp)        ((uint32_t)(uintptr_t)((mp)->b_prev))
#define TCP_REASS_SET_END(mp, u) ((mp)->b_prev = \
    (mblk_t *)((uintptr_t)(u)))

#define TCP_TIMER_RESTART(tcp, intvl) \
    (tcp)->tcp_rto_timeout = prom_gettime() + intvl; \
    (tcp)->tcp_timer_running = B_TRUE;

static int tcp_accept_comm(tcp_t *, tcp_t *, mblk_t *, uint_t);
static mblk_t *tcp_ack_mp(tcp_t *);
static in_port_t tcp_bindi(in_port_t, in_addr_t *, boolean_t, boolean_t);
static uint16_t tcp_cksum(uint16_t *, uint32_t);
static void tcp_clean_death(int, tcp_t *, int err);
static tcp_t *tcp_conn_request(tcp_t *, mblk_t *mp, uint_t, uint_t);
static char *tcp_display(tcp_t *, char *, char);
static int tcp_drain_input(tcp_t *, int, int);
static void tcp_drain_needed(int, tcp_t *);
static boolean_t tcp_drop_q0(tcp_t *);
static mblk_t *tcp_get_seg_mp(tcp_t *, uint32_t, int32_t *);
static int tcp_header_len(struct inetgram *);
static in_port_t tcp_report_ports(uint16_t *, enum Ports);
static int tcp_input(int);
static void tcp_iss_init(tcp_t *);
static tcp_t *tcp_lookup_ipv4(struct ip *, tcpha_t *, int, int *);
static tcp_t *tcp_lookup_listener_ipv4(in_addr_t, in_port_t, int *);
static int tcp_conn_check(tcp_t *);
static int tcp_close(int);
static void tcp_close_detached(tcp_t *);
static void tcp_eager_cleanup(tcp_t *, boolean_t, int);
static void tcp_eager_unlink(tcp_t *);
static void tcp_free(tcp_t *);
static int tcp_header_init_ipv4(tcp_t *);
static void tcp_mss_set(tcp_t *, uint32_t);
static int tcp_parse_options(tcph_t *, tcp_opt_t *);
static boolean_t tcp_paws_check(tcp_t *, tcph_t *, tcp_opt_t *);
static void tcp_process_options(tcp_t *, tcph_t *);
static int tcp_random(void);
static void tcp_random_init(void);
static mblk_t *tcp_reass(tcp_t *, mblk_t *, uint32_t);
static void tcp_reass_elim_overlap(tcp_t *, mblk_t *);
static void tcp_rcv_drain(int sock_id, tcp_t *);
static void tcp_rcv_enqueue(tcp_t *, mblk_t *, uint_t);
static void tcp_rput_data(tcp_t *, mblk_t *, int);
static int tcp_rwnd_set(tcp_t *, uint32_t);
static int32_t tcp_sack_rxmit(tcp_t *, int);
static void tcp_set_cksum(mblk_t *);
static void tcp_set_rto(tcp_t *, int32_t);
static void tcp_ss_rexmit(tcp_t *, int);
static int tcp_state_wait(int, tcp_t *, int);
static void tcp_timer(tcp_t *, int);
static void tcp_time_wait_append(tcp_t *);
static void tcp_time_wait_collector(void);
static void tcp_time_wait_processing(tcp_t *, mblk_t *, uint32_t,
    uint32_t, int, tcph_t *, int sock_id);
static void tcp_time_wait_remove(tcp_t *);
static in_port_t tcp_update_next_port(in_port_t);
static int tcp_verify_cksum(mblk_t *);
static void tcp_wput_data(tcp_t *, mblk_t *, int);
static void tcp_xmit_ctl(char *, tcp_t *, mblk_t *, uint32_t, uint32_t,
    int, uint_t, int);
static void tcp_xmit_early_reset(char *, int, mblk_t *, uint32_t, uint32_t,
    int, uint_t);
static int tcp_xmit_end(tcp_t *, int);
static void tcp_xmit_listeners_reset(int, mblk_t *, uint_t);
static mblk_t *tcp_xmit_mp(tcp_t *, mblk_t *, int32_t, int32_t *,
    mblk_t **, uint32_t, boolean_t, uint32_t *, boolean_t);
static int tcp_init_values(tcp_t *, struct inetboot_socket *);

#if DEBUG > 1
#define TCP_DUMP_PACKET(str, mp) \
{ \
    int len = (mp)->b_wptr - (mp)->b_rptr; \
\
    printf("%s: dump TCP(%d): \n", (str), len); \
    hexdump((char *)(mp)->b_rptr, len); \
}
#else
#define TCP_DUMP_PACKET(str, mp)
#endif

#ifdef DEBUG
#define DEBUG_1(str, arg)               printf(str, (arg))
#define DEBUG_2(str, arg1, arg2)        printf(str, (arg1), (arg2))
#define DEBUG_3(str, arg1, arg2, arg3)  printf(str, (arg1), (arg2), (arg3))
#else
#define DEBUG_1(str, arg)
#define DEBUG_2(str, arg1, arg2)
#define DEBUG_3(str, arg1, arg2, arg3)
#endif

/* Whether it is the first time TCP is used. */
static boolean_t tcp_initialized = B_FALSE;

/* TCP time wait list. */
static tcp_t *tcp_time_wait_head;
static tcp_t *tcp_time_wait_tail;
static uint32_t tcp_cum_timewait;
/* When the tcp_time_wait_collector is run. */
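/*
 * This is an absolute time in milliseconds (as returned by prom_gettime());
 * TCP_RUN_TIME_WAIT_COLLECTOR() below invokes the collector once that time
 * has passed.
 */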
static uint32_t tcp_time_wait_runtime;

#define TCP_RUN_TIME_WAIT_COLLECTOR() \
    if (prom_gettime() > tcp_time_wait_runtime) \
        tcp_time_wait_collector();

/*
 * Accept will return with an error if there is no connection coming in
 * after this (in ms).
 */
static int tcp_accept_timeout = 60000;

/*
 * Initialize the TCP-specific parts of a socket.
 */
void
tcp_socket_init(struct inetboot_socket *isp)
{
    /* Do some initializations. */
    if (!tcp_initialized) {
        tcp_random_init();
        /* Extra head room for the MAC layer address. */
        if ((tcp_wroff_xtra = mac_get_hdr_len()) & 0x3) {
            tcp_wroff_xtra = (tcp_wroff_xtra & ~0x3) + 0x4;
        }
        /* Schedule the first time wait cleanup time */
        tcp_time_wait_runtime = prom_gettime() + tcp_time_wait_interval;
        tcp_initialized = B_TRUE;
    }
    TCP_RUN_TIME_WAIT_COLLECTOR();

    isp->proto = IPPROTO_TCP;
    isp->input[TRANSPORT_LVL] = tcp_input;
    /* Socket layer should call tcp_send() directly. */
    isp->output[TRANSPORT_LVL] = NULL;
    isp->close[TRANSPORT_LVL] = tcp_close;
    isp->headerlen[TRANSPORT_LVL] = tcp_header_len;
    isp->ports = tcp_report_ports;
    if ((isp->pcb = bkmem_alloc(sizeof (tcp_t))) == NULL) {
        errno = ENOBUFS;
        return;
    }
    if ((errno = tcp_init_values((tcp_t *)isp->pcb, isp)) != 0) {
        bkmem_free(isp->pcb, sizeof (tcp_t));
        return;
    }
    /*
     * This is set last because this field is used to determine if
     * a socket is in use or not.
     */
    isp->type = INETBOOT_STREAM;
}

/*
 * Return the size of a TCP header including TCP option.
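 * When a packet is supplied, the length is read from the data-offset field
 * of that packet's TCP header (via TCP_HDR_LENGTH()), so it includes any
 * options that are present; without a packet, the bare sizeof (tcph_t) is
 * returned.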
 */
static int
tcp_header_len(struct inetgram *igm)
{
    mblk_t *pkt;
    int ipvers;

    /* Just returns the standard TCP header without option */
    if (igm == NULL)
        return (sizeof (tcph_t));

    if ((pkt = igm->igm_mp) == NULL)
        return (0);

    ipvers = ((struct ip *)pkt->b_rptr)->ip_v;
    if (ipvers == IPV4_VERSION) {
        return (TCP_HDR_LENGTH((tcph_t *)(pkt + IPH_HDR_LENGTH(pkt))));
    } else {
        dprintf("tcp_header_len: non-IPv4 packet.\n");
        return (0);
    }
}

/*
 * Return the requested port number in network order.
 */
static in_port_t
tcp_report_ports(uint16_t *tcphp, enum Ports request)
{
    if (request == SOURCE)
        return (*(uint16_t *)(((tcph_t *)tcphp)->th_lport));
    return (*(uint16_t *)(((tcph_t *)tcphp)->th_fport));
}

/*
 * Because inetboot is not interrupt driven, TCP can only poll.  This
 * means that there can be packets stuck in the NIC buffer waiting to
 * be processed.  Thus we need to drain them before, for example, sending
 * anything because an ACK may actually be stuck there.
 *
 * The timeout argument determines how long we should wait for draining.
 */
static int
tcp_drain_input(tcp_t *tcp, int sock_id, int timeout)
{
    struct inetgram *in_gram;
    struct inetgram *old_in_gram;
    int old_timeout;
    mblk_t *mp;
    int i;

    dprintf("tcp_drain_input(%d): %s\n", sock_id,
        tcp_display(tcp, NULL, DISP_ADDR_AND_PORT));

    /*
     * Since the driver uses the in_timeout value in the socket
     * structure to determine the timeout value, we need to save
     * the original one so that we can restore that after draining.
     */
    old_timeout = sockets[sock_id].in_timeout;
    sockets[sock_id].in_timeout = timeout;

    /*
     * We do this because the input queue may have some user
     * data already.
     */
    old_in_gram = sockets[sock_id].inq;
    sockets[sock_id].inq = NULL;

    /* Go out and check the wire */
    for (i = MEDIA_LVL; i < TRANSPORT_LVL; i++) {
        if (sockets[sock_id].input[i] != NULL) {
            if (sockets[sock_id].input[i](sock_id) < 0) {
                sockets[sock_id].in_timeout = old_timeout;
                if (sockets[sock_id].inq != NULL)
                    nuke_grams(&sockets[sock_id].inq);
                sockets[sock_id].inq = old_in_gram;
                return (-1);
            }
        }
    }
#if DEBUG
    printf("tcp_drain_input: done with checking packets\n");
#endif
    while ((in_gram = sockets[sock_id].inq) != NULL) {
        /* Remove unknown inetgrams from the head of inq. */
        if (in_gram->igm_level != TRANSPORT_LVL) {
#if DEBUG
            printf("tcp_drain_input: unexpected packet "
                "level %d frame found\n", in_gram->igm_level);
#endif
            del_gram(&sockets[sock_id].inq, in_gram, B_TRUE);
            continue;
        }
        mp = in_gram->igm_mp;
        del_gram(&sockets[sock_id].inq, in_gram, B_FALSE);
        bkmem_free((caddr_t)in_gram, sizeof (struct inetgram));
        tcp_rput_data(tcp, mp, sock_id);
        sockets[sock_id].in_timeout = old_timeout;

        /*
         * The other side may have closed this connection or
         * RST us.  But we need to continue to process other
         * packets in the socket's queue because they may
         * belong to other TCP connections.
         */
        if (sockets[sock_id].pcb == NULL)
            tcp = NULL;
    }

    if (tcp == NULL || sockets[sock_id].pcb == NULL) {
        if (sockets[sock_id].so_error != 0)
            return (-1);
        else
            return (0);
    }
#if DEBUG
    printf("tcp_drain_input: done with processing packets\n");
#endif
    sockets[sock_id].in_timeout = old_timeout;
    sockets[sock_id].inq = old_in_gram;

    /*
     * Data may have been received so indicate it is available
     */
    tcp_drain_needed(sock_id, tcp);
    return (0);
}

/*
 * The receive entry point for the upper layer to call to get data.  Note
 * that this follows the current architecture in which the lower layer
 * receive routines have been called already.  Thus if the inq of the
 * socket is not NULL, the packets must be for us.
 */
static int
tcp_input(int sock_id)
{
    struct inetgram *in_gram;
    mblk_t *mp;
    tcp_t *tcp;

    TCP_RUN_TIME_WAIT_COLLECTOR();

    if ((tcp = sockets[sock_id].pcb) == NULL)
        return (-1);

    while ((in_gram = sockets[sock_id].inq) != NULL) {
        /* Remove unknown inetgrams from the head of inq. */
        if (in_gram->igm_level != TRANSPORT_LVL) {
#ifdef DEBUG
            printf("tcp_input: unexpected packet "
                "level %d frame found\n", in_gram->igm_level);
#endif
            del_gram(&sockets[sock_id].inq, in_gram, B_TRUE);
            continue;
        }
        mp = in_gram->igm_mp;
        del_gram(&sockets[sock_id].inq, in_gram, B_FALSE);
        bkmem_free((caddr_t)in_gram, sizeof (struct inetgram));
        tcp_rput_data(tcp, mp, sock_id);
        /* The TCP may be gone because it gets a RST. */
        if (sockets[sock_id].pcb == NULL)
            return (-1);
    }

    /* Flush the receive list. */
    if (tcp->tcp_rcv_list != NULL) {
        tcp_rcv_drain(sock_id, tcp);
    } else {
        /* The other side has closed the connection, report this up. */
        if (tcp->tcp_state == TCPS_CLOSE_WAIT) {
            sockets[sock_id].so_state |= SS_CANTRCVMORE;
            return (0);
        }
    }
    return (0);
}

/*
 * The send entry point for the upper layer to call to send data.  In order
 * to minimize changes to the core TCP code, we need to put the
 * data into mblks.
 */
int
tcp_send(int sock_id, tcp_t *tcp, const void *msg, int len)
{
    mblk_t *mp;
    mblk_t *head = NULL;
    mblk_t *tail;
    int mss = tcp->tcp_mss;
    int cnt = 0;
    int win_size;
    char *buf = (char *)msg;

    TCP_RUN_TIME_WAIT_COLLECTOR();

    /* We don't want to append a 0-size mblk. */
    if (len == 0)
        return (0);
    while (len > 0) {
        if (len < mss) {
            mss = len;
        }
        /*
         * If we cannot allocate more buffer, stop here and
         * the number of bytes buffered will be returned.
         *
         * Note that we follow the core TCP optimization that
         * each mblk contains only MSS bytes of data.
         */
        if ((mp = allocb(mss + tcp->tcp_ip_hdr_len +
            TCP_MAX_HDR_LENGTH + tcp_wroff_xtra, 0)) == NULL) {
            break;
        }
        mp->b_rptr += tcp->tcp_hdr_len + tcp_wroff_xtra;
        bcopy(buf, mp->b_rptr, mss);
        mp->b_wptr = mp->b_rptr + mss;
        buf += mss;
        cnt += mss;
        len -= mss;

        if (head == NULL) {
            head = mp;
            tail = mp;
        } else {
            tail->b_cont = mp;
            tail = mp;
        }
    }

    /*
     * Since inetboot is not interrupt driven, there may be
     * some ACKs in the MAC's buffer.  Drain them first,
     * otherwise, we may not be able to send.
     *
     * We expect an ACK in two cases:
     *
     * 1) We have un-ACK'ed data.
     *
     * 2) All ACK's have been received and the sender's window has been
     * closed.  We need an ACK back to open the window so that we can
     * send.  In this case, call tcp_drain_input() if the window size is
     * less than 2 * MSS.
     */

    /* window size = MIN(swnd, cwnd) - unacked bytes */
    win_size = (tcp->tcp_swnd > tcp->tcp_cwnd) ? tcp->tcp_cwnd :
        tcp->tcp_swnd;
    win_size -= tcp->tcp_snxt;
    win_size += tcp->tcp_suna;
    if (win_size < (2 * tcp->tcp_mss))
        if (tcp_drain_input(tcp, sock_id, 5) < 0)
            return (-1);

    tcp_wput_data(tcp, head, sock_id);
    return (cnt);
}

/* Free up all TCP related stuff */
static void
tcp_free(tcp_t *tcp)
{
    if (tcp->tcp_iphc != NULL) {
        bkmem_free((caddr_t)tcp->tcp_iphc, tcp->tcp_iphc_len);
        tcp->tcp_iphc = NULL;
    }
    if (tcp->tcp_xmit_head != NULL) {
        freemsg(tcp->tcp_xmit_head);
        tcp->tcp_xmit_head = NULL;
    }
    if (tcp->tcp_rcv_list != NULL) {
        freemsg(tcp->tcp_rcv_list);
        tcp->tcp_rcv_list = NULL;
    }
    if (tcp->tcp_reass_head != NULL) {
        freemsg(tcp->tcp_reass_head);
        tcp->tcp_reass_head = NULL;
    }
    if (tcp->tcp_sack_info != NULL) {
        bkmem_free((caddr_t)tcp->tcp_sack_info,
            sizeof (tcp_sack_info_t));
        tcp->tcp_sack_info = NULL;
    }
}

static void
tcp_close_detached(tcp_t *tcp)
{
    if (tcp->tcp_listener != NULL)
        tcp_eager_unlink(tcp);
    tcp_free(tcp);
    bkmem_free((caddr_t)tcp, sizeof (tcp_t));
}

/*
 * If we are an eager connection hanging off a listener that hasn't
 * formally accepted the connection yet, get off its list and blow off
 * any data that we have accumulated.
 */
static void
tcp_eager_unlink(tcp_t *tcp)
{
    tcp_t *listener = tcp->tcp_listener;

    assert(listener != NULL);
    if (tcp->tcp_eager_next_q0 != NULL) {
        assert(tcp->tcp_eager_prev_q0 != NULL);

        /* Remove the eager tcp from q0 */
        tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
            tcp->tcp_eager_prev_q0;
        tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
            tcp->tcp_eager_next_q0;
        listener->tcp_conn_req_cnt_q0--;
    } else {
        tcp_t **tcpp = &listener->tcp_eager_next_q;
        tcp_t *prev = NULL;

        for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) {
            if (tcpp[0] == tcp) {
                if (listener->tcp_eager_last_q == tcp) {
                    /*
                     * If we are unlinking the last
                     * element on the list, adjust
                     * tail pointer.  Set tail pointer
                     * to nil when list is empty.
                     */
                    assert(tcp->tcp_eager_next_q == NULL);
                    if (listener->tcp_eager_last_q ==
                        listener->tcp_eager_next_q) {
                        listener->tcp_eager_last_q =
                            NULL;
                    } else {
                        /*
                         * We won't get here if there
                         * is only one eager in the
                         * list.
                         */
                        assert(prev != NULL);
                        listener->tcp_eager_last_q =
                            prev;
                    }
                }
                tcpp[0] = tcp->tcp_eager_next_q;
                tcp->tcp_eager_next_q = NULL;
                tcp->tcp_eager_last_q = NULL;
                listener->tcp_conn_req_cnt_q--;
                break;
            }
            prev = tcpp[0];
        }
    }
    tcp->tcp_listener = NULL;
}

/*
 * Reset any eager connection hanging off this listener
 * and then reclaim its resources.
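 * Each eager on the q and q0 lists is sent a RST and then freed via
 * tcp_close_detached().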
 */
static void
tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only, int sock_id)
{
    tcp_t *eager;

    if (!q0_only) {
        /* First cleanup q */
        while ((eager = listener->tcp_eager_next_q) != NULL) {
            assert(listener->tcp_eager_last_q != NULL);
            tcp_xmit_ctl("tcp_eager_cleanup, can't wait",
                eager, NULL, eager->tcp_snxt, 0, TH_RST, 0,
                sock_id);
            tcp_close_detached(eager);
        }
        assert(listener->tcp_eager_last_q == NULL);
    }
    /* Then cleanup q0 */
    while ((eager = listener->tcp_eager_next_q0) != listener) {
        tcp_xmit_ctl("tcp_eager_cleanup, can't wait",
            eager, NULL, eager->tcp_snxt, 0, TH_RST, 0, sock_id);
        tcp_close_detached(eager);
    }
}

/*
 * To handle the shutdown request.  Called from shutdown().
 */
int
tcp_shutdown(int sock_id)
{
    tcp_t *tcp;

    DEBUG_1("tcp_shutdown: sock_id %x\n", sock_id);

    if ((tcp = sockets[sock_id].pcb) == NULL) {
        return (-1);
    }

    /*
     * Since inetboot is not interrupt driven, there may be
     * some ACKs in the MAC's buffer.  Drain them first,
     * otherwise, we may not be able to send.
     */
    if (tcp_drain_input(tcp, sock_id, 5) < 0) {
        /*
         * If we return now without freeing TCP, there will be
         * a memory leak.
         */
        if (sockets[sock_id].pcb != NULL)
            tcp_clean_death(sock_id, tcp, 0);
        return (-1);
    }

    DEBUG_1("tcp_shutdown: tcp_state %x\n", tcp->tcp_state);
    switch (tcp->tcp_state) {

    case TCPS_SYN_RCVD:
        /*
         * Shutdown during the connect 3-way handshake
         */
    case TCPS_ESTABLISHED:
        /*
         * Transmit the FIN,
         * wait for the FIN to be ACKed,
         * then remain in FIN_WAIT_2.
         */
        dprintf("tcp_shutdown: sending fin\n");
        if (tcp_xmit_end(tcp, sock_id) == 0 &&
            tcp_state_wait(sock_id, tcp, TCPS_FIN_WAIT_2) < 0) {
            /* During the wait, TCP may be gone... */
            if (sockets[sock_id].pcb == NULL)
                return (-1);
        }
        dprintf("tcp_shutdown: done\n");
        break;

    default:
        break;

    }
    return (0);
}

/* To handle closing of the socket */
static int
tcp_close(int sock_id)
{
    char *msg;
    tcp_t *tcp;
    int error = 0;

    if ((tcp = sockets[sock_id].pcb) == NULL) {
        return (-1);
    }

    TCP_RUN_TIME_WAIT_COLLECTOR();

    /*
     * Since inetboot is not interrupt driven, there may be
     * some ACKs in the MAC's buffer.  Drain them first,
     * otherwise, we may not be able to send.
     */
    if (tcp_drain_input(tcp, sock_id, 5) < 0) {
        /*
         * If we return now without freeing TCP, there will be
         * a memory leak.
         */
        if (sockets[sock_id].pcb != NULL)
            tcp_clean_death(sock_id, tcp, 0);
        return (-1);
    }

    if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
        /* Cleanup for listener */
        tcp_eager_cleanup(tcp, 0, sock_id);
    }

    msg = NULL;
    switch (tcp->tcp_state) {
    case TCPS_CLOSED:
    case TCPS_IDLE:
    case TCPS_BOUND:
    case TCPS_LISTEN:
        break;
    case TCPS_SYN_SENT:
        msg = "tcp_close, during connect";
        break;
    case TCPS_SYN_RCVD:
        /*
         * Close during the connect 3-way handshake
         * but here there may or may not be pending data
         * already on queue.  Process almost the same as in
         * the ESTABLISHED state.
         */
        /* FALLTHRU */
    default:
        /*
         * If SO_LINGER has set a zero linger time, abort the
         * connection with a reset.
         */
        if (tcp->tcp_linger && tcp->tcp_lingertime == 0) {
            msg = "tcp_close, zero lingertime";
            break;
        }

        /*
         * Abort connection if there is unread data queued.
         */
        if (tcp->tcp_rcv_list != NULL ||
            tcp->tcp_reass_head != NULL) {
            msg = "tcp_close, unread data";
            break;
        }
        if (tcp->tcp_state <= TCPS_LISTEN)
            break;

        /*
         * Transmit the FIN before detaching the tcp_t.
         * After tcp_detach returns this queue/perimeter
         * no longer owns the tcp_t thus others can modify it.
         * The TCP could be closed in tcp_state_wait called by
         * tcp_wput_data called by tcp_xmit_end.
         */
        (void) tcp_xmit_end(tcp, sock_id);
        if (sockets[sock_id].pcb == NULL)
            return (0);

        /*
         * If lingering on close then wait until the FIN is acked,
         * the SO_LINGER time passes, or a reset is sent/received.
         */
        if (tcp->tcp_linger && tcp->tcp_lingertime > 0 &&
            !(tcp->tcp_fin_acked) &&
            tcp->tcp_state >= TCPS_ESTABLISHED) {
            uint32_t stoptime;  /* in ms */

            tcp->tcp_client_errno = 0;
            stoptime = prom_gettime() +
                (tcp->tcp_lingertime * 1000);
            while (!(tcp->tcp_fin_acked) &&
                tcp->tcp_state >= TCPS_ESTABLISHED &&
                tcp->tcp_client_errno == 0 &&
                ((int32_t)(stoptime - prom_gettime()) > 0)) {
                if (tcp_drain_input(tcp, sock_id, 5) < 0) {
                    if (sockets[sock_id].pcb != NULL) {
                        tcp_clean_death(sock_id,
                            tcp, 0);
                    }
                    return (-1);
                }
            }
            tcp->tcp_client_errno = 0;
        }
        if (tcp_state_wait(sock_id, tcp, TCPS_TIME_WAIT) < 0) {
            /* During the wait, TCP may be gone... */
            if (sockets[sock_id].pcb == NULL)
                return (0);
            msg = "tcp_close, couldn't detach";
        } else {
            return (0);
        }
        break;
    }

    /* Something went wrong...  Send a RST and report the error */
    if (msg != NULL) {
        if (tcp->tcp_state == TCPS_ESTABLISHED ||
            tcp->tcp_state == TCPS_CLOSE_WAIT)
            BUMP_MIB(tcp_mib.tcpEstabResets);
        if (tcp->tcp_state == TCPS_SYN_SENT ||
            tcp->tcp_state == TCPS_SYN_RCVD)
            BUMP_MIB(tcp_mib.tcpAttemptFails);
        tcp_xmit_ctl(msg, tcp, NULL, tcp->tcp_snxt, 0, TH_RST, 0,
            sock_id);
    }

    tcp_free(tcp);
    bkmem_free((caddr_t)tcp, sizeof (tcp_t));
    sockets[sock_id].pcb = NULL;
    return (error);
}

/* To make an endpoint a listener. */
int
tcp_listen(int sock_id, int backlog)
{
    tcp_t *tcp;

    if ((tcp = (tcp_t *)(sockets[sock_id].pcb)) == NULL) {
        errno = EINVAL;
        return (-1);
    }

    /* We allow calling listen() multiple times to change the backlog. */
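    /*
     * The requested backlog is clamped below to the
     * [tcp_conn_req_min, tcp_conn_req_max_q] range.
     */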
*/ 10187c478bd9Sstevel@tonic-gate if (tcp->tcp_state > TCPS_LISTEN || tcp->tcp_state < TCPS_BOUND) { 10197c478bd9Sstevel@tonic-gate errno = EOPNOTSUPP; 10207c478bd9Sstevel@tonic-gate return (-1); 10217c478bd9Sstevel@tonic-gate } 10227c478bd9Sstevel@tonic-gate /* The following initialization should only be done once. */ 10237c478bd9Sstevel@tonic-gate if (tcp->tcp_state != TCPS_LISTEN) { 10247c478bd9Sstevel@tonic-gate tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp; 10257c478bd9Sstevel@tonic-gate tcp->tcp_eager_next_q = NULL; 10267c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_LISTEN; 10277c478bd9Sstevel@tonic-gate tcp->tcp_second_ctimer_threshold = tcp_ip_abort_linterval; 10287c478bd9Sstevel@tonic-gate } 10297c478bd9Sstevel@tonic-gate if ((tcp->tcp_conn_req_max = backlog) > tcp_conn_req_max_q) { 10307c478bd9Sstevel@tonic-gate tcp->tcp_conn_req_max = tcp_conn_req_max_q; 10317c478bd9Sstevel@tonic-gate } 10327c478bd9Sstevel@tonic-gate if (tcp->tcp_conn_req_max < tcp_conn_req_min) { 10337c478bd9Sstevel@tonic-gate tcp->tcp_conn_req_max = tcp_conn_req_min; 10347c478bd9Sstevel@tonic-gate } 10357c478bd9Sstevel@tonic-gate return (0); 10367c478bd9Sstevel@tonic-gate } 10377c478bd9Sstevel@tonic-gate 10387c478bd9Sstevel@tonic-gate /* To accept connections. */ 10397c478bd9Sstevel@tonic-gate int 10407c478bd9Sstevel@tonic-gate tcp_accept(int sock_id, struct sockaddr *addr, socklen_t *addr_len) 10417c478bd9Sstevel@tonic-gate { 10427c478bd9Sstevel@tonic-gate tcp_t *listener; 10437c478bd9Sstevel@tonic-gate tcp_t *eager; 10447c478bd9Sstevel@tonic-gate int sd, new_sock_id; 10457c478bd9Sstevel@tonic-gate struct sockaddr_in *new_addr = (struct sockaddr_in *)addr; 10467c478bd9Sstevel@tonic-gate int timeout; 10477c478bd9Sstevel@tonic-gate 10487c478bd9Sstevel@tonic-gate /* Sanity check. */ 10497c478bd9Sstevel@tonic-gate if ((listener = (tcp_t *)(sockets[sock_id].pcb)) == NULL || 10507c478bd9Sstevel@tonic-gate new_addr == NULL || addr_len == NULL || 10517c478bd9Sstevel@tonic-gate *addr_len < sizeof (struct sockaddr_in) || 10527c478bd9Sstevel@tonic-gate listener->tcp_state != TCPS_LISTEN) { 10537c478bd9Sstevel@tonic-gate errno = EINVAL; 10547c478bd9Sstevel@tonic-gate return (-1); 10557c478bd9Sstevel@tonic-gate } 10567c478bd9Sstevel@tonic-gate 10577c478bd9Sstevel@tonic-gate if (sockets[sock_id].in_timeout > tcp_accept_timeout) 10587c478bd9Sstevel@tonic-gate timeout = prom_gettime() + sockets[sock_id].in_timeout; 10597c478bd9Sstevel@tonic-gate else 10607c478bd9Sstevel@tonic-gate timeout = prom_gettime() + tcp_accept_timeout; 10617c478bd9Sstevel@tonic-gate while (listener->tcp_eager_next_q == NULL && 10627c478bd9Sstevel@tonic-gate timeout > prom_gettime()) { 10637c478bd9Sstevel@tonic-gate #if DEBUG 10647c478bd9Sstevel@tonic-gate printf("tcp_accept: Waiting in tcp_accept()\n"); 10657c478bd9Sstevel@tonic-gate #endif 10667c478bd9Sstevel@tonic-gate if (tcp_drain_input(listener, sock_id, 5) < 0) { 10677c478bd9Sstevel@tonic-gate return (-1); 10687c478bd9Sstevel@tonic-gate } 10697c478bd9Sstevel@tonic-gate } 10707c478bd9Sstevel@tonic-gate /* If there is an eager, don't timeout... 
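 * That is, the loop above keeps calling tcp_drain_input() until an
 * eager appears on tcp_eager_next_q or the deadline passes, where the
 * deadline ("timeout" above) is prom_gettime() plus the larger of the
 * socket's own in_timeout and tcp_accept_timeout.  The check below
 * only fails with ETIMEDOUT when the deadline has expired and the
 * accept queue is still empty, roughly (sketch):
 *
 *	if (deadline <= now && listener->tcp_eager_next_q == NULL)
 *		fail with ETIMEDOUT;
 *
 * so a connection that completed just as time ran out is still
 * accepted.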
*/ 10717c478bd9Sstevel@tonic-gate if (timeout <= prom_gettime() && listener->tcp_eager_next_q == NULL) { 10727c478bd9Sstevel@tonic-gate #if DEBUG 10737c478bd9Sstevel@tonic-gate printf("tcp_accept: timeout\n"); 10747c478bd9Sstevel@tonic-gate #endif 10757c478bd9Sstevel@tonic-gate errno = ETIMEDOUT; 10767c478bd9Sstevel@tonic-gate return (-1); 10777c478bd9Sstevel@tonic-gate } 10787c478bd9Sstevel@tonic-gate #if DEBUG 10797c478bd9Sstevel@tonic-gate printf("tcp_accept: got a connection\n"); 10807c478bd9Sstevel@tonic-gate #endif 10817c478bd9Sstevel@tonic-gate 10827c478bd9Sstevel@tonic-gate /* Now create the socket for this new TCP. */ 10837c478bd9Sstevel@tonic-gate if ((sd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { 10847c478bd9Sstevel@tonic-gate return (-1); 10857c478bd9Sstevel@tonic-gate } 10867c478bd9Sstevel@tonic-gate if ((new_sock_id = so_check_fd(sd, &errno)) == -1) 10877c478bd9Sstevel@tonic-gate /* This should not happen! */ 10887c478bd9Sstevel@tonic-gate prom_panic("so_check_fd() fails in tcp_accept()"); 10897c478bd9Sstevel@tonic-gate /* Free the TCP PCB in the original socket. */ 10907c478bd9Sstevel@tonic-gate bkmem_free((caddr_t)(sockets[new_sock_id].pcb), sizeof (tcp_t)); 10917c478bd9Sstevel@tonic-gate /* Dequeue the eager and attach it to the socket. */ 10927c478bd9Sstevel@tonic-gate eager = listener->tcp_eager_next_q; 10937c478bd9Sstevel@tonic-gate listener->tcp_eager_next_q = eager->tcp_eager_next_q; 10947c478bd9Sstevel@tonic-gate if (listener->tcp_eager_last_q == eager) 10957c478bd9Sstevel@tonic-gate listener->tcp_eager_last_q = NULL; 10967c478bd9Sstevel@tonic-gate eager->tcp_eager_next_q = NULL; 10977c478bd9Sstevel@tonic-gate sockets[new_sock_id].pcb = eager; 10987c478bd9Sstevel@tonic-gate listener->tcp_conn_req_cnt_q--; 10997c478bd9Sstevel@tonic-gate 11007c478bd9Sstevel@tonic-gate /* Copy in the address info. */ 11017c478bd9Sstevel@tonic-gate bcopy(&eager->tcp_remote, &new_addr->sin_addr.s_addr, 11027c478bd9Sstevel@tonic-gate sizeof (in_addr_t)); 11037c478bd9Sstevel@tonic-gate bcopy(&eager->tcp_fport, &new_addr->sin_port, sizeof (in_port_t)); 11047c478bd9Sstevel@tonic-gate new_addr->sin_family = AF_INET; 11057c478bd9Sstevel@tonic-gate 11067c478bd9Sstevel@tonic-gate #ifdef DEBUG 11077c478bd9Sstevel@tonic-gate printf("tcp_accept(), new sock_id: %d\n", sd); 11087c478bd9Sstevel@tonic-gate #endif 11097c478bd9Sstevel@tonic-gate return (sd); 11107c478bd9Sstevel@tonic-gate } 11117c478bd9Sstevel@tonic-gate 11127c478bd9Sstevel@tonic-gate /* Update the next anonymous port to use. */ 11137c478bd9Sstevel@tonic-gate static in_port_t 11147c478bd9Sstevel@tonic-gate tcp_update_next_port(in_port_t port) 11157c478bd9Sstevel@tonic-gate { 11167c478bd9Sstevel@tonic-gate /* Don't allow the port to fall out of the anonymous port range. */ 11177c478bd9Sstevel@tonic-gate if (port < tcp_smallest_anon_port || port > tcp_largest_anon_port) 11187c478bd9Sstevel@tonic-gate port = (in_port_t)tcp_smallest_anon_port; 11197c478bd9Sstevel@tonic-gate 11207c478bd9Sstevel@tonic-gate if (port < tcp_smallest_nonpriv_port) 11217c478bd9Sstevel@tonic-gate port = (in_port_t)tcp_smallest_nonpriv_port; 11227c478bd9Sstevel@tonic-gate return (port); 11237c478bd9Sstevel@tonic-gate } 11247c478bd9Sstevel@tonic-gate 11257c478bd9Sstevel@tonic-gate /* To check whether a bind to a port is allowed. 
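 * tcp_bindi() below returns the port that may actually be used, or 0
 * if the request cannot be satisfied.  It scans every INETBOOT_STREAM
 * socket for a clash on the local port; a clash is tolerated when the
 * caller asked for SO_REUSEADDR and the existing TCP is already past
 * TCPS_LISTEN, or when both endpoints are bound to specific but
 * different addresses.  On a real clash it either gives up right away
 * (bind_to_req_port_only) or moves on to the next anonymous port and
 * retries, at most (tcp_largest_anon_port - tcp_smallest_anon_port)
 * times.  Typical use, as in tcp_bind() further down (sketch):
 *
 *	allocated_port = tcp_bindi(requested_port, &src, reuseaddr, B_TRUE);
 *	if (allocated_port == 0)
 *		errno = EADDRINUSE;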
*/ 11267c478bd9Sstevel@tonic-gate static in_port_t 11277c478bd9Sstevel@tonic-gate tcp_bindi(in_port_t port, in_addr_t *addr, boolean_t reuseaddr, 11287c478bd9Sstevel@tonic-gate boolean_t bind_to_req_port_only) 11297c478bd9Sstevel@tonic-gate { 11307c478bd9Sstevel@tonic-gate int i, count; 11317c478bd9Sstevel@tonic-gate tcp_t *tcp; 11327c478bd9Sstevel@tonic-gate 11337c478bd9Sstevel@tonic-gate count = tcp_largest_anon_port - tcp_smallest_anon_port; 11347c478bd9Sstevel@tonic-gate try_again: 11357c478bd9Sstevel@tonic-gate for (i = 0; i < MAXSOCKET; i++) { 11367c478bd9Sstevel@tonic-gate if (sockets[i].type != INETBOOT_STREAM || 11377c478bd9Sstevel@tonic-gate ((tcp = (tcp_t *)sockets[i].pcb) == NULL) || 11387c478bd9Sstevel@tonic-gate ntohs(tcp->tcp_lport) != port) { 11397c478bd9Sstevel@tonic-gate continue; 11407c478bd9Sstevel@tonic-gate } 11417c478bd9Sstevel@tonic-gate /* 11427c478bd9Sstevel@tonic-gate * Both TCPs have the same port. If SO_REUSEDADDR is 11437c478bd9Sstevel@tonic-gate * set and the bound TCP has a state greater than 11447c478bd9Sstevel@tonic-gate * TCPS_LISTEN, it is fine. 11457c478bd9Sstevel@tonic-gate */ 11467c478bd9Sstevel@tonic-gate if (reuseaddr && tcp->tcp_state > TCPS_LISTEN) { 11477c478bd9Sstevel@tonic-gate continue; 11487c478bd9Sstevel@tonic-gate } 11497c478bd9Sstevel@tonic-gate if (tcp->tcp_bound_source != INADDR_ANY && 11507c478bd9Sstevel@tonic-gate *addr != INADDR_ANY && 11517c478bd9Sstevel@tonic-gate tcp->tcp_bound_source != *addr) { 11527c478bd9Sstevel@tonic-gate continue; 11537c478bd9Sstevel@tonic-gate } 11547c478bd9Sstevel@tonic-gate if (bind_to_req_port_only) { 11557c478bd9Sstevel@tonic-gate return (0); 11567c478bd9Sstevel@tonic-gate } 11577c478bd9Sstevel@tonic-gate if (--count > 0) { 11587c478bd9Sstevel@tonic-gate port = tcp_update_next_port(++port); 11597c478bd9Sstevel@tonic-gate goto try_again; 11607c478bd9Sstevel@tonic-gate } else { 11617c478bd9Sstevel@tonic-gate return (0); 11627c478bd9Sstevel@tonic-gate } 11637c478bd9Sstevel@tonic-gate } 11647c478bd9Sstevel@tonic-gate return (port); 11657c478bd9Sstevel@tonic-gate } 11667c478bd9Sstevel@tonic-gate 11677c478bd9Sstevel@tonic-gate /* To handle the bind request. */ 11687c478bd9Sstevel@tonic-gate int 11697c478bd9Sstevel@tonic-gate tcp_bind(int sock_id) 11707c478bd9Sstevel@tonic-gate { 11717c478bd9Sstevel@tonic-gate tcp_t *tcp; 11727c478bd9Sstevel@tonic-gate in_port_t requested_port, allocated_port; 11737c478bd9Sstevel@tonic-gate boolean_t bind_to_req_port_only; 11747c478bd9Sstevel@tonic-gate boolean_t reuseaddr; 11757c478bd9Sstevel@tonic-gate 11767c478bd9Sstevel@tonic-gate if ((tcp = (tcp_t *)sockets[sock_id].pcb) == NULL) { 11777c478bd9Sstevel@tonic-gate errno = EINVAL; 11787c478bd9Sstevel@tonic-gate return (-1); 11797c478bd9Sstevel@tonic-gate } 11807c478bd9Sstevel@tonic-gate 11817c478bd9Sstevel@tonic-gate if (tcp->tcp_state >= TCPS_BOUND) { 11827c478bd9Sstevel@tonic-gate /* We don't allow multiple bind(). */ 11837c478bd9Sstevel@tonic-gate errno = EPROTO; 11847c478bd9Sstevel@tonic-gate return (-1); 11857c478bd9Sstevel@tonic-gate } 11867c478bd9Sstevel@tonic-gate 11877c478bd9Sstevel@tonic-gate requested_port = ntohs(sockets[sock_id].bind.sin_port); 11887c478bd9Sstevel@tonic-gate 11897c478bd9Sstevel@tonic-gate /* The bound source can be INADDR_ANY. 
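 * A sin_port of zero below means "pick an ephemeral port":
 * tcp_update_next_port() keeps the candidate inside the anonymous
 * range (and above tcp_smallest_nonpriv_port), and tcp_bindi() then
 * walks forward from it until a free port is found or the range is
 * exhausted.  For example, if tcp_next_port_to_try were 38000 and
 * that port were already taken, the first free port above it in the
 * range would be chosen instead (hypothetical numbers).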
*/ 11907c478bd9Sstevel@tonic-gate tcp->tcp_bound_source = sockets[sock_id].bind.sin_addr.s_addr; 11917c478bd9Sstevel@tonic-gate 11927c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_src.s_addr = tcp->tcp_bound_source; 11937c478bd9Sstevel@tonic-gate 11947c478bd9Sstevel@tonic-gate /* Verify the port is available. */ 11957c478bd9Sstevel@tonic-gate if (requested_port == 0) 11967c478bd9Sstevel@tonic-gate bind_to_req_port_only = B_FALSE; 11977c478bd9Sstevel@tonic-gate else /* T_BIND_REQ and requested_port != 0 */ 11987c478bd9Sstevel@tonic-gate bind_to_req_port_only = B_TRUE; 11997c478bd9Sstevel@tonic-gate 12007c478bd9Sstevel@tonic-gate if (requested_port == 0) { 12017c478bd9Sstevel@tonic-gate requested_port = tcp_update_next_port(++tcp_next_port_to_try); 12027c478bd9Sstevel@tonic-gate } 12037c478bd9Sstevel@tonic-gate reuseaddr = sockets[sock_id].so_opt & SO_REUSEADDR; 12047c478bd9Sstevel@tonic-gate allocated_port = tcp_bindi(requested_port, &(tcp->tcp_bound_source), 12057c478bd9Sstevel@tonic-gate reuseaddr, bind_to_req_port_only); 12067c478bd9Sstevel@tonic-gate 12077c478bd9Sstevel@tonic-gate if (allocated_port == 0) { 12087c478bd9Sstevel@tonic-gate errno = EADDRINUSE; 12097c478bd9Sstevel@tonic-gate return (-1); 12107c478bd9Sstevel@tonic-gate } 12117c478bd9Sstevel@tonic-gate tcp->tcp_lport = htons(allocated_port); 12127c478bd9Sstevel@tonic-gate *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport; 12137c478bd9Sstevel@tonic-gate sockets[sock_id].bind.sin_port = tcp->tcp_lport; 12147c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_BOUND; 12157c478bd9Sstevel@tonic-gate return (0); 12167c478bd9Sstevel@tonic-gate } 12177c478bd9Sstevel@tonic-gate 12187c478bd9Sstevel@tonic-gate /* 12197c478bd9Sstevel@tonic-gate * Check for duplicate TCP connections. 12207c478bd9Sstevel@tonic-gate */ 12217c478bd9Sstevel@tonic-gate static int 12227c478bd9Sstevel@tonic-gate tcp_conn_check(tcp_t *tcp) 12237c478bd9Sstevel@tonic-gate { 12247c478bd9Sstevel@tonic-gate int i; 12257c478bd9Sstevel@tonic-gate tcp_t *tmp_tcp; 12267c478bd9Sstevel@tonic-gate 12277c478bd9Sstevel@tonic-gate for (i = 0; i < MAXSOCKET; i++) { 12287c478bd9Sstevel@tonic-gate if (sockets[i].type != INETBOOT_STREAM) 12297c478bd9Sstevel@tonic-gate continue; 12307c478bd9Sstevel@tonic-gate /* Socket may not be closed but the TCP can be gone. */ 12317c478bd9Sstevel@tonic-gate if ((tmp_tcp = (tcp_t *)sockets[i].pcb) == NULL) 12327c478bd9Sstevel@tonic-gate continue; 12337c478bd9Sstevel@tonic-gate /* We only care about TCP in states later than SYN_SENT. */ 12347c478bd9Sstevel@tonic-gate if (tmp_tcp->tcp_state < TCPS_SYN_SENT) 12357c478bd9Sstevel@tonic-gate continue; 12367c478bd9Sstevel@tonic-gate if (tmp_tcp->tcp_lport != tcp->tcp_lport || 12377c478bd9Sstevel@tonic-gate tmp_tcp->tcp_fport != tcp->tcp_fport || 12387c478bd9Sstevel@tonic-gate tmp_tcp->tcp_bound_source != tcp->tcp_bound_source || 12397c478bd9Sstevel@tonic-gate tmp_tcp->tcp_remote != tcp->tcp_remote) { 12407c478bd9Sstevel@tonic-gate continue; 12417c478bd9Sstevel@tonic-gate } else { 12427c478bd9Sstevel@tonic-gate return (-1); 12437c478bd9Sstevel@tonic-gate } 12447c478bd9Sstevel@tonic-gate } 12457c478bd9Sstevel@tonic-gate return (0); 12467c478bd9Sstevel@tonic-gate } 12477c478bd9Sstevel@tonic-gate 12487c478bd9Sstevel@tonic-gate /* To handle a connect request. 
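 * In outline, the active open below:
 *
 *	1. rejects INADDR_ANY, multicast and all-ones broadcast
 *	   destinations (subnet broadcasts cannot be detected here);
 *	2. picks a local address with ipv4_getipaddr() if the endpoint
 *	   is not yet bound;
 *	3. refuses self-connects and exact duplicates of an existing
 *	   connection (tcp_conn_check());
 *	4. sizes the receive window and decides whether to offer window
 *	   scaling, timestamps and SACK in the SYN;
 *	5. builds the SYN with tcp_xmit_mp(), sends it through
 *	   ipv4_tcp_output(), and blocks in tcp_state_wait(sock_id, tcp,
 *	   TCPS_ESTABLISHED) until the handshake completes or fails.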
*/ 12497c478bd9Sstevel@tonic-gate int 12507c478bd9Sstevel@tonic-gate tcp_connect(int sock_id) 12517c478bd9Sstevel@tonic-gate { 12527c478bd9Sstevel@tonic-gate tcp_t *tcp; 12537c478bd9Sstevel@tonic-gate in_addr_t dstaddr; 12547c478bd9Sstevel@tonic-gate in_port_t dstport; 12557c478bd9Sstevel@tonic-gate tcph_t *tcph; 12567c478bd9Sstevel@tonic-gate int mss; 12577c478bd9Sstevel@tonic-gate mblk_t *syn_mp; 12587c478bd9Sstevel@tonic-gate 12597c478bd9Sstevel@tonic-gate if ((tcp = (tcp_t *)(sockets[sock_id].pcb)) == NULL) { 12607c478bd9Sstevel@tonic-gate errno = EINVAL; 12617c478bd9Sstevel@tonic-gate return (-1); 12627c478bd9Sstevel@tonic-gate } 12637c478bd9Sstevel@tonic-gate 12647c478bd9Sstevel@tonic-gate TCP_RUN_TIME_WAIT_COLLECTOR(); 12657c478bd9Sstevel@tonic-gate 12667c478bd9Sstevel@tonic-gate dstaddr = sockets[sock_id].remote.sin_addr.s_addr; 12677c478bd9Sstevel@tonic-gate dstport = sockets[sock_id].remote.sin_port; 12687c478bd9Sstevel@tonic-gate 12697c478bd9Sstevel@tonic-gate /* 12707c478bd9Sstevel@tonic-gate * Check for attempt to connect to INADDR_ANY or non-unicast addrress. 12717c478bd9Sstevel@tonic-gate * We don't have enough info to check for broadcast addr, except 12727c478bd9Sstevel@tonic-gate * for the all 1 broadcast. 12737c478bd9Sstevel@tonic-gate */ 12747c478bd9Sstevel@tonic-gate if (dstaddr == INADDR_ANY || IN_CLASSD(ntohl(dstaddr)) || 12757c478bd9Sstevel@tonic-gate dstaddr == INADDR_BROADCAST) { 12767c478bd9Sstevel@tonic-gate /* 12777c478bd9Sstevel@tonic-gate * SunOS 4.x and 4.3 BSD allow an application 12787c478bd9Sstevel@tonic-gate * to connect a TCP socket to INADDR_ANY. 12797c478bd9Sstevel@tonic-gate * When they do this, the kernel picks the 12807c478bd9Sstevel@tonic-gate * address of one interface and uses it 12817c478bd9Sstevel@tonic-gate * instead. The kernel usually ends up 12827c478bd9Sstevel@tonic-gate * picking the address of the loopback 12837c478bd9Sstevel@tonic-gate * interface. This is an undocumented feature. 12847c478bd9Sstevel@tonic-gate * However, we provide the same thing here 12857c478bd9Sstevel@tonic-gate * in order to have source and binary 12867c478bd9Sstevel@tonic-gate * compatibility with SunOS 4.x. 12877c478bd9Sstevel@tonic-gate * Update the T_CONN_REQ (sin/sin6) since it is used to 12887c478bd9Sstevel@tonic-gate * generate the T_CONN_CON. 12897c478bd9Sstevel@tonic-gate * 12907c478bd9Sstevel@tonic-gate * Fail this for inetboot TCP. 12917c478bd9Sstevel@tonic-gate */ 12927c478bd9Sstevel@tonic-gate errno = EINVAL; 12937c478bd9Sstevel@tonic-gate return (-1); 12947c478bd9Sstevel@tonic-gate } 12957c478bd9Sstevel@tonic-gate 12967c478bd9Sstevel@tonic-gate /* It is not bound to any address yet... */ 12977c478bd9Sstevel@tonic-gate if (tcp->tcp_bound_source == INADDR_ANY) { 12987c478bd9Sstevel@tonic-gate ipv4_getipaddr(&(sockets[sock_id].bind.sin_addr)); 12997c478bd9Sstevel@tonic-gate /* We don't have an address! */ 13007c478bd9Sstevel@tonic-gate if (ntohl(sockets[sock_id].bind.sin_addr.s_addr) == 13017c478bd9Sstevel@tonic-gate INADDR_ANY) { 13027c478bd9Sstevel@tonic-gate errno = EPROTO; 13037c478bd9Sstevel@tonic-gate return (-1); 13047c478bd9Sstevel@tonic-gate } 13057c478bd9Sstevel@tonic-gate tcp->tcp_bound_source = sockets[sock_id].bind.sin_addr.s_addr; 13067c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_src.s_addr = tcp->tcp_bound_source; 13077c478bd9Sstevel@tonic-gate } 13087c478bd9Sstevel@tonic-gate 13097c478bd9Sstevel@tonic-gate /* 13107c478bd9Sstevel@tonic-gate * Don't let an endpoint connect to itself. 
13117c478bd9Sstevel@tonic-gate */ 13127c478bd9Sstevel@tonic-gate if (dstaddr == tcp->tcp_ipha->ip_src.s_addr && 13137c478bd9Sstevel@tonic-gate dstport == tcp->tcp_lport) { 13147c478bd9Sstevel@tonic-gate errno = EINVAL; 13157c478bd9Sstevel@tonic-gate return (-1); 13167c478bd9Sstevel@tonic-gate } 13177c478bd9Sstevel@tonic-gate 13187c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_dst.s_addr = dstaddr; 13197c478bd9Sstevel@tonic-gate tcp->tcp_remote = dstaddr; 13207c478bd9Sstevel@tonic-gate tcph = tcp->tcp_tcph; 13217c478bd9Sstevel@tonic-gate *(uint16_t *)tcph->th_fport = dstport; 13227c478bd9Sstevel@tonic-gate tcp->tcp_fport = dstport; 13237c478bd9Sstevel@tonic-gate 13247c478bd9Sstevel@tonic-gate /* 13257c478bd9Sstevel@tonic-gate * Don't allow this connection to completely duplicate 13267c478bd9Sstevel@tonic-gate * an existing connection. 13277c478bd9Sstevel@tonic-gate */ 13287c478bd9Sstevel@tonic-gate if (tcp_conn_check(tcp) < 0) { 13297c478bd9Sstevel@tonic-gate errno = EADDRINUSE; 13307c478bd9Sstevel@tonic-gate return (-1); 13317c478bd9Sstevel@tonic-gate } 13327c478bd9Sstevel@tonic-gate 13337c478bd9Sstevel@tonic-gate /* 13347c478bd9Sstevel@tonic-gate * Just make sure our rwnd is at 13357c478bd9Sstevel@tonic-gate * least tcp_recv_hiwat_mss * MSS 13367c478bd9Sstevel@tonic-gate * large, and round up to the nearest 13377c478bd9Sstevel@tonic-gate * MSS. 13387c478bd9Sstevel@tonic-gate * 13397c478bd9Sstevel@tonic-gate * We do the round up here because 13407c478bd9Sstevel@tonic-gate * we need to get the interface 13417c478bd9Sstevel@tonic-gate * MTU first before we can do the 13427c478bd9Sstevel@tonic-gate * round up. 13437c478bd9Sstevel@tonic-gate */ 13447c478bd9Sstevel@tonic-gate mss = tcp->tcp_mss - tcp->tcp_hdr_len; 13457c478bd9Sstevel@tonic-gate tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss), 13467c478bd9Sstevel@tonic-gate tcp_recv_hiwat_minmss * mss); 13477c478bd9Sstevel@tonic-gate tcp->tcp_rwnd_max = tcp->tcp_rwnd; 13487c478bd9Sstevel@tonic-gate SET_WS_VALUE(tcp); 13497c478bd9Sstevel@tonic-gate U32_TO_ABE16((tcp->tcp_rwnd >> tcp->tcp_rcv_ws), 13507c478bd9Sstevel@tonic-gate tcp->tcp_tcph->th_win); 13517c478bd9Sstevel@tonic-gate if (tcp->tcp_rcv_ws > 0 || tcp_wscale_always) 13527c478bd9Sstevel@tonic-gate tcp->tcp_snd_ws_ok = B_TRUE; 13537c478bd9Sstevel@tonic-gate 13547c478bd9Sstevel@tonic-gate /* 13557c478bd9Sstevel@tonic-gate * Set tcp_snd_ts_ok to true 13567c478bd9Sstevel@tonic-gate * so that tcp_xmit_mp will 13577c478bd9Sstevel@tonic-gate * include the timestamp 13587c478bd9Sstevel@tonic-gate * option in the SYN segment. 13597c478bd9Sstevel@tonic-gate */ 13607c478bd9Sstevel@tonic-gate if (tcp_tstamp_always || 13617c478bd9Sstevel@tonic-gate (tcp->tcp_rcv_ws && tcp_tstamp_if_wscale)) { 13627c478bd9Sstevel@tonic-gate tcp->tcp_snd_ts_ok = B_TRUE; 13637c478bd9Sstevel@tonic-gate } 13647c478bd9Sstevel@tonic-gate 13657c478bd9Sstevel@tonic-gate if (tcp_sack_permitted == 2 || 13667c478bd9Sstevel@tonic-gate tcp->tcp_snd_sack_ok) { 13677c478bd9Sstevel@tonic-gate assert(tcp->tcp_sack_info == NULL); 13687c478bd9Sstevel@tonic-gate if ((tcp->tcp_sack_info = (tcp_sack_info_t *)bkmem_zalloc( 13697c478bd9Sstevel@tonic-gate sizeof (tcp_sack_info_t))) == NULL) { 13707c478bd9Sstevel@tonic-gate tcp->tcp_snd_sack_ok = B_FALSE; 13717c478bd9Sstevel@tonic-gate } else { 13727c478bd9Sstevel@tonic-gate tcp->tcp_snd_sack_ok = B_TRUE; 13737c478bd9Sstevel@tonic-gate } 13747c478bd9Sstevel@tonic-gate } 13757c478bd9Sstevel@tonic-gate /* 13767c478bd9Sstevel@tonic-gate * Should we use ECN? 
Note that the current 13777c478bd9Sstevel@tonic-gate * default value (SunOS 5.9) of tcp_ecn_permitted 13787c478bd9Sstevel@tonic-gate * is 2. The reason for doing this is that there 13797c478bd9Sstevel@tonic-gate * are equipments out there that will drop ECN 13807c478bd9Sstevel@tonic-gate * enabled IP packets. Setting it to 1 avoids 13817c478bd9Sstevel@tonic-gate * compatibility problems. 13827c478bd9Sstevel@tonic-gate */ 13837c478bd9Sstevel@tonic-gate if (tcp_ecn_permitted == 2) 13847c478bd9Sstevel@tonic-gate tcp->tcp_ecn_ok = B_TRUE; 13857c478bd9Sstevel@tonic-gate 13867c478bd9Sstevel@tonic-gate tcp_iss_init(tcp); 13877c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 13887c478bd9Sstevel@tonic-gate tcp->tcp_active_open = B_TRUE; 13897c478bd9Sstevel@tonic-gate 13907c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_SYN_SENT; 13917c478bd9Sstevel@tonic-gate syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, tcp->tcp_iss, B_FALSE, 13927c478bd9Sstevel@tonic-gate NULL, B_FALSE); 13937c478bd9Sstevel@tonic-gate if (syn_mp != NULL) { 13947c478bd9Sstevel@tonic-gate int ret; 13957c478bd9Sstevel@tonic-gate 13967c478bd9Sstevel@tonic-gate /* Dump the packet when debugging. */ 13977c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_connect", syn_mp); 13987c478bd9Sstevel@tonic-gate /* Send out the SYN packet. */ 13997c478bd9Sstevel@tonic-gate ret = ipv4_tcp_output(sock_id, syn_mp); 14007c478bd9Sstevel@tonic-gate freeb(syn_mp); 14017c478bd9Sstevel@tonic-gate if (ret < 0) { 14027c478bd9Sstevel@tonic-gate return (-1); 14037c478bd9Sstevel@tonic-gate } 14047c478bd9Sstevel@tonic-gate /* tcp_state_wait() will finish the 3 way handshake. */ 14057c478bd9Sstevel@tonic-gate return (tcp_state_wait(sock_id, tcp, TCPS_ESTABLISHED)); 14067c478bd9Sstevel@tonic-gate } else { 14077c478bd9Sstevel@tonic-gate errno = ENOBUFS; 14087c478bd9Sstevel@tonic-gate return (-1); 14097c478bd9Sstevel@tonic-gate } 14107c478bd9Sstevel@tonic-gate } 14117c478bd9Sstevel@tonic-gate 14127c478bd9Sstevel@tonic-gate /* 14137c478bd9Sstevel@tonic-gate * Common accept code. Called by tcp_conn_request. 14147c478bd9Sstevel@tonic-gate * cr_pkt is the SYN packet. 14157c478bd9Sstevel@tonic-gate */ 14167c478bd9Sstevel@tonic-gate static int 14177c478bd9Sstevel@tonic-gate tcp_accept_comm(tcp_t *listener, tcp_t *acceptor, mblk_t *cr_pkt, 14187c478bd9Sstevel@tonic-gate uint_t ip_hdr_len) 14197c478bd9Sstevel@tonic-gate { 14207c478bd9Sstevel@tonic-gate tcph_t *tcph; 14217c478bd9Sstevel@tonic-gate 14227c478bd9Sstevel@tonic-gate #ifdef DEBUG 14237c478bd9Sstevel@tonic-gate printf("tcp_accept_comm #######################\n"); 14247c478bd9Sstevel@tonic-gate #endif 14257c478bd9Sstevel@tonic-gate 14267c478bd9Sstevel@tonic-gate /* 14277c478bd9Sstevel@tonic-gate * When we get here, we know that the acceptor header template 14287c478bd9Sstevel@tonic-gate * has already been initialized. 14297c478bd9Sstevel@tonic-gate * However, it may not match the listener if the listener 14307c478bd9Sstevel@tonic-gate * includes options... 
14317c478bd9Sstevel@tonic-gate * It may also not match the listener if the listener is v6 and 14327c478bd9Sstevel@tonic-gate * and the acceptor is v4 14337c478bd9Sstevel@tonic-gate */ 14347c478bd9Sstevel@tonic-gate acceptor->tcp_lport = listener->tcp_lport; 14357c478bd9Sstevel@tonic-gate 14367c478bd9Sstevel@tonic-gate if (listener->tcp_ipversion == acceptor->tcp_ipversion) { 14377c478bd9Sstevel@tonic-gate if (acceptor->tcp_iphc_len != listener->tcp_iphc_len) { 14387c478bd9Sstevel@tonic-gate /* 14397c478bd9Sstevel@tonic-gate * Listener had options of some sort; acceptor inherits. 14407c478bd9Sstevel@tonic-gate * Free up the acceptor template and allocate one 14417c478bd9Sstevel@tonic-gate * of the right size. 14427c478bd9Sstevel@tonic-gate */ 14437c478bd9Sstevel@tonic-gate bkmem_free(acceptor->tcp_iphc, acceptor->tcp_iphc_len); 14447c478bd9Sstevel@tonic-gate acceptor->tcp_iphc = bkmem_zalloc( 14457c478bd9Sstevel@tonic-gate listener->tcp_iphc_len); 14467c478bd9Sstevel@tonic-gate if (acceptor->tcp_iphc == NULL) { 14477c478bd9Sstevel@tonic-gate acceptor->tcp_iphc_len = 0; 14487c478bd9Sstevel@tonic-gate return (ENOMEM); 14497c478bd9Sstevel@tonic-gate } 14507c478bd9Sstevel@tonic-gate acceptor->tcp_iphc_len = listener->tcp_iphc_len; 14517c478bd9Sstevel@tonic-gate } 14527c478bd9Sstevel@tonic-gate acceptor->tcp_hdr_len = listener->tcp_hdr_len; 14537c478bd9Sstevel@tonic-gate acceptor->tcp_ip_hdr_len = listener->tcp_ip_hdr_len; 14547c478bd9Sstevel@tonic-gate acceptor->tcp_tcp_hdr_len = listener->tcp_tcp_hdr_len; 14557c478bd9Sstevel@tonic-gate 14567c478bd9Sstevel@tonic-gate /* 14577c478bd9Sstevel@tonic-gate * Copy the IP+TCP header template from listener to acceptor 14587c478bd9Sstevel@tonic-gate */ 14597c478bd9Sstevel@tonic-gate bcopy(listener->tcp_iphc, acceptor->tcp_iphc, 14607c478bd9Sstevel@tonic-gate listener->tcp_hdr_len); 14617c478bd9Sstevel@tonic-gate acceptor->tcp_ipha = (struct ip *)acceptor->tcp_iphc; 14627c478bd9Sstevel@tonic-gate acceptor->tcp_tcph = (tcph_t *)(acceptor->tcp_iphc + 14637c478bd9Sstevel@tonic-gate acceptor->tcp_ip_hdr_len); 14647c478bd9Sstevel@tonic-gate } else { 14657c478bd9Sstevel@tonic-gate prom_panic("tcp_accept_comm: version not equal"); 14667c478bd9Sstevel@tonic-gate } 14677c478bd9Sstevel@tonic-gate 14687c478bd9Sstevel@tonic-gate /* Copy our new dest and fport from the connection request packet */ 14697c478bd9Sstevel@tonic-gate if (acceptor->tcp_ipversion == IPV4_VERSION) { 14707c478bd9Sstevel@tonic-gate struct ip *ipha; 14717c478bd9Sstevel@tonic-gate 14727c478bd9Sstevel@tonic-gate ipha = (struct ip *)cr_pkt->b_rptr; 14737c478bd9Sstevel@tonic-gate acceptor->tcp_ipha->ip_dst = ipha->ip_src; 14747c478bd9Sstevel@tonic-gate acceptor->tcp_remote = ipha->ip_src.s_addr; 14757c478bd9Sstevel@tonic-gate acceptor->tcp_ipha->ip_src = ipha->ip_dst; 14767c478bd9Sstevel@tonic-gate acceptor->tcp_bound_source = ipha->ip_dst.s_addr; 14777c478bd9Sstevel@tonic-gate tcph = (tcph_t *)&cr_pkt->b_rptr[ip_hdr_len]; 14787c478bd9Sstevel@tonic-gate } else { 14797c478bd9Sstevel@tonic-gate prom_panic("tcp_accept_comm: not IPv4"); 14807c478bd9Sstevel@tonic-gate } 14817c478bd9Sstevel@tonic-gate bcopy(tcph->th_lport, acceptor->tcp_tcph->th_fport, sizeof (in_port_t)); 14827c478bd9Sstevel@tonic-gate bcopy(acceptor->tcp_tcph->th_fport, &acceptor->tcp_fport, 14837c478bd9Sstevel@tonic-gate sizeof (in_port_t)); 14847c478bd9Sstevel@tonic-gate /* 14857c478bd9Sstevel@tonic-gate * For an all-port proxy listener, the local port is determined by 14867c478bd9Sstevel@tonic-gate * the port number field in the SYN 
packet. 14877c478bd9Sstevel@tonic-gate */ 14887c478bd9Sstevel@tonic-gate if (listener->tcp_lport == 0) { 14897c478bd9Sstevel@tonic-gate acceptor->tcp_lport = *(in_port_t *)tcph->th_fport; 14907c478bd9Sstevel@tonic-gate bcopy(tcph->th_fport, acceptor->tcp_tcph->th_lport, 14917c478bd9Sstevel@tonic-gate sizeof (in_port_t)); 14927c478bd9Sstevel@tonic-gate } 14937c478bd9Sstevel@tonic-gate /* Inherit various TCP parameters from the listener */ 14947c478bd9Sstevel@tonic-gate acceptor->tcp_naglim = listener->tcp_naglim; 14957c478bd9Sstevel@tonic-gate acceptor->tcp_first_timer_threshold = 14967c478bd9Sstevel@tonic-gate listener->tcp_first_timer_threshold; 14977c478bd9Sstevel@tonic-gate acceptor->tcp_second_timer_threshold = 14987c478bd9Sstevel@tonic-gate listener->tcp_second_timer_threshold; 14997c478bd9Sstevel@tonic-gate 15007c478bd9Sstevel@tonic-gate acceptor->tcp_first_ctimer_threshold = 15017c478bd9Sstevel@tonic-gate listener->tcp_first_ctimer_threshold; 15027c478bd9Sstevel@tonic-gate acceptor->tcp_second_ctimer_threshold = 15037c478bd9Sstevel@tonic-gate listener->tcp_second_ctimer_threshold; 15047c478bd9Sstevel@tonic-gate 15057c478bd9Sstevel@tonic-gate acceptor->tcp_xmit_hiwater = listener->tcp_xmit_hiwater; 15067c478bd9Sstevel@tonic-gate 15077c478bd9Sstevel@tonic-gate acceptor->tcp_state = TCPS_LISTEN; 15087c478bd9Sstevel@tonic-gate tcp_iss_init(acceptor); 15097c478bd9Sstevel@tonic-gate 15107c478bd9Sstevel@tonic-gate /* Process all TCP options. */ 15117c478bd9Sstevel@tonic-gate tcp_process_options(acceptor, tcph); 15127c478bd9Sstevel@tonic-gate 15137c478bd9Sstevel@tonic-gate /* Is the other end ECN capable? */ 15147c478bd9Sstevel@tonic-gate if (tcp_ecn_permitted >= 1 && 15157c478bd9Sstevel@tonic-gate (tcph->th_flags[0] & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) { 15167c478bd9Sstevel@tonic-gate acceptor->tcp_ecn_ok = B_TRUE; 15177c478bd9Sstevel@tonic-gate } 15187c478bd9Sstevel@tonic-gate 15197c478bd9Sstevel@tonic-gate /* 15207c478bd9Sstevel@tonic-gate * listener->tcp_rq->q_hiwat should be the default window size or a 15217c478bd9Sstevel@tonic-gate * window size changed via SO_RCVBUF option. First round up the 15227c478bd9Sstevel@tonic-gate * acceptor's tcp_rwnd to the nearest MSS. Then find out the window 15237c478bd9Sstevel@tonic-gate * scale option value if needed. Call tcp_rwnd_set() to finish the 15247c478bd9Sstevel@tonic-gate * setting. 15257c478bd9Sstevel@tonic-gate * 15267c478bd9Sstevel@tonic-gate * Note if there is a rpipe metric associated with the remote host, 15277c478bd9Sstevel@tonic-gate * we should not inherit receive window size from listener. 15287c478bd9Sstevel@tonic-gate */ 15297c478bd9Sstevel@tonic-gate acceptor->tcp_rwnd = MSS_ROUNDUP( 15307c478bd9Sstevel@tonic-gate (acceptor->tcp_rwnd == 0 ? listener->tcp_rwnd_max : 15317c478bd9Sstevel@tonic-gate acceptor->tcp_rwnd), acceptor->tcp_mss); 15327c478bd9Sstevel@tonic-gate if (acceptor->tcp_snd_ws_ok) 15337c478bd9Sstevel@tonic-gate SET_WS_VALUE(acceptor); 15347c478bd9Sstevel@tonic-gate /* 15357c478bd9Sstevel@tonic-gate * Note that this is the only place tcp_rwnd_set() is called for 15367c478bd9Sstevel@tonic-gate * accepting a connection. We need to call it here instead of 15377c478bd9Sstevel@tonic-gate * after the 3-way handshake because we need to tell the other 15387c478bd9Sstevel@tonic-gate * side our rwnd in the SYN-ACK segment. 
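 * Worked example (assuming SET_WS_VALUE() picks the smallest shift
 * for which the window fits in the 16-bit header field, the usual
 * rule): a 256 KB receive window (262144 bytes) cannot be advertised
 * unscaled, and 65535 << 2 = 262140 is still too small, so the shift
 * becomes 3 and the SYN-ACK carries 262144 >> 3 = 32768 in th_win;
 * a window of 65535 bytes or less needs no scaling at all.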
15397c478bd9Sstevel@tonic-gate */ 15407c478bd9Sstevel@tonic-gate (void) tcp_rwnd_set(acceptor, acceptor->tcp_rwnd); 15417c478bd9Sstevel@tonic-gate 15427c478bd9Sstevel@tonic-gate return (0); 15437c478bd9Sstevel@tonic-gate } 15447c478bd9Sstevel@tonic-gate 15457c478bd9Sstevel@tonic-gate /* 15467c478bd9Sstevel@tonic-gate * Defense for the SYN attack - 15477c478bd9Sstevel@tonic-gate * 1. When q0 is full, drop from the tail (tcp_eager_prev_q0) the oldest 15487c478bd9Sstevel@tonic-gate * one that doesn't have the dontdrop bit set. 15497c478bd9Sstevel@tonic-gate * 2. Don't drop a SYN request before its first timeout. This gives every 15507c478bd9Sstevel@tonic-gate * request at least til the first timeout to complete its 3-way handshake. 15517c478bd9Sstevel@tonic-gate * 3. The current threshold is - # of timeout > q0len/4 => SYN alert on 15527c478bd9Sstevel@tonic-gate * # of timeout drops back to <= q0len/32 => SYN alert off 15537c478bd9Sstevel@tonic-gate */ 15547c478bd9Sstevel@tonic-gate static boolean_t 15557c478bd9Sstevel@tonic-gate tcp_drop_q0(tcp_t *tcp) 15567c478bd9Sstevel@tonic-gate { 15577c478bd9Sstevel@tonic-gate tcp_t *eager; 15587c478bd9Sstevel@tonic-gate 15597c478bd9Sstevel@tonic-gate assert(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0); 15607c478bd9Sstevel@tonic-gate /* 15617c478bd9Sstevel@tonic-gate * New one is added after next_q0 so prev_q0 points to the oldest 15627c478bd9Sstevel@tonic-gate * Also do not drop any established connections that are deferred on 15637c478bd9Sstevel@tonic-gate * q0 due to q being full 15647c478bd9Sstevel@tonic-gate */ 15657c478bd9Sstevel@tonic-gate 15667c478bd9Sstevel@tonic-gate eager = tcp->tcp_eager_prev_q0; 15677c478bd9Sstevel@tonic-gate while (eager->tcp_dontdrop || eager->tcp_conn_def_q0) { 15687c478bd9Sstevel@tonic-gate /* XXX should move the eager to the head */ 15697c478bd9Sstevel@tonic-gate eager = eager->tcp_eager_prev_q0; 15707c478bd9Sstevel@tonic-gate if (eager == tcp) { 15717c478bd9Sstevel@tonic-gate eager = tcp->tcp_eager_prev_q0; 15727c478bd9Sstevel@tonic-gate break; 15737c478bd9Sstevel@tonic-gate } 15747c478bd9Sstevel@tonic-gate } 15757c478bd9Sstevel@tonic-gate dprintf("tcp_drop_q0: listen half-open queue (max=%d) overflow" 15767c478bd9Sstevel@tonic-gate " (%d pending) on %s, drop one", tcp_conn_req_max_q0, 15777c478bd9Sstevel@tonic-gate tcp->tcp_conn_req_cnt_q0, 15787c478bd9Sstevel@tonic-gate tcp_display(tcp, NULL, DISP_PORT_ONLY)); 15797c478bd9Sstevel@tonic-gate 15807c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpHalfOpenDrop); 15817c478bd9Sstevel@tonic-gate bkmem_free((caddr_t)eager, sizeof (tcp_t)); 15827c478bd9Sstevel@tonic-gate return (B_TRUE); 15837c478bd9Sstevel@tonic-gate } 15847c478bd9Sstevel@tonic-gate 15857c478bd9Sstevel@tonic-gate /* ARGSUSED */ 15867c478bd9Sstevel@tonic-gate static tcp_t * 15877c478bd9Sstevel@tonic-gate tcp_conn_request(tcp_t *tcp, mblk_t *mp, uint_t sock_id, uint_t ip_hdr_len) 15887c478bd9Sstevel@tonic-gate { 15897c478bd9Sstevel@tonic-gate tcp_t *eager; 15907c478bd9Sstevel@tonic-gate struct ip *ipha; 15917c478bd9Sstevel@tonic-gate int err; 15927c478bd9Sstevel@tonic-gate 15937c478bd9Sstevel@tonic-gate #ifdef DEBUG 15947c478bd9Sstevel@tonic-gate printf("tcp_conn_request ###################\n"); 15957c478bd9Sstevel@tonic-gate #endif 15967c478bd9Sstevel@tonic-gate 15977c478bd9Sstevel@tonic-gate if (tcp->tcp_conn_req_cnt_q >= tcp->tcp_conn_req_max) { 15987c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpListenDrop); 15997c478bd9Sstevel@tonic-gate dprintf("tcp_conn_request: listen backlog (max=%d) " 
16007c478bd9Sstevel@tonic-gate "overflow (%d pending) on %s", 16017c478bd9Sstevel@tonic-gate tcp->tcp_conn_req_max, tcp->tcp_conn_req_cnt_q, 16027c478bd9Sstevel@tonic-gate tcp_display(tcp, NULL, DISP_PORT_ONLY)); 16037c478bd9Sstevel@tonic-gate return (NULL); 16047c478bd9Sstevel@tonic-gate } 16057c478bd9Sstevel@tonic-gate 16067c478bd9Sstevel@tonic-gate assert(OK_32PTR(mp->b_rptr)); 16077c478bd9Sstevel@tonic-gate 16087c478bd9Sstevel@tonic-gate if (tcp->tcp_conn_req_cnt_q0 >= 16097c478bd9Sstevel@tonic-gate tcp->tcp_conn_req_max + tcp_conn_req_max_q0) { 16107c478bd9Sstevel@tonic-gate /* 16117c478bd9Sstevel@tonic-gate * Q0 is full. Drop a pending half-open req from the queue 16127c478bd9Sstevel@tonic-gate * to make room for the new SYN req. Also mark the time we 16137c478bd9Sstevel@tonic-gate * drop a SYN. 16147c478bd9Sstevel@tonic-gate */ 16157c478bd9Sstevel@tonic-gate tcp->tcp_last_rcv_lbolt = prom_gettime(); 16167c478bd9Sstevel@tonic-gate if (!tcp_drop_q0(tcp)) { 16177c478bd9Sstevel@tonic-gate freemsg(mp); 16187c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpListenDropQ0); 16197c478bd9Sstevel@tonic-gate dprintf("tcp_conn_request: listen half-open queue " 16207c478bd9Sstevel@tonic-gate "(max=%d) full (%d pending) on %s", 16217c478bd9Sstevel@tonic-gate tcp_conn_req_max_q0, 16227c478bd9Sstevel@tonic-gate tcp->tcp_conn_req_cnt_q0, 16237c478bd9Sstevel@tonic-gate tcp_display(tcp, NULL, DISP_PORT_ONLY)); 16247c478bd9Sstevel@tonic-gate return (NULL); 16257c478bd9Sstevel@tonic-gate } 16267c478bd9Sstevel@tonic-gate } 16277c478bd9Sstevel@tonic-gate 16287c478bd9Sstevel@tonic-gate ipha = (struct ip *)mp->b_rptr; 16297c478bd9Sstevel@tonic-gate if (IN_CLASSD(ntohl(ipha->ip_src.s_addr)) || 16307c478bd9Sstevel@tonic-gate ipha->ip_src.s_addr == INADDR_BROADCAST || 16317c478bd9Sstevel@tonic-gate ipha->ip_src.s_addr == INADDR_ANY || 16327c478bd9Sstevel@tonic-gate ipha->ip_dst.s_addr == INADDR_BROADCAST) { 16337c478bd9Sstevel@tonic-gate freemsg(mp); 16347c478bd9Sstevel@tonic-gate return (NULL); 16357c478bd9Sstevel@tonic-gate } 16367c478bd9Sstevel@tonic-gate /* 16377c478bd9Sstevel@tonic-gate * We allow the connection to proceed 16387c478bd9Sstevel@tonic-gate * by generating a detached tcp state vector and put it in 16397c478bd9Sstevel@tonic-gate * the eager queue. When an accept happens, it will be 16407c478bd9Sstevel@tonic-gate * dequeued sequentially. 16417c478bd9Sstevel@tonic-gate */ 16427c478bd9Sstevel@tonic-gate if ((eager = (tcp_t *)bkmem_alloc(sizeof (tcp_t))) == NULL) { 16437c478bd9Sstevel@tonic-gate freemsg(mp); 16447c478bd9Sstevel@tonic-gate errno = ENOBUFS; 16457c478bd9Sstevel@tonic-gate return (NULL); 16467c478bd9Sstevel@tonic-gate } 16477c478bd9Sstevel@tonic-gate if ((errno = tcp_init_values(eager, NULL)) != 0) { 16487c478bd9Sstevel@tonic-gate freemsg(mp); 16497c478bd9Sstevel@tonic-gate bkmem_free((caddr_t)eager, sizeof (tcp_t)); 16507c478bd9Sstevel@tonic-gate return (NULL); 16517c478bd9Sstevel@tonic-gate } 16527c478bd9Sstevel@tonic-gate 16537c478bd9Sstevel@tonic-gate /* 16547c478bd9Sstevel@tonic-gate * Eager connection inherits address form from its listener, 16557c478bd9Sstevel@tonic-gate * but its packet form comes from the version of the received 16567c478bd9Sstevel@tonic-gate * SYN segment. 
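 * The list surgery just below links the new eager in right behind the
 * listener at the head of the circular q0 list, so the list always
 * looks like (sketch):
 *
 *	listener -> newest eager -> ... -> oldest eager -> listener
 *	(tcp_eager_next_q0 going forward, tcp_eager_prev_q0 backward)
 *
 * which is why tcp_drop_q0() above starts looking for a victim at
 * tcp_eager_prev_q0, the oldest half-open entry.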
16577c478bd9Sstevel@tonic-gate */ 16587c478bd9Sstevel@tonic-gate eager->tcp_family = tcp->tcp_family; 16597c478bd9Sstevel@tonic-gate 16607c478bd9Sstevel@tonic-gate err = tcp_accept_comm(tcp, eager, mp, ip_hdr_len); 16617c478bd9Sstevel@tonic-gate if (err) { 16627c478bd9Sstevel@tonic-gate bkmem_free((caddr_t)eager, sizeof (tcp_t)); 16637c478bd9Sstevel@tonic-gate return (NULL); 16647c478bd9Sstevel@tonic-gate } 16657c478bd9Sstevel@tonic-gate 16667c478bd9Sstevel@tonic-gate tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = eager; 16677c478bd9Sstevel@tonic-gate eager->tcp_eager_next_q0 = tcp->tcp_eager_next_q0; 16687c478bd9Sstevel@tonic-gate tcp->tcp_eager_next_q0 = eager; 16697c478bd9Sstevel@tonic-gate eager->tcp_eager_prev_q0 = tcp; 16707c478bd9Sstevel@tonic-gate 16717c478bd9Sstevel@tonic-gate /* Set tcp_listener before adding it to tcp_conn_fanout */ 16727c478bd9Sstevel@tonic-gate eager->tcp_listener = tcp; 16737c478bd9Sstevel@tonic-gate tcp->tcp_conn_req_cnt_q0++; 16747c478bd9Sstevel@tonic-gate 16757c478bd9Sstevel@tonic-gate return (eager); 16767c478bd9Sstevel@tonic-gate } 16777c478bd9Sstevel@tonic-gate 16787c478bd9Sstevel@tonic-gate /* 16797c478bd9Sstevel@tonic-gate * To get around the non-interrupt problem of inetboot. 16807c478bd9Sstevel@tonic-gate * Keep on processing packets until a certain state is reached or the 16817c478bd9Sstevel@tonic-gate * TCP is destroyed because of getting a RST packet. 16827c478bd9Sstevel@tonic-gate */ 16837c478bd9Sstevel@tonic-gate static int 16847c478bd9Sstevel@tonic-gate tcp_state_wait(int sock_id, tcp_t *tcp, int state) 16857c478bd9Sstevel@tonic-gate { 16867c478bd9Sstevel@tonic-gate int i; 16877c478bd9Sstevel@tonic-gate struct inetgram *in_gram; 16887c478bd9Sstevel@tonic-gate mblk_t *mp; 16897c478bd9Sstevel@tonic-gate int timeout; 16907c478bd9Sstevel@tonic-gate boolean_t changed = B_FALSE; 16917c478bd9Sstevel@tonic-gate 16927c478bd9Sstevel@tonic-gate /* 16937c478bd9Sstevel@tonic-gate * We need to make sure that the MAC does not wait longer 16947c478bd9Sstevel@tonic-gate * than RTO for any packet so that TCP can do retransmission. 16957c478bd9Sstevel@tonic-gate * But if the MAC timeout is less than tcp_rto, we are fine 16967c478bd9Sstevel@tonic-gate * and do not need to change it. 
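 * For example, if tcp_rto is currently 3000 ms but the interface was
 * opened with a 10-second receive timeout, in_timeout is temporarily
 * lowered to 3000 ms for this wait so that tcp_timer() can run
 * retransmissions on schedule; the saved value is restored (see the
 * "changed" flag) before we return.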
16977c478bd9Sstevel@tonic-gate */
16987c478bd9Sstevel@tonic-gate timeout = sockets[sock_id].in_timeout;
16997c478bd9Sstevel@tonic-gate if (timeout > tcp->tcp_rto) {
17007c478bd9Sstevel@tonic-gate sockets[sock_id].in_timeout = tcp->tcp_rto;
17017c478bd9Sstevel@tonic-gate changed = B_TRUE;
17027c478bd9Sstevel@tonic-gate }
17037c478bd9Sstevel@tonic-gate retry:
17047c478bd9Sstevel@tonic-gate if (sockets[sock_id].inq == NULL) {
17057c478bd9Sstevel@tonic-gate /* Go out and check the wire */
17067c478bd9Sstevel@tonic-gate for (i = MEDIA_LVL; i < TRANSPORT_LVL; i++) {
17077c478bd9Sstevel@tonic-gate if (sockets[sock_id].input[i] != NULL) {
17087c478bd9Sstevel@tonic-gate if (sockets[sock_id].input[i](sock_id) < 0) {
17097c478bd9Sstevel@tonic-gate if (changed) {
17107c478bd9Sstevel@tonic-gate sockets[sock_id].in_timeout =
17117c478bd9Sstevel@tonic-gate timeout;
17127c478bd9Sstevel@tonic-gate }
17137c478bd9Sstevel@tonic-gate return (-1);
17147c478bd9Sstevel@tonic-gate }
17157c478bd9Sstevel@tonic-gate }
17167c478bd9Sstevel@tonic-gate }
17177c478bd9Sstevel@tonic-gate }
17187c478bd9Sstevel@tonic-gate
17197c478bd9Sstevel@tonic-gate while ((in_gram = sockets[sock_id].inq) != NULL) {
17207c478bd9Sstevel@tonic-gate if (tcp != NULL && tcp->tcp_state == state)
17217c478bd9Sstevel@tonic-gate break;
17227c478bd9Sstevel@tonic-gate
17237c478bd9Sstevel@tonic-gate /* Remove unknown inetgrams from the head of inq. */
17247c478bd9Sstevel@tonic-gate if (in_gram->igm_level != TRANSPORT_LVL) {
17257c478bd9Sstevel@tonic-gate #ifdef DEBUG
17267c478bd9Sstevel@tonic-gate printf("tcp_state_wait for state %d: unexpected "
17277c478bd9Sstevel@tonic-gate "packet level %d frame found\n", state,
17287c478bd9Sstevel@tonic-gate in_gram->igm_level);
17297c478bd9Sstevel@tonic-gate #endif
17307c478bd9Sstevel@tonic-gate del_gram(&sockets[sock_id].inq, in_gram, B_TRUE);
17317c478bd9Sstevel@tonic-gate continue;
17327c478bd9Sstevel@tonic-gate }
17337c478bd9Sstevel@tonic-gate mp = in_gram->igm_mp;
17347c478bd9Sstevel@tonic-gate del_gram(&sockets[sock_id].inq, in_gram, B_FALSE);
17357c478bd9Sstevel@tonic-gate bkmem_free((caddr_t)in_gram, sizeof (struct inetgram));
17367c478bd9Sstevel@tonic-gate tcp_rput_data(tcp, mp, sock_id);
17377c478bd9Sstevel@tonic-gate
17387c478bd9Sstevel@tonic-gate /*
17397c478bd9Sstevel@tonic-gate * The other side may have closed this connection or
17407c478bd9Sstevel@tonic-gate * RST us. But we need to continue to process other
17417c478bd9Sstevel@tonic-gate * packets in the socket's queue because they may
17427c478bd9Sstevel@tonic-gate * belong to other TCP connections.
17437c478bd9Sstevel@tonic-gate */
17447c478bd9Sstevel@tonic-gate if (sockets[sock_id].pcb == NULL) {
17457c478bd9Sstevel@tonic-gate tcp = NULL;
17467c478bd9Sstevel@tonic-gate }
17477c478bd9Sstevel@tonic-gate }
17487c478bd9Sstevel@tonic-gate
17497c478bd9Sstevel@tonic-gate /* If the other side has closed the connection, just return.
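 * The return value below distinguishes an orderly close from a
 * failure: if tcp_clean_death() recorded an error in so_error (for
 * instance after an inbound RST), the caller gets -1; otherwise the
 * connection simply went away and 0 is returned.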
*/
17507c478bd9Sstevel@tonic-gate if (tcp == NULL || sockets[sock_id].pcb == NULL) {
17517c478bd9Sstevel@tonic-gate #ifdef DEBUG
17527c478bd9Sstevel@tonic-gate printf("tcp_state_wait other side dead: state %d "
17537c478bd9Sstevel@tonic-gate "error %d\n", state, sockets[sock_id].so_error);
17547c478bd9Sstevel@tonic-gate #endif
17557c478bd9Sstevel@tonic-gate if (sockets[sock_id].so_error != 0)
17567c478bd9Sstevel@tonic-gate return (-1);
17577c478bd9Sstevel@tonic-gate else
17587c478bd9Sstevel@tonic-gate return (0);
17597c478bd9Sstevel@tonic-gate }
17607c478bd9Sstevel@tonic-gate /*
17617c478bd9Sstevel@tonic-gate * TCPS_ALL_ACKED is not a valid TCP state, it is just used as an
17627c478bd9Sstevel@tonic-gate * indicator to tcp_state_wait to mean that it is being called
17637c478bd9Sstevel@tonic-gate * to wait till we have received acks for all the new segments sent.
17647c478bd9Sstevel@tonic-gate */
17657c478bd9Sstevel@tonic-gate if ((state == TCPS_ALL_ACKED) && (tcp->tcp_suna == tcp->tcp_snxt)) {
17667c478bd9Sstevel@tonic-gate goto done;
17677c478bd9Sstevel@tonic-gate }
17687c478bd9Sstevel@tonic-gate if (tcp->tcp_state != state) {
17697c478bd9Sstevel@tonic-gate if (prom_gettime() > tcp->tcp_rto_timeout)
17707c478bd9Sstevel@tonic-gate tcp_timer(tcp, sock_id);
17717c478bd9Sstevel@tonic-gate goto retry;
17727c478bd9Sstevel@tonic-gate }
17737c478bd9Sstevel@tonic-gate done:
17747c478bd9Sstevel@tonic-gate if (changed)
17757c478bd9Sstevel@tonic-gate sockets[sock_id].in_timeout = timeout;
17767c478bd9Sstevel@tonic-gate
17777c478bd9Sstevel@tonic-gate tcp_drain_needed(sock_id, tcp);
17787c478bd9Sstevel@tonic-gate return (0);
17797c478bd9Sstevel@tonic-gate }
17807c478bd9Sstevel@tonic-gate
17817c478bd9Sstevel@tonic-gate /* Verify the checksum of a segment. */
17827c478bd9Sstevel@tonic-gate static int
17837c478bd9Sstevel@tonic-gate tcp_verify_cksum(mblk_t *mp)
17847c478bd9Sstevel@tonic-gate {
17857c478bd9Sstevel@tonic-gate struct ip *iph;
17867c478bd9Sstevel@tonic-gate tcpha_t *tcph;
17877c478bd9Sstevel@tonic-gate int len;
17887c478bd9Sstevel@tonic-gate uint16_t old_sum;
17897c478bd9Sstevel@tonic-gate
17907c478bd9Sstevel@tonic-gate iph = (struct ip *)mp->b_rptr;
17917c478bd9Sstevel@tonic-gate tcph = (tcpha_t *)(iph + 1);
17927c478bd9Sstevel@tonic-gate len = ntohs(iph->ip_len);
17937c478bd9Sstevel@tonic-gate
17947c478bd9Sstevel@tonic-gate /*
17957c478bd9Sstevel@tonic-gate * Calculate the TCP checksum. Need to include the pseudo header,
17967c478bd9Sstevel@tonic-gate * which is similar to the real IP header starting at the TTL field.
17977c478bd9Sstevel@tonic-gate */
17987c478bd9Sstevel@tonic-gate iph->ip_sum = htons(len - IP_SIMPLE_HDR_LENGTH);
17997c478bd9Sstevel@tonic-gate old_sum = tcph->tha_sum;
18007c478bd9Sstevel@tonic-gate tcph->tha_sum = 0;
18017c478bd9Sstevel@tonic-gate iph->ip_ttl = 0;
18027c478bd9Sstevel@tonic-gate if (old_sum == tcp_cksum((uint16_t *)&(iph->ip_ttl),
18037c478bd9Sstevel@tonic-gate len - IP_SIMPLE_HDR_LENGTH + 12)) {
18047c478bd9Sstevel@tonic-gate return (0);
18057c478bd9Sstevel@tonic-gate } else {
18067c478bd9Sstevel@tonic-gate tcp_cksum_errors++;
18077c478bd9Sstevel@tonic-gate return (-1);
18087c478bd9Sstevel@tonic-gate }
18097c478bd9Sstevel@tonic-gate }
18107c478bd9Sstevel@tonic-gate
18117c478bd9Sstevel@tonic-gate /* To find a TCP connection matching the incoming segment.
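 * The match is on the full 4-tuple with the roles swapped: the
 * segment's source port and address (tha_lport, ip_src) are compared
 * against the connection's remote side (tcp_fport, tcp_remote), and
 * its destination port and address against the local side (tcp_lport,
 * tcp_bound_source).  Connections parked in TIME_WAIT are kept on a
 * separate list and are reported with *sock_id set to -1.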
*/ 18127c478bd9Sstevel@tonic-gate static tcp_t * 18137c478bd9Sstevel@tonic-gate tcp_lookup_ipv4(struct ip *iph, tcpha_t *tcph, int min_state, int *sock_id) 18147c478bd9Sstevel@tonic-gate { 18157c478bd9Sstevel@tonic-gate int i; 18167c478bd9Sstevel@tonic-gate tcp_t *tcp; 18177c478bd9Sstevel@tonic-gate 18187c478bd9Sstevel@tonic-gate for (i = 0; i < MAXSOCKET; i++) { 18197c478bd9Sstevel@tonic-gate if (sockets[i].type == INETBOOT_STREAM && 18207c478bd9Sstevel@tonic-gate (tcp = (tcp_t *)sockets[i].pcb) != NULL) { 18217c478bd9Sstevel@tonic-gate if (tcph->tha_lport == tcp->tcp_fport && 18227c478bd9Sstevel@tonic-gate tcph->tha_fport == tcp->tcp_lport && 18237c478bd9Sstevel@tonic-gate iph->ip_src.s_addr == tcp->tcp_remote && 18247c478bd9Sstevel@tonic-gate iph->ip_dst.s_addr == tcp->tcp_bound_source && 18257c478bd9Sstevel@tonic-gate tcp->tcp_state >= min_state) { 18267c478bd9Sstevel@tonic-gate *sock_id = i; 18277c478bd9Sstevel@tonic-gate return (tcp); 18287c478bd9Sstevel@tonic-gate } 18297c478bd9Sstevel@tonic-gate } 18307c478bd9Sstevel@tonic-gate } 18317c478bd9Sstevel@tonic-gate /* Find it in the time wait list. */ 18327c478bd9Sstevel@tonic-gate for (tcp = tcp_time_wait_head; tcp != NULL; 18337c478bd9Sstevel@tonic-gate tcp = tcp->tcp_time_wait_next) { 18347c478bd9Sstevel@tonic-gate if (tcph->tha_lport == tcp->tcp_fport && 18357c478bd9Sstevel@tonic-gate tcph->tha_fport == tcp->tcp_lport && 18367c478bd9Sstevel@tonic-gate iph->ip_src.s_addr == tcp->tcp_remote && 18377c478bd9Sstevel@tonic-gate iph->ip_dst.s_addr == tcp->tcp_bound_source && 18387c478bd9Sstevel@tonic-gate tcp->tcp_state >= min_state) { 18397c478bd9Sstevel@tonic-gate *sock_id = -1; 18407c478bd9Sstevel@tonic-gate return (tcp); 18417c478bd9Sstevel@tonic-gate } 18427c478bd9Sstevel@tonic-gate } 18437c478bd9Sstevel@tonic-gate return (NULL); 18447c478bd9Sstevel@tonic-gate } 18457c478bd9Sstevel@tonic-gate 18467c478bd9Sstevel@tonic-gate /* To find a TCP listening connection matching the incoming segment. */ 18477c478bd9Sstevel@tonic-gate static tcp_t * 18487c478bd9Sstevel@tonic-gate tcp_lookup_listener_ipv4(in_addr_t addr, in_port_t port, int *sock_id) 18497c478bd9Sstevel@tonic-gate { 18507c478bd9Sstevel@tonic-gate int i; 18517c478bd9Sstevel@tonic-gate tcp_t *tcp; 18527c478bd9Sstevel@tonic-gate 18537c478bd9Sstevel@tonic-gate for (i = 0; i < MAXSOCKET; i++) { 18547c478bd9Sstevel@tonic-gate if (sockets[i].type == INETBOOT_STREAM && 18557c478bd9Sstevel@tonic-gate (tcp = (tcp_t *)sockets[i].pcb) != NULL) { 18567c478bd9Sstevel@tonic-gate if (tcp->tcp_lport == port && 18577c478bd9Sstevel@tonic-gate (tcp->tcp_bound_source == addr || 18587c478bd9Sstevel@tonic-gate tcp->tcp_bound_source == INADDR_ANY)) { 18597c478bd9Sstevel@tonic-gate *sock_id = i; 18607c478bd9Sstevel@tonic-gate return (tcp); 18617c478bd9Sstevel@tonic-gate } 18627c478bd9Sstevel@tonic-gate } 18637c478bd9Sstevel@tonic-gate } 18647c478bd9Sstevel@tonic-gate 18657c478bd9Sstevel@tonic-gate return (NULL); 18667c478bd9Sstevel@tonic-gate } 18677c478bd9Sstevel@tonic-gate 18687c478bd9Sstevel@tonic-gate /* To find a TCP eager matching the incoming segment. 
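 * Both eager lists are searched: tcp_eager_next_q holds connections
 * that have completed the handshake and are waiting to be accepted
 * (a NULL-terminated list), while tcp_eager_next_q0 holds half-open
 * ones on a circular list whose sentinel is the listener itself,
 * hence the "tcp != listener" loop bound below.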
*/
18697c478bd9Sstevel@tonic-gate static tcp_t *
18707c478bd9Sstevel@tonic-gate tcp_lookup_eager_ipv4(tcp_t *listener, struct ip *iph, tcpha_t *tcph)
18717c478bd9Sstevel@tonic-gate {
18727c478bd9Sstevel@tonic-gate tcp_t *tcp;
18737c478bd9Sstevel@tonic-gate
18747c478bd9Sstevel@tonic-gate #ifdef DEBUG
18757c478bd9Sstevel@tonic-gate printf("tcp_lookup_eager_ipv4 ###############\n");
18767c478bd9Sstevel@tonic-gate #endif
18777c478bd9Sstevel@tonic-gate for (tcp = listener->tcp_eager_next_q; tcp != NULL;
18787c478bd9Sstevel@tonic-gate tcp = tcp->tcp_eager_next_q) {
18797c478bd9Sstevel@tonic-gate if (tcph->tha_lport == tcp->tcp_fport &&
18807c478bd9Sstevel@tonic-gate tcph->tha_fport == tcp->tcp_lport &&
18817c478bd9Sstevel@tonic-gate iph->ip_src.s_addr == tcp->tcp_remote &&
18827c478bd9Sstevel@tonic-gate iph->ip_dst.s_addr == tcp->tcp_bound_source) {
18837c478bd9Sstevel@tonic-gate return (tcp);
18847c478bd9Sstevel@tonic-gate }
18857c478bd9Sstevel@tonic-gate }
18867c478bd9Sstevel@tonic-gate
18877c478bd9Sstevel@tonic-gate for (tcp = listener->tcp_eager_next_q0; tcp != listener;
18887c478bd9Sstevel@tonic-gate tcp = tcp->tcp_eager_next_q0) {
18897c478bd9Sstevel@tonic-gate if (tcph->tha_lport == tcp->tcp_fport &&
18907c478bd9Sstevel@tonic-gate tcph->tha_fport == tcp->tcp_lport &&
18917c478bd9Sstevel@tonic-gate iph->ip_src.s_addr == tcp->tcp_remote &&
18927c478bd9Sstevel@tonic-gate iph->ip_dst.s_addr == tcp->tcp_bound_source) {
18937c478bd9Sstevel@tonic-gate return (tcp);
18947c478bd9Sstevel@tonic-gate }
18957c478bd9Sstevel@tonic-gate }
18967c478bd9Sstevel@tonic-gate #ifdef DEBUG
18977c478bd9Sstevel@tonic-gate printf("No eager found\n");
18987c478bd9Sstevel@tonic-gate #endif
18997c478bd9Sstevel@tonic-gate return (NULL);
19007c478bd9Sstevel@tonic-gate }
19017c478bd9Sstevel@tonic-gate
19027c478bd9Sstevel@tonic-gate /* To destroy a TCP control block. */
19037c478bd9Sstevel@tonic-gate static void
19047c478bd9Sstevel@tonic-gate tcp_clean_death(int sock_id, tcp_t *tcp, int err)
19057c478bd9Sstevel@tonic-gate {
19067c478bd9Sstevel@tonic-gate tcp_free(tcp);
19077c478bd9Sstevel@tonic-gate if (tcp->tcp_state == TCPS_TIME_WAIT)
19087c478bd9Sstevel@tonic-gate tcp_time_wait_remove(tcp);
19097c478bd9Sstevel@tonic-gate
19107c478bd9Sstevel@tonic-gate if (sock_id >= 0) {
19117c478bd9Sstevel@tonic-gate sockets[sock_id].pcb = NULL;
19127c478bd9Sstevel@tonic-gate if (err != 0)
19137c478bd9Sstevel@tonic-gate sockets[sock_id].so_error = err;
19147c478bd9Sstevel@tonic-gate }
19157c478bd9Sstevel@tonic-gate bkmem_free((caddr_t)tcp, sizeof (tcp_t));
19167c478bd9Sstevel@tonic-gate }
19177c478bd9Sstevel@tonic-gate
19187c478bd9Sstevel@tonic-gate /*
19197c478bd9Sstevel@tonic-gate * tcp_rwnd_set() is called to adjust the receive window to a desired value.
19207c478bd9Sstevel@tonic-gate * We do not allow the receive window to shrink. After setting rwnd,
19217c478bd9Sstevel@tonic-gate * set the flow control hiwat of the stream.
19227c478bd9Sstevel@tonic-gate *
19237c478bd9Sstevel@tonic-gate * This function is called in 2 cases:
19247c478bd9Sstevel@tonic-gate *
19257c478bd9Sstevel@tonic-gate * 1) Before data transfer begins, in tcp_accept_comm() for accepting a
19267c478bd9Sstevel@tonic-gate * connection (passive open) and in tcp_rput_data() for active connect.
19277c478bd9Sstevel@tonic-gate * This is called after tcp_mss_set() when the desired MSS value is known.
19287c478bd9Sstevel@tonic-gate * This makes sure that our window size is a multiple of the other side's
19297c478bd9Sstevel@tonic-gate * MSS.
19307c478bd9Sstevel@tonic-gate * 2) Handling SO_RCVBUF option. 19317c478bd9Sstevel@tonic-gate * 19327c478bd9Sstevel@tonic-gate * It is ASSUMED that the requested size is a multiple of the current MSS. 19337c478bd9Sstevel@tonic-gate * 19347c478bd9Sstevel@tonic-gate * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the 19357c478bd9Sstevel@tonic-gate * user requests so. 19367c478bd9Sstevel@tonic-gate */ 19377c478bd9Sstevel@tonic-gate static int 19387c478bd9Sstevel@tonic-gate tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd) 19397c478bd9Sstevel@tonic-gate { 19407c478bd9Sstevel@tonic-gate uint32_t mss = tcp->tcp_mss; 19417c478bd9Sstevel@tonic-gate uint32_t old_max_rwnd; 19427c478bd9Sstevel@tonic-gate uint32_t max_transmittable_rwnd; 19437c478bd9Sstevel@tonic-gate 19447c478bd9Sstevel@tonic-gate if (tcp->tcp_rwnd_max != 0) 19457c478bd9Sstevel@tonic-gate old_max_rwnd = tcp->tcp_rwnd_max; 19467c478bd9Sstevel@tonic-gate else 19477c478bd9Sstevel@tonic-gate old_max_rwnd = tcp->tcp_rwnd; 19487c478bd9Sstevel@tonic-gate 19497c478bd9Sstevel@tonic-gate /* 19507c478bd9Sstevel@tonic-gate * Insist on a receive window that is at least 19517c478bd9Sstevel@tonic-gate * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid 19527c478bd9Sstevel@tonic-gate * funny TCP interactions of Nagle algorithm, SWS avoidance 19537c478bd9Sstevel@tonic-gate * and delayed acknowledgement. 19547c478bd9Sstevel@tonic-gate */ 19557c478bd9Sstevel@tonic-gate rwnd = MAX(rwnd, tcp_recv_hiwat_minmss * mss); 19567c478bd9Sstevel@tonic-gate 19577c478bd9Sstevel@tonic-gate /* 19587c478bd9Sstevel@tonic-gate * If window size info has already been exchanged, TCP should not 19597c478bd9Sstevel@tonic-gate * shrink the window. Shrinking window is doable if done carefully. 19607c478bd9Sstevel@tonic-gate * We may add that support later. But so far there is not a real 19617c478bd9Sstevel@tonic-gate * need to do that. 19627c478bd9Sstevel@tonic-gate */ 19637c478bd9Sstevel@tonic-gate if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) { 19647c478bd9Sstevel@tonic-gate /* MSS may have changed, do a round up again. */ 19657c478bd9Sstevel@tonic-gate rwnd = MSS_ROUNDUP(old_max_rwnd, mss); 19667c478bd9Sstevel@tonic-gate } 19677c478bd9Sstevel@tonic-gate 19687c478bd9Sstevel@tonic-gate /* 19697c478bd9Sstevel@tonic-gate * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check 19707c478bd9Sstevel@tonic-gate * can be applied even before the window scale option is decided. 19717c478bd9Sstevel@tonic-gate */ 19727c478bd9Sstevel@tonic-gate max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws; 19737c478bd9Sstevel@tonic-gate if (rwnd > max_transmittable_rwnd) { 19747c478bd9Sstevel@tonic-gate rwnd = max_transmittable_rwnd - 19757c478bd9Sstevel@tonic-gate (max_transmittable_rwnd % mss); 19767c478bd9Sstevel@tonic-gate if (rwnd < mss) 19777c478bd9Sstevel@tonic-gate rwnd = max_transmittable_rwnd; 19787c478bd9Sstevel@tonic-gate /* 19797c478bd9Sstevel@tonic-gate * If we're over the limit we may have to back down tcp_rwnd. 19807c478bd9Sstevel@tonic-gate * The increment below won't work for us. So we set all three 19817c478bd9Sstevel@tonic-gate * here and the increment below will have no effect. 
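 * Worked example: with tcp_rcv_ws still 0 and an MSS of, say, 1460,
 * max_transmittable_rwnd is 65535, so a request for 70000 bytes is
 * trimmed to 65535 - (65535 % 1460) = 64240, i.e. exactly 44 full
 * segments.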
19827c478bd9Sstevel@tonic-gate */ 19837c478bd9Sstevel@tonic-gate tcp->tcp_rwnd = old_max_rwnd = rwnd; 19847c478bd9Sstevel@tonic-gate } 19857c478bd9Sstevel@tonic-gate 19867c478bd9Sstevel@tonic-gate /* 19877c478bd9Sstevel@tonic-gate * Increment the current rwnd by the amount the maximum grew (we 19887c478bd9Sstevel@tonic-gate * can not overwrite it since we might be in the middle of a 19897c478bd9Sstevel@tonic-gate * connection.) 19907c478bd9Sstevel@tonic-gate */ 19917c478bd9Sstevel@tonic-gate tcp->tcp_rwnd += rwnd - old_max_rwnd; 19927c478bd9Sstevel@tonic-gate U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, tcp->tcp_tcph->th_win); 19937c478bd9Sstevel@tonic-gate if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max) 19947c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_max = rwnd; 19957c478bd9Sstevel@tonic-gate tcp->tcp_rwnd_max = rwnd; 19967c478bd9Sstevel@tonic-gate 19977c478bd9Sstevel@tonic-gate return (rwnd); 19987c478bd9Sstevel@tonic-gate } 19997c478bd9Sstevel@tonic-gate 20007c478bd9Sstevel@tonic-gate /* 20017c478bd9Sstevel@tonic-gate * Extract option values from a tcp header. We put any found values into the 20027c478bd9Sstevel@tonic-gate * tcpopt struct and return a bitmask saying which options were found. 20037c478bd9Sstevel@tonic-gate */ 20047c478bd9Sstevel@tonic-gate static int 20057c478bd9Sstevel@tonic-gate tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt) 20067c478bd9Sstevel@tonic-gate { 20077c478bd9Sstevel@tonic-gate uchar_t *endp; 20087c478bd9Sstevel@tonic-gate int len; 20097c478bd9Sstevel@tonic-gate uint32_t mss; 20107c478bd9Sstevel@tonic-gate uchar_t *up = (uchar_t *)tcph; 20117c478bd9Sstevel@tonic-gate int found = 0; 20127c478bd9Sstevel@tonic-gate int32_t sack_len; 20137c478bd9Sstevel@tonic-gate tcp_seq sack_begin, sack_end; 20147c478bd9Sstevel@tonic-gate tcp_t *tcp; 20157c478bd9Sstevel@tonic-gate 20167c478bd9Sstevel@tonic-gate endp = up + TCP_HDR_LENGTH(tcph); 20177c478bd9Sstevel@tonic-gate up += TCP_MIN_HEADER_LENGTH; 20187c478bd9Sstevel@tonic-gate while (up < endp) { 20197c478bd9Sstevel@tonic-gate len = endp - up; 20207c478bd9Sstevel@tonic-gate switch (*up) { 20217c478bd9Sstevel@tonic-gate case TCPOPT_EOL: 20227c478bd9Sstevel@tonic-gate break; 20237c478bd9Sstevel@tonic-gate 20247c478bd9Sstevel@tonic-gate case TCPOPT_NOP: 20257c478bd9Sstevel@tonic-gate up++; 20267c478bd9Sstevel@tonic-gate continue; 20277c478bd9Sstevel@tonic-gate 20287c478bd9Sstevel@tonic-gate case TCPOPT_MAXSEG: 20297c478bd9Sstevel@tonic-gate if (len < TCPOPT_MAXSEG_LEN || 20307c478bd9Sstevel@tonic-gate up[1] != TCPOPT_MAXSEG_LEN) 20317c478bd9Sstevel@tonic-gate break; 20327c478bd9Sstevel@tonic-gate 20337c478bd9Sstevel@tonic-gate mss = BE16_TO_U16(up+2); 20347c478bd9Sstevel@tonic-gate /* Caller must handle tcp_mss_min and tcp_mss_max_* */ 20357c478bd9Sstevel@tonic-gate tcpopt->tcp_opt_mss = mss; 20367c478bd9Sstevel@tonic-gate found |= TCP_OPT_MSS_PRESENT; 20377c478bd9Sstevel@tonic-gate 20387c478bd9Sstevel@tonic-gate up += TCPOPT_MAXSEG_LEN; 20397c478bd9Sstevel@tonic-gate continue; 20407c478bd9Sstevel@tonic-gate 20417c478bd9Sstevel@tonic-gate case TCPOPT_WSCALE: 20427c478bd9Sstevel@tonic-gate if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN) 20437c478bd9Sstevel@tonic-gate break; 20447c478bd9Sstevel@tonic-gate 20457c478bd9Sstevel@tonic-gate if (up[2] > TCP_MAX_WINSHIFT) 20467c478bd9Sstevel@tonic-gate tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT; 20477c478bd9Sstevel@tonic-gate else 20487c478bd9Sstevel@tonic-gate tcpopt->tcp_opt_wscale = up[2]; 20497c478bd9Sstevel@tonic-gate found |= TCP_OPT_WSCALE_PRESENT; 
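/*
 * (The clamp just above follows RFC 1323: a received shift count
 * larger than 14 is used as 14, which keeps the scaled window below
 * 2^30 so that sequence-number arithmetic stays unambiguous.)
 */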
20507c478bd9Sstevel@tonic-gate 20517c478bd9Sstevel@tonic-gate up += TCPOPT_WS_LEN; 20527c478bd9Sstevel@tonic-gate continue; 20537c478bd9Sstevel@tonic-gate 20547c478bd9Sstevel@tonic-gate case TCPOPT_SACK_PERMITTED: 20557c478bd9Sstevel@tonic-gate if (len < TCPOPT_SACK_OK_LEN || 20567c478bd9Sstevel@tonic-gate up[1] != TCPOPT_SACK_OK_LEN) 20577c478bd9Sstevel@tonic-gate break; 20587c478bd9Sstevel@tonic-gate found |= TCP_OPT_SACK_OK_PRESENT; 20597c478bd9Sstevel@tonic-gate up += TCPOPT_SACK_OK_LEN; 20607c478bd9Sstevel@tonic-gate continue; 20617c478bd9Sstevel@tonic-gate 20627c478bd9Sstevel@tonic-gate case TCPOPT_SACK: 20637c478bd9Sstevel@tonic-gate if (len <= 2 || up[1] <= 2 || len < up[1]) 20647c478bd9Sstevel@tonic-gate break; 20657c478bd9Sstevel@tonic-gate 20667c478bd9Sstevel@tonic-gate /* If TCP is not interested in SACK blks... */ 20677c478bd9Sstevel@tonic-gate if ((tcp = tcpopt->tcp) == NULL) { 20687c478bd9Sstevel@tonic-gate up += up[1]; 20697c478bd9Sstevel@tonic-gate continue; 20707c478bd9Sstevel@tonic-gate } 20717c478bd9Sstevel@tonic-gate sack_len = up[1] - TCPOPT_HEADER_LEN; 20727c478bd9Sstevel@tonic-gate up += TCPOPT_HEADER_LEN; 20737c478bd9Sstevel@tonic-gate 20747c478bd9Sstevel@tonic-gate /* 20757c478bd9Sstevel@tonic-gate * If the list is empty, allocate one and assume 20767c478bd9Sstevel@tonic-gate * nothing is sack'ed. 20777c478bd9Sstevel@tonic-gate */ 20787c478bd9Sstevel@tonic-gate assert(tcp->tcp_sack_info != NULL); 20797c478bd9Sstevel@tonic-gate if (tcp->tcp_notsack_list == NULL) { 20807c478bd9Sstevel@tonic-gate tcp_notsack_update(&(tcp->tcp_notsack_list), 20817c478bd9Sstevel@tonic-gate tcp->tcp_suna, tcp->tcp_snxt, 20827c478bd9Sstevel@tonic-gate &(tcp->tcp_num_notsack_blk), 20837c478bd9Sstevel@tonic-gate &(tcp->tcp_cnt_notsack_list)); 20847c478bd9Sstevel@tonic-gate 20857c478bd9Sstevel@tonic-gate /* 20867c478bd9Sstevel@tonic-gate * Make sure tcp_notsack_list is not NULL. 20877c478bd9Sstevel@tonic-gate * This happens when kmem_alloc(KM_NOSLEEP) 20887c478bd9Sstevel@tonic-gate * returns NULL. 20897c478bd9Sstevel@tonic-gate */ 20907c478bd9Sstevel@tonic-gate if (tcp->tcp_notsack_list == NULL) { 20917c478bd9Sstevel@tonic-gate up += sack_len; 20927c478bd9Sstevel@tonic-gate continue; 20937c478bd9Sstevel@tonic-gate } 20947c478bd9Sstevel@tonic-gate tcp->tcp_fack = tcp->tcp_suna; 20957c478bd9Sstevel@tonic-gate } 20967c478bd9Sstevel@tonic-gate 20977c478bd9Sstevel@tonic-gate while (sack_len > 0) { 20987c478bd9Sstevel@tonic-gate if (up + 8 > endp) { 20997c478bd9Sstevel@tonic-gate up = endp; 21007c478bd9Sstevel@tonic-gate break; 21017c478bd9Sstevel@tonic-gate } 21027c478bd9Sstevel@tonic-gate sack_begin = BE32_TO_U32(up); 21037c478bd9Sstevel@tonic-gate up += 4; 21047c478bd9Sstevel@tonic-gate sack_end = BE32_TO_U32(up); 21057c478bd9Sstevel@tonic-gate up += 4; 21067c478bd9Sstevel@tonic-gate sack_len -= 8; 21077c478bd9Sstevel@tonic-gate /* 21087c478bd9Sstevel@tonic-gate * Bounds checking. Make sure the SACK 21097c478bd9Sstevel@tonic-gate * info is within tcp_suna and tcp_snxt. 21107c478bd9Sstevel@tonic-gate * If this SACK blk is out of bound, ignore 21117c478bd9Sstevel@tonic-gate * it but continue to parse the following 21127c478bd9Sstevel@tonic-gate * blks. 
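 *
 * A sketch with assumed sequence numbers: if tcp_suna = 1000 and
 * tcp_snxt = 5000, a SACK blk (2000, 3000) passes the checks below
 * and is handed to tcp_notsack_insert(), pulling tcp_fack up to
 * 3000 if it was below that, while (4500, 6000) is skipped because
 * its end lies beyond tcp_snxt and (3000, 2500) is skipped because
 * its end does not follow its begin.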
21137c478bd9Sstevel@tonic-gate */ 21147c478bd9Sstevel@tonic-gate if (SEQ_LEQ(sack_end, sack_begin) || 21157c478bd9Sstevel@tonic-gate SEQ_LT(sack_begin, tcp->tcp_suna) || 21167c478bd9Sstevel@tonic-gate SEQ_GT(sack_end, tcp->tcp_snxt)) { 21177c478bd9Sstevel@tonic-gate continue; 21187c478bd9Sstevel@tonic-gate } 21197c478bd9Sstevel@tonic-gate tcp_notsack_insert(&(tcp->tcp_notsack_list), 21207c478bd9Sstevel@tonic-gate sack_begin, sack_end, 21217c478bd9Sstevel@tonic-gate &(tcp->tcp_num_notsack_blk), 21227c478bd9Sstevel@tonic-gate &(tcp->tcp_cnt_notsack_list)); 21237c478bd9Sstevel@tonic-gate if (SEQ_GT(sack_end, tcp->tcp_fack)) { 21247c478bd9Sstevel@tonic-gate tcp->tcp_fack = sack_end; 21257c478bd9Sstevel@tonic-gate } 21267c478bd9Sstevel@tonic-gate } 21277c478bd9Sstevel@tonic-gate found |= TCP_OPT_SACK_PRESENT; 21287c478bd9Sstevel@tonic-gate continue; 21297c478bd9Sstevel@tonic-gate 21307c478bd9Sstevel@tonic-gate case TCPOPT_TSTAMP: 21317c478bd9Sstevel@tonic-gate if (len < TCPOPT_TSTAMP_LEN || 21327c478bd9Sstevel@tonic-gate up[1] != TCPOPT_TSTAMP_LEN) 21337c478bd9Sstevel@tonic-gate break; 21347c478bd9Sstevel@tonic-gate 21357c478bd9Sstevel@tonic-gate tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2); 21367c478bd9Sstevel@tonic-gate tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6); 21377c478bd9Sstevel@tonic-gate 21387c478bd9Sstevel@tonic-gate found |= TCP_OPT_TSTAMP_PRESENT; 21397c478bd9Sstevel@tonic-gate 21407c478bd9Sstevel@tonic-gate up += TCPOPT_TSTAMP_LEN; 21417c478bd9Sstevel@tonic-gate continue; 21427c478bd9Sstevel@tonic-gate 21437c478bd9Sstevel@tonic-gate default: 21447c478bd9Sstevel@tonic-gate if (len <= 1 || len < (int)up[1] || up[1] == 0) 21457c478bd9Sstevel@tonic-gate break; 21467c478bd9Sstevel@tonic-gate up += up[1]; 21477c478bd9Sstevel@tonic-gate continue; 21487c478bd9Sstevel@tonic-gate } 21497c478bd9Sstevel@tonic-gate break; 21507c478bd9Sstevel@tonic-gate } 21517c478bd9Sstevel@tonic-gate return (found); 21527c478bd9Sstevel@tonic-gate } 21537c478bd9Sstevel@tonic-gate 21547c478bd9Sstevel@tonic-gate /* 21557c478bd9Sstevel@tonic-gate * Set the mss associated with a particular tcp based on its current value, 21567c478bd9Sstevel@tonic-gate * and a new one passed in. Observe minimums and maximums, and reset 21577c478bd9Sstevel@tonic-gate * other state variables that we want to view as multiples of mss. 21587c478bd9Sstevel@tonic-gate * 21597c478bd9Sstevel@tonic-gate * This function is called in various places mainly because 21607c478bd9Sstevel@tonic-gate * 1) Various stuffs, tcp_mss, tcp_cwnd, ... need to be adjusted when the 21617c478bd9Sstevel@tonic-gate * other side's SYN/SYN-ACK packet arrives. 21627c478bd9Sstevel@tonic-gate * 2) PMTUd may get us a new MSS. 21637c478bd9Sstevel@tonic-gate * 3) If the other side stops sending us timestamp option, we need to 21647c478bd9Sstevel@tonic-gate * increase the MSS size to use the extra bytes available. 
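 *
 * For case 3 the adjustment is TCPOPT_REAL_TS_LEN (12) bytes; see the
 * tcp_mss_set() call in tcp_paws_check() below. For example, assuming
 * an Ethernet-derived MSS, a value trimmed to 1448 to leave room for
 * the timestamp option can go back to 1460 once the peer stops
 * sending timestamps.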
21657c478bd9Sstevel@tonic-gate  */
21667c478bd9Sstevel@tonic-gate static void
21677c478bd9Sstevel@tonic-gate tcp_mss_set(tcp_t *tcp, uint32_t mss)
21687c478bd9Sstevel@tonic-gate {
21697c478bd9Sstevel@tonic-gate 	uint32_t	mss_max;
21707c478bd9Sstevel@tonic-gate 
21717c478bd9Sstevel@tonic-gate 	mss_max = tcp_mss_max_ipv4;
21727c478bd9Sstevel@tonic-gate 
21737c478bd9Sstevel@tonic-gate 	if (mss < tcp_mss_min)
21747c478bd9Sstevel@tonic-gate 		mss = tcp_mss_min;
21757c478bd9Sstevel@tonic-gate 	if (mss > mss_max)
21767c478bd9Sstevel@tonic-gate 		mss = mss_max;
21777c478bd9Sstevel@tonic-gate 	/*
21787c478bd9Sstevel@tonic-gate 	 * Unless naglim has been set by our client to
21797c478bd9Sstevel@tonic-gate 	 * a non-mss value, force naglim to track mss.
21807c478bd9Sstevel@tonic-gate 	 * This can help to aggregate small writes.
21817c478bd9Sstevel@tonic-gate 	 */
21827c478bd9Sstevel@tonic-gate 	if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
21837c478bd9Sstevel@tonic-gate 		tcp->tcp_naglim = mss;
21847c478bd9Sstevel@tonic-gate 	/*
21857c478bd9Sstevel@tonic-gate 	 * TCP should be able to buffer at least 4 MSS of data for obvious
21867c478bd9Sstevel@tonic-gate 	 * performance reasons.
21877c478bd9Sstevel@tonic-gate 	 */
21887c478bd9Sstevel@tonic-gate 	if ((mss << 2) > tcp->tcp_xmit_hiwater)
21897c478bd9Sstevel@tonic-gate 		tcp->tcp_xmit_hiwater = mss << 2;
21907c478bd9Sstevel@tonic-gate 	tcp->tcp_mss = mss;
21917c478bd9Sstevel@tonic-gate 	/*
21927c478bd9Sstevel@tonic-gate 	 * Initialize cwnd according to draft-floyd-incr-init-win-01.txt.
21937c478bd9Sstevel@tonic-gate 	 * Previously, we used tcp_slow_start_initial to control the size
21947c478bd9Sstevel@tonic-gate 	 * of the initial cwnd. Now, when tcp_slow_start_initial * mss
21957c478bd9Sstevel@tonic-gate 	 * is smaller than the cwnd calculated from the formula suggested in
21967c478bd9Sstevel@tonic-gate 	 * the draft, we use tcp_slow_start_initial * mss as the cwnd.
21977c478bd9Sstevel@tonic-gate 	 * Otherwise, use the cwnd from the draft's formula. The default
21987c478bd9Sstevel@tonic-gate 	 * of tcp_slow_start_initial is 2.
21997c478bd9Sstevel@tonic-gate 	 */
22007c478bd9Sstevel@tonic-gate 	tcp->tcp_cwnd = MIN(tcp_slow_start_initial * mss,
22017c478bd9Sstevel@tonic-gate 	    MIN(4 * mss, MAX(2 * mss, 4380 / mss * mss)));
22027c478bd9Sstevel@tonic-gate 	tcp->tcp_cwnd_cnt = 0;
22037c478bd9Sstevel@tonic-gate }
22047c478bd9Sstevel@tonic-gate 
22057c478bd9Sstevel@tonic-gate /*
22067c478bd9Sstevel@tonic-gate  * Process all TCP options in a SYN segment.
22077c478bd9Sstevel@tonic-gate  *
22087c478bd9Sstevel@tonic-gate  * This function sets up the correct tcp_mss value according to the
22097c478bd9Sstevel@tonic-gate  * MSS option value and our header size. It also sets up the window scale
22107c478bd9Sstevel@tonic-gate  * and timestamp values, and initializes the SACK info blocks. But it does not
22117c478bd9Sstevel@tonic-gate  * change the receive window size after setting the tcp_mss value. The caller
22127c478bd9Sstevel@tonic-gate  * should make the appropriate change.
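 *
 * A typical caller (see the TCPS_SYN_SENT handling in tcp_rput_data()
 * below) follows this call with something like
 *
 *	(void) tcp_rwnd_set(tcp, MSS_ROUNDUP(tcp->tcp_rwnd, tcp->tcp_mss));
 *
 * so that the advertised receive window is re-rounded to the newly
 * agreed MSS.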
22137c478bd9Sstevel@tonic-gate */ 22147c478bd9Sstevel@tonic-gate void 22157c478bd9Sstevel@tonic-gate tcp_process_options(tcp_t *tcp, tcph_t *tcph) 22167c478bd9Sstevel@tonic-gate { 22177c478bd9Sstevel@tonic-gate int options; 22187c478bd9Sstevel@tonic-gate tcp_opt_t tcpopt; 22197c478bd9Sstevel@tonic-gate uint32_t mss_max; 22207c478bd9Sstevel@tonic-gate char *tmp_tcph; 22217c478bd9Sstevel@tonic-gate 22227c478bd9Sstevel@tonic-gate tcpopt.tcp = NULL; 22237c478bd9Sstevel@tonic-gate options = tcp_parse_options(tcph, &tcpopt); 22247c478bd9Sstevel@tonic-gate 22257c478bd9Sstevel@tonic-gate /* 22267c478bd9Sstevel@tonic-gate * Process MSS option. Note that MSS option value does not account 22277c478bd9Sstevel@tonic-gate * for IP or TCP options. This means that it is equal to MTU - minimum 22287c478bd9Sstevel@tonic-gate * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for 22297c478bd9Sstevel@tonic-gate * IPv6. 22307c478bd9Sstevel@tonic-gate */ 22317c478bd9Sstevel@tonic-gate if (!(options & TCP_OPT_MSS_PRESENT)) { 22327c478bd9Sstevel@tonic-gate tcpopt.tcp_opt_mss = tcp_mss_def_ipv4; 22337c478bd9Sstevel@tonic-gate } else { 22347c478bd9Sstevel@tonic-gate if (tcp->tcp_ipversion == IPV4_VERSION) 22357c478bd9Sstevel@tonic-gate mss_max = tcp_mss_max_ipv4; 22367c478bd9Sstevel@tonic-gate if (tcpopt.tcp_opt_mss < tcp_mss_min) 22377c478bd9Sstevel@tonic-gate tcpopt.tcp_opt_mss = tcp_mss_min; 22387c478bd9Sstevel@tonic-gate else if (tcpopt.tcp_opt_mss > mss_max) 22397c478bd9Sstevel@tonic-gate tcpopt.tcp_opt_mss = mss_max; 22407c478bd9Sstevel@tonic-gate } 22417c478bd9Sstevel@tonic-gate 22427c478bd9Sstevel@tonic-gate /* Process Window Scale option. */ 22437c478bd9Sstevel@tonic-gate if (options & TCP_OPT_WSCALE_PRESENT) { 22447c478bd9Sstevel@tonic-gate tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale; 22457c478bd9Sstevel@tonic-gate tcp->tcp_snd_ws_ok = B_TRUE; 22467c478bd9Sstevel@tonic-gate } else { 22477c478bd9Sstevel@tonic-gate tcp->tcp_snd_ws = B_FALSE; 22487c478bd9Sstevel@tonic-gate tcp->tcp_snd_ws_ok = B_FALSE; 22497c478bd9Sstevel@tonic-gate tcp->tcp_rcv_ws = B_FALSE; 22507c478bd9Sstevel@tonic-gate } 22517c478bd9Sstevel@tonic-gate 22527c478bd9Sstevel@tonic-gate /* Process Timestamp option. */ 22537c478bd9Sstevel@tonic-gate if ((options & TCP_OPT_TSTAMP_PRESENT) && 22547c478bd9Sstevel@tonic-gate (tcp->tcp_snd_ts_ok || !tcp->tcp_active_open)) { 22557c478bd9Sstevel@tonic-gate tmp_tcph = (char *)tcp->tcp_tcph; 22567c478bd9Sstevel@tonic-gate 22577c478bd9Sstevel@tonic-gate tcp->tcp_snd_ts_ok = B_TRUE; 22587c478bd9Sstevel@tonic-gate tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 22597c478bd9Sstevel@tonic-gate tcp->tcp_last_rcv_lbolt = prom_gettime(); 22607c478bd9Sstevel@tonic-gate assert(OK_32PTR(tmp_tcph)); 22617c478bd9Sstevel@tonic-gate assert(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 22627c478bd9Sstevel@tonic-gate 22637c478bd9Sstevel@tonic-gate /* Fill in our template header with basic timestamp option. 
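 *
 * The four bytes written below, together with the TSval/TSecr words
 * that are filled in later for each segment, form the usual 12-byte
 * (TCPOPT_REAL_TS_LEN) layout:
 *
 *	NOP(1) NOP(1) kind=8(1) len=10(1) TSval(4) TSecr(4)
 *
 * which is why tcp_hdr_len and tcp_tcp_hdr_len grow by 12 and
 * th_offset grows by 3 (three 32-bit words).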
 */
22647c478bd9Sstevel@tonic-gate 		tmp_tcph += tcp->tcp_tcp_hdr_len;
22657c478bd9Sstevel@tonic-gate 		tmp_tcph[0] = TCPOPT_NOP;
22667c478bd9Sstevel@tonic-gate 		tmp_tcph[1] = TCPOPT_NOP;
22677c478bd9Sstevel@tonic-gate 		tmp_tcph[2] = TCPOPT_TSTAMP;
22687c478bd9Sstevel@tonic-gate 		tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
22697c478bd9Sstevel@tonic-gate 		tcp->tcp_hdr_len += TCPOPT_REAL_TS_LEN;
22707c478bd9Sstevel@tonic-gate 		tcp->tcp_tcp_hdr_len += TCPOPT_REAL_TS_LEN;
22717c478bd9Sstevel@tonic-gate 		tcp->tcp_tcph->th_offset_and_rsrvd[0] += (3 << 4);
22727c478bd9Sstevel@tonic-gate 	} else {
22737c478bd9Sstevel@tonic-gate 		tcp->tcp_snd_ts_ok = B_FALSE;
22747c478bd9Sstevel@tonic-gate 	}
22757c478bd9Sstevel@tonic-gate 
22767c478bd9Sstevel@tonic-gate 	/*
22777c478bd9Sstevel@tonic-gate 	 * Process SACK options. If SACK is enabled for this connection,
22787c478bd9Sstevel@tonic-gate 	 * then allocate the SACK info structure.
22797c478bd9Sstevel@tonic-gate 	 */
22807c478bd9Sstevel@tonic-gate 	if ((options & TCP_OPT_SACK_OK_PRESENT) &&
22817c478bd9Sstevel@tonic-gate 	    (tcp->tcp_snd_sack_ok ||
22827c478bd9Sstevel@tonic-gate 	    (tcp_sack_permitted != 0 && !tcp->tcp_active_open))) {
22837c478bd9Sstevel@tonic-gate 		/* This should be true only in the passive case. */
22847c478bd9Sstevel@tonic-gate 		if (tcp->tcp_sack_info == NULL) {
22857c478bd9Sstevel@tonic-gate 			tcp->tcp_sack_info = (tcp_sack_info_t *)bkmem_zalloc(
22867c478bd9Sstevel@tonic-gate 			    sizeof (tcp_sack_info_t));
22877c478bd9Sstevel@tonic-gate 		}
22887c478bd9Sstevel@tonic-gate 		if (tcp->tcp_sack_info == NULL) {
22897c478bd9Sstevel@tonic-gate 			tcp->tcp_snd_sack_ok = B_FALSE;
22907c478bd9Sstevel@tonic-gate 		} else {
22917c478bd9Sstevel@tonic-gate 			tcp->tcp_snd_sack_ok = B_TRUE;
22927c478bd9Sstevel@tonic-gate 			if (tcp->tcp_snd_ts_ok) {
22937c478bd9Sstevel@tonic-gate 				tcp->tcp_max_sack_blk = 3;
22947c478bd9Sstevel@tonic-gate 			} else {
22957c478bd9Sstevel@tonic-gate 				tcp->tcp_max_sack_blk = 4;
22967c478bd9Sstevel@tonic-gate 			}
22977c478bd9Sstevel@tonic-gate 		}
22987c478bd9Sstevel@tonic-gate 	} else {
22997c478bd9Sstevel@tonic-gate 		/*
23007c478bd9Sstevel@tonic-gate 		 * Reset tcp_snd_sack_ok to B_FALSE so that
23017c478bd9Sstevel@tonic-gate 		 * no SACK info will be used for this
23027c478bd9Sstevel@tonic-gate 		 * connection. This assumes that SACK usage
23037c478bd9Sstevel@tonic-gate 		 * permission must be negotiated; this may
23047c478bd9Sstevel@tonic-gate 		 * need to change once that is clarified.
23057c478bd9Sstevel@tonic-gate 		 */
23067c478bd9Sstevel@tonic-gate 		if (tcp->tcp_sack_info != NULL) {
23077c478bd9Sstevel@tonic-gate 			bkmem_free((caddr_t)tcp->tcp_sack_info,
23087c478bd9Sstevel@tonic-gate 			    sizeof (tcp_sack_info_t));
23097c478bd9Sstevel@tonic-gate 			tcp->tcp_sack_info = NULL;
23107c478bd9Sstevel@tonic-gate 		}
23117c478bd9Sstevel@tonic-gate 		tcp->tcp_snd_sack_ok = B_FALSE;
23127c478bd9Sstevel@tonic-gate 	}
23137c478bd9Sstevel@tonic-gate 
23147c478bd9Sstevel@tonic-gate 	/*
23157c478bd9Sstevel@tonic-gate 	 * Now that we know the exact TCP/IP header length, subtract
23167c478bd9Sstevel@tonic-gate 	 * it from tcp_mss to get our side's MSS.
23177c478bd9Sstevel@tonic-gate 	 */
23187c478bd9Sstevel@tonic-gate 	tcp->tcp_mss -= tcp->tcp_hdr_len;
23197c478bd9Sstevel@tonic-gate 	/*
23207c478bd9Sstevel@tonic-gate 	 * Here we assume that the other side's header size will be equal to
23217c478bd9Sstevel@tonic-gate 	 * our header size. We calculate the real MSS accordingly. We need to
23227c478bd9Sstevel@tonic-gate 	 * take into account any additional things IPsec puts in.
23237c478bd9Sstevel@tonic-gate * 23247c478bd9Sstevel@tonic-gate * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) 23257c478bd9Sstevel@tonic-gate */ 23267c478bd9Sstevel@tonic-gate tcpopt.tcp_opt_mss -= tcp->tcp_hdr_len - 23277c478bd9Sstevel@tonic-gate (IP_SIMPLE_HDR_LENGTH + TCP_MIN_HEADER_LENGTH); 23287c478bd9Sstevel@tonic-gate 23297c478bd9Sstevel@tonic-gate /* 23307c478bd9Sstevel@tonic-gate * Set MSS to the smaller one of both ends of the connection. 23317c478bd9Sstevel@tonic-gate * We should not have called tcp_mss_set() before, but our 23327c478bd9Sstevel@tonic-gate * side of the MSS should have been set to a proper value 23337c478bd9Sstevel@tonic-gate * by tcp_adapt_ire(). tcp_mss_set() will also set up the 23347c478bd9Sstevel@tonic-gate * STREAM head parameters properly. 23357c478bd9Sstevel@tonic-gate * 23367c478bd9Sstevel@tonic-gate * If we have a larger-than-16-bit window but the other side 23377c478bd9Sstevel@tonic-gate * didn't want to do window scale, tcp_rwnd_set() will take 23387c478bd9Sstevel@tonic-gate * care of that. 23397c478bd9Sstevel@tonic-gate */ 23407c478bd9Sstevel@tonic-gate tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss)); 23417c478bd9Sstevel@tonic-gate } 23427c478bd9Sstevel@tonic-gate 23437c478bd9Sstevel@tonic-gate /* 23447c478bd9Sstevel@tonic-gate * This function does PAWS protection check. Returns B_TRUE if the 23457c478bd9Sstevel@tonic-gate * segment passes the PAWS test, else returns B_FALSE. 23467c478bd9Sstevel@tonic-gate */ 23477c478bd9Sstevel@tonic-gate boolean_t 23487c478bd9Sstevel@tonic-gate tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp) 23497c478bd9Sstevel@tonic-gate { 23507c478bd9Sstevel@tonic-gate uint8_t flags; 23517c478bd9Sstevel@tonic-gate int options; 23527c478bd9Sstevel@tonic-gate uint8_t *up; 23537c478bd9Sstevel@tonic-gate 23547c478bd9Sstevel@tonic-gate flags = (unsigned int)tcph->th_flags[0] & 0xFF; 23557c478bd9Sstevel@tonic-gate /* 23567c478bd9Sstevel@tonic-gate * If timestamp option is aligned nicely, get values inline, 23577c478bd9Sstevel@tonic-gate * otherwise call general routine to parse. Only do that 23587c478bd9Sstevel@tonic-gate * if timestamp is the only option. 23597c478bd9Sstevel@tonic-gate */ 23607c478bd9Sstevel@tonic-gate if (TCP_HDR_LENGTH(tcph) == (uint32_t)TCP_MIN_HEADER_LENGTH + 23617c478bd9Sstevel@tonic-gate TCPOPT_REAL_TS_LEN && 23627c478bd9Sstevel@tonic-gate OK_32PTR((up = ((uint8_t *)tcph) + 23637c478bd9Sstevel@tonic-gate TCP_MIN_HEADER_LENGTH)) && 23647c478bd9Sstevel@tonic-gate *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) { 23657c478bd9Sstevel@tonic-gate tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4)); 23667c478bd9Sstevel@tonic-gate tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8)); 23677c478bd9Sstevel@tonic-gate 23687c478bd9Sstevel@tonic-gate options = TCP_OPT_TSTAMP_PRESENT; 23697c478bd9Sstevel@tonic-gate } else { 23707c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok) { 23717c478bd9Sstevel@tonic-gate tcpoptp->tcp = tcp; 23727c478bd9Sstevel@tonic-gate } else { 23737c478bd9Sstevel@tonic-gate tcpoptp->tcp = NULL; 23747c478bd9Sstevel@tonic-gate } 23757c478bd9Sstevel@tonic-gate options = tcp_parse_options(tcph, tcpoptp); 23767c478bd9Sstevel@tonic-gate } 23777c478bd9Sstevel@tonic-gate 23787c478bd9Sstevel@tonic-gate if (options & TCP_OPT_TSTAMP_PRESENT) { 23797c478bd9Sstevel@tonic-gate /* 23807c478bd9Sstevel@tonic-gate * Do PAWS per RFC 1323 section 4.2. Accept RST 23817c478bd9Sstevel@tonic-gate * regardless of the timestamp, page 18 RFC 1323.bis. 
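 *
 * A sketch of the decision with assumed millisecond values: if
 * tcp_ts_recent = 5000 and a non-RST segment arrives carrying
 * ts_val = 4000, TSTMP_LT(4000, 5000) is true, so the segment is
 * rejected unless the connection has been idle for more than
 * PAWS_TIMEOUT (24 days), in which case tcp_ts_recent is simply
 * reseeded from the segment and it is accepted. Since the TSTMP_*
 * macros compare via signed 32-bit subtraction, a ts_val that has
 * wrapped around 2^32 still compares as newer than tcp_ts_recent.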
23827c478bd9Sstevel@tonic-gate */ 23837c478bd9Sstevel@tonic-gate if ((flags & TH_RST) == 0 && 23847c478bd9Sstevel@tonic-gate TSTMP_LT(tcpoptp->tcp_opt_ts_val, 23857c478bd9Sstevel@tonic-gate tcp->tcp_ts_recent)) { 23867c478bd9Sstevel@tonic-gate if (TSTMP_LT(prom_gettime(), 23877c478bd9Sstevel@tonic-gate tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) { 23887c478bd9Sstevel@tonic-gate /* This segment is not acceptable. */ 23897c478bd9Sstevel@tonic-gate return (B_FALSE); 23907c478bd9Sstevel@tonic-gate } else { 23917c478bd9Sstevel@tonic-gate /* 23927c478bd9Sstevel@tonic-gate * Connection has been idle for 23937c478bd9Sstevel@tonic-gate * too long. Reset the timestamp 23947c478bd9Sstevel@tonic-gate * and assume the segment is valid. 23957c478bd9Sstevel@tonic-gate */ 23967c478bd9Sstevel@tonic-gate tcp->tcp_ts_recent = 23977c478bd9Sstevel@tonic-gate tcpoptp->tcp_opt_ts_val; 23987c478bd9Sstevel@tonic-gate } 23997c478bd9Sstevel@tonic-gate } 24007c478bd9Sstevel@tonic-gate } else { 24017c478bd9Sstevel@tonic-gate /* 24027c478bd9Sstevel@tonic-gate * If we don't get a timestamp on every packet, we 24037c478bd9Sstevel@tonic-gate * figure we can't really trust 'em, so we stop sending 24047c478bd9Sstevel@tonic-gate * and parsing them. 24057c478bd9Sstevel@tonic-gate */ 24067c478bd9Sstevel@tonic-gate tcp->tcp_snd_ts_ok = B_FALSE; 24077c478bd9Sstevel@tonic-gate 24087c478bd9Sstevel@tonic-gate tcp->tcp_hdr_len -= TCPOPT_REAL_TS_LEN; 24097c478bd9Sstevel@tonic-gate tcp->tcp_tcp_hdr_len -= TCPOPT_REAL_TS_LEN; 24107c478bd9Sstevel@tonic-gate tcp->tcp_tcph->th_offset_and_rsrvd[0] -= (3 << 4); 24117c478bd9Sstevel@tonic-gate tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN); 24127c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok) { 24137c478bd9Sstevel@tonic-gate assert(tcp->tcp_sack_info != NULL); 24147c478bd9Sstevel@tonic-gate tcp->tcp_max_sack_blk = 4; 24157c478bd9Sstevel@tonic-gate } 24167c478bd9Sstevel@tonic-gate } 24177c478bd9Sstevel@tonic-gate return (B_TRUE); 24187c478bd9Sstevel@tonic-gate } 24197c478bd9Sstevel@tonic-gate 24207c478bd9Sstevel@tonic-gate /* 24217c478bd9Sstevel@tonic-gate * tcp_get_seg_mp() is called to get the pointer to a segment in the 24227c478bd9Sstevel@tonic-gate * send queue which starts at the given seq. no. 24237c478bd9Sstevel@tonic-gate * 24247c478bd9Sstevel@tonic-gate * Parameters: 24257c478bd9Sstevel@tonic-gate * tcp_t *tcp: the tcp instance pointer. 24267c478bd9Sstevel@tonic-gate * uint32_t seq: the starting seq. no of the requested segment. 24277c478bd9Sstevel@tonic-gate * int32_t *off: after the execution, *off will be the offset to 24287c478bd9Sstevel@tonic-gate * the returned mblk which points to the requested seq no. 24297c478bd9Sstevel@tonic-gate * 24307c478bd9Sstevel@tonic-gate * Return: 24317c478bd9Sstevel@tonic-gate * A mblk_t pointer pointing to the requested segment in send queue. 24327c478bd9Sstevel@tonic-gate */ 24337c478bd9Sstevel@tonic-gate static mblk_t * 24347c478bd9Sstevel@tonic-gate tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off) 24357c478bd9Sstevel@tonic-gate { 24367c478bd9Sstevel@tonic-gate int32_t cnt; 24377c478bd9Sstevel@tonic-gate mblk_t *mp; 24387c478bd9Sstevel@tonic-gate 24397c478bd9Sstevel@tonic-gate /* Defensive coding. Make sure we don't send incorrect data. 
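 *
 * A walk-through with assumed numbers: with tcp_suna = 1000, a
 * request for seq = 1500 against an xmit list of two mblks holding
 * 400 and 800 bytes starts with cnt = 500; the first mblk drops cnt
 * to 100, the second drives it negative, so the loop adds the mblk
 * size back and returns that second mblk with *off = 100, i.e. byte
 * 1500 sits 100 bytes into the mblk that begins at sequence 1400.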
*/ 24407c478bd9Sstevel@tonic-gate if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt) || 24417c478bd9Sstevel@tonic-gate off == NULL) { 24427c478bd9Sstevel@tonic-gate return (NULL); 24437c478bd9Sstevel@tonic-gate } 24447c478bd9Sstevel@tonic-gate cnt = seq - tcp->tcp_suna; 24457c478bd9Sstevel@tonic-gate mp = tcp->tcp_xmit_head; 24467c478bd9Sstevel@tonic-gate while (cnt > 0 && mp) { 24477c478bd9Sstevel@tonic-gate cnt -= mp->b_wptr - mp->b_rptr; 24487c478bd9Sstevel@tonic-gate if (cnt < 0) { 24497c478bd9Sstevel@tonic-gate cnt += mp->b_wptr - mp->b_rptr; 24507c478bd9Sstevel@tonic-gate break; 24517c478bd9Sstevel@tonic-gate } 24527c478bd9Sstevel@tonic-gate mp = mp->b_cont; 24537c478bd9Sstevel@tonic-gate } 24547c478bd9Sstevel@tonic-gate assert(mp != NULL); 24557c478bd9Sstevel@tonic-gate *off = cnt; 24567c478bd9Sstevel@tonic-gate return (mp); 24577c478bd9Sstevel@tonic-gate } 24587c478bd9Sstevel@tonic-gate 24597c478bd9Sstevel@tonic-gate /* 24607c478bd9Sstevel@tonic-gate * This function handles all retransmissions if SACK is enabled for this 24617c478bd9Sstevel@tonic-gate * connection. First it calculates how many segments can be retransmitted 24627c478bd9Sstevel@tonic-gate * based on tcp_pipe. Then it goes thru the notsack list to find eligible 24637c478bd9Sstevel@tonic-gate * segments. A segment is eligible if sack_cnt for that segment is greater 24647c478bd9Sstevel@tonic-gate * than or equal tcp_dupack_fast_retransmit. After it has retransmitted 24657c478bd9Sstevel@tonic-gate * all eligible segments, it checks to see if TCP can send some new segments 24667c478bd9Sstevel@tonic-gate * (fast recovery). If it can, it returns 1. Otherwise it returns 0. 24677c478bd9Sstevel@tonic-gate * 24687c478bd9Sstevel@tonic-gate * Parameters: 24697c478bd9Sstevel@tonic-gate * tcp_t *tcp: the tcp structure of the connection. 24707c478bd9Sstevel@tonic-gate * 24717c478bd9Sstevel@tonic-gate * Return: 24727c478bd9Sstevel@tonic-gate * 1 if the pipe is not full (new data can be sent), 0 otherwise 24737c478bd9Sstevel@tonic-gate */ 24747c478bd9Sstevel@tonic-gate static int32_t 24757c478bd9Sstevel@tonic-gate tcp_sack_rxmit(tcp_t *tcp, int sock_id) 24767c478bd9Sstevel@tonic-gate { 24777c478bd9Sstevel@tonic-gate notsack_blk_t *notsack_blk; 24787c478bd9Sstevel@tonic-gate int32_t usable_swnd; 24797c478bd9Sstevel@tonic-gate int32_t mss; 24807c478bd9Sstevel@tonic-gate uint32_t seg_len; 24817c478bd9Sstevel@tonic-gate mblk_t *xmit_mp; 24827c478bd9Sstevel@tonic-gate 24837c478bd9Sstevel@tonic-gate assert(tcp->tcp_sack_info != NULL); 24847c478bd9Sstevel@tonic-gate assert(tcp->tcp_notsack_list != NULL); 24857c478bd9Sstevel@tonic-gate assert(tcp->tcp_rexmit == B_FALSE); 24867c478bd9Sstevel@tonic-gate 24877c478bd9Sstevel@tonic-gate /* Defensive coding in case there is a bug... */ 24887c478bd9Sstevel@tonic-gate if (tcp->tcp_notsack_list == NULL) { 24897c478bd9Sstevel@tonic-gate return (0); 24907c478bd9Sstevel@tonic-gate } 24917c478bd9Sstevel@tonic-gate notsack_blk = tcp->tcp_notsack_list; 24927c478bd9Sstevel@tonic-gate mss = tcp->tcp_mss; 24937c478bd9Sstevel@tonic-gate 24947c478bd9Sstevel@tonic-gate /* 24957c478bd9Sstevel@tonic-gate * Limit the num of outstanding data in the network to be 24967c478bd9Sstevel@tonic-gate * tcp_cwnd_ssthresh, which is half of the original congestion wnd. 24977c478bd9Sstevel@tonic-gate */ 24987c478bd9Sstevel@tonic-gate usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 24997c478bd9Sstevel@tonic-gate 25007c478bd9Sstevel@tonic-gate /* At least retransmit 1 MSS of data. 
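 *
 * For example, with assumed values tcp_cwnd_ssthresh = 8760 and
 * tcp_pipe = 10220, the subtraction above goes negative, so
 * usable_swnd is forced up to one MSS and at most one segment is
 * retransmitted in this pass rather than nothing at all.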
*/ 25017c478bd9Sstevel@tonic-gate if (usable_swnd <= 0) { 25027c478bd9Sstevel@tonic-gate usable_swnd = mss; 25037c478bd9Sstevel@tonic-gate } 25047c478bd9Sstevel@tonic-gate 25057c478bd9Sstevel@tonic-gate /* Make sure no new RTT samples will be taken. */ 25067c478bd9Sstevel@tonic-gate tcp->tcp_csuna = tcp->tcp_snxt; 25077c478bd9Sstevel@tonic-gate 25087c478bd9Sstevel@tonic-gate notsack_blk = tcp->tcp_notsack_list; 25097c478bd9Sstevel@tonic-gate while (usable_swnd > 0) { 25107c478bd9Sstevel@tonic-gate mblk_t *snxt_mp, *tmp_mp; 25117c478bd9Sstevel@tonic-gate tcp_seq begin = tcp->tcp_sack_snxt; 25127c478bd9Sstevel@tonic-gate tcp_seq end; 25137c478bd9Sstevel@tonic-gate int32_t off; 25147c478bd9Sstevel@tonic-gate 25157c478bd9Sstevel@tonic-gate for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) { 25167c478bd9Sstevel@tonic-gate if (SEQ_GT(notsack_blk->end, begin) && 25177c478bd9Sstevel@tonic-gate (notsack_blk->sack_cnt >= 25187c478bd9Sstevel@tonic-gate tcp_dupack_fast_retransmit)) { 25197c478bd9Sstevel@tonic-gate end = notsack_blk->end; 25207c478bd9Sstevel@tonic-gate if (SEQ_LT(begin, notsack_blk->begin)) { 25217c478bd9Sstevel@tonic-gate begin = notsack_blk->begin; 25227c478bd9Sstevel@tonic-gate } 25237c478bd9Sstevel@tonic-gate break; 25247c478bd9Sstevel@tonic-gate } 25257c478bd9Sstevel@tonic-gate } 25267c478bd9Sstevel@tonic-gate /* 25277c478bd9Sstevel@tonic-gate * All holes are filled. Manipulate tcp_cwnd to send more 25287c478bd9Sstevel@tonic-gate * if we can. Note that after the SACK recovery, tcp_cwnd is 25297c478bd9Sstevel@tonic-gate * set to tcp_cwnd_ssthresh. 25307c478bd9Sstevel@tonic-gate */ 25317c478bd9Sstevel@tonic-gate if (notsack_blk == NULL) { 25327c478bd9Sstevel@tonic-gate usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 25337c478bd9Sstevel@tonic-gate if (usable_swnd <= 0) { 25347c478bd9Sstevel@tonic-gate tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna; 25357c478bd9Sstevel@tonic-gate assert(tcp->tcp_cwnd > 0); 25367c478bd9Sstevel@tonic-gate return (0); 25377c478bd9Sstevel@tonic-gate } else { 25387c478bd9Sstevel@tonic-gate usable_swnd = usable_swnd / mss; 25397c478bd9Sstevel@tonic-gate tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna + 25407c478bd9Sstevel@tonic-gate MAX(usable_swnd * mss, mss); 25417c478bd9Sstevel@tonic-gate return (1); 25427c478bd9Sstevel@tonic-gate } 25437c478bd9Sstevel@tonic-gate } 25447c478bd9Sstevel@tonic-gate 25457c478bd9Sstevel@tonic-gate /* 25467c478bd9Sstevel@tonic-gate * Note that we may send more than usable_swnd allows here 25477c478bd9Sstevel@tonic-gate * because of round off, but no more than 1 MSS of data. 25487c478bd9Sstevel@tonic-gate */ 25497c478bd9Sstevel@tonic-gate seg_len = end - begin; 25507c478bd9Sstevel@tonic-gate if (seg_len > mss) 25517c478bd9Sstevel@tonic-gate seg_len = mss; 25527c478bd9Sstevel@tonic-gate snxt_mp = tcp_get_seg_mp(tcp, begin, &off); 25537c478bd9Sstevel@tonic-gate assert(snxt_mp != NULL); 25547c478bd9Sstevel@tonic-gate /* This should not happen. Defensive coding again... 
 */
25557c478bd9Sstevel@tonic-gate 		if (snxt_mp == NULL) {
25567c478bd9Sstevel@tonic-gate 			return (0);
25577c478bd9Sstevel@tonic-gate 		}
25587c478bd9Sstevel@tonic-gate 
25597c478bd9Sstevel@tonic-gate 		xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off,
25607c478bd9Sstevel@tonic-gate 		    &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE);
25617c478bd9Sstevel@tonic-gate 
25627c478bd9Sstevel@tonic-gate 		if (xmit_mp == NULL)
25637c478bd9Sstevel@tonic-gate 			return (0);
25647c478bd9Sstevel@tonic-gate 
25657c478bd9Sstevel@tonic-gate 		usable_swnd -= seg_len;
25667c478bd9Sstevel@tonic-gate 		tcp->tcp_pipe += seg_len;
25677c478bd9Sstevel@tonic-gate 		tcp->tcp_sack_snxt = begin + seg_len;
25687c478bd9Sstevel@tonic-gate 		TCP_DUMP_PACKET("tcp_sack_rxmit", xmit_mp);
25697c478bd9Sstevel@tonic-gate 		(void) ipv4_tcp_output(sock_id, xmit_mp);
25707c478bd9Sstevel@tonic-gate 		freeb(xmit_mp);
25717c478bd9Sstevel@tonic-gate 
25727c478bd9Sstevel@tonic-gate 		/*
25737c478bd9Sstevel@tonic-gate 		 * Update the send timestamp to avoid false retransmission.
2574*53391bafSeota 		 * Note: use uintptr_t to suppress the gcc warning.
25757c478bd9Sstevel@tonic-gate 		 */
2576*53391bafSeota 		snxt_mp->b_prev = (mblk_t *)(uintptr_t)prom_gettime();
25777c478bd9Sstevel@tonic-gate 
25787c478bd9Sstevel@tonic-gate 		BUMP_MIB(tcp_mib.tcpRetransSegs);
25797c478bd9Sstevel@tonic-gate 		UPDATE_MIB(tcp_mib.tcpRetransBytes, seg_len);
25807c478bd9Sstevel@tonic-gate 		BUMP_MIB(tcp_mib.tcpOutSackRetransSegs);
25817c478bd9Sstevel@tonic-gate 		/*
25827c478bd9Sstevel@tonic-gate 		 * Update tcp_rexmit_max to extend this SACK recovery phase.
25837c478bd9Sstevel@tonic-gate 		 * This happens when new data sent during fast recovery is
25847c478bd9Sstevel@tonic-gate 		 * also lost. If TCP retransmits that new data, it needs
25857c478bd9Sstevel@tonic-gate 		 * to extend the SACK recovery phase to avoid starting another
25867c478bd9Sstevel@tonic-gate 		 * fast retransmit/recovery unnecessarily.
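 *
 * For instance, with assumed numbers: if this recovery episode began
 * with tcp_rexmit_max = 20000 and new data up to 23000 was sent and
 * then lost, the SACK retransmissions above can push tcp_sack_snxt
 * past 20000; moving tcp_rexmit_max up to tcp_sack_snxt keeps those
 * retransmissions from being treated as the start of a brand new
 * loss recovery episode.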
25877c478bd9Sstevel@tonic-gate */ 25887c478bd9Sstevel@tonic-gate if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) { 25897c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_max = tcp->tcp_sack_snxt; 25907c478bd9Sstevel@tonic-gate } 25917c478bd9Sstevel@tonic-gate } 25927c478bd9Sstevel@tonic-gate return (0); 25937c478bd9Sstevel@tonic-gate } 25947c478bd9Sstevel@tonic-gate 25957c478bd9Sstevel@tonic-gate static void 25967c478bd9Sstevel@tonic-gate tcp_rput_data(tcp_t *tcp, mblk_t *mp, int sock_id) 25977c478bd9Sstevel@tonic-gate { 25987c478bd9Sstevel@tonic-gate uchar_t *rptr; 25997c478bd9Sstevel@tonic-gate struct ip *iph; 26007c478bd9Sstevel@tonic-gate tcp_t *tcp1; 26017c478bd9Sstevel@tonic-gate tcpha_t *tcph; 26027c478bd9Sstevel@tonic-gate uint32_t seg_ack; 26037c478bd9Sstevel@tonic-gate int seg_len; 26047c478bd9Sstevel@tonic-gate uint_t ip_hdr_len; 26057c478bd9Sstevel@tonic-gate uint32_t seg_seq; 26067c478bd9Sstevel@tonic-gate mblk_t *mp1; 26077c478bd9Sstevel@tonic-gate uint_t flags; 26087c478bd9Sstevel@tonic-gate uint32_t new_swnd = 0; 26097c478bd9Sstevel@tonic-gate int mss; 26107c478bd9Sstevel@tonic-gate boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 26117c478bd9Sstevel@tonic-gate int32_t gap; 26127c478bd9Sstevel@tonic-gate int32_t rgap; 26137c478bd9Sstevel@tonic-gate tcp_opt_t tcpopt; 26147c478bd9Sstevel@tonic-gate int32_t bytes_acked; 26157c478bd9Sstevel@tonic-gate int npkt; 26167c478bd9Sstevel@tonic-gate uint32_t cwnd; 26177c478bd9Sstevel@tonic-gate uint32_t add; 26187c478bd9Sstevel@tonic-gate 26197c478bd9Sstevel@tonic-gate #ifdef DEBUG 26207c478bd9Sstevel@tonic-gate printf("tcp_rput_data sock %d mp %x mp_datap %x #################\n", 26217c478bd9Sstevel@tonic-gate sock_id, mp, mp->b_datap); 26227c478bd9Sstevel@tonic-gate #endif 26237c478bd9Sstevel@tonic-gate 26247c478bd9Sstevel@tonic-gate /* Dump the packet when debugging. */ 26257c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_rput_data", mp); 26267c478bd9Sstevel@tonic-gate 26277c478bd9Sstevel@tonic-gate assert(OK_32PTR(mp->b_rptr)); 26287c478bd9Sstevel@tonic-gate 26297c478bd9Sstevel@tonic-gate rptr = mp->b_rptr; 26307c478bd9Sstevel@tonic-gate iph = (struct ip *)rptr; 26317c478bd9Sstevel@tonic-gate ip_hdr_len = IPH_HDR_LENGTH(rptr); 26327c478bd9Sstevel@tonic-gate if (ip_hdr_len != IP_SIMPLE_HDR_LENGTH) { 26337c478bd9Sstevel@tonic-gate #ifdef DEBUG 26347c478bd9Sstevel@tonic-gate printf("Not simple IP header\n"); 26357c478bd9Sstevel@tonic-gate #endif 26367c478bd9Sstevel@tonic-gate /* We cannot handle IP option yet... */ 26377c478bd9Sstevel@tonic-gate tcp_drops++; 26387c478bd9Sstevel@tonic-gate freeb(mp); 26397c478bd9Sstevel@tonic-gate return; 26407c478bd9Sstevel@tonic-gate } 26417c478bd9Sstevel@tonic-gate /* The TCP header must be aligned. */ 26427c478bd9Sstevel@tonic-gate tcph = (tcpha_t *)&rptr[ip_hdr_len]; 26437c478bd9Sstevel@tonic-gate seg_seq = ntohl(tcph->tha_seq); 26447c478bd9Sstevel@tonic-gate seg_ack = ntohl(tcph->tha_ack); 26457c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 26467c478bd9Sstevel@tonic-gate seg_len = (int)(mp->b_wptr - rptr) - 26477c478bd9Sstevel@tonic-gate (ip_hdr_len + TCP_HDR_LENGTH(((tcph_t *)tcph))); 26487c478bd9Sstevel@tonic-gate /* In inetboot, b_cont should always be NULL. */ 26497c478bd9Sstevel@tonic-gate assert(mp->b_cont == NULL); 26507c478bd9Sstevel@tonic-gate 26517c478bd9Sstevel@tonic-gate /* Verify the checksum. 
*/ 26527c478bd9Sstevel@tonic-gate if (tcp_verify_cksum(mp) < 0) { 26537c478bd9Sstevel@tonic-gate #ifdef DEBUG 26547c478bd9Sstevel@tonic-gate printf("tcp_rput_data: wrong cksum\n"); 26557c478bd9Sstevel@tonic-gate #endif 26567c478bd9Sstevel@tonic-gate freemsg(mp); 26577c478bd9Sstevel@tonic-gate return; 26587c478bd9Sstevel@tonic-gate } 26597c478bd9Sstevel@tonic-gate 26607c478bd9Sstevel@tonic-gate /* 26617c478bd9Sstevel@tonic-gate * This segment is not for us, try to find its 26627c478bd9Sstevel@tonic-gate * intended receiver. 26637c478bd9Sstevel@tonic-gate */ 26647c478bd9Sstevel@tonic-gate if (tcp == NULL || 26657c478bd9Sstevel@tonic-gate tcph->tha_lport != tcp->tcp_fport || 26667c478bd9Sstevel@tonic-gate tcph->tha_fport != tcp->tcp_lport || 26677c478bd9Sstevel@tonic-gate iph->ip_src.s_addr != tcp->tcp_remote || 26687c478bd9Sstevel@tonic-gate iph->ip_dst.s_addr != tcp->tcp_bound_source) { 26697c478bd9Sstevel@tonic-gate #ifdef DEBUG 26707c478bd9Sstevel@tonic-gate printf("tcp_rput_data: not for us, state %d\n", 26717c478bd9Sstevel@tonic-gate tcp->tcp_state); 26727c478bd9Sstevel@tonic-gate #endif 26737c478bd9Sstevel@tonic-gate /* 26747c478bd9Sstevel@tonic-gate * First try to find a established connection. If none 26757c478bd9Sstevel@tonic-gate * is found, look for a listener. 26767c478bd9Sstevel@tonic-gate * 26777c478bd9Sstevel@tonic-gate * If a listener is found, we need to check to see if the 26787c478bd9Sstevel@tonic-gate * incoming segment is for one of its eagers. If it is, 26797c478bd9Sstevel@tonic-gate * give it to the eager. If not, listener should take care 26807c478bd9Sstevel@tonic-gate * of it. 26817c478bd9Sstevel@tonic-gate */ 26827c478bd9Sstevel@tonic-gate if ((tcp1 = tcp_lookup_ipv4(iph, tcph, TCPS_SYN_SENT, 26837c478bd9Sstevel@tonic-gate &sock_id)) != NULL || 26847c478bd9Sstevel@tonic-gate (tcp1 = tcp_lookup_listener_ipv4(iph->ip_dst.s_addr, 26857c478bd9Sstevel@tonic-gate tcph->tha_fport, &sock_id)) != NULL) { 26867c478bd9Sstevel@tonic-gate if (tcp1->tcp_state == TCPS_LISTEN) { 26877c478bd9Sstevel@tonic-gate if ((tcp = tcp_lookup_eager_ipv4(tcp1, 26887c478bd9Sstevel@tonic-gate iph, tcph)) == NULL) { 26897c478bd9Sstevel@tonic-gate /* No eager... sent to listener */ 26907c478bd9Sstevel@tonic-gate #ifdef DEBUG 26917c478bd9Sstevel@tonic-gate printf("found the listener: %s\n", 26927c478bd9Sstevel@tonic-gate tcp_display(tcp1, NULL, 26937c478bd9Sstevel@tonic-gate DISP_ADDR_AND_PORT)); 26947c478bd9Sstevel@tonic-gate #endif 26957c478bd9Sstevel@tonic-gate tcp = tcp1; 26967c478bd9Sstevel@tonic-gate } 26977c478bd9Sstevel@tonic-gate #ifdef DEBUG 26987c478bd9Sstevel@tonic-gate else { 26997c478bd9Sstevel@tonic-gate printf("found the eager: %s\n", 27007c478bd9Sstevel@tonic-gate tcp_display(tcp, NULL, 27017c478bd9Sstevel@tonic-gate DISP_ADDR_AND_PORT)); 27027c478bd9Sstevel@tonic-gate } 27037c478bd9Sstevel@tonic-gate #endif 27047c478bd9Sstevel@tonic-gate } else { 27057c478bd9Sstevel@tonic-gate /* Non listener found... */ 27067c478bd9Sstevel@tonic-gate #ifdef DEBUG 27077c478bd9Sstevel@tonic-gate printf("found the connection: %s\n", 27087c478bd9Sstevel@tonic-gate tcp_display(tcp1, NULL, 27097c478bd9Sstevel@tonic-gate DISP_ADDR_AND_PORT)); 27107c478bd9Sstevel@tonic-gate #endif 27117c478bd9Sstevel@tonic-gate tcp = tcp1; 27127c478bd9Sstevel@tonic-gate } 27137c478bd9Sstevel@tonic-gate } else { 27147c478bd9Sstevel@tonic-gate /* 27157c478bd9Sstevel@tonic-gate * No connection for this segment... 27167c478bd9Sstevel@tonic-gate * Send a RST to the other side. 
27177c478bd9Sstevel@tonic-gate */ 27187c478bd9Sstevel@tonic-gate tcp_xmit_listeners_reset(sock_id, mp, ip_hdr_len); 27197c478bd9Sstevel@tonic-gate return; 27207c478bd9Sstevel@tonic-gate } 27217c478bd9Sstevel@tonic-gate } 27227c478bd9Sstevel@tonic-gate 27237c478bd9Sstevel@tonic-gate flags = tcph->tha_flags & 0xFF; 27247c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInSegs); 27257c478bd9Sstevel@tonic-gate if (tcp->tcp_state == TCPS_TIME_WAIT) { 27267c478bd9Sstevel@tonic-gate tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 27277c478bd9Sstevel@tonic-gate seg_len, (tcph_t *)tcph, sock_id); 27287c478bd9Sstevel@tonic-gate return; 27297c478bd9Sstevel@tonic-gate } 27307c478bd9Sstevel@tonic-gate /* 27317c478bd9Sstevel@tonic-gate * From this point we can assume that the tcp is not compressed, 27327c478bd9Sstevel@tonic-gate * since we would have branched off to tcp_time_wait_processing() 27337c478bd9Sstevel@tonic-gate * in such a case. 27347c478bd9Sstevel@tonic-gate */ 27357c478bd9Sstevel@tonic-gate assert(tcp != NULL && tcp->tcp_state != TCPS_TIME_WAIT); 27367c478bd9Sstevel@tonic-gate 27377c478bd9Sstevel@tonic-gate /* 27387c478bd9Sstevel@tonic-gate * After this point, we know we have the correct TCP, so update 27397c478bd9Sstevel@tonic-gate * the receive time. 27407c478bd9Sstevel@tonic-gate */ 27417c478bd9Sstevel@tonic-gate tcp->tcp_last_recv_time = prom_gettime(); 27427c478bd9Sstevel@tonic-gate 27437c478bd9Sstevel@tonic-gate /* In inetboot, we do not handle urgent pointer... */ 27447c478bd9Sstevel@tonic-gate if (flags & TH_URG) { 27457c478bd9Sstevel@tonic-gate freemsg(mp); 27467c478bd9Sstevel@tonic-gate DEBUG_1("tcp_rput_data(%d): received segment with urgent " 27477c478bd9Sstevel@tonic-gate "pointer\n", sock_id); 27487c478bd9Sstevel@tonic-gate tcp_drops++; 27497c478bd9Sstevel@tonic-gate return; 27507c478bd9Sstevel@tonic-gate } 27517c478bd9Sstevel@tonic-gate 27527c478bd9Sstevel@tonic-gate switch (tcp->tcp_state) { 27537c478bd9Sstevel@tonic-gate case TCPS_LISTEN: 27547c478bd9Sstevel@tonic-gate if ((flags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) { 27557c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 27567c478bd9Sstevel@tonic-gate freemsg(mp); 27577c478bd9Sstevel@tonic-gate return; 27587c478bd9Sstevel@tonic-gate } 27597c478bd9Sstevel@tonic-gate if (flags & TH_ACK) { 27607c478bd9Sstevel@tonic-gate tcp_xmit_early_reset("TCPS_LISTEN-TH_ACK", 27617c478bd9Sstevel@tonic-gate sock_id, mp, seg_ack, 0, TH_RST, 27627c478bd9Sstevel@tonic-gate ip_hdr_len); 27637c478bd9Sstevel@tonic-gate return; 27647c478bd9Sstevel@tonic-gate } 27657c478bd9Sstevel@tonic-gate if (!(flags & TH_SYN)) { 27667c478bd9Sstevel@tonic-gate freemsg(mp); 27677c478bd9Sstevel@tonic-gate return; 27687c478bd9Sstevel@tonic-gate } 27697c478bd9Sstevel@tonic-gate printf("tcp_rput_data: %d\n", __LINE__); 27707c478bd9Sstevel@tonic-gate prom_panic("inetboot"); 27717c478bd9Sstevel@tonic-gate } 27727c478bd9Sstevel@tonic-gate if (tcp->tcp_conn_req_max > 0) { 27737c478bd9Sstevel@tonic-gate tcp = tcp_conn_request(tcp, mp, sock_id, ip_hdr_len); 27747c478bd9Sstevel@tonic-gate if (tcp == NULL) { 27757c478bd9Sstevel@tonic-gate freemsg(mp); 27767c478bd9Sstevel@tonic-gate return; 27777c478bd9Sstevel@tonic-gate } 27787c478bd9Sstevel@tonic-gate #ifdef DEBUG 27797c478bd9Sstevel@tonic-gate printf("tcp_rput_data: new tcp created\n"); 27807c478bd9Sstevel@tonic-gate #endif 27817c478bd9Sstevel@tonic-gate } 27827c478bd9Sstevel@tonic-gate tcp->tcp_irs = seg_seq; 27837c478bd9Sstevel@tonic-gate tcp->tcp_rack = seg_seq; 27847c478bd9Sstevel@tonic-gate tcp->tcp_rnxt = seg_seq 
+ 1; 27857c478bd9Sstevel@tonic-gate U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 27867c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpPassiveOpens); 27877c478bd9Sstevel@tonic-gate goto syn_rcvd; 27887c478bd9Sstevel@tonic-gate case TCPS_SYN_SENT: 27897c478bd9Sstevel@tonic-gate if (flags & TH_ACK) { 27907c478bd9Sstevel@tonic-gate /* 27917c478bd9Sstevel@tonic-gate * Note that our stack cannot send data before a 27927c478bd9Sstevel@tonic-gate * connection is established, therefore the 27937c478bd9Sstevel@tonic-gate * following check is valid. Otherwise, it has 27947c478bd9Sstevel@tonic-gate * to be changed. 27957c478bd9Sstevel@tonic-gate */ 27967c478bd9Sstevel@tonic-gate if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 27977c478bd9Sstevel@tonic-gate SEQ_GT(seg_ack, tcp->tcp_snxt)) { 27987c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 27997c478bd9Sstevel@tonic-gate freemsg(mp); 28007c478bd9Sstevel@tonic-gate return; 28017c478bd9Sstevel@tonic-gate } 28027c478bd9Sstevel@tonic-gate tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 28037c478bd9Sstevel@tonic-gate tcp, mp, seg_ack, 0, TH_RST, 28047c478bd9Sstevel@tonic-gate ip_hdr_len, sock_id); 28057c478bd9Sstevel@tonic-gate return; 28067c478bd9Sstevel@tonic-gate } 28077c478bd9Sstevel@tonic-gate assert(tcp->tcp_suna + 1 == seg_ack); 28087c478bd9Sstevel@tonic-gate } 28097c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 28107c478bd9Sstevel@tonic-gate freemsg(mp); 28117c478bd9Sstevel@tonic-gate if (flags & TH_ACK) { 28127c478bd9Sstevel@tonic-gate tcp_clean_death(sock_id, tcp, ECONNREFUSED); 28137c478bd9Sstevel@tonic-gate } 28147c478bd9Sstevel@tonic-gate return; 28157c478bd9Sstevel@tonic-gate } 28167c478bd9Sstevel@tonic-gate if (!(flags & TH_SYN)) { 28177c478bd9Sstevel@tonic-gate freemsg(mp); 28187c478bd9Sstevel@tonic-gate return; 28197c478bd9Sstevel@tonic-gate } 28207c478bd9Sstevel@tonic-gate 28217c478bd9Sstevel@tonic-gate /* Process all TCP options. */ 28227c478bd9Sstevel@tonic-gate tcp_process_options(tcp, (tcph_t *)tcph); 28237c478bd9Sstevel@tonic-gate /* 28247c478bd9Sstevel@tonic-gate * The following changes our rwnd to be a multiple of the 28257c478bd9Sstevel@tonic-gate * MIN(peer MSS, our MSS) for performance reason. 28267c478bd9Sstevel@tonic-gate */ 28277c478bd9Sstevel@tonic-gate (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(tcp->tcp_rwnd, 28287c478bd9Sstevel@tonic-gate tcp->tcp_mss)); 28297c478bd9Sstevel@tonic-gate 28307c478bd9Sstevel@tonic-gate /* Is the other end ECN capable? */ 28317c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok) { 28327c478bd9Sstevel@tonic-gate if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 28337c478bd9Sstevel@tonic-gate tcp->tcp_ecn_ok = B_FALSE; 28347c478bd9Sstevel@tonic-gate } 28357c478bd9Sstevel@tonic-gate } 28367c478bd9Sstevel@tonic-gate /* 28377c478bd9Sstevel@tonic-gate * Clear ECN flags because it may interfere with later 28387c478bd9Sstevel@tonic-gate * processing. 
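 *
 * (In the ECN test above, only a SYN-ACK carrying ECE with CWR clear
 * counts as an ECN-capable peer: for example SYN|ACK|ECE keeps
 * tcp_ecn_ok set, while SYN|ACK|ECE|CWR or a plain SYN|ACK clears it.)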
28397c478bd9Sstevel@tonic-gate */ 28407c478bd9Sstevel@tonic-gate flags &= ~(TH_ECE|TH_CWR); 28417c478bd9Sstevel@tonic-gate 28427c478bd9Sstevel@tonic-gate tcp->tcp_irs = seg_seq; 28437c478bd9Sstevel@tonic-gate tcp->tcp_rack = seg_seq; 28447c478bd9Sstevel@tonic-gate tcp->tcp_rnxt = seg_seq + 1; 28457c478bd9Sstevel@tonic-gate U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 28467c478bd9Sstevel@tonic-gate 28477c478bd9Sstevel@tonic-gate if (flags & TH_ACK) { 28487c478bd9Sstevel@tonic-gate /* One for the SYN */ 28497c478bd9Sstevel@tonic-gate tcp->tcp_suna = tcp->tcp_iss + 1; 28507c478bd9Sstevel@tonic-gate tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 28517c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_ESTABLISHED; 28527c478bd9Sstevel@tonic-gate 28537c478bd9Sstevel@tonic-gate /* 28547c478bd9Sstevel@tonic-gate * If SYN was retransmitted, need to reset all 28557c478bd9Sstevel@tonic-gate * retransmission info. This is because this 28567c478bd9Sstevel@tonic-gate * segment will be treated as a dup ACK. 28577c478bd9Sstevel@tonic-gate */ 28587c478bd9Sstevel@tonic-gate if (tcp->tcp_rexmit) { 28597c478bd9Sstevel@tonic-gate tcp->tcp_rexmit = B_FALSE; 28607c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 28617c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_max = tcp->tcp_snxt; 28627c478bd9Sstevel@tonic-gate tcp->tcp_snd_burst = TCP_CWND_NORMAL; 28637c478bd9Sstevel@tonic-gate 28647c478bd9Sstevel@tonic-gate /* 28657c478bd9Sstevel@tonic-gate * Set tcp_cwnd back to 1 MSS, per 28667c478bd9Sstevel@tonic-gate * recommendation from 28677c478bd9Sstevel@tonic-gate * draft-floyd-incr-init-win-01.txt, 28687c478bd9Sstevel@tonic-gate * Increasing TCP's Initial Window. 28697c478bd9Sstevel@tonic-gate */ 28707c478bd9Sstevel@tonic-gate tcp->tcp_cwnd = tcp->tcp_mss; 28717c478bd9Sstevel@tonic-gate } 28727c478bd9Sstevel@tonic-gate 28737c478bd9Sstevel@tonic-gate tcp->tcp_swl1 = seg_seq; 28747c478bd9Sstevel@tonic-gate tcp->tcp_swl2 = seg_ack; 28757c478bd9Sstevel@tonic-gate 28767c478bd9Sstevel@tonic-gate new_swnd = BE16_TO_U16(((tcph_t *)tcph)->th_win); 28777c478bd9Sstevel@tonic-gate tcp->tcp_swnd = new_swnd; 28787c478bd9Sstevel@tonic-gate if (new_swnd > tcp->tcp_max_swnd) 28797c478bd9Sstevel@tonic-gate tcp->tcp_max_swnd = new_swnd; 28807c478bd9Sstevel@tonic-gate 28817c478bd9Sstevel@tonic-gate /* 28827c478bd9Sstevel@tonic-gate * Always send the three-way handshake ack immediately 28837c478bd9Sstevel@tonic-gate * in order to make the connection complete as soon as 28847c478bd9Sstevel@tonic-gate * possible on the accepting host. 28857c478bd9Sstevel@tonic-gate */ 28867c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 28877c478bd9Sstevel@tonic-gate /* 28887c478bd9Sstevel@tonic-gate * Check to see if there is data to be sent. If 28897c478bd9Sstevel@tonic-gate * yes, set the transmit flag. Then check to see 28907c478bd9Sstevel@tonic-gate * if received data processing needs to be done. 28917c478bd9Sstevel@tonic-gate * If not, go straight to xmit_check. This short 28927c478bd9Sstevel@tonic-gate * cut is OK as we don't support T/TCP. 
28937c478bd9Sstevel@tonic-gate 			 */
28947c478bd9Sstevel@tonic-gate 			if (tcp->tcp_unsent)
28957c478bd9Sstevel@tonic-gate 				flags |= TH_XMIT_NEEDED;
28967c478bd9Sstevel@tonic-gate 
28977c478bd9Sstevel@tonic-gate 			if (seg_len == 0) {
28987c478bd9Sstevel@tonic-gate 				freemsg(mp);
28997c478bd9Sstevel@tonic-gate 				goto xmit_check;
29007c478bd9Sstevel@tonic-gate 			}
29017c478bd9Sstevel@tonic-gate 
29027c478bd9Sstevel@tonic-gate 			flags &= ~TH_SYN;
29037c478bd9Sstevel@tonic-gate 			seg_seq++;
29047c478bd9Sstevel@tonic-gate 			break;
29057c478bd9Sstevel@tonic-gate 		}
29067c478bd9Sstevel@tonic-gate 	syn_rcvd:
29077c478bd9Sstevel@tonic-gate 		tcp->tcp_state = TCPS_SYN_RCVD;
29087c478bd9Sstevel@tonic-gate 		mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss,
29097c478bd9Sstevel@tonic-gate 		    NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
29107c478bd9Sstevel@tonic-gate 		if (mp1 != NULL) {
29117c478bd9Sstevel@tonic-gate 			TCP_DUMP_PACKET("tcp_rput_data replying SYN", mp1);
29127c478bd9Sstevel@tonic-gate 			(void) ipv4_tcp_output(sock_id, mp1);
29137c478bd9Sstevel@tonic-gate 			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
29147c478bd9Sstevel@tonic-gate 			freeb(mp1);
29157c478bd9Sstevel@tonic-gate 			/*
29167c478bd9Sstevel@tonic-gate 			 * Let's wait till our SYN has been ACKED since we
29177c478bd9Sstevel@tonic-gate 			 * don't have a timer.
29187c478bd9Sstevel@tonic-gate 			 */
29197c478bd9Sstevel@tonic-gate 			if (tcp_state_wait(sock_id, tcp, TCPS_ALL_ACKED) < 0) {
29207c478bd9Sstevel@tonic-gate 				freemsg(mp);
29217c478bd9Sstevel@tonic-gate 				return;
29227c478bd9Sstevel@tonic-gate 			}
29237c478bd9Sstevel@tonic-gate 		}
29247c478bd9Sstevel@tonic-gate 		freemsg(mp);
29257c478bd9Sstevel@tonic-gate 		return;
29267c478bd9Sstevel@tonic-gate 	default:
29277c478bd9Sstevel@tonic-gate 		break;
29287c478bd9Sstevel@tonic-gate 	}
29297c478bd9Sstevel@tonic-gate 	mp->b_rptr = (uchar_t *)tcph + TCP_HDR_LENGTH((tcph_t *)tcph);
29307c478bd9Sstevel@tonic-gate 	new_swnd = ntohs(tcph->tha_win) <<
29317c478bd9Sstevel@tonic-gate 	    ((flags & TH_SYN) ? 0 : tcp->tcp_snd_ws);
29327c478bd9Sstevel@tonic-gate 	mss = tcp->tcp_mss;
29337c478bd9Sstevel@tonic-gate 
29347c478bd9Sstevel@tonic-gate 	if (tcp->tcp_snd_ts_ok) {
29357c478bd9Sstevel@tonic-gate 		if (!tcp_paws_check(tcp, (tcph_t *)tcph, &tcpopt)) {
29367c478bd9Sstevel@tonic-gate 			/*
29377c478bd9Sstevel@tonic-gate 			 * This segment is not acceptable.
29387c478bd9Sstevel@tonic-gate 			 * Drop it and send back an ACK.
29397c478bd9Sstevel@tonic-gate 			 */
29407c478bd9Sstevel@tonic-gate 			freemsg(mp);
29417c478bd9Sstevel@tonic-gate 			flags |= TH_ACK_NEEDED;
29427c478bd9Sstevel@tonic-gate 			goto ack_check;
29437c478bd9Sstevel@tonic-gate 		}
29447c478bd9Sstevel@tonic-gate 	} else if (tcp->tcp_snd_sack_ok) {
29457c478bd9Sstevel@tonic-gate 		assert(tcp->tcp_sack_info != NULL);
29467c478bd9Sstevel@tonic-gate 		tcpopt.tcp = tcp;
29477c478bd9Sstevel@tonic-gate 		/*
29487c478bd9Sstevel@tonic-gate 		 * The SACK info is updated in tcp_parse_options. Ignore
29497c478bd9Sstevel@tonic-gate 		 * all other TCP options...
29507c478bd9Sstevel@tonic-gate 		 */
29517c478bd9Sstevel@tonic-gate 		(void) tcp_parse_options((tcph_t *)tcph, &tcpopt);
29527c478bd9Sstevel@tonic-gate 	}
29537c478bd9Sstevel@tonic-gate try_again:;
29547c478bd9Sstevel@tonic-gate 	gap = seg_seq - tcp->tcp_rnxt;
29557c478bd9Sstevel@tonic-gate 	rgap = tcp->tcp_rwnd - (gap + seg_len);
29567c478bd9Sstevel@tonic-gate 	/*
29577c478bd9Sstevel@tonic-gate 	 * gap is the amount of sequence space between what we expect to see
29587c478bd9Sstevel@tonic-gate 	 * and what we got for seg_seq.
A positive value for gap means 29597c478bd9Sstevel@tonic-gate * something got lost. A negative value means we got some old stuff. 29607c478bd9Sstevel@tonic-gate */ 29617c478bd9Sstevel@tonic-gate if (gap < 0) { 29627c478bd9Sstevel@tonic-gate /* Old stuff present. Is the SYN in there? */ 29637c478bd9Sstevel@tonic-gate if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 29647c478bd9Sstevel@tonic-gate (seg_len != 0)) { 29657c478bd9Sstevel@tonic-gate flags &= ~TH_SYN; 29667c478bd9Sstevel@tonic-gate seg_seq++; 29677c478bd9Sstevel@tonic-gate /* Recompute the gaps after noting the SYN. */ 29687c478bd9Sstevel@tonic-gate goto try_again; 29697c478bd9Sstevel@tonic-gate } 29707c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataDupSegs); 29717c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataDupBytes, 29727c478bd9Sstevel@tonic-gate (seg_len > -gap ? -gap : seg_len)); 29737c478bd9Sstevel@tonic-gate /* Remove the old stuff from seg_len. */ 29747c478bd9Sstevel@tonic-gate seg_len += gap; 29757c478bd9Sstevel@tonic-gate /* 29767c478bd9Sstevel@tonic-gate * Anything left? 29777c478bd9Sstevel@tonic-gate * Make sure to check for unack'd FIN when rest of data 29787c478bd9Sstevel@tonic-gate * has been previously ack'd. 29797c478bd9Sstevel@tonic-gate */ 29807c478bd9Sstevel@tonic-gate if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 29817c478bd9Sstevel@tonic-gate /* 29827c478bd9Sstevel@tonic-gate * Resets are only valid if they lie within our offered 29837c478bd9Sstevel@tonic-gate * window. If the RST bit is set, we just ignore this 29847c478bd9Sstevel@tonic-gate * segment. 29857c478bd9Sstevel@tonic-gate */ 29867c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 29877c478bd9Sstevel@tonic-gate freemsg(mp); 29887c478bd9Sstevel@tonic-gate return; 29897c478bd9Sstevel@tonic-gate } 29907c478bd9Sstevel@tonic-gate 29917c478bd9Sstevel@tonic-gate /* 29927c478bd9Sstevel@tonic-gate * This segment is "unacceptable". None of its 29937c478bd9Sstevel@tonic-gate * sequence space lies within our advertized window. 29947c478bd9Sstevel@tonic-gate * 29957c478bd9Sstevel@tonic-gate * Adjust seg_len to the original value for tracing. 29967c478bd9Sstevel@tonic-gate */ 29977c478bd9Sstevel@tonic-gate seg_len -= gap; 29987c478bd9Sstevel@tonic-gate #ifdef DEBUG 29997c478bd9Sstevel@tonic-gate printf("tcp_rput: unacceptable, gap %d, rgap " 30007c478bd9Sstevel@tonic-gate "%d, flags 0x%x, seg_seq %u, seg_ack %u, " 30017c478bd9Sstevel@tonic-gate "seg_len %d, rnxt %u, snxt %u, %s", 30027c478bd9Sstevel@tonic-gate gap, rgap, flags, seg_seq, seg_ack, 30037c478bd9Sstevel@tonic-gate seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 30047c478bd9Sstevel@tonic-gate tcp_display(tcp, NULL, DISP_ADDR_AND_PORT)); 30057c478bd9Sstevel@tonic-gate #endif 30067c478bd9Sstevel@tonic-gate 30077c478bd9Sstevel@tonic-gate /* 30087c478bd9Sstevel@tonic-gate * Arrange to send an ACK in response to the 30097c478bd9Sstevel@tonic-gate * unacceptable segment per RFC 793 page 69. There 30107c478bd9Sstevel@tonic-gate * is only one small difference between ours and the 30117c478bd9Sstevel@tonic-gate * acceptability test in the RFC - we accept ACK-only 30127c478bd9Sstevel@tonic-gate * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 30137c478bd9Sstevel@tonic-gate * will be generated. 
30147c478bd9Sstevel@tonic-gate * 30157c478bd9Sstevel@tonic-gate * Note that we have to ACK an ACK-only packet at least 30167c478bd9Sstevel@tonic-gate * for stacks that send 0-length keep-alives with 30177c478bd9Sstevel@tonic-gate * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 30187c478bd9Sstevel@tonic-gate * section 4.2.3.6. As long as we don't ever generate 30197c478bd9Sstevel@tonic-gate * an unacceptable packet in response to an incoming 30207c478bd9Sstevel@tonic-gate * packet that is unacceptable, it should not cause 30217c478bd9Sstevel@tonic-gate * "ACK wars". 30227c478bd9Sstevel@tonic-gate */ 30237c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 30247c478bd9Sstevel@tonic-gate 30257c478bd9Sstevel@tonic-gate /* 30267c478bd9Sstevel@tonic-gate * Continue processing this segment in order to use the 30277c478bd9Sstevel@tonic-gate * ACK information it contains, but skip all other 30287c478bd9Sstevel@tonic-gate * sequence-number processing. Processing the ACK 30297c478bd9Sstevel@tonic-gate * information is necessary in order to 30307c478bd9Sstevel@tonic-gate * re-synchronize connections that may have lost 30317c478bd9Sstevel@tonic-gate * synchronization. 30327c478bd9Sstevel@tonic-gate * 30337c478bd9Sstevel@tonic-gate * We clear seg_len and flag fields related to 30347c478bd9Sstevel@tonic-gate * sequence number processing as they are not 30357c478bd9Sstevel@tonic-gate * to be trusted for an unacceptable segment. 30367c478bd9Sstevel@tonic-gate */ 30377c478bd9Sstevel@tonic-gate seg_len = 0; 30387c478bd9Sstevel@tonic-gate flags &= ~(TH_SYN | TH_FIN | TH_URG); 30397c478bd9Sstevel@tonic-gate goto process_ack; 30407c478bd9Sstevel@tonic-gate } 30417c478bd9Sstevel@tonic-gate 30427c478bd9Sstevel@tonic-gate /* Fix seg_seq, and chew the gap off the front. */ 30437c478bd9Sstevel@tonic-gate seg_seq = tcp->tcp_rnxt; 30447c478bd9Sstevel@tonic-gate do { 30457c478bd9Sstevel@tonic-gate mblk_t *mp2; 30467c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 30477c478bd9Sstevel@tonic-gate (uintptr_t)UINT_MAX); 30487c478bd9Sstevel@tonic-gate gap += (uint_t)(mp->b_wptr - mp->b_rptr); 30497c478bd9Sstevel@tonic-gate if (gap > 0) { 30507c478bd9Sstevel@tonic-gate mp->b_rptr = mp->b_wptr - gap; 30517c478bd9Sstevel@tonic-gate break; 30527c478bd9Sstevel@tonic-gate } 30537c478bd9Sstevel@tonic-gate mp2 = mp; 30547c478bd9Sstevel@tonic-gate mp = mp->b_cont; 30557c478bd9Sstevel@tonic-gate freeb(mp2); 30567c478bd9Sstevel@tonic-gate } while (gap < 0); 30577c478bd9Sstevel@tonic-gate } 30587c478bd9Sstevel@tonic-gate /* 30597c478bd9Sstevel@tonic-gate * rgap is the amount of stuff received out of window. A negative 30607c478bd9Sstevel@tonic-gate * value is the amount out of window. 30617c478bd9Sstevel@tonic-gate */ 30627c478bd9Sstevel@tonic-gate if (rgap < 0) { 30637c478bd9Sstevel@tonic-gate mblk_t *mp2; 30647c478bd9Sstevel@tonic-gate 30657c478bd9Sstevel@tonic-gate if (tcp->tcp_rwnd == 0) 30667c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInWinProbe); 30677c478bd9Sstevel@tonic-gate else { 30687c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataPastWinSegs); 30697c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataPastWinBytes, -rgap); 30707c478bd9Sstevel@tonic-gate } 30717c478bd9Sstevel@tonic-gate 30727c478bd9Sstevel@tonic-gate /* 30737c478bd9Sstevel@tonic-gate * seg_len does not include the FIN, so if more than 30747c478bd9Sstevel@tonic-gate * just the FIN is out of window, we act like we don't 30757c478bd9Sstevel@tonic-gate * see it. 
(If just the FIN is out of window, rgap 30767c478bd9Sstevel@tonic-gate * will be zero and we will go ahead and acknowledge 30777c478bd9Sstevel@tonic-gate * the FIN.) 30787c478bd9Sstevel@tonic-gate */ 30797c478bd9Sstevel@tonic-gate flags &= ~TH_FIN; 30807c478bd9Sstevel@tonic-gate 30817c478bd9Sstevel@tonic-gate /* Fix seg_len and make sure there is something left. */ 30827c478bd9Sstevel@tonic-gate seg_len += rgap; 30837c478bd9Sstevel@tonic-gate if (seg_len <= 0) { 30847c478bd9Sstevel@tonic-gate /* 30857c478bd9Sstevel@tonic-gate * Resets are only valid if they lie within our offered 30867c478bd9Sstevel@tonic-gate * window. If the RST bit is set, we just ignore this 30877c478bd9Sstevel@tonic-gate * segment. 30887c478bd9Sstevel@tonic-gate */ 30897c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 30907c478bd9Sstevel@tonic-gate freemsg(mp); 30917c478bd9Sstevel@tonic-gate return; 30927c478bd9Sstevel@tonic-gate } 30937c478bd9Sstevel@tonic-gate 30947c478bd9Sstevel@tonic-gate /* Per RFC 793, we need to send back an ACK. */ 30957c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 30967c478bd9Sstevel@tonic-gate 30977c478bd9Sstevel@tonic-gate /* 30987c478bd9Sstevel@tonic-gate * If this is a zero window probe, continue to 30997c478bd9Sstevel@tonic-gate * process the ACK part. But we need to set seg_len 31007c478bd9Sstevel@tonic-gate * to 0 to avoid data processing. Otherwise just 31017c478bd9Sstevel@tonic-gate * drop the segment and send back an ACK. 31027c478bd9Sstevel@tonic-gate */ 31037c478bd9Sstevel@tonic-gate if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 31047c478bd9Sstevel@tonic-gate flags &= ~(TH_SYN | TH_URG); 31057c478bd9Sstevel@tonic-gate seg_len = 0; 31067c478bd9Sstevel@tonic-gate /* Let's see if we can update our rwnd */ 31077c478bd9Sstevel@tonic-gate tcp_rcv_drain(sock_id, tcp); 31087c478bd9Sstevel@tonic-gate goto process_ack; 31097c478bd9Sstevel@tonic-gate } else { 31107c478bd9Sstevel@tonic-gate freemsg(mp); 31117c478bd9Sstevel@tonic-gate goto ack_check; 31127c478bd9Sstevel@tonic-gate } 31137c478bd9Sstevel@tonic-gate } 31147c478bd9Sstevel@tonic-gate /* Pitch out of window stuff off the end. */ 31157c478bd9Sstevel@tonic-gate rgap = seg_len; 31167c478bd9Sstevel@tonic-gate mp2 = mp; 31177c478bd9Sstevel@tonic-gate do { 31187c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp2->b_wptr - 31197c478bd9Sstevel@tonic-gate mp2->b_rptr) <= (uintptr_t)INT_MAX); 31207c478bd9Sstevel@tonic-gate rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 31217c478bd9Sstevel@tonic-gate if (rgap < 0) { 31227c478bd9Sstevel@tonic-gate mp2->b_wptr += rgap; 31237c478bd9Sstevel@tonic-gate if ((mp1 = mp2->b_cont) != NULL) { 31247c478bd9Sstevel@tonic-gate mp2->b_cont = NULL; 31257c478bd9Sstevel@tonic-gate freemsg(mp1); 31267c478bd9Sstevel@tonic-gate } 31277c478bd9Sstevel@tonic-gate break; 31287c478bd9Sstevel@tonic-gate } 31297c478bd9Sstevel@tonic-gate } while ((mp2 = mp2->b_cont) != NULL); 31307c478bd9Sstevel@tonic-gate } 31317c478bd9Sstevel@tonic-gate ok:; 31327c478bd9Sstevel@tonic-gate /* 31337c478bd9Sstevel@tonic-gate * TCP should check ECN info for segments inside the window only. 31347c478bd9Sstevel@tonic-gate * Therefore the check should be done here. 
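	 *
	 * As a rough illustration (assuming the usual RFC 3168 encoding,
	 * with the ECN field in the two low-order bits of the IP TOS byte
	 * and IPH_ECN_CE covering both bits): when a router marks
	 * Congestion Experienced on an in-window segment, the check below
	 * turns tcp_ecn_echo_on back on, and it stays on until the sender
	 * answers the echo with CWR, which clears it again.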
31357c478bd9Sstevel@tonic-gate */ 31367c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok) { 31377c478bd9Sstevel@tonic-gate uchar_t tos = ((struct ip *)rptr)->ip_tos; 31387c478bd9Sstevel@tonic-gate 31397c478bd9Sstevel@tonic-gate if (flags & TH_CWR) { 31407c478bd9Sstevel@tonic-gate tcp->tcp_ecn_echo_on = B_FALSE; 31417c478bd9Sstevel@tonic-gate } 31427c478bd9Sstevel@tonic-gate /* 31437c478bd9Sstevel@tonic-gate * Note that both ECN_CE and CWR can be set in the 31447c478bd9Sstevel@tonic-gate * same segment. In this case, we once again turn 31457c478bd9Sstevel@tonic-gate * on ECN_ECHO. 31467c478bd9Sstevel@tonic-gate */ 31477c478bd9Sstevel@tonic-gate if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 31487c478bd9Sstevel@tonic-gate tcp->tcp_ecn_echo_on = B_TRUE; 31497c478bd9Sstevel@tonic-gate } 31507c478bd9Sstevel@tonic-gate } 31517c478bd9Sstevel@tonic-gate 31527c478bd9Sstevel@tonic-gate /* 31537c478bd9Sstevel@tonic-gate * Check whether we can update tcp_ts_recent. This test is 31547c478bd9Sstevel@tonic-gate * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 31557c478bd9Sstevel@tonic-gate * Extensions for High Performance: An Update", Internet Draft. 31567c478bd9Sstevel@tonic-gate */ 31577c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok && 31587c478bd9Sstevel@tonic-gate TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 31597c478bd9Sstevel@tonic-gate SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 31607c478bd9Sstevel@tonic-gate tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 31617c478bd9Sstevel@tonic-gate tcp->tcp_last_rcv_lbolt = prom_gettime(); 31627c478bd9Sstevel@tonic-gate } 31637c478bd9Sstevel@tonic-gate 31647c478bd9Sstevel@tonic-gate if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 31657c478bd9Sstevel@tonic-gate /* 31667c478bd9Sstevel@tonic-gate * FIN in an out of order segment. We record this in 31677c478bd9Sstevel@tonic-gate * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 31687c478bd9Sstevel@tonic-gate * Clear the FIN so that any check on FIN flag will fail. 31697c478bd9Sstevel@tonic-gate * Remember that FIN also counts in the sequence number 31707c478bd9Sstevel@tonic-gate * space. So we need to ack out of order FIN only segments. 31717c478bd9Sstevel@tonic-gate */ 31727c478bd9Sstevel@tonic-gate if (flags & TH_FIN) { 31737c478bd9Sstevel@tonic-gate tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 31747c478bd9Sstevel@tonic-gate tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 31757c478bd9Sstevel@tonic-gate flags &= ~TH_FIN; 31767c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 31777c478bd9Sstevel@tonic-gate } 31787c478bd9Sstevel@tonic-gate if (seg_len > 0) { 31797c478bd9Sstevel@tonic-gate /* Fill in the SACK blk list. */ 31807c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok) { 31817c478bd9Sstevel@tonic-gate assert(tcp->tcp_sack_info != NULL); 31827c478bd9Sstevel@tonic-gate tcp_sack_insert(tcp->tcp_sack_list, 31837c478bd9Sstevel@tonic-gate seg_seq, seg_seq + seg_len, 31847c478bd9Sstevel@tonic-gate &(tcp->tcp_num_sack_blk)); 31857c478bd9Sstevel@tonic-gate } 31867c478bd9Sstevel@tonic-gate 31877c478bd9Sstevel@tonic-gate /* 31887c478bd9Sstevel@tonic-gate * Attempt reassembly and see if we have something 31897c478bd9Sstevel@tonic-gate * ready to go. 
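			 *
			 * A rough sketch of the flow (assuming the usual
			 * behaviour of tcp_reass()): while the hole in
			 * front of this segment is still open, tcp_reass()
			 * keeps the data on the reassembly list and returns
			 * NULL, so only the ACK information is used below.
			 * Once the missing bytes arrive, it hands back a
			 * chain that starts at tcp_rnxt, and seg_seq and
			 * seg_len are recomputed from that chain.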
31907c478bd9Sstevel@tonic-gate */ 31917c478bd9Sstevel@tonic-gate mp = tcp_reass(tcp, mp, seg_seq); 31927c478bd9Sstevel@tonic-gate /* Always ack out of order packets */ 31937c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED | TH_PUSH; 31947c478bd9Sstevel@tonic-gate if (mp != NULL) { 31957c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp->b_wptr - 31967c478bd9Sstevel@tonic-gate mp->b_rptr) <= (uintptr_t)INT_MAX); 31977c478bd9Sstevel@tonic-gate seg_len = mp->b_cont ? msgdsize(mp) : 31987c478bd9Sstevel@tonic-gate (int)(mp->b_wptr - mp->b_rptr); 31997c478bd9Sstevel@tonic-gate seg_seq = tcp->tcp_rnxt; 32007c478bd9Sstevel@tonic-gate /* 32017c478bd9Sstevel@tonic-gate * A gap is filled and the seq num and len 32027c478bd9Sstevel@tonic-gate * of the gap match that of a previously 32037c478bd9Sstevel@tonic-gate * received FIN, put the FIN flag back in. 32047c478bd9Sstevel@tonic-gate */ 32057c478bd9Sstevel@tonic-gate if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 32067c478bd9Sstevel@tonic-gate seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 32077c478bd9Sstevel@tonic-gate flags |= TH_FIN; 32087c478bd9Sstevel@tonic-gate tcp->tcp_valid_bits &= 32097c478bd9Sstevel@tonic-gate ~TCP_OFO_FIN_VALID; 32107c478bd9Sstevel@tonic-gate } 32117c478bd9Sstevel@tonic-gate } else { 32127c478bd9Sstevel@tonic-gate /* 32137c478bd9Sstevel@tonic-gate * Keep going even with NULL mp. 32147c478bd9Sstevel@tonic-gate * There may be a useful ACK or something else 32157c478bd9Sstevel@tonic-gate * we don't want to miss. 32167c478bd9Sstevel@tonic-gate * 32177c478bd9Sstevel@tonic-gate * But TCP should not perform fast retransmit 32187c478bd9Sstevel@tonic-gate * because of the ack number. TCP uses 32197c478bd9Sstevel@tonic-gate * seg_len == 0 to determine if it is a pure 32207c478bd9Sstevel@tonic-gate * ACK. And this is not a pure ACK. 32217c478bd9Sstevel@tonic-gate */ 32227c478bd9Sstevel@tonic-gate seg_len = 0; 32237c478bd9Sstevel@tonic-gate ofo_seg = B_TRUE; 32247c478bd9Sstevel@tonic-gate } 32257c478bd9Sstevel@tonic-gate } 32267c478bd9Sstevel@tonic-gate } else if (seg_len > 0) { 32277c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataInorderSegs); 32287c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataInorderBytes, seg_len); 32297c478bd9Sstevel@tonic-gate /* 32307c478bd9Sstevel@tonic-gate * If an out of order FIN was received before, and the seq 32317c478bd9Sstevel@tonic-gate * num and len of the new segment match that of the FIN, 32327c478bd9Sstevel@tonic-gate * put the FIN flag back in. 
32337c478bd9Sstevel@tonic-gate */ 32347c478bd9Sstevel@tonic-gate if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 32357c478bd9Sstevel@tonic-gate seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 32367c478bd9Sstevel@tonic-gate flags |= TH_FIN; 32377c478bd9Sstevel@tonic-gate tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 32387c478bd9Sstevel@tonic-gate } 32397c478bd9Sstevel@tonic-gate } 32407c478bd9Sstevel@tonic-gate if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 32417c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 32427c478bd9Sstevel@tonic-gate freemsg(mp); 32437c478bd9Sstevel@tonic-gate switch (tcp->tcp_state) { 32447c478bd9Sstevel@tonic-gate case TCPS_SYN_RCVD: 32457c478bd9Sstevel@tonic-gate (void) tcp_clean_death(sock_id, tcp, ECONNREFUSED); 32467c478bd9Sstevel@tonic-gate break; 32477c478bd9Sstevel@tonic-gate case TCPS_ESTABLISHED: 32487c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_1: 32497c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_2: 32507c478bd9Sstevel@tonic-gate case TCPS_CLOSE_WAIT: 32517c478bd9Sstevel@tonic-gate (void) tcp_clean_death(sock_id, tcp, ECONNRESET); 32527c478bd9Sstevel@tonic-gate break; 32537c478bd9Sstevel@tonic-gate case TCPS_CLOSING: 32547c478bd9Sstevel@tonic-gate case TCPS_LAST_ACK: 32557c478bd9Sstevel@tonic-gate (void) tcp_clean_death(sock_id, tcp, 0); 32567c478bd9Sstevel@tonic-gate break; 32577c478bd9Sstevel@tonic-gate default: 32587c478bd9Sstevel@tonic-gate assert(tcp->tcp_state != TCPS_TIME_WAIT); 32597c478bd9Sstevel@tonic-gate (void) tcp_clean_death(sock_id, tcp, ENXIO); 32607c478bd9Sstevel@tonic-gate break; 32617c478bd9Sstevel@tonic-gate } 32627c478bd9Sstevel@tonic-gate return; 32637c478bd9Sstevel@tonic-gate } 32647c478bd9Sstevel@tonic-gate if (flags & TH_SYN) { 32657c478bd9Sstevel@tonic-gate /* 32667c478bd9Sstevel@tonic-gate * See RFC 793, Page 71 32677c478bd9Sstevel@tonic-gate * 32687c478bd9Sstevel@tonic-gate * The seq number must be in the window as it should 32697c478bd9Sstevel@tonic-gate * be "fixed" above. If it is outside window, it should 32707c478bd9Sstevel@tonic-gate * be already rejected. Note that we allow seg_seq to be 32717c478bd9Sstevel@tonic-gate * rnxt + rwnd because we want to accept 0 window probe. 32727c478bd9Sstevel@tonic-gate */ 32737c478bd9Sstevel@tonic-gate assert(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 32747c478bd9Sstevel@tonic-gate SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 32757c478bd9Sstevel@tonic-gate freemsg(mp); 32767c478bd9Sstevel@tonic-gate /* 32777c478bd9Sstevel@tonic-gate * If the ACK flag is not set, just use our snxt as the 32787c478bd9Sstevel@tonic-gate * seq number of the RST segment. 
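			 *
			 * (For comparison, RFC 793's reset generation rules
			 * use sequence number zero when the offending
			 * segment carries no ACK; falling back to snxt is
			 * this implementation's choice for that case.)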
			 */
			if (!(flags & TH_ACK)) {
				seg_ack = tcp->tcp_snxt;
			}
			tcp_xmit_ctl("TH_SYN", tcp, NULL, seg_ack,
			    seg_seq + 1, TH_RST|TH_ACK, 0, sock_id);
			assert(tcp->tcp_state != TCPS_TIME_WAIT);
			(void) tcp_clean_death(sock_id, tcp, ECONNRESET);
			return;
		}

process_ack:
		if (!(flags & TH_ACK)) {
#ifdef DEBUG
			printf("No ack in segment, dropped it, seq:%x\n", seg_seq);
#endif
			freemsg(mp);
			goto xmit_check;
		}
	}
	bytes_acked = (int)(seg_ack - tcp->tcp_suna);

	if (tcp->tcp_state == TCPS_SYN_RCVD) {
		tcp_t	*listener = tcp->tcp_listener;
#ifdef DEBUG
		printf("Done with eager 3-way handshake\n");
#endif
		/*
		 * NOTE: RFC 793 pg. 72 says this should be 'bytes_acked < 0'
		 * but that would mean we have an ack that ignored our SYN.
		 */
		if (bytes_acked < 1 || SEQ_GT(seg_ack, tcp->tcp_snxt)) {
			freemsg(mp);
			tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack",
			    tcp, NULL, seg_ack, 0, TH_RST, 0, sock_id);
			return;
		}

		/*
		 * If the conn_req_q is full, defer processing
		 * until space is available after accept()
		 * processing.
		 */
		if (listener->tcp_conn_req_cnt_q <
		    listener->tcp_conn_req_max) {
			tcp_t *tail;

			listener->tcp_conn_req_cnt_q0--;
			listener->tcp_conn_req_cnt_q++;

			/* Move from SYN_RCVD to ESTABLISHED list */
			tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
			    tcp->tcp_eager_prev_q0;
			tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
			    tcp->tcp_eager_next_q0;
			tcp->tcp_eager_prev_q0 = NULL;
			tcp->tcp_eager_next_q0 = NULL;

			/*
			 * Insert at end of the queue because sockfs
			 * sends down T_CONN_RES in chronological
			 * order. Leaving the older conn indications
			 * at the front of the queue helps reduce search
			 * time.
			 */
			tail = listener->tcp_eager_last_q;
			if (tail != NULL) {
				tail->tcp_eager_next_q = tcp;
			} else {
				listener->tcp_eager_next_q = tcp;
			}
			listener->tcp_eager_last_q = tcp;
			tcp->tcp_eager_next_q = NULL;
		} else {
			/*
			 * Defer the connection on q0 and set the deferred
			 * connection bit to true.
			 */
			tcp->tcp_conn_def_q0 = B_TRUE;

			/* take tcp out of q0 ... */
			tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
			    tcp->tcp_eager_next_q0;
			tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
			    tcp->tcp_eager_prev_q0;

			/* ... and place it at the end of q0 */
			tcp->tcp_eager_prev_q0 = listener->tcp_eager_prev_q0;
			tcp->tcp_eager_next_q0 = listener;
			listener->tcp_eager_prev_q0->tcp_eager_next_q0 = tcp;
			listener->tcp_eager_prev_q0 = tcp;
		}

		tcp->tcp_suna = tcp->tcp_iss + 1;	/* One for the SYN */
		bytes_acked--;

		/*
		 * If SYN was retransmitted, need to reset all
		 * retransmission info as this segment will be
		 * treated as a dup ACK.
		 */
		if (tcp->tcp_rexmit) {
			tcp->tcp_rexmit = B_FALSE;
			tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
			tcp->tcp_rexmit_max = tcp->tcp_snxt;
			tcp->tcp_snd_burst = TCP_CWND_NORMAL;
			tcp->tcp_ms_we_have_waited = 0;
			tcp->tcp_cwnd = mss;
		}

		/*
		 * We set the send window to zero here.
		 * This is needed if there is data to be
		 * processed already on the queue.
		 * Later (at the swnd_update label), when the
		 * "new_swnd > tcp_swnd" condition is satisfied,
		 * the XMIT_NEEDED flag is set in the current
		 * (SYN_RCVD) state. This ensures tcp_wput_data() is
		 * called if there is already data on the queue in
		 * this state.
33997c478bd9Sstevel@tonic-gate */ 34007c478bd9Sstevel@tonic-gate tcp->tcp_swnd = 0; 34017c478bd9Sstevel@tonic-gate 34027c478bd9Sstevel@tonic-gate if (new_swnd > tcp->tcp_max_swnd) 34037c478bd9Sstevel@tonic-gate tcp->tcp_max_swnd = new_swnd; 34047c478bd9Sstevel@tonic-gate tcp->tcp_swl1 = seg_seq; 34057c478bd9Sstevel@tonic-gate tcp->tcp_swl2 = seg_ack; 34067c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_ESTABLISHED; 34077c478bd9Sstevel@tonic-gate tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 34087c478bd9Sstevel@tonic-gate } 34097c478bd9Sstevel@tonic-gate /* This code follows 4.4BSD-Lite2 mostly. */ 34107c478bd9Sstevel@tonic-gate if (bytes_acked < 0) 34117c478bd9Sstevel@tonic-gate goto est; 34127c478bd9Sstevel@tonic-gate 34137c478bd9Sstevel@tonic-gate /* 34147c478bd9Sstevel@tonic-gate * If TCP is ECN capable and the congestion experience bit is 34157c478bd9Sstevel@tonic-gate * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be 34167c478bd9Sstevel@tonic-gate * done once per window (or more loosely, per RTT). 34177c478bd9Sstevel@tonic-gate */ 34187c478bd9Sstevel@tonic-gate if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max)) 34197c478bd9Sstevel@tonic-gate tcp->tcp_cwr = B_FALSE; 34207c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok && (flags & TH_ECE)) { 34217c478bd9Sstevel@tonic-gate if (!tcp->tcp_cwr) { 34227c478bd9Sstevel@tonic-gate npkt = (MIN(tcp->tcp_cwnd, tcp->tcp_swnd) >> 1) / mss; 34237c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss; 34247c478bd9Sstevel@tonic-gate tcp->tcp_cwnd = npkt * mss; 34257c478bd9Sstevel@tonic-gate /* 34267c478bd9Sstevel@tonic-gate * If the cwnd is 0, use the timer to clock out 34277c478bd9Sstevel@tonic-gate * new segments. This is required by the ECN spec. 34287c478bd9Sstevel@tonic-gate */ 34297c478bd9Sstevel@tonic-gate if (npkt == 0) { 34307c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 34317c478bd9Sstevel@tonic-gate /* 34327c478bd9Sstevel@tonic-gate * This makes sure that when the ACK comes 34337c478bd9Sstevel@tonic-gate * back, we will increase tcp_cwnd by 1 MSS. 34347c478bd9Sstevel@tonic-gate */ 34357c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_cnt = 0; 34367c478bd9Sstevel@tonic-gate } 34377c478bd9Sstevel@tonic-gate tcp->tcp_cwr = B_TRUE; 34387c478bd9Sstevel@tonic-gate /* 34397c478bd9Sstevel@tonic-gate * This marks the end of the current window of in 34407c478bd9Sstevel@tonic-gate * flight data. That is why we don't use 34417c478bd9Sstevel@tonic-gate * tcp_suna + tcp_swnd. Only data in flight can 34427c478bd9Sstevel@tonic-gate * provide ECN info. 34437c478bd9Sstevel@tonic-gate */ 34447c478bd9Sstevel@tonic-gate tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 34457c478bd9Sstevel@tonic-gate tcp->tcp_ecn_cwr_sent = B_FALSE; 34467c478bd9Sstevel@tonic-gate } 34477c478bd9Sstevel@tonic-gate } 34487c478bd9Sstevel@tonic-gate 34497c478bd9Sstevel@tonic-gate mp1 = tcp->tcp_xmit_head; 34507c478bd9Sstevel@tonic-gate if (bytes_acked == 0) { 34517c478bd9Sstevel@tonic-gate if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) { 34527c478bd9Sstevel@tonic-gate int dupack_cnt; 34537c478bd9Sstevel@tonic-gate 34547c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDupAck); 34557c478bd9Sstevel@tonic-gate /* 34567c478bd9Sstevel@tonic-gate * Fast retransmit. When we have seen exactly three 34577c478bd9Sstevel@tonic-gate * identical ACKs while we have unacked data 34587c478bd9Sstevel@tonic-gate * outstanding we take it as a hint that our peer 34597c478bd9Sstevel@tonic-gate * dropped something. 
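			 *
			 * ("Three" here is really tcp_dupack_fast_retransmit;
			 * the counting below also drives the RFC 3042 limited
			 * transmit of one new segment per duplicate ACK until
			 * that threshold is reached.)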
34607c478bd9Sstevel@tonic-gate * 34617c478bd9Sstevel@tonic-gate * If TCP is retransmitting, don't do fast retransmit. 34627c478bd9Sstevel@tonic-gate */ 34637c478bd9Sstevel@tonic-gate if (mp1 != NULL && tcp->tcp_suna != tcp->tcp_snxt && 34647c478bd9Sstevel@tonic-gate ! tcp->tcp_rexmit) { 34657c478bd9Sstevel@tonic-gate /* Do Limited Transmit */ 34667c478bd9Sstevel@tonic-gate if ((dupack_cnt = ++tcp->tcp_dupack_cnt) < 34677c478bd9Sstevel@tonic-gate tcp_dupack_fast_retransmit) { 34687c478bd9Sstevel@tonic-gate /* 34697c478bd9Sstevel@tonic-gate * RFC 3042 34707c478bd9Sstevel@tonic-gate * 34717c478bd9Sstevel@tonic-gate * What we need to do is temporarily 34727c478bd9Sstevel@tonic-gate * increase tcp_cwnd so that new 34737c478bd9Sstevel@tonic-gate * data can be sent if it is allowed 34747c478bd9Sstevel@tonic-gate * by the receive window (tcp_rwnd). 34757c478bd9Sstevel@tonic-gate * tcp_wput_data() will take care of 34767c478bd9Sstevel@tonic-gate * the rest. 34777c478bd9Sstevel@tonic-gate * 34787c478bd9Sstevel@tonic-gate * If the connection is SACK capable, 34797c478bd9Sstevel@tonic-gate * only do limited xmit when there 34807c478bd9Sstevel@tonic-gate * is SACK info. 34817c478bd9Sstevel@tonic-gate * 34827c478bd9Sstevel@tonic-gate * Note how tcp_cwnd is incremented. 34837c478bd9Sstevel@tonic-gate * The first dup ACK will increase 34847c478bd9Sstevel@tonic-gate * it by 1 MSS. The second dup ACK 34857c478bd9Sstevel@tonic-gate * will increase it by 2 MSS. This 34867c478bd9Sstevel@tonic-gate * means that only 1 new segment will 34877c478bd9Sstevel@tonic-gate * be sent for each dup ACK. 34887c478bd9Sstevel@tonic-gate */ 34897c478bd9Sstevel@tonic-gate if (tcp->tcp_unsent > 0 && 34907c478bd9Sstevel@tonic-gate (!tcp->tcp_snd_sack_ok || 34917c478bd9Sstevel@tonic-gate (tcp->tcp_snd_sack_ok && 34927c478bd9Sstevel@tonic-gate tcp->tcp_notsack_list != NULL))) { 34937c478bd9Sstevel@tonic-gate tcp->tcp_cwnd += mss << 34947c478bd9Sstevel@tonic-gate (tcp->tcp_dupack_cnt - 1); 34957c478bd9Sstevel@tonic-gate flags |= TH_LIMIT_XMIT; 34967c478bd9Sstevel@tonic-gate } 34977c478bd9Sstevel@tonic-gate } else if (dupack_cnt == 34987c478bd9Sstevel@tonic-gate tcp_dupack_fast_retransmit) { 34997c478bd9Sstevel@tonic-gate 35007c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutFastRetrans); 35017c478bd9Sstevel@tonic-gate /* 35027c478bd9Sstevel@tonic-gate * If we have reduced tcp_ssthresh 35037c478bd9Sstevel@tonic-gate * because of ECN, do not reduce it again 35047c478bd9Sstevel@tonic-gate * unless it is already one window of data 35057c478bd9Sstevel@tonic-gate * away. After one window of data, tcp_cwr 35067c478bd9Sstevel@tonic-gate * should then be cleared. Note that 35077c478bd9Sstevel@tonic-gate * for non ECN capable connection, tcp_cwr 35087c478bd9Sstevel@tonic-gate * should always be false. 35097c478bd9Sstevel@tonic-gate * 35107c478bd9Sstevel@tonic-gate * Adjust cwnd since the duplicate 35117c478bd9Sstevel@tonic-gate * ack indicates that a packet was 35127c478bd9Sstevel@tonic-gate * dropped (due to congestion.) 
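					 *
					 * As a worked illustration (the
					 * numbers are hypothetical): with
					 * mss == 1460 and both tcp_cwnd and
					 * tcp_swnd at 11680 (8 segments),
					 * npkt below becomes
					 * (11680 >> 1) / 1460 == 4, so
					 * tcp_cwnd_ssthresh is set to
					 * 4 * 1460 and, with tcp_dupack_cnt
					 * at the fast retransmit threshold
					 * of 3, tcp_cwnd to (4 + 3) * 1460:
					 * half the old window plus the
					 * segments presumed to have left
					 * the network.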
					 */
					if (!tcp->tcp_cwr) {
						npkt = (MIN(tcp->tcp_cwnd,
						    tcp->tcp_swnd) >> 1) / mss;
						if (npkt < 2)
							npkt = 2;
						tcp->tcp_cwnd_ssthresh = npkt * mss;
						tcp->tcp_cwnd = (npkt +
						    tcp->tcp_dupack_cnt) * mss;
					}
					if (tcp->tcp_ecn_ok) {
						tcp->tcp_cwr = B_TRUE;
						tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
						tcp->tcp_ecn_cwr_sent = B_FALSE;
					}

					/*
					 * We do Hoe's algorithm. Refer to her
					 * paper "Improving the Start-up Behavior
					 * of a Congestion Control Scheme for TCP,"
					 * which appeared in SIGCOMM '96.
					 *
					 * Save the highest seq no we have sent
					 * so far. Be careful about the
					 * invisible FIN byte.
					 */
					if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
					    (tcp->tcp_unsent == 0)) {
						tcp->tcp_rexmit_max = tcp->tcp_fss;
					} else {
						tcp->tcp_rexmit_max = tcp->tcp_snxt;
					}

					/*
					 * Do not allow bursty traffic during
					 * fast recovery. Refer to Fall and Floyd's
					 * paper "Simulation-based Comparisons of
					 * Tahoe, Reno and SACK TCP" (in CCR ??).
					 * This is a best current practice.
					 */
					tcp->tcp_snd_burst = TCP_CWND_SS;

					/*
					 * For SACK:
					 * Calculate tcp_pipe, which is the
					 * estimated number of bytes in the
					 * network.
					 *
					 * tcp_fack is the highest sack'ed seq num
					 * TCP has received.
					 *
					 * tcp_pipe is explained in the above quoted
					 * Fall and Floyd's paper. tcp_fack is
					 * explained in Mathis and Mahdavi's
					 * "Forward Acknowledgment: Refining TCP
					 * Congestion Control" in SIGCOMM '96.
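					 *
					 * Roughly (illustrative numbers): if
					 * tcp_snxt is 20000 and the highest
					 * SACKed sequence tcp_fack is 14000,
					 * the code below estimates tcp_pipe
					 * at 6000 bytes still in flight and
					 * lets the SACK retransmit path fill
					 * the reported holes first.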
35687c478bd9Sstevel@tonic-gate */ 35697c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok) { 35707c478bd9Sstevel@tonic-gate assert(tcp->tcp_sack_info != NULL); 35717c478bd9Sstevel@tonic-gate if (tcp->tcp_notsack_list != NULL) { 35727c478bd9Sstevel@tonic-gate tcp->tcp_pipe = tcp->tcp_snxt - 35737c478bd9Sstevel@tonic-gate tcp->tcp_fack; 35747c478bd9Sstevel@tonic-gate tcp->tcp_sack_snxt = seg_ack; 35757c478bd9Sstevel@tonic-gate flags |= TH_NEED_SACK_REXMIT; 35767c478bd9Sstevel@tonic-gate } else { 35777c478bd9Sstevel@tonic-gate /* 35787c478bd9Sstevel@tonic-gate * Always initialize tcp_pipe 35797c478bd9Sstevel@tonic-gate * even though we don't have 35807c478bd9Sstevel@tonic-gate * any SACK info. If later 35817c478bd9Sstevel@tonic-gate * we get SACK info and 35827c478bd9Sstevel@tonic-gate * tcp_pipe is not initialized, 35837c478bd9Sstevel@tonic-gate * funny things will happen. 35847c478bd9Sstevel@tonic-gate */ 35857c478bd9Sstevel@tonic-gate tcp->tcp_pipe = 35867c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_ssthresh; 35877c478bd9Sstevel@tonic-gate } 35887c478bd9Sstevel@tonic-gate } else { 35897c478bd9Sstevel@tonic-gate flags |= TH_REXMIT_NEEDED; 35907c478bd9Sstevel@tonic-gate } /* tcp_snd_sack_ok */ 35917c478bd9Sstevel@tonic-gate 35927c478bd9Sstevel@tonic-gate } else { 35937c478bd9Sstevel@tonic-gate /* 35947c478bd9Sstevel@tonic-gate * Here we perform congestion 35957c478bd9Sstevel@tonic-gate * avoidance, but NOT slow start. 35967c478bd9Sstevel@tonic-gate * This is known as the Fast 35977c478bd9Sstevel@tonic-gate * Recovery Algorithm. 35987c478bd9Sstevel@tonic-gate */ 35997c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok && 36007c478bd9Sstevel@tonic-gate tcp->tcp_notsack_list != NULL) { 36017c478bd9Sstevel@tonic-gate flags |= TH_NEED_SACK_REXMIT; 36027c478bd9Sstevel@tonic-gate tcp->tcp_pipe -= mss; 36037c478bd9Sstevel@tonic-gate if (tcp->tcp_pipe < 0) 36047c478bd9Sstevel@tonic-gate tcp->tcp_pipe = 0; 36057c478bd9Sstevel@tonic-gate } else { 36067c478bd9Sstevel@tonic-gate /* 36077c478bd9Sstevel@tonic-gate * We know that one more packet has 36087c478bd9Sstevel@tonic-gate * left the pipe thus we can update 36097c478bd9Sstevel@tonic-gate * cwnd. 36107c478bd9Sstevel@tonic-gate */ 36117c478bd9Sstevel@tonic-gate cwnd = tcp->tcp_cwnd + mss; 36127c478bd9Sstevel@tonic-gate if (cwnd > tcp->tcp_cwnd_max) 36137c478bd9Sstevel@tonic-gate cwnd = tcp->tcp_cwnd_max; 36147c478bd9Sstevel@tonic-gate tcp->tcp_cwnd = cwnd; 36157c478bd9Sstevel@tonic-gate flags |= TH_XMIT_NEEDED; 36167c478bd9Sstevel@tonic-gate } 36177c478bd9Sstevel@tonic-gate } 36187c478bd9Sstevel@tonic-gate } 36197c478bd9Sstevel@tonic-gate } else if (tcp->tcp_zero_win_probe) { 36207c478bd9Sstevel@tonic-gate /* 36217c478bd9Sstevel@tonic-gate * If the window has opened, need to arrange 36227c478bd9Sstevel@tonic-gate * to send additional data. 36237c478bd9Sstevel@tonic-gate */ 36247c478bd9Sstevel@tonic-gate if (new_swnd != 0) { 36257c478bd9Sstevel@tonic-gate /* tcp_suna != tcp_snxt */ 36267c478bd9Sstevel@tonic-gate /* Packet contains a window update */ 36277c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInWinUpdate); 36287c478bd9Sstevel@tonic-gate tcp->tcp_zero_win_probe = 0; 36297c478bd9Sstevel@tonic-gate tcp->tcp_timer_backoff = 0; 36307c478bd9Sstevel@tonic-gate tcp->tcp_ms_we_have_waited = 0; 36317c478bd9Sstevel@tonic-gate 36327c478bd9Sstevel@tonic-gate /* 36337c478bd9Sstevel@tonic-gate * Transmit starting with tcp_suna since 36347c478bd9Sstevel@tonic-gate * the one byte probe is not ack'ed. 
36357c478bd9Sstevel@tonic-gate * If TCP has sent more than one identical 36367c478bd9Sstevel@tonic-gate * probe, tcp_rexmit will be set. That means 36377c478bd9Sstevel@tonic-gate * tcp_ss_rexmit() will send out the one 36387c478bd9Sstevel@tonic-gate * byte along with new data. Otherwise, 36397c478bd9Sstevel@tonic-gate * fake the retransmission. 36407c478bd9Sstevel@tonic-gate */ 36417c478bd9Sstevel@tonic-gate flags |= TH_XMIT_NEEDED; 36427c478bd9Sstevel@tonic-gate if (!tcp->tcp_rexmit) { 36437c478bd9Sstevel@tonic-gate tcp->tcp_rexmit = B_TRUE; 36447c478bd9Sstevel@tonic-gate tcp->tcp_dupack_cnt = 0; 36457c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_nxt = tcp->tcp_suna; 36467c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 36477c478bd9Sstevel@tonic-gate } 36487c478bd9Sstevel@tonic-gate } 36497c478bd9Sstevel@tonic-gate } 36507c478bd9Sstevel@tonic-gate goto swnd_update; 36517c478bd9Sstevel@tonic-gate } 36527c478bd9Sstevel@tonic-gate 36537c478bd9Sstevel@tonic-gate /* 36547c478bd9Sstevel@tonic-gate * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 36557c478bd9Sstevel@tonic-gate * If the ACK value acks something that we have not yet sent, it might 36567c478bd9Sstevel@tonic-gate * be an old duplicate segment. Send an ACK to re-synchronize the 36577c478bd9Sstevel@tonic-gate * other side. 36587c478bd9Sstevel@tonic-gate * Note: reset in response to unacceptable ACK in SYN_RECEIVE 36597c478bd9Sstevel@tonic-gate * state is handled above, so we can always just drop the segment and 36607c478bd9Sstevel@tonic-gate * send an ACK here. 36617c478bd9Sstevel@tonic-gate * 36627c478bd9Sstevel@tonic-gate * Should we send ACKs in response to ACK only segments? 36637c478bd9Sstevel@tonic-gate */ 36647c478bd9Sstevel@tonic-gate if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 36657c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInAckUnsent); 36667c478bd9Sstevel@tonic-gate /* drop the received segment */ 36677c478bd9Sstevel@tonic-gate freemsg(mp); 36687c478bd9Sstevel@tonic-gate 36697c478bd9Sstevel@tonic-gate /* Send back an ACK. */ 36707c478bd9Sstevel@tonic-gate mp = tcp_ack_mp(tcp); 36717c478bd9Sstevel@tonic-gate 36727c478bd9Sstevel@tonic-gate if (mp == NULL) { 36737c478bd9Sstevel@tonic-gate return; 36747c478bd9Sstevel@tonic-gate } 36757c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutAck); 36767c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp); 36777c478bd9Sstevel@tonic-gate freeb(mp); 36787c478bd9Sstevel@tonic-gate return; 36797c478bd9Sstevel@tonic-gate } 36807c478bd9Sstevel@tonic-gate 36817c478bd9Sstevel@tonic-gate /* 36827c478bd9Sstevel@tonic-gate * TCP gets a new ACK, update the notsack'ed list to delete those 36837c478bd9Sstevel@tonic-gate * blocks that are covered by this ACK. 36847c478bd9Sstevel@tonic-gate */ 36857c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 36867c478bd9Sstevel@tonic-gate tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack, 36877c478bd9Sstevel@tonic-gate &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list)); 36887c478bd9Sstevel@tonic-gate } 36897c478bd9Sstevel@tonic-gate 36907c478bd9Sstevel@tonic-gate /* 36917c478bd9Sstevel@tonic-gate * If we got an ACK after fast retransmit, check to see 36927c478bd9Sstevel@tonic-gate * if it is a partial ACK. If it is not and the congestion 36937c478bd9Sstevel@tonic-gate * window was inflated to account for the other side's 36947c478bd9Sstevel@tonic-gate * cached packets, retract it. If it is, do Hoe's algorithm. 
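	 *
	 * For example (hypothetical sequence numbers): if fast retransmit
	 * started with tcp_rexmit_max at 30000 and this ACK only advances to
	 * 24000, it is a partial ACK, so recovery continues below (via the
	 * SACK list or Hoe's variant); an ACK at or beyond 30000 ends
	 * recovery and deflates tcp_cwnd back to tcp_cwnd_ssthresh.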
	 */
	if (tcp->tcp_dupack_cnt >= tcp_dupack_fast_retransmit) {
		assert(tcp->tcp_rexmit == B_FALSE);
		if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) {
			tcp->tcp_dupack_cnt = 0;
			/*
			 * Restore the orig tcp_cwnd_ssthresh after
			 * the fast retransmit phase.
			 */
			if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) {
				tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh;
			}
			tcp->tcp_rexmit_max = seg_ack;
			tcp->tcp_cwnd_cnt = 0;
			tcp->tcp_snd_burst = TCP_CWND_NORMAL;

			/*
			 * Remove all notsack info to avoid confusion with
			 * the next fast retransmit/recovery phase.
			 */
			if (tcp->tcp_snd_sack_ok &&
			    tcp->tcp_notsack_list != NULL) {
				TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list);
			}
		} else {
			if (tcp->tcp_snd_sack_ok &&
			    tcp->tcp_notsack_list != NULL) {
				flags |= TH_NEED_SACK_REXMIT;
				tcp->tcp_pipe -= mss;
				if (tcp->tcp_pipe < 0)
					tcp->tcp_pipe = 0;
			} else {
				/*
				 * Hoe's algorithm:
				 *
				 * Retransmit the unack'ed segment and
				 * restart fast recovery. Note that we
				 * need to scale back tcp_cwnd to the
				 * original value when we started fast
				 * recovery. This is to prevent overly
				 * aggressive behaviour in sending new
				 * segments.
				 */
				tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh +
				    tcp_dupack_fast_retransmit * mss;
				tcp->tcp_cwnd_cnt = tcp->tcp_cwnd;
				BUMP_MIB(tcp_mib.tcpOutFastRetrans);
				flags |= TH_REXMIT_NEEDED;
			}
		}
	} else {
		tcp->tcp_dupack_cnt = 0;
		if (tcp->tcp_rexmit) {
			/*
			 * TCP is retransmitting. If the ACK acks all
			 * outstanding data, update tcp_rexmit_max and
			 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt
			 * to the correct value.
			 *
			 * Note that SEQ_LEQ() is used.
			 * This is to avoid unnecessary fast retransmit
			 * caused by dup ACKs received when TCP does slow
			 * start retransmission after a time out. During
			 * this phase, TCP may send out segments which are
			 * already received. This causes dup ACKs to be
			 * sent back.
			 */
			if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
				if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
					tcp->tcp_rexmit_nxt = seg_ack;
				}
				if (seg_ack != tcp->tcp_rexmit_max) {
					flags |= TH_XMIT_NEEDED;
				}
			} else {
				tcp->tcp_rexmit = B_FALSE;
				tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
				tcp->tcp_snd_burst = TCP_CWND_NORMAL;
			}
			tcp->tcp_ms_we_have_waited = 0;
		}
	}

	BUMP_MIB(tcp_mib.tcpInAckSegs);
	UPDATE_MIB(tcp_mib.tcpInAckBytes, bytes_acked);
	tcp->tcp_suna = seg_ack;
	if (tcp->tcp_zero_win_probe != 0) {
		tcp->tcp_zero_win_probe = 0;
		tcp->tcp_timer_backoff = 0;
	}

	/*
	 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
	 * Note that it cannot be the SYN being ack'ed; the code flow
	 * would not reach here in that case.
	 */
	if (mp1 == NULL) {
		goto fin_acked;
	}

	/*
	 * Update the congestion window.
	 *
	 * If TCP is not ECN capable, or TCP is ECN capable but the
	 * congestion experience bit is not set, increase tcp_cwnd as
	 * usual.
	 */
	if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
		cwnd = tcp->tcp_cwnd;
		add = mss;

		if (cwnd >= tcp->tcp_cwnd_ssthresh) {
			/*
			 * This is to prevent an increase of less than 1 MSS
			 * of tcp_cwnd. With a partial increase,
			 * tcp_wput_data() may send out tinygrams in order
			 * to preserve mblk boundaries.
			 *
			 * By initializing tcp_cwnd_cnt to the new tcp_cwnd
			 * and decrementing it by 1 MSS for every ACK,
			 * tcp_cwnd is increased by 1 MSS per RTT.
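			 *
			 * Worked example (hypothetical numbers): with
			 * mss == 1460 and tcp_cwnd == 14600, the first ACK
			 * past ssthresh grows tcp_cwnd by one MSS and primes
			 * tcp_cwnd_cnt to the new window; each following ACK
			 * only subtracts 1460 from the counter, so the next
			 * full-MSS increase arrives after roughly a window's
			 * worth of ACKs, i.e. about once per RTT.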
			 */
			if (tcp->tcp_cwnd_cnt <= 0) {
				tcp->tcp_cwnd_cnt = cwnd + add;
			} else {
				tcp->tcp_cwnd_cnt -= add;
				add = 0;
			}
		}
		tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
	}

	/* Can we update the RTT estimates? */
	if (tcp->tcp_snd_ts_ok) {
		/* Ignore zero timestamp echo-reply. */
		if (tcpopt.tcp_opt_ts_ecr != 0) {
			tcp_set_rto(tcp, (int32_t)(prom_gettime() -
			    tcpopt.tcp_opt_ts_ecr));
		}

		/* If needed, restart the timer. */
		if (tcp->tcp_set_timer == 1) {
			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
			tcp->tcp_set_timer = 0;
		}
		/*
		 * Update tcp_csuna in case the other side stops sending
		 * us timestamps.
		 */
		tcp->tcp_csuna = tcp->tcp_snxt;
	} else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
		/*
		 * An ACK sequence we haven't seen before, so get the RTT
		 * and update the RTO.
		 * Note: use uintptr_t to suppress the gcc warning.
		 */
		tcp_set_rto(tcp, (int32_t)(prom_gettime() -
		    (uint32_t)(uintptr_t)mp1->b_prev));

		/* Remember the last sequence to be ACKed */
		tcp->tcp_csuna = seg_ack;
		if (tcp->tcp_set_timer == 1) {
			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
			tcp->tcp_set_timer = 0;
		}
	} else {
		BUMP_MIB(tcp_mib.tcpRttNoUpdate);
	}

	/* Eat acknowledged bytes off the xmit queue.
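	 * (A sketch of the loop below: bytes_acked counts the newly
	 * acknowledged data; fully acked mblks are freed off tcp_xmit_head,
	 * a partially acked mblk just has its b_rptr advanced, and
	 * tcp_xmit_tail/tcp_xmit_tail_unsent are patched up along the way.)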
	 */
	for (;;) {
		mblk_t *mp2;
		uchar_t *wptr;

		wptr = mp1->b_wptr;
		assert((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
		bytes_acked -= (int)(wptr - mp1->b_rptr);
		if (bytes_acked < 0) {
			mp1->b_rptr = wptr + bytes_acked;
			break;
		}
		mp1->b_prev = NULL;
		mp2 = mp1;
		mp1 = mp1->b_cont;
		freeb(mp2);
		if (bytes_acked == 0) {
			if (mp1 == NULL) {
				/* Everything is ack'ed, clear the tail. */
				tcp->tcp_xmit_tail = NULL;
				goto pre_swnd_update;
			}
			if (mp2 != tcp->tcp_xmit_tail)
				break;
			tcp->tcp_xmit_tail = mp1;
			assert((uintptr_t)(mp1->b_wptr -
			    mp1->b_rptr) <= (uintptr_t)INT_MAX);
			tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
			    mp1->b_rptr);
			break;
		}
		if (mp1 == NULL) {
			/*
			 * More was acked but there is nothing more
			 * outstanding. This means that the FIN was
			 * just acked or that we're talking to a clown.
			 */
fin_acked:
			assert(tcp->tcp_fin_sent);
			tcp->tcp_xmit_tail = NULL;
			if (tcp->tcp_fin_sent) {
				tcp->tcp_fin_acked = B_TRUE;
			} else {
				/*
				 * We should never have gotten here, because
				 * we have already checked that the
				 * number of bytes ack'ed must be
				 * smaller than or equal to what we
				 * have sent so far (it is the
				 * acceptability check of the ACK).
				 * We can only get here if the send
				 * queue is corrupted.
				 *
				 * Terminate the connection and
				 * panic the system. It is better
				 * for us to panic than to continue
				 * and risk further damage.
				 */
				tcp_xmit_ctl(NULL, tcp, NULL, tcp->tcp_snxt,
				    tcp->tcp_rnxt, TH_RST|TH_ACK, 0, sock_id);
				printf("Memory corruption "
				    "detected for connection %s.\n",
				    tcp_display(tcp, NULL,
				    DISP_ADDR_AND_PORT));
				/* We should never get here...
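				 * (prom_panic() is not expected to return,
				 * so the return statement below should be
				 * unreachable.)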
				 */
				prom_panic("tcp_rput_data");
				return;
			}
			goto pre_swnd_update;
		}
		assert(mp2 != tcp->tcp_xmit_tail);
	}
	if (tcp->tcp_unsent) {
		flags |= TH_XMIT_NEEDED;
	}
pre_swnd_update:
	tcp->tcp_xmit_head = mp1;
swnd_update:
	/*
	 * The following check is different from most other implementations.
	 * For bi-directional transfer, when segments are dropped, the
	 * "normal" check will not accept a window update in those
	 * retransmitted segments. Failing to do that, TCP may send out
	 * segments which are outside the receiver's window. As TCP accepts
	 * the ack in those retransmitted segments, if the window update in
	 * the same segment is not accepted, TCP will incorrectly calculate
	 * that it can send more segments. This can create a deadlock
	 * with the receiver if its window becomes zero.
	 */
	if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
	    SEQ_LT(tcp->tcp_swl1, seg_seq) ||
	    (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
		/*
		 * The criteria for an update are:
		 *
		 * 1. the segment acknowledges some data. Or
		 * 2. the segment is new, i.e. it has a higher seq num. Or
		 * 3. the segment is not old and the advertised window is
		 *    larger than the previous advertised window.
		 */
		if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
			flags |= TH_XMIT_NEEDED;
		tcp->tcp_swnd = new_swnd;
		if (new_swnd > tcp->tcp_max_swnd)
			tcp->tcp_max_swnd = new_swnd;
		tcp->tcp_swl1 = seg_seq;
		tcp->tcp_swl2 = seg_ack;
	}
est:
	if (tcp->tcp_state > TCPS_ESTABLISHED) {
		switch (tcp->tcp_state) {
		case TCPS_FIN_WAIT_1:
			if (tcp->tcp_fin_acked) {
				tcp->tcp_state = TCPS_FIN_WAIT_2;
				/*
				 * We implement the non-standard BSD/SunOS
				 * FIN_WAIT_2 flushing algorithm.
				 * If there is no user attached to this
				 * TCP endpoint, then this TCP struct
				 * could hang around forever in FIN_WAIT_2
				 * state if the peer forgets to send us
				 * a FIN.
To prevent this, we wait only 39857c478bd9Sstevel@tonic-gate * 2*MSL (a convenient time value) for 39867c478bd9Sstevel@tonic-gate * the FIN to arrive. If it doesn't show up, 39877c478bd9Sstevel@tonic-gate * we flush the TCP endpoint. This algorithm, 39887c478bd9Sstevel@tonic-gate * though a violation of RFC-793, has worked 39897c478bd9Sstevel@tonic-gate * for over 10 years in BSD systems. 39907c478bd9Sstevel@tonic-gate * Note: SunOS 4.x waits 675 seconds before 39917c478bd9Sstevel@tonic-gate * flushing the FIN_WAIT_2 connection. 39927c478bd9Sstevel@tonic-gate */ 39937c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, 39947c478bd9Sstevel@tonic-gate tcp_fin_wait_2_flush_interval); 39957c478bd9Sstevel@tonic-gate } 39967c478bd9Sstevel@tonic-gate break; 39977c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_2: 39987c478bd9Sstevel@tonic-gate break; /* Shutdown hook? */ 39997c478bd9Sstevel@tonic-gate case TCPS_LAST_ACK: 40007c478bd9Sstevel@tonic-gate freemsg(mp); 40017c478bd9Sstevel@tonic-gate if (tcp->tcp_fin_acked) { 40027c478bd9Sstevel@tonic-gate (void) tcp_clean_death(sock_id, tcp, 0); 40037c478bd9Sstevel@tonic-gate return; 40047c478bd9Sstevel@tonic-gate } 40057c478bd9Sstevel@tonic-gate goto xmit_check; 40067c478bd9Sstevel@tonic-gate case TCPS_CLOSING: 40077c478bd9Sstevel@tonic-gate if (tcp->tcp_fin_acked) { 40087c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_TIME_WAIT; 40097c478bd9Sstevel@tonic-gate tcp_time_wait_append(tcp); 40107c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp_time_wait_interval); 40117c478bd9Sstevel@tonic-gate } 40127c478bd9Sstevel@tonic-gate /*FALLTHRU*/ 40137c478bd9Sstevel@tonic-gate case TCPS_CLOSE_WAIT: 40147c478bd9Sstevel@tonic-gate freemsg(mp); 40157c478bd9Sstevel@tonic-gate goto xmit_check; 40167c478bd9Sstevel@tonic-gate default: 40177c478bd9Sstevel@tonic-gate assert(tcp->tcp_state != TCPS_TIME_WAIT); 40187c478bd9Sstevel@tonic-gate break; 40197c478bd9Sstevel@tonic-gate } 40207c478bd9Sstevel@tonic-gate } 40217c478bd9Sstevel@tonic-gate if (flags & TH_FIN) { 40227c478bd9Sstevel@tonic-gate /* Make sure we ack the fin */ 40237c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 40247c478bd9Sstevel@tonic-gate if (!tcp->tcp_fin_rcvd) { 40257c478bd9Sstevel@tonic-gate tcp->tcp_fin_rcvd = B_TRUE; 40267c478bd9Sstevel@tonic-gate tcp->tcp_rnxt++; 40277c478bd9Sstevel@tonic-gate U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 40287c478bd9Sstevel@tonic-gate 40297c478bd9Sstevel@tonic-gate switch (tcp->tcp_state) { 40307c478bd9Sstevel@tonic-gate case TCPS_SYN_RCVD: 40317c478bd9Sstevel@tonic-gate case TCPS_ESTABLISHED: 40327c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_CLOSE_WAIT; 40337c478bd9Sstevel@tonic-gate /* Keepalive? */ 40347c478bd9Sstevel@tonic-gate break; 40357c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_1: 40367c478bd9Sstevel@tonic-gate if (!tcp->tcp_fin_acked) { 40377c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_CLOSING; 40387c478bd9Sstevel@tonic-gate break; 40397c478bd9Sstevel@tonic-gate } 40407c478bd9Sstevel@tonic-gate /* FALLTHRU */ 40417c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_2: 40427c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_TIME_WAIT; 40437c478bd9Sstevel@tonic-gate tcp_time_wait_append(tcp); 40447c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp_time_wait_interval); 40457c478bd9Sstevel@tonic-gate if (seg_len) { 40467c478bd9Sstevel@tonic-gate /* 40477c478bd9Sstevel@tonic-gate * implies data piggybacked on FIN. 40487c478bd9Sstevel@tonic-gate * break to handle data. 
40497c478bd9Sstevel@tonic-gate */ 40507c478bd9Sstevel@tonic-gate break; 40517c478bd9Sstevel@tonic-gate } 40527c478bd9Sstevel@tonic-gate freemsg(mp); 40537c478bd9Sstevel@tonic-gate goto ack_check; 40547c478bd9Sstevel@tonic-gate } 40557c478bd9Sstevel@tonic-gate } 40567c478bd9Sstevel@tonic-gate } 40577c478bd9Sstevel@tonic-gate if (mp == NULL) 40587c478bd9Sstevel@tonic-gate goto xmit_check; 40597c478bd9Sstevel@tonic-gate if (seg_len == 0) { 40607c478bd9Sstevel@tonic-gate freemsg(mp); 40617c478bd9Sstevel@tonic-gate goto xmit_check; 40627c478bd9Sstevel@tonic-gate } 40637c478bd9Sstevel@tonic-gate if (mp->b_rptr == mp->b_wptr) { 40647c478bd9Sstevel@tonic-gate /* 40657c478bd9Sstevel@tonic-gate * The header has been consumed, so we remove the 40667c478bd9Sstevel@tonic-gate * zero-length mblk here. 40677c478bd9Sstevel@tonic-gate */ 40687c478bd9Sstevel@tonic-gate mp1 = mp; 40697c478bd9Sstevel@tonic-gate mp = mp->b_cont; 40707c478bd9Sstevel@tonic-gate freeb(mp1); 40717c478bd9Sstevel@tonic-gate } 40727c478bd9Sstevel@tonic-gate /* 40737c478bd9Sstevel@tonic-gate * ACK every other segments, unless the input queue is empty 40747c478bd9Sstevel@tonic-gate * as we don't have a timer available. 40757c478bd9Sstevel@tonic-gate */ 40767c478bd9Sstevel@tonic-gate if (++tcp->tcp_rack_cnt == 2 || sockets[sock_id].inq == NULL) { 40777c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 40787c478bd9Sstevel@tonic-gate tcp->tcp_rack_cnt = 0; 40797c478bd9Sstevel@tonic-gate } 40807c478bd9Sstevel@tonic-gate tcp->tcp_rnxt += seg_len; 40817c478bd9Sstevel@tonic-gate U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 40827c478bd9Sstevel@tonic-gate 40837c478bd9Sstevel@tonic-gate /* Update SACK list */ 40847c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 40857c478bd9Sstevel@tonic-gate tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt, 40867c478bd9Sstevel@tonic-gate &(tcp->tcp_num_sack_blk)); 40877c478bd9Sstevel@tonic-gate } 40887c478bd9Sstevel@tonic-gate 40897c478bd9Sstevel@tonic-gate if (tcp->tcp_listener) { 40907c478bd9Sstevel@tonic-gate /* 40917c478bd9Sstevel@tonic-gate * Side queue inbound data until the accept happens. 40927c478bd9Sstevel@tonic-gate * tcp_accept/tcp_rput drains this when the accept happens. 40937c478bd9Sstevel@tonic-gate */ 40947c478bd9Sstevel@tonic-gate tcp_rcv_enqueue(tcp, mp, seg_len); 40957c478bd9Sstevel@tonic-gate } else { 40967c478bd9Sstevel@tonic-gate /* Just queue the data until the app calls read. */ 40977c478bd9Sstevel@tonic-gate tcp_rcv_enqueue(tcp, mp, seg_len); 40987c478bd9Sstevel@tonic-gate /* 40997c478bd9Sstevel@tonic-gate * Make sure the timer is running if we have data waiting 41007c478bd9Sstevel@tonic-gate * for a push bit. This provides resiliency against 41017c478bd9Sstevel@tonic-gate * implementations that do not correctly generate push bits. 41027c478bd9Sstevel@tonic-gate */ 41037c478bd9Sstevel@tonic-gate if (tcp->tcp_rcv_list != NULL) 41047c478bd9Sstevel@tonic-gate flags |= TH_TIMER_NEEDED; 41057c478bd9Sstevel@tonic-gate } 41067c478bd9Sstevel@tonic-gate 41077c478bd9Sstevel@tonic-gate xmit_check: 41087c478bd9Sstevel@tonic-gate /* Is there anything left to do? */ 41097c478bd9Sstevel@tonic-gate if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 41107c478bd9Sstevel@tonic-gate TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_TIMER_NEEDED)) == 0) 41117c478bd9Sstevel@tonic-gate return; 41127c478bd9Sstevel@tonic-gate 41137c478bd9Sstevel@tonic-gate /* Any transmit work to do and a non-zero window? 
*/ 41147c478bd9Sstevel@tonic-gate if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 41157c478bd9Sstevel@tonic-gate TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 41167c478bd9Sstevel@tonic-gate if (flags & TH_REXMIT_NEEDED) { 41177c478bd9Sstevel@tonic-gate uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 41187c478bd9Sstevel@tonic-gate 41197c478bd9Sstevel@tonic-gate if (snd_size > mss) 41207c478bd9Sstevel@tonic-gate snd_size = mss; 41217c478bd9Sstevel@tonic-gate if (snd_size > tcp->tcp_swnd) 41227c478bd9Sstevel@tonic-gate snd_size = tcp->tcp_swnd; 41237c478bd9Sstevel@tonic-gate mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 41247c478bd9Sstevel@tonic-gate NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 41257c478bd9Sstevel@tonic-gate B_TRUE); 41267c478bd9Sstevel@tonic-gate 41277c478bd9Sstevel@tonic-gate if (mp1 != NULL) { 4128*53391bafSeota /* use uintptr_t to suppress the gcc warning */ 41297c478bd9Sstevel@tonic-gate tcp->tcp_xmit_head->b_prev = 4130*53391bafSeota (mblk_t *)(uintptr_t)prom_gettime(); 41317c478bd9Sstevel@tonic-gate tcp->tcp_csuna = tcp->tcp_snxt; 41327c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpRetransSegs); 41337c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpRetransBytes, snd_size); 41347c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp1); 41357c478bd9Sstevel@tonic-gate freeb(mp1); 41367c478bd9Sstevel@tonic-gate } 41377c478bd9Sstevel@tonic-gate } 41387c478bd9Sstevel@tonic-gate if (flags & TH_NEED_SACK_REXMIT) { 41397c478bd9Sstevel@tonic-gate if (tcp_sack_rxmit(tcp, sock_id) != 0) { 41407c478bd9Sstevel@tonic-gate flags |= TH_XMIT_NEEDED; 41417c478bd9Sstevel@tonic-gate } 41427c478bd9Sstevel@tonic-gate } 41437c478bd9Sstevel@tonic-gate /* 41447c478bd9Sstevel@tonic-gate * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 41457c478bd9Sstevel@tonic-gate * out new segment. Note that tcp_rexmit should not be 41467c478bd9Sstevel@tonic-gate * set, otherwise TH_LIMIT_XMIT should not be set. 41477c478bd9Sstevel@tonic-gate */ 41487c478bd9Sstevel@tonic-gate if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 41497c478bd9Sstevel@tonic-gate if (!tcp->tcp_rexmit) { 41507c478bd9Sstevel@tonic-gate tcp_wput_data(tcp, NULL, sock_id); 41517c478bd9Sstevel@tonic-gate } else { 41527c478bd9Sstevel@tonic-gate tcp_ss_rexmit(tcp, sock_id); 41537c478bd9Sstevel@tonic-gate } 41547c478bd9Sstevel@tonic-gate /* 41557c478bd9Sstevel@tonic-gate * The TCP could be closed in tcp_state_wait via 41567c478bd9Sstevel@tonic-gate * tcp_wput_data (tcp_ss_rexmit could call 41577c478bd9Sstevel@tonic-gate * tcp_wput_data as well). 41587c478bd9Sstevel@tonic-gate */ 41597c478bd9Sstevel@tonic-gate if (sockets[sock_id].pcb == NULL) 41607c478bd9Sstevel@tonic-gate return; 41617c478bd9Sstevel@tonic-gate } 41627c478bd9Sstevel@tonic-gate /* 41637c478bd9Sstevel@tonic-gate * Adjust tcp_cwnd back to normal value after sending 41647c478bd9Sstevel@tonic-gate * new data segments. 41657c478bd9Sstevel@tonic-gate */ 41667c478bd9Sstevel@tonic-gate if (flags & TH_LIMIT_XMIT) { 41677c478bd9Sstevel@tonic-gate tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 41687c478bd9Sstevel@tonic-gate } 41697c478bd9Sstevel@tonic-gate 41707c478bd9Sstevel@tonic-gate /* Anything more to do? 
 */
		if ((flags & (TH_ACK_NEEDED|TH_TIMER_NEEDED)) == 0)
			return;
	}
ack_check:
	if (flags & TH_ACK_NEEDED) {
		/*
		 * Time to send an ack for some reason.
		 */
		if ((mp1 = tcp_ack_mp(tcp)) != NULL) {
			TCP_DUMP_PACKET("tcp_rput_data: ack mp", mp1);
			(void) ipv4_tcp_output(sock_id, mp1);
			BUMP_MIB(tcp_mib.tcpOutAck);
			freeb(mp1);
		}
	}
}

/*
 * tcp_ss_rexmit() is called in tcp_rput_data() to do slow start
 * retransmission after a timeout.
 *
 * To limit the number of duplicate segments, we limit the number of segments
 * sent at any one time to tcp_snd_burst, the burst variable.
 */
static void
tcp_ss_rexmit(tcp_t *tcp, int sock_id)
{
	uint32_t	snxt;
	uint32_t	smax;
	int32_t		win;
	int32_t		mss;
	int32_t		off;
	int32_t		burst = tcp->tcp_snd_burst;
	mblk_t		*snxt_mp;

	/*
	 * Note that tcp_rexmit can be set even though TCP has retransmitted
	 * all unack'ed segments.
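	 *
	 * As an illustrative sketch of the loop below (not extra logic;
	 * names follow the code):
	 *
	 *	win = MIN(cwnd, swnd) - (snxt - suna)	  -- room left to send
	 *	while (snxt < smax && win > 0 && burst > 0) {
	 *		cnt = MIN(mss, win, smax - snxt)  -- one segment's worth
	 *		retransmit cnt bytes starting at snxt
	 *		snxt += cnt; win -= cnt; burst--
	 *	}
	 *
	 * so at most tcp_snd_burst segments go out per call, each no larger
	 * than the current MSS.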
	 */
	if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) {
		smax = tcp->tcp_rexmit_max;
		snxt = tcp->tcp_rexmit_nxt;
		if (SEQ_LT(snxt, tcp->tcp_suna)) {
			snxt = tcp->tcp_suna;
		}
		win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd);
		win -= snxt - tcp->tcp_suna;
		mss = tcp->tcp_mss;
		snxt_mp = tcp_get_seg_mp(tcp, snxt, &off);

		while (SEQ_LT(snxt, smax) && (win > 0) &&
		    (burst > 0) && (snxt_mp != NULL)) {
			mblk_t	*xmit_mp;
			mblk_t	*old_snxt_mp = snxt_mp;
			uint32_t cnt = mss;

			if (win < cnt) {
				cnt = win;
			}
			if (SEQ_GT(snxt + cnt, smax)) {
				cnt = smax - snxt;
			}
			xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off,
			    &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE);

			if (xmit_mp == NULL)
				return;

			(void) ipv4_tcp_output(sock_id, xmit_mp);
			freeb(xmit_mp);

			snxt += cnt;
			win -= cnt;
			/*
			 * Update the send timestamp to avoid false
			 * retransmission.
			 * Note: use uintptr_t to suppress the gcc warning.
			 */
			old_snxt_mp->b_prev =
			    (mblk_t *)(uintptr_t)prom_gettime();
			BUMP_MIB(tcp_mib.tcpRetransSegs);
			UPDATE_MIB(tcp_mib.tcpRetransBytes, cnt);

			tcp->tcp_rexmit_nxt = snxt;
			burst--;
		}
		/*
		 * If we have transmitted all we have at the time
		 * we started the retransmission, we can leave
		 * the rest of the job to tcp_wput_data().  But we
		 * need to check the send window first.  If the
		 * win is not 0, go on with tcp_wput_data().
		 */
		if (SEQ_LT(snxt, smax) || win == 0) {
			return;
		}
	}
	/* Only call tcp_wput_data() if there is data to be sent. */
	if (tcp->tcp_unsent) {
		tcp_wput_data(tcp, NULL, sock_id);
	}
}

/*
 * tcp_timer is the timer service routine.
It handles all timer events for 42767c478bd9Sstevel@tonic-gate * a tcp instance except keepalives. It figures out from the state of the 42777c478bd9Sstevel@tonic-gate * tcp instance what kind of action needs to be done at the time it is called. 42787c478bd9Sstevel@tonic-gate */ 42797c478bd9Sstevel@tonic-gate static void 42807c478bd9Sstevel@tonic-gate tcp_timer(tcp_t *tcp, int sock_id) 42817c478bd9Sstevel@tonic-gate { 42827c478bd9Sstevel@tonic-gate mblk_t *mp; 42837c478bd9Sstevel@tonic-gate uint32_t first_threshold; 42847c478bd9Sstevel@tonic-gate uint32_t second_threshold; 42857c478bd9Sstevel@tonic-gate uint32_t ms; 42867c478bd9Sstevel@tonic-gate uint32_t mss; 42877c478bd9Sstevel@tonic-gate 42887c478bd9Sstevel@tonic-gate first_threshold = tcp->tcp_first_timer_threshold; 42897c478bd9Sstevel@tonic-gate second_threshold = tcp->tcp_second_timer_threshold; 42907c478bd9Sstevel@tonic-gate switch (tcp->tcp_state) { 42917c478bd9Sstevel@tonic-gate case TCPS_IDLE: 42927c478bd9Sstevel@tonic-gate case TCPS_BOUND: 42937c478bd9Sstevel@tonic-gate case TCPS_LISTEN: 42947c478bd9Sstevel@tonic-gate return; 42957c478bd9Sstevel@tonic-gate case TCPS_SYN_RCVD: 42967c478bd9Sstevel@tonic-gate case TCPS_SYN_SENT: 42977c478bd9Sstevel@tonic-gate first_threshold = tcp->tcp_first_ctimer_threshold; 42987c478bd9Sstevel@tonic-gate second_threshold = tcp->tcp_second_ctimer_threshold; 42997c478bd9Sstevel@tonic-gate break; 43007c478bd9Sstevel@tonic-gate case TCPS_ESTABLISHED: 43017c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_1: 43027c478bd9Sstevel@tonic-gate case TCPS_CLOSING: 43037c478bd9Sstevel@tonic-gate case TCPS_CLOSE_WAIT: 43047c478bd9Sstevel@tonic-gate case TCPS_LAST_ACK: 43057c478bd9Sstevel@tonic-gate /* If we have data to rexmit */ 43067c478bd9Sstevel@tonic-gate if (tcp->tcp_suna != tcp->tcp_snxt) { 43077c478bd9Sstevel@tonic-gate int32_t time_to_wait; 43087c478bd9Sstevel@tonic-gate 43097c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpTimRetrans); 43107c478bd9Sstevel@tonic-gate if (tcp->tcp_xmit_head == NULL) 43117c478bd9Sstevel@tonic-gate break; 4312*53391bafSeota /* use uintptr_t to suppress the gcc warning */ 43137c478bd9Sstevel@tonic-gate time_to_wait = (int32_t)(prom_gettime() - 4314*53391bafSeota (uint32_t)(uintptr_t)tcp->tcp_xmit_head->b_prev); 43157c478bd9Sstevel@tonic-gate time_to_wait = tcp->tcp_rto - time_to_wait; 43167c478bd9Sstevel@tonic-gate if (time_to_wait > 0) { 43177c478bd9Sstevel@tonic-gate /* 43187c478bd9Sstevel@tonic-gate * Timer fired too early, so restart it. 43197c478bd9Sstevel@tonic-gate */ 43207c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, time_to_wait); 43217c478bd9Sstevel@tonic-gate return; 43227c478bd9Sstevel@tonic-gate } 43237c478bd9Sstevel@tonic-gate /* 43247c478bd9Sstevel@tonic-gate * When we probe zero windows, we force the swnd open. 43257c478bd9Sstevel@tonic-gate * If our peer acks with a closed window swnd will be 43267c478bd9Sstevel@tonic-gate * set to zero by tcp_rput(). As long as we are 43277c478bd9Sstevel@tonic-gate * receiving acks tcp_rput will 43287c478bd9Sstevel@tonic-gate * reset 'tcp_ms_we_have_waited' so as not to trip the 43297c478bd9Sstevel@tonic-gate * first and second interval actions. NOTE: the timer 43307c478bd9Sstevel@tonic-gate * interval is allowed to continue its exponential 43317c478bd9Sstevel@tonic-gate * backoff. 
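			 *
			 * As a worked example (numbers assumed purely for
			 * illustration): with a base RTO of 500 ms, the
			 * left shift by tcp_timer_backoff applied later in
			 * this routine yields waits of roughly
			 *
			 *	500 ms, 1 s, 2 s, 4 s, ...
			 *
			 * capped at tcp_rexmit_interval_max, and the backoff
			 * keeps growing even while we are only probing.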
43327c478bd9Sstevel@tonic-gate */ 43337c478bd9Sstevel@tonic-gate if (tcp->tcp_swnd == 0 || tcp->tcp_zero_win_probe) { 43347c478bd9Sstevel@tonic-gate DEBUG_1("tcp_timer (%d): zero win", sock_id); 43357c478bd9Sstevel@tonic-gate break; 43367c478bd9Sstevel@tonic-gate } else { 43377c478bd9Sstevel@tonic-gate /* 43387c478bd9Sstevel@tonic-gate * After retransmission, we need to do 43397c478bd9Sstevel@tonic-gate * slow start. Set the ssthresh to one 43407c478bd9Sstevel@tonic-gate * half of current effective window and 43417c478bd9Sstevel@tonic-gate * cwnd to one MSS. Also reset 43427c478bd9Sstevel@tonic-gate * tcp_cwnd_cnt. 43437c478bd9Sstevel@tonic-gate * 43447c478bd9Sstevel@tonic-gate * Note that if tcp_ssthresh is reduced because 43457c478bd9Sstevel@tonic-gate * of ECN, do not reduce it again unless it is 43467c478bd9Sstevel@tonic-gate * already one window of data away (tcp_cwr 43477c478bd9Sstevel@tonic-gate * should then be cleared) or this is a 43487c478bd9Sstevel@tonic-gate * timeout for a retransmitted segment. 43497c478bd9Sstevel@tonic-gate */ 43507c478bd9Sstevel@tonic-gate uint32_t npkt; 43517c478bd9Sstevel@tonic-gate 43527c478bd9Sstevel@tonic-gate if (!tcp->tcp_cwr || tcp->tcp_rexmit) { 43537c478bd9Sstevel@tonic-gate npkt = (MIN((tcp->tcp_timer_backoff ? 43547c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_ssthresh : 43557c478bd9Sstevel@tonic-gate tcp->tcp_cwnd), 43567c478bd9Sstevel@tonic-gate tcp->tcp_swnd) >> 1) / 43577c478bd9Sstevel@tonic-gate tcp->tcp_mss; 43587c478bd9Sstevel@tonic-gate if (npkt < 2) 43597c478bd9Sstevel@tonic-gate npkt = 2; 43607c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_ssthresh = npkt * 43617c478bd9Sstevel@tonic-gate tcp->tcp_mss; 43627c478bd9Sstevel@tonic-gate } 43637c478bd9Sstevel@tonic-gate tcp->tcp_cwnd = tcp->tcp_mss; 43647c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_cnt = 0; 43657c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok) { 43667c478bd9Sstevel@tonic-gate tcp->tcp_cwr = B_TRUE; 43677c478bd9Sstevel@tonic-gate tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 43687c478bd9Sstevel@tonic-gate tcp->tcp_ecn_cwr_sent = B_FALSE; 43697c478bd9Sstevel@tonic-gate } 43707c478bd9Sstevel@tonic-gate } 43717c478bd9Sstevel@tonic-gate break; 43727c478bd9Sstevel@tonic-gate } 43737c478bd9Sstevel@tonic-gate /* 43747c478bd9Sstevel@tonic-gate * We have something to send yet we cannot send. The 43757c478bd9Sstevel@tonic-gate * reason can be: 43767c478bd9Sstevel@tonic-gate * 43777c478bd9Sstevel@tonic-gate * 1. Zero send window: we need to do zero window probe. 43787c478bd9Sstevel@tonic-gate * 2. Zero cwnd: because of ECN, we need to "clock out 43797c478bd9Sstevel@tonic-gate * segments. 43807c478bd9Sstevel@tonic-gate * 3. SWS avoidance: receiver may have shrunk window, 43817c478bd9Sstevel@tonic-gate * reset our knowledge. 43827c478bd9Sstevel@tonic-gate * 43837c478bd9Sstevel@tonic-gate * Note that condition 2 can happen with either 1 or 43847c478bd9Sstevel@tonic-gate * 3. But 1 and 3 are exclusive. 43857c478bd9Sstevel@tonic-gate */ 43867c478bd9Sstevel@tonic-gate if (tcp->tcp_unsent != 0) { 43877c478bd9Sstevel@tonic-gate if (tcp->tcp_cwnd == 0) { 43887c478bd9Sstevel@tonic-gate /* 43897c478bd9Sstevel@tonic-gate * Set tcp_cwnd to 1 MSS so that a 43907c478bd9Sstevel@tonic-gate * new segment can be sent out. We 43917c478bd9Sstevel@tonic-gate * are "clocking out" new data when 43927c478bd9Sstevel@tonic-gate * the network is really congested. 
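				 *
				 * For a concrete feel of the slow-start reset
				 * performed a little earlier in this case
				 * (numbers assumed for illustration only):
				 * with a 1460-byte MSS and an effective
				 * window of 14600 bytes at the time of the
				 * timeout,
				 *
				 *	npkt = (14600 / 2) / 1460 = 5
				 *
				 * so tcp_cwnd_ssthresh becomes 5 * 1460 = 7300
				 * bytes while tcp_cwnd restarts at a single MSS.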
				 */
				assert(tcp->tcp_ecn_ok);
				tcp->tcp_cwnd = tcp->tcp_mss;
			}
			if (tcp->tcp_swnd == 0) {
				/* Extend window for zero window probe */
				tcp->tcp_swnd++;
				tcp->tcp_zero_win_probe = B_TRUE;
				BUMP_MIB(tcp_mib.tcpOutWinProbe);
			} else {
				/*
				 * Handle timeout from sender SWS avoidance.
				 * Reset our knowledge of the max send window
				 * since the receiver might have reduced its
				 * receive buffer.  Avoid setting tcp_max_swnd
				 * to one since that will essentially disable
				 * the SWS checks.
				 *
				 * Note that since we don't have a SWS
				 * state variable, if the timeout is set
				 * for ECN but not for SWS, this
				 * code will also be executed.  This is
				 * fine as tcp_max_swnd is updated
				 * constantly and it will not affect
				 * anything.
				 */
				tcp->tcp_max_swnd = MAX(tcp->tcp_swnd, 2);
			}
			tcp_wput_data(tcp, NULL, sock_id);
			return;
		}
		/* Is there a FIN that needs to be retransmitted? */
		if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
		    !tcp->tcp_fin_acked)
			break;
		/* Nothing to do, return without restarting timer. */
		return;
	case TCPS_FIN_WAIT_2:
		/*
		 * User closed the TCP endpoint and peer ACK'ed our FIN.
		 * We waited some time for the peer's FIN, but it hasn't
		 * arrived.  We flush the connection now to avoid
		 * the case where the peer has rebooted.
		 */
		/* FALLTHRU */
	case TCPS_TIME_WAIT:
		(void) tcp_clean_death(sock_id, tcp, 0);
		return;
	default:
		DEBUG_3("tcp_timer (%d): strange state (%d) %s", sock_id,
		    tcp->tcp_state, tcp_display(tcp, NULL,
		    DISP_PORT_ONLY));
		return;
	}
	if ((ms = tcp->tcp_ms_we_have_waited) > second_threshold) {
		/*
		 * For zero window probe, we need to send indefinitely,
		 * unless we have not heard from the other side for some
		 * time...
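		 *
		 * A sketch of the give-up test below (illustration only):
		 *
		 *	if (!zero_win_probe ||
		 *	    prom_gettime() - tcp_last_recv_time > second_threshold)
		 *		drop the connection (ETIMEDOUT)
		 *	else
		 *		keep probing, and clamp tcp_ms_we_have_waited
		 *		at second_threshold so the elapsed-time check
		 *		is what eventually ends the probing.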
44527c478bd9Sstevel@tonic-gate */ 44537c478bd9Sstevel@tonic-gate if ((tcp->tcp_zero_win_probe == 0) || 44547c478bd9Sstevel@tonic-gate ((prom_gettime() - tcp->tcp_last_recv_time) > 44557c478bd9Sstevel@tonic-gate second_threshold)) { 44567c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpTimRetransDrop); 44577c478bd9Sstevel@tonic-gate /* 44587c478bd9Sstevel@tonic-gate * If TCP is in SYN_RCVD state, send back a 44597c478bd9Sstevel@tonic-gate * RST|ACK as BSD does. Note that tcp_zero_win_probe 44607c478bd9Sstevel@tonic-gate * should be zero in TCPS_SYN_RCVD state. 44617c478bd9Sstevel@tonic-gate */ 44627c478bd9Sstevel@tonic-gate if (tcp->tcp_state == TCPS_SYN_RCVD) { 44637c478bd9Sstevel@tonic-gate tcp_xmit_ctl("tcp_timer: RST sent on timeout " 44647c478bd9Sstevel@tonic-gate "in SYN_RCVD", 44657c478bd9Sstevel@tonic-gate tcp, NULL, tcp->tcp_snxt, 44667c478bd9Sstevel@tonic-gate tcp->tcp_rnxt, TH_RST | TH_ACK, 0, sock_id); 44677c478bd9Sstevel@tonic-gate } 44687c478bd9Sstevel@tonic-gate (void) tcp_clean_death(sock_id, tcp, 44697c478bd9Sstevel@tonic-gate tcp->tcp_client_errno ? 44707c478bd9Sstevel@tonic-gate tcp->tcp_client_errno : ETIMEDOUT); 44717c478bd9Sstevel@tonic-gate return; 44727c478bd9Sstevel@tonic-gate } else { 44737c478bd9Sstevel@tonic-gate /* 44747c478bd9Sstevel@tonic-gate * Set tcp_ms_we_have_waited to second_threshold 44757c478bd9Sstevel@tonic-gate * so that in next timeout, we will do the above 44767c478bd9Sstevel@tonic-gate * check (lbolt - tcp_last_recv_time). This is 44777c478bd9Sstevel@tonic-gate * also to avoid overflow. 44787c478bd9Sstevel@tonic-gate * 44797c478bd9Sstevel@tonic-gate * We don't need to decrement tcp_timer_backoff 44807c478bd9Sstevel@tonic-gate * to avoid overflow because it will be decremented 44817c478bd9Sstevel@tonic-gate * later if new timeout value is greater than 44827c478bd9Sstevel@tonic-gate * tcp_rexmit_interval_max. In the case when 44837c478bd9Sstevel@tonic-gate * tcp_rexmit_interval_max is greater than 44847c478bd9Sstevel@tonic-gate * second_threshold, it means that we will wait 44857c478bd9Sstevel@tonic-gate * longer than second_threshold to send the next 44867c478bd9Sstevel@tonic-gate * window probe. 44877c478bd9Sstevel@tonic-gate */ 44887c478bd9Sstevel@tonic-gate tcp->tcp_ms_we_have_waited = second_threshold; 44897c478bd9Sstevel@tonic-gate } 44907c478bd9Sstevel@tonic-gate } else if (ms > first_threshold && tcp->tcp_rtt_sa != 0) { 44917c478bd9Sstevel@tonic-gate /* 44927c478bd9Sstevel@tonic-gate * We have been retransmitting for too long... The RTT 44937c478bd9Sstevel@tonic-gate * we calculated is probably incorrect. Reinitialize it. 44947c478bd9Sstevel@tonic-gate * Need to compensate for 0 tcp_rtt_sa. Reset 44957c478bd9Sstevel@tonic-gate * tcp_rtt_update so that we won't accidentally cache a 44967c478bd9Sstevel@tonic-gate * bad value. But only do this if this is not a zero 44977c478bd9Sstevel@tonic-gate * window probe. 
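		 *
		 * In effect (a sketch of the reinitialization below):
		 *
		 *	rtt_sd += (rtt_sa >> 3) + (rtt_sa >> 5)  -- keep a deviation floor
		 *	rtt_sa  = 0		-- next sample reseeds the average
		 *	rtt_update = 0		-- don't trust a cached sample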
44987c478bd9Sstevel@tonic-gate */ 44997c478bd9Sstevel@tonic-gate if (tcp->tcp_zero_win_probe == 0) { 45007c478bd9Sstevel@tonic-gate tcp->tcp_rtt_sd += (tcp->tcp_rtt_sa >> 3) + 45017c478bd9Sstevel@tonic-gate (tcp->tcp_rtt_sa >> 5); 45027c478bd9Sstevel@tonic-gate tcp->tcp_rtt_sa = 0; 45037c478bd9Sstevel@tonic-gate tcp->tcp_rtt_update = 0; 45047c478bd9Sstevel@tonic-gate } 45057c478bd9Sstevel@tonic-gate } 45067c478bd9Sstevel@tonic-gate tcp->tcp_timer_backoff++; 45077c478bd9Sstevel@tonic-gate if ((ms = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 45087c478bd9Sstevel@tonic-gate tcp_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5)) < 45097c478bd9Sstevel@tonic-gate tcp_rexmit_interval_min) { 45107c478bd9Sstevel@tonic-gate /* 45117c478bd9Sstevel@tonic-gate * This means the original RTO is tcp_rexmit_interval_min. 45127c478bd9Sstevel@tonic-gate * So we will use tcp_rexmit_interval_min as the RTO value 45137c478bd9Sstevel@tonic-gate * and do the backoff. 45147c478bd9Sstevel@tonic-gate */ 45157c478bd9Sstevel@tonic-gate ms = tcp_rexmit_interval_min << tcp->tcp_timer_backoff; 45167c478bd9Sstevel@tonic-gate } else { 45177c478bd9Sstevel@tonic-gate ms <<= tcp->tcp_timer_backoff; 45187c478bd9Sstevel@tonic-gate } 45197c478bd9Sstevel@tonic-gate if (ms > tcp_rexmit_interval_max) { 45207c478bd9Sstevel@tonic-gate ms = tcp_rexmit_interval_max; 45217c478bd9Sstevel@tonic-gate /* 45227c478bd9Sstevel@tonic-gate * ms is at max, decrement tcp_timer_backoff to avoid 45237c478bd9Sstevel@tonic-gate * overflow. 45247c478bd9Sstevel@tonic-gate */ 45257c478bd9Sstevel@tonic-gate tcp->tcp_timer_backoff--; 45267c478bd9Sstevel@tonic-gate } 45277c478bd9Sstevel@tonic-gate tcp->tcp_ms_we_have_waited += ms; 45287c478bd9Sstevel@tonic-gate if (tcp->tcp_zero_win_probe == 0) { 45297c478bd9Sstevel@tonic-gate tcp->tcp_rto = ms; 45307c478bd9Sstevel@tonic-gate } 45317c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, ms); 45327c478bd9Sstevel@tonic-gate /* 45337c478bd9Sstevel@tonic-gate * This is after a timeout and tcp_rto is backed off. Set 45347c478bd9Sstevel@tonic-gate * tcp_set_timer to 1 so that next time RTO is updated, we will 45357c478bd9Sstevel@tonic-gate * restart the timer with a correct value. 45367c478bd9Sstevel@tonic-gate */ 45377c478bd9Sstevel@tonic-gate tcp->tcp_set_timer = 1; 45387c478bd9Sstevel@tonic-gate mss = tcp->tcp_snxt - tcp->tcp_suna; 45397c478bd9Sstevel@tonic-gate if (mss > tcp->tcp_mss) 45407c478bd9Sstevel@tonic-gate mss = tcp->tcp_mss; 45417c478bd9Sstevel@tonic-gate if (mss > tcp->tcp_swnd && tcp->tcp_swnd != 0) 45427c478bd9Sstevel@tonic-gate mss = tcp->tcp_swnd; 45437c478bd9Sstevel@tonic-gate 4544*53391bafSeota if ((mp = tcp->tcp_xmit_head) != NULL) { 4545*53391bafSeota /* use uintptr_t to suppress the gcc warning */ 4546*53391bafSeota mp->b_prev = (mblk_t *)(uintptr_t)prom_gettime(); 4547*53391bafSeota } 45487c478bd9Sstevel@tonic-gate mp = tcp_xmit_mp(tcp, mp, mss, NULL, NULL, tcp->tcp_suna, B_TRUE, &mss, 45497c478bd9Sstevel@tonic-gate B_TRUE); 45507c478bd9Sstevel@tonic-gate if (mp == NULL) 45517c478bd9Sstevel@tonic-gate return; 45527c478bd9Sstevel@tonic-gate tcp->tcp_csuna = tcp->tcp_snxt; 45537c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpRetransSegs); 45547c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpRetransBytes, mss); 45557c478bd9Sstevel@tonic-gate /* Dump the packet when debugging. 
*/ 45567c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_timer", mp); 45577c478bd9Sstevel@tonic-gate 45587c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp); 45597c478bd9Sstevel@tonic-gate freeb(mp); 45607c478bd9Sstevel@tonic-gate 45617c478bd9Sstevel@tonic-gate /* 45627c478bd9Sstevel@tonic-gate * When slow start after retransmission begins, start with 45637c478bd9Sstevel@tonic-gate * this seq no. tcp_rexmit_max marks the end of special slow 45647c478bd9Sstevel@tonic-gate * start phase. tcp_snd_burst controls how many segments 45657c478bd9Sstevel@tonic-gate * can be sent because of an ack. 45667c478bd9Sstevel@tonic-gate */ 45677c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_nxt = tcp->tcp_suna; 45687c478bd9Sstevel@tonic-gate tcp->tcp_snd_burst = TCP_CWND_SS; 45697c478bd9Sstevel@tonic-gate if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 45707c478bd9Sstevel@tonic-gate (tcp->tcp_unsent == 0)) { 45717c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_max = tcp->tcp_fss; 45727c478bd9Sstevel@tonic-gate } else { 45737c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_max = tcp->tcp_snxt; 45747c478bd9Sstevel@tonic-gate } 45757c478bd9Sstevel@tonic-gate tcp->tcp_rexmit = B_TRUE; 45767c478bd9Sstevel@tonic-gate tcp->tcp_dupack_cnt = 0; 45777c478bd9Sstevel@tonic-gate 45787c478bd9Sstevel@tonic-gate /* 45797c478bd9Sstevel@tonic-gate * Remove all rexmit SACK blk to start from fresh. 45807c478bd9Sstevel@tonic-gate */ 45817c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 45827c478bd9Sstevel@tonic-gate TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 45837c478bd9Sstevel@tonic-gate tcp->tcp_num_notsack_blk = 0; 45847c478bd9Sstevel@tonic-gate tcp->tcp_cnt_notsack_list = 0; 45857c478bd9Sstevel@tonic-gate } 45867c478bd9Sstevel@tonic-gate } 45877c478bd9Sstevel@tonic-gate 45887c478bd9Sstevel@tonic-gate /* 45897c478bd9Sstevel@tonic-gate * The TCP normal data output path. 45907c478bd9Sstevel@tonic-gate * NOTE: the logic of the fast path is duplicated from this function. 45917c478bd9Sstevel@tonic-gate */ 45927c478bd9Sstevel@tonic-gate static void 45937c478bd9Sstevel@tonic-gate tcp_wput_data(tcp_t *tcp, mblk_t *mp, int sock_id) 45947c478bd9Sstevel@tonic-gate { 45957c478bd9Sstevel@tonic-gate int len; 45967c478bd9Sstevel@tonic-gate mblk_t *local_time; 45977c478bd9Sstevel@tonic-gate mblk_t *mp1; 45987c478bd9Sstevel@tonic-gate uchar_t *rptr; 45997c478bd9Sstevel@tonic-gate uint32_t snxt; 46007c478bd9Sstevel@tonic-gate int tail_unsent; 46017c478bd9Sstevel@tonic-gate int tcpstate; 46027c478bd9Sstevel@tonic-gate int usable = 0; 46037c478bd9Sstevel@tonic-gate mblk_t *xmit_tail; 46047c478bd9Sstevel@tonic-gate int32_t num_burst_seg; 46057c478bd9Sstevel@tonic-gate int32_t mss; 46067c478bd9Sstevel@tonic-gate int32_t num_sack_blk = 0; 46077c478bd9Sstevel@tonic-gate int32_t tcp_hdr_len; 46087c478bd9Sstevel@tonic-gate ipaddr_t *dst; 46097c478bd9Sstevel@tonic-gate ipaddr_t *src; 46107c478bd9Sstevel@tonic-gate 46117c478bd9Sstevel@tonic-gate #ifdef DEBUG 46127c478bd9Sstevel@tonic-gate printf("tcp_wput_data(%d) ##############################\n", sock_id); 46137c478bd9Sstevel@tonic-gate #endif 46147c478bd9Sstevel@tonic-gate tcpstate = tcp->tcp_state; 46157c478bd9Sstevel@tonic-gate if (mp == NULL) { 46167c478bd9Sstevel@tonic-gate /* Really tacky... but we need this for detached closes. 
*/ 46177c478bd9Sstevel@tonic-gate len = tcp->tcp_unsent; 46187c478bd9Sstevel@tonic-gate goto data_null; 46197c478bd9Sstevel@tonic-gate } 46207c478bd9Sstevel@tonic-gate 46217c478bd9Sstevel@tonic-gate /* 46227c478bd9Sstevel@tonic-gate * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ, 46237c478bd9Sstevel@tonic-gate * or before a connection attempt has begun. 46247c478bd9Sstevel@tonic-gate * 46257c478bd9Sstevel@tonic-gate * The following should not happen in inetboot.... 46267c478bd9Sstevel@tonic-gate */ 46277c478bd9Sstevel@tonic-gate if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT || 46287c478bd9Sstevel@tonic-gate (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 46297c478bd9Sstevel@tonic-gate if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 46307c478bd9Sstevel@tonic-gate printf("tcp_wput_data: data after ordrel, %s\n", 46317c478bd9Sstevel@tonic-gate tcp_display(tcp, NULL, DISP_ADDR_AND_PORT)); 46327c478bd9Sstevel@tonic-gate } 46337c478bd9Sstevel@tonic-gate freemsg(mp); 46347c478bd9Sstevel@tonic-gate return; 46357c478bd9Sstevel@tonic-gate } 46367c478bd9Sstevel@tonic-gate 46377c478bd9Sstevel@tonic-gate /* Strip empties */ 46387c478bd9Sstevel@tonic-gate for (;;) { 46397c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 46407c478bd9Sstevel@tonic-gate (uintptr_t)INT_MAX); 46417c478bd9Sstevel@tonic-gate len = (int)(mp->b_wptr - mp->b_rptr); 46427c478bd9Sstevel@tonic-gate if (len > 0) 46437c478bd9Sstevel@tonic-gate break; 46447c478bd9Sstevel@tonic-gate mp1 = mp; 46457c478bd9Sstevel@tonic-gate mp = mp->b_cont; 46467c478bd9Sstevel@tonic-gate freeb(mp1); 46477c478bd9Sstevel@tonic-gate if (mp == NULL) { 46487c478bd9Sstevel@tonic-gate return; 46497c478bd9Sstevel@tonic-gate } 46507c478bd9Sstevel@tonic-gate } 46517c478bd9Sstevel@tonic-gate 46527c478bd9Sstevel@tonic-gate /* If we are the first on the list ... 
*/ 46537c478bd9Sstevel@tonic-gate if (tcp->tcp_xmit_head == NULL) { 46547c478bd9Sstevel@tonic-gate tcp->tcp_xmit_head = mp; 46557c478bd9Sstevel@tonic-gate tcp->tcp_xmit_tail = mp; 46567c478bd9Sstevel@tonic-gate tcp->tcp_xmit_tail_unsent = len; 46577c478bd9Sstevel@tonic-gate } else { 46587c478bd9Sstevel@tonic-gate tcp->tcp_xmit_last->b_cont = mp; 46597c478bd9Sstevel@tonic-gate len += tcp->tcp_unsent; 46607c478bd9Sstevel@tonic-gate } 46617c478bd9Sstevel@tonic-gate 46627c478bd9Sstevel@tonic-gate /* Tack on however many more positive length mblks we have */ 46637c478bd9Sstevel@tonic-gate if ((mp1 = mp->b_cont) != NULL) { 46647c478bd9Sstevel@tonic-gate do { 46657c478bd9Sstevel@tonic-gate int tlen; 46667c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp1->b_wptr - 46677c478bd9Sstevel@tonic-gate mp1->b_rptr) <= (uintptr_t)INT_MAX); 46687c478bd9Sstevel@tonic-gate tlen = (int)(mp1->b_wptr - mp1->b_rptr); 46697c478bd9Sstevel@tonic-gate if (tlen <= 0) { 46707c478bd9Sstevel@tonic-gate mp->b_cont = mp1->b_cont; 46717c478bd9Sstevel@tonic-gate freeb(mp1); 46727c478bd9Sstevel@tonic-gate } else { 46737c478bd9Sstevel@tonic-gate len += tlen; 46747c478bd9Sstevel@tonic-gate mp = mp1; 46757c478bd9Sstevel@tonic-gate } 46767c478bd9Sstevel@tonic-gate } while ((mp1 = mp->b_cont) != NULL); 46777c478bd9Sstevel@tonic-gate } 46787c478bd9Sstevel@tonic-gate tcp->tcp_xmit_last = mp; 46797c478bd9Sstevel@tonic-gate tcp->tcp_unsent = len; 46807c478bd9Sstevel@tonic-gate 46817c478bd9Sstevel@tonic-gate data_null: 46827c478bd9Sstevel@tonic-gate snxt = tcp->tcp_snxt; 46837c478bd9Sstevel@tonic-gate xmit_tail = tcp->tcp_xmit_tail; 46847c478bd9Sstevel@tonic-gate tail_unsent = tcp->tcp_xmit_tail_unsent; 46857c478bd9Sstevel@tonic-gate 46867c478bd9Sstevel@tonic-gate /* 46877c478bd9Sstevel@tonic-gate * Note that tcp_mss has been adjusted to take into account the 46887c478bd9Sstevel@tonic-gate * timestamp option if applicable. Because SACK options do not 46897c478bd9Sstevel@tonic-gate * appear in every TCP segments and they are of variable lengths, 46907c478bd9Sstevel@tonic-gate * they cannot be included in tcp_mss. Thus we need to calculate 46917c478bd9Sstevel@tonic-gate * the actual segment length when we need to send a segment which 46927c478bd9Sstevel@tonic-gate * includes SACK options. 
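	 *
	 * For example (option sizes assumed to be the usual ones: 8 bytes
	 * per SACK block, 1 byte per NOP pad, 2 bytes of option header),
	 * a segment carrying three SACK blocks needs
	 *
	 *	opt_len = 3 * 8 + 2 * 1 + 2 = 28 bytes
	 *
	 * of option space, so with a 1460-byte MSS only 1432 data bytes fit
	 * in that segment.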
	 */
	if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
		int32_t	opt_len;

		num_sack_blk = MIN(tcp->tcp_max_sack_blk,
		    tcp->tcp_num_sack_blk);
		opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN *
		    2 + TCPOPT_HEADER_LEN;
		mss = tcp->tcp_mss - opt_len;
		tcp_hdr_len = tcp->tcp_hdr_len + opt_len;
	} else {
		mss = tcp->tcp_mss;
		tcp_hdr_len = tcp->tcp_hdr_len;
	}

	if ((tcp->tcp_suna == snxt) &&
	    (prom_gettime() - tcp->tcp_last_recv_time) >= tcp->tcp_rto) {
		tcp->tcp_cwnd = MIN(tcp_slow_start_after_idle * mss,
		    MIN(4 * mss, MAX(2 * mss, 4380 / mss * mss)));
	}
	if (tcpstate == TCPS_SYN_RCVD) {
		/*
		 * The three-way connection establishment handshake is not
		 * complete yet.  We want to queue the data for transmission
		 * after entering ESTABLISHED state (RFC793).  Setting usable
		 * to zero causes a jump to the "done" label, effectively
		 * leaving the data on the queue.
		 */

		usable = 0;
	} else {
		int usable_r = tcp->tcp_swnd;

		/*
		 * In the special case when cwnd is zero, which can only
		 * happen if the connection is ECN capable, return now.
		 * New segments are sent using tcp_timer().  The timer
		 * is set in tcp_rput_data().
		 */
		if (tcp->tcp_cwnd == 0) {
			/*
			 * Note that tcp_cwnd is 0 before 3-way handshake is
			 * finished.
			 */
			assert(tcp->tcp_ecn_ok ||
			    tcp->tcp_state < TCPS_ESTABLISHED);
			return;
		}

		/* usable = MIN(swnd, cwnd) - unacked_bytes */
		if (usable_r > tcp->tcp_cwnd)
			usable_r = tcp->tcp_cwnd;

		/* NOTE: trouble if xmitting while SYN not acked?
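		 *
		 * Illustrative numbers (assumed): with swnd = 32768, cwnd =
		 * 8192, suna = 1000 and snxt = 3000 (2000 bytes in flight),
		 * the window computed here is MIN(32768, 8192) - 2000 = 6192
		 * bytes, which is then further capped by the amount of
		 * unsent data.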
		 */
		usable_r -= snxt;
		usable_r += tcp->tcp_suna;

		/* usable = MIN(usable, unsent) */
		if (usable_r > len)
			usable_r = len;

		/* usable = MAX(usable, {1 for urgent, 0 for data}) */
		if (usable_r != 0)
			usable = usable_r;
	}

	/* use uintptr_t to suppress the gcc warning */
	local_time = (mblk_t *)(uintptr_t)prom_gettime();

	/*
	 * "Our" Nagle Algorithm.  This is not the same as in the old
	 * BSD.  This is more in line with the true intent of Nagle.
	 *
	 * The conditions are:
	 * 1. The amount of unsent data (or amount of data which can be
	 *    sent, whichever is smaller) is less than Nagle limit.
	 * 2. The last sent size is also less than Nagle limit.
	 * 3. There is unack'ed data.
	 * 4. Urgent pointer is not set.  Send urgent data ignoring the
	 *    Nagle algorithm.  This reduces the probability that urgent
	 *    bytes get "merged" together.
	 * 5. The app has not closed the connection.  This eliminates the
	 *    wait time of the receiving side waiting for the last piece of
	 *    (small) data.
	 *
	 * If all are satisfied, exit without sending anything.  Note
	 * that the Nagle limit can be smaller than 1 MSS.  The Nagle limit
	 * is the smaller of 1 MSS and the global tcp_naglim_def (which
	 * defaults to 4095).
	 */
	if (usable < (int)tcp->tcp_naglim &&
	    tcp->tcp_naglim > tcp->tcp_last_sent_len &&
	    snxt != tcp->tcp_suna &&
	    !(tcp->tcp_valid_bits & TCP_URG_VALID))
		goto done;

	num_burst_seg = tcp->tcp_snd_burst;
	for (;;) {
		tcph_t	*tcph;
		mblk_t	*new_mp;

		if (num_burst_seg-- == 0)
			goto done;

		len = mss;
		if (len > usable) {
			len = usable;
			if (len <= 0) {
				/* Terminate the loop */
				goto done;
			}
			/*
			 * Sender silly-window avoidance.
			 * Ignore this if we are going to send a
			 * zero window probe out.
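			 *
			 * Roughly (a sketch of the test that follows; names
			 * follow the code):
			 *
			 *	if (len < tcp_max_swnd / 2 &&	-- offered chunk is tiny
			 *	    remaining unsent > len &&	-- more is still waiting
			 *	    !(urgent && len == 1) &&
			 *	    !zero_win_probe) {
			 *		if (nothing at all is in flight)
			 *			restart the rexmit timer
			 *		defer sending (goto done)
			 *	}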
48087c478bd9Sstevel@tonic-gate * 48097c478bd9Sstevel@tonic-gate * TODO: force data into microscopic window ?? 48107c478bd9Sstevel@tonic-gate * ==> (!pushed || (unsent > usable)) 48117c478bd9Sstevel@tonic-gate */ 48127c478bd9Sstevel@tonic-gate if (len < (tcp->tcp_max_swnd >> 1) && 48137c478bd9Sstevel@tonic-gate (tcp->tcp_unsent - (snxt - tcp->tcp_snxt)) > len && 48147c478bd9Sstevel@tonic-gate !((tcp->tcp_valid_bits & TCP_URG_VALID) && 48157c478bd9Sstevel@tonic-gate len == 1) && (! tcp->tcp_zero_win_probe)) { 48167c478bd9Sstevel@tonic-gate /* 48177c478bd9Sstevel@tonic-gate * If the retransmit timer is not running 48187c478bd9Sstevel@tonic-gate * we start it so that we will retransmit 48197c478bd9Sstevel@tonic-gate * in the case when the the receiver has 48207c478bd9Sstevel@tonic-gate * decremented the window. 48217c478bd9Sstevel@tonic-gate */ 48227c478bd9Sstevel@tonic-gate if (snxt == tcp->tcp_snxt && 48237c478bd9Sstevel@tonic-gate snxt == tcp->tcp_suna) { 48247c478bd9Sstevel@tonic-gate /* 48257c478bd9Sstevel@tonic-gate * We are not supposed to send 48267c478bd9Sstevel@tonic-gate * anything. So let's wait a little 48277c478bd9Sstevel@tonic-gate * bit longer before breaking SWS 48287c478bd9Sstevel@tonic-gate * avoidance. 48297c478bd9Sstevel@tonic-gate * 48307c478bd9Sstevel@tonic-gate * What should the value be? 48317c478bd9Sstevel@tonic-gate * Suggestion: MAX(init rexmit time, 48327c478bd9Sstevel@tonic-gate * tcp->tcp_rto) 48337c478bd9Sstevel@tonic-gate */ 48347c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 48357c478bd9Sstevel@tonic-gate } 48367c478bd9Sstevel@tonic-gate goto done; 48377c478bd9Sstevel@tonic-gate } 48387c478bd9Sstevel@tonic-gate } 48397c478bd9Sstevel@tonic-gate 48407c478bd9Sstevel@tonic-gate tcph = tcp->tcp_tcph; 48417c478bd9Sstevel@tonic-gate 48427c478bd9Sstevel@tonic-gate usable -= len; /* Approximate - can be adjusted later */ 48437c478bd9Sstevel@tonic-gate if (usable > 0) 48447c478bd9Sstevel@tonic-gate tcph->th_flags[0] = TH_ACK; 48457c478bd9Sstevel@tonic-gate else 48467c478bd9Sstevel@tonic-gate tcph->th_flags[0] = (TH_ACK | TH_PUSH); 48477c478bd9Sstevel@tonic-gate 48487c478bd9Sstevel@tonic-gate U32_TO_ABE32(snxt, tcph->th_seq); 48497c478bd9Sstevel@tonic-gate 48507c478bd9Sstevel@tonic-gate if (tcp->tcp_valid_bits) { 48517c478bd9Sstevel@tonic-gate uchar_t *prev_rptr = xmit_tail->b_rptr; 48527c478bd9Sstevel@tonic-gate uint32_t prev_snxt = tcp->tcp_snxt; 48537c478bd9Sstevel@tonic-gate 48547c478bd9Sstevel@tonic-gate if (tail_unsent == 0) { 48557c478bd9Sstevel@tonic-gate assert(xmit_tail->b_cont != NULL); 48567c478bd9Sstevel@tonic-gate xmit_tail = xmit_tail->b_cont; 48577c478bd9Sstevel@tonic-gate prev_rptr = xmit_tail->b_rptr; 48587c478bd9Sstevel@tonic-gate tail_unsent = (int)(xmit_tail->b_wptr - 48597c478bd9Sstevel@tonic-gate xmit_tail->b_rptr); 48607c478bd9Sstevel@tonic-gate } else { 48617c478bd9Sstevel@tonic-gate xmit_tail->b_rptr = xmit_tail->b_wptr - 48627c478bd9Sstevel@tonic-gate tail_unsent; 48637c478bd9Sstevel@tonic-gate } 48647c478bd9Sstevel@tonic-gate mp = tcp_xmit_mp(tcp, xmit_tail, len, NULL, NULL, 48657c478bd9Sstevel@tonic-gate snxt, B_FALSE, (uint32_t *)&len, B_FALSE); 48667c478bd9Sstevel@tonic-gate /* Restore tcp_snxt so we get amount sent right. 
*/ 48677c478bd9Sstevel@tonic-gate tcp->tcp_snxt = prev_snxt; 48687c478bd9Sstevel@tonic-gate if (prev_rptr == xmit_tail->b_rptr) 48697c478bd9Sstevel@tonic-gate xmit_tail->b_prev = local_time; 48707c478bd9Sstevel@tonic-gate else 48717c478bd9Sstevel@tonic-gate xmit_tail->b_rptr = prev_rptr; 48727c478bd9Sstevel@tonic-gate 48737c478bd9Sstevel@tonic-gate if (mp == NULL) 48747c478bd9Sstevel@tonic-gate break; 48757c478bd9Sstevel@tonic-gate 48767c478bd9Sstevel@tonic-gate mp1 = mp->b_cont; 48777c478bd9Sstevel@tonic-gate 48787c478bd9Sstevel@tonic-gate snxt += len; 48797c478bd9Sstevel@tonic-gate tcp->tcp_last_sent_len = (ushort_t)len; 48807c478bd9Sstevel@tonic-gate while (mp1->b_cont) { 48817c478bd9Sstevel@tonic-gate xmit_tail = xmit_tail->b_cont; 48827c478bd9Sstevel@tonic-gate xmit_tail->b_prev = local_time; 48837c478bd9Sstevel@tonic-gate mp1 = mp1->b_cont; 48847c478bd9Sstevel@tonic-gate } 48857c478bd9Sstevel@tonic-gate tail_unsent = xmit_tail->b_wptr - mp1->b_wptr; 48867c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutDataSegs); 48877c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpOutDataBytes, len); 48887c478bd9Sstevel@tonic-gate /* Dump the packet when debugging. */ 48897c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_wput_data (valid bits)", mp); 48907c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp); 48917c478bd9Sstevel@tonic-gate freeb(mp); 48927c478bd9Sstevel@tonic-gate continue; 48937c478bd9Sstevel@tonic-gate } 48947c478bd9Sstevel@tonic-gate 48957c478bd9Sstevel@tonic-gate snxt += len; /* Adjust later if we don't send all of len */ 48967c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutDataSegs); 48977c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpOutDataBytes, len); 48987c478bd9Sstevel@tonic-gate 48997c478bd9Sstevel@tonic-gate if (tail_unsent) { 49007c478bd9Sstevel@tonic-gate /* Are the bytes above us in flight? 
*/ 49017c478bd9Sstevel@tonic-gate rptr = xmit_tail->b_wptr - tail_unsent; 49027c478bd9Sstevel@tonic-gate if (rptr != xmit_tail->b_rptr) { 49037c478bd9Sstevel@tonic-gate tail_unsent -= len; 49047c478bd9Sstevel@tonic-gate len += tcp_hdr_len; 49057c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_len = htons(len); 49067c478bd9Sstevel@tonic-gate mp = dupb(xmit_tail); 49077c478bd9Sstevel@tonic-gate if (!mp) 49087c478bd9Sstevel@tonic-gate break; 49097c478bd9Sstevel@tonic-gate mp->b_rptr = rptr; 49107c478bd9Sstevel@tonic-gate goto must_alloc; 49117c478bd9Sstevel@tonic-gate } 49127c478bd9Sstevel@tonic-gate } else { 49137c478bd9Sstevel@tonic-gate xmit_tail = xmit_tail->b_cont; 49147c478bd9Sstevel@tonic-gate assert((uintptr_t)(xmit_tail->b_wptr - 49157c478bd9Sstevel@tonic-gate xmit_tail->b_rptr) <= (uintptr_t)INT_MAX); 49167c478bd9Sstevel@tonic-gate tail_unsent = (int)(xmit_tail->b_wptr - 49177c478bd9Sstevel@tonic-gate xmit_tail->b_rptr); 49187c478bd9Sstevel@tonic-gate } 49197c478bd9Sstevel@tonic-gate 49207c478bd9Sstevel@tonic-gate tail_unsent -= len; 49217c478bd9Sstevel@tonic-gate tcp->tcp_last_sent_len = (ushort_t)len; 49227c478bd9Sstevel@tonic-gate 49237c478bd9Sstevel@tonic-gate len += tcp_hdr_len; 49247c478bd9Sstevel@tonic-gate if (tcp->tcp_ipversion == IPV4_VERSION) 49257c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_len = htons(len); 49267c478bd9Sstevel@tonic-gate 49277c478bd9Sstevel@tonic-gate xmit_tail->b_prev = local_time; 49287c478bd9Sstevel@tonic-gate 49297c478bd9Sstevel@tonic-gate mp = dupb(xmit_tail); 49307c478bd9Sstevel@tonic-gate if (mp == NULL) 49317c478bd9Sstevel@tonic-gate goto out_of_mem; 49327c478bd9Sstevel@tonic-gate 49337c478bd9Sstevel@tonic-gate len = tcp_hdr_len; 49347c478bd9Sstevel@tonic-gate /* 49357c478bd9Sstevel@tonic-gate * There are four reasons to allocate a new hdr mblk: 49367c478bd9Sstevel@tonic-gate * 1) The bytes above us are in use by another packet 49377c478bd9Sstevel@tonic-gate * 2) We don't have good alignment 49387c478bd9Sstevel@tonic-gate * 3) The mblk is being shared 49397c478bd9Sstevel@tonic-gate * 4) We don't have enough room for a header 49407c478bd9Sstevel@tonic-gate */ 49417c478bd9Sstevel@tonic-gate rptr = mp->b_rptr - len; 49427c478bd9Sstevel@tonic-gate if (!OK_32PTR(rptr) || 49437c478bd9Sstevel@tonic-gate rptr < mp->b_datap) { 49447c478bd9Sstevel@tonic-gate /* NOTE: we assume allocb returns an OK_32PTR */ 49457c478bd9Sstevel@tonic-gate 49467c478bd9Sstevel@tonic-gate must_alloc:; 49477c478bd9Sstevel@tonic-gate mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 49487c478bd9Sstevel@tonic-gate tcp_wroff_xtra, 0); 49497c478bd9Sstevel@tonic-gate if (mp1 == NULL) { 49507c478bd9Sstevel@tonic-gate freemsg(mp); 49517c478bd9Sstevel@tonic-gate goto out_of_mem; 49527c478bd9Sstevel@tonic-gate } 49537c478bd9Sstevel@tonic-gate mp1->b_cont = mp; 49547c478bd9Sstevel@tonic-gate mp = mp1; 49557c478bd9Sstevel@tonic-gate /* Leave room for Link Level header */ 49567c478bd9Sstevel@tonic-gate len = tcp_hdr_len; 49577c478bd9Sstevel@tonic-gate rptr = &mp->b_rptr[tcp_wroff_xtra]; 49587c478bd9Sstevel@tonic-gate mp->b_wptr = &rptr[len]; 49597c478bd9Sstevel@tonic-gate } 49607c478bd9Sstevel@tonic-gate 49617c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok) { 4962*53391bafSeota /* use uintptr_t to suppress the gcc warning */ 4963*53391bafSeota U32_TO_BE32((uint32_t)(uintptr_t)local_time, 49647c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 49657c478bd9Sstevel@tonic-gate U32_TO_BE32(tcp->tcp_ts_recent, 49667c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 
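			/*
			 * The two stores above drop our clock (TSval) and the
			 * peer's echoed value (TSecr, tcp_ts_recent) into the
			 * timestamp option that the template header already
			 * carries.  Presumably the option is laid out the
			 * usual way (a sketch, not verified against the
			 * template builder):
			 *
			 *	offset 20  21  22      23      24..27  28..31
			 *	       NOP NOP kind=8  len=10  TSval   TSecr
			 *
			 * which is why the stores land at
			 * TCP_MIN_HEADER_LENGTH + 4 and + 8.
			 */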
49677c478bd9Sstevel@tonic-gate } else { 49687c478bd9Sstevel@tonic-gate assert(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 49697c478bd9Sstevel@tonic-gate } 49707c478bd9Sstevel@tonic-gate 49717c478bd9Sstevel@tonic-gate mp->b_rptr = rptr; 49727c478bd9Sstevel@tonic-gate 49737c478bd9Sstevel@tonic-gate /* Copy the template header. */ 49747c478bd9Sstevel@tonic-gate dst = (ipaddr_t *)rptr; 49757c478bd9Sstevel@tonic-gate src = (ipaddr_t *)tcp->tcp_iphc; 49767c478bd9Sstevel@tonic-gate dst[0] = src[0]; 49777c478bd9Sstevel@tonic-gate dst[1] = src[1]; 49787c478bd9Sstevel@tonic-gate dst[2] = src[2]; 49797c478bd9Sstevel@tonic-gate dst[3] = src[3]; 49807c478bd9Sstevel@tonic-gate dst[4] = src[4]; 49817c478bd9Sstevel@tonic-gate dst[5] = src[5]; 49827c478bd9Sstevel@tonic-gate dst[6] = src[6]; 49837c478bd9Sstevel@tonic-gate dst[7] = src[7]; 49847c478bd9Sstevel@tonic-gate dst[8] = src[8]; 49857c478bd9Sstevel@tonic-gate dst[9] = src[9]; 49867c478bd9Sstevel@tonic-gate len = tcp->tcp_hdr_len; 49877c478bd9Sstevel@tonic-gate if (len -= 40) { 49887c478bd9Sstevel@tonic-gate len >>= 2; 49897c478bd9Sstevel@tonic-gate dst += 10; 49907c478bd9Sstevel@tonic-gate src += 10; 49917c478bd9Sstevel@tonic-gate do { 49927c478bd9Sstevel@tonic-gate *dst++ = *src++; 49937c478bd9Sstevel@tonic-gate } while (--len); 49947c478bd9Sstevel@tonic-gate } 49957c478bd9Sstevel@tonic-gate 49967c478bd9Sstevel@tonic-gate /* 49977c478bd9Sstevel@tonic-gate * Set tcph to point to the header of the outgoing packet, 49987c478bd9Sstevel@tonic-gate * not to the template header. 49997c478bd9Sstevel@tonic-gate */ 50007c478bd9Sstevel@tonic-gate tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 50017c478bd9Sstevel@tonic-gate 50027c478bd9Sstevel@tonic-gate /* 50037c478bd9Sstevel@tonic-gate * Set the ECN info in the TCP header if it is not a zero 50047c478bd9Sstevel@tonic-gate * window probe. Zero window probe is only sent in 50057c478bd9Sstevel@tonic-gate * tcp_wput_data() and tcp_timer(). 
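		 *
		 * In outline (a sketch of the block below):
		 *
		 *	SET_ECT(ip header)		-- mark the packet ECN-capable
		 *	if (tcp_ecn_echo_on)
		 *		flags |= TH_ECE		-- keep echoing CE to the peer
		 *	if (tcp_cwr && !tcp_ecn_cwr_sent) {
		 *		flags |= TH_CWR		-- tell the peer we cut cwnd
		 *		tcp_ecn_cwr_sent = B_TRUE  -- and say so only once
		 *	}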
50067c478bd9Sstevel@tonic-gate */ 50077c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) { 50087c478bd9Sstevel@tonic-gate SET_ECT(tcp, rptr); 50097c478bd9Sstevel@tonic-gate 50107c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_echo_on) 50117c478bd9Sstevel@tonic-gate tcph->th_flags[0] |= TH_ECE; 50127c478bd9Sstevel@tonic-gate if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 50137c478bd9Sstevel@tonic-gate tcph->th_flags[0] |= TH_CWR; 50147c478bd9Sstevel@tonic-gate tcp->tcp_ecn_cwr_sent = B_TRUE; 50157c478bd9Sstevel@tonic-gate } 50167c478bd9Sstevel@tonic-gate } 50177c478bd9Sstevel@tonic-gate 50187c478bd9Sstevel@tonic-gate /* Fill in SACK options */ 50197c478bd9Sstevel@tonic-gate if (num_sack_blk > 0) { 50207c478bd9Sstevel@tonic-gate uchar_t *wptr = rptr + tcp->tcp_hdr_len; 50217c478bd9Sstevel@tonic-gate sack_blk_t *tmp; 50227c478bd9Sstevel@tonic-gate int32_t i; 50237c478bd9Sstevel@tonic-gate 50247c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 50257c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_NOP; 50267c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_SACK; 50277c478bd9Sstevel@tonic-gate wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 50287c478bd9Sstevel@tonic-gate sizeof (sack_blk_t); 50297c478bd9Sstevel@tonic-gate wptr += TCPOPT_REAL_SACK_LEN; 50307c478bd9Sstevel@tonic-gate 50317c478bd9Sstevel@tonic-gate tmp = tcp->tcp_sack_list; 50327c478bd9Sstevel@tonic-gate for (i = 0; i < num_sack_blk; i++) { 50337c478bd9Sstevel@tonic-gate U32_TO_BE32(tmp[i].begin, wptr); 50347c478bd9Sstevel@tonic-gate wptr += sizeof (tcp_seq); 50357c478bd9Sstevel@tonic-gate U32_TO_BE32(tmp[i].end, wptr); 50367c478bd9Sstevel@tonic-gate wptr += sizeof (tcp_seq); 50377c478bd9Sstevel@tonic-gate } 50387c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) 50397c478bd9Sstevel@tonic-gate << 4); 50407c478bd9Sstevel@tonic-gate } 50417c478bd9Sstevel@tonic-gate 50427c478bd9Sstevel@tonic-gate if (tail_unsent) { 50437c478bd9Sstevel@tonic-gate mp1 = mp->b_cont; 50447c478bd9Sstevel@tonic-gate if (mp1 == NULL) 50457c478bd9Sstevel@tonic-gate mp1 = mp; 50467c478bd9Sstevel@tonic-gate /* 50477c478bd9Sstevel@tonic-gate * If we're a little short, tack on more mblks 50487c478bd9Sstevel@tonic-gate * as long as we don't need to split an mblk. 
50497c478bd9Sstevel@tonic-gate */ 50507c478bd9Sstevel@tonic-gate while (tail_unsent < 0 && 50517c478bd9Sstevel@tonic-gate tail_unsent + (int)(xmit_tail->b_cont->b_wptr - 50527c478bd9Sstevel@tonic-gate xmit_tail->b_cont->b_rptr) <= 0) { 50537c478bd9Sstevel@tonic-gate xmit_tail = xmit_tail->b_cont; 50547c478bd9Sstevel@tonic-gate /* Stash for rtt use later */ 50557c478bd9Sstevel@tonic-gate xmit_tail->b_prev = local_time; 50567c478bd9Sstevel@tonic-gate mp1->b_cont = dupb(xmit_tail); 50577c478bd9Sstevel@tonic-gate mp1 = mp1->b_cont; 50587c478bd9Sstevel@tonic-gate assert((uintptr_t)(xmit_tail->b_wptr - 50597c478bd9Sstevel@tonic-gate xmit_tail->b_rptr) <= (uintptr_t)INT_MAX); 50607c478bd9Sstevel@tonic-gate tail_unsent += (int)(xmit_tail->b_wptr - 50617c478bd9Sstevel@tonic-gate xmit_tail->b_rptr); 50627c478bd9Sstevel@tonic-gate if (mp1 == NULL) { 50637c478bd9Sstevel@tonic-gate freemsg(mp); 50647c478bd9Sstevel@tonic-gate goto out_of_mem; 50657c478bd9Sstevel@tonic-gate } 50667c478bd9Sstevel@tonic-gate } 50677c478bd9Sstevel@tonic-gate /* Trim back any surplus on the last mblk */ 50687c478bd9Sstevel@tonic-gate if (tail_unsent > 0) 50697c478bd9Sstevel@tonic-gate mp1->b_wptr -= tail_unsent; 50707c478bd9Sstevel@tonic-gate if (tail_unsent < 0) { 50717c478bd9Sstevel@tonic-gate uint32_t ip_len; 50727c478bd9Sstevel@tonic-gate 50737c478bd9Sstevel@tonic-gate /* 50747c478bd9Sstevel@tonic-gate * We did not send everything we could in 50757c478bd9Sstevel@tonic-gate * order to preserve mblk boundaries. 50767c478bd9Sstevel@tonic-gate */ 50777c478bd9Sstevel@tonic-gate usable -= tail_unsent; 50787c478bd9Sstevel@tonic-gate snxt += tail_unsent; 50797c478bd9Sstevel@tonic-gate tcp->tcp_last_sent_len += tail_unsent; 50807c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpOutDataBytes, 50817c478bd9Sstevel@tonic-gate tail_unsent); 50827c478bd9Sstevel@tonic-gate /* Adjust the IP length field. */ 50837c478bd9Sstevel@tonic-gate ip_len = ntohs(((struct ip *)rptr)->ip_len) + 50847c478bd9Sstevel@tonic-gate tail_unsent; 50857c478bd9Sstevel@tonic-gate ((struct ip *)rptr)->ip_len = htons(ip_len); 50867c478bd9Sstevel@tonic-gate tail_unsent = 0; 50877c478bd9Sstevel@tonic-gate } 50887c478bd9Sstevel@tonic-gate } 50897c478bd9Sstevel@tonic-gate 50907c478bd9Sstevel@tonic-gate if (mp == NULL) 50917c478bd9Sstevel@tonic-gate goto out_of_mem; 50927c478bd9Sstevel@tonic-gate 50937c478bd9Sstevel@tonic-gate /* 50947c478bd9Sstevel@tonic-gate * Performance hit! We need to pullup the whole message 50957c478bd9Sstevel@tonic-gate * in order to do checksum and for the MAC output routine. 
50967c478bd9Sstevel@tonic-gate */ 50977c478bd9Sstevel@tonic-gate if (mp->b_cont != NULL) { 50987c478bd9Sstevel@tonic-gate int mp_size; 50997c478bd9Sstevel@tonic-gate #ifdef DEBUG 51007c478bd9Sstevel@tonic-gate printf("Multiple mblk %d\n", msgdsize(mp)); 51017c478bd9Sstevel@tonic-gate #endif 51027c478bd9Sstevel@tonic-gate new_mp = allocb(msgdsize(mp) + tcp_wroff_xtra, 0); 51037c478bd9Sstevel@tonic-gate new_mp->b_rptr += tcp_wroff_xtra; 51047c478bd9Sstevel@tonic-gate new_mp->b_wptr = new_mp->b_rptr; 51057c478bd9Sstevel@tonic-gate while (mp != NULL) { 51067c478bd9Sstevel@tonic-gate mp_size = mp->b_wptr - mp->b_rptr; 51077c478bd9Sstevel@tonic-gate bcopy(mp->b_rptr, new_mp->b_wptr, mp_size); 51087c478bd9Sstevel@tonic-gate new_mp->b_wptr += mp_size; 51097c478bd9Sstevel@tonic-gate mp = mp->b_cont; 51107c478bd9Sstevel@tonic-gate } 51117c478bd9Sstevel@tonic-gate freemsg(mp); 51127c478bd9Sstevel@tonic-gate mp = new_mp; 51137c478bd9Sstevel@tonic-gate } 51147c478bd9Sstevel@tonic-gate tcp_set_cksum(mp); 51157c478bd9Sstevel@tonic-gate ((struct ip *)mp->b_rptr)->ip_ttl = (uint8_t)tcp_ipv4_ttl; 51167c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_wput_data", mp); 51177c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp); 51187c478bd9Sstevel@tonic-gate freemsg(mp); 51197c478bd9Sstevel@tonic-gate } 51207c478bd9Sstevel@tonic-gate out_of_mem:; 51217c478bd9Sstevel@tonic-gate /* Pretend that all we were trying to send really got sent */ 51227c478bd9Sstevel@tonic-gate if (tail_unsent < 0) { 51237c478bd9Sstevel@tonic-gate do { 51247c478bd9Sstevel@tonic-gate xmit_tail = xmit_tail->b_cont; 51257c478bd9Sstevel@tonic-gate xmit_tail->b_prev = local_time; 51267c478bd9Sstevel@tonic-gate assert((uintptr_t)(xmit_tail->b_wptr - 51277c478bd9Sstevel@tonic-gate xmit_tail->b_rptr) <= (uintptr_t)INT_MAX); 51287c478bd9Sstevel@tonic-gate tail_unsent += (int)(xmit_tail->b_wptr - 51297c478bd9Sstevel@tonic-gate xmit_tail->b_rptr); 51307c478bd9Sstevel@tonic-gate } while (tail_unsent < 0); 51317c478bd9Sstevel@tonic-gate } 51327c478bd9Sstevel@tonic-gate done:; 51337c478bd9Sstevel@tonic-gate tcp->tcp_xmit_tail = xmit_tail; 51347c478bd9Sstevel@tonic-gate tcp->tcp_xmit_tail_unsent = tail_unsent; 51357c478bd9Sstevel@tonic-gate len = tcp->tcp_snxt - snxt; 51367c478bd9Sstevel@tonic-gate if (len) { 51377c478bd9Sstevel@tonic-gate /* 51387c478bd9Sstevel@tonic-gate * If new data was sent, need to update the notsack 51397c478bd9Sstevel@tonic-gate * list, which is, afterall, data blocks that have 51407c478bd9Sstevel@tonic-gate * not been sack'ed by the receiver. New data is 51417c478bd9Sstevel@tonic-gate * not sack'ed. 51427c478bd9Sstevel@tonic-gate */ 51437c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 51447c478bd9Sstevel@tonic-gate /* len is a negative value. 
*/ 51457c478bd9Sstevel@tonic-gate tcp->tcp_pipe -= len; 51467c478bd9Sstevel@tonic-gate tcp_notsack_update(&(tcp->tcp_notsack_list), 51477c478bd9Sstevel@tonic-gate tcp->tcp_snxt, snxt, 51487c478bd9Sstevel@tonic-gate &(tcp->tcp_num_notsack_blk), 51497c478bd9Sstevel@tonic-gate &(tcp->tcp_cnt_notsack_list)); 51507c478bd9Sstevel@tonic-gate } 51517c478bd9Sstevel@tonic-gate tcp->tcp_snxt = snxt + tcp->tcp_fin_sent; 51527c478bd9Sstevel@tonic-gate tcp->tcp_rack = tcp->tcp_rnxt; 51537c478bd9Sstevel@tonic-gate tcp->tcp_rack_cnt = 0; 51547c478bd9Sstevel@tonic-gate if ((snxt + len) == tcp->tcp_suna) { 51557c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 51567c478bd9Sstevel@tonic-gate } 51577c478bd9Sstevel@tonic-gate /* 51587c478bd9Sstevel@tonic-gate * Note that len is the amount we just sent but with a negative 51597c478bd9Sstevel@tonic-gate * sign. We update tcp_unsent here since we may come back to 51607c478bd9Sstevel@tonic-gate * tcp_wput_data from tcp_state_wait. 51617c478bd9Sstevel@tonic-gate */ 51627c478bd9Sstevel@tonic-gate len += tcp->tcp_unsent; 51637c478bd9Sstevel@tonic-gate tcp->tcp_unsent = len; 51647c478bd9Sstevel@tonic-gate 51657c478bd9Sstevel@tonic-gate /* 51667c478bd9Sstevel@tonic-gate * Let's wait till all the segments have been acked, since we 51677c478bd9Sstevel@tonic-gate * don't have a timer. 51687c478bd9Sstevel@tonic-gate */ 51697c478bd9Sstevel@tonic-gate (void) tcp_state_wait(sock_id, tcp, TCPS_ALL_ACKED); 51707c478bd9Sstevel@tonic-gate return; 51717c478bd9Sstevel@tonic-gate } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) { 51727c478bd9Sstevel@tonic-gate /* 51737c478bd9Sstevel@tonic-gate * Didn't send anything. Make sure the timer is running 51747c478bd9Sstevel@tonic-gate * so that we will probe a zero window. 51757c478bd9Sstevel@tonic-gate */ 51767c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 51777c478bd9Sstevel@tonic-gate } 51787c478bd9Sstevel@tonic-gate 51797c478bd9Sstevel@tonic-gate /* Note that len is the amount we just sent but with a negative sign */ 51807c478bd9Sstevel@tonic-gate len += tcp->tcp_unsent; 51817c478bd9Sstevel@tonic-gate tcp->tcp_unsent = len; 51827c478bd9Sstevel@tonic-gate 51837c478bd9Sstevel@tonic-gate } 51847c478bd9Sstevel@tonic-gate 51857c478bd9Sstevel@tonic-gate static void 51867c478bd9Sstevel@tonic-gate tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, 51877c478bd9Sstevel@tonic-gate uint32_t seg_seq, uint32_t seg_ack, int seg_len, tcph_t *tcph, 51887c478bd9Sstevel@tonic-gate int sock_id) 51897c478bd9Sstevel@tonic-gate { 51907c478bd9Sstevel@tonic-gate int32_t bytes_acked; 51917c478bd9Sstevel@tonic-gate int32_t gap; 51927c478bd9Sstevel@tonic-gate int32_t rgap; 51937c478bd9Sstevel@tonic-gate tcp_opt_t tcpopt; 51947c478bd9Sstevel@tonic-gate uint_t flags; 51957c478bd9Sstevel@tonic-gate uint32_t new_swnd = 0; 51967c478bd9Sstevel@tonic-gate 51977c478bd9Sstevel@tonic-gate #ifdef DEBUG 51987c478bd9Sstevel@tonic-gate printf("Time wait processing called ###############3\n"); 51997c478bd9Sstevel@tonic-gate #endif 52007c478bd9Sstevel@tonic-gate 52017c478bd9Sstevel@tonic-gate /* Just make sure we send the right sock_id to tcp_clean_death */ 52027c478bd9Sstevel@tonic-gate if ((sockets[sock_id].pcb == NULL) || (sockets[sock_id].pcb != tcp)) 52037c478bd9Sstevel@tonic-gate sock_id = -1; 52047c478bd9Sstevel@tonic-gate 52057c478bd9Sstevel@tonic-gate flags = (unsigned int)tcph->th_flags[0] & 0xFF; 52067c478bd9Sstevel@tonic-gate new_swnd = BE16_TO_U16(tcph->th_win) << 52077c478bd9Sstevel@tonic-gate ((tcph->th_flags[0] & TH_SYN) ? 
0 : tcp->tcp_snd_ws); 52087c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok) { 52097c478bd9Sstevel@tonic-gate if (!tcp_paws_check(tcp, tcph, &tcpopt)) { 52107c478bd9Sstevel@tonic-gate freemsg(mp); 52117c478bd9Sstevel@tonic-gate tcp_xmit_ctl(NULL, tcp, NULL, tcp->tcp_snxt, 52127c478bd9Sstevel@tonic-gate tcp->tcp_rnxt, TH_ACK, 0, -1); 52137c478bd9Sstevel@tonic-gate return; 52147c478bd9Sstevel@tonic-gate } 52157c478bd9Sstevel@tonic-gate } 52167c478bd9Sstevel@tonic-gate gap = seg_seq - tcp->tcp_rnxt; 52177c478bd9Sstevel@tonic-gate rgap = tcp->tcp_rwnd - (gap + seg_len); 52187c478bd9Sstevel@tonic-gate if (gap < 0) { 52197c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataDupSegs); 52207c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataDupBytes, 52217c478bd9Sstevel@tonic-gate (seg_len > -gap ? -gap : seg_len)); 52227c478bd9Sstevel@tonic-gate seg_len += gap; 52237c478bd9Sstevel@tonic-gate if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 52247c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 52257c478bd9Sstevel@tonic-gate freemsg(mp); 52267c478bd9Sstevel@tonic-gate return; 52277c478bd9Sstevel@tonic-gate } 52287c478bd9Sstevel@tonic-gate if ((flags & TH_FIN) && seg_len == -1) { 52297c478bd9Sstevel@tonic-gate /* 52307c478bd9Sstevel@tonic-gate * When TCP receives a duplicate FIN in 52317c478bd9Sstevel@tonic-gate * TIME_WAIT state, restart the 2 MSL timer. 52327c478bd9Sstevel@tonic-gate * See page 73 in RFC 793. Make sure this TCP 52337c478bd9Sstevel@tonic-gate * is already on the TIME_WAIT list. If not, 52347c478bd9Sstevel@tonic-gate * just restart the timer. 52357c478bd9Sstevel@tonic-gate */ 52367c478bd9Sstevel@tonic-gate tcp_time_wait_remove(tcp); 52377c478bd9Sstevel@tonic-gate tcp_time_wait_append(tcp); 52387c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp_time_wait_interval); 52397c478bd9Sstevel@tonic-gate tcp_xmit_ctl(NULL, tcp, NULL, tcp->tcp_snxt, 52407c478bd9Sstevel@tonic-gate tcp->tcp_rnxt, TH_ACK, 0, -1); 52417c478bd9Sstevel@tonic-gate freemsg(mp); 52427c478bd9Sstevel@tonic-gate return; 52437c478bd9Sstevel@tonic-gate } 52447c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 52457c478bd9Sstevel@tonic-gate seg_len = 0; 52467c478bd9Sstevel@tonic-gate goto process_ack; 52477c478bd9Sstevel@tonic-gate } 52487c478bd9Sstevel@tonic-gate 52497c478bd9Sstevel@tonic-gate /* Fix seg_seq, and chew the gap off the front. */ 52507c478bd9Sstevel@tonic-gate seg_seq = tcp->tcp_rnxt; 52517c478bd9Sstevel@tonic-gate } 52527c478bd9Sstevel@tonic-gate 52537c478bd9Sstevel@tonic-gate if ((flags & TH_SYN) && gap > 0 && rgap < 0) { 52547c478bd9Sstevel@tonic-gate /* 52557c478bd9Sstevel@tonic-gate * Make sure that when we accept the connection, pick 52567c478bd9Sstevel@tonic-gate * an ISS greater than (tcp_snxt + ISS_INCR/2) for the 52577c478bd9Sstevel@tonic-gate * old connection. 52587c478bd9Sstevel@tonic-gate * 52597c478bd9Sstevel@tonic-gate * The next ISS generated is equal to tcp_iss_incr_extra 52607c478bd9Sstevel@tonic-gate * + ISS_INCR/2 + other components depending on the 52617c478bd9Sstevel@tonic-gate * value of tcp_strong_iss. We pre-calculate the new 52627c478bd9Sstevel@tonic-gate * ISS here and compare with tcp_snxt to determine if 52637c478bd9Sstevel@tonic-gate * we need to make adjustment to tcp_iss_incr_extra. 
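		 *
		 * As a made-up example (numbers are purely illustrative): if
		 * the old connection's tcp_snxt is 2,000,000 but the ISS
		 * pre-computed below only comes out to 1,950,000, the
		 * difference of 50,000 is added to tcp_iss_incr_extra so the
		 * ISS actually handed to the new connection still ends up at
		 * least ISS_INCR/2 beyond the old tcp_snxt.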
52647c478bd9Sstevel@tonic-gate * 52657c478bd9Sstevel@tonic-gate * Note that since we are now in the global queue 52667c478bd9Sstevel@tonic-gate * perimeter and need to do a lateral_put() to the 52677c478bd9Sstevel@tonic-gate * listener queue, there can be other connection requests/ 52687c478bd9Sstevel@tonic-gate * attempts while the lateral_put() is going on. That 52697c478bd9Sstevel@tonic-gate * means what we calculate here may not be correct. This 52707c478bd9Sstevel@tonic-gate * is extremely difficult to solve unless TCP and IP 52717c478bd9Sstevel@tonic-gate * modules are merged and there is no perimeter, but just 52727c478bd9Sstevel@tonic-gate * locks. The above calculation is ugly and is a 52737c478bd9Sstevel@tonic-gate * waste of CPU cycles... 52747c478bd9Sstevel@tonic-gate */ 52757c478bd9Sstevel@tonic-gate uint32_t new_iss = tcp_iss_incr_extra; 52767c478bd9Sstevel@tonic-gate int32_t adj; 52777c478bd9Sstevel@tonic-gate 52787c478bd9Sstevel@tonic-gate /* Add time component and min random (i.e. 1). */ 52797c478bd9Sstevel@tonic-gate new_iss += (prom_gettime() >> ISS_NSEC_SHT) + 1; 52807c478bd9Sstevel@tonic-gate if ((adj = (int32_t)(tcp->tcp_snxt - new_iss)) > 0) { 52817c478bd9Sstevel@tonic-gate /* 52827c478bd9Sstevel@tonic-gate * New ISS not guaranteed to be ISS_INCR/2 52837c478bd9Sstevel@tonic-gate * ahead of the current tcp_snxt, so add the 52847c478bd9Sstevel@tonic-gate * difference to tcp_iss_incr_extra. 52857c478bd9Sstevel@tonic-gate */ 52867c478bd9Sstevel@tonic-gate tcp_iss_incr_extra += adj; 52877c478bd9Sstevel@tonic-gate } 52887c478bd9Sstevel@tonic-gate tcp_clean_death(sock_id, tcp, 0); 52897c478bd9Sstevel@tonic-gate 52907c478bd9Sstevel@tonic-gate /* 52917c478bd9Sstevel@tonic-gate * This is a passive open. Right now we do not 52927c478bd9Sstevel@tonic-gate * do anything... 52937c478bd9Sstevel@tonic-gate */ 52947c478bd9Sstevel@tonic-gate freemsg(mp); 52957c478bd9Sstevel@tonic-gate return; 52967c478bd9Sstevel@tonic-gate } 52977c478bd9Sstevel@tonic-gate 52987c478bd9Sstevel@tonic-gate /* 52997c478bd9Sstevel@tonic-gate * rgap is the amount of stuff received out of window. A negative 53007c478bd9Sstevel@tonic-gate * value is the amount out of window. 53017c478bd9Sstevel@tonic-gate */ 53027c478bd9Sstevel@tonic-gate if (rgap < 0) { 53037c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataPastWinSegs); 53047c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataPastWinBytes, -rgap); 53057c478bd9Sstevel@tonic-gate /* Fix seg_len and make sure there is something left. */ 53067c478bd9Sstevel@tonic-gate seg_len += rgap; 53077c478bd9Sstevel@tonic-gate if (seg_len <= 0) { 53087c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 53097c478bd9Sstevel@tonic-gate freemsg(mp); 53107c478bd9Sstevel@tonic-gate return; 53117c478bd9Sstevel@tonic-gate } 53127c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 53137c478bd9Sstevel@tonic-gate seg_len = 0; 53147c478bd9Sstevel@tonic-gate goto process_ack; 53157c478bd9Sstevel@tonic-gate } 53167c478bd9Sstevel@tonic-gate } 53177c478bd9Sstevel@tonic-gate /* 53187c478bd9Sstevel@tonic-gate * Check whether we can update tcp_ts_recent. This test is 53197c478bd9Sstevel@tonic-gate * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 53207c478bd9Sstevel@tonic-gate * Extensions for High Performance: An Update", Internet Draft. 
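	 * In short: only a segment whose sequence number is at or before
	 * the last ACK we sent (SEQ_LEQ(seg_seq, tcp_rack)) and whose
	 * timestamp is no older than the cached one is allowed to refresh
	 * tcp_ts_recent and tcp_last_rcv_lbolt below.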
53217c478bd9Sstevel@tonic-gate */ 53227c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok && 53237c478bd9Sstevel@tonic-gate TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 53247c478bd9Sstevel@tonic-gate SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 53257c478bd9Sstevel@tonic-gate tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 53267c478bd9Sstevel@tonic-gate tcp->tcp_last_rcv_lbolt = prom_gettime(); 53277c478bd9Sstevel@tonic-gate } 53287c478bd9Sstevel@tonic-gate 53297c478bd9Sstevel@tonic-gate if (seg_seq != tcp->tcp_rnxt && seg_len > 0) { 53307c478bd9Sstevel@tonic-gate /* Always ack out of order packets */ 53317c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 53327c478bd9Sstevel@tonic-gate seg_len = 0; 53337c478bd9Sstevel@tonic-gate } else if (seg_len > 0) { 53347c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataInorderSegs); 53357c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataInorderBytes, seg_len); 53367c478bd9Sstevel@tonic-gate } 53377c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 53387c478bd9Sstevel@tonic-gate freemsg(mp); 53397c478bd9Sstevel@tonic-gate (void) tcp_clean_death(sock_id, tcp, 0); 53407c478bd9Sstevel@tonic-gate return; 53417c478bd9Sstevel@tonic-gate } 53427c478bd9Sstevel@tonic-gate if (flags & TH_SYN) { 53437c478bd9Sstevel@tonic-gate freemsg(mp); 53447c478bd9Sstevel@tonic-gate tcp_xmit_ctl("TH_SYN", tcp, NULL, seg_ack, seg_seq + 1, 53457c478bd9Sstevel@tonic-gate TH_RST|TH_ACK, 0, -1); 53467c478bd9Sstevel@tonic-gate /* 53477c478bd9Sstevel@tonic-gate * Do not delete the TCP structure if it is in 53487c478bd9Sstevel@tonic-gate * TIME_WAIT state. Refer to RFC 1122, 4.2.2.13. 53497c478bd9Sstevel@tonic-gate */ 53507c478bd9Sstevel@tonic-gate return; 53517c478bd9Sstevel@tonic-gate } 53527c478bd9Sstevel@tonic-gate process_ack: 53537c478bd9Sstevel@tonic-gate if (flags & TH_ACK) { 53547c478bd9Sstevel@tonic-gate bytes_acked = (int)(seg_ack - tcp->tcp_suna); 53557c478bd9Sstevel@tonic-gate if (bytes_acked <= 0) { 53567c478bd9Sstevel@tonic-gate if (bytes_acked == 0 && seg_len == 0 && 53577c478bd9Sstevel@tonic-gate new_swnd == tcp->tcp_swnd) 53587c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDupAck); 53597c478bd9Sstevel@tonic-gate } else { 53607c478bd9Sstevel@tonic-gate /* Acks something not sent */ 53617c478bd9Sstevel@tonic-gate flags |= TH_ACK_NEEDED; 53627c478bd9Sstevel@tonic-gate } 53637c478bd9Sstevel@tonic-gate } 53647c478bd9Sstevel@tonic-gate freemsg(mp); 53657c478bd9Sstevel@tonic-gate if (flags & TH_ACK_NEEDED) { 53667c478bd9Sstevel@tonic-gate /* 53677c478bd9Sstevel@tonic-gate * Time to send an ack for some reason. 53687c478bd9Sstevel@tonic-gate */ 53697c478bd9Sstevel@tonic-gate tcp_xmit_ctl(NULL, tcp, NULL, tcp->tcp_snxt, 53707c478bd9Sstevel@tonic-gate tcp->tcp_rnxt, TH_ACK, 0, -1); 53717c478bd9Sstevel@tonic-gate } 53727c478bd9Sstevel@tonic-gate } 53737c478bd9Sstevel@tonic-gate 53747c478bd9Sstevel@tonic-gate static int 53757c478bd9Sstevel@tonic-gate tcp_init_values(tcp_t *tcp, struct inetboot_socket *isp) 53767c478bd9Sstevel@tonic-gate { 53777c478bd9Sstevel@tonic-gate int err; 53787c478bd9Sstevel@tonic-gate 53797c478bd9Sstevel@tonic-gate tcp->tcp_family = AF_INET; 53807c478bd9Sstevel@tonic-gate tcp->tcp_ipversion = IPV4_VERSION; 53817c478bd9Sstevel@tonic-gate 53827c478bd9Sstevel@tonic-gate /* 53837c478bd9Sstevel@tonic-gate * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO 53847c478bd9Sstevel@tonic-gate * will be close to tcp_rexmit_interval_initial. 
By doing this, we 53857c478bd9Sstevel@tonic-gate * allow the algorithm to adjust slowly to large fluctuations of RTT 53867c478bd9Sstevel@tonic-gate * during first few transmissions of a connection as seen in slow 53877c478bd9Sstevel@tonic-gate * links. 53887c478bd9Sstevel@tonic-gate */ 53897c478bd9Sstevel@tonic-gate tcp->tcp_rtt_sa = tcp_rexmit_interval_initial << 2; 53907c478bd9Sstevel@tonic-gate tcp->tcp_rtt_sd = tcp_rexmit_interval_initial >> 1; 53917c478bd9Sstevel@tonic-gate tcp->tcp_rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 53927c478bd9Sstevel@tonic-gate tcp_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) + 53937c478bd9Sstevel@tonic-gate tcp_conn_grace_period; 53947c478bd9Sstevel@tonic-gate if (tcp->tcp_rto < tcp_rexmit_interval_min) 53957c478bd9Sstevel@tonic-gate tcp->tcp_rto = tcp_rexmit_interval_min; 53967c478bd9Sstevel@tonic-gate tcp->tcp_timer_backoff = 0; 53977c478bd9Sstevel@tonic-gate tcp->tcp_ms_we_have_waited = 0; 53987c478bd9Sstevel@tonic-gate tcp->tcp_last_recv_time = prom_gettime(); 53997c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_max = tcp_cwnd_max_; 54007c478bd9Sstevel@tonic-gate tcp->tcp_snd_burst = TCP_CWND_INFINITE; 54017c478bd9Sstevel@tonic-gate tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 54027c478bd9Sstevel@tonic-gate /* For Ethernet, the mtu returned is actually 1550... */ 54037c478bd9Sstevel@tonic-gate if (mac_get_type() == IFT_ETHER) { 54047c478bd9Sstevel@tonic-gate tcp->tcp_if_mtu = mac_get_mtu() - 50; 54057c478bd9Sstevel@tonic-gate } else { 54067c478bd9Sstevel@tonic-gate tcp->tcp_if_mtu = mac_get_mtu(); 54077c478bd9Sstevel@tonic-gate } 54087c478bd9Sstevel@tonic-gate tcp->tcp_mss = tcp->tcp_if_mtu; 54097c478bd9Sstevel@tonic-gate 54107c478bd9Sstevel@tonic-gate tcp->tcp_first_timer_threshold = tcp_ip_notify_interval; 54117c478bd9Sstevel@tonic-gate tcp->tcp_first_ctimer_threshold = tcp_ip_notify_cinterval; 54127c478bd9Sstevel@tonic-gate tcp->tcp_second_timer_threshold = tcp_ip_abort_interval; 54137c478bd9Sstevel@tonic-gate /* 54147c478bd9Sstevel@tonic-gate * Fix it to tcp_ip_abort_linterval later if it turns out to be a 54157c478bd9Sstevel@tonic-gate * passive open. 54167c478bd9Sstevel@tonic-gate */ 54177c478bd9Sstevel@tonic-gate tcp->tcp_second_ctimer_threshold = tcp_ip_abort_cinterval; 54187c478bd9Sstevel@tonic-gate 54197c478bd9Sstevel@tonic-gate tcp->tcp_naglim = tcp_naglim_def; 54207c478bd9Sstevel@tonic-gate 54217c478bd9Sstevel@tonic-gate /* NOTE: ISS is now set in tcp_adapt_ire(). */ 54227c478bd9Sstevel@tonic-gate 54237c478bd9Sstevel@tonic-gate /* Initialize the header template */ 54247c478bd9Sstevel@tonic-gate if (tcp->tcp_ipversion == IPV4_VERSION) { 54257c478bd9Sstevel@tonic-gate err = tcp_header_init_ipv4(tcp); 54267c478bd9Sstevel@tonic-gate } 54277c478bd9Sstevel@tonic-gate if (err) 54287c478bd9Sstevel@tonic-gate return (err); 54297c478bd9Sstevel@tonic-gate 54307c478bd9Sstevel@tonic-gate /* 54317c478bd9Sstevel@tonic-gate * Init the window scale to the max so tcp_rwnd_set() won't pare 54327c478bd9Sstevel@tonic-gate * down tcp_rwnd. tcp_adapt_ire() will set the right value later. 
54337c478bd9Sstevel@tonic-gate */ 54347c478bd9Sstevel@tonic-gate tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT; 54357c478bd9Sstevel@tonic-gate tcp->tcp_xmit_lowater = tcp_xmit_lowat; 54367c478bd9Sstevel@tonic-gate if (isp != NULL) { 54377c478bd9Sstevel@tonic-gate tcp->tcp_xmit_hiwater = isp->so_sndbuf; 54387c478bd9Sstevel@tonic-gate tcp->tcp_rwnd = isp->so_rcvbuf; 54397c478bd9Sstevel@tonic-gate tcp->tcp_rwnd_max = isp->so_rcvbuf; 54407c478bd9Sstevel@tonic-gate } 54417c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_IDLE; 54427c478bd9Sstevel@tonic-gate return (0); 54437c478bd9Sstevel@tonic-gate } 54447c478bd9Sstevel@tonic-gate 54457c478bd9Sstevel@tonic-gate /* 54467c478bd9Sstevel@tonic-gate * Initialize the IPv4 header. Loses any record of any IP options. 54477c478bd9Sstevel@tonic-gate */ 54487c478bd9Sstevel@tonic-gate static int 54497c478bd9Sstevel@tonic-gate tcp_header_init_ipv4(tcp_t *tcp) 54507c478bd9Sstevel@tonic-gate { 54517c478bd9Sstevel@tonic-gate tcph_t *tcph; 54527c478bd9Sstevel@tonic-gate 54537c478bd9Sstevel@tonic-gate /* 54547c478bd9Sstevel@tonic-gate * This is a simple initialization. If there's 54557c478bd9Sstevel@tonic-gate * already a template, it should never be too small, 54567c478bd9Sstevel@tonic-gate * so reuse it. Otherwise, allocate space for the new one. 54577c478bd9Sstevel@tonic-gate */ 54587c478bd9Sstevel@tonic-gate if (tcp->tcp_iphc != NULL) { 54597c478bd9Sstevel@tonic-gate assert(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 54607c478bd9Sstevel@tonic-gate bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 54617c478bd9Sstevel@tonic-gate } else { 54627c478bd9Sstevel@tonic-gate tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 54637c478bd9Sstevel@tonic-gate tcp->tcp_iphc = bkmem_zalloc(tcp->tcp_iphc_len); 54647c478bd9Sstevel@tonic-gate if (tcp->tcp_iphc == NULL) { 54657c478bd9Sstevel@tonic-gate tcp->tcp_iphc_len = 0; 54667c478bd9Sstevel@tonic-gate return (ENOMEM); 54677c478bd9Sstevel@tonic-gate } 54687c478bd9Sstevel@tonic-gate } 54697c478bd9Sstevel@tonic-gate tcp->tcp_ipha = (struct ip *)tcp->tcp_iphc; 54707c478bd9Sstevel@tonic-gate tcp->tcp_ipversion = IPV4_VERSION; 54717c478bd9Sstevel@tonic-gate 54727c478bd9Sstevel@tonic-gate /* 54737c478bd9Sstevel@tonic-gate * Note that it does not include TCP options yet. It will 54747c478bd9Sstevel@tonic-gate * after the connection is established. 54757c478bd9Sstevel@tonic-gate */ 54767c478bd9Sstevel@tonic-gate tcp->tcp_hdr_len = sizeof (struct ip) + sizeof (tcph_t); 54777c478bd9Sstevel@tonic-gate tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 54787c478bd9Sstevel@tonic-gate tcp->tcp_ip_hdr_len = sizeof (struct ip); 54797c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_v = IP_VERSION; 54807c478bd9Sstevel@tonic-gate /* We don't support IP options... */ 54817c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_hl = IP_SIMPLE_HDR_LENGTH_IN_WORDS; 54827c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_p = IPPROTO_TCP; 54837c478bd9Sstevel@tonic-gate /* We are not supposed to do PMTU discovery... */ 54847c478bd9Sstevel@tonic-gate tcp->tcp_ipha->ip_sum = 0; 54857c478bd9Sstevel@tonic-gate 54867c478bd9Sstevel@tonic-gate tcph = (tcph_t *)(tcp->tcp_iphc + sizeof (struct ip)); 54877c478bd9Sstevel@tonic-gate tcp->tcp_tcph = tcph; 54887c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] = (5 << 4); 54897c478bd9Sstevel@tonic-gate return (0); 54907c478bd9Sstevel@tonic-gate } 54917c478bd9Sstevel@tonic-gate 54927c478bd9Sstevel@tonic-gate /* 54937c478bd9Sstevel@tonic-gate * Send out a control packet on the tcp connection specified. 
This routine 54947c478bd9Sstevel@tonic-gate * is typically called where we need a simple ACK or RST generated. 54957c478bd9Sstevel@tonic-gate * 54967c478bd9Sstevel@tonic-gate * This function is called with or without a mp. 54977c478bd9Sstevel@tonic-gate */ 54987c478bd9Sstevel@tonic-gate static void 54997c478bd9Sstevel@tonic-gate tcp_xmit_ctl(char *str, tcp_t *tcp, mblk_t *mp, uint32_t seq, 55007c478bd9Sstevel@tonic-gate uint32_t ack, int ctl, uint_t ip_hdr_len, int sock_id) 55017c478bd9Sstevel@tonic-gate { 55027c478bd9Sstevel@tonic-gate uchar_t *rptr; 55037c478bd9Sstevel@tonic-gate tcph_t *tcph; 55047c478bd9Sstevel@tonic-gate struct ip *iph = NULL; 55057c478bd9Sstevel@tonic-gate int tcp_hdr_len; 55067c478bd9Sstevel@tonic-gate int tcp_ip_hdr_len; 55077c478bd9Sstevel@tonic-gate 55087c478bd9Sstevel@tonic-gate tcp_hdr_len = tcp->tcp_hdr_len; 55097c478bd9Sstevel@tonic-gate tcp_ip_hdr_len = tcp->tcp_ip_hdr_len; 55107c478bd9Sstevel@tonic-gate 55117c478bd9Sstevel@tonic-gate if (mp) { 55127c478bd9Sstevel@tonic-gate assert(ip_hdr_len != 0); 55137c478bd9Sstevel@tonic-gate rptr = mp->b_rptr; 55147c478bd9Sstevel@tonic-gate tcph = (tcph_t *)(rptr + ip_hdr_len); 55157c478bd9Sstevel@tonic-gate /* Don't reply to a RST segment. */ 55167c478bd9Sstevel@tonic-gate if (tcph->th_flags[0] & TH_RST) { 55177c478bd9Sstevel@tonic-gate freeb(mp); 55187c478bd9Sstevel@tonic-gate return; 55197c478bd9Sstevel@tonic-gate } 55207c478bd9Sstevel@tonic-gate freemsg(mp); 55217c478bd9Sstevel@tonic-gate rptr = NULL; 55227c478bd9Sstevel@tonic-gate } else { 55237c478bd9Sstevel@tonic-gate assert(ip_hdr_len == 0); 55247c478bd9Sstevel@tonic-gate } 55257c478bd9Sstevel@tonic-gate /* If a text string is passed in with the request, print it out. */ 55267c478bd9Sstevel@tonic-gate if (str != NULL) { 55277c478bd9Sstevel@tonic-gate dprintf("tcp_xmit_ctl(%d): '%s', seq 0x%x, ack 0x%x, " 55287c478bd9Sstevel@tonic-gate "ctl 0x%x\n", sock_id, str, seq, ack, ctl); 55297c478bd9Sstevel@tonic-gate } 55307c478bd9Sstevel@tonic-gate mp = allocb(tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + tcp_wroff_xtra, 0); 55317c478bd9Sstevel@tonic-gate if (mp == NULL) { 55327c478bd9Sstevel@tonic-gate dprintf("tcp_xmit_ctl(%d): Cannot allocate memory\n", sock_id); 55337c478bd9Sstevel@tonic-gate return; 55347c478bd9Sstevel@tonic-gate } 55357c478bd9Sstevel@tonic-gate rptr = &mp->b_rptr[tcp_wroff_xtra]; 55367c478bd9Sstevel@tonic-gate mp->b_rptr = rptr; 55377c478bd9Sstevel@tonic-gate mp->b_wptr = &rptr[tcp_hdr_len]; 55387c478bd9Sstevel@tonic-gate bcopy(tcp->tcp_iphc, rptr, tcp_hdr_len); 55397c478bd9Sstevel@tonic-gate 55407c478bd9Sstevel@tonic-gate iph = (struct ip *)rptr; 55417c478bd9Sstevel@tonic-gate iph->ip_len = htons(tcp_hdr_len); 55427c478bd9Sstevel@tonic-gate 55437c478bd9Sstevel@tonic-gate tcph = (tcph_t *)&rptr[tcp_ip_hdr_len]; 55447c478bd9Sstevel@tonic-gate tcph->th_flags[0] = (uint8_t)ctl; 55457c478bd9Sstevel@tonic-gate if (ctl & TH_RST) { 55467c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutRsts); 55477c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutControl); 55487c478bd9Sstevel@tonic-gate /* 55497c478bd9Sstevel@tonic-gate * Don't send TSopt w/ TH_RST packets per RFC 1323. 
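		 * The header template may already reserve room for the
		 * timestamp option, so the segment is shrunk by
		 * TCPOPT_REAL_TS_LEN bytes and the TCP data offset is
		 * reduced by three 32-bit words to match.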
55507c478bd9Sstevel@tonic-gate */ 55517c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok && tcp->tcp_state > TCPS_SYN_SENT) { 55527c478bd9Sstevel@tonic-gate mp->b_wptr = &rptr[tcp_hdr_len - TCPOPT_REAL_TS_LEN]; 55537c478bd9Sstevel@tonic-gate *(mp->b_wptr) = TCPOPT_EOL; 55547c478bd9Sstevel@tonic-gate iph->ip_len = htons(tcp_hdr_len - 55557c478bd9Sstevel@tonic-gate TCPOPT_REAL_TS_LEN); 55567c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] -= (3 << 4); 55577c478bd9Sstevel@tonic-gate } 55587c478bd9Sstevel@tonic-gate } 55597c478bd9Sstevel@tonic-gate if (ctl & TH_ACK) { 55607c478bd9Sstevel@tonic-gate uint32_t now = prom_gettime(); 55617c478bd9Sstevel@tonic-gate 55627c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok) { 55637c478bd9Sstevel@tonic-gate U32_TO_BE32(now, 55647c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 55657c478bd9Sstevel@tonic-gate U32_TO_BE32(tcp->tcp_ts_recent, 55667c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 55677c478bd9Sstevel@tonic-gate } 55687c478bd9Sstevel@tonic-gate tcp->tcp_rack = ack; 55697c478bd9Sstevel@tonic-gate tcp->tcp_rack_cnt = 0; 55707c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutAck); 55717c478bd9Sstevel@tonic-gate } 55727c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutSegs); 55737c478bd9Sstevel@tonic-gate U32_TO_BE32(seq, tcph->th_seq); 55747c478bd9Sstevel@tonic-gate U32_TO_BE32(ack, tcph->th_ack); 55757c478bd9Sstevel@tonic-gate 55767c478bd9Sstevel@tonic-gate tcp_set_cksum(mp); 55777c478bd9Sstevel@tonic-gate iph->ip_ttl = (uint8_t)tcp_ipv4_ttl; 55787c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_xmit_ctl", mp); 55797c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp); 55807c478bd9Sstevel@tonic-gate freeb(mp); 55817c478bd9Sstevel@tonic-gate } 55827c478bd9Sstevel@tonic-gate 55837c478bd9Sstevel@tonic-gate /* Generate an ACK-only (no data) segment for a TCP endpoint */ 55847c478bd9Sstevel@tonic-gate static mblk_t * 55857c478bd9Sstevel@tonic-gate tcp_ack_mp(tcp_t *tcp) 55867c478bd9Sstevel@tonic-gate { 55877c478bd9Sstevel@tonic-gate if (tcp->tcp_valid_bits) { 55887c478bd9Sstevel@tonic-gate /* 55897c478bd9Sstevel@tonic-gate * For the complex case where we have to send some 55907c478bd9Sstevel@tonic-gate * controls (FIN or SYN), let tcp_xmit_mp do it. 55917c478bd9Sstevel@tonic-gate * When sending an ACK-only segment (no data) 55927c478bd9Sstevel@tonic-gate * into a zero window, always set the seq number to 55937c478bd9Sstevel@tonic-gate * suna, since snxt will be extended past the window. 55947c478bd9Sstevel@tonic-gate * If we used snxt, the receiver might consider the ACK 55957c478bd9Sstevel@tonic-gate * unacceptable. 55967c478bd9Sstevel@tonic-gate */ 55977c478bd9Sstevel@tonic-gate return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, 55987c478bd9Sstevel@tonic-gate (tcp->tcp_zero_win_probe) ? 
55997c478bd9Sstevel@tonic-gate tcp->tcp_suna : 56007c478bd9Sstevel@tonic-gate tcp->tcp_snxt, B_FALSE, NULL, B_FALSE)); 56017c478bd9Sstevel@tonic-gate } else { 56027c478bd9Sstevel@tonic-gate /* Generate a simple ACK */ 56037c478bd9Sstevel@tonic-gate uchar_t *rptr; 56047c478bd9Sstevel@tonic-gate tcph_t *tcph; 56057c478bd9Sstevel@tonic-gate mblk_t *mp1; 56067c478bd9Sstevel@tonic-gate int32_t tcp_hdr_len; 56077c478bd9Sstevel@tonic-gate int32_t num_sack_blk = 0; 56087c478bd9Sstevel@tonic-gate int32_t sack_opt_len; 56097c478bd9Sstevel@tonic-gate 56107c478bd9Sstevel@tonic-gate /* 56117c478bd9Sstevel@tonic-gate * Allocate space for TCP + IP headers 56127c478bd9Sstevel@tonic-gate * and link-level header 56137c478bd9Sstevel@tonic-gate */ 56147c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 56157c478bd9Sstevel@tonic-gate num_sack_blk = MIN(tcp->tcp_max_sack_blk, 56167c478bd9Sstevel@tonic-gate tcp->tcp_num_sack_blk); 56177c478bd9Sstevel@tonic-gate sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 56187c478bd9Sstevel@tonic-gate TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 56197c478bd9Sstevel@tonic-gate tcp_hdr_len = tcp->tcp_hdr_len + sack_opt_len; 56207c478bd9Sstevel@tonic-gate } else { 56217c478bd9Sstevel@tonic-gate tcp_hdr_len = tcp->tcp_hdr_len; 56227c478bd9Sstevel@tonic-gate } 56237c478bd9Sstevel@tonic-gate mp1 = allocb(tcp_hdr_len + tcp_wroff_xtra, 0); 56247c478bd9Sstevel@tonic-gate if (mp1 == NULL) 56257c478bd9Sstevel@tonic-gate return (NULL); 56267c478bd9Sstevel@tonic-gate 56277c478bd9Sstevel@tonic-gate /* copy in prototype TCP + IP header */ 56287c478bd9Sstevel@tonic-gate rptr = mp1->b_rptr + tcp_wroff_xtra; 56297c478bd9Sstevel@tonic-gate mp1->b_rptr = rptr; 56307c478bd9Sstevel@tonic-gate mp1->b_wptr = rptr + tcp_hdr_len; 56317c478bd9Sstevel@tonic-gate bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 56327c478bd9Sstevel@tonic-gate 56337c478bd9Sstevel@tonic-gate tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 56347c478bd9Sstevel@tonic-gate 56357c478bd9Sstevel@tonic-gate /* 56367c478bd9Sstevel@tonic-gate * Set the TCP sequence number. 56377c478bd9Sstevel@tonic-gate * When sending an ACK-only segment (no data) 56387c478bd9Sstevel@tonic-gate * into a zero window, always set the seq number to 56397c478bd9Sstevel@tonic-gate * suna, since snxt will be extended past the window. 56407c478bd9Sstevel@tonic-gate * If we used snxt, the receiver might consider the ACK 56417c478bd9Sstevel@tonic-gate * unacceptable. 56427c478bd9Sstevel@tonic-gate */ 56437c478bd9Sstevel@tonic-gate U32_TO_ABE32((tcp->tcp_zero_win_probe) ? 56447c478bd9Sstevel@tonic-gate tcp->tcp_suna : tcp->tcp_snxt, tcph->th_seq); 56457c478bd9Sstevel@tonic-gate 56467c478bd9Sstevel@tonic-gate /* Set up the TCP flag field. 
*/ 56477c478bd9Sstevel@tonic-gate tcph->th_flags[0] = (uchar_t)TH_ACK; 56487c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_echo_on) 56497c478bd9Sstevel@tonic-gate tcph->th_flags[0] |= TH_ECE; 56507c478bd9Sstevel@tonic-gate 56517c478bd9Sstevel@tonic-gate tcp->tcp_rack = tcp->tcp_rnxt; 56527c478bd9Sstevel@tonic-gate tcp->tcp_rack_cnt = 0; 56537c478bd9Sstevel@tonic-gate 56547c478bd9Sstevel@tonic-gate /* fill in timestamp option if in use */ 56557c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok) { 56567c478bd9Sstevel@tonic-gate uint32_t llbolt = (uint32_t)prom_gettime(); 56577c478bd9Sstevel@tonic-gate 56587c478bd9Sstevel@tonic-gate U32_TO_BE32(llbolt, 56597c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 56607c478bd9Sstevel@tonic-gate U32_TO_BE32(tcp->tcp_ts_recent, 56617c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 56627c478bd9Sstevel@tonic-gate } 56637c478bd9Sstevel@tonic-gate 56647c478bd9Sstevel@tonic-gate /* Fill in SACK options */ 56657c478bd9Sstevel@tonic-gate if (num_sack_blk > 0) { 56667c478bd9Sstevel@tonic-gate uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 56677c478bd9Sstevel@tonic-gate sack_blk_t *tmp; 56687c478bd9Sstevel@tonic-gate int32_t i; 56697c478bd9Sstevel@tonic-gate 56707c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 56717c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_NOP; 56727c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_SACK; 56737c478bd9Sstevel@tonic-gate wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 56747c478bd9Sstevel@tonic-gate sizeof (sack_blk_t); 56757c478bd9Sstevel@tonic-gate wptr += TCPOPT_REAL_SACK_LEN; 56767c478bd9Sstevel@tonic-gate 56777c478bd9Sstevel@tonic-gate tmp = tcp->tcp_sack_list; 56787c478bd9Sstevel@tonic-gate for (i = 0; i < num_sack_blk; i++) { 56797c478bd9Sstevel@tonic-gate U32_TO_BE32(tmp[i].begin, wptr); 56807c478bd9Sstevel@tonic-gate wptr += sizeof (tcp_seq); 56817c478bd9Sstevel@tonic-gate U32_TO_BE32(tmp[i].end, wptr); 56827c478bd9Sstevel@tonic-gate wptr += sizeof (tcp_seq); 56837c478bd9Sstevel@tonic-gate } 56847c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) 56857c478bd9Sstevel@tonic-gate << 4); 56867c478bd9Sstevel@tonic-gate } 56877c478bd9Sstevel@tonic-gate 56887c478bd9Sstevel@tonic-gate ((struct ip *)rptr)->ip_len = htons(tcp_hdr_len); 56897c478bd9Sstevel@tonic-gate tcp_set_cksum(mp1); 56907c478bd9Sstevel@tonic-gate ((struct ip *)rptr)->ip_ttl = (uint8_t)tcp_ipv4_ttl; 56917c478bd9Sstevel@tonic-gate return (mp1); 56927c478bd9Sstevel@tonic-gate } 56937c478bd9Sstevel@tonic-gate } 56947c478bd9Sstevel@tonic-gate 56957c478bd9Sstevel@tonic-gate /* 56967c478bd9Sstevel@tonic-gate * tcp_xmit_mp is called to return a pointer to an mblk chain complete with 56977c478bd9Sstevel@tonic-gate * ip and tcp header ready to pass down to IP. If the mp passed in is 56987c478bd9Sstevel@tonic-gate * non-NULL, then up to max_to_send bytes of data will be dup'ed off that 56997c478bd9Sstevel@tonic-gate * mblk. (If sendall is not set the dup'ing will stop at an mblk boundary 57007c478bd9Sstevel@tonic-gate * otherwise it will dup partial mblks.) 57017c478bd9Sstevel@tonic-gate * Otherwise, an appropriate ACK packet will be generated. This 57027c478bd9Sstevel@tonic-gate * routine is not usually called to send new data for the first time. It 57037c478bd9Sstevel@tonic-gate * is mostly called out of the timer for retransmits, and to generate ACKs. 
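 *
 * A rough, hypothetical usage sketch (unacked_mp and sock_id below are
 * illustrative names, not identifiers taken from this file): a
 * retransmit-style caller hands in the unacknowledged chain and lets
 * offset, end_mp and seg_len report how far the dup'ing got.
 *
 *	int32_t	off = 0;
 *	mblk_t	*end_mp, *out;
 *	uint32_t cnt;
 *
 *	out = tcp_xmit_mp(tcp, unacked_mp, tcp->tcp_mss, &off, &end_mp,
 *	    tcp->tcp_suna, B_TRUE, &cnt, B_TRUE);
 *	if (out != NULL) {
 *		(void) ipv4_tcp_output(sock_id, out);
 *		freemsg(out);
 *	}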
57047c478bd9Sstevel@tonic-gate * 57057c478bd9Sstevel@tonic-gate * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will 57067c478bd9Sstevel@tonic-gate * be adjusted by *offset. And after dupb(), the offset and the ending mblk 57077c478bd9Sstevel@tonic-gate * of the original mblk chain will be returned in *offset and *end_mp. 57087c478bd9Sstevel@tonic-gate */ 57097c478bd9Sstevel@tonic-gate static mblk_t * 57107c478bd9Sstevel@tonic-gate tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset, 57117c478bd9Sstevel@tonic-gate mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len, 57127c478bd9Sstevel@tonic-gate boolean_t rexmit) 57137c478bd9Sstevel@tonic-gate { 57147c478bd9Sstevel@tonic-gate int data_length; 57157c478bd9Sstevel@tonic-gate int32_t off = 0; 57167c478bd9Sstevel@tonic-gate uint_t flags; 57177c478bd9Sstevel@tonic-gate mblk_t *mp1; 57187c478bd9Sstevel@tonic-gate mblk_t *mp2; 57197c478bd9Sstevel@tonic-gate mblk_t *new_mp; 57207c478bd9Sstevel@tonic-gate uchar_t *rptr; 57217c478bd9Sstevel@tonic-gate tcph_t *tcph; 57227c478bd9Sstevel@tonic-gate int32_t num_sack_blk = 0; 57237c478bd9Sstevel@tonic-gate int32_t sack_opt_len = 0; 57247c478bd9Sstevel@tonic-gate 57257c478bd9Sstevel@tonic-gate /* Allocate for our maximum TCP header + link-level */ 57267c478bd9Sstevel@tonic-gate mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 57277c478bd9Sstevel@tonic-gate tcp_wroff_xtra, 0); 57287c478bd9Sstevel@tonic-gate if (mp1 == NULL) 57297c478bd9Sstevel@tonic-gate return (NULL); 57307c478bd9Sstevel@tonic-gate data_length = 0; 57317c478bd9Sstevel@tonic-gate 57327c478bd9Sstevel@tonic-gate /* 57337c478bd9Sstevel@tonic-gate * Note that tcp_mss has been adjusted to take into account the 57347c478bd9Sstevel@tonic-gate * timestamp option if applicable. Because SACK options do not 57357c478bd9Sstevel@tonic-gate * appear in every TCP segments and they are of variable lengths, 57367c478bd9Sstevel@tonic-gate * they cannot be included in tcp_mss. Thus we need to calculate 57377c478bd9Sstevel@tonic-gate * the actual segment length when we need to send a segment which 57387c478bd9Sstevel@tonic-gate * includes SACK options. 57397c478bd9Sstevel@tonic-gate */ 57407c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 57417c478bd9Sstevel@tonic-gate num_sack_blk = MIN(tcp->tcp_max_sack_blk, 57427c478bd9Sstevel@tonic-gate tcp->tcp_num_sack_blk); 57437c478bd9Sstevel@tonic-gate sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 57447c478bd9Sstevel@tonic-gate TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 57457c478bd9Sstevel@tonic-gate if (max_to_send + sack_opt_len > tcp->tcp_mss) 57467c478bd9Sstevel@tonic-gate max_to_send -= sack_opt_len; 57477c478bd9Sstevel@tonic-gate } 57487c478bd9Sstevel@tonic-gate 57497c478bd9Sstevel@tonic-gate if (offset != NULL) { 57507c478bd9Sstevel@tonic-gate off = *offset; 57517c478bd9Sstevel@tonic-gate /* We use offset as an indicator that end_mp is not NULL. 
*/ 57527c478bd9Sstevel@tonic-gate *end_mp = NULL; 57537c478bd9Sstevel@tonic-gate } 57547c478bd9Sstevel@tonic-gate for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) { 57557c478bd9Sstevel@tonic-gate /* This could be faster with cooperation from downstream */ 57567c478bd9Sstevel@tonic-gate if (mp2 != mp1 && !sendall && 57577c478bd9Sstevel@tonic-gate data_length + (int)(mp->b_wptr - mp->b_rptr) > 57587c478bd9Sstevel@tonic-gate max_to_send) 57597c478bd9Sstevel@tonic-gate /* 57607c478bd9Sstevel@tonic-gate * Don't send the next mblk since the whole mblk 57617c478bd9Sstevel@tonic-gate * does not fit. 57627c478bd9Sstevel@tonic-gate */ 57637c478bd9Sstevel@tonic-gate break; 57647c478bd9Sstevel@tonic-gate mp2->b_cont = dupb(mp); 57657c478bd9Sstevel@tonic-gate mp2 = mp2->b_cont; 57667c478bd9Sstevel@tonic-gate if (mp2 == NULL) { 57677c478bd9Sstevel@tonic-gate freemsg(mp1); 57687c478bd9Sstevel@tonic-gate return (NULL); 57697c478bd9Sstevel@tonic-gate } 57707c478bd9Sstevel@tonic-gate mp2->b_rptr += off; 57717c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 57727c478bd9Sstevel@tonic-gate (uintptr_t)INT_MAX); 57737c478bd9Sstevel@tonic-gate 57747c478bd9Sstevel@tonic-gate data_length += (int)(mp2->b_wptr - mp2->b_rptr); 57757c478bd9Sstevel@tonic-gate if (data_length > max_to_send) { 57767c478bd9Sstevel@tonic-gate mp2->b_wptr -= data_length - max_to_send; 57777c478bd9Sstevel@tonic-gate data_length = max_to_send; 57787c478bd9Sstevel@tonic-gate off = mp2->b_wptr - mp->b_rptr; 57797c478bd9Sstevel@tonic-gate break; 57807c478bd9Sstevel@tonic-gate } else { 57817c478bd9Sstevel@tonic-gate off = 0; 57827c478bd9Sstevel@tonic-gate } 57837c478bd9Sstevel@tonic-gate } 57847c478bd9Sstevel@tonic-gate if (offset != NULL) { 57857c478bd9Sstevel@tonic-gate *offset = off; 57867c478bd9Sstevel@tonic-gate *end_mp = mp; 57877c478bd9Sstevel@tonic-gate } 57887c478bd9Sstevel@tonic-gate if (seg_len != NULL) { 57897c478bd9Sstevel@tonic-gate *seg_len = data_length; 57907c478bd9Sstevel@tonic-gate } 57917c478bd9Sstevel@tonic-gate 57927c478bd9Sstevel@tonic-gate rptr = mp1->b_rptr + tcp_wroff_xtra; 57937c478bd9Sstevel@tonic-gate mp1->b_rptr = rptr; 57947c478bd9Sstevel@tonic-gate mp1->b_wptr = rptr + tcp->tcp_hdr_len + sack_opt_len; 57957c478bd9Sstevel@tonic-gate bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 57967c478bd9Sstevel@tonic-gate tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 57977c478bd9Sstevel@tonic-gate U32_TO_ABE32(seq, tcph->th_seq); 57987c478bd9Sstevel@tonic-gate 57997c478bd9Sstevel@tonic-gate /* 58007c478bd9Sstevel@tonic-gate * Use tcp_unsent to determine if the PUSH bit should be used assumes 58017c478bd9Sstevel@tonic-gate * that this function was called from tcp_wput_data. Thus, when called 58027c478bd9Sstevel@tonic-gate * to retransmit data the setting of the PUSH bit may appear some 58037c478bd9Sstevel@tonic-gate * what random in that it might get set when it should not. This 58047c478bd9Sstevel@tonic-gate * should not pose any performance issues. 
58057c478bd9Sstevel@tonic-gate */ 58067c478bd9Sstevel@tonic-gate if (data_length != 0 && (tcp->tcp_unsent == 0 || 58077c478bd9Sstevel@tonic-gate tcp->tcp_unsent == data_length)) { 58087c478bd9Sstevel@tonic-gate flags = TH_ACK | TH_PUSH; 58097c478bd9Sstevel@tonic-gate } else { 58107c478bd9Sstevel@tonic-gate flags = TH_ACK; 58117c478bd9Sstevel@tonic-gate } 58127c478bd9Sstevel@tonic-gate 58137c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok) { 58147c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_echo_on) 58157c478bd9Sstevel@tonic-gate flags |= TH_ECE; 58167c478bd9Sstevel@tonic-gate 58177c478bd9Sstevel@tonic-gate /* 58187c478bd9Sstevel@tonic-gate * Only set ECT bit and ECN_CWR if a segment contains new data. 58197c478bd9Sstevel@tonic-gate * There is no TCP flow control for non-data segments, and 58207c478bd9Sstevel@tonic-gate * only data segment is transmitted reliably. 58217c478bd9Sstevel@tonic-gate */ 58227c478bd9Sstevel@tonic-gate if (data_length > 0 && !rexmit) { 58237c478bd9Sstevel@tonic-gate SET_ECT(tcp, rptr); 58247c478bd9Sstevel@tonic-gate if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 58257c478bd9Sstevel@tonic-gate flags |= TH_CWR; 58267c478bd9Sstevel@tonic-gate tcp->tcp_ecn_cwr_sent = B_TRUE; 58277c478bd9Sstevel@tonic-gate } 58287c478bd9Sstevel@tonic-gate } 58297c478bd9Sstevel@tonic-gate } 58307c478bd9Sstevel@tonic-gate 58317c478bd9Sstevel@tonic-gate if (tcp->tcp_valid_bits) { 58327c478bd9Sstevel@tonic-gate uint32_t u1; 58337c478bd9Sstevel@tonic-gate 58347c478bd9Sstevel@tonic-gate if ((tcp->tcp_valid_bits & TCP_ISS_VALID) && 58357c478bd9Sstevel@tonic-gate seq == tcp->tcp_iss) { 58367c478bd9Sstevel@tonic-gate uchar_t *wptr; 58377c478bd9Sstevel@tonic-gate 58387c478bd9Sstevel@tonic-gate /* 58397c478bd9Sstevel@tonic-gate * Tack on the MSS option. It is always needed 58407c478bd9Sstevel@tonic-gate * for both active and passive open. 58417c478bd9Sstevel@tonic-gate */ 58427c478bd9Sstevel@tonic-gate wptr = mp1->b_wptr; 58437c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_MAXSEG; 58447c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_MAXSEG_LEN; 58457c478bd9Sstevel@tonic-gate wptr += 2; 58467c478bd9Sstevel@tonic-gate /* 58477c478bd9Sstevel@tonic-gate * MSS option value should be interface MTU - MIN 58487c478bd9Sstevel@tonic-gate * TCP/IP header. 58497c478bd9Sstevel@tonic-gate */ 58507c478bd9Sstevel@tonic-gate u1 = tcp->tcp_if_mtu - IP_SIMPLE_HDR_LENGTH - 58517c478bd9Sstevel@tonic-gate TCP_MIN_HEADER_LENGTH; 58527c478bd9Sstevel@tonic-gate U16_TO_BE16(u1, wptr); 58537c478bd9Sstevel@tonic-gate mp1->b_wptr = wptr + 2; 58547c478bd9Sstevel@tonic-gate /* Update the offset to cover the additional word */ 58557c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += (1 << 4); 58567c478bd9Sstevel@tonic-gate 58577c478bd9Sstevel@tonic-gate /* 58587c478bd9Sstevel@tonic-gate * Note that the following way of filling in 58597c478bd9Sstevel@tonic-gate * TCP options are not optimal. Some NOPs can 58607c478bd9Sstevel@tonic-gate * be saved. But there is no need at this time 58617c478bd9Sstevel@tonic-gate * to optimize it. When it is needed, we will 58627c478bd9Sstevel@tonic-gate * do it. 
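			 *
			 * For reference, a SYN sent from SYN_SENT can carry,
			 * in order: MSS (4 bytes), NOP + window scale
			 * (4 bytes), NOP + NOP + timestamp (12 bytes) and
			 * NOP + NOP + SACK permitted (4 bytes), each padded
			 * out to a 32-bit word; the SYN_RCVD path below adds
			 * the same options minus the timestamp.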
58637c478bd9Sstevel@tonic-gate */ 58647c478bd9Sstevel@tonic-gate switch (tcp->tcp_state) { 58657c478bd9Sstevel@tonic-gate case TCPS_SYN_SENT: 58667c478bd9Sstevel@tonic-gate flags = TH_SYN; 58677c478bd9Sstevel@tonic-gate 58687c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ws_ok) { 58697c478bd9Sstevel@tonic-gate wptr = mp1->b_wptr; 58707c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 58717c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_WSCALE; 58727c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_WS_LEN; 58737c478bd9Sstevel@tonic-gate wptr[3] = (uchar_t)tcp->tcp_rcv_ws; 58747c478bd9Sstevel@tonic-gate mp1->b_wptr += TCPOPT_REAL_WS_LEN; 58757c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += 58767c478bd9Sstevel@tonic-gate (1 << 4); 58777c478bd9Sstevel@tonic-gate } 58787c478bd9Sstevel@tonic-gate 58797c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok) { 58807c478bd9Sstevel@tonic-gate uint32_t llbolt; 58817c478bd9Sstevel@tonic-gate 58827c478bd9Sstevel@tonic-gate llbolt = prom_gettime(); 58837c478bd9Sstevel@tonic-gate wptr = mp1->b_wptr; 58847c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 58857c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_NOP; 58867c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_TSTAMP; 58877c478bd9Sstevel@tonic-gate wptr[3] = TCPOPT_TSTAMP_LEN; 58887c478bd9Sstevel@tonic-gate wptr += 4; 58897c478bd9Sstevel@tonic-gate U32_TO_BE32(llbolt, wptr); 58907c478bd9Sstevel@tonic-gate wptr += 4; 58917c478bd9Sstevel@tonic-gate assert(tcp->tcp_ts_recent == 0); 58927c478bd9Sstevel@tonic-gate U32_TO_BE32(0L, wptr); 58937c478bd9Sstevel@tonic-gate mp1->b_wptr += TCPOPT_REAL_TS_LEN; 58947c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += 58957c478bd9Sstevel@tonic-gate (3 << 4); 58967c478bd9Sstevel@tonic-gate } 58977c478bd9Sstevel@tonic-gate 58987c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok) { 58997c478bd9Sstevel@tonic-gate wptr = mp1->b_wptr; 59007c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 59017c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_NOP; 59027c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_SACK_PERMITTED; 59037c478bd9Sstevel@tonic-gate wptr[3] = TCPOPT_SACK_OK_LEN; 59047c478bd9Sstevel@tonic-gate mp1->b_wptr += TCPOPT_REAL_SACK_OK_LEN; 59057c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += 59067c478bd9Sstevel@tonic-gate (1 << 4); 59077c478bd9Sstevel@tonic-gate } 59087c478bd9Sstevel@tonic-gate 59097c478bd9Sstevel@tonic-gate /* 59107c478bd9Sstevel@tonic-gate * Set up all the bits to tell other side 59117c478bd9Sstevel@tonic-gate * we are ECN capable. 
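				 * Both ECE and CWR are set on the active-open
				 * SYN here, i.e. the standard RFC 3168
				 * ECN-setup SYN; the passive side below
				 * answers with ECE only.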
59127c478bd9Sstevel@tonic-gate */ 59137c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok) { 59147c478bd9Sstevel@tonic-gate flags |= (TH_ECE | TH_CWR); 59157c478bd9Sstevel@tonic-gate } 59167c478bd9Sstevel@tonic-gate break; 59177c478bd9Sstevel@tonic-gate case TCPS_SYN_RCVD: 59187c478bd9Sstevel@tonic-gate flags |= TH_SYN; 59197c478bd9Sstevel@tonic-gate 59207c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ws_ok) { 59217c478bd9Sstevel@tonic-gate wptr = mp1->b_wptr; 59227c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 59237c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_WSCALE; 59247c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_WS_LEN; 59257c478bd9Sstevel@tonic-gate wptr[3] = (uchar_t)tcp->tcp_rcv_ws; 59267c478bd9Sstevel@tonic-gate mp1->b_wptr += TCPOPT_REAL_WS_LEN; 59277c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += (1 << 4); 59287c478bd9Sstevel@tonic-gate } 59297c478bd9Sstevel@tonic-gate 59307c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_sack_ok) { 59317c478bd9Sstevel@tonic-gate wptr = mp1->b_wptr; 59327c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 59337c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_NOP; 59347c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_SACK_PERMITTED; 59357c478bd9Sstevel@tonic-gate wptr[3] = TCPOPT_SACK_OK_LEN; 59367c478bd9Sstevel@tonic-gate mp1->b_wptr += TCPOPT_REAL_SACK_OK_LEN; 59377c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += 59387c478bd9Sstevel@tonic-gate (1 << 4); 59397c478bd9Sstevel@tonic-gate } 59407c478bd9Sstevel@tonic-gate 59417c478bd9Sstevel@tonic-gate /* 59427c478bd9Sstevel@tonic-gate * If the other side is ECN capable, reply 59437c478bd9Sstevel@tonic-gate * that we are also ECN capable. 59447c478bd9Sstevel@tonic-gate */ 59457c478bd9Sstevel@tonic-gate if (tcp->tcp_ecn_ok) { 59467c478bd9Sstevel@tonic-gate flags |= TH_ECE; 59477c478bd9Sstevel@tonic-gate } 59487c478bd9Sstevel@tonic-gate break; 59497c478bd9Sstevel@tonic-gate default: 59507c478bd9Sstevel@tonic-gate break; 59517c478bd9Sstevel@tonic-gate } 59527c478bd9Sstevel@tonic-gate /* allocb() of adequate mblk assures space */ 59537c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp1->b_wptr - 59547c478bd9Sstevel@tonic-gate mp1->b_rptr) <= (uintptr_t)INT_MAX); 59557c478bd9Sstevel@tonic-gate if (flags & TH_SYN) 59567c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutControl); 59577c478bd9Sstevel@tonic-gate } 59587c478bd9Sstevel@tonic-gate if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 59597c478bd9Sstevel@tonic-gate (seq + data_length) == tcp->tcp_fss) { 59607c478bd9Sstevel@tonic-gate if (!tcp->tcp_fin_acked) { 59617c478bd9Sstevel@tonic-gate flags |= TH_FIN; 59627c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutControl); 59637c478bd9Sstevel@tonic-gate } 59647c478bd9Sstevel@tonic-gate if (!tcp->tcp_fin_sent) { 59657c478bd9Sstevel@tonic-gate tcp->tcp_fin_sent = B_TRUE; 59667c478bd9Sstevel@tonic-gate switch (tcp->tcp_state) { 59677c478bd9Sstevel@tonic-gate case TCPS_SYN_RCVD: 59687c478bd9Sstevel@tonic-gate case TCPS_ESTABLISHED: 59697c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_FIN_WAIT_1; 59707c478bd9Sstevel@tonic-gate break; 59717c478bd9Sstevel@tonic-gate case TCPS_CLOSE_WAIT: 59727c478bd9Sstevel@tonic-gate tcp->tcp_state = TCPS_LAST_ACK; 59737c478bd9Sstevel@tonic-gate break; 59747c478bd9Sstevel@tonic-gate } 59757c478bd9Sstevel@tonic-gate if (tcp->tcp_suna == tcp->tcp_snxt) 59767c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 59777c478bd9Sstevel@tonic-gate tcp->tcp_snxt = tcp->tcp_fss + 1; 59787c478bd9Sstevel@tonic-gate } 59797c478bd9Sstevel@tonic-gate } 59807c478bd9Sstevel@tonic-gate } 
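	/*
	 * Common tail for every segment built above: set the final flag
	 * byte, refresh the tcp_rack/tcp_rack_cnt bookkeeping, and fill in
	 * the timestamp and SACK options when they are in use.
	 */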
59817c478bd9Sstevel@tonic-gate tcph->th_flags[0] = (uchar_t)flags; 59827c478bd9Sstevel@tonic-gate tcp->tcp_rack = tcp->tcp_rnxt; 59837c478bd9Sstevel@tonic-gate tcp->tcp_rack_cnt = 0; 59847c478bd9Sstevel@tonic-gate 59857c478bd9Sstevel@tonic-gate if (tcp->tcp_snd_ts_ok) { 59867c478bd9Sstevel@tonic-gate if (tcp->tcp_state != TCPS_SYN_SENT) { 59877c478bd9Sstevel@tonic-gate uint32_t llbolt = prom_gettime(); 59887c478bd9Sstevel@tonic-gate 59897c478bd9Sstevel@tonic-gate U32_TO_BE32(llbolt, 59907c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 59917c478bd9Sstevel@tonic-gate U32_TO_BE32(tcp->tcp_ts_recent, 59927c478bd9Sstevel@tonic-gate (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 59937c478bd9Sstevel@tonic-gate } 59947c478bd9Sstevel@tonic-gate } 59957c478bd9Sstevel@tonic-gate 59967c478bd9Sstevel@tonic-gate if (num_sack_blk > 0) { 59977c478bd9Sstevel@tonic-gate uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 59987c478bd9Sstevel@tonic-gate sack_blk_t *tmp; 59997c478bd9Sstevel@tonic-gate int32_t i; 60007c478bd9Sstevel@tonic-gate 60017c478bd9Sstevel@tonic-gate wptr[0] = TCPOPT_NOP; 60027c478bd9Sstevel@tonic-gate wptr[1] = TCPOPT_NOP; 60037c478bd9Sstevel@tonic-gate wptr[2] = TCPOPT_SACK; 60047c478bd9Sstevel@tonic-gate wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 60057c478bd9Sstevel@tonic-gate sizeof (sack_blk_t); 60067c478bd9Sstevel@tonic-gate wptr += TCPOPT_REAL_SACK_LEN; 60077c478bd9Sstevel@tonic-gate 60087c478bd9Sstevel@tonic-gate tmp = tcp->tcp_sack_list; 60097c478bd9Sstevel@tonic-gate for (i = 0; i < num_sack_blk; i++) { 60107c478bd9Sstevel@tonic-gate U32_TO_BE32(tmp[i].begin, wptr); 60117c478bd9Sstevel@tonic-gate wptr += sizeof (tcp_seq); 60127c478bd9Sstevel@tonic-gate U32_TO_BE32(tmp[i].end, wptr); 60137c478bd9Sstevel@tonic-gate wptr += sizeof (tcp_seq); 60147c478bd9Sstevel@tonic-gate } 60157c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) << 4); 60167c478bd9Sstevel@tonic-gate } 60177c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX); 60187c478bd9Sstevel@tonic-gate data_length += (int)(mp1->b_wptr - rptr); 60197c478bd9Sstevel@tonic-gate if (tcp->tcp_ipversion == IPV4_VERSION) 60207c478bd9Sstevel@tonic-gate ((struct ip *)rptr)->ip_len = htons(data_length); 60217c478bd9Sstevel@tonic-gate 60227c478bd9Sstevel@tonic-gate /* 60237c478bd9Sstevel@tonic-gate * Performance hit! We need to pullup the whole message 60247c478bd9Sstevel@tonic-gate * in order to do checksum and for the MAC output routine. 
60257c478bd9Sstevel@tonic-gate */ 60267c478bd9Sstevel@tonic-gate if (mp1->b_cont != NULL) { 60277c478bd9Sstevel@tonic-gate int mp_size; 60287c478bd9Sstevel@tonic-gate #ifdef DEBUG 60297c478bd9Sstevel@tonic-gate printf("Multiple mblk %d\n", msgdsize(mp1)); 60307c478bd9Sstevel@tonic-gate #endif 60317c478bd9Sstevel@tonic-gate new_mp = allocb(msgdsize(mp1) + tcp_wroff_xtra, 0); 60327c478bd9Sstevel@tonic-gate new_mp->b_rptr += tcp_wroff_xtra; 60337c478bd9Sstevel@tonic-gate new_mp->b_wptr = new_mp->b_rptr; 60347c478bd9Sstevel@tonic-gate while (mp1 != NULL) { 60357c478bd9Sstevel@tonic-gate mp_size = mp1->b_wptr - mp1->b_rptr; 60367c478bd9Sstevel@tonic-gate bcopy(mp1->b_rptr, new_mp->b_wptr, mp_size); 60377c478bd9Sstevel@tonic-gate new_mp->b_wptr += mp_size; 60387c478bd9Sstevel@tonic-gate mp1 = mp1->b_cont; 60397c478bd9Sstevel@tonic-gate } 60407c478bd9Sstevel@tonic-gate freemsg(mp1); 60417c478bd9Sstevel@tonic-gate mp1 = new_mp; 60427c478bd9Sstevel@tonic-gate } 60437c478bd9Sstevel@tonic-gate tcp_set_cksum(mp1); 60447c478bd9Sstevel@tonic-gate /* Fill in the TTL field as it is 0 in the header template. */ 60457c478bd9Sstevel@tonic-gate ((struct ip *)mp1->b_rptr)->ip_ttl = (uint8_t)tcp_ipv4_ttl; 60467c478bd9Sstevel@tonic-gate 60477c478bd9Sstevel@tonic-gate return (mp1); 60487c478bd9Sstevel@tonic-gate } 60497c478bd9Sstevel@tonic-gate 60507c478bd9Sstevel@tonic-gate /* 60517c478bd9Sstevel@tonic-gate * Generate a "no listener here" reset in response to the 60527c478bd9Sstevel@tonic-gate * connection request contained within 'mp' 60537c478bd9Sstevel@tonic-gate */ 60547c478bd9Sstevel@tonic-gate static void 60557c478bd9Sstevel@tonic-gate tcp_xmit_listeners_reset(int sock_id, mblk_t *mp, uint_t ip_hdr_len) 60567c478bd9Sstevel@tonic-gate { 60577c478bd9Sstevel@tonic-gate uchar_t *rptr; 60587c478bd9Sstevel@tonic-gate uint32_t seg_len; 60597c478bd9Sstevel@tonic-gate tcph_t *tcph; 60607c478bd9Sstevel@tonic-gate uint32_t seg_seq; 60617c478bd9Sstevel@tonic-gate uint32_t seg_ack; 60627c478bd9Sstevel@tonic-gate uint_t flags; 60637c478bd9Sstevel@tonic-gate 60647c478bd9Sstevel@tonic-gate rptr = mp->b_rptr; 60657c478bd9Sstevel@tonic-gate 60667c478bd9Sstevel@tonic-gate tcph = (tcph_t *)&rptr[ip_hdr_len]; 60677c478bd9Sstevel@tonic-gate seg_seq = BE32_TO_U32(tcph->th_seq); 60687c478bd9Sstevel@tonic-gate seg_ack = BE32_TO_U32(tcph->th_ack); 60697c478bd9Sstevel@tonic-gate flags = tcph->th_flags[0]; 60707c478bd9Sstevel@tonic-gate 60717c478bd9Sstevel@tonic-gate seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcph) + ip_hdr_len); 60727c478bd9Sstevel@tonic-gate if (flags & TH_RST) { 60737c478bd9Sstevel@tonic-gate freeb(mp); 60747c478bd9Sstevel@tonic-gate } else if (flags & TH_ACK) { 60757c478bd9Sstevel@tonic-gate tcp_xmit_early_reset("no tcp, reset", 60767c478bd9Sstevel@tonic-gate sock_id, mp, seg_ack, 0, TH_RST, ip_hdr_len); 60777c478bd9Sstevel@tonic-gate } else { 60787c478bd9Sstevel@tonic-gate if (flags & TH_SYN) 60797c478bd9Sstevel@tonic-gate seg_len++; 60807c478bd9Sstevel@tonic-gate tcp_xmit_early_reset("no tcp, reset/ack", sock_id, 60817c478bd9Sstevel@tonic-gate mp, 0, seg_seq + seg_len, 60827c478bd9Sstevel@tonic-gate TH_RST | TH_ACK, ip_hdr_len); 60837c478bd9Sstevel@tonic-gate } 60847c478bd9Sstevel@tonic-gate } 60857c478bd9Sstevel@tonic-gate 60867c478bd9Sstevel@tonic-gate /* Non overlapping byte exchanger */ 60877c478bd9Sstevel@tonic-gate static void 60887c478bd9Sstevel@tonic-gate tcp_xchg(uchar_t *a, uchar_t *b, int len) 60897c478bd9Sstevel@tonic-gate { 60907c478bd9Sstevel@tonic-gate uchar_t uch; 60917c478bd9Sstevel@tonic-gate 
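	/*
	 * Swap the two byte ranges in place, back to front.  Used, for
	 * example, by tcp_xmit_early_reset() below to exchange the TCP
	 * port fields of an incoming segment before reflecting a RST back
	 * at the sender.
	 */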
60927c478bd9Sstevel@tonic-gate while (len-- > 0) { 60937c478bd9Sstevel@tonic-gate uch = a[len]; 60947c478bd9Sstevel@tonic-gate a[len] = b[len]; 60957c478bd9Sstevel@tonic-gate b[len] = uch; 60967c478bd9Sstevel@tonic-gate } 60977c478bd9Sstevel@tonic-gate } 60987c478bd9Sstevel@tonic-gate 60997c478bd9Sstevel@tonic-gate /* 61007c478bd9Sstevel@tonic-gate * Generate a reset based on an inbound packet for which there is no active 61017c478bd9Sstevel@tonic-gate * tcp state that we can find. 61027c478bd9Sstevel@tonic-gate */ 61037c478bd9Sstevel@tonic-gate static void 61047c478bd9Sstevel@tonic-gate tcp_xmit_early_reset(char *str, int sock_id, mblk_t *mp, uint32_t seq, 61057c478bd9Sstevel@tonic-gate uint32_t ack, int ctl, uint_t ip_hdr_len) 61067c478bd9Sstevel@tonic-gate { 61077c478bd9Sstevel@tonic-gate struct ip *iph = NULL; 61087c478bd9Sstevel@tonic-gate ushort_t len; 61097c478bd9Sstevel@tonic-gate tcph_t *tcph; 61107c478bd9Sstevel@tonic-gate int i; 61117c478bd9Sstevel@tonic-gate ipaddr_t addr; 61127c478bd9Sstevel@tonic-gate mblk_t *new_mp; 61137c478bd9Sstevel@tonic-gate 61147c478bd9Sstevel@tonic-gate if (str != NULL) { 61157c478bd9Sstevel@tonic-gate dprintf("tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, " 61167c478bd9Sstevel@tonic-gate "flags 0x%x\n", str, seq, ack, ctl); 61177c478bd9Sstevel@tonic-gate } 61187c478bd9Sstevel@tonic-gate 61197c478bd9Sstevel@tonic-gate /* 61207c478bd9Sstevel@tonic-gate * We skip reversing source route here. 61217c478bd9Sstevel@tonic-gate * (for now we replace all IP options with EOL) 61227c478bd9Sstevel@tonic-gate */ 61237c478bd9Sstevel@tonic-gate iph = (struct ip *)mp->b_rptr; 61247c478bd9Sstevel@tonic-gate for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++) 61257c478bd9Sstevel@tonic-gate mp->b_rptr[i] = IPOPT_EOL; 61267c478bd9Sstevel@tonic-gate /* 61277c478bd9Sstevel@tonic-gate * Make sure that src address is not a limited broadcast 61287c478bd9Sstevel@tonic-gate * address. Not all broadcast address checking for the 61297c478bd9Sstevel@tonic-gate * src address is possible, since we don't know the 61307c478bd9Sstevel@tonic-gate * netmask of the src addr. 61317c478bd9Sstevel@tonic-gate * No check for destination address is done, since 61327c478bd9Sstevel@tonic-gate * IP will not pass up a packet with a broadcast dest address 61337c478bd9Sstevel@tonic-gate * to TCP. 61347c478bd9Sstevel@tonic-gate */ 61357c478bd9Sstevel@tonic-gate if (iph->ip_src.s_addr == INADDR_ANY || 61367c478bd9Sstevel@tonic-gate iph->ip_src.s_addr == INADDR_BROADCAST) { 61377c478bd9Sstevel@tonic-gate freemsg(mp); 61387c478bd9Sstevel@tonic-gate return; 61397c478bd9Sstevel@tonic-gate } 61407c478bd9Sstevel@tonic-gate 61417c478bd9Sstevel@tonic-gate tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 61427c478bd9Sstevel@tonic-gate if (tcph->th_flags[0] & TH_RST) { 61437c478bd9Sstevel@tonic-gate freemsg(mp); 61447c478bd9Sstevel@tonic-gate return; 61457c478bd9Sstevel@tonic-gate } 61467c478bd9Sstevel@tonic-gate /* 61477c478bd9Sstevel@tonic-gate * Now copy the original header to a new buffer. The reason 61487c478bd9Sstevel@tonic-gate * for doing this is that we need to put extra room before 61497c478bd9Sstevel@tonic-gate * the header for the MAC layer address. The original mblk 61507c478bd9Sstevel@tonic-gate * does not have this extra head room. 
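	 * The tcp_wroff_xtra bytes reserved in front of the copy are the
	 * same head room the other transmit paths in this file leave for
	 * the link-level header.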
61517c478bd9Sstevel@tonic-gate */ 61527c478bd9Sstevel@tonic-gate len = ip_hdr_len + sizeof (tcph_t); 61537c478bd9Sstevel@tonic-gate if ((new_mp = allocb(len + tcp_wroff_xtra, 0)) == NULL) { 61547c478bd9Sstevel@tonic-gate freemsg(mp); 61557c478bd9Sstevel@tonic-gate return; 61567c478bd9Sstevel@tonic-gate } 61577c478bd9Sstevel@tonic-gate new_mp->b_rptr += tcp_wroff_xtra; 61587c478bd9Sstevel@tonic-gate bcopy(mp->b_rptr, new_mp->b_rptr, len); 61597c478bd9Sstevel@tonic-gate new_mp->b_wptr = new_mp->b_rptr + len; 61607c478bd9Sstevel@tonic-gate freemsg(mp); 61617c478bd9Sstevel@tonic-gate mp = new_mp; 61627c478bd9Sstevel@tonic-gate iph = (struct ip *)mp->b_rptr; 61637c478bd9Sstevel@tonic-gate tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 61647c478bd9Sstevel@tonic-gate 61657c478bd9Sstevel@tonic-gate tcph->th_offset_and_rsrvd[0] = (5 << 4); 61667c478bd9Sstevel@tonic-gate tcp_xchg(tcph->th_fport, tcph->th_lport, 2); 61677c478bd9Sstevel@tonic-gate U32_TO_BE32(ack, tcph->th_ack); 61687c478bd9Sstevel@tonic-gate U32_TO_BE32(seq, tcph->th_seq); 61697c478bd9Sstevel@tonic-gate U16_TO_BE16(0, tcph->th_win); 61707c478bd9Sstevel@tonic-gate bzero(tcph->th_sum, sizeof (int16_t)); 61717c478bd9Sstevel@tonic-gate tcph->th_flags[0] = (uint8_t)ctl; 61727c478bd9Sstevel@tonic-gate if (ctl & TH_RST) { 61737c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutRsts); 61747c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpOutControl); 61757c478bd9Sstevel@tonic-gate } 61767c478bd9Sstevel@tonic-gate 61777c478bd9Sstevel@tonic-gate iph->ip_len = htons(len); 61787c478bd9Sstevel@tonic-gate /* Swap addresses */ 61797c478bd9Sstevel@tonic-gate addr = iph->ip_src.s_addr; 61807c478bd9Sstevel@tonic-gate iph->ip_src = iph->ip_dst; 61817c478bd9Sstevel@tonic-gate iph->ip_dst.s_addr = addr; 61827c478bd9Sstevel@tonic-gate iph->ip_id = 0; 61837c478bd9Sstevel@tonic-gate iph->ip_ttl = 0; 61847c478bd9Sstevel@tonic-gate tcp_set_cksum(mp); 61857c478bd9Sstevel@tonic-gate iph->ip_ttl = (uint8_t)tcp_ipv4_ttl; 61867c478bd9Sstevel@tonic-gate 61877c478bd9Sstevel@tonic-gate /* Dump the packet when debugging. */ 61887c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_xmit_early_reset", mp); 61897c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp); 61907c478bd9Sstevel@tonic-gate freemsg(mp); 61917c478bd9Sstevel@tonic-gate } 61927c478bd9Sstevel@tonic-gate 61937c478bd9Sstevel@tonic-gate static void 61947c478bd9Sstevel@tonic-gate tcp_set_cksum(mblk_t *mp) 61957c478bd9Sstevel@tonic-gate { 61967c478bd9Sstevel@tonic-gate struct ip *iph; 61977c478bd9Sstevel@tonic-gate tcpha_t *tcph; 61987c478bd9Sstevel@tonic-gate int len; 61997c478bd9Sstevel@tonic-gate 62007c478bd9Sstevel@tonic-gate iph = (struct ip *)mp->b_rptr; 62017c478bd9Sstevel@tonic-gate tcph = (tcpha_t *)(iph + 1); 62027c478bd9Sstevel@tonic-gate len = ntohs(iph->ip_len); 62037c478bd9Sstevel@tonic-gate /* 62047c478bd9Sstevel@tonic-gate * Calculate the TCP checksum. Need to include the psuedo header, 62057c478bd9Sstevel@tonic-gate * which is similar to the real IP header starting at the TTL field. 
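	 * Concretely: ip_sum is temporarily overwritten with the TCP
	 * segment length (total length minus the 20-byte IP header), so the
	 * 12 bytes starting at ip_ttl hold TTL (still zero from the header
	 * template), protocol, TCP length, and the source and destination
	 * addresses.  Since the ones' complement sum does not depend on
	 * word order, summing those 12 bytes ahead of the TCP header is
	 * equivalent to the RFC 793 pseudo header; ip_sum is then cleared
	 * again before the packet is handed to the IP output code.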
62067c478bd9Sstevel@tonic-gate */ 62077c478bd9Sstevel@tonic-gate iph->ip_sum = htons(len - IP_SIMPLE_HDR_LENGTH); 62087c478bd9Sstevel@tonic-gate tcph->tha_sum = 0; 62097c478bd9Sstevel@tonic-gate tcph->tha_sum = tcp_cksum((uint16_t *)&(iph->ip_ttl), 62107c478bd9Sstevel@tonic-gate len - IP_SIMPLE_HDR_LENGTH + 12); 62117c478bd9Sstevel@tonic-gate iph->ip_sum = 0; 62127c478bd9Sstevel@tonic-gate } 62137c478bd9Sstevel@tonic-gate 62147c478bd9Sstevel@tonic-gate static uint16_t 62157c478bd9Sstevel@tonic-gate tcp_cksum(uint16_t *buf, uint32_t len) 62167c478bd9Sstevel@tonic-gate { 62177c478bd9Sstevel@tonic-gate /* 62187c478bd9Sstevel@tonic-gate * Compute Internet Checksum for "count" bytes 62197c478bd9Sstevel@tonic-gate * beginning at location "addr". 62207c478bd9Sstevel@tonic-gate */ 62217c478bd9Sstevel@tonic-gate int32_t sum = 0; 62227c478bd9Sstevel@tonic-gate 62237c478bd9Sstevel@tonic-gate while (len > 1) { 62247c478bd9Sstevel@tonic-gate /* This is the inner loop */ 62257c478bd9Sstevel@tonic-gate sum += *buf++; 62267c478bd9Sstevel@tonic-gate len -= 2; 62277c478bd9Sstevel@tonic-gate } 62287c478bd9Sstevel@tonic-gate 62297c478bd9Sstevel@tonic-gate /* Add left-over byte, if any */ 62307c478bd9Sstevel@tonic-gate if (len > 0) 62317c478bd9Sstevel@tonic-gate sum += *(unsigned char *)buf * 256; 62327c478bd9Sstevel@tonic-gate 62337c478bd9Sstevel@tonic-gate /* Fold 32-bit sum to 16 bits */ 62347c478bd9Sstevel@tonic-gate while (sum >> 16) 62357c478bd9Sstevel@tonic-gate sum = (sum & 0xffff) + (sum >> 16); 62367c478bd9Sstevel@tonic-gate 62377c478bd9Sstevel@tonic-gate return ((uint16_t)~sum); 62387c478bd9Sstevel@tonic-gate } 62397c478bd9Sstevel@tonic-gate 62407c478bd9Sstevel@tonic-gate /* 62417c478bd9Sstevel@tonic-gate * Type three generator adapted from the random() function in 4.4 BSD: 62427c478bd9Sstevel@tonic-gate */ 62437c478bd9Sstevel@tonic-gate 62447c478bd9Sstevel@tonic-gate /* 62457c478bd9Sstevel@tonic-gate * Copyright (c) 1983, 1993 62467c478bd9Sstevel@tonic-gate * The Regents of the University of California. All rights reserved. 62477c478bd9Sstevel@tonic-gate * 62487c478bd9Sstevel@tonic-gate * Redistribution and use in source and binary forms, with or without 62497c478bd9Sstevel@tonic-gate * modification, are permitted provided that the following conditions 62507c478bd9Sstevel@tonic-gate * are met: 62517c478bd9Sstevel@tonic-gate * 1. Redistributions of source code must retain the above copyright 62527c478bd9Sstevel@tonic-gate * notice, this list of conditions and the following disclaimer. 62537c478bd9Sstevel@tonic-gate * 2. Redistributions in binary form must reproduce the above copyright 62547c478bd9Sstevel@tonic-gate * notice, this list of conditions and the following disclaimer in the 62557c478bd9Sstevel@tonic-gate * documentation and/or other materials provided with the distribution. 62567c478bd9Sstevel@tonic-gate * 3. All advertising materials mentioning features or use of this software 62577c478bd9Sstevel@tonic-gate * must display the following acknowledgement: 62587c478bd9Sstevel@tonic-gate * This product includes software developed by the University of 62597c478bd9Sstevel@tonic-gate * California, Berkeley and its contributors. 62607c478bd9Sstevel@tonic-gate * 4. Neither the name of the University nor the names of its contributors 62617c478bd9Sstevel@tonic-gate * may be used to endorse or promote products derived from this software 62627c478bd9Sstevel@tonic-gate * without specific prior written permission. 
62637c478bd9Sstevel@tonic-gate * 62647c478bd9Sstevel@tonic-gate * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 62657c478bd9Sstevel@tonic-gate * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 62667c478bd9Sstevel@tonic-gate * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62677c478bd9Sstevel@tonic-gate * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 62687c478bd9Sstevel@tonic-gate * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 62697c478bd9Sstevel@tonic-gate * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 62707c478bd9Sstevel@tonic-gate * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 62717c478bd9Sstevel@tonic-gate * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62727c478bd9Sstevel@tonic-gate * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62737c478bd9Sstevel@tonic-gate * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62747c478bd9Sstevel@tonic-gate * SUCH DAMAGE. 62757c478bd9Sstevel@tonic-gate */ 62767c478bd9Sstevel@tonic-gate 62777c478bd9Sstevel@tonic-gate /* Type 3 -- x**31 + x**3 + 1 */ 62787c478bd9Sstevel@tonic-gate #define DEG_3 31 62797c478bd9Sstevel@tonic-gate #define SEP_3 3 62807c478bd9Sstevel@tonic-gate 62817c478bd9Sstevel@tonic-gate 62827c478bd9Sstevel@tonic-gate /* Protected by tcp_random_lock */ 62837c478bd9Sstevel@tonic-gate static int tcp_randtbl[DEG_3 + 1]; 62847c478bd9Sstevel@tonic-gate 62857c478bd9Sstevel@tonic-gate static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1]; 62867c478bd9Sstevel@tonic-gate static int *tcp_random_rptr = &tcp_randtbl[1]; 62877c478bd9Sstevel@tonic-gate 62887c478bd9Sstevel@tonic-gate static int *tcp_random_state = &tcp_randtbl[1]; 62897c478bd9Sstevel@tonic-gate static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1]; 62907c478bd9Sstevel@tonic-gate 62917c478bd9Sstevel@tonic-gate static void 62927c478bd9Sstevel@tonic-gate tcp_random_init(void) 62937c478bd9Sstevel@tonic-gate { 62947c478bd9Sstevel@tonic-gate int i; 62957c478bd9Sstevel@tonic-gate uint32_t hrt; 62967c478bd9Sstevel@tonic-gate uint32_t wallclock; 62977c478bd9Sstevel@tonic-gate uint32_t result; 62987c478bd9Sstevel@tonic-gate 62997c478bd9Sstevel@tonic-gate /* 63007c478bd9Sstevel@tonic-gate * 63017c478bd9Sstevel@tonic-gate * XXX We don't have high resolution time in standalone... The 63027c478bd9Sstevel@tonic-gate * following is just some approximation on the comment below. 63037c478bd9Sstevel@tonic-gate * 63047c478bd9Sstevel@tonic-gate * Use high-res timer and current time for seed. Gethrtime() returns 63057c478bd9Sstevel@tonic-gate * a longlong, which may contain resolution down to nanoseconds. 63067c478bd9Sstevel@tonic-gate * The current time will either be a 32-bit or a 64-bit quantity. 63077c478bd9Sstevel@tonic-gate * XOR the two together in a 64-bit result variable. 63087c478bd9Sstevel@tonic-gate * Convert the result to a 32-bit value by multiplying the high-order 63097c478bd9Sstevel@tonic-gate * 32-bits by the low-order 32-bits. 63107c478bd9Sstevel@tonic-gate * 63117c478bd9Sstevel@tonic-gate * XXX We don't have gethrtime() in prom and the wallclock.... 
63127c478bd9Sstevel@tonic-gate */ 63137c478bd9Sstevel@tonic-gate 63147c478bd9Sstevel@tonic-gate hrt = prom_gettime(); 63157c478bd9Sstevel@tonic-gate wallclock = (uint32_t)time(NULL); 63167c478bd9Sstevel@tonic-gate result = wallclock ^ hrt; 63177c478bd9Sstevel@tonic-gate tcp_random_state[0] = result; 63187c478bd9Sstevel@tonic-gate 63197c478bd9Sstevel@tonic-gate for (i = 1; i < DEG_3; i++) 63207c478bd9Sstevel@tonic-gate tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1] 63217c478bd9Sstevel@tonic-gate + 12345; 63227c478bd9Sstevel@tonic-gate tcp_random_fptr = &tcp_random_state[SEP_3]; 63237c478bd9Sstevel@tonic-gate tcp_random_rptr = &tcp_random_state[0]; 63247c478bd9Sstevel@tonic-gate for (i = 0; i < 10 * DEG_3; i++) 63257c478bd9Sstevel@tonic-gate (void) tcp_random(); 63267c478bd9Sstevel@tonic-gate } 63277c478bd9Sstevel@tonic-gate 63287c478bd9Sstevel@tonic-gate /* 63297c478bd9Sstevel@tonic-gate * tcp_random: Return a random number in the range [1 - (128K + 1)]. 63307c478bd9Sstevel@tonic-gate * This range is selected to be approximately centered on TCP_ISS / 2, 63317c478bd9Sstevel@tonic-gate * and easy to compute. We get this value by generating a 32-bit random 63327c478bd9Sstevel@tonic-gate * number, selecting out the high-order 17 bits, and then adding one so 63337c478bd9Sstevel@tonic-gate * that we never return zero. 63347c478bd9Sstevel@tonic-gate */ 63357c478bd9Sstevel@tonic-gate static int 63367c478bd9Sstevel@tonic-gate tcp_random(void) 63377c478bd9Sstevel@tonic-gate { 63387c478bd9Sstevel@tonic-gate int i; 63397c478bd9Sstevel@tonic-gate 63407c478bd9Sstevel@tonic-gate *tcp_random_fptr += *tcp_random_rptr; 63417c478bd9Sstevel@tonic-gate 63427c478bd9Sstevel@tonic-gate /* 63437c478bd9Sstevel@tonic-gate * The high-order bits are more random than the low-order bits, 63447c478bd9Sstevel@tonic-gate * so we select out the high-order 17 bits and add one so that 63457c478bd9Sstevel@tonic-gate * we never return zero. 63467c478bd9Sstevel@tonic-gate */ 63477c478bd9Sstevel@tonic-gate i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1; 63487c478bd9Sstevel@tonic-gate if (++tcp_random_fptr >= tcp_random_end_ptr) { 63497c478bd9Sstevel@tonic-gate tcp_random_fptr = tcp_random_state; 63507c478bd9Sstevel@tonic-gate ++tcp_random_rptr; 63517c478bd9Sstevel@tonic-gate } else if (++tcp_random_rptr >= tcp_random_end_ptr) 63527c478bd9Sstevel@tonic-gate tcp_random_rptr = tcp_random_state; 63537c478bd9Sstevel@tonic-gate 63547c478bd9Sstevel@tonic-gate return (i); 63557c478bd9Sstevel@tonic-gate } 63567c478bd9Sstevel@tonic-gate 63577c478bd9Sstevel@tonic-gate /* 63587c478bd9Sstevel@tonic-gate * Generate ISS, taking into account NDD changes may happen halfway through. 63597c478bd9Sstevel@tonic-gate * (If the iss is not zero, set it.) 
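 * A sketch of the state this leaves behind (derived from the code below,
 * not an addition to it): tcp_suna = tcp_iss and tcp_snxt = tcp_iss + 1,
 * since the SYN itself consumes one sequence number, while tcp_fss is
 * parked at tcp_iss - 1 until tcp_xmit_end() later computes the real FIN
 * sequence number.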
63607c478bd9Sstevel@tonic-gate */ 63617c478bd9Sstevel@tonic-gate static void 63627c478bd9Sstevel@tonic-gate tcp_iss_init(tcp_t *tcp) 63637c478bd9Sstevel@tonic-gate { 63647c478bd9Sstevel@tonic-gate tcp_iss_incr_extra += (ISS_INCR >> 1); 63657c478bd9Sstevel@tonic-gate tcp->tcp_iss = tcp_iss_incr_extra; 63667c478bd9Sstevel@tonic-gate tcp->tcp_iss += (prom_gettime() >> ISS_NSEC_SHT) + tcp_random(); 63677c478bd9Sstevel@tonic-gate tcp->tcp_valid_bits = TCP_ISS_VALID; 63687c478bd9Sstevel@tonic-gate tcp->tcp_fss = tcp->tcp_iss - 1; 63697c478bd9Sstevel@tonic-gate tcp->tcp_suna = tcp->tcp_iss; 63707c478bd9Sstevel@tonic-gate tcp->tcp_snxt = tcp->tcp_iss + 1; 63717c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 63727c478bd9Sstevel@tonic-gate tcp->tcp_csuna = tcp->tcp_snxt; 63737c478bd9Sstevel@tonic-gate } 63747c478bd9Sstevel@tonic-gate 63757c478bd9Sstevel@tonic-gate /* 63767c478bd9Sstevel@tonic-gate * Diagnostic routine used to return a string associated with the tcp state. 63777c478bd9Sstevel@tonic-gate * Note that if the caller does not supply a buffer, it will use an internal 63787c478bd9Sstevel@tonic-gate * static string. This means that if multiple threads call this function at 63797c478bd9Sstevel@tonic-gate * the same time, output can be corrupted... Note also that this function 63807c478bd9Sstevel@tonic-gate * does not check the size of the supplied buffer. The caller has to make 63817c478bd9Sstevel@tonic-gate * sure that it is big enough. 63827c478bd9Sstevel@tonic-gate */ 63837c478bd9Sstevel@tonic-gate static char * 63847c478bd9Sstevel@tonic-gate tcp_display(tcp_t *tcp, char *sup_buf, char format) 63857c478bd9Sstevel@tonic-gate { 63867c478bd9Sstevel@tonic-gate char buf1[30]; 63877c478bd9Sstevel@tonic-gate static char priv_buf[INET_ADDRSTRLEN * 2 + 80]; 63887c478bd9Sstevel@tonic-gate char *buf; 63897c478bd9Sstevel@tonic-gate char *cp; 63907c478bd9Sstevel@tonic-gate char local_addrbuf[INET_ADDRSTRLEN]; 63917c478bd9Sstevel@tonic-gate char remote_addrbuf[INET_ADDRSTRLEN]; 63927c478bd9Sstevel@tonic-gate struct in_addr addr; 63937c478bd9Sstevel@tonic-gate 63947c478bd9Sstevel@tonic-gate if (sup_buf != NULL) 63957c478bd9Sstevel@tonic-gate buf = sup_buf; 63967c478bd9Sstevel@tonic-gate else 63977c478bd9Sstevel@tonic-gate buf = priv_buf; 63987c478bd9Sstevel@tonic-gate 63997c478bd9Sstevel@tonic-gate if (tcp == NULL) 64007c478bd9Sstevel@tonic-gate return ("NULL_TCP"); 64017c478bd9Sstevel@tonic-gate switch (tcp->tcp_state) { 64027c478bd9Sstevel@tonic-gate case TCPS_CLOSED: 64037c478bd9Sstevel@tonic-gate cp = "TCP_CLOSED"; 64047c478bd9Sstevel@tonic-gate break; 64057c478bd9Sstevel@tonic-gate case TCPS_IDLE: 64067c478bd9Sstevel@tonic-gate cp = "TCP_IDLE"; 64077c478bd9Sstevel@tonic-gate break; 64087c478bd9Sstevel@tonic-gate case TCPS_BOUND: 64097c478bd9Sstevel@tonic-gate cp = "TCP_BOUND"; 64107c478bd9Sstevel@tonic-gate break; 64117c478bd9Sstevel@tonic-gate case TCPS_LISTEN: 64127c478bd9Sstevel@tonic-gate cp = "TCP_LISTEN"; 64137c478bd9Sstevel@tonic-gate break; 64147c478bd9Sstevel@tonic-gate case TCPS_SYN_SENT: 64157c478bd9Sstevel@tonic-gate cp = "TCP_SYN_SENT"; 64167c478bd9Sstevel@tonic-gate break; 64177c478bd9Sstevel@tonic-gate case TCPS_SYN_RCVD: 64187c478bd9Sstevel@tonic-gate cp = "TCP_SYN_RCVD"; 64197c478bd9Sstevel@tonic-gate break; 64207c478bd9Sstevel@tonic-gate case TCPS_ESTABLISHED: 64217c478bd9Sstevel@tonic-gate cp = "TCP_ESTABLISHED"; 64227c478bd9Sstevel@tonic-gate break; 64237c478bd9Sstevel@tonic-gate case TCPS_CLOSE_WAIT: 64247c478bd9Sstevel@tonic-gate cp = "TCP_CLOSE_WAIT"; 
64257c478bd9Sstevel@tonic-gate break; 64267c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_1: 64277c478bd9Sstevel@tonic-gate cp = "TCP_FIN_WAIT_1"; 64287c478bd9Sstevel@tonic-gate break; 64297c478bd9Sstevel@tonic-gate case TCPS_CLOSING: 64307c478bd9Sstevel@tonic-gate cp = "TCP_CLOSING"; 64317c478bd9Sstevel@tonic-gate break; 64327c478bd9Sstevel@tonic-gate case TCPS_LAST_ACK: 64337c478bd9Sstevel@tonic-gate cp = "TCP_LAST_ACK"; 64347c478bd9Sstevel@tonic-gate break; 64357c478bd9Sstevel@tonic-gate case TCPS_FIN_WAIT_2: 64367c478bd9Sstevel@tonic-gate cp = "TCP_FIN_WAIT_2"; 64377c478bd9Sstevel@tonic-gate break; 64387c478bd9Sstevel@tonic-gate case TCPS_TIME_WAIT: 64397c478bd9Sstevel@tonic-gate cp = "TCP_TIME_WAIT"; 64407c478bd9Sstevel@tonic-gate break; 64417c478bd9Sstevel@tonic-gate default: 64427c478bd9Sstevel@tonic-gate (void) sprintf(buf1, "TCPUnkState(%d)", tcp->tcp_state); 64437c478bd9Sstevel@tonic-gate cp = buf1; 64447c478bd9Sstevel@tonic-gate break; 64457c478bd9Sstevel@tonic-gate } 64467c478bd9Sstevel@tonic-gate switch (format) { 64477c478bd9Sstevel@tonic-gate case DISP_ADDR_AND_PORT: 64487c478bd9Sstevel@tonic-gate /* 64497c478bd9Sstevel@tonic-gate * Note that we use the remote address in the tcp_b 64507c478bd9Sstevel@tonic-gate * structure. This means that it will print out 64517c478bd9Sstevel@tonic-gate * the real destination address, not the next hop's 64527c478bd9Sstevel@tonic-gate * address if source routing is used. 64537c478bd9Sstevel@tonic-gate */ 64547c478bd9Sstevel@tonic-gate addr.s_addr = tcp->tcp_bound_source; 64557c478bd9Sstevel@tonic-gate bcopy(inet_ntoa(addr), local_addrbuf, sizeof (local_addrbuf)); 64567c478bd9Sstevel@tonic-gate addr.s_addr = tcp->tcp_remote; 64577c478bd9Sstevel@tonic-gate bcopy(inet_ntoa(addr), remote_addrbuf, sizeof (remote_addrbuf)); 64587c478bd9Sstevel@tonic-gate (void) snprintf(buf, sizeof (priv_buf), "[%s.%u, %s.%u] %s", 64597c478bd9Sstevel@tonic-gate local_addrbuf, ntohs(tcp->tcp_lport), remote_addrbuf, 64607c478bd9Sstevel@tonic-gate ntohs(tcp->tcp_fport), cp); 64617c478bd9Sstevel@tonic-gate break; 64627c478bd9Sstevel@tonic-gate case DISP_PORT_ONLY: 64637c478bd9Sstevel@tonic-gate default: 64647c478bd9Sstevel@tonic-gate (void) snprintf(buf, sizeof (priv_buf), "[%u, %u] %s", 64657c478bd9Sstevel@tonic-gate ntohs(tcp->tcp_lport), ntohs(tcp->tcp_fport), cp); 64667c478bd9Sstevel@tonic-gate break; 64677c478bd9Sstevel@tonic-gate } 64687c478bd9Sstevel@tonic-gate 64697c478bd9Sstevel@tonic-gate return (buf); 64707c478bd9Sstevel@tonic-gate } 64717c478bd9Sstevel@tonic-gate 64727c478bd9Sstevel@tonic-gate /* 64737c478bd9Sstevel@tonic-gate * Add a new piece to the tcp reassembly queue. If the gap at the beginning 64747c478bd9Sstevel@tonic-gate * is filled, return as much as we can. The message passed in may be 64757c478bd9Sstevel@tonic-gate * multi-part, chained using b_cont. "start" is the starting sequence 64767c478bd9Sstevel@tonic-gate * number for this piece. 64777c478bd9Sstevel@tonic-gate */ 64787c478bd9Sstevel@tonic-gate static mblk_t * 64797c478bd9Sstevel@tonic-gate tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start) 64807c478bd9Sstevel@tonic-gate { 64817c478bd9Sstevel@tonic-gate uint32_t end; 64827c478bd9Sstevel@tonic-gate mblk_t *mp1; 64837c478bd9Sstevel@tonic-gate mblk_t *mp2; 64847c478bd9Sstevel@tonic-gate mblk_t *next_mp; 64857c478bd9Sstevel@tonic-gate uint32_t u1; 64867c478bd9Sstevel@tonic-gate 64877c478bd9Sstevel@tonic-gate /* Walk through all the new pieces. 
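	 * An illustrative walk-through (numbers invented for this note):
	 * with tcp_rnxt at 100, segments [110, 120) and [120, 130) are
	 * queued and each call returns NULL; when [100, 110) finally
	 * arrives it becomes the new head, the gap is filled, and the whole
	 * b_cont chain covering [100, 130) is returned to the caller in one
	 * piece.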
*/ 64887c478bd9Sstevel@tonic-gate do { 64897c478bd9Sstevel@tonic-gate assert((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 64907c478bd9Sstevel@tonic-gate (uintptr_t)INT_MAX); 64917c478bd9Sstevel@tonic-gate end = start + (int)(mp->b_wptr - mp->b_rptr); 64927c478bd9Sstevel@tonic-gate next_mp = mp->b_cont; 64937c478bd9Sstevel@tonic-gate if (start == end) { 64947c478bd9Sstevel@tonic-gate /* Empty. Blast it. */ 64957c478bd9Sstevel@tonic-gate freeb(mp); 64967c478bd9Sstevel@tonic-gate continue; 64977c478bd9Sstevel@tonic-gate } 64987c478bd9Sstevel@tonic-gate mp->b_cont = NULL; 64997c478bd9Sstevel@tonic-gate TCP_REASS_SET_SEQ(mp, start); 65007c478bd9Sstevel@tonic-gate TCP_REASS_SET_END(mp, end); 65017c478bd9Sstevel@tonic-gate mp1 = tcp->tcp_reass_tail; 65027c478bd9Sstevel@tonic-gate if (!mp1) { 65037c478bd9Sstevel@tonic-gate tcp->tcp_reass_tail = mp; 65047c478bd9Sstevel@tonic-gate tcp->tcp_reass_head = mp; 65057c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataUnorderSegs); 65067c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataUnorderBytes, end - start); 65077c478bd9Sstevel@tonic-gate continue; 65087c478bd9Sstevel@tonic-gate } 65097c478bd9Sstevel@tonic-gate /* New stuff completely beyond tail? */ 65107c478bd9Sstevel@tonic-gate if (SEQ_GEQ(start, TCP_REASS_END(mp1))) { 65117c478bd9Sstevel@tonic-gate /* Link it on end. */ 65127c478bd9Sstevel@tonic-gate mp1->b_cont = mp; 65137c478bd9Sstevel@tonic-gate tcp->tcp_reass_tail = mp; 65147c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataUnorderSegs); 65157c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataUnorderBytes, end - start); 65167c478bd9Sstevel@tonic-gate continue; 65177c478bd9Sstevel@tonic-gate } 65187c478bd9Sstevel@tonic-gate mp1 = tcp->tcp_reass_head; 65197c478bd9Sstevel@tonic-gate u1 = TCP_REASS_SEQ(mp1); 65207c478bd9Sstevel@tonic-gate /* New stuff at the front? */ 65217c478bd9Sstevel@tonic-gate if (SEQ_LT(start, u1)) { 65227c478bd9Sstevel@tonic-gate /* Yes... Check for overlap. */ 65237c478bd9Sstevel@tonic-gate mp->b_cont = mp1; 65247c478bd9Sstevel@tonic-gate tcp->tcp_reass_head = mp; 65257c478bd9Sstevel@tonic-gate tcp_reass_elim_overlap(tcp, mp); 65267c478bd9Sstevel@tonic-gate continue; 65277c478bd9Sstevel@tonic-gate } 65287c478bd9Sstevel@tonic-gate /* 65297c478bd9Sstevel@tonic-gate * The new piece fits somewhere between the head and tail. 65307c478bd9Sstevel@tonic-gate * We find our slot, where mp1 precedes us and mp2 trails. 65317c478bd9Sstevel@tonic-gate */ 65327c478bd9Sstevel@tonic-gate for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) { 65337c478bd9Sstevel@tonic-gate u1 = TCP_REASS_SEQ(mp2); 65347c478bd9Sstevel@tonic-gate if (SEQ_LEQ(start, u1)) 65357c478bd9Sstevel@tonic-gate break; 65367c478bd9Sstevel@tonic-gate } 65377c478bd9Sstevel@tonic-gate /* Link ourselves in */ 65387c478bd9Sstevel@tonic-gate mp->b_cont = mp2; 65397c478bd9Sstevel@tonic-gate mp1->b_cont = mp; 65407c478bd9Sstevel@tonic-gate 65417c478bd9Sstevel@tonic-gate /* Trim overlap with following mblk(s) first */ 65427c478bd9Sstevel@tonic-gate tcp_reass_elim_overlap(tcp, mp); 65437c478bd9Sstevel@tonic-gate 65447c478bd9Sstevel@tonic-gate /* Trim overlap with preceding mblk */ 65457c478bd9Sstevel@tonic-gate tcp_reass_elim_overlap(tcp, mp1); 65467c478bd9Sstevel@tonic-gate 65477c478bd9Sstevel@tonic-gate } while (start = end, mp = next_mp); 65487c478bd9Sstevel@tonic-gate mp1 = tcp->tcp_reass_head; 65497c478bd9Sstevel@tonic-gate /* Anything ready to go? 
*/ 65507c478bd9Sstevel@tonic-gate if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt) 65517c478bd9Sstevel@tonic-gate return (NULL); 65527c478bd9Sstevel@tonic-gate /* Eat what we can off the queue */ 65537c478bd9Sstevel@tonic-gate for (;;) { 65547c478bd9Sstevel@tonic-gate mp = mp1->b_cont; 65557c478bd9Sstevel@tonic-gate end = TCP_REASS_END(mp1); 65567c478bd9Sstevel@tonic-gate TCP_REASS_SET_SEQ(mp1, 0); 65577c478bd9Sstevel@tonic-gate TCP_REASS_SET_END(mp1, 0); 65587c478bd9Sstevel@tonic-gate if (!mp) { 65597c478bd9Sstevel@tonic-gate tcp->tcp_reass_tail = NULL; 65607c478bd9Sstevel@tonic-gate break; 65617c478bd9Sstevel@tonic-gate } 65627c478bd9Sstevel@tonic-gate if (end != TCP_REASS_SEQ(mp)) { 65637c478bd9Sstevel@tonic-gate mp1->b_cont = NULL; 65647c478bd9Sstevel@tonic-gate break; 65657c478bd9Sstevel@tonic-gate } 65667c478bd9Sstevel@tonic-gate mp1 = mp; 65677c478bd9Sstevel@tonic-gate } 65687c478bd9Sstevel@tonic-gate mp1 = tcp->tcp_reass_head; 65697c478bd9Sstevel@tonic-gate tcp->tcp_reass_head = mp; 65707c478bd9Sstevel@tonic-gate return (mp1); 65717c478bd9Sstevel@tonic-gate } 65727c478bd9Sstevel@tonic-gate 65737c478bd9Sstevel@tonic-gate /* Eliminate any overlap that mp may have over later mblks */ 65747c478bd9Sstevel@tonic-gate static void 65757c478bd9Sstevel@tonic-gate tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp) 65767c478bd9Sstevel@tonic-gate { 65777c478bd9Sstevel@tonic-gate uint32_t end; 65787c478bd9Sstevel@tonic-gate mblk_t *mp1; 65797c478bd9Sstevel@tonic-gate uint32_t u1; 65807c478bd9Sstevel@tonic-gate 65817c478bd9Sstevel@tonic-gate end = TCP_REASS_END(mp); 65827c478bd9Sstevel@tonic-gate while ((mp1 = mp->b_cont) != NULL) { 65837c478bd9Sstevel@tonic-gate u1 = TCP_REASS_SEQ(mp1); 65847c478bd9Sstevel@tonic-gate if (!SEQ_GT(end, u1)) 65857c478bd9Sstevel@tonic-gate break; 65867c478bd9Sstevel@tonic-gate if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) { 65877c478bd9Sstevel@tonic-gate mp->b_wptr -= end - u1; 65887c478bd9Sstevel@tonic-gate TCP_REASS_SET_END(mp, u1); 65897c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataPartDupSegs); 65907c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataPartDupBytes, end - u1); 65917c478bd9Sstevel@tonic-gate break; 65927c478bd9Sstevel@tonic-gate } 65937c478bd9Sstevel@tonic-gate mp->b_cont = mp1->b_cont; 65947c478bd9Sstevel@tonic-gate freeb(mp1); 65957c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpInDataDupSegs); 65967c478bd9Sstevel@tonic-gate UPDATE_MIB(tcp_mib.tcpInDataDupBytes, end - u1); 65977c478bd9Sstevel@tonic-gate } 65987c478bd9Sstevel@tonic-gate if (!mp1) 65997c478bd9Sstevel@tonic-gate tcp->tcp_reass_tail = mp; 66007c478bd9Sstevel@tonic-gate } 66017c478bd9Sstevel@tonic-gate 66027c478bd9Sstevel@tonic-gate /* 66037c478bd9Sstevel@tonic-gate * Remove a connection from the list of detached TIME_WAIT connections. 
66047c478bd9Sstevel@tonic-gate */ 66057c478bd9Sstevel@tonic-gate static void 66067c478bd9Sstevel@tonic-gate tcp_time_wait_remove(tcp_t *tcp) 66077c478bd9Sstevel@tonic-gate { 66087c478bd9Sstevel@tonic-gate if (tcp->tcp_time_wait_expire == 0) { 66097c478bd9Sstevel@tonic-gate assert(tcp->tcp_time_wait_next == NULL); 66107c478bd9Sstevel@tonic-gate assert(tcp->tcp_time_wait_prev == NULL); 66117c478bd9Sstevel@tonic-gate return; 66127c478bd9Sstevel@tonic-gate } 66137c478bd9Sstevel@tonic-gate assert(tcp->tcp_state == TCPS_TIME_WAIT); 66147c478bd9Sstevel@tonic-gate if (tcp == tcp_time_wait_head) { 66157c478bd9Sstevel@tonic-gate assert(tcp->tcp_time_wait_prev == NULL); 66167c478bd9Sstevel@tonic-gate tcp_time_wait_head = tcp->tcp_time_wait_next; 66177c478bd9Sstevel@tonic-gate if (tcp_time_wait_head != NULL) { 66187c478bd9Sstevel@tonic-gate tcp_time_wait_head->tcp_time_wait_prev = NULL; 66197c478bd9Sstevel@tonic-gate } else { 66207c478bd9Sstevel@tonic-gate tcp_time_wait_tail = NULL; 66217c478bd9Sstevel@tonic-gate } 66227c478bd9Sstevel@tonic-gate } else if (tcp == tcp_time_wait_tail) { 66237c478bd9Sstevel@tonic-gate assert(tcp != tcp_time_wait_head); 66247c478bd9Sstevel@tonic-gate assert(tcp->tcp_time_wait_next == NULL); 66257c478bd9Sstevel@tonic-gate tcp_time_wait_tail = tcp->tcp_time_wait_prev; 66267c478bd9Sstevel@tonic-gate assert(tcp_time_wait_tail != NULL); 66277c478bd9Sstevel@tonic-gate tcp_time_wait_tail->tcp_time_wait_next = NULL; 66287c478bd9Sstevel@tonic-gate } else { 66297c478bd9Sstevel@tonic-gate assert(tcp->tcp_time_wait_prev->tcp_time_wait_next == tcp); 66307c478bd9Sstevel@tonic-gate assert(tcp->tcp_time_wait_next->tcp_time_wait_prev == tcp); 66317c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_prev->tcp_time_wait_next = 66327c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_next; 66337c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_next->tcp_time_wait_prev = 66347c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_prev; 66357c478bd9Sstevel@tonic-gate } 66367c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_next = NULL; 66377c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_prev = NULL; 66387c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_expire = 0; 66397c478bd9Sstevel@tonic-gate } 66407c478bd9Sstevel@tonic-gate 66417c478bd9Sstevel@tonic-gate /* 66427c478bd9Sstevel@tonic-gate * Add a connection to the list of detached TIME_WAIT connections 66437c478bd9Sstevel@tonic-gate * and set its time to expire ... 
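 * (A tcp_time_wait_expire of zero doubles as the "not on the list" marker
 * tested by tcp_time_wait_remove() above, which is why a computed expiry
 * of zero is bumped to 1 below.)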
66447c478bd9Sstevel@tonic-gate */ 66457c478bd9Sstevel@tonic-gate static void 66467c478bd9Sstevel@tonic-gate tcp_time_wait_append(tcp_t *tcp) 66477c478bd9Sstevel@tonic-gate { 66487c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_expire = prom_gettime() + tcp_time_wait_interval; 66497c478bd9Sstevel@tonic-gate if (tcp->tcp_time_wait_expire == 0) 66507c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_expire = 1; 66517c478bd9Sstevel@tonic-gate 66527c478bd9Sstevel@tonic-gate if (tcp_time_wait_head == NULL) { 66537c478bd9Sstevel@tonic-gate assert(tcp_time_wait_tail == NULL); 66547c478bd9Sstevel@tonic-gate tcp_time_wait_head = tcp; 66557c478bd9Sstevel@tonic-gate } else { 66567c478bd9Sstevel@tonic-gate assert(tcp_time_wait_tail != NULL); 66577c478bd9Sstevel@tonic-gate assert(tcp_time_wait_tail->tcp_state == TCPS_TIME_WAIT); 66587c478bd9Sstevel@tonic-gate tcp_time_wait_tail->tcp_time_wait_next = tcp; 66597c478bd9Sstevel@tonic-gate tcp->tcp_time_wait_prev = tcp_time_wait_tail; 66607c478bd9Sstevel@tonic-gate } 66617c478bd9Sstevel@tonic-gate tcp_time_wait_tail = tcp; 66627c478bd9Sstevel@tonic-gate 66637c478bd9Sstevel@tonic-gate /* for ndd stats about compression */ 66647c478bd9Sstevel@tonic-gate tcp_cum_timewait++; 66657c478bd9Sstevel@tonic-gate } 66667c478bd9Sstevel@tonic-gate 66677c478bd9Sstevel@tonic-gate /* 66687c478bd9Sstevel@tonic-gate * Periodic qtimeout routine run on the default queue. 66697c478bd9Sstevel@tonic-gate * Performs 2 functions. 66707c478bd9Sstevel@tonic-gate * 1. Does TIME_WAIT compression on all recently added tcps. List 66717c478bd9Sstevel@tonic-gate * traversal is done backwards from the tail. 66727c478bd9Sstevel@tonic-gate * 2. Blows away all tcps whose TIME_WAIT has expired. List traversal 66737c478bd9Sstevel@tonic-gate * is done forwards from the head. 66747c478bd9Sstevel@tonic-gate */ 66757c478bd9Sstevel@tonic-gate void 66767c478bd9Sstevel@tonic-gate tcp_time_wait_collector(void) 66777c478bd9Sstevel@tonic-gate { 66787c478bd9Sstevel@tonic-gate tcp_t *tcp; 66797c478bd9Sstevel@tonic-gate uint32_t now; 66807c478bd9Sstevel@tonic-gate 66817c478bd9Sstevel@tonic-gate /* 66827c478bd9Sstevel@tonic-gate * In order to reap time waits reliably, we should use a 66837c478bd9Sstevel@tonic-gate * source of time that is not adjustable by the user 66847c478bd9Sstevel@tonic-gate */ 66857c478bd9Sstevel@tonic-gate now = prom_gettime(); 66867c478bd9Sstevel@tonic-gate while ((tcp = tcp_time_wait_head) != NULL) { 66877c478bd9Sstevel@tonic-gate /* 66887c478bd9Sstevel@tonic-gate * Compare times using modular arithmetic, since 66897c478bd9Sstevel@tonic-gate * lbolt can wrapover. 66907c478bd9Sstevel@tonic-gate */ 66917c478bd9Sstevel@tonic-gate if ((int32_t)(now - tcp->tcp_time_wait_expire) < 0) { 66927c478bd9Sstevel@tonic-gate break; 66937c478bd9Sstevel@tonic-gate } 66947c478bd9Sstevel@tonic-gate /* 66957c478bd9Sstevel@tonic-gate * Note that the err must be 0 as there is no socket 66967c478bd9Sstevel@tonic-gate * associated with this TCP... 66977c478bd9Sstevel@tonic-gate */ 66987c478bd9Sstevel@tonic-gate (void) tcp_clean_death(-1, tcp, 0); 66997c478bd9Sstevel@tonic-gate } 67007c478bd9Sstevel@tonic-gate /* Schedule next run time. 
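	 * The collector re-arms itself 10000 ms after each pass; assuming
	 * prom_gettime() counts in milliseconds, that is a 10 second
	 * interval.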
	 */
67017c478bd9Sstevel@tonic-gate 	tcp_time_wait_runtime = prom_gettime() + 10000;
67027c478bd9Sstevel@tonic-gate }
67037c478bd9Sstevel@tonic-gate 
67047c478bd9Sstevel@tonic-gate void
67057c478bd9Sstevel@tonic-gate tcp_time_wait_report(void)
67067c478bd9Sstevel@tonic-gate {
67077c478bd9Sstevel@tonic-gate 	tcp_t *tcp;
67087c478bd9Sstevel@tonic-gate 
67097c478bd9Sstevel@tonic-gate 	printf("Current time %u\n", prom_gettime());
67107c478bd9Sstevel@tonic-gate 	for (tcp = tcp_time_wait_head; tcp != NULL;
67117c478bd9Sstevel@tonic-gate 	    tcp = tcp->tcp_time_wait_next) {
67127c478bd9Sstevel@tonic-gate 		printf("%s expires at %u\n", tcp_display(tcp, NULL,
67137c478bd9Sstevel@tonic-gate 		    DISP_ADDR_AND_PORT), tcp->tcp_time_wait_expire);
67147c478bd9Sstevel@tonic-gate 	}
67157c478bd9Sstevel@tonic-gate }
67167c478bd9Sstevel@tonic-gate 
67177c478bd9Sstevel@tonic-gate /*
67187c478bd9Sstevel@tonic-gate  * Send up all messages queued on tcp_rcv_list.
67197c478bd9Sstevel@tonic-gate  * Have to set tcp_co_norm since we use putnext.
67207c478bd9Sstevel@tonic-gate  */
67217c478bd9Sstevel@tonic-gate static void
67227c478bd9Sstevel@tonic-gate tcp_rcv_drain(int sock_id, tcp_t *tcp)
67237c478bd9Sstevel@tonic-gate {
67247c478bd9Sstevel@tonic-gate 	mblk_t *mp;
67257c478bd9Sstevel@tonic-gate 	struct inetgram *in_gram;
67267c478bd9Sstevel@tonic-gate 	mblk_t *in_mp;
67277c478bd9Sstevel@tonic-gate 	int len;
67287c478bd9Sstevel@tonic-gate 
67297c478bd9Sstevel@tonic-gate 	/* Don't drain if the app has not finished reading all the data. */
67307c478bd9Sstevel@tonic-gate 	if (sockets[sock_id].so_rcvbuf <= 0)
67317c478bd9Sstevel@tonic-gate 		return;
67327c478bd9Sstevel@tonic-gate 
67337c478bd9Sstevel@tonic-gate 	/* We might have come here just to update the rwnd */
67347c478bd9Sstevel@tonic-gate 	if (tcp->tcp_rcv_list == NULL)
67357c478bd9Sstevel@tonic-gate 		goto win_update;
67367c478bd9Sstevel@tonic-gate 
67377c478bd9Sstevel@tonic-gate 	if ((in_gram = (struct inetgram *)bkmem_zalloc(
67387c478bd9Sstevel@tonic-gate 	    sizeof (struct inetgram))) == NULL) {
67397c478bd9Sstevel@tonic-gate 		return;
67407c478bd9Sstevel@tonic-gate 	}
67417c478bd9Sstevel@tonic-gate 	if ((in_mp = allocb(tcp->tcp_rcv_cnt, 0)) == NULL) {
67427c478bd9Sstevel@tonic-gate 		bkmem_free((caddr_t)in_gram, sizeof (struct inetgram));
67437c478bd9Sstevel@tonic-gate 		return;
67447c478bd9Sstevel@tonic-gate 	}
67457c478bd9Sstevel@tonic-gate 	in_gram->igm_level = APP_LVL;
67467c478bd9Sstevel@tonic-gate 	in_gram->igm_mp = in_mp;
67477c478bd9Sstevel@tonic-gate 	in_gram->igm_id = 0;
67487c478bd9Sstevel@tonic-gate 
67497c478bd9Sstevel@tonic-gate 	while ((mp = tcp->tcp_rcv_list) != NULL) {
67507c478bd9Sstevel@tonic-gate 		tcp->tcp_rcv_list = mp->b_cont;
67517c478bd9Sstevel@tonic-gate 		len = mp->b_wptr - mp->b_rptr;
67527c478bd9Sstevel@tonic-gate 		bcopy(mp->b_rptr, in_mp->b_wptr, len);
67537c478bd9Sstevel@tonic-gate 		in_mp->b_wptr += len;
67547c478bd9Sstevel@tonic-gate 		freeb(mp);
67557c478bd9Sstevel@tonic-gate 	}
67567c478bd9Sstevel@tonic-gate 
67577c478bd9Sstevel@tonic-gate 	tcp->tcp_rcv_last_tail = NULL;
67587c478bd9Sstevel@tonic-gate 	tcp->tcp_rcv_cnt = 0;
67597c478bd9Sstevel@tonic-gate 	add_grams(&sockets[sock_id].inq, in_gram);
67607c478bd9Sstevel@tonic-gate 
67617c478bd9Sstevel@tonic-gate 	/* This means that so_rcvbuf can be less than 0. */
67627c478bd9Sstevel@tonic-gate 	sockets[sock_id].so_rcvbuf -= in_mp->b_wptr - in_mp->b_rptr;
67637c478bd9Sstevel@tonic-gate win_update:
67647c478bd9Sstevel@tonic-gate 	/*
67657c478bd9Sstevel@tonic-gate 	 * Increase the receive window to max.  But we need to do receiver
67667c478bd9Sstevel@tonic-gate 	 * SWS avoidance.  This means that we need to check that the increase
67677c478bd9Sstevel@tonic-gate 	 * of the receive window is at least 1 MSS.
67687c478bd9Sstevel@tonic-gate 	 */
67697c478bd9Sstevel@tonic-gate 	if (sockets[sock_id].so_rcvbuf > 0 &&
67707c478bd9Sstevel@tonic-gate 	    (tcp->tcp_rwnd_max - tcp->tcp_rwnd >= tcp->tcp_mss)) {
67717c478bd9Sstevel@tonic-gate 		tcp->tcp_rwnd = tcp->tcp_rwnd_max;
67727c478bd9Sstevel@tonic-gate 		U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws,
67737c478bd9Sstevel@tonic-gate 		    tcp->tcp_tcph->th_win);
67747c478bd9Sstevel@tonic-gate 	}
67757c478bd9Sstevel@tonic-gate }
67767c478bd9Sstevel@tonic-gate 
67777c478bd9Sstevel@tonic-gate /*
67787c478bd9Sstevel@tonic-gate  * Wrapper for recvfrom to call
67797c478bd9Sstevel@tonic-gate  */
67807c478bd9Sstevel@tonic-gate void
67817c478bd9Sstevel@tonic-gate tcp_rcv_drain_sock(int sock_id)
67827c478bd9Sstevel@tonic-gate {
67837c478bd9Sstevel@tonic-gate 	tcp_t *tcp;
67847c478bd9Sstevel@tonic-gate 	if ((tcp = sockets[sock_id].pcb) == NULL)
67857c478bd9Sstevel@tonic-gate 		return;
67867c478bd9Sstevel@tonic-gate 	tcp_rcv_drain(sock_id, tcp);
67877c478bd9Sstevel@tonic-gate }
67887c478bd9Sstevel@tonic-gate 
67897c478bd9Sstevel@tonic-gate /*
67907c478bd9Sstevel@tonic-gate  * If the inq == NULL and the tcp_rcv_list != NULL, we have data that
67917c478bd9Sstevel@tonic-gate  * recvfrom could read.  Place a magic message in the inq to let recvfrom
67927c478bd9Sstevel@tonic-gate  * know that it needs to call tcp_rcv_drain_sock to pullup the data.
67937c478bd9Sstevel@tonic-gate  */
67947c478bd9Sstevel@tonic-gate static void
67957c478bd9Sstevel@tonic-gate tcp_drain_needed(int sock_id, tcp_t *tcp)
67967c478bd9Sstevel@tonic-gate {
67977c478bd9Sstevel@tonic-gate 	struct inetgram *in_gram;
67987c478bd9Sstevel@tonic-gate #ifdef DEBUG
67997c478bd9Sstevel@tonic-gate 	printf("tcp_drain_needed: inq %x, tcp_rcv_list %x\n",
68007c478bd9Sstevel@tonic-gate 	    sockets[sock_id].inq, tcp->tcp_rcv_list);
68017c478bd9Sstevel@tonic-gate #endif
68027c478bd9Sstevel@tonic-gate 	if ((sockets[sock_id].inq != NULL) ||
68037c478bd9Sstevel@tonic-gate 	    (tcp->tcp_rcv_list == NULL))
68047c478bd9Sstevel@tonic-gate 		return;
68057c478bd9Sstevel@tonic-gate 
68067c478bd9Sstevel@tonic-gate 	if ((in_gram = (struct inetgram *)bkmem_zalloc(
68077c478bd9Sstevel@tonic-gate 	    sizeof (struct inetgram))) == NULL)
68087c478bd9Sstevel@tonic-gate 		return;
68097c478bd9Sstevel@tonic-gate 
68107c478bd9Sstevel@tonic-gate 	in_gram->igm_level = APP_LVL;
68117c478bd9Sstevel@tonic-gate 	in_gram->igm_mp = NULL;
68127c478bd9Sstevel@tonic-gate 	in_gram->igm_id = TCP_CALLB_MAGIC_ID;
68137c478bd9Sstevel@tonic-gate 
68147c478bd9Sstevel@tonic-gate 	add_grams(&sockets[sock_id].inq, in_gram);
68157c478bd9Sstevel@tonic-gate }
68167c478bd9Sstevel@tonic-gate 
68177c478bd9Sstevel@tonic-gate /*
68187c478bd9Sstevel@tonic-gate  * Queue data on tcp_rcv_list which is a b_next chain.
68197c478bd9Sstevel@tonic-gate  * Each element of the chain is a b_cont chain.
68207c478bd9Sstevel@tonic-gate  *
68217c478bd9Sstevel@tonic-gate  * M_DATA messages are added to the current element.
68227c478bd9Sstevel@tonic-gate  * Other messages are added as new (b_next) elements.
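 * In this standalone version the list is in practice a single b_cont
 * chain appended through tcp_rcv_last_tail.  Each enqueue also charges
 * seg_len against tcp_rwnd and advertises the shrunken window in th_win;
 * tcp_rcv_drain() above only re-opens the window once it can grow by at
 * least one MSS (receiver SWS avoidance).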
68237c478bd9Sstevel@tonic-gate */ 68247c478bd9Sstevel@tonic-gate static void 68257c478bd9Sstevel@tonic-gate tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len) 68267c478bd9Sstevel@tonic-gate { 68277c478bd9Sstevel@tonic-gate assert(seg_len == msgdsize(mp)); 68287c478bd9Sstevel@tonic-gate if (tcp->tcp_rcv_list == NULL) { 68297c478bd9Sstevel@tonic-gate tcp->tcp_rcv_list = mp; 68307c478bd9Sstevel@tonic-gate } else { 68317c478bd9Sstevel@tonic-gate tcp->tcp_rcv_last_tail->b_cont = mp; 68327c478bd9Sstevel@tonic-gate } 68337c478bd9Sstevel@tonic-gate while (mp->b_cont) 68347c478bd9Sstevel@tonic-gate mp = mp->b_cont; 68357c478bd9Sstevel@tonic-gate tcp->tcp_rcv_last_tail = mp; 68367c478bd9Sstevel@tonic-gate tcp->tcp_rcv_cnt += seg_len; 68377c478bd9Sstevel@tonic-gate tcp->tcp_rwnd -= seg_len; 68387c478bd9Sstevel@tonic-gate #ifdef DEBUG 68397c478bd9Sstevel@tonic-gate printf("tcp_rcv_enqueue rwnd %d\n", tcp->tcp_rwnd); 68407c478bd9Sstevel@tonic-gate #endif 68417c478bd9Sstevel@tonic-gate U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, tcp->tcp_tcph->th_win); 68427c478bd9Sstevel@tonic-gate } 68437c478bd9Sstevel@tonic-gate 68447c478bd9Sstevel@tonic-gate /* The minimum of smoothed mean deviation in RTO calculation. */ 68457c478bd9Sstevel@tonic-gate #define TCP_SD_MIN 400 68467c478bd9Sstevel@tonic-gate 68477c478bd9Sstevel@tonic-gate /* 68487c478bd9Sstevel@tonic-gate * Set RTO for this connection. The formula is from Jacobson and Karels' 68497c478bd9Sstevel@tonic-gate * "Congestion Avoidance and Control" in SIGCOMM '88. The variable names 68507c478bd9Sstevel@tonic-gate * are the same as those in Appendix A.2 of that paper. 68517c478bd9Sstevel@tonic-gate * 68527c478bd9Sstevel@tonic-gate * m = new measurement 68537c478bd9Sstevel@tonic-gate * sa = smoothed RTT average (8 * average estimates). 68547c478bd9Sstevel@tonic-gate * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates). 68557c478bd9Sstevel@tonic-gate */ 68567c478bd9Sstevel@tonic-gate static void 68577c478bd9Sstevel@tonic-gate tcp_set_rto(tcp_t *tcp, int32_t rtt) 68587c478bd9Sstevel@tonic-gate { 68597c478bd9Sstevel@tonic-gate int32_t m = rtt; 68607c478bd9Sstevel@tonic-gate uint32_t sa = tcp->tcp_rtt_sa; 68617c478bd9Sstevel@tonic-gate uint32_t sv = tcp->tcp_rtt_sd; 68627c478bd9Sstevel@tonic-gate uint32_t rto; 68637c478bd9Sstevel@tonic-gate 68647c478bd9Sstevel@tonic-gate BUMP_MIB(tcp_mib.tcpRttUpdate); 68657c478bd9Sstevel@tonic-gate tcp->tcp_rtt_update++; 68667c478bd9Sstevel@tonic-gate 68677c478bd9Sstevel@tonic-gate /* tcp_rtt_sa is not 0 means this is a new sample. */ 68687c478bd9Sstevel@tonic-gate if (sa != 0) { 68697c478bd9Sstevel@tonic-gate /* 68707c478bd9Sstevel@tonic-gate * Update average estimator: 68717c478bd9Sstevel@tonic-gate * new rtt = 7/8 old rtt + 1/8 Error 68727c478bd9Sstevel@tonic-gate */ 68737c478bd9Sstevel@tonic-gate 68747c478bd9Sstevel@tonic-gate /* m is now Error in estimate. */ 68757c478bd9Sstevel@tonic-gate m -= sa >> 3; 68767c478bd9Sstevel@tonic-gate if ((int32_t)(sa += m) <= 0) { 68777c478bd9Sstevel@tonic-gate /* 68787c478bd9Sstevel@tonic-gate * Don't allow the smoothed average to be negative. 68797c478bd9Sstevel@tonic-gate * We use 0 to denote reinitialization of the 68807c478bd9Sstevel@tonic-gate * variables. 
68817c478bd9Sstevel@tonic-gate 			 */
68827c478bd9Sstevel@tonic-gate 			sa = 1;
68837c478bd9Sstevel@tonic-gate 		}
68847c478bd9Sstevel@tonic-gate 
68857c478bd9Sstevel@tonic-gate 		/*
68867c478bd9Sstevel@tonic-gate 		 * Update deviation estimator:
68877c478bd9Sstevel@tonic-gate 		 *	new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev)
68887c478bd9Sstevel@tonic-gate 		 */
68897c478bd9Sstevel@tonic-gate 		if (m < 0)
68907c478bd9Sstevel@tonic-gate 			m = -m;
68917c478bd9Sstevel@tonic-gate 		m -= sv >> 2;
68927c478bd9Sstevel@tonic-gate 		sv += m;
68937c478bd9Sstevel@tonic-gate 	} else {
68947c478bd9Sstevel@tonic-gate 		/*
68957c478bd9Sstevel@tonic-gate 		 * This follows BSD's implementation.  So the reinitialized
68967c478bd9Sstevel@tonic-gate 		 * RTO is 3 * m.  We cannot go less than 2 because if the
68977c478bd9Sstevel@tonic-gate 		 * link is bandwidth dominated, doubling the window size
68987c478bd9Sstevel@tonic-gate 		 * during slow start means doubling the RTT.  We want to be
68997c478bd9Sstevel@tonic-gate 		 * more conservative when we reinitialize our estimates.  3
69007c478bd9Sstevel@tonic-gate 		 * is just a convenient number.
69017c478bd9Sstevel@tonic-gate 		 */
69027c478bd9Sstevel@tonic-gate 		sa = m << 3;
69037c478bd9Sstevel@tonic-gate 		sv = m << 1;
69047c478bd9Sstevel@tonic-gate 	}
69057c478bd9Sstevel@tonic-gate 	if (sv < TCP_SD_MIN) {
69067c478bd9Sstevel@tonic-gate 		/*
69077c478bd9Sstevel@tonic-gate 		 * We do not know whether sa captures the delayed ACK
69087c478bd9Sstevel@tonic-gate 		 * effect, since in a long train of segments a receiver
69097c478bd9Sstevel@tonic-gate 		 * does not delay its ACKs.  So set the minimum of sv
69107c478bd9Sstevel@tonic-gate 		 * to be TCP_SD_MIN, which defaults to 400 ms, twice
69117c478bd9Sstevel@tonic-gate 		 * the BSD DATO.  That means the minimum of the mean
69127c478bd9Sstevel@tonic-gate 		 * deviation is 100 ms.
69137c478bd9Sstevel@tonic-gate 		 *
69147c478bd9Sstevel@tonic-gate 		 */
69157c478bd9Sstevel@tonic-gate 		sv = TCP_SD_MIN;
69167c478bd9Sstevel@tonic-gate 	}
69177c478bd9Sstevel@tonic-gate 	tcp->tcp_rtt_sa = sa;
69187c478bd9Sstevel@tonic-gate 	tcp->tcp_rtt_sd = sv;
69197c478bd9Sstevel@tonic-gate 	/*
69207c478bd9Sstevel@tonic-gate 	 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv)
69217c478bd9Sstevel@tonic-gate 	 *
69227c478bd9Sstevel@tonic-gate 	 * Add tcp_rexmit_interval_extra in case of an extreme environment
69237c478bd9Sstevel@tonic-gate 	 * where the algorithm fails to work.  The default value of
69247c478bd9Sstevel@tonic-gate 	 * tcp_rexmit_interval_extra should be 0.
69257c478bd9Sstevel@tonic-gate 	 *
69267c478bd9Sstevel@tonic-gate 	 * As we use a finer grained clock than BSD and update
69277c478bd9Sstevel@tonic-gate 	 * RTO for every ACK, add in another .25 of RTT to the
69287c478bd9Sstevel@tonic-gate 	 * deviation of RTO to accommodate burstiness of 1/4 of
69297c478bd9Sstevel@tonic-gate 	 * window size.
69307c478bd9Sstevel@tonic-gate 	 */
69317c478bd9Sstevel@tonic-gate 	rto = (sa >> 3) + sv + tcp_rexmit_interval_extra + (sa >> 5);
69327c478bd9Sstevel@tonic-gate 
69337c478bd9Sstevel@tonic-gate 	if (rto > tcp_rexmit_interval_max) {
69347c478bd9Sstevel@tonic-gate 		tcp->tcp_rto = tcp_rexmit_interval_max;
69357c478bd9Sstevel@tonic-gate 	} else if (rto < tcp_rexmit_interval_min) {
69367c478bd9Sstevel@tonic-gate 		tcp->tcp_rto = tcp_rexmit_interval_min;
69377c478bd9Sstevel@tonic-gate 	} else {
69387c478bd9Sstevel@tonic-gate 		tcp->tcp_rto = rto;
69397c478bd9Sstevel@tonic-gate 	}
69407c478bd9Sstevel@tonic-gate 
69417c478bd9Sstevel@tonic-gate 	/* Now, we can reset tcp_timer_backoff to use the new RTO...
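	 * Illustrative figures for the formula above (not taken from the
	 * original comments): sa = 5600 and sv = 480, i.e. a smoothed RTT of
	 * 700 ms and a mean deviation of 120 ms, with
	 * tcp_rexmit_interval_extra at its default of 0, give
	 * rto = 700 + 480 + 0 + 175 = 1355 ms before being clamped to the
	 * [tcp_rexmit_interval_min, tcp_rexmit_interval_max] range as above.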
*/ 69427c478bd9Sstevel@tonic-gate tcp->tcp_timer_backoff = 0; 69437c478bd9Sstevel@tonic-gate } 69447c478bd9Sstevel@tonic-gate 69457c478bd9Sstevel@tonic-gate /* 69467c478bd9Sstevel@tonic-gate * Initiate closedown sequence on an active connection. 69477c478bd9Sstevel@tonic-gate * Return value zero for OK return, non-zero for error return. 69487c478bd9Sstevel@tonic-gate */ 69497c478bd9Sstevel@tonic-gate static int 69507c478bd9Sstevel@tonic-gate tcp_xmit_end(tcp_t *tcp, int sock_id) 69517c478bd9Sstevel@tonic-gate { 69527c478bd9Sstevel@tonic-gate mblk_t *mp; 69537c478bd9Sstevel@tonic-gate 69547c478bd9Sstevel@tonic-gate if (tcp->tcp_state < TCPS_SYN_RCVD || 69557c478bd9Sstevel@tonic-gate tcp->tcp_state > TCPS_CLOSE_WAIT) { 69567c478bd9Sstevel@tonic-gate /* 69577c478bd9Sstevel@tonic-gate * Invalid state, only states TCPS_SYN_RCVD, 69587c478bd9Sstevel@tonic-gate * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid 69597c478bd9Sstevel@tonic-gate */ 69607c478bd9Sstevel@tonic-gate return (-1); 69617c478bd9Sstevel@tonic-gate } 69627c478bd9Sstevel@tonic-gate 69637c478bd9Sstevel@tonic-gate tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent; 69647c478bd9Sstevel@tonic-gate tcp->tcp_valid_bits |= TCP_FSS_VALID; 69657c478bd9Sstevel@tonic-gate /* 69667c478bd9Sstevel@tonic-gate * If there is nothing more unsent, send the FIN now. 69677c478bd9Sstevel@tonic-gate * Otherwise, it will go out with the last segment. 69687c478bd9Sstevel@tonic-gate */ 69697c478bd9Sstevel@tonic-gate if (tcp->tcp_unsent == 0) { 69707c478bd9Sstevel@tonic-gate mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, 69717c478bd9Sstevel@tonic-gate tcp->tcp_fss, B_FALSE, NULL, B_FALSE); 69727c478bd9Sstevel@tonic-gate 69737c478bd9Sstevel@tonic-gate if (mp != NULL) { 69747c478bd9Sstevel@tonic-gate /* Dump the packet when debugging. */ 69757c478bd9Sstevel@tonic-gate TCP_DUMP_PACKET("tcp_xmit_end", mp); 69767c478bd9Sstevel@tonic-gate (void) ipv4_tcp_output(sock_id, mp); 69777c478bd9Sstevel@tonic-gate freeb(mp); 69787c478bd9Sstevel@tonic-gate } else { 69797c478bd9Sstevel@tonic-gate /* 69807c478bd9Sstevel@tonic-gate * Couldn't allocate msg. Pretend we got it out. 69817c478bd9Sstevel@tonic-gate * Wait for rexmit timeout. 69827c478bd9Sstevel@tonic-gate */ 69837c478bd9Sstevel@tonic-gate tcp->tcp_snxt = tcp->tcp_fss + 1; 69847c478bd9Sstevel@tonic-gate TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 69857c478bd9Sstevel@tonic-gate } 69867c478bd9Sstevel@tonic-gate 69877c478bd9Sstevel@tonic-gate /* 69887c478bd9Sstevel@tonic-gate * If needed, update tcp_rexmit_snxt as tcp_snxt is 69897c478bd9Sstevel@tonic-gate * changed. 
69907c478bd9Sstevel@tonic-gate */ 69917c478bd9Sstevel@tonic-gate if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) { 69927c478bd9Sstevel@tonic-gate tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 69937c478bd9Sstevel@tonic-gate } 69947c478bd9Sstevel@tonic-gate } else { 69957c478bd9Sstevel@tonic-gate tcp_wput_data(tcp, NULL, B_FALSE); 69967c478bd9Sstevel@tonic-gate } 69977c478bd9Sstevel@tonic-gate 69987c478bd9Sstevel@tonic-gate return (0); 69997c478bd9Sstevel@tonic-gate } 70007c478bd9Sstevel@tonic-gate 70017c478bd9Sstevel@tonic-gate int 70027c478bd9Sstevel@tonic-gate tcp_opt_set(tcp_t *tcp, int level, int option, const void *optval, 70037c478bd9Sstevel@tonic-gate socklen_t optlen) 70047c478bd9Sstevel@tonic-gate { 70057c478bd9Sstevel@tonic-gate switch (level) { 70067c478bd9Sstevel@tonic-gate case SOL_SOCKET: { 70077c478bd9Sstevel@tonic-gate switch (option) { 70087c478bd9Sstevel@tonic-gate case SO_RCVBUF: 70097c478bd9Sstevel@tonic-gate if (optlen == sizeof (int)) { 70107c478bd9Sstevel@tonic-gate int val = *(int *)optval; 70117c478bd9Sstevel@tonic-gate 70127c478bd9Sstevel@tonic-gate if (val > tcp_max_buf) { 70137c478bd9Sstevel@tonic-gate errno = ENOBUFS; 70147c478bd9Sstevel@tonic-gate break; 70157c478bd9Sstevel@tonic-gate } 70167c478bd9Sstevel@tonic-gate /* Silently ignore zero */ 70177c478bd9Sstevel@tonic-gate if (val != 0) { 70187c478bd9Sstevel@tonic-gate val = MSS_ROUNDUP(val, tcp->tcp_mss); 70197c478bd9Sstevel@tonic-gate (void) tcp_rwnd_set(tcp, val); 70207c478bd9Sstevel@tonic-gate } 70217c478bd9Sstevel@tonic-gate } else { 70227c478bd9Sstevel@tonic-gate errno = EINVAL; 70237c478bd9Sstevel@tonic-gate } 70247c478bd9Sstevel@tonic-gate break; 70257c478bd9Sstevel@tonic-gate case SO_SNDBUF: 70267c478bd9Sstevel@tonic-gate if (optlen == sizeof (int)) { 70277c478bd9Sstevel@tonic-gate tcp->tcp_xmit_hiwater = *(int *)optval; 70287c478bd9Sstevel@tonic-gate if (tcp->tcp_xmit_hiwater > tcp_max_buf) 70297c478bd9Sstevel@tonic-gate tcp->tcp_xmit_hiwater = tcp_max_buf; 70307c478bd9Sstevel@tonic-gate } else { 70317c478bd9Sstevel@tonic-gate errno = EINVAL; 70327c478bd9Sstevel@tonic-gate } 70337c478bd9Sstevel@tonic-gate break; 70347c478bd9Sstevel@tonic-gate case SO_LINGER: 70357c478bd9Sstevel@tonic-gate if (optlen == sizeof (struct linger)) { 70367c478bd9Sstevel@tonic-gate struct linger *lgr = (struct linger *)optval; 70377c478bd9Sstevel@tonic-gate 70387c478bd9Sstevel@tonic-gate if (lgr->l_onoff) { 70397c478bd9Sstevel@tonic-gate tcp->tcp_linger = 1; 70407c478bd9Sstevel@tonic-gate tcp->tcp_lingertime = lgr->l_linger; 70417c478bd9Sstevel@tonic-gate } else { 70427c478bd9Sstevel@tonic-gate tcp->tcp_linger = 0; 70437c478bd9Sstevel@tonic-gate tcp->tcp_lingertime = 0; 70447c478bd9Sstevel@tonic-gate } 70457c478bd9Sstevel@tonic-gate } else { 70467c478bd9Sstevel@tonic-gate errno = EINVAL; 70477c478bd9Sstevel@tonic-gate } 70487c478bd9Sstevel@tonic-gate break; 70497c478bd9Sstevel@tonic-gate default: 70507c478bd9Sstevel@tonic-gate errno = ENOPROTOOPT; 70517c478bd9Sstevel@tonic-gate break; 70527c478bd9Sstevel@tonic-gate } 70537c478bd9Sstevel@tonic-gate break; 70547c478bd9Sstevel@tonic-gate } /* case SOL_SOCKET */ 70557c478bd9Sstevel@tonic-gate case IPPROTO_TCP: { 70567c478bd9Sstevel@tonic-gate switch (option) { 70577c478bd9Sstevel@tonic-gate default: 70587c478bd9Sstevel@tonic-gate errno = ENOPROTOOPT; 70597c478bd9Sstevel@tonic-gate break; 70607c478bd9Sstevel@tonic-gate } 70617c478bd9Sstevel@tonic-gate break; 70627c478bd9Sstevel@tonic-gate } /* case IPPROTO_TCP */ 70637c478bd9Sstevel@tonic-gate case IPPROTO_IP: { 
70647c478bd9Sstevel@tonic-gate switch (option) { 70657c478bd9Sstevel@tonic-gate default: 70667c478bd9Sstevel@tonic-gate errno = ENOPROTOOPT; 70677c478bd9Sstevel@tonic-gate break; 70687c478bd9Sstevel@tonic-gate } 70697c478bd9Sstevel@tonic-gate break; 70707c478bd9Sstevel@tonic-gate } /* case IPPROTO_IP */ 70717c478bd9Sstevel@tonic-gate default: 70727c478bd9Sstevel@tonic-gate errno = ENOPROTOOPT; 70737c478bd9Sstevel@tonic-gate break; 70747c478bd9Sstevel@tonic-gate } /* switch (level) */ 70757c478bd9Sstevel@tonic-gate 70767c478bd9Sstevel@tonic-gate if (errno != 0) 70777c478bd9Sstevel@tonic-gate return (-1); 70787c478bd9Sstevel@tonic-gate else 70797c478bd9Sstevel@tonic-gate return (0); 70807c478bd9Sstevel@tonic-gate } 7081
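/*
 * Usage sketch for tcp_opt_set() (illustrative only; the variable names
 * below are invented for this note and the fragment is not part of the
 * protocol code):
 *
 *	int rcv = 48 * 1024;
 *	struct linger lgr = { 1, 0 };
 *
 *	if (tcp_opt_set(tcp, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof (rcv)) < 0)
 *		printf("SO_RCVBUF failed, errno %d\n", errno);
 *	(void) tcp_opt_set(tcp, SOL_SOCKET, SO_LINGER, &lgr, sizeof (lgr));
 *
 * An SO_RCVBUF value larger than tcp_max_buf fails with ENOBUFS, zero is
 * silently ignored, and anything else is rounded up to a multiple of the
 * MSS by MSS_ROUNDUP() before tcp_rwnd_set() is called.  Unsupported
 * options at any level fail with ENOPROTOOPT.
 */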