1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 26 /* This file contains all TCP input processing functions. */ 27 28 #include <sys/types.h> 29 #include <sys/stream.h> 30 #include <sys/strsun.h> 31 #include <sys/strsubr.h> 32 #include <sys/stropts.h> 33 #include <sys/strlog.h> 34 #define _SUN_TPI_VERSION 2 35 #include <sys/tihdr.h> 36 #include <sys/suntpi.h> 37 #include <sys/xti_inet.h> 38 #include <sys/squeue_impl.h> 39 #include <sys/squeue.h> 40 #include <sys/tsol/tnet.h> 41 42 #include <inet/common.h> 43 #include <inet/ip.h> 44 #include <inet/tcp.h> 45 #include <inet/tcp_impl.h> 46 #include <inet/tcp_cluster.h> 47 #include <inet/proto_set.h> 48 #include <inet/ipsec_impl.h> 49 50 /* 51 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing 52 */ 53 54 #ifdef _BIG_ENDIAN 55 #define TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 56 (TCPOPT_TSTAMP << 8) | 10) 57 #else 58 #define TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \ 59 (TCPOPT_NOP << 8) | TCPOPT_NOP) 60 #endif 61 62 /* 63 * Flags returned from tcp_parse_options. 64 */ 65 #define TCP_OPT_MSS_PRESENT 1 66 #define TCP_OPT_WSCALE_PRESENT 2 67 #define TCP_OPT_TSTAMP_PRESENT 4 68 #define TCP_OPT_SACK_OK_PRESENT 8 69 #define TCP_OPT_SACK_PRESENT 16 70 71 /* 72 * PAWS needs a timer for 24 days. This is the number of ticks in 24 days 73 */ 74 #define PAWS_TIMEOUT ((clock_t)(24*24*60*60*hz)) 75 76 /* 77 * Since tcp_listener is not cleared atomically with tcp_detached 78 * being cleared we need this extra bit to tell a detached connection 79 * apart from one that is in the process of being accepted. 80 */ 81 #define TCP_IS_DETACHED_NONEAGER(tcp) \ 82 (TCP_IS_DETACHED(tcp) && \ 83 (!(tcp)->tcp_hard_binding)) 84 85 /* 86 * Steps to do when a tcp_t moves to TIME-WAIT state. 87 * 88 * This connection is done, we don't need to account for it. Decrement 89 * the listener connection counter if needed. 90 * 91 * Decrement the connection counter of the stack. Note that this counter 92 * is per CPU. So the total number of connections in a stack is the sum of all 93 * of them. Since there is no lock for handling all of them exclusively, the 94 * resulting sum is only an approximation. 95 * 96 * Unconditionally clear the exclusive binding bit so this TIME-WAIT 97 * connection won't interfere with new ones. 98 * 99 * Start the TIME-WAIT timer. If upper layer has not closed the connection, 100 * the timer is handled within the context of this tcp_t. When the timer 101 * fires, tcp_clean_death() is called. 
If upper layer closes the connection 102 * during this period, tcp_time_wait_append() will be called to add this 103 * tcp_t to the global TIME-WAIT list. Note that this means that the 104 * actual wait time in TIME-WAIT state will be longer than the 105 * tcps_time_wait_interval since the period before upper layer closes the 106 * connection is not accounted for when tcp_time_wait_append() is called. 107 * 108 * If upper layer has closed the connection, call tcp_time_wait_append() 109 * directly. 110 * 111 */ 112 #define SET_TIME_WAIT(tcps, tcp, connp) \ 113 { \ 114 (tcp)->tcp_state = TCPS_TIME_WAIT; \ 115 if ((tcp)->tcp_listen_cnt != NULL) \ 116 TCP_DECR_LISTEN_CNT(tcp); \ 117 atomic_dec_64( \ 118 (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt); \ 119 (connp)->conn_exclbind = 0; \ 120 if (!TCP_IS_DETACHED(tcp)) { \ 121 TCP_TIMER_RESTART(tcp, (tcps)->tcps_time_wait_interval); \ 122 } else { \ 123 tcp_time_wait_append(tcp); \ 124 TCP_DBGSTAT(tcps, tcp_rput_time_wait); \ 125 } \ 126 } 127 128 /* 129 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more 130 * than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent 131 * data, TCP will not respond with an ACK. RFC 793 requires that 132 * TCP respond with an ACK for such a bogus ACK. By not following 133 * the RFC, we prevent TCP from getting into an ACK storm if somehow 134 * an attacker successfully spoofs an acceptable segment to our 135 * peer; or when our peer is "confused." 136 */ 137 static uint32_t tcp_drop_ack_unsent_cnt = 10; 138 139 /* 140 * The shift factor applied to tcp_mss to decide if the peer sends us a 141 * valid initial receive window. By default, if the peer receive window 142 * is smaller than 1 MSS (shift factor is 0), it is considered invalid. 143 */ 144 static uint32_t tcp_init_wnd_shft = 0; 145 146 /* Process ICMP source quench message or not. */ 147 static boolean_t tcp_icmp_source_quench = B_FALSE; 148 149 static boolean_t tcp_outbound_squeue_switch = B_FALSE; 150 151 static mblk_t *tcp_conn_create_v4(conn_t *, conn_t *, mblk_t *, 152 ip_recv_attr_t *); 153 static mblk_t *tcp_conn_create_v6(conn_t *, conn_t *, mblk_t *, 154 ip_recv_attr_t *); 155 static boolean_t tcp_drop_q0(tcp_t *); 156 static void tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *); 157 static mblk_t *tcp_input_add_ancillary(tcp_t *, mblk_t *, ip_pkt_t *, 158 ip_recv_attr_t *); 159 static void tcp_input_listener(void *, mblk_t *, void *, ip_recv_attr_t *); 160 static int tcp_parse_options(tcpha_t *, tcp_opt_t *); 161 static void tcp_process_options(tcp_t *, tcpha_t *); 162 static mblk_t *tcp_reass(tcp_t *, mblk_t *, uint32_t); 163 static void tcp_reass_elim_overlap(tcp_t *, mblk_t *); 164 static void tcp_rsrv_input(void *, mblk_t *, void *, ip_recv_attr_t *); 165 static void tcp_set_rto(tcp_t *, time_t); 166 static void tcp_setcred_data(mblk_t *, ip_recv_attr_t *); 167 168 extern void tcp_kssl_input(tcp_t *, mblk_t *, cred_t *); 169 170 /* 171 * Set the MSS associated with a particular tcp based on its current value, 172 * and a new one passed in. Observe minimums and maximums, and reset other 173 * state variables that we want to view as multiples of MSS. 174 * 175 * The value of MSS could be either increased or decreased.
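 * For example (illustrative numbers, not taken from the source): with an old tcp_mss of 1460, a tcp_cwnd of 8760 and a new mss of 1220, the cwnd update in the function below rescales tcp_cwnd to (8760 / 1460) * 1220 = 7320, i.e. the same six segments' worth of data, so the sending rate implied by the old window is preserved.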
176 */ 177 void 178 tcp_mss_set(tcp_t *tcp, uint32_t mss) 179 { 180 uint32_t mss_max; 181 tcp_stack_t *tcps = tcp->tcp_tcps; 182 conn_t *connp = tcp->tcp_connp; 183 184 if (connp->conn_ipversion == IPV4_VERSION) 185 mss_max = tcps->tcps_mss_max_ipv4; 186 else 187 mss_max = tcps->tcps_mss_max_ipv6; 188 189 if (mss < tcps->tcps_mss_min) 190 mss = tcps->tcps_mss_min; 191 if (mss > mss_max) 192 mss = mss_max; 193 /* 194 * Unless naglim has been set by our client to 195 * a non-mss value, force naglim to track mss. 196 * This can help to aggregate small writes. 197 */ 198 if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim) 199 tcp->tcp_naglim = mss; 200 /* 201 * TCP should be able to buffer at least 4 MSS data for obvious 202 * performance reason. 203 */ 204 if ((mss << 2) > connp->conn_sndbuf) 205 connp->conn_sndbuf = mss << 2; 206 207 /* 208 * Set the send lowater to at least twice of MSS. 209 */ 210 if ((mss << 1) > connp->conn_sndlowat) 211 connp->conn_sndlowat = mss << 1; 212 213 /* 214 * Update tcp_cwnd according to the new value of MSS. Keep the 215 * previous ratio to preserve the transmit rate. 216 */ 217 tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss; 218 tcp->tcp_cwnd_cnt = 0; 219 220 tcp->tcp_mss = mss; 221 (void) tcp_maxpsz_set(tcp, B_TRUE); 222 } 223 224 /* 225 * Extract option values from a tcp header. We put any found values into the 226 * tcpopt struct and return a bitmask saying which options were found. 227 */ 228 static int 229 tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt) 230 { 231 uchar_t *endp; 232 int len; 233 uint32_t mss; 234 uchar_t *up = (uchar_t *)tcpha; 235 int found = 0; 236 int32_t sack_len; 237 tcp_seq sack_begin, sack_end; 238 tcp_t *tcp; 239 240 endp = up + TCP_HDR_LENGTH(tcpha); 241 up += TCP_MIN_HEADER_LENGTH; 242 while (up < endp) { 243 len = endp - up; 244 switch (*up) { 245 case TCPOPT_EOL: 246 break; 247 248 case TCPOPT_NOP: 249 up++; 250 continue; 251 252 case TCPOPT_MAXSEG: 253 if (len < TCPOPT_MAXSEG_LEN || 254 up[1] != TCPOPT_MAXSEG_LEN) 255 break; 256 257 mss = BE16_TO_U16(up+2); 258 /* Caller must handle tcp_mss_min and tcp_mss_max_* */ 259 tcpopt->tcp_opt_mss = mss; 260 found |= TCP_OPT_MSS_PRESENT; 261 262 up += TCPOPT_MAXSEG_LEN; 263 continue; 264 265 case TCPOPT_WSCALE: 266 if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN) 267 break; 268 269 if (up[2] > TCP_MAX_WINSHIFT) 270 tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT; 271 else 272 tcpopt->tcp_opt_wscale = up[2]; 273 found |= TCP_OPT_WSCALE_PRESENT; 274 275 up += TCPOPT_WS_LEN; 276 continue; 277 278 case TCPOPT_SACK_PERMITTED: 279 if (len < TCPOPT_SACK_OK_LEN || 280 up[1] != TCPOPT_SACK_OK_LEN) 281 break; 282 found |= TCP_OPT_SACK_OK_PRESENT; 283 up += TCPOPT_SACK_OK_LEN; 284 continue; 285 286 case TCPOPT_SACK: 287 if (len <= 2 || up[1] <= 2 || len < up[1]) 288 break; 289 290 /* If TCP is not interested in SACK blks... */ 291 if ((tcp = tcpopt->tcp) == NULL) { 292 up += up[1]; 293 continue; 294 } 295 sack_len = up[1] - TCPOPT_HEADER_LEN; 296 up += TCPOPT_HEADER_LEN; 297 298 /* 299 * If the list is empty, allocate one and assume 300 * nothing is sack'ed. 301 */ 302 if (tcp->tcp_notsack_list == NULL) { 303 tcp_notsack_update(&(tcp->tcp_notsack_list), 304 tcp->tcp_suna, tcp->tcp_snxt, 305 &(tcp->tcp_num_notsack_blk), 306 &(tcp->tcp_cnt_notsack_list)); 307 308 /* 309 * Make sure tcp_notsack_list is not NULL. 310 * This happens when kmem_alloc(KM_NOSLEEP) 311 * returns NULL. 
312 */ 313 if (tcp->tcp_notsack_list == NULL) { 314 up += sack_len; 315 continue; 316 } 317 tcp->tcp_fack = tcp->tcp_suna; 318 } 319 320 while (sack_len > 0) { 321 if (up + 8 > endp) { 322 up = endp; 323 break; 324 } 325 sack_begin = BE32_TO_U32(up); 326 up += 4; 327 sack_end = BE32_TO_U32(up); 328 up += 4; 329 sack_len -= 8; 330 /* 331 * Bounds checking. Make sure the SACK 332 * info is within tcp_suna and tcp_snxt. 333 * If this SACK blk is out of bound, ignore 334 * it but continue to parse the following 335 * blks. 336 */ 337 if (SEQ_LEQ(sack_end, sack_begin) || 338 SEQ_LT(sack_begin, tcp->tcp_suna) || 339 SEQ_GT(sack_end, tcp->tcp_snxt)) { 340 continue; 341 } 342 tcp_notsack_insert(&(tcp->tcp_notsack_list), 343 sack_begin, sack_end, 344 &(tcp->tcp_num_notsack_blk), 345 &(tcp->tcp_cnt_notsack_list)); 346 if (SEQ_GT(sack_end, tcp->tcp_fack)) { 347 tcp->tcp_fack = sack_end; 348 } 349 } 350 found |= TCP_OPT_SACK_PRESENT; 351 continue; 352 353 case TCPOPT_TSTAMP: 354 if (len < TCPOPT_TSTAMP_LEN || 355 up[1] != TCPOPT_TSTAMP_LEN) 356 break; 357 358 tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2); 359 tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6); 360 361 found |= TCP_OPT_TSTAMP_PRESENT; 362 363 up += TCPOPT_TSTAMP_LEN; 364 continue; 365 366 default: 367 if (len <= 1 || len < (int)up[1] || up[1] == 0) 368 break; 369 up += up[1]; 370 continue; 371 } 372 break; 373 } 374 return (found); 375 } 376 377 /* 378 * Process all TCP option in SYN segment. Note that this function should 379 * be called after tcp_set_destination() is called so that the necessary info 380 * from IRE is already set in the tcp structure. 381 * 382 * This function sets up the correct tcp_mss value according to the 383 * MSS option value and our header size. It also sets up the window scale 384 * and timestamp values, and initialize SACK info blocks. But it does not 385 * change receive window size after setting the tcp_mss value. The caller 386 * should do the appropriate change. 387 */ 388 static void 389 tcp_process_options(tcp_t *tcp, tcpha_t *tcpha) 390 { 391 int options; 392 tcp_opt_t tcpopt; 393 uint32_t mss_max; 394 char *tmp_tcph; 395 tcp_stack_t *tcps = tcp->tcp_tcps; 396 conn_t *connp = tcp->tcp_connp; 397 398 tcpopt.tcp = NULL; 399 options = tcp_parse_options(tcpha, &tcpopt); 400 401 /* 402 * Process MSS option. Note that MSS option value does not account 403 * for IP or TCP options. This means that it is equal to MTU - minimum 404 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for 405 * IPv6. 406 */ 407 if (!(options & TCP_OPT_MSS_PRESENT)) { 408 if (connp->conn_ipversion == IPV4_VERSION) 409 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4; 410 else 411 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6; 412 } else { 413 if (connp->conn_ipversion == IPV4_VERSION) 414 mss_max = tcps->tcps_mss_max_ipv4; 415 else 416 mss_max = tcps->tcps_mss_max_ipv6; 417 if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min) 418 tcpopt.tcp_opt_mss = tcps->tcps_mss_min; 419 else if (tcpopt.tcp_opt_mss > mss_max) 420 tcpopt.tcp_opt_mss = mss_max; 421 } 422 423 /* Process Window Scale option. */ 424 if (options & TCP_OPT_WSCALE_PRESENT) { 425 tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale; 426 tcp->tcp_snd_ws_ok = B_TRUE; 427 } else { 428 tcp->tcp_snd_ws = B_FALSE; 429 tcp->tcp_snd_ws_ok = B_FALSE; 430 tcp->tcp_rcv_ws = B_FALSE; 431 } 432 433 /* Process Timestamp option. 
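 * If the peer supplied a timestamp and we are prepared to use timestamps, the code below appends the RFC 1323 option to our template header in its recommended <NOP><NOP><TSTAMP><len 10> layout (the same encoding as the TCPOPT_NOP_NOP_TSTAMP macro defined at the top of this file) and grows the TCP data offset by three 32-bit words (TCPOPT_REAL_TS_LEN bytes).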
*/ 434 if ((options & TCP_OPT_TSTAMP_PRESENT) && 435 (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) { 436 tmp_tcph = (char *)tcp->tcp_tcpha; 437 438 tcp->tcp_snd_ts_ok = B_TRUE; 439 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 440 tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64(); 441 ASSERT(OK_32PTR(tmp_tcph)); 442 ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH); 443 444 /* Fill in our template header with basic timestamp option. */ 445 tmp_tcph += connp->conn_ht_ulp_len; 446 tmp_tcph[0] = TCPOPT_NOP; 447 tmp_tcph[1] = TCPOPT_NOP; 448 tmp_tcph[2] = TCPOPT_TSTAMP; 449 tmp_tcph[3] = TCPOPT_TSTAMP_LEN; 450 connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN; 451 connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN; 452 tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4); 453 } else { 454 tcp->tcp_snd_ts_ok = B_FALSE; 455 } 456 457 /* 458 * Process SACK options. If SACK is enabled for this connection, 459 * then allocate the SACK info structure. Note the following ways 460 * when tcp_snd_sack_ok is set to true. 461 * 462 * For active connection: in tcp_set_destination() called in 463 * tcp_connect(). 464 * 465 * For passive connection: in tcp_set_destination() called in 466 * tcp_input_listener(). 467 * 468 * That's the reason why the extra TCP_IS_DETACHED() check is there. 469 * That check makes sure that if we did not send a SACK OK option, 470 * we will not enable SACK for this connection even though the other 471 * side sends us SACK OK option. For active connection, the SACK 472 * info structure has already been allocated. So we need to free 473 * it if SACK is disabled. 474 */ 475 if ((options & TCP_OPT_SACK_OK_PRESENT) && 476 (tcp->tcp_snd_sack_ok || 477 (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) { 478 ASSERT(tcp->tcp_num_sack_blk == 0); 479 ASSERT(tcp->tcp_notsack_list == NULL); 480 481 tcp->tcp_snd_sack_ok = B_TRUE; 482 if (tcp->tcp_snd_ts_ok) { 483 tcp->tcp_max_sack_blk = 3; 484 } else { 485 tcp->tcp_max_sack_blk = 4; 486 } 487 } else if (tcp->tcp_snd_sack_ok) { 488 /* 489 * Resetting tcp_snd_sack_ok to B_FALSE so that 490 * no SACK info will be used for this 491 * connection. This assumes that SACK usage 492 * permission is negotiated. This may need 493 * to be changed once this is clarified. 494 */ 495 ASSERT(tcp->tcp_num_sack_blk == 0); 496 ASSERT(tcp->tcp_notsack_list == NULL); 497 tcp->tcp_snd_sack_ok = B_FALSE; 498 } 499 500 /* 501 * Now we know the exact TCP/IP header length, subtract 502 * that from tcp_mss to get our side's MSS. 503 */ 504 tcp->tcp_mss -= connp->conn_ht_iphc_len; 505 506 /* 507 * Here we assume that the other side's header size will be equal to 508 * our header size. We calculate the real MSS accordingly. Need to 509 * take into additional stuffs IPsec puts in. 510 * 511 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) 512 */ 513 tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len + 514 tcp->tcp_ipsec_overhead - 515 ((connp->conn_ipversion == IPV4_VERSION ? 516 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH); 517 518 /* 519 * Set MSS to the smaller one of both ends of the connection. 520 * We should not have called tcp_mss_set() before, but our 521 * side of the MSS should have been set to a proper value 522 * by tcp_set_destination(). tcp_mss_set() will also set up the 523 * STREAM head parameters properly. 524 * 525 * If we have a larger-than-16-bit window but the other side 526 * didn't want to do window scale, tcp_rwnd_set() will take 527 * care of that. 
528 */ 529 tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss)); 530 531 /* 532 * Initialize tcp_cwnd value. After tcp_mss_set(), tcp_mss has been 533 * updated properly. 534 */ 535 TCP_SET_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial); 536 } 537 538 /* 539 * Add a new piece to the tcp reassembly queue. If the gap at the beginning 540 * is filled, return as much as we can. The message passed in may be 541 * multi-part, chained using b_cont. "start" is the starting sequence 542 * number for this piece. 543 */ 544 static mblk_t * 545 tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start) 546 { 547 uint32_t end; 548 mblk_t *mp1; 549 mblk_t *mp2; 550 mblk_t *next_mp; 551 uint32_t u1; 552 tcp_stack_t *tcps = tcp->tcp_tcps; 553 554 555 /* Walk through all the new pieces. */ 556 do { 557 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 558 (uintptr_t)INT_MAX); 559 end = start + (int)(mp->b_wptr - mp->b_rptr); 560 next_mp = mp->b_cont; 561 if (start == end) { 562 /* Empty. Blast it. */ 563 freeb(mp); 564 continue; 565 } 566 mp->b_cont = NULL; 567 TCP_REASS_SET_SEQ(mp, start); 568 TCP_REASS_SET_END(mp, end); 569 mp1 = tcp->tcp_reass_tail; 570 if (!mp1) { 571 tcp->tcp_reass_tail = mp; 572 tcp->tcp_reass_head = mp; 573 TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs); 574 TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes, 575 end - start); 576 continue; 577 } 578 /* New stuff completely beyond tail? */ 579 if (SEQ_GEQ(start, TCP_REASS_END(mp1))) { 580 /* Link it on end. */ 581 mp1->b_cont = mp; 582 tcp->tcp_reass_tail = mp; 583 TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs); 584 TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes, 585 end - start); 586 continue; 587 } 588 mp1 = tcp->tcp_reass_head; 589 u1 = TCP_REASS_SEQ(mp1); 590 /* New stuff at the front? */ 591 if (SEQ_LT(start, u1)) { 592 /* Yes... Check for overlap. */ 593 mp->b_cont = mp1; 594 tcp->tcp_reass_head = mp; 595 tcp_reass_elim_overlap(tcp, mp); 596 continue; 597 } 598 /* 599 * The new piece fits somewhere between the head and tail. 600 * We find our slot, where mp1 precedes us and mp2 trails. 601 */ 602 for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) { 603 u1 = TCP_REASS_SEQ(mp2); 604 if (SEQ_LEQ(start, u1)) 605 break; 606 } 607 /* Link ourselves in */ 608 mp->b_cont = mp2; 609 mp1->b_cont = mp; 610 611 /* Trim overlap with following mblk(s) first */ 612 tcp_reass_elim_overlap(tcp, mp); 613 614 /* Trim overlap with preceding mblk */ 615 tcp_reass_elim_overlap(tcp, mp1); 616 617 } while (start = end, mp = next_mp); 618 mp1 = tcp->tcp_reass_head; 619 /* Anything ready to go? 
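 * Data can be handed to the caller only if the head of the reassembly queue starts exactly at tcp_rnxt; otherwise the gap at the front is still open, so NULL is returned and everything stays queued.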
*/ 620 if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt) 621 return (NULL); 622 /* Eat what we can off the queue */ 623 for (;;) { 624 mp = mp1->b_cont; 625 end = TCP_REASS_END(mp1); 626 TCP_REASS_SET_SEQ(mp1, 0); 627 TCP_REASS_SET_END(mp1, 0); 628 if (!mp) { 629 tcp->tcp_reass_tail = NULL; 630 break; 631 } 632 if (end != TCP_REASS_SEQ(mp)) { 633 mp1->b_cont = NULL; 634 break; 635 } 636 mp1 = mp; 637 } 638 mp1 = tcp->tcp_reass_head; 639 tcp->tcp_reass_head = mp; 640 return (mp1); 641 } 642 643 /* Eliminate any overlap that mp may have over later mblks */ 644 static void 645 tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp) 646 { 647 uint32_t end; 648 mblk_t *mp1; 649 uint32_t u1; 650 tcp_stack_t *tcps = tcp->tcp_tcps; 651 652 end = TCP_REASS_END(mp); 653 while ((mp1 = mp->b_cont) != NULL) { 654 u1 = TCP_REASS_SEQ(mp1); 655 if (!SEQ_GT(end, u1)) 656 break; 657 if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) { 658 mp->b_wptr -= end - u1; 659 TCP_REASS_SET_END(mp, u1); 660 TCPS_BUMP_MIB(tcps, tcpInDataPartDupSegs); 661 TCPS_UPDATE_MIB(tcps, tcpInDataPartDupBytes, 662 end - u1); 663 break; 664 } 665 mp->b_cont = mp1->b_cont; 666 TCP_REASS_SET_SEQ(mp1, 0); 667 TCP_REASS_SET_END(mp1, 0); 668 freeb(mp1); 669 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs); 670 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, end - u1); 671 } 672 if (!mp1) 673 tcp->tcp_reass_tail = mp; 674 } 675 676 /* 677 * This function does PAWS protection check. Returns B_TRUE if the 678 * segment passes the PAWS test, else returns B_FALSE. 679 */ 680 boolean_t 681 tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp) 682 { 683 uint8_t flags; 684 int options; 685 uint8_t *up; 686 conn_t *connp = tcp->tcp_connp; 687 688 flags = (unsigned int)tcpha->tha_flags & 0xFF; 689 /* 690 * If timestamp option is aligned nicely, get values inline, 691 * otherwise call general routine to parse. Only do that 692 * if timestamp is the only option. 693 */ 694 if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH + 695 TCPOPT_REAL_TS_LEN && 696 OK_32PTR((up = ((uint8_t *)tcpha) + 697 TCP_MIN_HEADER_LENGTH)) && 698 *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) { 699 tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4)); 700 tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8)); 701 702 options = TCP_OPT_TSTAMP_PRESENT; 703 } else { 704 if (tcp->tcp_snd_sack_ok) { 705 tcpoptp->tcp = tcp; 706 } else { 707 tcpoptp->tcp = NULL; 708 } 709 options = tcp_parse_options(tcpha, tcpoptp); 710 } 711 712 if (options & TCP_OPT_TSTAMP_PRESENT) { 713 /* 714 * Do PAWS per RFC 1323 section 4.2. Accept RST 715 * regardless of the timestamp, page 18 RFC 1323.bis. 716 */ 717 if ((flags & TH_RST) == 0 && 718 TSTMP_LT(tcpoptp->tcp_opt_ts_val, 719 tcp->tcp_ts_recent)) { 720 if (TSTMP_LT(LBOLT_FASTPATH64, 721 tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) { 722 /* This segment is not acceptable. */ 723 return (B_FALSE); 724 } else { 725 /* 726 * Connection has been idle for 727 * too long. Reset the timestamp 728 * and assume the segment is valid. 729 */ 730 tcp->tcp_ts_recent = 731 tcpoptp->tcp_opt_ts_val; 732 } 733 } 734 } else { 735 /* 736 * If we don't get a timestamp on every packet, we 737 * figure we can't really trust 'em, so we stop sending 738 * and parsing them. 739 */ 740 tcp->tcp_snd_ts_ok = B_FALSE; 741 742 connp->conn_ht_iphc_len -= TCPOPT_REAL_TS_LEN; 743 connp->conn_ht_ulp_len -= TCPOPT_REAL_TS_LEN; 744 tcp->tcp_tcpha->tha_offset_and_reserved -= (3 << 4); 745 /* 746 * Adjust the tcp_mss and tcp_cwnd accordingly. 
We avoid 747 * doing a slow start here so as not to lose the 748 * transfer rate built up so far. 749 */ 750 tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN); 751 if (tcp->tcp_snd_sack_ok) 752 tcp->tcp_max_sack_blk = 4; 753 } 754 return (B_TRUE); 755 } 756 757 /* 758 * Defense for the SYN attack - 759 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest 760 * one from the list of droppable eagers. This list is a subset of q0. 761 * See comments before the definition of MAKE_DROPPABLE(). 762 * 2. Don't drop a SYN request before its first timeout. This gives every 763 * request at least until its first timeout to complete its 3-way handshake. 764 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many 765 * requests currently on the queue have timed out. This will be used 766 * as an indicator of whether an attack is under way, so that appropriate 767 * actions can be taken. (It's incremented in tcp_timer() and decremented 768 * either when the eager goes into ESTABLISHED, or gets freed up.) 769 * 4. The current thresholds are: # of timeouts > q0len/4 => SYN alert on; 770 * # of timeouts drops back to <= q0len/32 => SYN alert off. 771 */ 772 static boolean_t 773 tcp_drop_q0(tcp_t *tcp) 774 { 775 tcp_t *eager; 776 mblk_t *mp; 777 tcp_stack_t *tcps = tcp->tcp_tcps; 778 779 ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock)); 780 ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0); 781 782 /* Pick oldest eager from the list of droppable eagers */ 783 eager = tcp->tcp_eager_prev_drop_q0; 784 785 /* If the list is empty, return B_FALSE */ 786 if (eager == tcp) { 787 return (B_FALSE); 788 } 789 790 /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */ 791 if ((mp = allocb(0, BPRI_HI)) == NULL) 792 return (B_FALSE); 793 794 /* 795 * Take this eager out of the list of droppable eagers since we are 796 * going to drop it.
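 * The teardown itself is asynchronous: a reference is placed on the eager's conn and tcp_clean_death_wrapper() is queued on the eager's own squeue below, so the listener never tears the eager down directly while holding tcp_eager_lock.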
797 */ 798 MAKE_UNDROPPABLE(eager); 799 800 if (tcp->tcp_connp->conn_debug) { 801 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 802 "tcp_drop_q0: listen half-open queue (max=%d) overflow" 803 " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0, 804 tcp->tcp_conn_req_cnt_q0, 805 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 806 } 807 808 TCPS_BUMP_MIB(tcps, tcpHalfOpenDrop); 809 810 /* Put a reference on the conn as we are enqueueing it in the sqeue */ 811 CONN_INC_REF(eager->tcp_connp); 812 813 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, 814 tcp_clean_death_wrapper, eager->tcp_connp, NULL, 815 SQ_FILL, SQTAG_TCP_DROP_Q0); 816 817 return (B_TRUE); 818 } 819 820 /* 821 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6 822 */ 823 static mblk_t * 824 tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp, 825 ip_recv_attr_t *ira) 826 { 827 tcp_t *ltcp = lconnp->conn_tcp; 828 tcp_t *tcp = connp->conn_tcp; 829 mblk_t *tpi_mp; 830 ipha_t *ipha; 831 ip6_t *ip6h; 832 sin6_t sin6; 833 uint_t ifindex = ira->ira_ruifindex; 834 tcp_stack_t *tcps = tcp->tcp_tcps; 835 836 if (ira->ira_flags & IRAF_IS_IPV4) { 837 ipha = (ipha_t *)mp->b_rptr; 838 839 connp->conn_ipversion = IPV4_VERSION; 840 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6); 841 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6); 842 connp->conn_saddr_v6 = connp->conn_laddr_v6; 843 844 sin6 = sin6_null; 845 sin6.sin6_addr = connp->conn_faddr_v6; 846 sin6.sin6_port = connp->conn_fport; 847 sin6.sin6_family = AF_INET6; 848 sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6, 849 IPCL_ZONEID(lconnp), tcps->tcps_netstack); 850 851 if (connp->conn_recv_ancillary.crb_recvdstaddr) { 852 sin6_t sin6d; 853 854 sin6d = sin6_null; 855 sin6d.sin6_addr = connp->conn_laddr_v6; 856 sin6d.sin6_port = connp->conn_lport; 857 sin6d.sin6_family = AF_INET; 858 tpi_mp = mi_tpi_extconn_ind(NULL, 859 (char *)&sin6d, sizeof (sin6_t), 860 (char *)&tcp, 861 (t_scalar_t)sizeof (intptr_t), 862 (char *)&sin6d, sizeof (sin6_t), 863 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 864 } else { 865 tpi_mp = mi_tpi_conn_ind(NULL, 866 (char *)&sin6, sizeof (sin6_t), 867 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 868 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 869 } 870 } else { 871 ip6h = (ip6_t *)mp->b_rptr; 872 873 connp->conn_ipversion = IPV6_VERSION; 874 connp->conn_laddr_v6 = ip6h->ip6_dst; 875 connp->conn_faddr_v6 = ip6h->ip6_src; 876 connp->conn_saddr_v6 = connp->conn_laddr_v6; 877 878 sin6 = sin6_null; 879 sin6.sin6_addr = connp->conn_faddr_v6; 880 sin6.sin6_port = connp->conn_fport; 881 sin6.sin6_family = AF_INET6; 882 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 883 sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6, 884 IPCL_ZONEID(lconnp), tcps->tcps_netstack); 885 886 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 887 /* Pass up the scope_id of remote addr */ 888 sin6.sin6_scope_id = ifindex; 889 } else { 890 sin6.sin6_scope_id = 0; 891 } 892 if (connp->conn_recv_ancillary.crb_recvdstaddr) { 893 sin6_t sin6d; 894 895 sin6d = sin6_null; 896 sin6.sin6_addr = connp->conn_laddr_v6; 897 sin6d.sin6_port = connp->conn_lport; 898 sin6d.sin6_family = AF_INET6; 899 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6)) 900 sin6d.sin6_scope_id = ifindex; 901 902 tpi_mp = mi_tpi_extconn_ind(NULL, 903 (char *)&sin6d, sizeof (sin6_t), 904 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 905 (char *)&sin6d, sizeof (sin6_t), 906 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 907 } else { 908 tpi_mp = 
mi_tpi_conn_ind(NULL, 909 (char *)&sin6, sizeof (sin6_t), 910 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 911 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 912 } 913 } 914 915 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 916 return (tpi_mp); 917 } 918 919 /* Handle a SYN on an AF_INET socket */ 920 static mblk_t * 921 tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp, 922 ip_recv_attr_t *ira) 923 { 924 tcp_t *ltcp = lconnp->conn_tcp; 925 tcp_t *tcp = connp->conn_tcp; 926 sin_t sin; 927 mblk_t *tpi_mp = NULL; 928 tcp_stack_t *tcps = tcp->tcp_tcps; 929 ipha_t *ipha; 930 931 ASSERT(ira->ira_flags & IRAF_IS_IPV4); 932 ipha = (ipha_t *)mp->b_rptr; 933 934 connp->conn_ipversion = IPV4_VERSION; 935 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6); 936 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6); 937 connp->conn_saddr_v6 = connp->conn_laddr_v6; 938 939 sin = sin_null; 940 sin.sin_addr.s_addr = connp->conn_faddr_v4; 941 sin.sin_port = connp->conn_fport; 942 sin.sin_family = AF_INET; 943 if (lconnp->conn_recv_ancillary.crb_recvdstaddr) { 944 sin_t sind; 945 946 sind = sin_null; 947 sind.sin_addr.s_addr = connp->conn_laddr_v4; 948 sind.sin_port = connp->conn_lport; 949 sind.sin_family = AF_INET; 950 tpi_mp = mi_tpi_extconn_ind(NULL, 951 (char *)&sind, sizeof (sin_t), (char *)&tcp, 952 (t_scalar_t)sizeof (intptr_t), (char *)&sind, 953 sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum); 954 } else { 955 tpi_mp = mi_tpi_conn_ind(NULL, 956 (char *)&sin, sizeof (sin_t), 957 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 958 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 959 } 960 961 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 962 return (tpi_mp); 963 } 964 965 /* 966 * Called via squeue to get on to eager's perimeter. It sends a 967 * TH_RST if eager is in the fanout table. The listener wants the 968 * eager to disappear either by means of tcp_eager_blowoff() or 969 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be 970 * called (via squeue) if the eager cannot be inserted in the 971 * fanout table in tcp_input_listener(). 972 */ 973 /* ARGSUSED */ 974 void 975 tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy) 976 { 977 conn_t *econnp = (conn_t *)arg; 978 tcp_t *eager = econnp->conn_tcp; 979 tcp_t *listener = eager->tcp_listener; 980 981 /* 982 * We could be called because listener is closing. Since 983 * the eager was using listener's queue's, we avoid 984 * using the listeners queues from now on. 985 */ 986 ASSERT(eager->tcp_detached); 987 econnp->conn_rq = NULL; 988 econnp->conn_wq = NULL; 989 990 /* 991 * An eager's conn_fanout will be NULL if it's a duplicate 992 * for an existing 4-tuples in the conn fanout table. 993 * We don't want to send an RST out in such case. 994 */ 995 if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) { 996 tcp_xmit_ctl("tcp_eager_kill, can't wait", 997 eager, eager->tcp_snxt, 0, TH_RST); 998 } 999 1000 /* We are here because listener wants this eager gone */ 1001 if (listener != NULL) { 1002 mutex_enter(&listener->tcp_eager_lock); 1003 tcp_eager_unlink(eager); 1004 if (eager->tcp_tconnind_started) { 1005 /* 1006 * The eager has sent a conn_ind up to the 1007 * listener but listener decides to close 1008 * instead. We need to drop the extra ref 1009 * placed on eager in tcp_input_data() before 1010 * sending the conn_ind to listener. 
1011 */ 1012 CONN_DEC_REF(econnp); 1013 } 1014 mutex_exit(&listener->tcp_eager_lock); 1015 CONN_DEC_REF(listener->tcp_connp); 1016 } 1017 1018 if (eager->tcp_state != TCPS_CLOSED) 1019 tcp_close_detached(eager); 1020 } 1021 1022 /* 1023 * Reset any eager connection hanging off this listener marked 1024 * with 'seqnum' and then reclaim it's resources. 1025 */ 1026 boolean_t 1027 tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum) 1028 { 1029 tcp_t *eager; 1030 mblk_t *mp; 1031 1032 eager = listener; 1033 mutex_enter(&listener->tcp_eager_lock); 1034 do { 1035 eager = eager->tcp_eager_next_q; 1036 if (eager == NULL) { 1037 mutex_exit(&listener->tcp_eager_lock); 1038 return (B_FALSE); 1039 } 1040 } while (eager->tcp_conn_req_seqnum != seqnum); 1041 1042 if (eager->tcp_closemp_used) { 1043 mutex_exit(&listener->tcp_eager_lock); 1044 return (B_TRUE); 1045 } 1046 eager->tcp_closemp_used = B_TRUE; 1047 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1048 CONN_INC_REF(eager->tcp_connp); 1049 mutex_exit(&listener->tcp_eager_lock); 1050 mp = &eager->tcp_closemp; 1051 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill, 1052 eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF); 1053 return (B_TRUE); 1054 } 1055 1056 /* 1057 * Reset any eager connection hanging off this listener 1058 * and then reclaim it's resources. 1059 */ 1060 void 1061 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only) 1062 { 1063 tcp_t *eager; 1064 mblk_t *mp; 1065 tcp_stack_t *tcps = listener->tcp_tcps; 1066 1067 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 1068 1069 if (!q0_only) { 1070 /* First cleanup q */ 1071 TCP_STAT(tcps, tcp_eager_blowoff_q); 1072 eager = listener->tcp_eager_next_q; 1073 while (eager != NULL) { 1074 if (!eager->tcp_closemp_used) { 1075 eager->tcp_closemp_used = B_TRUE; 1076 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1077 CONN_INC_REF(eager->tcp_connp); 1078 mp = &eager->tcp_closemp; 1079 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, 1080 tcp_eager_kill, eager->tcp_connp, NULL, 1081 SQ_FILL, SQTAG_TCP_EAGER_CLEANUP); 1082 } 1083 eager = eager->tcp_eager_next_q; 1084 } 1085 } 1086 /* Then cleanup q0 */ 1087 TCP_STAT(tcps, tcp_eager_blowoff_q0); 1088 eager = listener->tcp_eager_next_q0; 1089 while (eager != listener) { 1090 if (!eager->tcp_closemp_used) { 1091 eager->tcp_closemp_used = B_TRUE; 1092 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1093 CONN_INC_REF(eager->tcp_connp); 1094 mp = &eager->tcp_closemp; 1095 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, 1096 tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL, 1097 SQTAG_TCP_EAGER_CLEANUP_Q0); 1098 } 1099 eager = eager->tcp_eager_next_q0; 1100 } 1101 } 1102 1103 /* 1104 * If we are an eager connection hanging off a listener that hasn't 1105 * formally accepted the connection yet, get off his list and blow off 1106 * any data that we have accumulated. 
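 * The eager may still be on q0 (3-way handshake not yet complete) or already on the established queue q; both cases are handled below, and the listener's tcp_conn_req_cnt_q0 or tcp_conn_req_cnt_q counter is decremented accordingly.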
1107 */ 1108 void 1109 tcp_eager_unlink(tcp_t *tcp) 1110 { 1111 tcp_t *listener = tcp->tcp_listener; 1112 1113 ASSERT(listener != NULL); 1114 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 1115 if (tcp->tcp_eager_next_q0 != NULL) { 1116 ASSERT(tcp->tcp_eager_prev_q0 != NULL); 1117 1118 /* Remove the eager tcp from q0 */ 1119 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 1120 tcp->tcp_eager_prev_q0; 1121 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 1122 tcp->tcp_eager_next_q0; 1123 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 1124 listener->tcp_conn_req_cnt_q0--; 1125 1126 tcp->tcp_eager_next_q0 = NULL; 1127 tcp->tcp_eager_prev_q0 = NULL; 1128 1129 /* 1130 * Take the eager out, if it is in the list of droppable 1131 * eagers. 1132 */ 1133 MAKE_UNDROPPABLE(tcp); 1134 1135 if (tcp->tcp_syn_rcvd_timeout != 0) { 1136 /* we have timed out before */ 1137 ASSERT(listener->tcp_syn_rcvd_timeout > 0); 1138 listener->tcp_syn_rcvd_timeout--; 1139 } 1140 } else { 1141 tcp_t **tcpp = &listener->tcp_eager_next_q; 1142 tcp_t *prev = NULL; 1143 1144 for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) { 1145 if (tcpp[0] == tcp) { 1146 if (listener->tcp_eager_last_q == tcp) { 1147 /* 1148 * If we are unlinking the last 1149 * element on the list, adjust 1150 * tail pointer. Set tail pointer 1151 * to nil when list is empty. 1152 */ 1153 ASSERT(tcp->tcp_eager_next_q == NULL); 1154 if (listener->tcp_eager_last_q == 1155 listener->tcp_eager_next_q) { 1156 listener->tcp_eager_last_q = 1157 NULL; 1158 } else { 1159 /* 1160 * We won't get here if there 1161 * is only one eager in the 1162 * list. 1163 */ 1164 ASSERT(prev != NULL); 1165 listener->tcp_eager_last_q = 1166 prev; 1167 } 1168 } 1169 tcpp[0] = tcp->tcp_eager_next_q; 1170 tcp->tcp_eager_next_q = NULL; 1171 tcp->tcp_eager_last_q = NULL; 1172 ASSERT(listener->tcp_conn_req_cnt_q > 0); 1173 listener->tcp_conn_req_cnt_q--; 1174 break; 1175 } 1176 prev = tcpp[0]; 1177 } 1178 } 1179 tcp->tcp_listener = NULL; 1180 } 1181 1182 /* BEGIN CSTYLED */ 1183 /* 1184 * 1185 * The sockfs ACCEPT path: 1186 * ======================= 1187 * 1188 * The eager is now established in its own perimeter as soon as SYN is 1189 * received in tcp_input_listener(). When sockfs receives conn_ind, it 1190 * completes the accept processing on the acceptor STREAM. The sending 1191 * of conn_ind part is common for both sockfs listener and a TLI/XTI 1192 * listener but a TLI/XTI listener completes the accept processing 1193 * on the listener perimeter. 1194 * 1195 * Common control flow for 3 way handshake: 1196 * ---------------------------------------- 1197 * 1198 * incoming SYN (listener perimeter) -> tcp_input_listener() 1199 * 1200 * incoming SYN-ACK-ACK (eager perim) -> tcp_input_data() 1201 * send T_CONN_IND (listener perim) -> tcp_send_conn_ind() 1202 * 1203 * Sockfs ACCEPT Path: 1204 * ------------------- 1205 * 1206 * open acceptor stream (tcp_open allocates tcp_tli_accept() 1207 * as STREAM entry point) 1208 * 1209 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_tli_accept() 1210 * 1211 * tcp_tli_accept() extracts the eager and makes the q->q_ptr <-> eager 1212 * association (we are not behind eager's squeue but sockfs is protecting us 1213 * and no one knows about this stream yet. The STREAMS entry point q->q_info 1214 * is changed to point at tcp_wput(). 1215 * 1216 * tcp_accept_common() sends any deferred eagers via tcp_send_pending() to 1217 * listener (done on listener's perimeter). 
1218 * 1219 * tcp_tli_accept() calls tcp_accept_finish() on eagers perimeter to finish 1220 * accept. 1221 * 1222 * TLI/XTI client ACCEPT path: 1223 * --------------------------- 1224 * 1225 * soaccept() sends T_CONN_RES on the listener STREAM. 1226 * 1227 * tcp_tli_accept() -> tcp_accept_swap() complete the processing and send 1228 * a M_SETOPS mblk to eager perimeter to finish accept (tcp_accept_finish()). 1229 * 1230 * Locks: 1231 * ====== 1232 * 1233 * listener->tcp_eager_lock protects the listeners->tcp_eager_next_q0 and 1234 * and listeners->tcp_eager_next_q. 1235 * 1236 * Referencing: 1237 * ============ 1238 * 1239 * 1) We start out in tcp_input_listener by eager placing a ref on 1240 * listener and listener adding eager to listeners->tcp_eager_next_q0. 1241 * 1242 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before 1243 * doing so we place a ref on the eager. This ref is finally dropped at the 1244 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the 1245 * reference is dropped by the squeue framework. 1246 * 1247 * 3) The ref on listener placed in 1 above is dropped in tcp_accept_finish 1248 * 1249 * The reference must be released by the same entity that added the reference 1250 * In the above scheme, the eager is the entity that adds and releases the 1251 * references. Note that tcp_accept_finish executes in the squeue of the eager 1252 * (albeit after it is attached to the acceptor stream). Though 1. executes 1253 * in the listener's squeue, the eager is nascent at this point and the 1254 * reference can be considered to have been added on behalf of the eager. 1255 * 1256 * Eager getting a Reset or listener closing: 1257 * ========================================== 1258 * 1259 * Once the listener and eager are linked, the listener never does the unlink. 1260 * If the listener needs to close, tcp_eager_cleanup() is called which queues 1261 * a message on all eager perimeter. The eager then does the unlink, clears 1262 * any pointers to the listener's queue and drops the reference to the 1263 * listener. The listener waits in tcp_close outside the squeue until its 1264 * refcount has dropped to 1. This ensures that the listener has waited for 1265 * all eagers to clear their association with the listener. 1266 * 1267 * Similarly, if eager decides to go away, it can unlink itself and close. 1268 * When the T_CONN_RES comes down, we check if eager has closed. Note that 1269 * the reference to eager is still valid because of the extra ref we put 1270 * in tcp_send_conn_ind. 1271 * 1272 * Listener can always locate the eager under the protection 1273 * of the listener->tcp_eager_lock, and then do a refhold 1274 * on the eager during the accept processing. 1275 * 1276 * The acceptor stream accesses the eager in the accept processing 1277 * based on the ref placed on eager before sending T_conn_ind. 1278 * The only entity that can negate this refhold is a listener close 1279 * which is mutually exclusive with an active acceptor stream. 1280 * 1281 * Eager's reference on the listener 1282 * =================================== 1283 * 1284 * If the accept happens (even on a closed eager) the eager drops its 1285 * reference on the listener at the start of tcp_accept_finish. If the 1286 * eager is killed due to an incoming RST before the T_conn_ind is sent up, 1287 * the reference is dropped in tcp_closei_local. If the listener closes, 1288 * the reference is dropped in tcp_eager_kill. 
In all cases the reference 1289 * is dropped while executing in the eager's context (squeue). 1290 */ 1291 /* END CSTYLED */ 1292 1293 /* Process the SYN packet, mp, directed at the listener 'tcp' */ 1294 1295 /* 1296 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN. 1297 * tcp_input_data will not see any packets for listeners since the listener 1298 * has conn_recv set to tcp_input_listener. 1299 */ 1300 /* ARGSUSED */ 1301 static void 1302 tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 1303 { 1304 tcpha_t *tcpha; 1305 uint32_t seg_seq; 1306 tcp_t *eager; 1307 int err; 1308 conn_t *econnp = NULL; 1309 squeue_t *new_sqp; 1310 mblk_t *mp1; 1311 uint_t ip_hdr_len; 1312 conn_t *lconnp = (conn_t *)arg; 1313 tcp_t *listener = lconnp->conn_tcp; 1314 tcp_stack_t *tcps = listener->tcp_tcps; 1315 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 1316 uint_t flags; 1317 mblk_t *tpi_mp; 1318 uint_t ifindex = ira->ira_ruifindex; 1319 boolean_t tlc_set = B_FALSE; 1320 1321 ip_hdr_len = ira->ira_ip_hdr_length; 1322 tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len]; 1323 flags = (unsigned int)tcpha->tha_flags & 0xFF; 1324 1325 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, lconnp->conn_ixa, 1326 __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, listener, 1327 __dtrace_tcp_tcph_t *, tcpha); 1328 1329 if (!(flags & TH_SYN)) { 1330 if ((flags & TH_RST) || (flags & TH_URG)) { 1331 freemsg(mp); 1332 return; 1333 } 1334 if (flags & TH_ACK) { 1335 /* Note this executes in listener's squeue */ 1336 tcp_xmit_listeners_reset(mp, ira, ipst, lconnp); 1337 return; 1338 } 1339 1340 freemsg(mp); 1341 return; 1342 } 1343 1344 if (listener->tcp_state != TCPS_LISTEN) 1345 goto error2; 1346 1347 ASSERT(IPCL_IS_BOUND(lconnp)); 1348 1349 mutex_enter(&listener->tcp_eager_lock); 1350 1351 /* 1352 * The system is under memory pressure, so we need to do our part 1353 * to relieve the pressure. So we only accept new request if there 1354 * is nothing waiting to be accepted or waiting to complete the 3-way 1355 * handshake. This means that busy listener will not get too many 1356 * new requests which they cannot handle in time while non-busy 1357 * listener is still functioning properly. 1358 */ 1359 if (tcps->tcps_reclaim && (listener->tcp_conn_req_cnt_q > 0 || 1360 listener->tcp_conn_req_cnt_q0 > 0)) { 1361 mutex_exit(&listener->tcp_eager_lock); 1362 TCP_STAT(tcps, tcp_listen_mem_drop); 1363 goto error2; 1364 } 1365 1366 if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) { 1367 mutex_exit(&listener->tcp_eager_lock); 1368 TCP_STAT(tcps, tcp_listendrop); 1369 TCPS_BUMP_MIB(tcps, tcpListenDrop); 1370 if (lconnp->conn_debug) { 1371 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 1372 "tcp_input_listener: listen backlog (max=%d) " 1373 "overflow (%d pending) on %s", 1374 listener->tcp_conn_req_max, 1375 listener->tcp_conn_req_cnt_q, 1376 tcp_display(listener, NULL, DISP_PORT_ONLY)); 1377 } 1378 goto error2; 1379 } 1380 1381 if (listener->tcp_conn_req_cnt_q0 >= 1382 listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) { 1383 /* 1384 * Q0 is full. Drop a pending half-open req from the queue 1385 * to make room for the new SYN req. Also mark the time we 1386 * drop a SYN. 1387 * 1388 * A more aggressive defense against SYN attack will 1389 * be to set the "tcp_syn_defense" flag now. 
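 * tcp_drop_q0() below evicts the oldest droppable half-open connection to make room; if nothing can be dropped (or the allocation it needs fails), the new SYN itself is discarded instead.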
1390 */ 1391 TCP_STAT(tcps, tcp_listendropq0); 1392 listener->tcp_last_rcv_lbolt = ddi_get_lbolt64(); 1393 if (!tcp_drop_q0(listener)) { 1394 mutex_exit(&listener->tcp_eager_lock); 1395 TCPS_BUMP_MIB(tcps, tcpListenDropQ0); 1396 if (lconnp->conn_debug) { 1397 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 1398 "tcp_input_listener: listen half-open " 1399 "queue (max=%d) full (%d pending) on %s", 1400 tcps->tcps_conn_req_max_q0, 1401 listener->tcp_conn_req_cnt_q0, 1402 tcp_display(listener, NULL, 1403 DISP_PORT_ONLY)); 1404 } 1405 goto error2; 1406 } 1407 } 1408 1409 /* 1410 * Enforce the limit set on the number of connections per listener. 1411 * Note that tlc_cnt starts with 1. So need to add 1 to tlc_max 1412 * for comparison. 1413 */ 1414 if (listener->tcp_listen_cnt != NULL) { 1415 tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt; 1416 int64_t now; 1417 1418 if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) { 1419 mutex_exit(&listener->tcp_eager_lock); 1420 now = ddi_get_lbolt64(); 1421 atomic_add_32(&tlc->tlc_cnt, -1); 1422 TCP_STAT(tcps, tcp_listen_cnt_drop); 1423 tlc->tlc_drop++; 1424 if (now - tlc->tlc_report_time > 1425 MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) { 1426 zcmn_err(lconnp->conn_zoneid, CE_WARN, 1427 "Listener (port %d) connection max (%u) " 1428 "reached: %u attempts dropped total\n", 1429 ntohs(listener->tcp_connp->conn_lport), 1430 tlc->tlc_max, tlc->tlc_drop); 1431 tlc->tlc_report_time = now; 1432 } 1433 goto error2; 1434 } 1435 tlc_set = B_TRUE; 1436 } 1437 1438 mutex_exit(&listener->tcp_eager_lock); 1439 1440 /* 1441 * IP sets ira_sqp to either the senders conn_sqp (for loopback) 1442 * or based on the ring (for packets from GLD). Otherwise it is 1443 * set based on lbolt i.e., a somewhat random number. 1444 */ 1445 ASSERT(ira->ira_sqp != NULL); 1446 new_sqp = ira->ira_sqp; 1447 1448 econnp = (conn_t *)tcp_get_conn(arg2, tcps); 1449 if (econnp == NULL) 1450 goto error2; 1451 1452 ASSERT(econnp->conn_netstack == lconnp->conn_netstack); 1453 econnp->conn_sqp = new_sqp; 1454 econnp->conn_initial_sqp = new_sqp; 1455 econnp->conn_ixa->ixa_sqp = new_sqp; 1456 1457 econnp->conn_fport = tcpha->tha_lport; 1458 econnp->conn_lport = tcpha->tha_fport; 1459 1460 err = conn_inherit_parent(lconnp, econnp); 1461 if (err != 0) 1462 goto error3; 1463 1464 /* We already know the laddr of the new connection is ours */ 1465 econnp->conn_ixa->ixa_src_generation = ipst->ips_src_generation; 1466 1467 ASSERT(OK_32PTR(mp->b_rptr)); 1468 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION || 1469 IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION); 1470 1471 if (lconnp->conn_family == AF_INET) { 1472 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION); 1473 tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira); 1474 } else { 1475 tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira); 1476 } 1477 1478 if (tpi_mp == NULL) 1479 goto error3; 1480 1481 eager = econnp->conn_tcp; 1482 eager->tcp_detached = B_TRUE; 1483 SOCK_CONNID_INIT(eager->tcp_connid); 1484 1485 /* 1486 * Initialize the eager's tcp_t and inherit some parameters from 1487 * the listener. 
1488 */ 1489 tcp_init_values(eager, listener); 1490 1491 ASSERT((econnp->conn_ixa->ixa_flags & 1492 (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE | 1493 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) == 1494 (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE | 1495 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)); 1496 1497 if (!tcps->tcps_dev_flow_ctl) 1498 econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL; 1499 1500 /* Prepare for diffing against previous packets */ 1501 eager->tcp_recvifindex = 0; 1502 eager->tcp_recvhops = 0xffffffffU; 1503 1504 if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) { 1505 if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) || 1506 IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) { 1507 econnp->conn_incoming_ifindex = ifindex; 1508 econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET; 1509 econnp->conn_ixa->ixa_scopeid = ifindex; 1510 } 1511 } 1512 1513 if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) == 1514 (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) && 1515 tcps->tcps_rev_src_routes) { 1516 ipha_t *ipha = (ipha_t *)mp->b_rptr; 1517 ip_pkt_t *ipp = &econnp->conn_xmit_ipp; 1518 1519 /* Source routing option copyover (reverse it) */ 1520 err = ip_find_hdr_v4(ipha, ipp, B_TRUE); 1521 if (err != 0) { 1522 freemsg(tpi_mp); 1523 goto error3; 1524 } 1525 ip_pkt_source_route_reverse_v4(ipp); 1526 } 1527 1528 ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL); 1529 ASSERT(!eager->tcp_tconnind_started); 1530 /* 1531 * If the SYN came with a credential, it's a loopback packet or a 1532 * labeled packet; attach the credential to the TPI message. 1533 */ 1534 if (ira->ira_cred != NULL) 1535 mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid); 1536 1537 eager->tcp_conn.tcp_eager_conn_ind = tpi_mp; 1538 1539 /* Inherit the listener's SSL protection state */ 1540 if ((eager->tcp_kssl_ent = listener->tcp_kssl_ent) != NULL) { 1541 kssl_hold_ent(eager->tcp_kssl_ent); 1542 eager->tcp_kssl_pending = B_TRUE; 1543 } 1544 1545 /* Inherit the listener's non-STREAMS flag */ 1546 if (IPCL_IS_NONSTR(lconnp)) { 1547 econnp->conn_flags |= IPCL_NONSTR; 1548 } 1549 1550 ASSERT(eager->tcp_ordrel_mp == NULL); 1551 1552 if (!IPCL_IS_NONSTR(econnp)) { 1553 /* 1554 * Pre-allocate the T_ordrel_ind mblk for TPI socket so that 1555 * at close time, we will always have that to send up. 1556 * Otherwise, we need to do special handling in case the 1557 * allocation fails at that time. 1558 */ 1559 if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL) 1560 goto error3; 1561 } 1562 /* 1563 * Now that the IP addresses and ports are setup in econnp we 1564 * can do the IPsec policy work. 1565 */ 1566 if (ira->ira_flags & IRAF_IPSEC_SECURE) { 1567 if (lconnp->conn_policy != NULL) { 1568 /* 1569 * Inherit the policy from the listener; use 1570 * actions from ira 1571 */ 1572 if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) { 1573 CONN_DEC_REF(econnp); 1574 freemsg(mp); 1575 goto error3; 1576 } 1577 } 1578 } 1579 1580 /* 1581 * tcp_set_destination() may set tcp_rwnd according to the route 1582 * metrics. If it does not, the eager's receive window will be set 1583 * to the listener's receive window later in this function. 
1584 */ 1585 eager->tcp_rwnd = 0; 1586 1587 if (is_system_labeled()) { 1588 ip_xmit_attr_t *ixa = econnp->conn_ixa; 1589 1590 ASSERT(ira->ira_tsl != NULL); 1591 /* Discard any old label */ 1592 if (ixa->ixa_free_flags & IXA_FREE_TSL) { 1593 ASSERT(ixa->ixa_tsl != NULL); 1594 label_rele(ixa->ixa_tsl); 1595 ixa->ixa_free_flags &= ~IXA_FREE_TSL; 1596 ixa->ixa_tsl = NULL; 1597 } 1598 if ((lconnp->conn_mlp_type != mlptSingle || 1599 lconnp->conn_mac_mode != CONN_MAC_DEFAULT) && 1600 ira->ira_tsl != NULL) { 1601 /* 1602 * If this is an MLP connection or a MAC-Exempt 1603 * connection with an unlabeled node, packets are to be 1604 * exchanged using the security label of the received 1605 * SYN packet instead of the server application's label. 1606 * tsol_check_dest called from ip_set_destination 1607 * might later update TSF_UNLABELED by replacing 1608 * ixa_tsl with a new label. 1609 */ 1610 label_hold(ira->ira_tsl); 1611 ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl); 1612 DTRACE_PROBE2(mlp_syn_accept, conn_t *, 1613 econnp, ts_label_t *, ixa->ixa_tsl) 1614 } else { 1615 ixa->ixa_tsl = crgetlabel(econnp->conn_cred); 1616 DTRACE_PROBE2(syn_accept, conn_t *, 1617 econnp, ts_label_t *, ixa->ixa_tsl) 1618 } 1619 /* 1620 * conn_connect() called from tcp_set_destination will verify 1621 * the destination is allowed to receive packets at the 1622 * security label of the SYN-ACK we are generating. As part of 1623 * that, tsol_check_dest() may create a new effective label for 1624 * this connection. 1625 * Finally conn_connect() will call conn_update_label. 1626 * All that remains for TCP to do is to call 1627 * conn_build_hdr_template which is done as part of 1628 * tcp_set_destination. 1629 */ 1630 } 1631 1632 /* 1633 * Since we will clear tcp_listener before we clear tcp_detached 1634 * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress 1635 * so we can tell a TCP_DETACHED_NONEAGER apart. 1636 */ 1637 eager->tcp_hard_binding = B_TRUE; 1638 1639 tcp_bind_hash_insert(&tcps->tcps_bind_fanout[ 1640 TCP_BIND_HASH(econnp->conn_lport)], eager, 0); 1641 1642 CL_INET_CONNECT(econnp, B_FALSE, err); 1643 if (err != 0) { 1644 tcp_bind_hash_remove(eager); 1645 goto error3; 1646 } 1647 1648 SOCK_CONNID_BUMP(eager->tcp_connid); 1649 1650 /* 1651 * Adapt our mss, ttl, ... based on the remote address. 1652 */ 1653 1654 if (tcp_set_destination(eager) != 0) { 1655 TCPS_BUMP_MIB(tcps, tcpAttemptFails); 1656 /* Undo the bind_hash_insert */ 1657 tcp_bind_hash_remove(eager); 1658 goto error3; 1659 } 1660 1661 /* Process all TCP options. */ 1662 tcp_process_options(eager, tcpha); 1663 1664 /* Is the other end ECN capable? */ 1665 if (tcps->tcps_ecn_permitted >= 1 && 1666 (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) { 1667 eager->tcp_ecn_ok = B_TRUE; 1668 } 1669 1670 /* 1671 * The listener's conn_rcvbuf should be the default window size or a 1672 * window size changed via SO_RCVBUF option. First round up the 1673 * eager's tcp_rwnd to the nearest MSS. Then find out the window 1674 * scale option value if needed. Call tcp_rwnd_set() to finish the 1675 * setting. 1676 * 1677 * Note if there is a rpipe metric associated with the remote host, 1678 * we should not inherit receive window size from listener. 1679 */ 1680 eager->tcp_rwnd = MSS_ROUNDUP( 1681 (eager->tcp_rwnd == 0 ? 
econnp->conn_rcvbuf : 1682 eager->tcp_rwnd), eager->tcp_mss); 1683 if (eager->tcp_snd_ws_ok) 1684 tcp_set_ws_value(eager); 1685 /* 1686 * Note that this is the only place tcp_rwnd_set() is called for 1687 * accepting a connection. We need to call it here instead of 1688 * after the 3-way handshake because we need to tell the other 1689 * side our rwnd in the SYN-ACK segment. 1690 */ 1691 (void) tcp_rwnd_set(eager, eager->tcp_rwnd); 1692 1693 ASSERT(eager->tcp_connp->conn_rcvbuf != 0 && 1694 eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd); 1695 1696 ASSERT(econnp->conn_rcvbuf != 0 && 1697 econnp->conn_rcvbuf == eager->tcp_rwnd); 1698 1699 /* Put a ref on the listener for the eager. */ 1700 CONN_INC_REF(lconnp); 1701 mutex_enter(&listener->tcp_eager_lock); 1702 listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager; 1703 eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0; 1704 listener->tcp_eager_next_q0 = eager; 1705 eager->tcp_eager_prev_q0 = listener; 1706 1707 /* Set tcp_listener before adding it to tcp_conn_fanout */ 1708 eager->tcp_listener = listener; 1709 eager->tcp_saved_listener = listener; 1710 1711 /* 1712 * Set tcp_listen_cnt so that when the connection is done, the counter 1713 * is decremented. 1714 */ 1715 eager->tcp_listen_cnt = listener->tcp_listen_cnt; 1716 1717 /* 1718 * Tag this detached tcp vector for later retrieval 1719 * by our listener client in tcp_accept(). 1720 */ 1721 eager->tcp_conn_req_seqnum = listener->tcp_conn_req_seqnum; 1722 listener->tcp_conn_req_cnt_q0++; 1723 if (++listener->tcp_conn_req_seqnum == -1) { 1724 /* 1725 * -1 is "special" and defined in TPI as something 1726 * that should never be used in T_CONN_IND 1727 */ 1728 ++listener->tcp_conn_req_seqnum; 1729 } 1730 mutex_exit(&listener->tcp_eager_lock); 1731 1732 if (listener->tcp_syn_defense) { 1733 /* Don't drop the SYN that comes from a good IP source */ 1734 ipaddr_t *addr_cache; 1735 1736 addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache); 1737 if (addr_cache != NULL && econnp->conn_faddr_v4 == 1738 addr_cache[IP_ADDR_CACHE_HASH(econnp->conn_faddr_v4)]) { 1739 eager->tcp_dontdrop = B_TRUE; 1740 } 1741 } 1742 1743 /* 1744 * We need to insert the eager in its own perimeter but as soon 1745 * as we do that, we expose the eager to the classifier and 1746 * should not touch any field outside the eager's perimeter. 1747 * So do all the work necessary before inserting the eager 1748 * in its own perimeter. Be optimistic that conn_connect() 1749 * will succeed but undo everything if it fails. 1750 */ 1751 seg_seq = ntohl(tcpha->tha_seq); 1752 eager->tcp_irs = seg_seq; 1753 eager->tcp_rack = seg_seq; 1754 eager->tcp_rnxt = seg_seq + 1; 1755 eager->tcp_tcpha->tha_ack = htonl(eager->tcp_rnxt); 1756 TCPS_BUMP_MIB(tcps, tcpPassiveOpens); 1757 eager->tcp_state = TCPS_SYN_RCVD; 1758 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 1759 econnp->conn_ixa, void, NULL, tcp_t *, eager, void, NULL, 1760 int32_t, TCPS_LISTEN); 1761 1762 mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss, 1763 NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE); 1764 if (mp1 == NULL) { 1765 /* 1766 * Increment the ref count as we are going to 1767 * enqueueing an mp in squeue 1768 */ 1769 CONN_INC_REF(econnp); 1770 goto error; 1771 } 1772 1773 /* 1774 * We need to start the rto timer. In normal case, we start 1775 * the timer after sending the packet on the wire (or at 1776 * least believing that packet was sent by waiting for 1777 * conn_ip_output() to return). 
Since this is the first packet 1778 * being sent on the wire for the eager, our initial tcp_rto 1779 * is at least tcp_rexmit_interval_min which is a fairly 1780 * large value to allow the algorithm to adjust slowly to large 1781 * fluctuations of RTT during first few transmissions. 1782 * 1783 * Starting the timer first and then sending the packet in this 1784 * case shouldn't make much difference since tcp_rexmit_interval_min 1785 * is of the order of several 100ms and starting the timer 1786 * first and then sending the packet will result in a difference 1787 * of a few microseconds. 1788 * 1789 * Without this optimization, we are forced to hold the fanout 1790 * lock across the ipcl_bind_insert() and sending the packet 1791 * so that we don't race against an incoming packet (maybe RST) 1792 * for this eager. 1793 * 1794 * It is necessary to acquire an extra reference on the eager 1795 * at this point and hold it until after tcp_send_data() to 1796 * ensure against an eager close race. 1797 */ 1798 1799 CONN_INC_REF(econnp); 1800 1801 TCP_TIMER_RESTART(eager, eager->tcp_rto); 1802 1803 /* 1804 * Insert the eager in its own perimeter now. We are ready to deal 1805 * with any packets on the eager. 1806 */ 1807 if (ipcl_conn_insert(econnp) != 0) 1808 goto error; 1809 1810 ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp); 1811 freemsg(mp); 1812 /* 1813 * Send the SYN-ACK. Use the right squeue so that conn_ixa is 1814 * only used by one thread at a time. 1815 */ 1816 if (econnp->conn_sqp == lconnp->conn_sqp) { 1817 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, 1818 econnp->conn_ixa, __dtrace_tcp_void_ip_t *, mp1->b_rptr, 1819 tcp_t *, eager, __dtrace_tcp_tcph_t *, 1820 &mp1->b_rptr[econnp->conn_ixa->ixa_ip_hdr_length]); 1821 (void) conn_ip_output(mp1, econnp->conn_ixa); 1822 CONN_DEC_REF(econnp); 1823 } else { 1824 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_send_synack, 1825 econnp, NULL, SQ_PROCESS, SQTAG_TCP_SEND_SYNACK); 1826 } 1827 return; 1828 error: 1829 freemsg(mp1); 1830 eager->tcp_closemp_used = B_TRUE; 1831 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1832 mp1 = &eager->tcp_closemp; 1833 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill, 1834 econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2); 1835 1836 /* 1837 * If a connection already exists, send the mp to that connection so 1838 * that it can be appropriately dealt with. 1839 */ 1840 ipst = tcps->tcps_netstack->netstack_ip; 1841 1842 if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) { 1843 if (!IPCL_IS_CONNECTED(econnp)) { 1844 /* 1845 * Something bad happened. ipcl_conn_insert() 1846 * failed because a connection already existed 1847 * in the connected hash but we can't find it 1848 * anymore (someone blew it away). Just 1849 * free this message and hope that the remote 1850 * will retransmit, at which time the SYN can be 1851 * treated as a new connection or dealt with by 1852 * a TH_RST if a connection already exists.
1853 */ 1854 CONN_DEC_REF(econnp); 1855 freemsg(mp); 1856 } else { 1857 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data, 1858 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1); 1859 } 1860 } else { 1861 /* Nobody wants this packet */ 1862 freemsg(mp); 1863 } 1864 return; 1865 error3: 1866 CONN_DEC_REF(econnp); 1867 error2: 1868 freemsg(mp); 1869 if (tlc_set) 1870 atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1); 1871 } 1872 1873 /* 1874 * In an ideal case of vertical partition in NUMA architecture, its 1875 * beneficial to have the listener and all the incoming connections 1876 * tied to the same squeue. The other constraint is that incoming 1877 * connections should be tied to the squeue attached to interrupted 1878 * CPU for obvious locality reason so this leaves the listener to 1879 * be tied to the same squeue. Our only problem is that when listener 1880 * is binding, the CPU that will get interrupted by the NIC whose 1881 * IP address the listener is binding to is not even known. So 1882 * the code below allows us to change that binding at the time the 1883 * CPU is interrupted by virtue of incoming connection's squeue. 1884 * 1885 * This is usefull only in case of a listener bound to a specific IP 1886 * address. For other kind of listeners, they get bound the 1887 * very first time and there is no attempt to rebind them. 1888 */ 1889 void 1890 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2, 1891 ip_recv_attr_t *ira) 1892 { 1893 conn_t *connp = (conn_t *)arg; 1894 squeue_t *sqp = (squeue_t *)arg2; 1895 squeue_t *new_sqp; 1896 uint32_t conn_flags; 1897 1898 /* 1899 * IP sets ira_sqp to either the senders conn_sqp (for loopback) 1900 * or based on the ring (for packets from GLD). Otherwise it is 1901 * set based on lbolt i.e., a somewhat random number. 1902 */ 1903 ASSERT(ira->ira_sqp != NULL); 1904 new_sqp = ira->ira_sqp; 1905 1906 if (connp->conn_fanout == NULL) 1907 goto done; 1908 1909 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) { 1910 mutex_enter(&connp->conn_fanout->connf_lock); 1911 mutex_enter(&connp->conn_lock); 1912 /* 1913 * No one from read or write side can access us now 1914 * except for already queued packets on this squeue. 1915 * But since we haven't changed the squeue yet, they 1916 * can't execute. If they are processed after we have 1917 * changed the squeue, they are sent back to the 1918 * correct squeue down below. 1919 * But a listner close can race with processing of 1920 * incoming SYN. If incoming SYN processing changes 1921 * the squeue then the listener close which is waiting 1922 * to enter the squeue would operate on the wrong 1923 * squeue. Hence we don't change the squeue here unless 1924 * the refcount is exactly the minimum refcount. The 1925 * minimum refcount of 4 is counted as - 1 each for 1926 * TCP and IP, 1 for being in the classifier hash, and 1927 * 1 for the mblk being processed. 
1928 */ 1929 1930 if (connp->conn_ref != 4 || 1931 connp->conn_tcp->tcp_state != TCPS_LISTEN) { 1932 mutex_exit(&connp->conn_lock); 1933 mutex_exit(&connp->conn_fanout->connf_lock); 1934 goto done; 1935 } 1936 if (connp->conn_sqp != new_sqp) { 1937 while (connp->conn_sqp != new_sqp) 1938 (void) casptr(&connp->conn_sqp, sqp, new_sqp); 1939 /* No special MT issues for outbound ixa_sqp hint */ 1940 connp->conn_ixa->ixa_sqp = new_sqp; 1941 } 1942 1943 do { 1944 conn_flags = connp->conn_flags; 1945 conn_flags |= IPCL_FULLY_BOUND; 1946 (void) cas32(&connp->conn_flags, connp->conn_flags, 1947 conn_flags); 1948 } while (!(connp->conn_flags & IPCL_FULLY_BOUND)); 1949 1950 mutex_exit(&connp->conn_fanout->connf_lock); 1951 mutex_exit(&connp->conn_lock); 1952 1953 /* 1954 * Assume we have picked a good squeue for the listener. Make 1955 * subsequent SYNs not try to change the squeue. 1956 */ 1957 connp->conn_recv = tcp_input_listener; 1958 } 1959 1960 done: 1961 if (connp->conn_sqp != sqp) { 1962 CONN_INC_REF(connp); 1963 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp, 1964 ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND); 1965 } else { 1966 tcp_input_listener(connp, mp, sqp, ira); 1967 } 1968 } 1969 1970 /* 1971 * Send up all messages queued on tcp_rcv_list. 1972 */ 1973 uint_t 1974 tcp_rcv_drain(tcp_t *tcp) 1975 { 1976 mblk_t *mp; 1977 uint_t ret = 0; 1978 #ifdef DEBUG 1979 uint_t cnt = 0; 1980 #endif 1981 queue_t *q = tcp->tcp_connp->conn_rq; 1982 1983 /* Can't drain on an eager connection */ 1984 if (tcp->tcp_listener != NULL) 1985 return (ret); 1986 1987 /* Can't be a non-STREAMS connection */ 1988 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp)); 1989 1990 /* No need for the push timer now. */ 1991 if (tcp->tcp_push_tid != 0) { 1992 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 1993 tcp->tcp_push_tid = 0; 1994 } 1995 1996 /* 1997 * Handle two cases here: we are currently fused or we were 1998 * previously fused and have some urgent data to be delivered 1999 * upstream. The latter happens because we either ran out of 2000 * memory or were detached and therefore sending the SIGURG was 2001 * deferred until this point. In either case we pass control 2002 * over to tcp_fuse_rcv_drain() since it may need to complete 2003 * some work. 2004 */ 2005 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) { 2006 ASSERT(IPCL_IS_NONSTR(tcp->tcp_connp) || 2007 tcp->tcp_fused_sigurg_mp != NULL); 2008 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL : 2009 &tcp->tcp_fused_sigurg_mp)) 2010 return (ret); 2011 } 2012 2013 while ((mp = tcp->tcp_rcv_list) != NULL) { 2014 tcp->tcp_rcv_list = mp->b_next; 2015 mp->b_next = NULL; 2016 #ifdef DEBUG 2017 cnt += msgdsize(mp); 2018 #endif 2019 /* Does this need SSL processing first? */ 2020 if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) { 2021 DTRACE_PROBE1(kssl_mblk__ksslinput_rcvdrain, 2022 mblk_t *, mp); 2023 tcp_kssl_input(tcp, mp, NULL); 2024 continue; 2025 } 2026 putnext(q, mp); 2027 } 2028 #ifdef DEBUG 2029 ASSERT(cnt == tcp->tcp_rcv_cnt); 2030 #endif 2031 tcp->tcp_rcv_last_head = NULL; 2032 tcp->tcp_rcv_last_tail = NULL; 2033 tcp->tcp_rcv_cnt = 0; 2034 2035 if (canputnext(q)) 2036 return (tcp_rwnd_reopen(tcp)); 2037 2038 return (ret); 2039 } 2040 2041 /* 2042 * Queue data on tcp_rcv_list which is a b_next chain. 2043 * tcp_rcv_last_head/tail is the last element of this chain. 2044 * Each element of the chain is a b_cont chain. 2045 * 2046 * M_DATA messages are added to the current element. 2047 * Other messages are added as new (b_next) elements. 
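 *
 * An illustrative sketch of the resulting shape:
 *
 *	tcp_rcv_list -> elem1 -b_next-> elem2 -b_next-> elem3  (tcp_rcv_last_head)
 *	                  |               |               |
 *	                b_cont          b_cont          b_cont
 *	                  v               v               v
 *	                 ...             ...            tail   (tcp_rcv_last_tail)
 *
 * tcp_rcv_last_head is the head mblk of the newest b_next element and
 * tcp_rcv_last_tail is the last b_cont mblk of that element, so new data
 * can be appended without walking the whole chain.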
2048 */ 2049 void 2050 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr) 2051 { 2052 ASSERT(seg_len == msgdsize(mp)); 2053 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 2054 2055 if (is_system_labeled()) { 2056 ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL); 2057 /* 2058 * Provide for protocols above TCP such as RPC. NOPID leaves 2059 * db_cpid unchanged. 2060 * The cred could have already been set. 2061 */ 2062 if (cr != NULL) 2063 mblk_setcred(mp, cr, NOPID); 2064 } 2065 2066 if (tcp->tcp_rcv_list == NULL) { 2067 ASSERT(tcp->tcp_rcv_last_head == NULL); 2068 tcp->tcp_rcv_list = mp; 2069 tcp->tcp_rcv_last_head = mp; 2070 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 2071 tcp->tcp_rcv_last_tail->b_cont = mp; 2072 } else { 2073 tcp->tcp_rcv_last_head->b_next = mp; 2074 tcp->tcp_rcv_last_head = mp; 2075 } 2076 2077 while (mp->b_cont) 2078 mp = mp->b_cont; 2079 2080 tcp->tcp_rcv_last_tail = mp; 2081 tcp->tcp_rcv_cnt += seg_len; 2082 tcp->tcp_rwnd -= seg_len; 2083 } 2084 2085 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 2086 mblk_t * 2087 tcp_ack_mp(tcp_t *tcp) 2088 { 2089 uint32_t seq_no; 2090 tcp_stack_t *tcps = tcp->tcp_tcps; 2091 conn_t *connp = tcp->tcp_connp; 2092 2093 /* 2094 * There are a few cases to be considered while setting the sequence no. 2095 * Essentially, we can come here while processing an unacceptable pkt 2096 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 2097 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 2098 * If we are here for a zero window probe, stick with suna. In all 2099 * other cases, we check if suna + swnd encompasses snxt and set 2100 * the sequence number to snxt, if so. If snxt falls outside the 2101 * window (the receiver probably shrunk its window), we will go with 2102 * suna + swnd, otherwise the sequence no will be unacceptable to the 2103 * receiver. 2104 */ 2105 if (tcp->tcp_zero_win_probe) { 2106 seq_no = tcp->tcp_suna; 2107 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 2108 ASSERT(tcp->tcp_swnd == 0); 2109 seq_no = tcp->tcp_snxt; 2110 } else { 2111 seq_no = SEQ_GT(tcp->tcp_snxt, 2112 (tcp->tcp_suna + tcp->tcp_swnd)) ? 2113 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 2114 } 2115 2116 if (tcp->tcp_valid_bits) { 2117 /* 2118 * For the complex case where we have to send some 2119 * controls (FIN or SYN), let tcp_xmit_mp do it. 2120 */ 2121 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 2122 NULL, B_FALSE)); 2123 } else { 2124 /* Generate a simple ACK */ 2125 int data_length; 2126 uchar_t *rptr; 2127 tcpha_t *tcpha; 2128 mblk_t *mp1; 2129 int32_t total_hdr_len; 2130 int32_t tcp_hdr_len; 2131 int32_t num_sack_blk = 0; 2132 int32_t sack_opt_len; 2133 ip_xmit_attr_t *ixa = connp->conn_ixa; 2134 2135 /* 2136 * Allocate space for TCP + IP headers 2137 * and link-level header 2138 */ 2139 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 2140 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 2141 tcp->tcp_num_sack_blk); 2142 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 2143 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 2144 total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len; 2145 tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len; 2146 } else { 2147 total_hdr_len = connp->conn_ht_iphc_len; 2148 tcp_hdr_len = connp->conn_ht_ulp_len; 2149 } 2150 mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 2151 if (!mp1) 2152 return (NULL); 2153 2154 /* Update the latest receive window size in TCP header. 
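 * The TCP window field is only 16 bits wide, so the value placed in the
 * header is the receive window scaled down by tcp_rcv_ws.  For
 * illustration (numbers chosen arbitrarily): with tcp_rwnd = 262144 and
 * tcp_rcv_ws = 3, the header carries htons(262144 >> 3) = htons(32768),
 * and the peer reconstructs 32768 << 3 = 262144.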
*/ 2155 tcp->tcp_tcpha->tha_win = 2156 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws); 2157 /* copy in prototype TCP + IP header */ 2158 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 2159 mp1->b_rptr = rptr; 2160 mp1->b_wptr = rptr + total_hdr_len; 2161 bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len); 2162 2163 tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length]; 2164 2165 /* Set the TCP sequence number. */ 2166 tcpha->tha_seq = htonl(seq_no); 2167 2168 /* Set up the TCP flag field. */ 2169 tcpha->tha_flags = (uchar_t)TH_ACK; 2170 if (tcp->tcp_ecn_echo_on) 2171 tcpha->tha_flags |= TH_ECE; 2172 2173 tcp->tcp_rack = tcp->tcp_rnxt; 2174 tcp->tcp_rack_cnt = 0; 2175 2176 /* fill in timestamp option if in use */ 2177 if (tcp->tcp_snd_ts_ok) { 2178 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH; 2179 2180 U32_TO_BE32(llbolt, 2181 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4); 2182 U32_TO_BE32(tcp->tcp_ts_recent, 2183 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8); 2184 } 2185 2186 /* Fill in SACK options */ 2187 if (num_sack_blk > 0) { 2188 uchar_t *wptr = (uchar_t *)tcpha + 2189 connp->conn_ht_ulp_len; 2190 sack_blk_t *tmp; 2191 int32_t i; 2192 2193 wptr[0] = TCPOPT_NOP; 2194 wptr[1] = TCPOPT_NOP; 2195 wptr[2] = TCPOPT_SACK; 2196 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 2197 sizeof (sack_blk_t); 2198 wptr += TCPOPT_REAL_SACK_LEN; 2199 2200 tmp = tcp->tcp_sack_list; 2201 for (i = 0; i < num_sack_blk; i++) { 2202 U32_TO_BE32(tmp[i].begin, wptr); 2203 wptr += sizeof (tcp_seq); 2204 U32_TO_BE32(tmp[i].end, wptr); 2205 wptr += sizeof (tcp_seq); 2206 } 2207 tcpha->tha_offset_and_reserved += 2208 ((num_sack_blk * 2 + 1) << 4); 2209 } 2210 2211 ixa->ixa_pktlen = total_hdr_len; 2212 2213 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2214 ((ipha_t *)rptr)->ipha_length = htons(total_hdr_len); 2215 } else { 2216 ip6_t *ip6 = (ip6_t *)rptr; 2217 2218 ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN); 2219 } 2220 2221 /* 2222 * Prime pump for checksum calculation in IP. Include the 2223 * adjustment for a source route if any. 2224 */ 2225 data_length = tcp_hdr_len + connp->conn_sum; 2226 data_length = (data_length >> 16) + (data_length & 0xFFFF); 2227 tcpha->tha_sum = htons(data_length); 2228 2229 if (tcp->tcp_ip_forward_progress) { 2230 tcp->tcp_ip_forward_progress = B_FALSE; 2231 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF; 2232 } else { 2233 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF; 2234 } 2235 return (mp1); 2236 } 2237 } 2238 2239 /* 2240 * Handle M_DATA messages from IP. Its called directly from IP via 2241 * squeue for received IP packets. 2242 * 2243 * The first argument is always the connp/tcp to which the mp belongs. 2244 * There are no exceptions to this rule. The caller has already put 2245 * a reference on this connp/tcp and once tcp_input_data() returns, 2246 * the squeue will do the refrele. 2247 * 2248 * The TH_SYN for the listener directly go to tcp_input_listener via 2249 * squeue. ICMP errors go directly to tcp_icmp_input(). 
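 *
 * For reference, callers normally hand a segment to this function through
 * the connection's squeue, as is done elsewhere in this file, e.g.:
 *
 *	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_input_data,
 *	    connp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1);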
2250 * 2251 * sqp: NULL = recursive, sqp != NULL means called from squeue 2252 */ 2253 void 2254 tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 2255 { 2256 int32_t bytes_acked; 2257 int32_t gap; 2258 mblk_t *mp1; 2259 uint_t flags; 2260 uint32_t new_swnd = 0; 2261 uchar_t *iphdr; 2262 uchar_t *rptr; 2263 int32_t rgap; 2264 uint32_t seg_ack; 2265 int seg_len; 2266 uint_t ip_hdr_len; 2267 uint32_t seg_seq; 2268 tcpha_t *tcpha; 2269 int urp; 2270 tcp_opt_t tcpopt; 2271 ip_pkt_t ipp; 2272 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 2273 uint32_t cwnd; 2274 uint32_t add; 2275 int npkt; 2276 int mss; 2277 conn_t *connp = (conn_t *)arg; 2278 squeue_t *sqp = (squeue_t *)arg2; 2279 tcp_t *tcp = connp->conn_tcp; 2280 tcp_stack_t *tcps = tcp->tcp_tcps; 2281 2282 /* 2283 * RST from fused tcp loopback peer should trigger an unfuse. 2284 */ 2285 if (tcp->tcp_fused) { 2286 TCP_STAT(tcps, tcp_fusion_aborted); 2287 tcp_unfuse(tcp); 2288 } 2289 2290 iphdr = mp->b_rptr; 2291 rptr = mp->b_rptr; 2292 ASSERT(OK_32PTR(rptr)); 2293 2294 ip_hdr_len = ira->ira_ip_hdr_length; 2295 if (connp->conn_recv_ancillary.crb_all != 0) { 2296 /* 2297 * Record packet information in the ip_pkt_t 2298 */ 2299 ipp.ipp_fields = 0; 2300 if (ira->ira_flags & IRAF_IS_IPV4) { 2301 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipp, 2302 B_FALSE); 2303 } else { 2304 uint8_t nexthdrp; 2305 2306 /* 2307 * IPv6 packets can only be received by applications 2308 * that are prepared to receive IPv6 addresses. 2309 * The IP fanout must ensure this. 2310 */ 2311 ASSERT(connp->conn_family == AF_INET6); 2312 2313 (void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp, 2314 &nexthdrp); 2315 ASSERT(nexthdrp == IPPROTO_TCP); 2316 2317 /* Could have caused a pullup? */ 2318 iphdr = mp->b_rptr; 2319 rptr = mp->b_rptr; 2320 } 2321 } 2322 ASSERT(DB_TYPE(mp) == M_DATA); 2323 ASSERT(mp->b_next == NULL); 2324 2325 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2326 seg_seq = ntohl(tcpha->tha_seq); 2327 seg_ack = ntohl(tcpha->tha_ack); 2328 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 2329 seg_len = (int)(mp->b_wptr - rptr) - 2330 (ip_hdr_len + TCP_HDR_LENGTH(tcpha)); 2331 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 2332 do { 2333 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 2334 (uintptr_t)INT_MAX); 2335 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 2336 } while ((mp1 = mp1->b_cont) != NULL && 2337 mp1->b_datap->db_type == M_DATA); 2338 } 2339 2340 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa, 2341 __dtrace_tcp_void_ip_t *, iphdr, tcp_t *, tcp, 2342 __dtrace_tcp_tcph_t *, tcpha); 2343 2344 if (tcp->tcp_state == TCPS_TIME_WAIT) { 2345 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 2346 seg_len, tcpha, ira); 2347 return; 2348 } 2349 2350 if (sqp != NULL) { 2351 /* 2352 * This is the correct place to update tcp_last_recv_time. Note 2353 * that it is also updated for tcp structure that belongs to 2354 * global and listener queues which do not really need updating. 2355 * But that should not cause any harm. And it is updated for 2356 * all kinds of incoming segments, not only for data segments. 
2357 */ 2358 tcp->tcp_last_recv_time = LBOLT_FASTPATH; 2359 } 2360 2361 flags = (unsigned int)tcpha->tha_flags & 0xFF; 2362 2363 BUMP_LOCAL(tcp->tcp_ibsegs); 2364 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2365 2366 if ((flags & TH_URG) && sqp != NULL) { 2367 /* 2368 * TCP can't handle urgent pointers that arrive before 2369 * the connection has been accept()ed since it can't 2370 * buffer OOB data. Discard segment if this happens. 2371 * 2372 * We can't just rely on a non-null tcp_listener to indicate 2373 * that the accept() has completed since unlinking of the 2374 * eager and completion of the accept are not atomic. 2375 * tcp_detached, when it is not set (B_FALSE) indicates 2376 * that the accept() has completed. 2377 * 2378 * Nor can it reassemble urgent pointers, so discard 2379 * if it's not the next segment expected. 2380 * 2381 * Otherwise, collapse chain into one mblk (discard if 2382 * that fails). This makes sure the headers, retransmitted 2383 * data, and new data all are in the same mblk. 2384 */ 2385 ASSERT(mp != NULL); 2386 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 2387 freemsg(mp); 2388 return; 2389 } 2390 /* Update pointers into message */ 2391 iphdr = rptr = mp->b_rptr; 2392 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2393 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 2394 /* 2395 * Since we can't handle any data with this urgent 2396 * pointer that is out of sequence, we expunge 2397 * the data. This allows us to still register 2398 * the urgent mark and generate the M_PCSIG, 2399 * which we can do. 2400 */ 2401 mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2402 seg_len = 0; 2403 } 2404 } 2405 2406 switch (tcp->tcp_state) { 2407 case TCPS_SYN_SENT: 2408 if (connp->conn_final_sqp == NULL && 2409 tcp_outbound_squeue_switch && sqp != NULL) { 2410 ASSERT(connp->conn_initial_sqp == connp->conn_sqp); 2411 connp->conn_final_sqp = sqp; 2412 if (connp->conn_final_sqp != connp->conn_sqp) { 2413 DTRACE_PROBE1(conn__final__sqp__switch, 2414 conn_t *, connp); 2415 CONN_INC_REF(connp); 2416 SQUEUE_SWITCH(connp, connp->conn_final_sqp); 2417 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 2418 tcp_input_data, connp, ira, ip_squeue_flag, 2419 SQTAG_CONNECT_FINISH); 2420 return; 2421 } 2422 DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp); 2423 } 2424 if (flags & TH_ACK) { 2425 /* 2426 * Note that our stack cannot send data before a 2427 * connection is established, therefore the 2428 * following check is valid. Otherwise, it has 2429 * to be changed. 2430 */ 2431 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 2432 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2433 freemsg(mp); 2434 if (flags & TH_RST) 2435 return; 2436 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 2437 tcp, seg_ack, 0, TH_RST); 2438 return; 2439 } 2440 ASSERT(tcp->tcp_suna + 1 == seg_ack); 2441 } 2442 if (flags & TH_RST) { 2443 if (flags & TH_ACK) { 2444 DTRACE_TCP5(connect__refused, mblk_t *, NULL, 2445 ip_xmit_attr_t *, connp->conn_ixa, 2446 void_ip_t *, iphdr, tcp_t *, tcp, 2447 tcph_t *, tcpha); 2448 (void) tcp_clean_death(tcp, ECONNREFUSED); 2449 } 2450 freemsg(mp); 2451 return; 2452 } 2453 if (!(flags & TH_SYN)) { 2454 freemsg(mp); 2455 return; 2456 } 2457 2458 /* Process all TCP options. */ 2459 tcp_process_options(tcp, tcpha); 2460 /* 2461 * The following changes our rwnd to be a multiple of the 2462 * MIN(peer MSS, our MSS) for performance reason. 2463 */ 2464 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf, 2465 tcp->tcp_mss)); 2466 2467 /* Is the other end ECN capable? 
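 * Per RFC 3168, a peer that agrees to ECN answers our SYN with ECE set
 * and CWR clear in its SYN-ACK; any other combination below causes
 * tcp_ecn_ok to be turned back off.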
*/ 2468 if (tcp->tcp_ecn_ok) { 2469 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 2470 tcp->tcp_ecn_ok = B_FALSE; 2471 } 2472 } 2473 /* 2474 * Clear ECN flags because it may interfere with later 2475 * processing. 2476 */ 2477 flags &= ~(TH_ECE|TH_CWR); 2478 2479 tcp->tcp_irs = seg_seq; 2480 tcp->tcp_rack = seg_seq; 2481 tcp->tcp_rnxt = seg_seq + 1; 2482 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2483 if (!TCP_IS_DETACHED(tcp)) { 2484 /* Allocate room for SACK options if needed. */ 2485 connp->conn_wroff = connp->conn_ht_iphc_len; 2486 if (tcp->tcp_snd_sack_ok) 2487 connp->conn_wroff += TCPOPT_MAX_SACK_LEN; 2488 if (!tcp->tcp_loopback) 2489 connp->conn_wroff += tcps->tcps_wroff_xtra; 2490 2491 (void) proto_set_tx_wroff(connp->conn_rq, connp, 2492 connp->conn_wroff); 2493 } 2494 if (flags & TH_ACK) { 2495 /* 2496 * If we can't get the confirmation upstream, pretend 2497 * we didn't even see this one. 2498 * 2499 * XXX: how can we pretend we didn't see it if we 2500 * have updated rnxt et. al. 2501 * 2502 * For loopback we defer sending up the T_CONN_CON 2503 * until after some checks below. 2504 */ 2505 mp1 = NULL; 2506 /* 2507 * tcp_sendmsg() checks tcp_state without entering 2508 * the squeue so tcp_state should be updated before 2509 * sending up connection confirmation. Probe the 2510 * state change below when we are sure the connection 2511 * confirmation has been sent. 2512 */ 2513 tcp->tcp_state = TCPS_ESTABLISHED; 2514 if (!tcp_conn_con(tcp, iphdr, mp, 2515 tcp->tcp_loopback ? &mp1 : NULL, ira)) { 2516 tcp->tcp_state = TCPS_SYN_SENT; 2517 freemsg(mp); 2518 return; 2519 } 2520 TCPS_CONN_INC(tcps); 2521 /* SYN was acked - making progress */ 2522 tcp->tcp_ip_forward_progress = B_TRUE; 2523 2524 /* One for the SYN */ 2525 tcp->tcp_suna = tcp->tcp_iss + 1; 2526 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 2527 2528 /* 2529 * If SYN was retransmitted, need to reset all 2530 * retransmission info. This is because this 2531 * segment will be treated as a dup ACK. 2532 */ 2533 if (tcp->tcp_rexmit) { 2534 tcp->tcp_rexmit = B_FALSE; 2535 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 2536 tcp->tcp_rexmit_max = tcp->tcp_snxt; 2537 tcp->tcp_snd_burst = tcp->tcp_localnet ? 2538 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 2539 tcp->tcp_ms_we_have_waited = 0; 2540 2541 /* 2542 * Set tcp_cwnd back to 1 MSS, per 2543 * recommendation from 2544 * draft-floyd-incr-init-win-01.txt, 2545 * Increasing TCP's Initial Window. 2546 */ 2547 tcp->tcp_cwnd = tcp->tcp_mss; 2548 } 2549 2550 tcp->tcp_swl1 = seg_seq; 2551 tcp->tcp_swl2 = seg_ack; 2552 2553 new_swnd = ntohs(tcpha->tha_win); 2554 tcp->tcp_swnd = new_swnd; 2555 if (new_swnd > tcp->tcp_max_swnd) 2556 tcp->tcp_max_swnd = new_swnd; 2557 2558 /* 2559 * Always send the three-way handshake ack immediately 2560 * in order to make the connection complete as soon as 2561 * possible on the accepting host. 2562 */ 2563 flags |= TH_ACK_NEEDED; 2564 2565 /* 2566 * Trace connect-established here. 2567 */ 2568 DTRACE_TCP5(connect__established, mblk_t *, NULL, 2569 ip_xmit_attr_t *, tcp->tcp_connp->conn_ixa, 2570 void_ip_t *, iphdr, tcp_t *, tcp, tcph_t *, tcpha); 2571 2572 /* Trace change from SYN_SENT -> ESTABLISHED here */ 2573 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2574 connp->conn_ixa, void, NULL, tcp_t *, tcp, 2575 void, NULL, int32_t, TCPS_SYN_SENT); 2576 2577 /* 2578 * Special case for loopback. At this point we have 2579 * received SYN-ACK from the remote endpoint. 
In 2580 * order to ensure that both endpoints reach the 2581 * fused state prior to any data exchange, the final 2582 * ACK needs to be sent before we indicate T_CONN_CON 2583 * to the module upstream. 2584 */ 2585 if (tcp->tcp_loopback) { 2586 mblk_t *ack_mp; 2587 2588 ASSERT(!tcp->tcp_unfusable); 2589 ASSERT(mp1 != NULL); 2590 /* 2591 * For loopback, we always get a pure SYN-ACK 2592 * and only need to send back the final ACK 2593 * with no data (this is because the other 2594 * tcp is ours and we don't do T/TCP). This 2595 * final ACK triggers the passive side to 2596 * perform fusion in ESTABLISHED state. 2597 */ 2598 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 2599 if (tcp->tcp_ack_tid != 0) { 2600 (void) TCP_TIMER_CANCEL(tcp, 2601 tcp->tcp_ack_tid); 2602 tcp->tcp_ack_tid = 0; 2603 } 2604 tcp_send_data(tcp, ack_mp); 2605 BUMP_LOCAL(tcp->tcp_obsegs); 2606 TCPS_BUMP_MIB(tcps, tcpOutAck); 2607 2608 if (!IPCL_IS_NONSTR(connp)) { 2609 /* Send up T_CONN_CON */ 2610 if (ira->ira_cred != NULL) { 2611 mblk_setcred(mp1, 2612 ira->ira_cred, 2613 ira->ira_cpid); 2614 } 2615 putnext(connp->conn_rq, mp1); 2616 } else { 2617 (*connp->conn_upcalls-> 2618 su_connected) 2619 (connp->conn_upper_handle, 2620 tcp->tcp_connid, 2621 ira->ira_cred, 2622 ira->ira_cpid); 2623 freemsg(mp1); 2624 } 2625 2626 freemsg(mp); 2627 return; 2628 } 2629 /* 2630 * Forget fusion; we need to handle more 2631 * complex cases below. Send the deferred 2632 * T_CONN_CON message upstream and proceed 2633 * as usual. Mark this tcp as not capable 2634 * of fusion. 2635 */ 2636 TCP_STAT(tcps, tcp_fusion_unfusable); 2637 tcp->tcp_unfusable = B_TRUE; 2638 if (!IPCL_IS_NONSTR(connp)) { 2639 if (ira->ira_cred != NULL) { 2640 mblk_setcred(mp1, ira->ira_cred, 2641 ira->ira_cpid); 2642 } 2643 putnext(connp->conn_rq, mp1); 2644 } else { 2645 (*connp->conn_upcalls->su_connected) 2646 (connp->conn_upper_handle, 2647 tcp->tcp_connid, ira->ira_cred, 2648 ira->ira_cpid); 2649 freemsg(mp1); 2650 } 2651 } 2652 2653 /* 2654 * Check to see if there is data to be sent. If 2655 * yes, set the transmit flag. Then check to see 2656 * if received data processing needs to be done. 2657 * If not, go straight to xmit_check. This short 2658 * cut is OK as we don't support T/TCP. 2659 */ 2660 if (tcp->tcp_unsent) 2661 flags |= TH_XMIT_NEEDED; 2662 2663 if (seg_len == 0 && !(flags & TH_URG)) { 2664 freemsg(mp); 2665 goto xmit_check; 2666 } 2667 2668 flags &= ~TH_SYN; 2669 seg_seq++; 2670 break; 2671 } 2672 tcp->tcp_state = TCPS_SYN_RCVD; 2673 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2674 connp->conn_ixa, void_ip_t *, NULL, tcp_t *, tcp, 2675 tcph_t *, NULL, int32_t, TCPS_SYN_SENT); 2676 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss, 2677 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 2678 if (mp1 != NULL) { 2679 tcp_send_data(tcp, mp1); 2680 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 2681 } 2682 freemsg(mp); 2683 return; 2684 case TCPS_SYN_RCVD: 2685 if (flags & TH_ACK) { 2686 /* 2687 * In this state, a SYN|ACK packet is either bogus 2688 * because the other side must be ACKing our SYN which 2689 * indicates it has seen the ACK for their SYN and 2690 * shouldn't retransmit it or we're crossing SYNs 2691 * on active open. 2692 */ 2693 if ((flags & TH_SYN) && !tcp->tcp_active_open) { 2694 freemsg(mp); 2695 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn", 2696 tcp, seg_ack, 0, TH_RST); 2697 return; 2698 } 2699 /* 2700 * NOTE: RFC 793 pg. 
72 says this should be 2701 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt 2702 * but that would mean we have an ack that ignored 2703 * our SYN. 2704 */ 2705 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) || 2706 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2707 freemsg(mp); 2708 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack", 2709 tcp, seg_ack, 0, TH_RST); 2710 return; 2711 } 2712 /* 2713 * No sane TCP stack will send such a small window 2714 * without receiving any data. Just drop this invalid 2715 * ACK. We also shorten the abort timeout in case 2716 * this is an attack. 2717 */ 2718 if ((ntohs(tcpha->tha_win) << tcp->tcp_snd_ws) < 2719 (tcp->tcp_mss >> tcp_init_wnd_shft)) { 2720 freemsg(mp); 2721 TCP_STAT(tcps, tcp_zwin_ack_syn); 2722 tcp->tcp_second_ctimer_threshold = 2723 tcp_early_abort * SECONDS; 2724 return; 2725 } 2726 } 2727 break; 2728 case TCPS_LISTEN: 2729 /* 2730 * Only a TLI listener can come through this path when a 2731 * acceptor is going back to be a listener and a packet 2732 * for the acceptor hits the classifier. For a socket 2733 * listener, this can never happen because a listener 2734 * can never accept connection on itself and hence a 2735 * socket acceptor can not go back to being a listener. 2736 */ 2737 ASSERT(!TCP_IS_SOCKET(tcp)); 2738 /*FALLTHRU*/ 2739 case TCPS_CLOSED: 2740 case TCPS_BOUND: { 2741 conn_t *new_connp; 2742 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2743 2744 /* 2745 * Don't accept any input on a closed tcp as this TCP logically 2746 * does not exist on the system. Don't proceed further with 2747 * this TCP. For instance, this packet could trigger another 2748 * close of this tcp which would be disastrous for tcp_refcnt. 2749 * tcp_close_detached / tcp_clean_death / tcp_closei_local must 2750 * be called at most once on a TCP. In this case we need to 2751 * refeed the packet into the classifier and figure out where 2752 * the packet should go. 2753 */ 2754 new_connp = ipcl_classify(mp, ira, ipst); 2755 if (new_connp != NULL) { 2756 /* Drops ref on new_connp */ 2757 tcp_reinput(new_connp, mp, ira, ipst); 2758 return; 2759 } 2760 /* We failed to classify. For now just drop the packet */ 2761 freemsg(mp); 2762 return; 2763 } 2764 case TCPS_IDLE: 2765 /* 2766 * Handle the case where the tcp_clean_death() has happened 2767 * on a connection (application hasn't closed yet) but a packet 2768 * was already queued on squeue before tcp_clean_death() 2769 * was processed. Calling tcp_clean_death() twice on same 2770 * connection can result in weird behaviour. 2771 */ 2772 freemsg(mp); 2773 return; 2774 default: 2775 break; 2776 } 2777 2778 /* 2779 * Already on the correct queue/perimeter. 2780 * If this is a detached connection and not an eager 2781 * connection hanging off a listener then new data 2782 * (past the FIN) will cause a reset. 2783 * We do a special check here where it 2784 * is out of the main line, rather than check 2785 * if we are detached every time we see new 2786 * data down below. 2787 */ 2788 if (TCP_IS_DETACHED_NONEAGER(tcp) && 2789 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) { 2790 TCPS_BUMP_MIB(tcps, tcpInClosed); 2791 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2792 2793 freemsg(mp); 2794 /* 2795 * This could be an SSL closure alert. We're detached so just 2796 * acknowledge it this last time. 
2797 */ 2798 if (tcp->tcp_kssl_ctx != NULL) { 2799 kssl_release_ctx(tcp->tcp_kssl_ctx); 2800 tcp->tcp_kssl_ctx = NULL; 2801 2802 tcp->tcp_rnxt += seg_len; 2803 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2804 flags |= TH_ACK_NEEDED; 2805 goto ack_check; 2806 } 2807 2808 tcp_xmit_ctl("new data when detached", tcp, 2809 tcp->tcp_snxt, 0, TH_RST); 2810 (void) tcp_clean_death(tcp, EPROTO); 2811 return; 2812 } 2813 2814 mp->b_rptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2815 urp = ntohs(tcpha->tha_urp) - TCP_OLD_URP_INTERPRETATION; 2816 new_swnd = ntohs(tcpha->tha_win) << 2817 ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws); 2818 2819 if (tcp->tcp_snd_ts_ok) { 2820 if (!tcp_paws_check(tcp, tcpha, &tcpopt)) { 2821 /* 2822 * This segment is not acceptable. 2823 * Drop it and send back an ACK. 2824 */ 2825 freemsg(mp); 2826 flags |= TH_ACK_NEEDED; 2827 goto ack_check; 2828 } 2829 } else if (tcp->tcp_snd_sack_ok) { 2830 tcpopt.tcp = tcp; 2831 /* 2832 * SACK info in already updated in tcp_parse_options. Ignore 2833 * all other TCP options... 2834 */ 2835 (void) tcp_parse_options(tcpha, &tcpopt); 2836 } 2837 try_again:; 2838 mss = tcp->tcp_mss; 2839 gap = seg_seq - tcp->tcp_rnxt; 2840 rgap = tcp->tcp_rwnd - (gap + seg_len); 2841 /* 2842 * gap is the amount of sequence space between what we expect to see 2843 * and what we got for seg_seq. A positive value for gap means 2844 * something got lost. A negative value means we got some old stuff. 2845 */ 2846 if (gap < 0) { 2847 /* Old stuff present. Is the SYN in there? */ 2848 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 2849 (seg_len != 0)) { 2850 flags &= ~TH_SYN; 2851 seg_seq++; 2852 urp--; 2853 /* Recompute the gaps after noting the SYN. */ 2854 goto try_again; 2855 } 2856 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs); 2857 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, 2858 (seg_len > -gap ? -gap : seg_len)); 2859 /* Remove the old stuff from seg_len. */ 2860 seg_len += gap; 2861 /* 2862 * Anything left? 2863 * Make sure to check for unack'd FIN when rest of data 2864 * has been previously ack'd. 2865 */ 2866 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 2867 /* 2868 * Resets are only valid if they lie within our offered 2869 * window. If the RST bit is set, we just ignore this 2870 * segment. 2871 */ 2872 if (flags & TH_RST) { 2873 freemsg(mp); 2874 return; 2875 } 2876 2877 /* 2878 * The arriving of dup data packets indicate that we 2879 * may have postponed an ack for too long, or the other 2880 * side's RTT estimate is out of shape. Start acking 2881 * more often. 2882 */ 2883 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) && 2884 tcp->tcp_rack_cnt >= 1 && 2885 tcp->tcp_rack_abs_max > 2) { 2886 tcp->tcp_rack_abs_max--; 2887 } 2888 tcp->tcp_rack_cur_max = 1; 2889 2890 /* 2891 * This segment is "unacceptable". None of its 2892 * sequence space lies within our advertized window. 2893 * 2894 * Adjust seg_len to the original value for tracing. 2895 */ 2896 seg_len -= gap; 2897 if (connp->conn_debug) { 2898 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 2899 "tcp_rput: unacceptable, gap %d, rgap %d, " 2900 "flags 0x%x, seg_seq %u, seg_ack %u, " 2901 "seg_len %d, rnxt %u, snxt %u, %s", 2902 gap, rgap, flags, seg_seq, seg_ack, 2903 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 2904 tcp_display(tcp, NULL, 2905 DISP_ADDR_AND_PORT)); 2906 } 2907 2908 /* 2909 * Arrange to send an ACK in response to the 2910 * unacceptable segment per RFC 793 page 69. 
There 2911 * is only one small difference between ours and the 2912 * acceptability test in the RFC - we accept ACK-only 2913 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 2914 * will be generated. 2915 * 2916 * Note that we have to ACK an ACK-only packet at least 2917 * for stacks that send 0-length keep-alives with 2918 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 2919 * section 4.2.3.6. As long as we don't ever generate 2920 * an unacceptable packet in response to an incoming 2921 * packet that is unacceptable, it should not cause 2922 * "ACK wars". 2923 */ 2924 flags |= TH_ACK_NEEDED; 2925 2926 /* 2927 * Continue processing this segment in order to use the 2928 * ACK information it contains, but skip all other 2929 * sequence-number processing. Processing the ACK 2930 * information is necessary in order to 2931 * re-synchronize connections that may have lost 2932 * synchronization. 2933 * 2934 * We clear seg_len and flag fields related to 2935 * sequence number processing as they are not 2936 * to be trusted for an unacceptable segment. 2937 */ 2938 seg_len = 0; 2939 flags &= ~(TH_SYN | TH_FIN | TH_URG); 2940 goto process_ack; 2941 } 2942 2943 /* Fix seg_seq, and chew the gap off the front. */ 2944 seg_seq = tcp->tcp_rnxt; 2945 urp += gap; 2946 do { 2947 mblk_t *mp2; 2948 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 2949 (uintptr_t)UINT_MAX); 2950 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 2951 if (gap > 0) { 2952 mp->b_rptr = mp->b_wptr - gap; 2953 break; 2954 } 2955 mp2 = mp; 2956 mp = mp->b_cont; 2957 freeb(mp2); 2958 } while (gap < 0); 2959 /* 2960 * If the urgent data has already been acknowledged, we 2961 * should ignore TH_URG below 2962 */ 2963 if (urp < 0) 2964 flags &= ~TH_URG; 2965 } 2966 /* 2967 * rgap is the amount of stuff received out of window. A negative 2968 * value is the amount out of window. 2969 */ 2970 if (rgap < 0) { 2971 mblk_t *mp2; 2972 2973 if (tcp->tcp_rwnd == 0) { 2974 TCPS_BUMP_MIB(tcps, tcpInWinProbe); 2975 } else { 2976 TCPS_BUMP_MIB(tcps, tcpInDataPastWinSegs); 2977 TCPS_UPDATE_MIB(tcps, tcpInDataPastWinBytes, -rgap); 2978 } 2979 2980 /* 2981 * seg_len does not include the FIN, so if more than 2982 * just the FIN is out of window, we act like we don't 2983 * see it. (If just the FIN is out of window, rgap 2984 * will be zero and we will go ahead and acknowledge 2985 * the FIN.) 2986 */ 2987 flags &= ~TH_FIN; 2988 2989 /* Fix seg_len and make sure there is something left. */ 2990 seg_len += rgap; 2991 if (seg_len <= 0) { 2992 /* 2993 * Resets are only valid if they lie within our offered 2994 * window. If the RST bit is set, we just ignore this 2995 * segment. 2996 */ 2997 if (flags & TH_RST) { 2998 freemsg(mp); 2999 return; 3000 } 3001 3002 /* Per RFC 793, we need to send back an ACK. */ 3003 flags |= TH_ACK_NEEDED; 3004 3005 /* 3006 * Send SIGURG as soon as possible i.e. even 3007 * if the TH_URG was delivered in a window probe 3008 * packet (which will be unacceptable). 3009 * 3010 * We generate a signal if none has been generated 3011 * for this connection or if this is a new urgent 3012 * byte. Also send a zero-length "unmarked" message 3013 * to inform SIOCATMARK that this is not the mark. 3014 * 3015 * tcp_urp_last_valid is cleared when the T_exdata_ind 3016 * is sent up. This plus the check for old data 3017 * (gap >= 0) handles the wraparound of the sequence 3018 * number space without having to always track the 3019 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 3020 * this max in its rcv_up variable). 
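 *
 * A worked example with illustrative numbers: if seg_seq is 5000, urp is
 * 10 and tcp_urp_last is 4800, then urp + seg_seq = 5010 is beyond the
 * last recorded mark, so a signal is generated and tcp_urp_last becomes
 * 5010.  A later retransmission of the same probe yields 5010 again,
 * which, while tcp_urp_last_valid is still set, fails the SEQ_GT() test
 * and is silently ignored.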
3021 * 3022 * This prevents duplicate SIGURGS due to a "late" 3023 * zero-window probe when the T_EXDATA_IND has already 3024 * been sent up. 3025 */ 3026 if ((flags & TH_URG) && 3027 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 3028 tcp->tcp_urp_last))) { 3029 if (IPCL_IS_NONSTR(connp)) { 3030 if (!TCP_IS_DETACHED(tcp)) { 3031 (*connp->conn_upcalls-> 3032 su_signal_oob) 3033 (connp->conn_upper_handle, 3034 urp); 3035 } 3036 } else { 3037 mp1 = allocb(0, BPRI_MED); 3038 if (mp1 == NULL) { 3039 freemsg(mp); 3040 return; 3041 } 3042 if (!TCP_IS_DETACHED(tcp) && 3043 !putnextctl1(connp->conn_rq, 3044 M_PCSIG, SIGURG)) { 3045 /* Try again on the rexmit. */ 3046 freemsg(mp1); 3047 freemsg(mp); 3048 return; 3049 } 3050 /* 3051 * If the next byte would be the mark 3052 * then mark with MARKNEXT else mark 3053 * with NOTMARKNEXT. 3054 */ 3055 if (gap == 0 && urp == 0) 3056 mp1->b_flag |= MSGMARKNEXT; 3057 else 3058 mp1->b_flag |= MSGNOTMARKNEXT; 3059 freemsg(tcp->tcp_urp_mark_mp); 3060 tcp->tcp_urp_mark_mp = mp1; 3061 flags |= TH_SEND_URP_MARK; 3062 } 3063 tcp->tcp_urp_last_valid = B_TRUE; 3064 tcp->tcp_urp_last = urp + seg_seq; 3065 } 3066 /* 3067 * If this is a zero window probe, continue to 3068 * process the ACK part. But we need to set seg_len 3069 * to 0 to avoid data processing. Otherwise just 3070 * drop the segment and send back an ACK. 3071 */ 3072 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 3073 flags &= ~(TH_SYN | TH_URG); 3074 seg_len = 0; 3075 goto process_ack; 3076 } else { 3077 freemsg(mp); 3078 goto ack_check; 3079 } 3080 } 3081 /* Pitch out of window stuff off the end. */ 3082 rgap = seg_len; 3083 mp2 = mp; 3084 do { 3085 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 3086 (uintptr_t)INT_MAX); 3087 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 3088 if (rgap < 0) { 3089 mp2->b_wptr += rgap; 3090 if ((mp1 = mp2->b_cont) != NULL) { 3091 mp2->b_cont = NULL; 3092 freemsg(mp1); 3093 } 3094 break; 3095 } 3096 } while ((mp2 = mp2->b_cont) != NULL); 3097 } 3098 ok:; 3099 /* 3100 * TCP should check ECN info for segments inside the window only. 3101 * Therefore the check should be done here. 3102 */ 3103 if (tcp->tcp_ecn_ok) { 3104 if (flags & TH_CWR) { 3105 tcp->tcp_ecn_echo_on = B_FALSE; 3106 } 3107 /* 3108 * Note that both ECN_CE and CWR can be set in the 3109 * same segment. In this case, we once again turn 3110 * on ECN_ECHO. 3111 */ 3112 if (connp->conn_ipversion == IPV4_VERSION) { 3113 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 3114 3115 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 3116 tcp->tcp_ecn_echo_on = B_TRUE; 3117 } 3118 } else { 3119 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 3120 3121 if ((vcf & htonl(IPH_ECN_CE << 20)) == 3122 htonl(IPH_ECN_CE << 20)) { 3123 tcp->tcp_ecn_echo_on = B_TRUE; 3124 } 3125 } 3126 } 3127 3128 /* 3129 * Check whether we can update tcp_ts_recent. This test is 3130 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 3131 * Extensions for High Performance: An Update", Internet Draft. 3132 */ 3133 if (tcp->tcp_snd_ts_ok && 3134 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 3135 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 3136 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 3137 tcp->tcp_last_rcv_lbolt = LBOLT_FASTPATH64; 3138 } 3139 3140 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 3141 /* 3142 * FIN in an out of order segment. We record this in 3143 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 3144 * Clear the FIN so that any check on FIN flag will fail. 
3145 * Remember that FIN also counts in the sequence number 3146 * space. So we need to ack out of order FIN only segments. 3147 */ 3148 if (flags & TH_FIN) { 3149 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 3150 tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 3151 flags &= ~TH_FIN; 3152 flags |= TH_ACK_NEEDED; 3153 } 3154 if (seg_len > 0) { 3155 /* Fill in the SACK blk list. */ 3156 if (tcp->tcp_snd_sack_ok) { 3157 tcp_sack_insert(tcp->tcp_sack_list, 3158 seg_seq, seg_seq + seg_len, 3159 &(tcp->tcp_num_sack_blk)); 3160 } 3161 3162 /* 3163 * Attempt reassembly and see if we have something 3164 * ready to go. 3165 */ 3166 mp = tcp_reass(tcp, mp, seg_seq); 3167 /* Always ack out of order packets */ 3168 flags |= TH_ACK_NEEDED | TH_PUSH; 3169 if (mp) { 3170 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 3171 (uintptr_t)INT_MAX); 3172 seg_len = mp->b_cont ? msgdsize(mp) : 3173 (int)(mp->b_wptr - mp->b_rptr); 3174 seg_seq = tcp->tcp_rnxt; 3175 /* 3176 * A gap is filled and the seq num and len 3177 * of the gap match that of a previously 3178 * received FIN, put the FIN flag back in. 3179 */ 3180 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3181 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3182 flags |= TH_FIN; 3183 tcp->tcp_valid_bits &= 3184 ~TCP_OFO_FIN_VALID; 3185 } 3186 if (tcp->tcp_reass_tid != 0) { 3187 (void) TCP_TIMER_CANCEL(tcp, 3188 tcp->tcp_reass_tid); 3189 /* 3190 * Restart the timer if there is still 3191 * data in the reassembly queue. 3192 */ 3193 if (tcp->tcp_reass_head != NULL) { 3194 tcp->tcp_reass_tid = TCP_TIMER( 3195 tcp, tcp_reass_timer, 3196 tcps->tcps_reass_timeout); 3197 } else { 3198 tcp->tcp_reass_tid = 0; 3199 } 3200 } 3201 } else { 3202 /* 3203 * Keep going even with NULL mp. 3204 * There may be a useful ACK or something else 3205 * we don't want to miss. 3206 * 3207 * But TCP should not perform fast retransmit 3208 * because of the ack number. TCP uses 3209 * seg_len == 0 to determine if it is a pure 3210 * ACK. And this is not a pure ACK. 3211 */ 3212 seg_len = 0; 3213 ofo_seg = B_TRUE; 3214 3215 if (tcps->tcps_reass_timeout != 0 && 3216 tcp->tcp_reass_tid == 0) { 3217 tcp->tcp_reass_tid = TCP_TIMER(tcp, 3218 tcp_reass_timer, 3219 tcps->tcps_reass_timeout); 3220 } 3221 } 3222 } 3223 } else if (seg_len > 0) { 3224 TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs); 3225 TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len); 3226 /* 3227 * If an out of order FIN was received before, and the seq 3228 * num and len of the new segment match that of the FIN, 3229 * put the FIN flag back in. 3230 */ 3231 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3232 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3233 flags |= TH_FIN; 3234 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 3235 } 3236 } 3237 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 3238 if (flags & TH_RST) { 3239 freemsg(mp); 3240 switch (tcp->tcp_state) { 3241 case TCPS_SYN_RCVD: 3242 (void) tcp_clean_death(tcp, ECONNREFUSED); 3243 break; 3244 case TCPS_ESTABLISHED: 3245 case TCPS_FIN_WAIT_1: 3246 case TCPS_FIN_WAIT_2: 3247 case TCPS_CLOSE_WAIT: 3248 (void) tcp_clean_death(tcp, ECONNRESET); 3249 break; 3250 case TCPS_CLOSING: 3251 case TCPS_LAST_ACK: 3252 (void) tcp_clean_death(tcp, 0); 3253 break; 3254 default: 3255 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3256 (void) tcp_clean_death(tcp, ENXIO); 3257 break; 3258 } 3259 return; 3260 } 3261 if (flags & TH_SYN) { 3262 /* 3263 * See RFC 793, Page 71 3264 * 3265 * The seq number must be in the window as it should 3266 * be "fixed" above. 
If it is outside window, it should 3267 * be already rejected. Note that we allow seg_seq to be 3268 * rnxt + rwnd because we want to accept 0 window probe. 3269 */ 3270 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 3271 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 3272 freemsg(mp); 3273 /* 3274 * If the ACK flag is not set, just use our snxt as the 3275 * seq number of the RST segment. 3276 */ 3277 if (!(flags & TH_ACK)) { 3278 seg_ack = tcp->tcp_snxt; 3279 } 3280 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 3281 TH_RST|TH_ACK); 3282 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3283 (void) tcp_clean_death(tcp, ECONNRESET); 3284 return; 3285 } 3286 /* 3287 * urp could be -1 when the urp field in the packet is 0 3288 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 3289 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 3290 */ 3291 if (flags & TH_URG && urp >= 0) { 3292 if (!tcp->tcp_urp_last_valid || 3293 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 3294 /* 3295 * Non-STREAMS sockets handle the urgent data a litte 3296 * differently from STREAMS based sockets. There is no 3297 * need to mark any mblks with the MSG{NOT,}MARKNEXT 3298 * flags to keep SIOCATMARK happy. Instead a 3299 * su_signal_oob upcall is made to update the mark. 3300 * Neither is a T_EXDATA_IND mblk needed to be 3301 * prepended to the urgent data. The urgent data is 3302 * delivered using the su_recv upcall, where we set 3303 * the MSG_OOB flag to indicate that it is urg data. 3304 * 3305 * Neither TH_SEND_URP_MARK nor TH_MARKNEXT_NEEDED 3306 * are used by non-STREAMS sockets. 3307 */ 3308 if (IPCL_IS_NONSTR(connp)) { 3309 if (!TCP_IS_DETACHED(tcp)) { 3310 (*connp->conn_upcalls->su_signal_oob) 3311 (connp->conn_upper_handle, urp); 3312 } 3313 } else { 3314 /* 3315 * If we haven't generated the signal yet for 3316 * this urgent pointer value, do it now. Also, 3317 * send up a zero-length M_DATA indicating 3318 * whether or not this is the mark. The latter 3319 * is not needed when a T_EXDATA_IND is sent up. 3320 * However, if there are allocation failures 3321 * this code relies on the sender retransmitting 3322 * and the socket code for determining the mark 3323 * should not block waiting for the peer to 3324 * transmit. Thus, for simplicity we always 3325 * send up the mark indication. 3326 */ 3327 mp1 = allocb(0, BPRI_MED); 3328 if (mp1 == NULL) { 3329 freemsg(mp); 3330 return; 3331 } 3332 if (!TCP_IS_DETACHED(tcp) && 3333 !putnextctl1(connp->conn_rq, M_PCSIG, 3334 SIGURG)) { 3335 /* Try again on the rexmit. */ 3336 freemsg(mp1); 3337 freemsg(mp); 3338 return; 3339 } 3340 /* 3341 * Mark with NOTMARKNEXT for now. 3342 * The code below will change this to MARKNEXT 3343 * if we are at the mark. 3344 * 3345 * If there are allocation failures (e.g. in 3346 * dupmsg below) the next time tcp_input_data 3347 * sees the urgent segment it will send up the 3348 * MSGMARKNEXT message. 
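 *
 * For context: the stream head uses these flags to answer SIOCATMARK.
 * MSGNOTMARKNEXT says the byte following this (zero-length) message is
 * known not to be the mark, while MSGMARKNEXT says the next byte is the
 * mark.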
3349 */ 3350 mp1->b_flag |= MSGNOTMARKNEXT; 3351 freemsg(tcp->tcp_urp_mark_mp); 3352 tcp->tcp_urp_mark_mp = mp1; 3353 flags |= TH_SEND_URP_MARK; 3354 #ifdef DEBUG 3355 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3356 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 3357 "last %x, %s", 3358 seg_seq, urp, tcp->tcp_urp_last, 3359 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3360 #endif /* DEBUG */ 3361 } 3362 tcp->tcp_urp_last_valid = B_TRUE; 3363 tcp->tcp_urp_last = urp + seg_seq; 3364 } else if (tcp->tcp_urp_mark_mp != NULL) { 3365 /* 3366 * An allocation failure prevented the previous 3367 * tcp_input_data from sending up the allocated 3368 * MSG*MARKNEXT message - send it up this time 3369 * around. 3370 */ 3371 flags |= TH_SEND_URP_MARK; 3372 } 3373 3374 /* 3375 * If the urgent byte is in this segment, make sure that it is 3376 * all by itself. This makes it much easier to deal with the 3377 * possibility of an allocation failure on the T_exdata_ind. 3378 * Note that seg_len is the number of bytes in the segment, and 3379 * urp is the offset into the segment of the urgent byte. 3380 * urp < seg_len means that the urgent byte is in this segment. 3381 */ 3382 if (urp < seg_len) { 3383 if (seg_len != 1) { 3384 uint32_t tmp_rnxt; 3385 /* 3386 * Break it up and feed it back in. 3387 * Re-attach the IP header. 3388 */ 3389 mp->b_rptr = iphdr; 3390 if (urp > 0) { 3391 /* 3392 * There is stuff before the urgent 3393 * byte. 3394 */ 3395 mp1 = dupmsg(mp); 3396 if (!mp1) { 3397 /* 3398 * Trim from urgent byte on. 3399 * The rest will come back. 3400 */ 3401 (void) adjmsg(mp, 3402 urp - seg_len); 3403 tcp_input_data(connp, 3404 mp, NULL, ira); 3405 return; 3406 } 3407 (void) adjmsg(mp1, urp - seg_len); 3408 /* Feed this piece back in. */ 3409 tmp_rnxt = tcp->tcp_rnxt; 3410 tcp_input_data(connp, mp1, NULL, ira); 3411 /* 3412 * If the data passed back in was not 3413 * processed (ie: bad ACK) sending 3414 * the remainder back in will cause a 3415 * loop. In this case, drop the 3416 * packet and let the sender try 3417 * sending a good packet. 3418 */ 3419 if (tmp_rnxt == tcp->tcp_rnxt) { 3420 freemsg(mp); 3421 return; 3422 } 3423 } 3424 if (urp != seg_len - 1) { 3425 uint32_t tmp_rnxt; 3426 /* 3427 * There is stuff after the urgent 3428 * byte. 3429 */ 3430 mp1 = dupmsg(mp); 3431 if (!mp1) { 3432 /* 3433 * Trim everything beyond the 3434 * urgent byte. The rest will 3435 * come back. 3436 */ 3437 (void) adjmsg(mp, 3438 urp + 1 - seg_len); 3439 tcp_input_data(connp, 3440 mp, NULL, ira); 3441 return; 3442 } 3443 (void) adjmsg(mp1, urp + 1 - seg_len); 3444 tmp_rnxt = tcp->tcp_rnxt; 3445 tcp_input_data(connp, mp1, NULL, ira); 3446 /* 3447 * If the data passed back in was not 3448 * processed (ie: bad ACK) sending 3449 * the remainder back in will cause a 3450 * loop. In this case, drop the 3451 * packet and let the sender try 3452 * sending a good packet. 3453 */ 3454 if (tmp_rnxt == tcp->tcp_rnxt) { 3455 freemsg(mp); 3456 return; 3457 } 3458 } 3459 tcp_input_data(connp, mp, NULL, ira); 3460 return; 3461 } 3462 /* 3463 * This segment contains only the urgent byte. We 3464 * have to allocate the T_exdata_ind, if we can. 3465 */ 3466 if (IPCL_IS_NONSTR(connp)) { 3467 int error; 3468 3469 (*connp->conn_upcalls->su_recv) 3470 (connp->conn_upper_handle, mp, seg_len, 3471 MSG_OOB, &error, NULL); 3472 /* 3473 * We should never be in middle of a 3474 * fallback, the squeue guarantees that. 
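 *
 * (Fallback here refers to this non-STREAMS socket being converted back
 * to a STREAMS endpoint; since that conversion is itself serialized on
 * the squeue, it cannot be underway while this segment is being
 * processed.)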
3475 */ 3476 ASSERT(error != EOPNOTSUPP); 3477 mp = NULL; 3478 goto update_ack; 3479 } else if (!tcp->tcp_urp_mp) { 3480 struct T_exdata_ind *tei; 3481 mp1 = allocb(sizeof (struct T_exdata_ind), 3482 BPRI_MED); 3483 if (!mp1) { 3484 /* 3485 * Sigh... It'll be back. 3486 * Generate any MSG*MARK message now. 3487 */ 3488 freemsg(mp); 3489 seg_len = 0; 3490 if (flags & TH_SEND_URP_MARK) { 3491 3492 3493 ASSERT(tcp->tcp_urp_mark_mp); 3494 tcp->tcp_urp_mark_mp->b_flag &= 3495 ~MSGNOTMARKNEXT; 3496 tcp->tcp_urp_mark_mp->b_flag |= 3497 MSGMARKNEXT; 3498 } 3499 goto ack_check; 3500 } 3501 mp1->b_datap->db_type = M_PROTO; 3502 tei = (struct T_exdata_ind *)mp1->b_rptr; 3503 tei->PRIM_type = T_EXDATA_IND; 3504 tei->MORE_flag = 0; 3505 mp1->b_wptr = (uchar_t *)&tei[1]; 3506 tcp->tcp_urp_mp = mp1; 3507 #ifdef DEBUG 3508 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3509 "tcp_rput: allocated exdata_ind %s", 3510 tcp_display(tcp, NULL, 3511 DISP_PORT_ONLY)); 3512 #endif /* DEBUG */ 3513 /* 3514 * There is no need to send a separate MSG*MARK 3515 * message since the T_EXDATA_IND will be sent 3516 * now. 3517 */ 3518 flags &= ~TH_SEND_URP_MARK; 3519 freemsg(tcp->tcp_urp_mark_mp); 3520 tcp->tcp_urp_mark_mp = NULL; 3521 } 3522 /* 3523 * Now we are all set. On the next putnext upstream, 3524 * tcp_urp_mp will be non-NULL and will get prepended 3525 * to what has to be this piece containing the urgent 3526 * byte. If for any reason we abort this segment below, 3527 * if it comes back, we will have this ready, or it 3528 * will get blown off in close. 3529 */ 3530 } else if (urp == seg_len) { 3531 /* 3532 * The urgent byte is the next byte after this sequence 3533 * number. If this endpoint is non-STREAMS, then there 3534 * is nothing to do here since the socket has already 3535 * been notified about the urg pointer by the 3536 * su_signal_oob call above. 3537 * 3538 * In case of STREAMS, some more work might be needed. 3539 * If there is data it is marked with MSGMARKNEXT and 3540 * and any tcp_urp_mark_mp is discarded since it is not 3541 * needed. Otherwise, if the code above just allocated 3542 * a zero-length tcp_urp_mark_mp message, that message 3543 * is tagged with MSGMARKNEXT. Sending up these 3544 * MSGMARKNEXT messages makes SIOCATMARK work correctly 3545 * even though the T_EXDATA_IND will not be sent up 3546 * until the urgent byte arrives. 
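 *
 * A rough user-level sketch (hypothetical application code, only to
 * illustrate what the mark is used for):
 *
 *	int atmark = 0;
 *
 *	(void) ioctl(fd, SIOCATMARK, &atmark);
 *	if (atmark)
 *		(void) recv(fd, buf, 1, MSG_OOB);
 *	else
 *		(void) read(fd, buf, sizeof (buf));
 *
 * Ordinary read()s stop short of the mark, so SIOCATMARK eventually
 * reports non-zero and the application can pick up the urgent byte.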
3547 */ 3548 if (!IPCL_IS_NONSTR(tcp->tcp_connp)) { 3549 if (seg_len != 0) { 3550 flags |= TH_MARKNEXT_NEEDED; 3551 freemsg(tcp->tcp_urp_mark_mp); 3552 tcp->tcp_urp_mark_mp = NULL; 3553 flags &= ~TH_SEND_URP_MARK; 3554 } else if (tcp->tcp_urp_mark_mp != NULL) { 3555 flags |= TH_SEND_URP_MARK; 3556 tcp->tcp_urp_mark_mp->b_flag &= 3557 ~MSGNOTMARKNEXT; 3558 tcp->tcp_urp_mark_mp->b_flag |= 3559 MSGMARKNEXT; 3560 } 3561 } 3562 #ifdef DEBUG 3563 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3564 "tcp_rput: AT MARK, len %d, flags 0x%x, %s", 3565 seg_len, flags, 3566 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3567 #endif /* DEBUG */ 3568 } 3569 #ifdef DEBUG 3570 else { 3571 /* Data left until we hit mark */ 3572 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3573 "tcp_rput: URP %d bytes left, %s", 3574 urp - seg_len, tcp_display(tcp, NULL, 3575 DISP_PORT_ONLY)); 3576 } 3577 #endif /* DEBUG */ 3578 } 3579 3580 process_ack: 3581 if (!(flags & TH_ACK)) { 3582 freemsg(mp); 3583 goto xmit_check; 3584 } 3585 } 3586 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 3587 3588 if (bytes_acked > 0) 3589 tcp->tcp_ip_forward_progress = B_TRUE; 3590 if (tcp->tcp_state == TCPS_SYN_RCVD) { 3591 if ((tcp->tcp_conn.tcp_eager_conn_ind != NULL) && 3592 ((tcp->tcp_kssl_ent == NULL) || !tcp->tcp_kssl_pending)) { 3593 /* 3-way handshake complete - pass up the T_CONN_IND */ 3594 tcp_t *listener = tcp->tcp_listener; 3595 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind; 3596 3597 tcp->tcp_tconnind_started = B_TRUE; 3598 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 3599 /* 3600 * We are here means eager is fine but it can 3601 * get a TH_RST at any point between now and till 3602 * accept completes and disappear. We need to 3603 * ensure that reference to eager is valid after 3604 * we get out of eager's perimeter. So we do 3605 * an extra refhold. 3606 */ 3607 CONN_INC_REF(connp); 3608 3609 /* 3610 * The listener also exists because of the refhold 3611 * done in tcp_input_listener. Its possible that it 3612 * might have closed. We will check that once we 3613 * get inside listeners context. 3614 */ 3615 CONN_INC_REF(listener->tcp_connp); 3616 if (listener->tcp_connp->conn_sqp == 3617 connp->conn_sqp) { 3618 /* 3619 * We optimize by not calling an SQUEUE_ENTER 3620 * on the listener since we know that the 3621 * listener and eager squeues are the same. 3622 * We are able to make this check safely only 3623 * because neither the eager nor the listener 3624 * can change its squeue. Only an active connect 3625 * can change its squeue 3626 */ 3627 tcp_send_conn_ind(listener->tcp_connp, mp, 3628 listener->tcp_connp->conn_sqp); 3629 CONN_DEC_REF(listener->tcp_connp); 3630 } else if (!tcp->tcp_loopback) { 3631 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp, 3632 mp, tcp_send_conn_ind, 3633 listener->tcp_connp, NULL, SQ_FILL, 3634 SQTAG_TCP_CONN_IND); 3635 } else { 3636 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp, 3637 mp, tcp_send_conn_ind, 3638 listener->tcp_connp, NULL, SQ_NODRAIN, 3639 SQTAG_TCP_CONN_IND); 3640 } 3641 } 3642 3643 /* 3644 * We are seeing the final ack in the three way 3645 * hand shake of a active open'ed connection 3646 * so we must send up a T_CONN_CON 3647 * 3648 * tcp_sendmsg() checks tcp_state without entering 3649 * the squeue so tcp_state should be updated before 3650 * sending up connection confirmation. Probe the state 3651 * change below when we are sure sending of the confirmation 3652 * has succeeded. 
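 *
 * (For an actively opened endpoint this is the simultaneous-open case:
 * both sides sent a SYN, each moved from SYN_SENT to SYN_RCVD on
 * receiving the peer's SYN, and the ACK now being processed completes
 * the handshake, hence a T_CONN_CON rather than a T_CONN_IND.)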
3653 */ 3654 tcp->tcp_state = TCPS_ESTABLISHED; 3655 3656 if (tcp->tcp_active_open) { 3657 if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) { 3658 freemsg(mp); 3659 tcp->tcp_state = TCPS_SYN_RCVD; 3660 return; 3661 } 3662 /* 3663 * Don't fuse the loopback endpoints for 3664 * simultaneous active opens. 3665 */ 3666 if (tcp->tcp_loopback) { 3667 TCP_STAT(tcps, tcp_fusion_unfusable); 3668 tcp->tcp_unfusable = B_TRUE; 3669 } 3670 /* 3671 * For simultaneous active open, trace receipt of final 3672 * ACK as tcp:::connect-established. 3673 */ 3674 DTRACE_TCP5(connect__established, mblk_t *, NULL, 3675 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3676 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3677 } else { 3678 /* 3679 * For passive open, trace receipt of final ACK as 3680 * tcp:::accept-established. 3681 */ 3682 DTRACE_TCP5(accept__established, mblk_t *, NULL, 3683 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3684 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3685 } 3686 TCPS_CONN_INC(tcps); 3687 3688 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */ 3689 bytes_acked--; 3690 /* SYN was acked - making progress */ 3691 tcp->tcp_ip_forward_progress = B_TRUE; 3692 3693 /* 3694 * If SYN was retransmitted, need to reset all 3695 * retransmission info as this segment will be 3696 * treated as a dup ACK. 3697 */ 3698 if (tcp->tcp_rexmit) { 3699 tcp->tcp_rexmit = B_FALSE; 3700 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 3701 tcp->tcp_rexmit_max = tcp->tcp_snxt; 3702 tcp->tcp_snd_burst = tcp->tcp_localnet ? 3703 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 3704 tcp->tcp_ms_we_have_waited = 0; 3705 tcp->tcp_cwnd = mss; 3706 } 3707 3708 /* 3709 * We set the send window to zero here. 3710 * This is needed if there is data to be 3711 * processed already on the queue. 3712 * Later (at swnd_update label), the 3713 * "new_swnd > tcp_swnd" condition is satisfied and 3714 * the XMIT_NEEDED flag is set in the current 3715 * (SYN_RCVD) state. This ensures tcp_wput_data() is 3716 * called if there is already data on queue in 3717 * this state. 3718 */ 3719 tcp->tcp_swnd = 0; 3720 3721 if (new_swnd > tcp->tcp_max_swnd) 3722 tcp->tcp_max_swnd = new_swnd; 3723 tcp->tcp_swl1 = seg_seq; 3724 tcp->tcp_swl2 = seg_ack; 3725 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 3726 3727 /* Trace change from SYN_RCVD -> ESTABLISHED here */ 3728 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 3729 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL, 3730 int32_t, TCPS_SYN_RCVD); 3731 3732 /* Fuse when both sides are in ESTABLISHED state */ 3733 if (tcp->tcp_loopback && do_tcp_fusion) 3734 tcp_fuse(tcp, iphdr, tcpha); 3735 3736 } 3737 /* This code follows 4.4BSD-Lite2 mostly. */ 3738 if (bytes_acked < 0) 3739 goto est; 3740 3741 /* 3742 * If TCP is ECN capable and the congestion experience bit is 3743 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be 3744 * done once per window (or more loosely, per RTT). 3745 */ 3746 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max)) 3747 tcp->tcp_cwr = B_FALSE; 3748 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) { 3749 if (!tcp->tcp_cwr) { 3750 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss; 3751 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss; 3752 tcp->tcp_cwnd = npkt * mss; 3753 /* 3754 * If the cwnd is 0, use the timer to clock out 3755 * new segments. This is required by the ECN spec. 3756 */ 3757 if (npkt == 0) { 3758 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 3759 /* 3760 * This makes sure that when the ACK comes 3761 * back, we will increase tcp_cwnd by 1 MSS.
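 *
 * (To make the arithmetic above concrete, with purely illustrative
 * numbers: with 8 * mss of data in flight when TH_ECE is seen,
 * npkt is 4, so tcp_cwnd_ssthresh and tcp_cwnd both drop to
 * 4 * mss.  Only when less than 2 * mss is outstanding does npkt
 * become 0 and force this timer-driven path.)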
3762 */ 3763 tcp->tcp_cwnd_cnt = 0; 3764 } 3765 tcp->tcp_cwr = B_TRUE; 3766 /* 3767 * This marks the end of the current window of in 3768 * flight data. That is why we don't use 3769 * tcp_suna + tcp_swnd. Only data in flight can 3770 * provide ECN info. 3771 */ 3772 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3773 tcp->tcp_ecn_cwr_sent = B_FALSE; 3774 } 3775 } 3776 3777 mp1 = tcp->tcp_xmit_head; 3778 if (bytes_acked == 0) { 3779 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) { 3780 int dupack_cnt; 3781 3782 TCPS_BUMP_MIB(tcps, tcpInDupAck); 3783 /* 3784 * Fast retransmit. When we have seen exactly three 3785 * identical ACKs while we have unacked data 3786 * outstanding we take it as a hint that our peer 3787 * dropped something. 3788 * 3789 * If TCP is retransmitting, don't do fast retransmit. 3790 */ 3791 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt && 3792 ! tcp->tcp_rexmit) { 3793 /* Do Limited Transmit */ 3794 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) < 3795 tcps->tcps_dupack_fast_retransmit) { 3796 /* 3797 * RFC 3042 3798 * 3799 * What we need to do is temporarily 3800 * increase tcp_cwnd so that new 3801 * data can be sent if it is allowed 3802 * by the receive window (tcp_rwnd). 3803 * tcp_wput_data() will take care of 3804 * the rest. 3805 * 3806 * If the connection is SACK capable, 3807 * only do limited xmit when there 3808 * is SACK info. 3809 * 3810 * Note how tcp_cwnd is incremented. 3811 * The first dup ACK will increase 3812 * it by 1 MSS. The second dup ACK 3813 * will increase it by 2 MSS. This 3814 * means that only 1 new segment will 3815 * be sent for each dup ACK. 3816 */ 3817 if (tcp->tcp_unsent > 0 && 3818 (!tcp->tcp_snd_sack_ok || 3819 (tcp->tcp_snd_sack_ok && 3820 tcp->tcp_notsack_list != NULL))) { 3821 tcp->tcp_cwnd += mss << 3822 (tcp->tcp_dupack_cnt - 1); 3823 flags |= TH_LIMIT_XMIT; 3824 } 3825 } else if (dupack_cnt == 3826 tcps->tcps_dupack_fast_retransmit) { 3827 3828 /* 3829 * If we have reduced tcp_ssthresh 3830 * because of ECN, do not reduce it again 3831 * unless it is already one window of data 3832 * away. After one window of data, tcp_cwr 3833 * should then be cleared. Note that 3834 * for non ECN capable connection, tcp_cwr 3835 * should always be false. 3836 * 3837 * Adjust cwnd since the duplicate 3838 * ack indicates that a packet was 3839 * dropped (due to congestion.) 3840 */ 3841 if (!tcp->tcp_cwr) { 3842 npkt = ((tcp->tcp_snxt - 3843 tcp->tcp_suna) >> 1) / mss; 3844 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 3845 mss; 3846 tcp->tcp_cwnd = (npkt + 3847 tcp->tcp_dupack_cnt) * mss; 3848 } 3849 if (tcp->tcp_ecn_ok) { 3850 tcp->tcp_cwr = B_TRUE; 3851 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3852 tcp->tcp_ecn_cwr_sent = B_FALSE; 3853 } 3854 3855 /* 3856 * We do Hoe's algorithm. Refer to her 3857 * paper "Improving the Start-up Behavior 3858 * of a Congestion Control Scheme for TCP," 3859 * appeared in SIGCOMM'96. 3860 * 3861 * Save highest seq no we have sent so far. 3862 * Be careful about the invisible FIN byte. 3863 */ 3864 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 3865 (tcp->tcp_unsent == 0)) { 3866 tcp->tcp_rexmit_max = tcp->tcp_fss; 3867 } else { 3868 tcp->tcp_rexmit_max = tcp->tcp_snxt; 3869 } 3870 3871 /* 3872 * Do not allow bursty traffic during. 3873 * fast recovery. Refer to Fall and Floyd's 3874 * paper "Simulation-based Comparisons of 3875 * Tahoe, Reno and SACK TCP" (in CCR?) 3876 * This is a best current practise. 
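 *
 * (Illustrative numbers only: with 20 * mss outstanding when the
 * duplicate-ACK threshold, conventionally three, is reached, the
 * code above sets tcp_cwnd_ssthresh to 10 * mss and tcp_cwnd to
 * 13 * mss; the burst limit set just below is what keeps that much
 * window from being emitted back-to-back.)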
3877 */ 3878 tcp->tcp_snd_burst = TCP_CWND_SS; 3879 3880 /* 3881 * For SACK: 3882 * Calculate tcp_pipe, which is the 3883 * estimated number of bytes in 3884 * network. 3885 * 3886 * tcp_fack is the highest sack'ed seq num 3887 * TCP has received. 3888 * 3889 * tcp_pipe is explained in the above quoted 3890 * Fall and Floyd's paper. tcp_fack is 3891 * explained in Mathis and Mahdavi's 3892 * "Forward Acknowledgment: Refining TCP 3893 * Congestion Control" in SIGCOMM '96. 3894 */ 3895 if (tcp->tcp_snd_sack_ok) { 3896 if (tcp->tcp_notsack_list != NULL) { 3897 tcp->tcp_pipe = tcp->tcp_snxt - 3898 tcp->tcp_fack; 3899 tcp->tcp_sack_snxt = seg_ack; 3900 flags |= TH_NEED_SACK_REXMIT; 3901 } else { 3902 /* 3903 * Always initialize tcp_pipe 3904 * even though we don't have 3905 * any SACK info. If later 3906 * we get SACK info and 3907 * tcp_pipe is not initialized, 3908 * funny things will happen. 3909 */ 3910 tcp->tcp_pipe = 3911 tcp->tcp_cwnd_ssthresh; 3912 } 3913 } else { 3914 flags |= TH_REXMIT_NEEDED; 3915 } /* tcp_snd_sack_ok */ 3916 3917 } else { 3918 /* 3919 * Here we perform congestion 3920 * avoidance, but NOT slow start. 3921 * This is known as the Fast 3922 * Recovery Algorithm. 3923 */ 3924 if (tcp->tcp_snd_sack_ok && 3925 tcp->tcp_notsack_list != NULL) { 3926 flags |= TH_NEED_SACK_REXMIT; 3927 tcp->tcp_pipe -= mss; 3928 if (tcp->tcp_pipe < 0) 3929 tcp->tcp_pipe = 0; 3930 } else { 3931 /* 3932 * We know that one more packet has 3933 * left the pipe thus we can update 3934 * cwnd. 3935 */ 3936 cwnd = tcp->tcp_cwnd + mss; 3937 if (cwnd > tcp->tcp_cwnd_max) 3938 cwnd = tcp->tcp_cwnd_max; 3939 tcp->tcp_cwnd = cwnd; 3940 if (tcp->tcp_unsent > 0) 3941 flags |= TH_XMIT_NEEDED; 3942 } 3943 } 3944 } 3945 } else if (tcp->tcp_zero_win_probe) { 3946 /* 3947 * If the window has opened, need to arrange 3948 * to send additional data. 3949 */ 3950 if (new_swnd != 0) { 3951 /* tcp_suna != tcp_snxt */ 3952 /* Packet contains a window update */ 3953 TCPS_BUMP_MIB(tcps, tcpInWinUpdate); 3954 tcp->tcp_zero_win_probe = 0; 3955 tcp->tcp_timer_backoff = 0; 3956 tcp->tcp_ms_we_have_waited = 0; 3957 3958 /* 3959 * Transmit starting with tcp_suna since 3960 * the one byte probe is not ack'ed. 3961 * If TCP has sent more than one identical 3962 * probe, tcp_rexmit will be set. That means 3963 * tcp_ss_rexmit() will send out the one 3964 * byte along with new data. Otherwise, 3965 * fake the retransmission. 3966 */ 3967 flags |= TH_XMIT_NEEDED; 3968 if (!tcp->tcp_rexmit) { 3969 tcp->tcp_rexmit = B_TRUE; 3970 tcp->tcp_dupack_cnt = 0; 3971 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 3972 tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 3973 } 3974 } 3975 } 3976 goto swnd_update; 3977 } 3978 3979 /* 3980 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 3981 * If the ACK value acks something that we have not yet sent, it might 3982 * be an old duplicate segment. Send an ACK to re-synchronize the 3983 * other side. 3984 * Note: reset in response to unacceptable ACK in SYN_RECEIVE 3985 * state is handled above, so we can always just drop the segment and 3986 * send an ACK here. 3987 * 3988 * In the case where the peer shrinks the window, we see the new window 3989 * update, but all the data sent previously is queued up by the peer. 3990 * To account for this, in tcp_process_shrunk_swnd(), the sequence 3991 * number, which was already sent, and within window, is recorded. 3992 * tcp_snxt is then updated. 
3993 * 3994 * If the window has previously shrunk, and an ACK for data not yet 3995 * sent according to tcp_snxt is received, it may still be valid. If 3996 * the ACK is for data within the window at the time the window was 3997 * shrunk, then the ACK is acceptable. In this case tcp_snxt is set to 3998 * the sequence number ACK'ed. 3999 * 4000 * If the ACK covers all the data sent at the time the window was 4001 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE. 4002 * 4003 * Should we send ACKs in response to ACK only segments? 4004 */ 4005 4006 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 4007 if ((tcp->tcp_is_wnd_shrnk) && 4008 (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) { 4009 uint32_t data_acked_ahead_snxt; 4010 4011 data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt; 4012 tcp_update_xmit_tail(tcp, seg_ack); 4013 tcp->tcp_unsent -= data_acked_ahead_snxt; 4014 } else { 4015 TCPS_BUMP_MIB(tcps, tcpInAckUnsent); 4016 /* drop the received segment */ 4017 freemsg(mp); 4018 4019 /* 4020 * Send back an ACK. If tcp_drop_ack_unsent_cnt is 4021 * greater than 0, check if the number of such 4022 * bogus ACKs is greater than that count. If yes, 4023 * don't send back any ACK. This prevents TCP from 4024 * getting into an ACK storm if somehow an attacker 4025 * successfully spoofs an acceptable segment to our 4026 * peer. If this continues (count > 2 X threshold), 4027 * we should abort this connection. 4028 */ 4029 if (tcp_drop_ack_unsent_cnt > 0 && 4030 ++tcp->tcp_in_ack_unsent > 4031 tcp_drop_ack_unsent_cnt) { 4032 TCP_STAT(tcps, tcp_in_ack_unsent_drop); 4033 if (tcp->tcp_in_ack_unsent > 2 * 4034 tcp_drop_ack_unsent_cnt) { 4035 (void) tcp_clean_death(tcp, EPROTO); 4036 } 4037 return; 4038 } 4039 mp = tcp_ack_mp(tcp); 4040 if (mp != NULL) { 4041 BUMP_LOCAL(tcp->tcp_obsegs); 4042 TCPS_BUMP_MIB(tcps, tcpOutAck); 4043 tcp_send_data(tcp, mp); 4044 } 4045 return; 4046 } 4047 } else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack, 4048 tcp->tcp_snxt_shrunk)) { 4049 tcp->tcp_is_wnd_shrnk = B_FALSE; 4050 } 4051 4052 /* 4053 * TCP gets a new ACK, update the notsack'ed list to delete those 4054 * blocks that are covered by this ACK. 4055 */ 4056 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 4057 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack, 4058 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list)); 4059 } 4060 4061 /* 4062 * If we got an ACK after fast retransmit, check to see 4063 * if it is a partial ACK. If it is not and the congestion 4064 * window was inflated to account for the other side's 4065 * cached packets, retract it. If it is, do Hoe's algorithm. 4066 */ 4067 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) { 4068 ASSERT(tcp->tcp_rexmit == B_FALSE); 4069 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) { 4070 tcp->tcp_dupack_cnt = 0; 4071 /* 4072 * Restore the orig tcp_cwnd_ssthresh after 4073 * fast retransmit phase. 4074 */ 4075 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) { 4076 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh; 4077 } 4078 tcp->tcp_rexmit_max = seg_ack; 4079 tcp->tcp_cwnd_cnt = 0; 4080 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4081 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4082 4083 /* 4084 * Remove all notsack info to avoid confusion with 4085 * the next fast retransmit/recovery phase.
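 *
 * (A concrete, purely illustrative picture of the test above: if
 * tcp_rexmit_max is 5000, an ACK for 5000 or higher ends the
 * recovery episode and takes this branch, while an ACK for, say,
 * 3000 is a partial ACK and takes the else branch below, where a
 * Hoe-style retransmission is set up.)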
4086 */ 4087 if (tcp->tcp_snd_sack_ok) { 4088 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, 4089 tcp); 4090 } 4091 } else { 4092 if (tcp->tcp_snd_sack_ok && 4093 tcp->tcp_notsack_list != NULL) { 4094 flags |= TH_NEED_SACK_REXMIT; 4095 tcp->tcp_pipe -= mss; 4096 if (tcp->tcp_pipe < 0) 4097 tcp->tcp_pipe = 0; 4098 } else { 4099 /* 4100 * Hoe's algorithm: 4101 * 4102 * Retransmit the unack'ed segment and 4103 * restart fast recovery. Note that we 4104 * need to scale back tcp_cwnd to the 4105 * original value when we started fast 4106 * recovery. This is to prevent overly 4107 * aggressive behaviour in sending new 4108 * segments. 4109 */ 4110 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh + 4111 tcps->tcps_dupack_fast_retransmit * mss; 4112 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd; 4113 flags |= TH_REXMIT_NEEDED; 4114 } 4115 } 4116 } else { 4117 tcp->tcp_dupack_cnt = 0; 4118 if (tcp->tcp_rexmit) { 4119 /* 4120 * TCP is retranmitting. If the ACK ack's all 4121 * outstanding data, update tcp_rexmit_max and 4122 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt 4123 * to the correct value. 4124 * 4125 * Note that SEQ_LEQ() is used. This is to avoid 4126 * unnecessary fast retransmit caused by dup ACKs 4127 * received when TCP does slow start retransmission 4128 * after a time out. During this phase, TCP may 4129 * send out segments which are already received. 4130 * This causes dup ACKs to be sent back. 4131 */ 4132 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) { 4133 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) { 4134 tcp->tcp_rexmit_nxt = seg_ack; 4135 } 4136 if (seg_ack != tcp->tcp_rexmit_max) { 4137 flags |= TH_XMIT_NEEDED; 4138 } 4139 } else { 4140 tcp->tcp_rexmit = B_FALSE; 4141 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 4142 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4143 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4144 } 4145 tcp->tcp_ms_we_have_waited = 0; 4146 } 4147 } 4148 4149 TCPS_BUMP_MIB(tcps, tcpInAckSegs); 4150 TCPS_UPDATE_MIB(tcps, tcpInAckBytes, bytes_acked); 4151 tcp->tcp_suna = seg_ack; 4152 if (tcp->tcp_zero_win_probe != 0) { 4153 tcp->tcp_zero_win_probe = 0; 4154 tcp->tcp_timer_backoff = 0; 4155 } 4156 4157 /* 4158 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed. 4159 * Note that it cannot be the SYN being ack'ed. The code flow 4160 * will not reach here. 4161 */ 4162 if (mp1 == NULL) { 4163 goto fin_acked; 4164 } 4165 4166 /* 4167 * Update the congestion window. 4168 * 4169 * If TCP is not ECN capable or TCP is ECN capable but the 4170 * congestion experience bit is not set, increase the tcp_cwnd as 4171 * usual. 4172 */ 4173 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) { 4174 cwnd = tcp->tcp_cwnd; 4175 add = mss; 4176 4177 if (cwnd >= tcp->tcp_cwnd_ssthresh) { 4178 /* 4179 * This is to prevent an increase of less than 1 MSS of 4180 * tcp_cwnd. With partial increase, tcp_wput_data() 4181 * may send out tinygrams in order to preserve mblk 4182 * boundaries. 4183 * 4184 * By initializing tcp_cwnd_cnt to new tcp_cwnd and 4185 * decrementing it by 1 MSS for every ACKs, tcp_cwnd is 4186 * increased by 1 MSS for every RTTs. 4187 */ 4188 if (tcp->tcp_cwnd_cnt <= 0) { 4189 tcp->tcp_cwnd_cnt = cwnd + add; 4190 } else { 4191 tcp->tcp_cwnd_cnt -= add; 4192 add = 0; 4193 } 4194 } 4195 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max); 4196 } 4197 4198 /* See if the latest urgent data has been acknowledged */ 4199 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && 4200 SEQ_GT(seg_ack, tcp->tcp_urg)) 4201 tcp->tcp_valid_bits &= ~TCP_URG_VALID; 4202 4203 /* Can we update the RTT estimates? 
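 * There are two possible sources for a sample: the timestamp echo
 * (tcp_opt_ts_ecr) carried by the segment when RFC 1323 timestamps
 * are in use, or, failing that, the transmit time stashed in b_prev
 * of the head mblk on the transmit list, with b_next holding the
 * sequence number that must be acknowledged before that sample may
 * be taken (see the loop further down that refreshes b_prev and
 * b_next).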
*/ 4204 if (tcp->tcp_snd_ts_ok) { 4205 /* Ignore zero timestamp echo-reply. */ 4206 if (tcpopt.tcp_opt_ts_ecr != 0) { 4207 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4208 (int32_t)tcpopt.tcp_opt_ts_ecr); 4209 } 4210 4211 /* If needed, restart the timer. */ 4212 if (tcp->tcp_set_timer == 1) { 4213 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4214 tcp->tcp_set_timer = 0; 4215 } 4216 /* 4217 * Update tcp_csuna in case the other side stops sending 4218 * us timestamps. 4219 */ 4220 tcp->tcp_csuna = tcp->tcp_snxt; 4221 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) { 4222 /* 4223 * An ACK sequence we haven't seen before, so get the RTT 4224 * and update the RTO. But first check if the timestamp is 4225 * valid to use. 4226 */ 4227 if ((mp1->b_next != NULL) && 4228 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next))) 4229 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4230 (int32_t)(intptr_t)mp1->b_prev); 4231 else 4232 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4233 4234 /* Remeber the last sequence to be ACKed */ 4235 tcp->tcp_csuna = seg_ack; 4236 if (tcp->tcp_set_timer == 1) { 4237 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4238 tcp->tcp_set_timer = 0; 4239 } 4240 } else { 4241 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4242 } 4243 4244 /* Eat acknowledged bytes off the xmit queue. */ 4245 for (;;) { 4246 mblk_t *mp2; 4247 uchar_t *wptr; 4248 4249 wptr = mp1->b_wptr; 4250 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX); 4251 bytes_acked -= (int)(wptr - mp1->b_rptr); 4252 if (bytes_acked < 0) { 4253 mp1->b_rptr = wptr + bytes_acked; 4254 /* 4255 * Set a new timestamp if all the bytes timed by the 4256 * old timestamp have been ack'ed. 4257 */ 4258 if (SEQ_GT(seg_ack, 4259 (uint32_t)(uintptr_t)(mp1->b_next))) { 4260 mp1->b_prev = 4261 (mblk_t *)(uintptr_t)LBOLT_FASTPATH; 4262 mp1->b_next = NULL; 4263 } 4264 break; 4265 } 4266 mp1->b_next = NULL; 4267 mp1->b_prev = NULL; 4268 mp2 = mp1; 4269 mp1 = mp1->b_cont; 4270 4271 /* 4272 * This notification is required for some zero-copy 4273 * clients to maintain a copy semantic. After the data 4274 * is ack'ed, client is safe to modify or reuse the buffer. 4275 */ 4276 if (tcp->tcp_snd_zcopy_aware && 4277 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY)) 4278 tcp_zcopy_notify(tcp); 4279 freeb(mp2); 4280 if (bytes_acked == 0) { 4281 if (mp1 == NULL) { 4282 /* Everything is ack'ed, clear the tail. */ 4283 tcp->tcp_xmit_tail = NULL; 4284 /* 4285 * Cancel the timer unless we are still 4286 * waiting for an ACK for the FIN packet. 4287 */ 4288 if (tcp->tcp_timer_tid != 0 && 4289 tcp->tcp_snxt == tcp->tcp_suna) { 4290 (void) TCP_TIMER_CANCEL(tcp, 4291 tcp->tcp_timer_tid); 4292 tcp->tcp_timer_tid = 0; 4293 } 4294 goto pre_swnd_update; 4295 } 4296 if (mp2 != tcp->tcp_xmit_tail) 4297 break; 4298 tcp->tcp_xmit_tail = mp1; 4299 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 4300 (uintptr_t)INT_MAX); 4301 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr - 4302 mp1->b_rptr); 4303 break; 4304 } 4305 if (mp1 == NULL) { 4306 /* 4307 * More was acked but there is nothing more 4308 * outstanding. This means that the FIN was 4309 * just acked or that we're talking to a clown. 
4310 */ 4311 fin_acked: 4312 ASSERT(tcp->tcp_fin_sent); 4313 tcp->tcp_xmit_tail = NULL; 4314 if (tcp->tcp_fin_sent) { 4315 /* FIN was acked - making progress */ 4316 if (!tcp->tcp_fin_acked) 4317 tcp->tcp_ip_forward_progress = B_TRUE; 4318 tcp->tcp_fin_acked = B_TRUE; 4319 if (tcp->tcp_linger_tid != 0 && 4320 TCP_TIMER_CANCEL(tcp, 4321 tcp->tcp_linger_tid) >= 0) { 4322 tcp_stop_lingering(tcp); 4323 freemsg(mp); 4324 mp = NULL; 4325 } 4326 } else { 4327 /* 4328 * We should never get here because 4329 * we have already checked that the 4330 * number of bytes ack'ed should be 4331 * smaller than or equal to what we 4332 * have sent so far (it is the 4333 * acceptability check of the ACK). 4334 * We can only get here if the send 4335 * queue is corrupted. 4336 * 4337 * Terminate the connection and 4338 * panic the system. It is better 4339 * for us to panic instead of 4340 * continuing to avoid other disaster. 4341 */ 4342 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 4343 tcp->tcp_rnxt, TH_RST|TH_ACK); 4344 panic("Memory corruption " 4345 "detected for connection %s.", 4346 tcp_display(tcp, NULL, 4347 DISP_ADDR_AND_PORT)); 4348 /*NOTREACHED*/ 4349 } 4350 goto pre_swnd_update; 4351 } 4352 ASSERT(mp2 != tcp->tcp_xmit_tail); 4353 } 4354 if (tcp->tcp_unsent) { 4355 flags |= TH_XMIT_NEEDED; 4356 } 4357 pre_swnd_update: 4358 tcp->tcp_xmit_head = mp1; 4359 swnd_update: 4360 /* 4361 * The following check is different from most other implementations. 4362 * For bi-directional transfer, when segments are dropped, the 4363 * "normal" check will not accept a window update in those 4364 * retransmitted segemnts. Failing to do that, TCP may send out 4365 * segments which are outside receiver's window. As TCP accepts 4366 * the ack in those retransmitted segments, if the window update in 4367 * the same segment is not accepted, TCP will incorrectly calculates 4368 * that it can send more segments. This can create a deadlock 4369 * with the receiver if its window becomes zero. 4370 */ 4371 if (SEQ_LT(tcp->tcp_swl2, seg_ack) || 4372 SEQ_LT(tcp->tcp_swl1, seg_seq) || 4373 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) { 4374 /* 4375 * The criteria for update is: 4376 * 4377 * 1. the segment acknowledges some data. Or 4378 * 2. the segment is new, i.e. it has a higher seq num. Or 4379 * 3. the segment is not old and the advertised window is 4380 * larger than the previous advertised window. 4381 */ 4382 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd) 4383 flags |= TH_XMIT_NEEDED; 4384 tcp->tcp_swnd = new_swnd; 4385 if (new_swnd > tcp->tcp_max_swnd) 4386 tcp->tcp_max_swnd = new_swnd; 4387 tcp->tcp_swl1 = seg_seq; 4388 tcp->tcp_swl2 = seg_ack; 4389 } 4390 est: 4391 if (tcp->tcp_state > TCPS_ESTABLISHED) { 4392 4393 switch (tcp->tcp_state) { 4394 case TCPS_FIN_WAIT_1: 4395 if (tcp->tcp_fin_acked) { 4396 tcp->tcp_state = TCPS_FIN_WAIT_2; 4397 DTRACE_TCP6(state__change, void, NULL, 4398 ip_xmit_attr_t *, connp->conn_ixa, 4399 void, NULL, tcp_t *, tcp, void, NULL, 4400 int32_t, TCPS_FIN_WAIT_1); 4401 /* 4402 * We implement the non-standard BSD/SunOS 4403 * FIN_WAIT_2 flushing algorithm. 4404 * If there is no user attached to this 4405 * TCP endpoint, then this TCP struct 4406 * could hang around forever in FIN_WAIT_2 4407 * state if the peer forgets to send us 4408 * a FIN. To prevent this, we wait only 4409 * 2*MSL (a convenient time value) for 4410 * the FIN to arrive. If it doesn't show up, 4411 * we flush the TCP endpoint. 
This algorithm, 4412 * though a violation of RFC-793, has worked 4413 * for over 10 years in BSD systems. 4414 * Note: SunOS 4.x waits 675 seconds before 4415 * flushing the FIN_WAIT_2 connection. 4416 */ 4417 TCP_TIMER_RESTART(tcp, 4418 tcp->tcp_fin_wait_2_flush_interval); 4419 } 4420 break; 4421 case TCPS_FIN_WAIT_2: 4422 break; /* Shutdown hook? */ 4423 case TCPS_LAST_ACK: 4424 freemsg(mp); 4425 if (tcp->tcp_fin_acked) { 4426 (void) tcp_clean_death(tcp, 0); 4427 return; 4428 } 4429 goto xmit_check; 4430 case TCPS_CLOSING: 4431 if (tcp->tcp_fin_acked) { 4432 SET_TIME_WAIT(tcps, tcp, connp); 4433 DTRACE_TCP6(state__change, void, NULL, 4434 ip_xmit_attr_t *, connp->conn_ixa, void, 4435 NULL, tcp_t *, tcp, void, NULL, int32_t, 4436 TCPS_CLOSING); 4437 } 4438 /*FALLTHRU*/ 4439 case TCPS_CLOSE_WAIT: 4440 freemsg(mp); 4441 goto xmit_check; 4442 default: 4443 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 4444 break; 4445 } 4446 } 4447 if (flags & TH_FIN) { 4448 /* Make sure we ack the fin */ 4449 flags |= TH_ACK_NEEDED; 4450 if (!tcp->tcp_fin_rcvd) { 4451 tcp->tcp_fin_rcvd = B_TRUE; 4452 tcp->tcp_rnxt++; 4453 tcpha = tcp->tcp_tcpha; 4454 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4455 4456 /* 4457 * Generate the ordrel_ind at the end unless we 4458 * are an eager guy. 4459 * In the eager case tcp_rsrv will do this when run 4460 * after tcp_accept is done. 4461 */ 4462 if (tcp->tcp_listener == NULL && 4463 !TCP_IS_DETACHED(tcp) && !tcp->tcp_hard_binding) 4464 flags |= TH_ORDREL_NEEDED; 4465 switch (tcp->tcp_state) { 4466 case TCPS_SYN_RCVD: 4467 tcp->tcp_state = TCPS_CLOSE_WAIT; 4468 DTRACE_TCP6(state__change, void, NULL, 4469 ip_xmit_attr_t *, connp->conn_ixa, 4470 void, NULL, tcp_t *, tcp, void, NULL, 4471 int32_t, TCPS_SYN_RCVD); 4472 /* Keepalive? */ 4473 break; 4474 case TCPS_ESTABLISHED: 4475 tcp->tcp_state = TCPS_CLOSE_WAIT; 4476 DTRACE_TCP6(state__change, void, NULL, 4477 ip_xmit_attr_t *, connp->conn_ixa, 4478 void, NULL, tcp_t *, tcp, void, NULL, 4479 int32_t, TCPS_ESTABLISHED); 4480 /* Keepalive? */ 4481 break; 4482 case TCPS_FIN_WAIT_1: 4483 if (!tcp->tcp_fin_acked) { 4484 tcp->tcp_state = TCPS_CLOSING; 4485 DTRACE_TCP6(state__change, void, NULL, 4486 ip_xmit_attr_t *, connp->conn_ixa, 4487 void, NULL, tcp_t *, tcp, void, 4488 NULL, int32_t, TCPS_FIN_WAIT_1); 4489 break; 4490 } 4491 /* FALLTHRU */ 4492 case TCPS_FIN_WAIT_2: 4493 SET_TIME_WAIT(tcps, tcp, connp); 4494 DTRACE_TCP6(state__change, void, NULL, 4495 ip_xmit_attr_t *, connp->conn_ixa, void, 4496 NULL, tcp_t *, tcp, void, NULL, int32_t, 4497 TCPS_FIN_WAIT_2); 4498 if (seg_len) { 4499 /* 4500 * implies data piggybacked on FIN. 4501 * break to handle data. 4502 */ 4503 break; 4504 } 4505 freemsg(mp); 4506 goto ack_check; 4507 } 4508 } 4509 } 4510 if (mp == NULL) 4511 goto xmit_check; 4512 if (seg_len == 0) { 4513 freemsg(mp); 4514 goto xmit_check; 4515 } 4516 if (mp->b_rptr == mp->b_wptr) { 4517 /* 4518 * The header has been consumed, so we remove the 4519 * zero-length mblk here. 4520 */ 4521 mp1 = mp; 4522 mp = mp->b_cont; 4523 freeb(mp1); 4524 } 4525 update_ack: 4526 tcpha = tcp->tcp_tcpha; 4527 tcp->tcp_rack_cnt++; 4528 { 4529 uint32_t cur_max; 4530 4531 cur_max = tcp->tcp_rack_cur_max; 4532 if (tcp->tcp_rack_cnt >= cur_max) { 4533 /* 4534 * We have more unacked data than we should - send 4535 * an ACK now. 
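 *
 * (Illustrative example: with tcp_rack_cur_max at 2, the second
 * segment received without an ACK having gone out takes this
 * branch; cur_max then creeps up by one on each such event, but
 * never past tcp_rack_abs_max.)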
4536 */ 4537 flags |= TH_ACK_NEEDED; 4538 cur_max++; 4539 if (cur_max > tcp->tcp_rack_abs_max) 4540 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 4541 else 4542 tcp->tcp_rack_cur_max = cur_max; 4543 } else if (TCP_IS_DETACHED(tcp)) { 4544 /* We don't have an ACK timer for detached TCP. */ 4545 flags |= TH_ACK_NEEDED; 4546 } else if (seg_len < mss) { 4547 /* 4548 * If we get a segment that is less than an mss, and we 4549 * already have unacknowledged data, and the amount 4550 * unacknowledged is not a multiple of mss, then we 4551 * better generate an ACK now. Otherwise, this may be 4552 * the tail piece of a transaction, and we would rather 4553 * wait for the response. 4554 */ 4555 uint32_t udif; 4556 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <= 4557 (uintptr_t)INT_MAX); 4558 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack); 4559 if (udif && (udif % mss)) 4560 flags |= TH_ACK_NEEDED; 4561 else 4562 flags |= TH_ACK_TIMER_NEEDED; 4563 } else { 4564 /* Start delayed ack timer */ 4565 flags |= TH_ACK_TIMER_NEEDED; 4566 } 4567 } 4568 tcp->tcp_rnxt += seg_len; 4569 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4570 4571 if (mp == NULL) 4572 goto xmit_check; 4573 4574 /* Update SACK list */ 4575 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 4576 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt, 4577 &(tcp->tcp_num_sack_blk)); 4578 } 4579 4580 if (tcp->tcp_urp_mp) { 4581 tcp->tcp_urp_mp->b_cont = mp; 4582 mp = tcp->tcp_urp_mp; 4583 tcp->tcp_urp_mp = NULL; 4584 /* Ready for a new signal. */ 4585 tcp->tcp_urp_last_valid = B_FALSE; 4586 #ifdef DEBUG 4587 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4588 "tcp_rput: sending exdata_ind %s", 4589 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4590 #endif /* DEBUG */ 4591 } 4592 4593 /* 4594 * Check for ancillary data changes compared to last segment. 4595 */ 4596 if (connp->conn_recv_ancillary.crb_all != 0) { 4597 mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira); 4598 if (mp == NULL) 4599 return; 4600 } 4601 4602 if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) { 4603 /* 4604 * Side queue inbound data until the accept happens. 4605 * tcp_accept/tcp_rput drains this when the accept happens. 4606 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or 4607 * T_EXDATA_IND) it is queued on b_next. 4608 * XXX Make urgent data use this. Requires: 4609 * Removing tcp_listener check for TH_URG 4610 * Making M_PCPROTO and MARK messages skip the eager case 4611 */ 4612 4613 if (tcp->tcp_kssl_pending) { 4614 DTRACE_PROBE1(kssl_mblk__ksslinput_pending, 4615 mblk_t *, mp); 4616 tcp_kssl_input(tcp, mp, ira->ira_cred); 4617 } else { 4618 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4619 } 4620 } else if (IPCL_IS_NONSTR(connp)) { 4621 /* 4622 * Non-STREAMS socket 4623 * 4624 * Note that no KSSL processing is done here, because 4625 * KSSL is not supported for non-STREAMS sockets. 4626 */ 4627 boolean_t push = flags & (TH_PUSH|TH_FIN); 4628 int error; 4629 4630 if ((*connp->conn_upcalls->su_recv)( 4631 connp->conn_upper_handle, 4632 mp, seg_len, 0, &error, &push) <= 0) { 4633 /* 4634 * We should never be in middle of a 4635 * fallback, the squeue guarantees that. 
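 * A non-positive return from su_recv() means sockfs could not
 * accept the data; when the reported error is ENOSPC we treat it
 * as flow control and shrink our receive window (tcp_rwnd) below.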
4636 */ 4637 ASSERT(error != EOPNOTSUPP); 4638 if (error == ENOSPC) 4639 tcp->tcp_rwnd -= seg_len; 4640 } else if (push) { 4641 /* PUSH bit set and sockfs is not flow controlled */ 4642 flags |= tcp_rwnd_reopen(tcp); 4643 } 4644 } else { 4645 /* STREAMS socket */ 4646 if (mp->b_datap->db_type != M_DATA || 4647 (flags & TH_MARKNEXT_NEEDED)) { 4648 if (tcp->tcp_rcv_list != NULL) { 4649 flags |= tcp_rcv_drain(tcp); 4650 } 4651 ASSERT(tcp->tcp_rcv_list == NULL || 4652 tcp->tcp_fused_sigurg); 4653 4654 if (flags & TH_MARKNEXT_NEEDED) { 4655 #ifdef DEBUG 4656 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4657 "tcp_rput: sending MSGMARKNEXT %s", 4658 tcp_display(tcp, NULL, 4659 DISP_PORT_ONLY)); 4660 #endif /* DEBUG */ 4661 mp->b_flag |= MSGMARKNEXT; 4662 flags &= ~TH_MARKNEXT_NEEDED; 4663 } 4664 4665 /* Does this need SSL processing first? */ 4666 if ((tcp->tcp_kssl_ctx != NULL) && 4667 (DB_TYPE(mp) == M_DATA)) { 4668 DTRACE_PROBE1(kssl_mblk__ksslinput_data1, 4669 mblk_t *, mp); 4670 tcp_kssl_input(tcp, mp, ira->ira_cred); 4671 } else { 4672 if (is_system_labeled()) 4673 tcp_setcred_data(mp, ira); 4674 4675 putnext(connp->conn_rq, mp); 4676 if (!canputnext(connp->conn_rq)) 4677 tcp->tcp_rwnd -= seg_len; 4678 } 4679 } else if ((tcp->tcp_kssl_ctx != NULL) && 4680 (DB_TYPE(mp) == M_DATA)) { 4681 /* Does this need SSL processing first? */ 4682 DTRACE_PROBE1(kssl_mblk__ksslinput_data2, mblk_t *, mp); 4683 tcp_kssl_input(tcp, mp, ira->ira_cred); 4684 } else if ((flags & (TH_PUSH|TH_FIN)) || 4685 tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) { 4686 if (tcp->tcp_rcv_list != NULL) { 4687 /* 4688 * Enqueue the new segment first and then 4689 * call tcp_rcv_drain() to send all data 4690 * up. The other way to do this is to 4691 * send all queued data up and then call 4692 * putnext() to send the new segment up. 4693 * This way can remove the else part later 4694 * on. 4695 * 4696 * We don't do this to avoid one more call to 4697 * canputnext() as tcp_rcv_drain() needs to 4698 * call canputnext(). 4699 */ 4700 tcp_rcv_enqueue(tcp, mp, seg_len, 4701 ira->ira_cred); 4702 flags |= tcp_rcv_drain(tcp); 4703 } else { 4704 if (is_system_labeled()) 4705 tcp_setcred_data(mp, ira); 4706 4707 putnext(connp->conn_rq, mp); 4708 if (!canputnext(connp->conn_rq)) 4709 tcp->tcp_rwnd -= seg_len; 4710 } 4711 } else { 4712 /* 4713 * Enqueue all packets when processing an mblk 4714 * from the co queue and also enqueue normal packets. 4715 */ 4716 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4717 } 4718 /* 4719 * Make sure the timer is running if we have data waiting 4720 * for a push bit. This provides resiliency against 4721 * implementations that do not correctly generate push bits. 4722 */ 4723 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 4724 /* 4725 * The connection may be closed at this point, so don't 4726 * do anything for a detached tcp. 4727 */ 4728 if (!TCP_IS_DETACHED(tcp)) 4729 tcp->tcp_push_tid = TCP_TIMER(tcp, 4730 tcp_push_timer, 4731 tcps->tcps_push_timer_interval); 4732 } 4733 } 4734 4735 xmit_check: 4736 /* Is there anything left to do? */ 4737 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4738 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 4739 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 4740 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4741 goto done; 4742 4743 /* Any transmit work to do and a non-zero window? 
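 * The checks below cover fast retransmit (TH_REXMIT_NEEDED), SACK
 * based retransmission (TH_NEED_SACK_REXMIT), and ordinary or
 * limited transmit of new data (TH_XMIT_NEEDED, TH_LIMIT_XMIT).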
*/ 4744 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 4745 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 4746 if (flags & TH_REXMIT_NEEDED) { 4747 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 4748 4749 TCPS_BUMP_MIB(tcps, tcpOutFastRetrans); 4750 if (snd_size > mss) 4751 snd_size = mss; 4752 if (snd_size > tcp->tcp_swnd) 4753 snd_size = tcp->tcp_swnd; 4754 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 4755 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 4756 B_TRUE); 4757 4758 if (mp1 != NULL) { 4759 tcp->tcp_xmit_head->b_prev = 4760 (mblk_t *)LBOLT_FASTPATH; 4761 tcp->tcp_csuna = tcp->tcp_snxt; 4762 TCPS_BUMP_MIB(tcps, tcpRetransSegs); 4763 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, 4764 snd_size); 4765 tcp_send_data(tcp, mp1); 4766 } 4767 } 4768 if (flags & TH_NEED_SACK_REXMIT) { 4769 tcp_sack_rexmit(tcp, &flags); 4770 } 4771 /* 4772 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 4773 * out new segment. Note that tcp_rexmit should not be 4774 * set, otherwise TH_LIMIT_XMIT should not be set. 4775 */ 4776 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 4777 if (!tcp->tcp_rexmit) { 4778 tcp_wput_data(tcp, NULL, B_FALSE); 4779 } else { 4780 tcp_ss_rexmit(tcp); 4781 } 4782 } 4783 /* 4784 * Adjust tcp_cwnd back to normal value after sending 4785 * new data segments. 4786 */ 4787 if (flags & TH_LIMIT_XMIT) { 4788 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 4789 /* 4790 * This will restart the timer. Restarting the 4791 * timer is used to avoid a timeout before the 4792 * limited transmitted segment's ACK gets back. 4793 */ 4794 if (tcp->tcp_xmit_head != NULL) 4795 tcp->tcp_xmit_head->b_prev = 4796 (mblk_t *)LBOLT_FASTPATH; 4797 } 4798 4799 /* Anything more to do? */ 4800 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 4801 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4802 goto done; 4803 } 4804 ack_check: 4805 if (flags & TH_SEND_URP_MARK) { 4806 ASSERT(tcp->tcp_urp_mark_mp); 4807 ASSERT(!IPCL_IS_NONSTR(connp)); 4808 /* 4809 * Send up any queued data and then send the mark message 4810 */ 4811 if (tcp->tcp_rcv_list != NULL) { 4812 flags |= tcp_rcv_drain(tcp); 4813 4814 } 4815 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4816 mp1 = tcp->tcp_urp_mark_mp; 4817 tcp->tcp_urp_mark_mp = NULL; 4818 if (is_system_labeled()) 4819 tcp_setcred_data(mp1, ira); 4820 4821 putnext(connp->conn_rq, mp1); 4822 #ifdef DEBUG 4823 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4824 "tcp_rput: sending zero-length %s %s", 4825 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 4826 "MSGNOTMARKNEXT"), 4827 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4828 #endif /* DEBUG */ 4829 flags &= ~TH_SEND_URP_MARK; 4830 } 4831 if (flags & TH_ACK_NEEDED) { 4832 /* 4833 * Time to send an ack for some reason. 4834 */ 4835 mp1 = tcp_ack_mp(tcp); 4836 4837 if (mp1 != NULL) { 4838 tcp_send_data(tcp, mp1); 4839 BUMP_LOCAL(tcp->tcp_obsegs); 4840 TCPS_BUMP_MIB(tcps, tcpOutAck); 4841 } 4842 if (tcp->tcp_ack_tid != 0) { 4843 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4844 tcp->tcp_ack_tid = 0; 4845 } 4846 } 4847 if (flags & TH_ACK_TIMER_NEEDED) { 4848 /* 4849 * Arrange for deferred ACK or push wait timeout. 4850 * Start timer if it is not already running. 4851 */ 4852 if (tcp->tcp_ack_tid == 0) { 4853 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 4854 tcp->tcp_localnet ? 4855 tcps->tcps_local_dack_interval : 4856 tcps->tcps_deferred_ack_interval); 4857 } 4858 } 4859 if (flags & TH_ORDREL_NEEDED) { 4860 /* 4861 * Send up the ordrel_ind unless we are an eager guy. 
4862 * In the eager case tcp_rsrv will do this when run 4863 * after tcp_accept is done. 4864 */ 4865 ASSERT(tcp->tcp_listener == NULL); 4866 ASSERT(!tcp->tcp_detached); 4867 4868 if (IPCL_IS_NONSTR(connp)) { 4869 ASSERT(tcp->tcp_ordrel_mp == NULL); 4870 tcp->tcp_ordrel_done = B_TRUE; 4871 (*connp->conn_upcalls->su_opctl) 4872 (connp->conn_upper_handle, SOCK_OPCTL_SHUT_RECV, 0); 4873 goto done; 4874 } 4875 4876 if (tcp->tcp_rcv_list != NULL) { 4877 /* 4878 * Push any mblk(s) enqueued from co processing. 4879 */ 4880 flags |= tcp_rcv_drain(tcp); 4881 } 4882 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4883 4884 mp1 = tcp->tcp_ordrel_mp; 4885 tcp->tcp_ordrel_mp = NULL; 4886 tcp->tcp_ordrel_done = B_TRUE; 4887 putnext(connp->conn_rq, mp1); 4888 } 4889 done: 4890 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4891 } 4892 4893 /* 4894 * Attach ancillary data to a received TCP segments for the 4895 * ancillary pieces requested by the application that are 4896 * different than they were in the previous data segment. 4897 * 4898 * Save the "current" values once memory allocation is ok so that 4899 * when memory allocation fails we can just wait for the next data segment. 4900 */ 4901 static mblk_t * 4902 tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp, 4903 ip_recv_attr_t *ira) 4904 { 4905 struct T_optdata_ind *todi; 4906 int optlen; 4907 uchar_t *optptr; 4908 struct T_opthdr *toh; 4909 crb_t addflag; /* Which pieces to add */ 4910 mblk_t *mp1; 4911 conn_t *connp = tcp->tcp_connp; 4912 4913 optlen = 0; 4914 addflag.crb_all = 0; 4915 /* If app asked for pktinfo and the index has changed ... */ 4916 if (connp->conn_recv_ancillary.crb_ip_recvpktinfo && 4917 ira->ira_ruifindex != tcp->tcp_recvifindex) { 4918 optlen += sizeof (struct T_opthdr) + 4919 sizeof (struct in6_pktinfo); 4920 addflag.crb_ip_recvpktinfo = 1; 4921 } 4922 /* If app asked for hoplimit and it has changed ... */ 4923 if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit && 4924 ipp->ipp_hoplimit != tcp->tcp_recvhops) { 4925 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 4926 addflag.crb_ipv6_recvhoplimit = 1; 4927 } 4928 /* If app asked for tclass and it has changed ... */ 4929 if (connp->conn_recv_ancillary.crb_ipv6_recvtclass && 4930 ipp->ipp_tclass != tcp->tcp_recvtclass) { 4931 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 4932 addflag.crb_ipv6_recvtclass = 1; 4933 } 4934 /* 4935 * If app asked for hopbyhop headers and it has changed ... 4936 * For security labels, note that (1) security labels can't change on 4937 * a connected socket at all, (2) we're connected to at most one peer, 4938 * (3) if anything changes, then it must be some other extra option. 4939 */ 4940 if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts && 4941 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 4942 (ipp->ipp_fields & IPPF_HOPOPTS), 4943 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 4944 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen; 4945 addflag.crb_ipv6_recvhopopts = 1; 4946 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 4947 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 4948 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 4949 return (mp); 4950 } 4951 /* If app asked for dst headers before routing headers ... 
*/ 4952 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts && 4953 ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen, 4954 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 4955 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) { 4956 optlen += sizeof (struct T_opthdr) + 4957 ipp->ipp_rthdrdstoptslen; 4958 addflag.crb_ipv6_recvrthdrdstopts = 1; 4959 if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts, 4960 &tcp->tcp_rthdrdstoptslen, 4961 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 4962 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) 4963 return (mp); 4964 } 4965 /* If app asked for routing headers and it has changed ... */ 4966 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr && 4967 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 4968 (ipp->ipp_fields & IPPF_RTHDR), 4969 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 4970 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 4971 addflag.crb_ipv6_recvrthdr = 1; 4972 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 4973 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 4974 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 4975 return (mp); 4976 } 4977 /* If app asked for dest headers and it has changed ... */ 4978 if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts || 4979 connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) && 4980 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 4981 (ipp->ipp_fields & IPPF_DSTOPTS), 4982 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 4983 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 4984 addflag.crb_ipv6_recvdstopts = 1; 4985 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 4986 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 4987 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 4988 return (mp); 4989 } 4990 4991 if (optlen == 0) { 4992 /* Nothing to add */ 4993 return (mp); 4994 } 4995 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 4996 if (mp1 == NULL) { 4997 /* 4998 * Defer sending ancillary data until the next TCP segment 4999 * arrives. 5000 */ 5001 return (mp); 5002 } 5003 mp1->b_cont = mp; 5004 mp = mp1; 5005 mp->b_wptr += sizeof (*todi) + optlen; 5006 mp->b_datap->db_type = M_PROTO; 5007 todi = (struct T_optdata_ind *)mp->b_rptr; 5008 todi->PRIM_type = T_OPTDATA_IND; 5009 todi->DATA_flag = 1; /* MORE data */ 5010 todi->OPT_length = optlen; 5011 todi->OPT_offset = sizeof (*todi); 5012 optptr = (uchar_t *)&todi[1]; 5013 /* 5014 * If app asked for pktinfo and the index has changed ... 5015 * Note that the local address never changes for the connection. 5016 */ 5017 if (addflag.crb_ip_recvpktinfo) { 5018 struct in6_pktinfo *pkti; 5019 uint_t ifindex; 5020 5021 ifindex = ira->ira_ruifindex; 5022 toh = (struct T_opthdr *)optptr; 5023 toh->level = IPPROTO_IPV6; 5024 toh->name = IPV6_PKTINFO; 5025 toh->len = sizeof (*toh) + sizeof (*pkti); 5026 toh->status = 0; 5027 optptr += sizeof (*toh); 5028 pkti = (struct in6_pktinfo *)optptr; 5029 pkti->ipi6_addr = connp->conn_laddr_v6; 5030 pkti->ipi6_ifindex = ifindex; 5031 optptr += sizeof (*pkti); 5032 ASSERT(OK_32PTR(optptr)); 5033 /* Save as "last" value */ 5034 tcp->tcp_recvifindex = ifindex; 5035 } 5036 /* If app asked for hoplimit and it has changed ... 
*/ 5037 if (addflag.crb_ipv6_recvhoplimit) { 5038 toh = (struct T_opthdr *)optptr; 5039 toh->level = IPPROTO_IPV6; 5040 toh->name = IPV6_HOPLIMIT; 5041 toh->len = sizeof (*toh) + sizeof (uint_t); 5042 toh->status = 0; 5043 optptr += sizeof (*toh); 5044 *(uint_t *)optptr = ipp->ipp_hoplimit; 5045 optptr += sizeof (uint_t); 5046 ASSERT(OK_32PTR(optptr)); 5047 /* Save as "last" value */ 5048 tcp->tcp_recvhops = ipp->ipp_hoplimit; 5049 } 5050 /* If app asked for tclass and it has changed ... */ 5051 if (addflag.crb_ipv6_recvtclass) { 5052 toh = (struct T_opthdr *)optptr; 5053 toh->level = IPPROTO_IPV6; 5054 toh->name = IPV6_TCLASS; 5055 toh->len = sizeof (*toh) + sizeof (uint_t); 5056 toh->status = 0; 5057 optptr += sizeof (*toh); 5058 *(uint_t *)optptr = ipp->ipp_tclass; 5059 optptr += sizeof (uint_t); 5060 ASSERT(OK_32PTR(optptr)); 5061 /* Save as "last" value */ 5062 tcp->tcp_recvtclass = ipp->ipp_tclass; 5063 } 5064 if (addflag.crb_ipv6_recvhopopts) { 5065 toh = (struct T_opthdr *)optptr; 5066 toh->level = IPPROTO_IPV6; 5067 toh->name = IPV6_HOPOPTS; 5068 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen; 5069 toh->status = 0; 5070 optptr += sizeof (*toh); 5071 bcopy((uchar_t *)ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen); 5072 optptr += ipp->ipp_hopoptslen; 5073 ASSERT(OK_32PTR(optptr)); 5074 /* Save as last value */ 5075 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 5076 (ipp->ipp_fields & IPPF_HOPOPTS), 5077 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 5078 } 5079 if (addflag.crb_ipv6_recvrthdrdstopts) { 5080 toh = (struct T_opthdr *)optptr; 5081 toh->level = IPPROTO_IPV6; 5082 toh->name = IPV6_RTHDRDSTOPTS; 5083 toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen; 5084 toh->status = 0; 5085 optptr += sizeof (*toh); 5086 bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen); 5087 optptr += ipp->ipp_rthdrdstoptslen; 5088 ASSERT(OK_32PTR(optptr)); 5089 /* Save as last value */ 5090 ip_savebuf((void **)&tcp->tcp_rthdrdstopts, 5091 &tcp->tcp_rthdrdstoptslen, 5092 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5093 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen); 5094 } 5095 if (addflag.crb_ipv6_recvrthdr) { 5096 toh = (struct T_opthdr *)optptr; 5097 toh->level = IPPROTO_IPV6; 5098 toh->name = IPV6_RTHDR; 5099 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 5100 toh->status = 0; 5101 optptr += sizeof (*toh); 5102 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 5103 optptr += ipp->ipp_rthdrlen; 5104 ASSERT(OK_32PTR(optptr)); 5105 /* Save as last value */ 5106 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 5107 (ipp->ipp_fields & IPPF_RTHDR), 5108 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 5109 } 5110 if (addflag.crb_ipv6_recvdstopts) { 5111 toh = (struct T_opthdr *)optptr; 5112 toh->level = IPPROTO_IPV6; 5113 toh->name = IPV6_DSTOPTS; 5114 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 5115 toh->status = 0; 5116 optptr += sizeof (*toh); 5117 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 5118 optptr += ipp->ipp_dstoptslen; 5119 ASSERT(OK_32PTR(optptr)); 5120 /* Save as last value */ 5121 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 5122 (ipp->ipp_fields & IPPF_DSTOPTS), 5123 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 5124 } 5125 ASSERT(optptr == mp->b_wptr); 5126 return (mp); 5127 } 5128 5129 /* The minimum of smoothed mean deviation in RTO calculation. */ 5130 #define TCP_SD_MIN 400 5131 5132 /* 5133 * Set RTO for this connection. The formula is from Jacobson and Karels' 5134 * "Congestion Avoidance and Control" in SIGCOMM '88. 
The variable names 5135 * are the same as those in Appendix A.2 of that paper. 5136 * 5137 * m = new measurement 5138 * sa = smoothed RTT average (8 * average estimates). 5139 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates). 5140 */ 5141 static void 5142 tcp_set_rto(tcp_t *tcp, clock_t rtt) 5143 { 5144 long m = TICK_TO_MSEC(rtt); 5145 clock_t sa = tcp->tcp_rtt_sa; 5146 clock_t sv = tcp->tcp_rtt_sd; 5147 clock_t rto; 5148 tcp_stack_t *tcps = tcp->tcp_tcps; 5149 5150 TCPS_BUMP_MIB(tcps, tcpRttUpdate); 5151 tcp->tcp_rtt_update++; 5152 5153 /* A non-zero tcp_rtt_sa means there is an existing estimate to update. */ 5154 if (sa != 0) { 5155 /* 5156 * Update average estimator: 5157 * new rtt = 7/8 old rtt + 1/8 Error 5158 */ 5159 5160 /* m is now Error in estimate. */ 5161 m -= sa >> 3; 5162 if ((sa += m) <= 0) { 5163 /* 5164 * Don't allow the smoothed average to be negative. 5165 * We use 0 to denote reinitialization of the 5166 * variables. 5167 */ 5168 sa = 1; 5169 } 5170 5171 /* 5172 * Update deviation estimator: 5173 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev) 5174 */ 5175 if (m < 0) 5176 m = -m; 5177 m -= sv >> 2; 5178 sv += m; 5179 } else { 5180 /* 5181 * This follows BSD's implementation. So the reinitialized 5182 * RTO is 3 * m. We cannot go less than 2 * m because if the 5183 * link is bandwidth dominated, doubling the window size 5184 * during slow start means doubling the RTT. We want to be 5185 * more conservative when we reinitialize our estimates. 3 5186 * is just a convenient number. 5187 */ 5188 sa = m << 3; 5189 sv = m << 1; 5190 } 5191 if (sv < TCP_SD_MIN) { 5192 /* 5193 * We do not know whether sa captures the delayed ACK 5194 * effect, since in a long train of segments a receiver 5195 * does not delay its ACKs. So set the minimum of sv 5196 * to be TCP_SD_MIN, which defaults to 400 ms, twice 5197 * the BSD DATO. That means the minimum of the mean 5198 * deviation is 100 ms. 5199 * 5200 */ 5201 sv = TCP_SD_MIN; 5202 } 5203 tcp->tcp_rtt_sa = sa; 5204 tcp->tcp_rtt_sd = sv; 5205 /* 5206 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv) 5207 * 5208 * Add tcp_rexmit_interval_extra in case of extreme environment 5209 * where the algorithm fails to work. The default value of 5210 * tcp_rexmit_interval_extra should be 0. 5211 * 5212 * As we use a finer grained clock than BSD and update 5213 * RTO for every ACK, add in another .25 of RTT to the 5214 * RTO to accommodate burstiness of 1/4 of the 5215 * window size. 5216 */ 5217 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5); 5218 5219 TCP_SET_RTO(tcp, rto); 5220 5221 /* Now, we can reset tcp_timer_backoff to use the new RTO... */ 5222 tcp->tcp_timer_backoff = 0; 5223 } 5224 5225 /* 5226 * On a labeled system we have some protocols above TCP, such as RPC, which 5227 * appear to assume that every mblk in a chain has a db_credp. 5228 */ 5229 static void 5230 tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira) 5231 { 5232 ASSERT(is_system_labeled()); 5233 ASSERT(ira->ira_cred != NULL); 5234 5235 while (mp != NULL) { 5236 mblk_setcred(mp, ira->ira_cred, NOPID); 5237 mp = mp->b_cont; 5238 } 5239 } 5240 5241 uint_t 5242 tcp_rwnd_reopen(tcp_t *tcp) 5243 { 5244 uint_t ret = 0; 5245 uint_t thwin; 5246 conn_t *connp = tcp->tcp_connp; 5247 5248 /* Learn the latest rwnd information that we sent to the other side. */ 5249 thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win)) 5250 << tcp->tcp_rcv_ws; 5251 /* This is peer's calculated send window (our receive window).
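 * A rough, purely illustrative example (numbers invented for
 * clarity, assuming a typical 1460 byte MSS and the default
 * deferred ACK maximum): if the last window we advertised was 8192
 * bytes and 7300 bytes have since arrived but not yet been
 * acknowledged, thwin works out to 892 bytes below; with a
 * conn_rcvbuf of 65536 the test passes, tcp_rwnd is reopened to
 * 65536 and, because the peer now believes it has less than a
 * couple of MSS to play with, an immediate window update is
 * requested via TH_ACK_NEEDED.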
*/ 5252 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 5253 /* 5254 * Increase the receive window to max. But we need to do receiver 5255 * SWS avoidance. This means that we need to check the increase of 5256 * of receive window is at least 1 MSS. 5257 */ 5258 if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) { 5259 /* 5260 * If the window that the other side knows is less than max 5261 * deferred acks segments, send an update immediately. 5262 */ 5263 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) { 5264 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutWinUpdate); 5265 ret = TH_ACK_NEEDED; 5266 } 5267 tcp->tcp_rwnd = connp->conn_rcvbuf; 5268 } 5269 return (ret); 5270 } 5271 5272 /* 5273 * Handle a packet that has been reclassified by TCP. 5274 * This function drops the ref on connp that the caller had. 5275 */ 5276 void 5277 tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst) 5278 { 5279 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 5280 5281 if (connp->conn_incoming_ifindex != 0 && 5282 connp->conn_incoming_ifindex != ira->ira_ruifindex) { 5283 freemsg(mp); 5284 CONN_DEC_REF(connp); 5285 return; 5286 } 5287 5288 if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) || 5289 (ira->ira_flags & IRAF_IPSEC_SECURE)) { 5290 ip6_t *ip6h; 5291 ipha_t *ipha; 5292 5293 if (ira->ira_flags & IRAF_IS_IPV4) { 5294 ipha = (ipha_t *)mp->b_rptr; 5295 ip6h = NULL; 5296 } else { 5297 ipha = NULL; 5298 ip6h = (ip6_t *)mp->b_rptr; 5299 } 5300 mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira); 5301 if (mp == NULL) { 5302 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 5303 /* Note that mp is NULL */ 5304 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 5305 CONN_DEC_REF(connp); 5306 return; 5307 } 5308 } 5309 5310 if (IPCL_IS_TCP(connp)) { 5311 /* 5312 * do not drain, certain use cases can blow 5313 * the stack 5314 */ 5315 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 5316 connp->conn_recv, connp, ira, 5317 SQ_NODRAIN, SQTAG_IP_TCP_INPUT); 5318 } else { 5319 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */ 5320 (connp->conn_recv)(connp, mp, NULL, 5321 ira); 5322 CONN_DEC_REF(connp); 5323 } 5324 5325 } 5326 5327 /* ARGSUSED */ 5328 static void 5329 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy) 5330 { 5331 conn_t *connp = (conn_t *)arg; 5332 tcp_t *tcp = connp->conn_tcp; 5333 queue_t *q = connp->conn_rq; 5334 5335 ASSERT(!IPCL_IS_NONSTR(connp)); 5336 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5337 tcp->tcp_rsrv_mp = mp; 5338 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5339 5340 if (TCP_IS_DETACHED(tcp) || q == NULL) { 5341 return; 5342 } 5343 5344 if (tcp->tcp_fused) { 5345 tcp_fuse_backenable(tcp); 5346 return; 5347 } 5348 5349 if (canputnext(q)) { 5350 /* Not flow-controlled, open rwnd */ 5351 tcp->tcp_rwnd = connp->conn_rcvbuf; 5352 5353 /* 5354 * Send back a window update immediately if TCP is above 5355 * ESTABLISHED state and the increase of the rcv window 5356 * that the other side knows is at least 1 MSS after flow 5357 * control is lifted. 5358 */ 5359 if (tcp->tcp_state >= TCPS_ESTABLISHED && 5360 tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) { 5361 tcp_xmit_ctl(NULL, tcp, 5362 (tcp->tcp_swnd == 0) ? tcp->tcp_suna : 5363 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 5364 } 5365 } 5366 } 5367 5368 /* 5369 * The read side service routine is called mostly when we get back-enabled as a 5370 * result of flow control relief. Since we don't actually queue anything in 5371 * TCP, we have no data to send out of here. What we do is clear the receive 5372 * window, and send out a window update. 
5373 */ 5374 void 5375 tcp_rsrv(queue_t *q) 5376 { 5377 conn_t *connp = Q_TO_CONN(q); 5378 tcp_t *tcp = connp->conn_tcp; 5379 mblk_t *mp; 5380 5381 /* No code does a putq on the read side */ 5382 ASSERT(q->q_first == NULL); 5383 5384 /* 5385 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already 5386 * been run. So just return. 5387 */ 5388 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5389 if ((mp = tcp->tcp_rsrv_mp) == NULL) { 5390 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5391 return; 5392 } 5393 tcp->tcp_rsrv_mp = NULL; 5394 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5395 5396 CONN_INC_REF(connp); 5397 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp, 5398 NULL, SQ_PROCESS, SQTAG_TCP_RSRV); 5399 } 5400 5401 /* At minimum we need 8 bytes in the TCP header for the lookup */ 5402 #define ICMP_MIN_TCP_HDR 8 5403 5404 /* 5405 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages 5406 * passed up by IP. The message is always received on the correct tcp_t. 5407 * Assumes that IP has pulled up everything up to and including the ICMP header. 5408 */ 5409 /* ARGSUSED2 */ 5410 void 5411 tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 5412 { 5413 conn_t *connp = (conn_t *)arg1; 5414 icmph_t *icmph; 5415 ipha_t *ipha; 5416 int iph_hdr_length; 5417 tcpha_t *tcpha; 5418 uint32_t seg_seq; 5419 tcp_t *tcp = connp->conn_tcp; 5420 5421 /* Assume IP provides aligned packets */ 5422 ASSERT(OK_32PTR(mp->b_rptr)); 5423 ASSERT((MBLKL(mp) >= sizeof (ipha_t))); 5424 5425 /* 5426 * Verify IP version. Anything other than IPv4 or IPv6 packet is sent 5427 * upstream. ICMPv6 is handled in tcp_icmp_error_ipv6. 5428 */ 5429 if (!(ira->ira_flags & IRAF_IS_IPV4)) { 5430 tcp_icmp_error_ipv6(tcp, mp, ira); 5431 return; 5432 } 5433 5434 /* Skip past the outer IP and ICMP headers */ 5435 iph_hdr_length = ira->ira_ip_hdr_length; 5436 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 5437 /* 5438 * If we don't have the correct outer IP header length 5439 * or if we don't have a complete inner IP header 5440 * drop it. 5441 */ 5442 if (iph_hdr_length < sizeof (ipha_t) || 5443 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 5444 noticmpv4: 5445 freemsg(mp); 5446 return; 5447 } 5448 ipha = (ipha_t *)&icmph[1]; 5449 5450 /* Skip past the inner IP and find the ULP header */ 5451 iph_hdr_length = IPH_HDR_LENGTH(ipha); 5452 tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length); 5453 /* 5454 * If we don't have the correct inner IP header length or if the ULP 5455 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR 5456 * bytes of TCP header, drop it. 5457 */ 5458 if (iph_hdr_length < sizeof (ipha_t) || 5459 ipha->ipha_protocol != IPPROTO_TCP || 5460 (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) { 5461 goto noticmpv4; 5462 } 5463 5464 seg_seq = ntohl(tcpha->tha_seq); 5465 switch (icmph->icmph_type) { 5466 case ICMP_DEST_UNREACHABLE: 5467 switch (icmph->icmph_code) { 5468 case ICMP_FRAGMENTATION_NEEDED: 5469 /* 5470 * Update Path MTU, then try to send something out. 5471 */ 5472 tcp_update_pmtu(tcp, B_TRUE); 5473 tcp_rexmit_after_error(tcp); 5474 break; 5475 case ICMP_PORT_UNREACHABLE: 5476 case ICMP_PROTOCOL_UNREACHABLE: 5477 switch (tcp->tcp_state) { 5478 case TCPS_SYN_SENT: 5479 case TCPS_SYN_RCVD: 5480 /* 5481 * ICMP can snipe away incipient 5482 * TCP connections as long as 5483 * seq number is same as initial 5484 * send seq number. 
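 * (In other words, only an ICMP error that quotes our own initial
 * SYN, i.e. seg_seq == tcp_iss, is allowed to abort a connection
 * that is still handshaking; anything else is ignored as possibly
 * stale or forged.)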
                 */
                if (seg_seq == tcp->tcp_iss) {
                    (void) tcp_clean_death(tcp,
                        ECONNREFUSED);
                }
                break;
            }
            break;
        case ICMP_HOST_UNREACHABLE:
        case ICMP_NET_UNREACHABLE:
            /* Record the error in case we finally time out. */
            if (icmph->icmph_code == ICMP_HOST_UNREACHABLE)
                tcp->tcp_client_errno = EHOSTUNREACH;
            else
                tcp->tcp_client_errno = ENETUNREACH;
            if (tcp->tcp_state == TCPS_SYN_RCVD) {
                if (tcp->tcp_listener != NULL &&
                    tcp->tcp_listener->tcp_syn_defense) {
                    /*
                     * Ditch the half-open connection if we
                     * suspect a SYN attack is under way.
                     */
                    (void) tcp_clean_death(tcp,
                        tcp->tcp_client_errno);
                }
            }
            break;
        default:
            break;
        }
        break;
    case ICMP_SOURCE_QUENCH: {
        /*
         * Use a global boolean to control
         * whether TCP should respond to ICMP_SOURCE_QUENCH.
         * The default is false.
         */
        if (tcp_icmp_source_quench) {
            /*
             * Reduce the sending rate as if we got a
             * retransmit timeout.
             */
            uint32_t npkt;

            npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) /
                tcp->tcp_mss;
            tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss;
            tcp->tcp_cwnd = tcp->tcp_mss;
            tcp->tcp_cwnd_cnt = 0;
        }
        break;
    }
    }
    freemsg(mp);
}

/*
 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6
 * error messages passed up by IP.
 * Assumes that IP has pulled up all the extension headers as well
 * as the ICMPv6 header.
 */
static void
tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira)
{
    icmp6_t *icmp6;
    ip6_t *ip6h;
    uint16_t iph_hdr_length = ira->ira_ip_hdr_length;
    tcpha_t *tcpha;
    uint8_t *nexthdrp;
    uint32_t seg_seq;

    /*
     * Verify that we have a complete IP header.
     */
    ASSERT((MBLKL(mp) >= sizeof (ip6_t)));

    icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
    ip6h = (ip6_t *)&icmp6[1];
    /*
     * Verify that we have a complete ICMP and inner IP header.
     */
    if ((uchar_t *)&ip6h[1] > mp->b_wptr) {
noticmpv6:
        freemsg(mp);
        return;
    }

    if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
        goto noticmpv6;
    tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
    /*
     * Validate the inner header. If the ULP is not IPPROTO_TCP, or if we
     * don't have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop the
     * packet.
     */
    if ((*nexthdrp != IPPROTO_TCP) ||
        ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
        goto noticmpv6;
    }

    seg_seq = ntohl(tcpha->tha_seq);
    switch (icmp6->icmp6_type) {
    case ICMP6_PACKET_TOO_BIG:
        /*
         * Update Path MTU, then try to send something out.
         */
        tcp_update_pmtu(tcp, B_TRUE);
        tcp_rexmit_after_error(tcp);
        break;
    case ICMP6_DST_UNREACH:
        switch (icmp6->icmp6_code) {
        case ICMP6_DST_UNREACH_NOPORT:
            if (((tcp->tcp_state == TCPS_SYN_SENT) ||
                (tcp->tcp_state == TCPS_SYN_RCVD)) &&
                (seg_seq == tcp->tcp_iss)) {
                (void) tcp_clean_death(tcp, ECONNREFUSED);
            }
            break;
        case ICMP6_DST_UNREACH_ADMIN:
        case ICMP6_DST_UNREACH_NOROUTE:
        case ICMP6_DST_UNREACH_BEYONDSCOPE:
        case ICMP6_DST_UNREACH_ADDR:
            /* Record the error in case we finally time out.
             */
            tcp->tcp_client_errno = EHOSTUNREACH;
            if (((tcp->tcp_state == TCPS_SYN_SENT) ||
                (tcp->tcp_state == TCPS_SYN_RCVD)) &&
                (seg_seq == tcp->tcp_iss)) {
                if (tcp->tcp_listener != NULL &&
                    tcp->tcp_listener->tcp_syn_defense) {
                    /*
                     * Ditch the half-open connection if we
                     * suspect a SYN attack is under way.
                     */
                    (void) tcp_clean_death(tcp,
                        tcp->tcp_client_errno);
                }
            }
            break;
        default:
            break;
        }
        break;
    case ICMP6_PARAM_PROB:
        /* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
        if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
            (uchar_t *)ip6h + icmp6->icmp6_pptr ==
            (uchar_t *)nexthdrp) {
            if (tcp->tcp_state == TCPS_SYN_SENT ||
                tcp->tcp_state == TCPS_SYN_RCVD) {
                (void) tcp_clean_death(tcp, ECONNREFUSED);
            }
            break;
        }
        break;

    case ICMP6_TIME_EXCEEDED:
    default:
        break;
    }
    freemsg(mp);
}

/*
 * CALLED OUTSIDE OF SQUEUE! It cannot follow any pointers that tcp might
 * change, but it can refer to fields like tcp_suna and tcp_snxt.
 *
 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP
 * error messages received by IP. The message is always received on the
 * correct tcp_t.
 */
/* ARGSUSED */
boolean_t
tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6,
    ip_recv_attr_t *ira)
{
    tcpha_t *tcpha = (tcpha_t *)arg2;
    uint32_t seq = ntohl(tcpha->tha_seq);
    tcp_t *tcp = connp->conn_tcp;

    /*
     * The TCP sequence number contained in the payload of the ICMP error
     * message should be within the range SND.UNA <= SEG.SEQ < SND.NXT.
     * Otherwise, the message is either a stale ICMP error, or an attack
     * from the network. Fail the verification.
     */
    if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
        return (B_FALSE);

    /* For "too big" we also check the ignore flag */
    if (ira->ira_flags & IRAF_IS_IPV4) {
        ASSERT(icmph != NULL);
        if (icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
            icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED &&
            tcp->tcp_tcps->tcps_ignore_path_mtu)
            return (B_FALSE);
    } else {
        ASSERT(icmp6 != NULL);
        if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG &&
            tcp->tcp_tcps->tcps_ignore_path_mtu)
            return (B_FALSE);
    }
    return (B_TRUE);
}
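
/*
 * Editor's note: the following helper is an illustrative sketch and is not
 * part of the original file. tcp_rwnd_reopen() and tcp_rsrv_input() above
 * apply receiver-side SWS avoidance when flow control is lifted: the window
 * is re-opened to the full receive buffer, but an immediate window-update
 * ACK is generated only when the window the peer currently knows about
 * grows by at least one MSS and is still below the deferred-ACK threshold.
 * The helper restates that decision with plain arguments; its name and
 * parameters are hypothetical.
 */
static boolean_t
rwnd_reopen_sends_update(uint32_t thwin, uint32_t rcvbuf, uint32_t mss,
    uint32_t rack_cur_max)
{
    /* The increase must be at least one MSS to be worth advertising. */
    if (rcvbuf - thwin < mss)
        return (B_FALSE);

    /*
     * Send the update immediately only if the peer's known window is
     * smaller than the maximum number of deferred-ACK segments;
     * otherwise a later ACK will carry the new window.
     */
    return (thwin < rack_cur_max * mss ? B_TRUE : B_FALSE);
}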
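
/*
 * Editor's note: illustrative sketch, not part of the original file. Before
 * tcp_icmp_input() above trusts anything in an ICMPv4 error, it must be able
 * to walk from the outer IP header, across the 8-byte ICMP header, through
 * the quoted inner IP header, and still see at least ICMP_MIN_TCP_HDR bytes
 * of the quoted TCP header (enough for the ports and sequence number). The
 * helper below shows that length budget in isolation; its name and
 * parameters are hypothetical, and it assumes a minimal 20-byte inner IPv4
 * header where the real code uses IPH_HDR_LENGTH() on the quoted header.
 */
static boolean_t
icmp_err_quotes_min_tcp_hdr(uint32_t outer_iph_len, uint32_t msg_len)
{
    /* outer IP + ICMP header (8) + inner IP (20) + 8 bytes of TCP */
    uint32_t need = outer_iph_len + 8 + 20 + ICMP_MIN_TCP_HDR;

    return (msg_len >= need ? B_TRUE : B_FALSE);
}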
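
/*
 * Editor's note: illustrative sketch, not part of the original file. When
 * tcp_icmp_source_quench is enabled, the ICMP_SOURCE_QUENCH case in
 * tcp_icmp_input() above shrinks the congestion window exactly as a
 * retransmission timeout would: ssthresh becomes half the data currently in
 * flight, rounded down to whole segments but never below two segments, and
 * cwnd restarts at one MSS. The helper repeats that arithmetic; its name and
 * parameters are hypothetical. For example, with 48 KB outstanding and a
 * 1460-byte MSS, npkt = (49152 / 2) / 1460 = 16, so ssthresh becomes
 * 16 * 1460 bytes while cwnd drops to a single 1460-byte segment.
 */
static void
source_quench_cwnd(uint32_t snxt, uint32_t suna, uint32_t mss,
    uint32_t *ssthresh, uint32_t *cwnd)
{
    /* Half the flight size, expressed in whole segments. */
    uint32_t npkt = ((snxt - suna) >> 1) / mss;

    /* Never let ssthresh drop below two segments. */
    *ssthresh = MAX(npkt, 2) * mss;

    /* Restart slow start from one segment (the code above also resets tcp_cwnd_cnt). */
    *cwnd = mss;
}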
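
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * tcp_verifyicmp() above accepts an ICMP error only when the sequence number
 * quoted in the error lies in the window SND.UNA <= SEG.SEQ < SND.NXT. The
 * SEQ_LT()/SEQ_GEQ() macros make that comparison safe across 32-bit sequence
 * number wrap; an equivalent form using unsigned modular arithmetic is shown
 * below (equivalent because the amount of unacknowledged data is always far
 * below 2^31). The helper name and parameters are hypothetical.
 */
static boolean_t
icmp_seq_in_send_window(uint32_t seq, uint32_t suna, uint32_t snxt)
{
    /*
     * Both differences are computed modulo 2^32, so the test still works
     * when the sequence space wraps. SEG.SEQ == SND.NXT is rejected, as
     * the range is half-open.
     */
    return ((seq - suna) < (snxt - suna) ? B_TRUE : B_FALSE);
}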