/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* This file contains all TCP input processing functions. */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/proto_set.h>
#include <inet/ipsec_impl.h>

/*
 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
 */

#ifdef _BIG_ENDIAN
#define	TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	(TCPOPT_TSTAMP << 8) | 10)
#else
#define	TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
	(TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif

/*
 * Flags returned from tcp_parse_options.
 */
#define	TCP_OPT_MSS_PRESENT	1
#define	TCP_OPT_WSCALE_PRESENT	2
#define	TCP_OPT_TSTAMP_PRESENT	4
#define	TCP_OPT_SACK_OK_PRESENT	8
#define	TCP_OPT_SACK_PRESENT	16

/*
 * PAWS needs a timer for 24 days. This is the number of ticks in 24 days
 */
#define	PAWS_TIMEOUT	((clock_t)(24*24*60*60*hz))

/*
 * Since tcp_listener is not cleared atomically with tcp_detached
 * being cleared we need this extra bit to tell a detached connection
 * apart from one that is in the process of being accepted.
 */
#define	TCP_IS_DETACHED_NONEAGER(tcp)	\
	(TCP_IS_DETACHED(tcp) &&	\
	    (!(tcp)->tcp_hard_binding))

/*
 * Steps to do when a tcp_t moves to TIME-WAIT state.
 *
 * This connection is done, we don't need to account for it. Decrement
 * the listener connection counter if needed.
 *
 * Decrement the connection counter of the stack. Note that this counter
 * is per CPU. So the total number of connections in a stack is the sum of all
 * of them. Since there is no lock for handling all of them exclusively, the
 * resulting sum is only an approximation.
 *
 * Unconditionally clear the exclusive binding bit so this TIME-WAIT
 * connection won't interfere with new ones.
 *
 * Start the TIME-WAIT timer. If upper layer has not closed the connection,
 * the timer is handled within the context of this tcp_t. When the timer
 * fires, tcp_clean_death() is called.
 * If the upper layer closes the connection during this period,
 * tcp_time_wait_append() will be called to add this tcp_t to the global
 * TIME-WAIT list. Note that this means that the actual wait time in
 * TIME-WAIT state will be longer than the tcps_time_wait_interval since
 * the period before the upper layer closes the connection is not accounted
 * for when tcp_time_wait_append() is called.
 *
 * If the upper layer has closed the connection, call tcp_time_wait_append()
 * directly.
 */
#define	SET_TIME_WAIT(tcps, tcp, connp)					\
{									\
	(tcp)->tcp_state = TCPS_TIME_WAIT;				\
	if ((tcp)->tcp_listen_cnt != NULL)				\
		TCP_DECR_LISTEN_CNT(tcp);				\
	atomic_dec_64(							\
	    (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt); \
	(connp)->conn_exclbind = 0;					\
	if (!TCP_IS_DETACHED(tcp)) {					\
		TCP_TIMER_RESTART(tcp, (tcps)->tcps_time_wait_interval); \
	} else {							\
		tcp_time_wait_append(tcp);				\
		TCP_DBGSTAT(tcps, tcp_rput_time_wait);			\
	}								\
}

/*
 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more
 * than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent
 * data, TCP will not respond with an ACK. RFC 793 requires that
 * TCP responds with an ACK for such a bogus ACK. By not following
 * the RFC, we prevent TCP from getting into an ACK storm if somehow
 * an attacker successfully spoofs an acceptable segment to our
 * peer; or when our peer is "confused."
 */
static uint32_t tcp_drop_ack_unsent_cnt = 10;

/*
 * The shift factor applied to tcp_mss to decide if the peer sends us a
 * valid initial receive window. By default, if the peer receive window
 * is smaller than 1 MSS (shift factor is 0), it is considered as invalid.
 */
static uint32_t tcp_init_wnd_shft = 0;

/* Process ICMP source quench message or not. */
static boolean_t tcp_icmp_source_quench = B_FALSE;

static boolean_t tcp_outbound_squeue_switch = B_FALSE;

static mblk_t	*tcp_conn_create_v4(conn_t *, conn_t *, mblk_t *,
		    ip_recv_attr_t *);
static mblk_t	*tcp_conn_create_v6(conn_t *, conn_t *, mblk_t *,
		    ip_recv_attr_t *);
static boolean_t	tcp_drop_q0(tcp_t *);
static void	tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *);
static mblk_t	*tcp_input_add_ancillary(tcp_t *, mblk_t *, ip_pkt_t *,
		    ip_recv_attr_t *);
static void	tcp_input_listener(void *, mblk_t *, void *, ip_recv_attr_t *);
static int	tcp_parse_options(tcpha_t *, tcp_opt_t *);
static void	tcp_process_options(tcp_t *, tcpha_t *);
static mblk_t	*tcp_reass(tcp_t *, mblk_t *, uint32_t);
static void	tcp_reass_elim_overlap(tcp_t *, mblk_t *);
static void	tcp_rsrv_input(void *, mblk_t *, void *, ip_recv_attr_t *);
static void	tcp_set_rto(tcp_t *, time_t);
static void	tcp_setcred_data(mblk_t *, ip_recv_attr_t *);

extern void	tcp_kssl_input(tcp_t *, mblk_t *, cred_t *);

/*
 * Set the MSS associated with a particular tcp based on its current value,
 * and a new one passed in. Observe minimums and maximums, and reset other
 * state variables that we want to view as multiples of MSS.
 *
 * The value of MSS could be either increased or decreased.
 */
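/*
 * As an illustration (the numbers are only an example): if tcp_cwnd was
 * 4 * 1460 with a 1460 byte MSS and the new MSS is 536, the rescaling of
 * tcp_cwnd below leaves it at 4 * 536, i.e. the congestion window is
 * preserved in units of segments rather than in bytes.
 */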
void
tcp_mss_set(tcp_t *tcp, uint32_t mss)
{
	uint32_t	mss_max;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	if (connp->conn_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;
	/*
	 * Unless naglim has been set by our client to
	 * a non-mss value, force naglim to track mss.
	 * This can help to aggregate small writes.
	 */
	if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
		tcp->tcp_naglim = mss;
	/*
	 * TCP should be able to buffer at least 4 MSS data for obvious
	 * performance reasons.
	 */
	if ((mss << 2) > connp->conn_sndbuf)
		connp->conn_sndbuf = mss << 2;

	/*
	 * Set the send lowater to at least twice the MSS.
	 */
	if ((mss << 1) > connp->conn_sndlowat)
		connp->conn_sndlowat = mss << 1;

	/*
	 * Update tcp_cwnd according to the new value of MSS. Keep the
	 * previous ratio to preserve the transmit rate.
	 */
	tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss;
	tcp->tcp_cwnd_cnt = 0;

	tcp->tcp_mss = mss;
	(void) tcp_maxpsz_set(tcp, B_TRUE);
}

/*
 * Extract option values from a tcp header. We put any found values into the
 * tcpopt struct and return a bitmask saying which options were found.
 */
static int
tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt)
{
	uchar_t		*endp;
	int		len;
	uint32_t	mss;
	uchar_t		*up = (uchar_t *)tcpha;
	int		found = 0;
	int32_t		sack_len;
	tcp_seq		sack_begin, sack_end;
	tcp_t		*tcp;

	endp = up + TCP_HDR_LENGTH(tcpha);
	up += TCP_MIN_HEADER_LENGTH;
	while (up < endp) {
		len = endp - up;
		switch (*up) {
		case TCPOPT_EOL:
			break;

		case TCPOPT_NOP:
			up++;
			continue;

		case TCPOPT_MAXSEG:
			if (len < TCPOPT_MAXSEG_LEN ||
			    up[1] != TCPOPT_MAXSEG_LEN)
				break;

			mss = BE16_TO_U16(up+2);
			/* Caller must handle tcp_mss_min and tcp_mss_max_* */
			tcpopt->tcp_opt_mss = mss;
			found |= TCP_OPT_MSS_PRESENT;

			up += TCPOPT_MAXSEG_LEN;
			continue;

		case TCPOPT_WSCALE:
			if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN)
				break;

			if (up[2] > TCP_MAX_WINSHIFT)
				tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT;
			else
				tcpopt->tcp_opt_wscale = up[2];
			found |= TCP_OPT_WSCALE_PRESENT;

			up += TCPOPT_WS_LEN;
			continue;

		case TCPOPT_SACK_PERMITTED:
			if (len < TCPOPT_SACK_OK_LEN ||
			    up[1] != TCPOPT_SACK_OK_LEN)
				break;
			found |= TCP_OPT_SACK_OK_PRESENT;
			up += TCPOPT_SACK_OK_LEN;
			continue;

		case TCPOPT_SACK:
			if (len <= 2 || up[1] <= 2 || len < up[1])
				break;

			/* If TCP is not interested in SACK blks... */
			if ((tcp = tcpopt->tcp) == NULL) {
				up += up[1];
				continue;
			}
			sack_len = up[1] - TCPOPT_HEADER_LEN;
			up += TCPOPT_HEADER_LEN;

			/*
			 * If the list is empty, allocate one and assume
			 * nothing is sack'ed.
			 */
			if (tcp->tcp_notsack_list == NULL) {
				tcp_notsack_update(&(tcp->tcp_notsack_list),
				    tcp->tcp_suna, tcp->tcp_snxt,
				    &(tcp->tcp_num_notsack_blk),
				    &(tcp->tcp_cnt_notsack_list));

				/*
				 * Make sure tcp_notsack_list is not NULL.
				 * This happens when kmem_alloc(KM_NOSLEEP)
				 * returns NULL.
				 */
				if (tcp->tcp_notsack_list == NULL) {
					up += sack_len;
					continue;
				}
				tcp->tcp_fack = tcp->tcp_suna;
			}

			while (sack_len > 0) {
				if (up + 8 > endp) {
					up = endp;
					break;
				}
				sack_begin = BE32_TO_U32(up);
				up += 4;
				sack_end = BE32_TO_U32(up);
				up += 4;
				sack_len -= 8;
				/*
				 * Bounds checking. Make sure the SACK
				 * info is within tcp_suna and tcp_snxt.
				 * If this SACK blk is out of bound, ignore
				 * it but continue to parse the following
				 * blks.
				 */
				if (SEQ_LEQ(sack_end, sack_begin) ||
				    SEQ_LT(sack_begin, tcp->tcp_suna) ||
				    SEQ_GT(sack_end, tcp->tcp_snxt)) {
					continue;
				}
				tcp_notsack_insert(&(tcp->tcp_notsack_list),
				    sack_begin, sack_end,
				    &(tcp->tcp_num_notsack_blk),
				    &(tcp->tcp_cnt_notsack_list));
				if (SEQ_GT(sack_end, tcp->tcp_fack)) {
					tcp->tcp_fack = sack_end;
				}
			}
			found |= TCP_OPT_SACK_PRESENT;
			continue;

		case TCPOPT_TSTAMP:
			if (len < TCPOPT_TSTAMP_LEN ||
			    up[1] != TCPOPT_TSTAMP_LEN)
				break;

			tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
			tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);

			found |= TCP_OPT_TSTAMP_PRESENT;

			up += TCPOPT_TSTAMP_LEN;
			continue;

		default:
			if (len <= 1 || len < (int)up[1] || up[1] == 0)
				break;
			up += up[1];
			continue;
		}
		break;
	}
	return (found);
}

/*
 * Process all TCP options in the SYN segment. Note that this function should
 * be called after tcp_set_destination() is called so that the necessary info
 * from the IRE is already set in the tcp structure.
 *
 * This function sets up the correct tcp_mss value according to the
 * MSS option value and our header size. It also sets up the window scale
 * and timestamp values, and initializes the SACK info blocks. But it does not
 * change the receive window size after setting the tcp_mss value. The caller
 * should do the appropriate change.
 */
static void
tcp_process_options(tcp_t *tcp, tcpha_t *tcpha)
{
	int		options;
	tcp_opt_t	tcpopt;
	uint32_t	mss_max;
	char		*tmp_tcph;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	tcpopt.tcp = NULL;
	options = tcp_parse_options(tcpha, &tcpopt);

	/*
	 * Process the MSS option. Note that the MSS option value does not
	 * account for IP or TCP options. This means that it is equal to
	 * MTU - minimum IP+TCP header size, which is 40 bytes for IPv4 and
	 * 60 bytes for IPv6.
	 */
	if (!(options & TCP_OPT_MSS_PRESENT)) {
		if (connp->conn_ipversion == IPV4_VERSION)
			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
		else
			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
	} else {
		if (connp->conn_ipversion == IPV4_VERSION)
			mss_max = tcps->tcps_mss_max_ipv4;
		else
			mss_max = tcps->tcps_mss_max_ipv6;
		if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
			tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
		else if (tcpopt.tcp_opt_mss > mss_max)
			tcpopt.tcp_opt_mss = mss_max;
	}

	/* Process the Window Scale option. */
	if (options & TCP_OPT_WSCALE_PRESENT) {
		tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
		tcp->tcp_snd_ws_ok = B_TRUE;
	} else {
		tcp->tcp_snd_ws = B_FALSE;
		tcp->tcp_snd_ws_ok = B_FALSE;
		tcp->tcp_rcv_ws = B_FALSE;
	}

	/* Process the Timestamp option. */
	if ((options & TCP_OPT_TSTAMP_PRESENT) &&
	    (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
		tmp_tcph = (char *)tcp->tcp_tcpha;

		tcp->tcp_snd_ts_ok = B_TRUE;
		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
		tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
		ASSERT(OK_32PTR(tmp_tcph));
		ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);

		/* Fill in our template header with basic timestamp option. */
		tmp_tcph += connp->conn_ht_ulp_len;
		tmp_tcph[0] = TCPOPT_NOP;
		tmp_tcph[1] = TCPOPT_NOP;
		tmp_tcph[2] = TCPOPT_TSTAMP;
		tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
		connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN;
		connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN;
		tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4);
	} else {
		tcp->tcp_snd_ts_ok = B_FALSE;
	}

	/*
	 * Process SACK options. If SACK is enabled for this connection,
	 * then allocate the SACK info structure. Note the following places
	 * where tcp_snd_sack_ok is set to true:
	 *
	 * For an active connection: in tcp_set_destination() called in
	 * tcp_connect().
	 *
	 * For a passive connection: in tcp_set_destination() called in
	 * tcp_input_listener().
	 *
	 * That's the reason why the extra TCP_IS_DETACHED() check is there.
	 * That check makes sure that if we did not send a SACK OK option,
	 * we will not enable SACK for this connection even though the other
	 * side sends us the SACK OK option. For an active connection, the
	 * SACK info structure has already been allocated. So we need to free
	 * it if SACK is disabled.
	 */
	if ((options & TCP_OPT_SACK_OK_PRESENT) &&
	    (tcp->tcp_snd_sack_ok ||
	    (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) {
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);

		tcp->tcp_snd_sack_ok = B_TRUE;
		if (tcp->tcp_snd_ts_ok) {
			tcp->tcp_max_sack_blk = 3;
		} else {
			tcp->tcp_max_sack_blk = 4;
		}
	} else if (tcp->tcp_snd_sack_ok) {
		/*
		 * Resetting tcp_snd_sack_ok to B_FALSE so that
		 * no SACK info will be used for this
		 * connection. This assumes that SACK usage
		 * permission is negotiated. This may need
		 * to be changed once this is clarified.
		 */
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);
		tcp->tcp_snd_sack_ok = B_FALSE;
	}

	/*
	 * Now we know the exact TCP/IP header length, subtract
	 * that from tcp_mss to get our side's MSS.
	 */
	tcp->tcp_mss -= connp->conn_ht_iphc_len;

	/*
	 * Here we assume that the other side's header size will be equal to
	 * our header size. We calculate the real MSS accordingly. We need to
	 * take into account the additional stuff IPsec puts in.
	 *
	 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header)
	 */
	tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len +
	    tcp->tcp_ipsec_overhead -
	    ((connp->conn_ipversion == IPV4_VERSION ?
	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH);

	/*
	 * Set the MSS to the smaller one of both ends of the connection.
	 * We should not have called tcp_mss_set() before, but our
	 * side of the MSS should have been set to a proper value
	 * by tcp_set_destination(). tcp_mss_set() will also set up the
	 * STREAM head parameters properly.
	 *
	 * If we have a larger-than-16-bit window but the other side
	 * didn't want to do window scale, tcp_rwnd_set() will take
	 * care of that.
	 */
	tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss));

	/*
	 * Initialize tcp_cwnd value. After tcp_mss_set(), tcp_mss has been
	 * updated properly.
	 */
	TCP_SET_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial);
}

/*
 * Add a new piece to the tcp reassembly queue. If the gap at the beginning
 * is filled, return as much as we can. The message passed in may be
 * multi-part, chained using b_cont. "start" is the starting sequence
 * number for this piece.
 */
static mblk_t *
tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start)
{
	uint32_t	end;
	mblk_t		*mp1;
	mblk_t		*mp2;
	mblk_t		*next_mp;
	uint32_t	u1;
	tcp_stack_t	*tcps = tcp->tcp_tcps;


	/* Walk through all the new pieces. */
	do {
		ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
		    (uintptr_t)INT_MAX);
		end = start + (int)(mp->b_wptr - mp->b_rptr);
		next_mp = mp->b_cont;
		if (start == end) {
			/* Empty.  Blast it. */
			freeb(mp);
			continue;
		}
		mp->b_cont = NULL;
		TCP_REASS_SET_SEQ(mp, start);
		TCP_REASS_SET_END(mp, end);
		mp1 = tcp->tcp_reass_tail;
		if (!mp1) {
			tcp->tcp_reass_tail = mp;
			tcp->tcp_reass_head = mp;
			TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
			TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
			    end - start);
			continue;
		}
		/* New stuff completely beyond tail? */
		if (SEQ_GEQ(start, TCP_REASS_END(mp1))) {
			/* Link it on end. */
			mp1->b_cont = mp;
			tcp->tcp_reass_tail = mp;
			TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
			TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
			    end - start);
			continue;
		}
		mp1 = tcp->tcp_reass_head;
		u1 = TCP_REASS_SEQ(mp1);
		/* New stuff at the front? */
		if (SEQ_LT(start, u1)) {
			/* Yes... Check for overlap. */
			mp->b_cont = mp1;
			tcp->tcp_reass_head = mp;
			tcp_reass_elim_overlap(tcp, mp);
			continue;
		}
		/*
		 * The new piece fits somewhere between the head and tail.
		 * We find our slot, where mp1 precedes us and mp2 trails.
		 */
		for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) {
			u1 = TCP_REASS_SEQ(mp2);
			if (SEQ_LEQ(start, u1))
				break;
		}
		/* Link ourselves in */
		mp->b_cont = mp2;
		mp1->b_cont = mp;

		/* Trim overlap with following mblk(s) first */
		tcp_reass_elim_overlap(tcp, mp);

		/* Trim overlap with preceding mblk */
		tcp_reass_elim_overlap(tcp, mp1);

	} while (start = end, mp = next_mp);
	mp1 = tcp->tcp_reass_head;
	/* Anything ready to go? */
	if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt)
		return (NULL);
	/* Eat what we can off the queue */
	for (;;) {
		mp = mp1->b_cont;
		end = TCP_REASS_END(mp1);
		TCP_REASS_SET_SEQ(mp1, 0);
		TCP_REASS_SET_END(mp1, 0);
		if (!mp) {
			tcp->tcp_reass_tail = NULL;
			break;
		}
		if (end != TCP_REASS_SEQ(mp)) {
			mp1->b_cont = NULL;
			break;
		}
		mp1 = mp;
	}
	mp1 = tcp->tcp_reass_head;
	tcp->tcp_reass_head = mp;
	return (mp1);
}

/* Eliminate any overlap that mp may have over later mblks */
static void
tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp)
{
	uint32_t	end;
	mblk_t		*mp1;
	uint32_t	u1;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	end = TCP_REASS_END(mp);
	while ((mp1 = mp->b_cont) != NULL) {
		u1 = TCP_REASS_SEQ(mp1);
		if (!SEQ_GT(end, u1))
			break;
		if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) {
			mp->b_wptr -= end - u1;
			TCP_REASS_SET_END(mp, u1);
			TCPS_BUMP_MIB(tcps, tcpInDataPartDupSegs);
			TCPS_UPDATE_MIB(tcps, tcpInDataPartDupBytes,
			    end - u1);
			break;
		}
		mp->b_cont = mp1->b_cont;
		TCP_REASS_SET_SEQ(mp1, 0);
		TCP_REASS_SET_END(mp1, 0);
		freeb(mp1);
		TCPS_BUMP_MIB(tcps, tcpInDataDupSegs);
		TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, end - u1);
	}
	if (!mp1)
		tcp->tcp_reass_tail = mp;
}

/*
 * This function does PAWS protection check. Returns B_TRUE if the
 * segment passes the PAWS test, else returns B_FALSE.
 */
boolean_t
tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp)
{
	uint8_t	flags;
	int	options;
	uint8_t *up;
	conn_t	*connp = tcp->tcp_connp;

	flags = (unsigned int)tcpha->tha_flags & 0xFF;
	/*
	 * If timestamp option is aligned nicely, get values inline,
	 * otherwise call general routine to parse. Only do that
	 * if timestamp is the only option.
	 */
	if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH +
	    TCPOPT_REAL_TS_LEN &&
	    OK_32PTR((up = ((uint8_t *)tcpha) +
	    TCP_MIN_HEADER_LENGTH)) &&
	    *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
		tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
		tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));

		options = TCP_OPT_TSTAMP_PRESENT;
	} else {
		if (tcp->tcp_snd_sack_ok) {
			tcpoptp->tcp = tcp;
		} else {
			tcpoptp->tcp = NULL;
		}
		options = tcp_parse_options(tcpha, tcpoptp);
	}

	if (options & TCP_OPT_TSTAMP_PRESENT) {
		/*
		 * Do PAWS per RFC 1323 section 4.2. Accept RST
		 * regardless of the timestamp, page 18 RFC 1323.bis.
		 */
		if ((flags & TH_RST) == 0 &&
		    TSTMP_LT(tcpoptp->tcp_opt_ts_val,
		    tcp->tcp_ts_recent)) {
			if (TSTMP_LT(LBOLT_FASTPATH64,
			    tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) {
				/* This segment is not acceptable. */
				return (B_FALSE);
			} else {
				/*
				 * Connection has been idle for
				 * too long. Reset the timestamp
				 * and assume the segment is valid.
				 */
				tcp->tcp_ts_recent =
				    tcpoptp->tcp_opt_ts_val;
			}
		}
	} else {
		/*
		 * If we don't get a timestamp on every packet, we
		 * figure we can't really trust 'em, so we stop sending
		 * and parsing them.
		 */
		tcp->tcp_snd_ts_ok = B_FALSE;

		connp->conn_ht_iphc_len -= TCPOPT_REAL_TS_LEN;
		connp->conn_ht_ulp_len -= TCPOPT_REAL_TS_LEN;
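		/*
		 * Dropping the 12 byte timestamp option shrinks the TCP
		 * header by three 32-bit words; the data offset field lives
		 * in the upper four bits of tha_offset_and_reserved, hence
		 * the (3 << 4) adjustment below.
		 */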
		tcp->tcp_tcpha->tha_offset_and_reserved -= (3 << 4);
		/*
		 * Adjust the tcp_mss and tcp_cwnd accordingly. We avoid
		 * doing a slow start here so as not to lose on the
		 * transfer rate built up so far.
		 */
		tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN);
		if (tcp->tcp_snd_sack_ok)
			tcp->tcp_max_sack_blk = 4;
	}
	return (B_TRUE);
}

/*
 * Defense for the SYN attack -
 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
 *    one from the list of droppable eagers. This list is a subset of q0.
 *    See comments before the definition of MAKE_DROPPABLE().
 * 2. Don't drop a SYN request before its first timeout. This gives every
 *    request at least until the first timeout to complete its 3-way handshake.
 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
 *    requests currently on the queue have timed out. This will be used
 *    as an indicator of whether an attack is under way, so that appropriate
 *    actions can be taken. (It's incremented in tcp_timer() and decremented
 *    either when the eager goes into ESTABLISHED, or gets freed up.)
 * 4. The current threshold is - # of timeouts > q0len/4 => SYN alert on
 *    # of timeouts drops back to <= q0len/32 => SYN alert off
 */
static boolean_t
tcp_drop_q0(tcp_t *tcp)
{
	tcp_t	*eager;
	mblk_t	*mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock));
	ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0);

	/* Pick the oldest eager from the list of droppable eagers */
	eager = tcp->tcp_eager_prev_drop_q0;

	/* If the list is empty, return B_FALSE */
	if (eager == tcp) {
		return (B_FALSE);
	}

	/* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
	if ((mp = allocb(0, BPRI_HI)) == NULL)
		return (B_FALSE);

	/*
	 * Take this eager out from the list of droppable eagers since we are
	 * going to drop it.
	 */
	MAKE_UNDROPPABLE(eager);

	if (tcp->tcp_connp->conn_debug) {
		(void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
		    "tcp_drop_q0: listen half-open queue (max=%d) overflow"
		    " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0,
		    tcp->tcp_conn_req_cnt_q0,
		    tcp_display(tcp, NULL, DISP_PORT_ONLY));
	}

	TCPS_BUMP_MIB(tcps, tcpHalfOpenDrop);

	/* Put a reference on the conn as we are enqueueing it in the squeue */
	CONN_INC_REF(eager->tcp_connp);

	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
	    tcp_clean_death_wrapper, eager->tcp_connp, NULL,
	    SQ_FILL, SQTAG_TCP_DROP_Q0);

	return (B_TRUE);
}

/*
 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6
 */
static mblk_t *
tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
	tcp_t		*ltcp = lconnp->conn_tcp;
	tcp_t		*tcp = connp->conn_tcp;
	mblk_t		*tpi_mp;
	ipha_t		*ipha;
	ip6_t		*ip6h;
	sin6_t		sin6;
	uint_t		ifindex = ira->ira_ruifindex;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (ira->ira_flags & IRAF_IS_IPV4) {
		ipha = (ipha_t *)mp->b_rptr;

		connp->conn_ipversion = IPV4_VERSION;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
		connp->conn_saddr_v6 = connp->conn_laddr_v6;

		sin6 = sin6_null;
		sin6.sin6_addr = connp->conn_faddr_v6;
		sin6.sin6_port = connp->conn_fport;
		sin6.sin6_family = AF_INET6;
		sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
		    IPCL_ZONEID(lconnp), tcps->tcps_netstack);

		if (connp->conn_recv_ancillary.crb_recvdstaddr) {
			sin6_t	sin6d;

			sin6d = sin6_null;
			sin6d.sin6_addr = connp->conn_laddr_v6;
			sin6d.sin6_port = connp->conn_lport;
			sin6d.sin6_family = AF_INET;
			tpi_mp = mi_tpi_extconn_ind(NULL,
			    (char *)&sin6d, sizeof (sin6_t),
			    (char *)&tcp,
			    (t_scalar_t)sizeof (intptr_t),
			    (char *)&sin6d, sizeof (sin6_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		} else {
			tpi_mp = mi_tpi_conn_ind(NULL,
			    (char *)&sin6, sizeof (sin6_t),
			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		}
	} else {
		ip6h = (ip6_t *)mp->b_rptr;

		connp->conn_ipversion = IPV6_VERSION;
		connp->conn_laddr_v6 = ip6h->ip6_dst;
		connp->conn_faddr_v6 = ip6h->ip6_src;
		connp->conn_saddr_v6 = connp->conn_laddr_v6;

		sin6 = sin6_null;
		sin6.sin6_addr = connp->conn_faddr_v6;
		sin6.sin6_port = connp->conn_fport;
		sin6.sin6_family = AF_INET6;
		sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
		sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
		    IPCL_ZONEID(lconnp), tcps->tcps_netstack);

		if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) {
			/* Pass up the scope_id of remote addr */
			sin6.sin6_scope_id = ifindex;
		} else {
			sin6.sin6_scope_id = 0;
		}
		if (connp->conn_recv_ancillary.crb_recvdstaddr) {
			sin6_t	sin6d;

			sin6d = sin6_null;
			sin6.sin6_addr = connp->conn_laddr_v6;
			sin6d.sin6_port = connp->conn_lport;
			sin6d.sin6_family = AF_INET6;
			if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6))
				sin6d.sin6_scope_id = ifindex;

			tpi_mp = mi_tpi_extconn_ind(NULL,
			    (char *)&sin6d, sizeof (sin6_t),
			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
			    (char *)&sin6d, sizeof (sin6_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		} else {
			tpi_mp = mi_tpi_conn_ind(NULL,
			    (char *)&sin6, sizeof (sin6_t),
			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
		}
	}

	tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
	return (tpi_mp);
}

/* Handle a SYN on an AF_INET socket */
static mblk_t *
tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
	tcp_t		*ltcp = lconnp->conn_tcp;
	tcp_t		*tcp = connp->conn_tcp;
	sin_t		sin;
	mblk_t		*tpi_mp = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	ipha_t		*ipha;

	ASSERT(ira->ira_flags & IRAF_IS_IPV4);
	ipha = (ipha_t *)mp->b_rptr;

	connp->conn_ipversion = IPV4_VERSION;
	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
	connp->conn_saddr_v6 = connp->conn_laddr_v6;

	sin = sin_null;
	sin.sin_addr.s_addr = connp->conn_faddr_v4;
	sin.sin_port = connp->conn_fport;
	sin.sin_family = AF_INET;
	if (lconnp->conn_recv_ancillary.crb_recvdstaddr) {
		sin_t	sind;

		sind = sin_null;
		sind.sin_addr.s_addr = connp->conn_laddr_v4;
		sind.sin_port = connp->conn_lport;
		sind.sin_family = AF_INET;
		tpi_mp = mi_tpi_extconn_ind(NULL,
		    (char *)&sind, sizeof (sin_t), (char *)&tcp,
		    (t_scalar_t)sizeof (intptr_t), (char *)&sind,
		    sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum);
	} else {
		tpi_mp = mi_tpi_conn_ind(NULL,
		    (char *)&sin, sizeof (sin_t),
		    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
		    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
	}

	tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	return (tpi_mp);
}

/*
 * Called via squeue to get on to the eager's perimeter. It sends a
 * TH_RST if the eager is in the fanout table. The listener wants the
 * eager to disappear either by means of tcp_eager_blowoff() or
 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be
 * called (via squeue) if the eager cannot be inserted in the
 * fanout table in tcp_input_listener().
 */
/* ARGSUSED */
void
tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t	*econnp = (conn_t *)arg;
	tcp_t	*eager = econnp->conn_tcp;
	tcp_t	*listener = eager->tcp_listener;

	/*
	 * We could be called because the listener is closing. Since
	 * the eager was using the listener's queues, we avoid
	 * using them from now on.
	 */
	ASSERT(eager->tcp_detached);
	econnp->conn_rq = NULL;
	econnp->conn_wq = NULL;

	/*
	 * An eager's conn_fanout will be NULL if it's a duplicate
	 * of an existing 4-tuple in the conn fanout table.
	 * We don't want to send an RST out in such a case.
	 */
	if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) {
		tcp_xmit_ctl("tcp_eager_kill, can't wait",
		    eager, eager->tcp_snxt, 0, TH_RST);
	}

	/* We are here because the listener wants this eager gone */
	if (listener != NULL) {
		mutex_enter(&listener->tcp_eager_lock);
		tcp_eager_unlink(eager);
		if (eager->tcp_tconnind_started) {
			/*
			 * The eager has sent a conn_ind up to the
			 * listener but the listener decides to close
			 * instead. We need to drop the extra ref
			 * placed on the eager in tcp_input_data() before
			 * sending the conn_ind to the listener.
			 */
			CONN_DEC_REF(econnp);
		}
		mutex_exit(&listener->tcp_eager_lock);
		CONN_DEC_REF(listener->tcp_connp);
	}

	if (eager->tcp_state != TCPS_CLOSED)
		tcp_close_detached(eager);
}

/*
 * Reset any eager connection hanging off this listener marked
 * with 'seqnum' and then reclaim its resources.
 */
boolean_t
tcp_eager_blowoff(tcp_t	*listener, t_scalar_t seqnum)
{
	tcp_t	*eager;
	mblk_t	*mp;

	eager = listener;
	mutex_enter(&listener->tcp_eager_lock);
	do {
		eager = eager->tcp_eager_next_q;
		if (eager == NULL) {
			mutex_exit(&listener->tcp_eager_lock);
			return (B_FALSE);
		}
	} while (eager->tcp_conn_req_seqnum != seqnum);

	if (eager->tcp_closemp_used) {
		mutex_exit(&listener->tcp_eager_lock);
		return (B_TRUE);
	}
	eager->tcp_closemp_used = B_TRUE;
	TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
	CONN_INC_REF(eager->tcp_connp);
	mutex_exit(&listener->tcp_eager_lock);
	mp = &eager->tcp_closemp;
	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill,
	    eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF);
	return (B_TRUE);
}

/*
 * Reset any eager connection hanging off this listener
 * and then reclaim its resources.
 */
void
tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only)
{
	tcp_t	*eager;
	mblk_t	*mp;
	tcp_stack_t	*tcps = listener->tcp_tcps;

	ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));

	if (!q0_only) {
		/* First cleanup q */
		TCP_STAT(tcps, tcp_eager_blowoff_q);
		eager = listener->tcp_eager_next_q;
		while (eager != NULL) {
			if (!eager->tcp_closemp_used) {
				eager->tcp_closemp_used = B_TRUE;
				TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
				CONN_INC_REF(eager->tcp_connp);
				mp = &eager->tcp_closemp;
				SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
				    tcp_eager_kill, eager->tcp_connp, NULL,
				    SQ_FILL, SQTAG_TCP_EAGER_CLEANUP);
			}
			eager = eager->tcp_eager_next_q;
		}
	}
	/* Then cleanup q0 */
	TCP_STAT(tcps, tcp_eager_blowoff_q0);
	eager = listener->tcp_eager_next_q0;
	while (eager != listener) {
		if (!eager->tcp_closemp_used) {
			eager->tcp_closemp_used = B_TRUE;
			TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
			CONN_INC_REF(eager->tcp_connp);
			mp = &eager->tcp_closemp;
			SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
			    tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL,
			    SQTAG_TCP_EAGER_CLEANUP_Q0);
		}
		eager = eager->tcp_eager_next_q0;
	}
}

/*
 * If we are an eager connection hanging off a listener that hasn't
 * formally accepted the connection yet, get off the listener's list and
 * blow off any data that we have accumulated.
 */
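/*
 * q0 is a doubly linked circular list with the listener itself as the head,
 * while q is a singly linked, NULL-terminated list whose tail is cached in
 * tcp_eager_last_q; this is why the two unlink cases below are handled
 * differently.
 */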
void
tcp_eager_unlink(tcp_t *tcp)
{
	tcp_t	*listener = tcp->tcp_listener;

	ASSERT(listener != NULL);
	ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
	if (tcp->tcp_eager_next_q0 != NULL) {
		ASSERT(tcp->tcp_eager_prev_q0 != NULL);

		/* Remove the eager tcp from q0 */
		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
		    tcp->tcp_eager_prev_q0;
		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
		    tcp->tcp_eager_next_q0;
		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
		listener->tcp_conn_req_cnt_q0--;

		tcp->tcp_eager_next_q0 = NULL;
		tcp->tcp_eager_prev_q0 = NULL;

		/*
		 * Take the eager out, if it is in the list of droppable
		 * eagers.
		 */
		MAKE_UNDROPPABLE(tcp);

		if (tcp->tcp_syn_rcvd_timeout != 0) {
			/* we have timed out before */
			ASSERT(listener->tcp_syn_rcvd_timeout > 0);
			listener->tcp_syn_rcvd_timeout--;
		}
	} else {
		tcp_t	**tcpp = &listener->tcp_eager_next_q;
		tcp_t	*prev = NULL;

		for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) {
			if (tcpp[0] == tcp) {
				if (listener->tcp_eager_last_q == tcp) {
					/*
					 * If we are unlinking the last
					 * element on the list, adjust
					 * tail pointer. Set tail pointer
					 * to nil when list is empty.
					 */
					ASSERT(tcp->tcp_eager_next_q == NULL);
					if (listener->tcp_eager_last_q ==
					    listener->tcp_eager_next_q) {
						listener->tcp_eager_last_q =
						    NULL;
					} else {
						/*
						 * We won't get here if there
						 * is only one eager in the
						 * list.
						 */
						ASSERT(prev != NULL);
						listener->tcp_eager_last_q =
						    prev;
					}
				}
				tcpp[0] = tcp->tcp_eager_next_q;
				tcp->tcp_eager_next_q = NULL;
				tcp->tcp_eager_last_q = NULL;
				ASSERT(listener->tcp_conn_req_cnt_q > 0);
				listener->tcp_conn_req_cnt_q--;
				break;
			}
			prev = tcpp[0];
		}
	}
	tcp->tcp_listener = NULL;
}

/* BEGIN CSTYLED */
/*
 *
 * The sockfs ACCEPT path:
 * =======================
 *
 * The eager is now established in its own perimeter as soon as SYN is
 * received in tcp_input_listener(). When sockfs receives conn_ind, it
 * completes the accept processing on the acceptor STREAM. The sending
 * of conn_ind part is common for both sockfs listener and a TLI/XTI
 * listener but a TLI/XTI listener completes the accept processing
 * on the listener perimeter.
 *
 * Common control flow for 3 way handshake:
 * ----------------------------------------
 *
 * incoming SYN (listener perimeter)	-> tcp_input_listener()
 *
 * incoming SYN-ACK-ACK (eager perim)	-> tcp_input_data()
 * send T_CONN_IND (listener perim)	-> tcp_send_conn_ind()
 *
 * Sockfs ACCEPT Path:
 * -------------------
 *
 * open acceptor stream (tcp_open allocates tcp_tli_accept()
 * as STREAM entry point)
 *
 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_tli_accept()
 *
 * tcp_tli_accept() extracts the eager and makes the q->q_ptr <-> eager
 * association (we are not behind eager's squeue but sockfs is protecting us
 * and no one knows about this stream yet). The STREAMS entry point q->q_info
 * is changed to point at tcp_wput().
 *
 * tcp_accept_common() sends any deferred eagers via tcp_send_pending() to
 * the listener (done on the listener's perimeter).
 *
 * tcp_tli_accept() calls tcp_accept_finish() on the eager's perimeter to
 * finish the accept.
 *
 * TLI/XTI client ACCEPT path:
 * ---------------------------
 *
 * soaccept() sends T_CONN_RES on the listener STREAM.
 *
 * tcp_tli_accept() -> tcp_accept_swap() complete the processing and send
 * a M_SETOPS mblk to the eager perimeter to finish the accept
 * (tcp_accept_finish()).
 *
 * Locks:
 * ======
 *
 * listener->tcp_eager_lock protects the listener->tcp_eager_next_q0 and
 * listener->tcp_eager_next_q.
 *
 * Referencing:
 * ============
 *
 * 1) We start out in tcp_input_listener by the eager placing a ref on the
 * listener and the listener adding the eager to listener->tcp_eager_next_q0.
 *
 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to the listener. Before
 * doing so we place a ref on the eager. This ref is finally dropped at the
 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the
 * reference is dropped by the squeue framework.
 *
 * 3) The ref on the listener placed in 1 above is dropped in
 * tcp_accept_finish().
 *
 * The reference must be released by the same entity that added the reference.
 * In the above scheme, the eager is the entity that adds and releases the
 * references. Note that tcp_accept_finish executes in the squeue of the eager
 * (albeit after it is attached to the acceptor stream). Though 1. executes
 * in the listener's squeue, the eager is nascent at this point and the
 * reference can be considered to have been added on behalf of the eager.
 *
 * Eager getting a Reset or listener closing:
 * ==========================================
 *
 * Once the listener and eager are linked, the listener never does the unlink.
 * If the listener needs to close, tcp_eager_cleanup() is called which queues
 * a message on each eager's perimeter. The eager then does the unlink, clears
 * any pointers to the listener's queue and drops the reference to the
 * listener. The listener waits in tcp_close outside the squeue until its
 * refcount has dropped to 1. This ensures that the listener has waited for
 * all eagers to clear their association with the listener.
 *
 * Similarly, if the eager decides to go away, it can unlink itself and close.
 * When the T_CONN_RES comes down, we check if the eager has closed. Note that
 * the reference to the eager is still valid because of the extra ref we put
 * in tcp_send_conn_ind.
 *
 * The listener can always locate the eager under the protection
 * of the listener->tcp_eager_lock, and then do a refhold
 * on the eager during the accept processing.
 *
 * The acceptor stream accesses the eager in the accept processing
 * based on the ref placed on the eager before sending T_conn_ind.
 * The only entity that can negate this refhold is a listener close
 * which is mutually exclusive with an active acceptor stream.
 *
 * Eager's reference on the listener
 * ===================================
 *
 * If the accept happens (even on a closed eager) the eager drops its
 * reference on the listener at the start of tcp_accept_finish. If the
 * eager is killed due to an incoming RST before the T_conn_ind is sent up,
 * the reference is dropped in tcp_closei_local. If the listener closes,
 * the reference is dropped in tcp_eager_kill.
 * In all cases the reference is dropped while executing in the eager's
 * context (squeue).
 */
/* END CSTYLED */

/* Process the SYN packet, mp, directed at the listener 'tcp' */

/*
 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
 * tcp_input_data will not see any packets for listeners since the listener
 * has conn_recv set to tcp_input_listener.
 */
/* ARGSUSED */
static void
tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	tcpha_t		*tcpha;
	uint32_t	seg_seq;
	tcp_t		*eager;
	int		err;
	conn_t		*econnp = NULL;
	squeue_t	*new_sqp;
	mblk_t		*mp1;
	uint_t		ip_hdr_len;
	conn_t		*lconnp = (conn_t *)arg;
	tcp_t		*listener = lconnp->conn_tcp;
	tcp_stack_t	*tcps = listener->tcp_tcps;
	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;
	uint_t		flags;
	mblk_t		*tpi_mp;
	uint_t		ifindex = ira->ira_ruifindex;
	boolean_t	tlc_set = B_FALSE;

	ip_hdr_len = ira->ira_ip_hdr_length;
	tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
	flags = (unsigned int)tcpha->tha_flags & 0xFF;

	DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, lconnp->conn_ixa,
	    __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, listener,
	    __dtrace_tcp_tcph_t *, tcpha);

	if (!(flags & TH_SYN)) {
		if ((flags & TH_RST) || (flags & TH_URG)) {
			freemsg(mp);
			return;
		}
		if (flags & TH_ACK) {
			/* Note this executes in listener's squeue */
			tcp_xmit_listeners_reset(mp, ira, ipst, lconnp);
			return;
		}

		freemsg(mp);
		return;
	}

	if (listener->tcp_state != TCPS_LISTEN)
		goto error2;

	ASSERT(IPCL_IS_BOUND(lconnp));

	mutex_enter(&listener->tcp_eager_lock);

	/*
	 * The system is under memory pressure, so we need to do our part
	 * to relieve the pressure. So we only accept new requests if there
	 * is nothing waiting to be accepted or waiting to complete the 3-way
	 * handshake. This means that a busy listener will not get too many
	 * new requests which it cannot handle in time while a non-busy
	 * listener is still functioning properly.
	 */
	if (tcps->tcps_reclaim && (listener->tcp_conn_req_cnt_q > 0 ||
	    listener->tcp_conn_req_cnt_q0 > 0)) {
		mutex_exit(&listener->tcp_eager_lock);
		TCP_STAT(tcps, tcp_listen_mem_drop);
		goto error2;
	}

	if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) {
		mutex_exit(&listener->tcp_eager_lock);
		TCP_STAT(tcps, tcp_listendrop);
		TCPS_BUMP_MIB(tcps, tcpListenDrop);
		if (lconnp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
			    "tcp_input_listener: listen backlog (max=%d) "
			    "overflow (%d pending) on %s",
			    listener->tcp_conn_req_max,
			    listener->tcp_conn_req_cnt_q,
			    tcp_display(listener, NULL, DISP_PORT_ONLY));
		}
		goto error2;
	}

	if (listener->tcp_conn_req_cnt_q0 >=
	    listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) {
		/*
		 * Q0 is full. Drop a pending half-open req from the queue
		 * to make room for the new SYN req. Also mark the time we
		 * drop a SYN.
		 *
		 * A more aggressive defense against SYN attack will
		 * be to set the "tcp_syn_defense" flag now.
		 */
		TCP_STAT(tcps, tcp_listendropq0);
		listener->tcp_last_rcv_lbolt = ddi_get_lbolt64();
		if (!tcp_drop_q0(listener)) {
			mutex_exit(&listener->tcp_eager_lock);
			TCPS_BUMP_MIB(tcps, tcpListenDropQ0);
			if (lconnp->conn_debug) {
				(void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
				    "tcp_input_listener: listen half-open "
				    "queue (max=%d) full (%d pending) on %s",
				    tcps->tcps_conn_req_max_q0,
				    listener->tcp_conn_req_cnt_q0,
				    tcp_display(listener, NULL,
				    DISP_PORT_ONLY));
			}
			goto error2;
		}
	}

	/*
	 * Enforce the limit set on the number of connections per listener.
	 * Note that tlc_cnt starts with 1. So we need to add 1 to tlc_max
	 * for the comparison.
	 */
	if (listener->tcp_listen_cnt != NULL) {
		tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
		int64_t now;

		if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) {
			mutex_exit(&listener->tcp_eager_lock);
			now = ddi_get_lbolt64();
			atomic_add_32(&tlc->tlc_cnt, -1);
			TCP_STAT(tcps, tcp_listen_cnt_drop);
			tlc->tlc_drop++;
			if (now - tlc->tlc_report_time >
			    MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) {
				zcmn_err(lconnp->conn_zoneid, CE_WARN,
				    "Listener (port %d) connection max (%u) "
				    "reached: %u attempts dropped total\n",
				    ntohs(listener->tcp_connp->conn_lport),
				    tlc->tlc_max, tlc->tlc_drop);
				tlc->tlc_report_time = now;
			}
			goto error2;
		}
		tlc_set = B_TRUE;
	}

	mutex_exit(&listener->tcp_eager_lock);

	/*
	 * IP sets ira_sqp to either the sender's conn_sqp (for loopback)
	 * or based on the ring (for packets from GLD). Otherwise it is
	 * set based on lbolt i.e., a somewhat random number.
	 */
	ASSERT(ira->ira_sqp != NULL);
	new_sqp = ira->ira_sqp;

	econnp = (conn_t *)tcp_get_conn(arg2, tcps);
	if (econnp == NULL)
		goto error2;

	ASSERT(econnp->conn_netstack == lconnp->conn_netstack);
	econnp->conn_sqp = new_sqp;
	econnp->conn_initial_sqp = new_sqp;
	econnp->conn_ixa->ixa_sqp = new_sqp;

	econnp->conn_fport = tcpha->tha_lport;
	econnp->conn_lport = tcpha->tha_fport;

	err = conn_inherit_parent(lconnp, econnp);
	if (err != 0)
		goto error3;

	/* We already know the laddr of the new connection is ours */
	econnp->conn_ixa->ixa_src_generation = ipst->ips_src_generation;

	ASSERT(OK_32PTR(mp->b_rptr));
	ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION ||
	    IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);

	if (lconnp->conn_family == AF_INET) {
		ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
		tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira);
	} else {
		tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira);
	}

	if (tpi_mp == NULL)
		goto error3;

	eager = econnp->conn_tcp;
	eager->tcp_detached = B_TRUE;
	SOCK_CONNID_INIT(eager->tcp_connid);

	tcp_init_values(eager);

	ASSERT((econnp->conn_ixa->ixa_flags &
	    (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) ==
	    (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO));

	if (!tcps->tcps_dev_flow_ctl)
		econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

	/* Prepare for diffing against previous packets */
	eager->tcp_recvifindex = 0;
	eager->tcp_recvhops = 0xffffffffU;
	if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) {
		if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) ||
		    IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) {
			econnp->conn_incoming_ifindex = ifindex;
			econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
			econnp->conn_ixa->ixa_scopeid = ifindex;
		}
	}

	if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) ==
	    (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) &&
	    tcps->tcps_rev_src_routes) {
		ipha_t *ipha = (ipha_t *)mp->b_rptr;
		ip_pkt_t *ipp = &econnp->conn_xmit_ipp;

		/* Source routing option copyover (reverse it) */
		err = ip_find_hdr_v4(ipha, ipp, B_TRUE);
		if (err != 0) {
			freemsg(tpi_mp);
			goto error3;
		}
		ip_pkt_source_route_reverse_v4(ipp);
	}

	ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL);
	ASSERT(!eager->tcp_tconnind_started);
	/*
	 * If the SYN came with a credential, it's a loopback packet or a
	 * labeled packet; attach the credential to the TPI message.
	 */
	if (ira->ira_cred != NULL)
		mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid);

	eager->tcp_conn.tcp_eager_conn_ind = tpi_mp;

	/* Inherit the listener's SSL protection state */
	if ((eager->tcp_kssl_ent = listener->tcp_kssl_ent) != NULL) {
		kssl_hold_ent(eager->tcp_kssl_ent);
		eager->tcp_kssl_pending = B_TRUE;
	}

	/* Inherit the listener's non-STREAMS flag */
	if (IPCL_IS_NONSTR(lconnp)) {
		econnp->conn_flags |= IPCL_NONSTR;
	}

	ASSERT(eager->tcp_ordrel_mp == NULL);

	if (!IPCL_IS_NONSTR(econnp)) {
		/*
		 * Pre-allocate the T_ordrel_ind mblk for TPI socket so that
		 * at close time, we will always have that to send up.
		 * Otherwise, we need to do special handling in case the
		 * allocation fails at that time.
		 */
		if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL)
			goto error3;
	}
	/*
	 * Now that the IP addresses and ports are setup in econnp we
	 * can do the IPsec policy work.
	 */
	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		if (lconnp->conn_policy != NULL) {
			/*
			 * Inherit the policy from the listener; use
			 * actions from ira
			 */
			if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) {
				CONN_DEC_REF(econnp);
				freemsg(mp);
				goto error3;
			}
		}
	}

	/* Inherit various TCP parameters from the listener */
	eager->tcp_naglim = listener->tcp_naglim;
	eager->tcp_first_timer_threshold = listener->tcp_first_timer_threshold;
	eager->tcp_second_timer_threshold =
	    listener->tcp_second_timer_threshold;
	eager->tcp_first_ctimer_threshold =
	    listener->tcp_first_ctimer_threshold;
	eager->tcp_second_ctimer_threshold =
	    listener->tcp_second_ctimer_threshold;

	/*
	 * tcp_set_destination() may set tcp_rwnd according to the route
	 * metrics. If it does not, the eager's receive window will be set
	 * to the listener's receive window later in this function.
	 */
	eager->tcp_rwnd = 0;

	/*
	 * Inherit the listener's tcp_init_cwnd. Need to do this before
	 * calling tcp_process_options() which sets the initial cwnd.
	 */
	eager->tcp_init_cwnd = listener->tcp_init_cwnd;

	if (is_system_labeled()) {
		ip_xmit_attr_t *ixa = econnp->conn_ixa;

		ASSERT(ira->ira_tsl != NULL);
		/* Discard any old label */
		if (ixa->ixa_free_flags & IXA_FREE_TSL) {
			ASSERT(ixa->ixa_tsl != NULL);
			label_rele(ixa->ixa_tsl);
			ixa->ixa_free_flags &= ~IXA_FREE_TSL;
			ixa->ixa_tsl = NULL;
		}
		if ((lconnp->conn_mlp_type != mlptSingle ||
		    lconnp->conn_mac_mode != CONN_MAC_DEFAULT) &&
		    ira->ira_tsl != NULL) {
			/*
			 * If this is an MLP connection or a MAC-Exempt
			 * connection with an unlabeled node, packets are to be
			 * exchanged using the security label of the received
			 * SYN packet instead of the server application's label.
			 * tsol_check_dest called from ip_set_destination
			 * might later update TSF_UNLABELED by replacing
			 * ixa_tsl with a new label.
			 */
			label_hold(ira->ira_tsl);
			ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
			DTRACE_PROBE2(mlp_syn_accept, conn_t *,
			    econnp, ts_label_t *, ixa->ixa_tsl)
		} else {
			ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
			DTRACE_PROBE2(syn_accept, conn_t *,
			    econnp, ts_label_t *, ixa->ixa_tsl)
		}
		/*
		 * conn_connect() called from tcp_set_destination will verify
		 * the destination is allowed to receive packets at the
		 * security label of the SYN-ACK we are generating. As part of
		 * that, tsol_check_dest() may create a new effective label for
		 * this connection.
		 * Finally conn_connect() will call conn_update_label.
		 * All that remains for TCP to do is to call
		 * conn_build_hdr_template which is done as part of
		 * tcp_set_destination.
		 */
	}

	/*
	 * Since we will clear tcp_listener before we clear tcp_detached
	 * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress
	 * so we can tell a TCP_DETACHED_NONEAGER apart.
	 */
	eager->tcp_hard_binding = B_TRUE;

	tcp_bind_hash_insert(&tcps->tcps_bind_fanout[
	    TCP_BIND_HASH(econnp->conn_lport)], eager, 0);

	CL_INET_CONNECT(econnp, B_FALSE, err);
	if (err != 0) {
		tcp_bind_hash_remove(eager);
		goto error3;
	}

	SOCK_CONNID_BUMP(eager->tcp_connid);

	/*
	 * Adapt our mss, ttl, ... based on the remote address.
	 */

	if (tcp_set_destination(eager) != 0) {
		TCPS_BUMP_MIB(tcps, tcpAttemptFails);
		/* Undo the bind_hash_insert */
		tcp_bind_hash_remove(eager);
		goto error3;
	}

	/* Process all TCP options. */
	tcp_process_options(eager, tcpha);

	/* Is the other end ECN capable? */
	if (tcps->tcps_ecn_permitted >= 1 &&
	    (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) {
		eager->tcp_ecn_ok = B_TRUE;
	}

	/*
	 * The listener's conn_rcvbuf should be the default window size or a
	 * window size changed via SO_RCVBUF option. First round up the
	 * eager's tcp_rwnd to the nearest MSS. Then find out the window
	 * scale option value if needed. Call tcp_rwnd_set() to finish the
	 * setting.
	 *
	 * Note if there is a rpipe metric associated with the remote host,
	 * we should not inherit receive window size from listener.
	 */
	eager->tcp_rwnd = MSS_ROUNDUP(
	    (eager->tcp_rwnd == 0 ? econnp->conn_rcvbuf :
	    eager->tcp_rwnd), eager->tcp_mss);
	if (eager->tcp_snd_ws_ok)
		tcp_set_ws_value(eager);
	/*
	 * Note that this is the only place tcp_rwnd_set() is called for
	 * accepting a connection. We need to call it here instead of
	 * after the 3-way handshake because we need to tell the other
	 * side our rwnd in the SYN-ACK segment.
	 */
	(void) tcp_rwnd_set(eager, eager->tcp_rwnd);

	ASSERT(eager->tcp_connp->conn_rcvbuf != 0 &&
	    eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd);

	ASSERT(econnp->conn_rcvbuf != 0 &&
	    econnp->conn_rcvbuf == eager->tcp_rwnd);

	/* Put a ref on the listener for the eager. */
	CONN_INC_REF(lconnp);
	mutex_enter(&listener->tcp_eager_lock);
	listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
	eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0;
	listener->tcp_eager_next_q0 = eager;
	eager->tcp_eager_prev_q0 = listener;

	/* Set tcp_listener before adding it to tcp_conn_fanout */
	eager->tcp_listener = listener;
	eager->tcp_saved_listener = listener;

	/*
	 * Set tcp_listen_cnt so that when the connection is done, the counter
	 * is decremented.
	 */
	eager->tcp_listen_cnt = listener->tcp_listen_cnt;

	/*
	 * Tag this detached tcp vector for later retrieval
	 * by our listener client in tcp_accept().
	 */
	eager->tcp_conn_req_seqnum = listener->tcp_conn_req_seqnum;
	listener->tcp_conn_req_cnt_q0++;
	if (++listener->tcp_conn_req_seqnum == -1) {
		/*
		 * -1 is "special" and defined in TPI as something
		 * that should never be used in T_CONN_IND
		 */
		++listener->tcp_conn_req_seqnum;
	}
	mutex_exit(&listener->tcp_eager_lock);

	if (listener->tcp_syn_defense) {
		/* Don't drop the SYN that comes from a good IP source */
		ipaddr_t *addr_cache;

		addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
		if (addr_cache != NULL && econnp->conn_faddr_v4 ==
		    addr_cache[IP_ADDR_CACHE_HASH(econnp->conn_faddr_v4)]) {
			eager->tcp_dontdrop = B_TRUE;
		}
	}

	/*
	 * We need to insert the eager in its own perimeter but as soon
	 * as we do that, we expose the eager to the classifier and
	 * should not touch any field outside the eager's perimeter.
	 * So do all the work necessary before inserting the eager
	 * in its own perimeter. Be optimistic that conn_connect()
	 * will succeed but undo everything if it fails.
	 */
	seg_seq = ntohl(tcpha->tha_seq);
	eager->tcp_irs = seg_seq;
	eager->tcp_rack = seg_seq;
	eager->tcp_rnxt = seg_seq + 1;
	eager->tcp_tcpha->tha_ack = htonl(eager->tcp_rnxt);
	TCPS_BUMP_MIB(tcps, tcpPassiveOpens);
	eager->tcp_state = TCPS_SYN_RCVD;
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    econnp->conn_ixa, void, NULL, tcp_t *, eager, void, NULL,
	    int32_t, TCPS_LISTEN);

	mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss,
	    NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE);
	if (mp1 == NULL) {
		/*
		 * Increment the ref count as we are going to
		 * enqueue an mp in the squeue.
		 */
		CONN_INC_REF(econnp);
		goto error;
	}

	/*
	 * We need to start the rto timer. In normal case, we start
	 * the timer after sending the packet on the wire (or at
	 * least believing that packet was sent by waiting for
	 * conn_ip_output() to return).
Since this is the first packet 1790 * being sent on the wire for the eager, our initial tcp_rto 1791 * is at least tcp_rexmit_interval_min which is a fairly 1792 * large value to allow the algorithm to adjust slowly to large 1793 * fluctuations of RTT during first few transmissions. 1794 * 1795 * Starting the timer first and then sending the packet in this 1796 * case shouldn't make much difference since tcp_rexmit_interval_min 1797 * is of the order of several 100ms and starting the timer 1798 * first and then sending the packet will result in difference 1799 * of few micro seconds. 1800 * 1801 * Without this optimization, we are forced to hold the fanout 1802 * lock across the ipcl_bind_insert() and sending the packet 1803 * so that we don't race against an incoming packet (maybe RST) 1804 * for this eager. 1805 * 1806 * It is necessary to acquire an extra reference on the eager 1807 * at this point and hold it until after tcp_send_data() to 1808 * ensure against an eager close race. 1809 */ 1810 1811 CONN_INC_REF(econnp); 1812 1813 TCP_TIMER_RESTART(eager, eager->tcp_rto); 1814 1815 /* 1816 * Insert the eager in its own perimeter now. We are ready to deal 1817 * with any packets on eager. 1818 */ 1819 if (ipcl_conn_insert(econnp) != 0) 1820 goto error; 1821 1822 ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp); 1823 freemsg(mp); 1824 /* 1825 * Send the SYN-ACK. Use the right squeue so that conn_ixa is 1826 * only used by one thread at a time. 1827 */ 1828 if (econnp->conn_sqp == lconnp->conn_sqp) { 1829 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, 1830 econnp->conn_ixa, __dtrace_tcp_void_ip_t *, mp1->b_rptr, 1831 tcp_t *, eager, __dtrace_tcp_tcph_t *, 1832 &mp1->b_rptr[econnp->conn_ixa->ixa_ip_hdr_length]); 1833 (void) conn_ip_output(mp1, econnp->conn_ixa); 1834 CONN_DEC_REF(econnp); 1835 } else { 1836 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_send_synack, 1837 econnp, NULL, SQ_PROCESS, SQTAG_TCP_SEND_SYNACK); 1838 } 1839 return; 1840 error: 1841 freemsg(mp1); 1842 eager->tcp_closemp_used = B_TRUE; 1843 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1844 mp1 = &eager->tcp_closemp; 1845 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill, 1846 econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2); 1847 1848 /* 1849 * If a connection already exists, send the mp to that connections so 1850 * that it can be appropriately dealt with. 1851 */ 1852 ipst = tcps->tcps_netstack->netstack_ip; 1853 1854 if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) { 1855 if (!IPCL_IS_CONNECTED(econnp)) { 1856 /* 1857 * Something bad happened. ipcl_conn_insert() 1858 * failed because a connection already existed 1859 * in connected hash but we can't find it 1860 * anymore (someone blew it away). Just 1861 * free this message and hopefully remote 1862 * will retransmit at which time the SYN can be 1863 * treated as a new connection or dealth with 1864 * a TH_RST if a connection already exists. 
1865 */
1866 CONN_DEC_REF(econnp);
1867 freemsg(mp);
1868 } else {
1869 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data,
1870 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1);
1871 }
1872 } else {
1873 /* Nobody wants this packet */
1874 freemsg(mp);
1875 }
1876 return;
1877 error3:
1878 CONN_DEC_REF(econnp);
1879 error2:
1880 freemsg(mp);
1881 if (tlc_set)
1882 atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1);
1883 }
1884
1885 /*
1886 * In an ideal case of vertical partition in a NUMA architecture, it is
1887 * beneficial to have the listener and all the incoming connections
1888 * tied to the same squeue. The other constraint is that incoming
1889 * connections should be tied to the squeue attached to the interrupted
1890 * CPU for obvious locality reasons, so this leaves the listener to
1891 * be tied to the same squeue. Our only problem is that when the listener
1892 * is binding, the CPU that will get interrupted by the NIC whose
1893 * IP address the listener is binding to is not yet known. So
1894 * the code below allows us to change that binding at the time the
1895 * CPU is interrupted, by virtue of the incoming connection's squeue.
1896 *
1897 * This is useful only in the case of a listener bound to a specific IP
1898 * address. Other kinds of listeners are bound the
1899 * very first time and there is no attempt to rebind them.
1900 */
1901 void
1902 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
1903 ip_recv_attr_t *ira)
1904 {
1905 conn_t *connp = (conn_t *)arg;
1906 squeue_t *sqp = (squeue_t *)arg2;
1907 squeue_t *new_sqp;
1908 uint32_t conn_flags;
1909
1910 /*
1911 * IP sets ira_sqp to either the sender's conn_sqp (for loopback)
1912 * or based on the ring (for packets from GLD). Otherwise it is
1913 * set based on lbolt i.e., a somewhat random number.
1914 */
1915 ASSERT(ira->ira_sqp != NULL);
1916 new_sqp = ira->ira_sqp;
1917
1918 if (connp->conn_fanout == NULL)
1919 goto done;
1920
1921 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) {
1922 mutex_enter(&connp->conn_fanout->connf_lock);
1923 mutex_enter(&connp->conn_lock);
1924 /*
1925 * No one from read or write side can access us now
1926 * except for already queued packets on this squeue.
1927 * But since we haven't changed the squeue yet, they
1928 * can't execute. If they are processed after we have
1929 * changed the squeue, they are sent back to the
1930 * correct squeue down below.
1931 * But a listener close can race with processing of
1932 * incoming SYN. If incoming SYN processing changes
1933 * the squeue then the listener close which is waiting
1934 * to enter the squeue would operate on the wrong
1935 * squeue. Hence we don't change the squeue here unless
1936 * the refcount is exactly the minimum refcount. The
1937 * minimum refcount of 4 is counted as - 1 each for
1938 * TCP and IP, 1 for being in the classifier hash, and
1939 * 1 for the mblk being processed.
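 *
 * As a sketch of the idiom used just below (no additional logic), both
 * the squeue pointer and the IPCL_FULLY_BOUND flag are flipped with
 * compare-and-swap loops while connf_lock and conn_lock are held, e.g.
 *
 *	while (connp->conn_sqp != new_sqp)
 *		(void) casptr(&connp->conn_sqp, sqp, new_sqp);
 *
 * so a thread racing with us either still sees the old squeue (and its
 * packet is bounced to the new one at the "done" label) or sees the
 * fully rebound connection.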
1940 */ 1941 1942 if (connp->conn_ref != 4 || 1943 connp->conn_tcp->tcp_state != TCPS_LISTEN) { 1944 mutex_exit(&connp->conn_lock); 1945 mutex_exit(&connp->conn_fanout->connf_lock); 1946 goto done; 1947 } 1948 if (connp->conn_sqp != new_sqp) { 1949 while (connp->conn_sqp != new_sqp) 1950 (void) casptr(&connp->conn_sqp, sqp, new_sqp); 1951 /* No special MT issues for outbound ixa_sqp hint */ 1952 connp->conn_ixa->ixa_sqp = new_sqp; 1953 } 1954 1955 do { 1956 conn_flags = connp->conn_flags; 1957 conn_flags |= IPCL_FULLY_BOUND; 1958 (void) cas32(&connp->conn_flags, connp->conn_flags, 1959 conn_flags); 1960 } while (!(connp->conn_flags & IPCL_FULLY_BOUND)); 1961 1962 mutex_exit(&connp->conn_fanout->connf_lock); 1963 mutex_exit(&connp->conn_lock); 1964 1965 /* 1966 * Assume we have picked a good squeue for the listener. Make 1967 * subsequent SYNs not try to change the squeue. 1968 */ 1969 connp->conn_recv = tcp_input_listener; 1970 } 1971 1972 done: 1973 if (connp->conn_sqp != sqp) { 1974 CONN_INC_REF(connp); 1975 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp, 1976 ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND); 1977 } else { 1978 tcp_input_listener(connp, mp, sqp, ira); 1979 } 1980 } 1981 1982 /* 1983 * Send up all messages queued on tcp_rcv_list. 1984 */ 1985 uint_t 1986 tcp_rcv_drain(tcp_t *tcp) 1987 { 1988 mblk_t *mp; 1989 uint_t ret = 0; 1990 #ifdef DEBUG 1991 uint_t cnt = 0; 1992 #endif 1993 queue_t *q = tcp->tcp_connp->conn_rq; 1994 1995 /* Can't drain on an eager connection */ 1996 if (tcp->tcp_listener != NULL) 1997 return (ret); 1998 1999 /* Can't be a non-STREAMS connection */ 2000 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp)); 2001 2002 /* No need for the push timer now. */ 2003 if (tcp->tcp_push_tid != 0) { 2004 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 2005 tcp->tcp_push_tid = 0; 2006 } 2007 2008 /* 2009 * Handle two cases here: we are currently fused or we were 2010 * previously fused and have some urgent data to be delivered 2011 * upstream. The latter happens because we either ran out of 2012 * memory or were detached and therefore sending the SIGURG was 2013 * deferred until this point. In either case we pass control 2014 * over to tcp_fuse_rcv_drain() since it may need to complete 2015 * some work. 2016 */ 2017 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) { 2018 ASSERT(IPCL_IS_NONSTR(tcp->tcp_connp) || 2019 tcp->tcp_fused_sigurg_mp != NULL); 2020 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL : 2021 &tcp->tcp_fused_sigurg_mp)) 2022 return (ret); 2023 } 2024 2025 while ((mp = tcp->tcp_rcv_list) != NULL) { 2026 tcp->tcp_rcv_list = mp->b_next; 2027 mp->b_next = NULL; 2028 #ifdef DEBUG 2029 cnt += msgdsize(mp); 2030 #endif 2031 /* Does this need SSL processing first? */ 2032 if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) { 2033 DTRACE_PROBE1(kssl_mblk__ksslinput_rcvdrain, 2034 mblk_t *, mp); 2035 tcp_kssl_input(tcp, mp, NULL); 2036 continue; 2037 } 2038 putnext(q, mp); 2039 } 2040 #ifdef DEBUG 2041 ASSERT(cnt == tcp->tcp_rcv_cnt); 2042 #endif 2043 tcp->tcp_rcv_last_head = NULL; 2044 tcp->tcp_rcv_last_tail = NULL; 2045 tcp->tcp_rcv_cnt = 0; 2046 2047 if (canputnext(q)) 2048 return (tcp_rwnd_reopen(tcp)); 2049 2050 return (ret); 2051 } 2052 2053 /* 2054 * Queue data on tcp_rcv_list which is a b_next chain. 2055 * tcp_rcv_last_head/tail is the last element of this chain. 2056 * Each element of the chain is a b_cont chain. 2057 * 2058 * M_DATA messages are added to the current element. 2059 * Other messages are added as new (b_next) elements. 
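 *
 * For illustration (a hypothetical queue, not a required layout): two
 * consecutive M_DATA messages followed by an M_PROTO message end up
 * chained as
 *
 *	tcp_rcv_list -> M_DATA --b_cont--> M_DATA
 *	                  |
 *	                b_next
 *	                  v
 *	               M_PROTO   <-- tcp_rcv_last_head, tcp_rcv_last_tail
 *
 * because the second M_DATA is appended via b_cont to the current
 * element while the M_PROTO starts a new b_next element.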
2060 */ 2061 void 2062 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr) 2063 { 2064 ASSERT(seg_len == msgdsize(mp)); 2065 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 2066 2067 if (is_system_labeled()) { 2068 ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL); 2069 /* 2070 * Provide for protocols above TCP such as RPC. NOPID leaves 2071 * db_cpid unchanged. 2072 * The cred could have already been set. 2073 */ 2074 if (cr != NULL) 2075 mblk_setcred(mp, cr, NOPID); 2076 } 2077 2078 if (tcp->tcp_rcv_list == NULL) { 2079 ASSERT(tcp->tcp_rcv_last_head == NULL); 2080 tcp->tcp_rcv_list = mp; 2081 tcp->tcp_rcv_last_head = mp; 2082 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 2083 tcp->tcp_rcv_last_tail->b_cont = mp; 2084 } else { 2085 tcp->tcp_rcv_last_head->b_next = mp; 2086 tcp->tcp_rcv_last_head = mp; 2087 } 2088 2089 while (mp->b_cont) 2090 mp = mp->b_cont; 2091 2092 tcp->tcp_rcv_last_tail = mp; 2093 tcp->tcp_rcv_cnt += seg_len; 2094 tcp->tcp_rwnd -= seg_len; 2095 } 2096 2097 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 2098 mblk_t * 2099 tcp_ack_mp(tcp_t *tcp) 2100 { 2101 uint32_t seq_no; 2102 tcp_stack_t *tcps = tcp->tcp_tcps; 2103 conn_t *connp = tcp->tcp_connp; 2104 2105 /* 2106 * There are a few cases to be considered while setting the sequence no. 2107 * Essentially, we can come here while processing an unacceptable pkt 2108 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 2109 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 2110 * If we are here for a zero window probe, stick with suna. In all 2111 * other cases, we check if suna + swnd encompasses snxt and set 2112 * the sequence number to snxt, if so. If snxt falls outside the 2113 * window (the receiver probably shrunk its window), we will go with 2114 * suna + swnd, otherwise the sequence no will be unacceptable to the 2115 * receiver. 2116 */ 2117 if (tcp->tcp_zero_win_probe) { 2118 seq_no = tcp->tcp_suna; 2119 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 2120 ASSERT(tcp->tcp_swnd == 0); 2121 seq_no = tcp->tcp_snxt; 2122 } else { 2123 seq_no = SEQ_GT(tcp->tcp_snxt, 2124 (tcp->tcp_suna + tcp->tcp_swnd)) ? 2125 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 2126 } 2127 2128 if (tcp->tcp_valid_bits) { 2129 /* 2130 * For the complex case where we have to send some 2131 * controls (FIN or SYN), let tcp_xmit_mp do it. 2132 */ 2133 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 2134 NULL, B_FALSE)); 2135 } else { 2136 /* Generate a simple ACK */ 2137 int data_length; 2138 uchar_t *rptr; 2139 tcpha_t *tcpha; 2140 mblk_t *mp1; 2141 int32_t total_hdr_len; 2142 int32_t tcp_hdr_len; 2143 int32_t num_sack_blk = 0; 2144 int32_t sack_opt_len; 2145 ip_xmit_attr_t *ixa = connp->conn_ixa; 2146 2147 /* 2148 * Allocate space for TCP + IP headers 2149 * and link-level header 2150 */ 2151 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 2152 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 2153 tcp->tcp_num_sack_blk); 2154 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 2155 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 2156 total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len; 2157 tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len; 2158 } else { 2159 total_hdr_len = connp->conn_ht_iphc_len; 2160 tcp_hdr_len = connp->conn_ht_ulp_len; 2161 } 2162 mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 2163 if (!mp1) 2164 return (NULL); 2165 2166 /* Update the latest receive window size in TCP header. 
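 * The value advertised is tcp_rwnd shifted right by the receive window
 * scale in effect on this connection; for example (hypothetical
 * numbers), a 256 KB tcp_rwnd with tcp_rcv_ws of 3 is sent on the wire
 * as 32768.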
*/
2167 tcp->tcp_tcpha->tha_win =
2168 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2169 /* copy in prototype TCP + IP header */
2170 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
2171 mp1->b_rptr = rptr;
2172 mp1->b_wptr = rptr + total_hdr_len;
2173 bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);
2174
2175 tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];
2176
2177 /* Set the TCP sequence number. */
2178 tcpha->tha_seq = htonl(seq_no);
2179
2180 /* Set up the TCP flag field. */
2181 tcpha->tha_flags = (uchar_t)TH_ACK;
2182 if (tcp->tcp_ecn_echo_on)
2183 tcpha->tha_flags |= TH_ECE;
2184
2185 tcp->tcp_rack = tcp->tcp_rnxt;
2186 tcp->tcp_rack_cnt = 0;
2187
2188 /* fill in timestamp option if in use */
2189 if (tcp->tcp_snd_ts_ok) {
2190 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2191
2192 U32_TO_BE32(llbolt,
2193 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
2194 U32_TO_BE32(tcp->tcp_ts_recent,
2195 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
2196 }
2197
2198 /* Fill in SACK options */
2199 if (num_sack_blk > 0) {
2200 uchar_t *wptr = (uchar_t *)tcpha +
2201 connp->conn_ht_ulp_len;
2202 sack_blk_t *tmp;
2203 int32_t i;
2204
2205 wptr[0] = TCPOPT_NOP;
2206 wptr[1] = TCPOPT_NOP;
2207 wptr[2] = TCPOPT_SACK;
2208 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
2209 sizeof (sack_blk_t);
2210 wptr += TCPOPT_REAL_SACK_LEN;
2211
2212 tmp = tcp->tcp_sack_list;
2213 for (i = 0; i < num_sack_blk; i++) {
2214 U32_TO_BE32(tmp[i].begin, wptr);
2215 wptr += sizeof (tcp_seq);
2216 U32_TO_BE32(tmp[i].end, wptr);
2217 wptr += sizeof (tcp_seq);
2218 }
2219 tcpha->tha_offset_and_reserved +=
2220 ((num_sack_blk * 2 + 1) << 4);
2221 }
2222
2223 ixa->ixa_pktlen = total_hdr_len;
2224
2225 if (ixa->ixa_flags & IXAF_IS_IPV4) {
2226 ((ipha_t *)rptr)->ipha_length = htons(total_hdr_len);
2227 } else {
2228 ip6_t *ip6 = (ip6_t *)rptr;
2229
2230 ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
2231 }
2232
2233 /*
2234 * Prime pump for checksum calculation in IP. Include the
2235 * adjustment for a source route if any.
2236 */
2237 data_length = tcp_hdr_len + connp->conn_sum;
2238 data_length = (data_length >> 16) + (data_length & 0xFFFF);
2239 tcpha->tha_sum = htons(data_length);
2240
2241 if (tcp->tcp_ip_forward_progress) {
2242 tcp->tcp_ip_forward_progress = B_FALSE;
2243 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
2244 } else {
2245 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
2246 }
2247 return (mp1);
2248 }
2249 }
2250
2251 /*
2252 * Handles M_DATA messages from IP. This function is called directly from
2253 * IP via squeue for received IP packets.
2254 *
2255 * The first argument is always the connp/tcp to which the mp belongs.
2256 * There are no exceptions to this rule. The caller has already put
2257 * a reference on this connp/tcp and once tcp_input_data() returns,
2258 * the squeue will do the refrele.
2259 *
2260 * TH_SYN segments for the listener go directly to tcp_input_listener via
2261 * squeue. ICMP errors go directly to tcp_icmp_input().
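 *
 * Because every segment for a connection is delivered through that
 * connection's squeue, only one thread runs this function on a given
 * tcp_t at a time; that serialization is what lets the connection state
 * be read and updated below without additional per-connection locking.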
2262 * 2263 * sqp: NULL = recursive, sqp != NULL means called from squeue 2264 */ 2265 void 2266 tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 2267 { 2268 int32_t bytes_acked; 2269 int32_t gap; 2270 mblk_t *mp1; 2271 uint_t flags; 2272 uint32_t new_swnd = 0; 2273 uchar_t *iphdr; 2274 uchar_t *rptr; 2275 int32_t rgap; 2276 uint32_t seg_ack; 2277 int seg_len; 2278 uint_t ip_hdr_len; 2279 uint32_t seg_seq; 2280 tcpha_t *tcpha; 2281 int urp; 2282 tcp_opt_t tcpopt; 2283 ip_pkt_t ipp; 2284 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 2285 uint32_t cwnd; 2286 uint32_t add; 2287 int npkt; 2288 int mss; 2289 conn_t *connp = (conn_t *)arg; 2290 squeue_t *sqp = (squeue_t *)arg2; 2291 tcp_t *tcp = connp->conn_tcp; 2292 tcp_stack_t *tcps = tcp->tcp_tcps; 2293 2294 /* 2295 * RST from fused tcp loopback peer should trigger an unfuse. 2296 */ 2297 if (tcp->tcp_fused) { 2298 TCP_STAT(tcps, tcp_fusion_aborted); 2299 tcp_unfuse(tcp); 2300 } 2301 2302 iphdr = mp->b_rptr; 2303 rptr = mp->b_rptr; 2304 ASSERT(OK_32PTR(rptr)); 2305 2306 ip_hdr_len = ira->ira_ip_hdr_length; 2307 if (connp->conn_recv_ancillary.crb_all != 0) { 2308 /* 2309 * Record packet information in the ip_pkt_t 2310 */ 2311 ipp.ipp_fields = 0; 2312 if (ira->ira_flags & IRAF_IS_IPV4) { 2313 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipp, 2314 B_FALSE); 2315 } else { 2316 uint8_t nexthdrp; 2317 2318 /* 2319 * IPv6 packets can only be received by applications 2320 * that are prepared to receive IPv6 addresses. 2321 * The IP fanout must ensure this. 2322 */ 2323 ASSERT(connp->conn_family == AF_INET6); 2324 2325 (void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp, 2326 &nexthdrp); 2327 ASSERT(nexthdrp == IPPROTO_TCP); 2328 2329 /* Could have caused a pullup? */ 2330 iphdr = mp->b_rptr; 2331 rptr = mp->b_rptr; 2332 } 2333 } 2334 ASSERT(DB_TYPE(mp) == M_DATA); 2335 ASSERT(mp->b_next == NULL); 2336 2337 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2338 seg_seq = ntohl(tcpha->tha_seq); 2339 seg_ack = ntohl(tcpha->tha_ack); 2340 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 2341 seg_len = (int)(mp->b_wptr - rptr) - 2342 (ip_hdr_len + TCP_HDR_LENGTH(tcpha)); 2343 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 2344 do { 2345 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 2346 (uintptr_t)INT_MAX); 2347 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 2348 } while ((mp1 = mp1->b_cont) != NULL && 2349 mp1->b_datap->db_type == M_DATA); 2350 } 2351 2352 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa, 2353 __dtrace_tcp_void_ip_t *, iphdr, tcp_t *, tcp, 2354 __dtrace_tcp_tcph_t *, tcpha); 2355 2356 if (tcp->tcp_state == TCPS_TIME_WAIT) { 2357 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 2358 seg_len, tcpha, ira); 2359 return; 2360 } 2361 2362 if (sqp != NULL) { 2363 /* 2364 * This is the correct place to update tcp_last_recv_time. Note 2365 * that it is also updated for tcp structure that belongs to 2366 * global and listener queues which do not really need updating. 2367 * But that should not cause any harm. And it is updated for 2368 * all kinds of incoming segments, not only for data segments. 
2369 */ 2370 tcp->tcp_last_recv_time = LBOLT_FASTPATH; 2371 } 2372 2373 flags = (unsigned int)tcpha->tha_flags & 0xFF; 2374 2375 BUMP_LOCAL(tcp->tcp_ibsegs); 2376 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2377 2378 if ((flags & TH_URG) && sqp != NULL) { 2379 /* 2380 * TCP can't handle urgent pointers that arrive before 2381 * the connection has been accept()ed since it can't 2382 * buffer OOB data. Discard segment if this happens. 2383 * 2384 * We can't just rely on a non-null tcp_listener to indicate 2385 * that the accept() has completed since unlinking of the 2386 * eager and completion of the accept are not atomic. 2387 * tcp_detached, when it is not set (B_FALSE) indicates 2388 * that the accept() has completed. 2389 * 2390 * Nor can it reassemble urgent pointers, so discard 2391 * if it's not the next segment expected. 2392 * 2393 * Otherwise, collapse chain into one mblk (discard if 2394 * that fails). This makes sure the headers, retransmitted 2395 * data, and new data all are in the same mblk. 2396 */ 2397 ASSERT(mp != NULL); 2398 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 2399 freemsg(mp); 2400 return; 2401 } 2402 /* Update pointers into message */ 2403 iphdr = rptr = mp->b_rptr; 2404 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2405 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 2406 /* 2407 * Since we can't handle any data with this urgent 2408 * pointer that is out of sequence, we expunge 2409 * the data. This allows us to still register 2410 * the urgent mark and generate the M_PCSIG, 2411 * which we can do. 2412 */ 2413 mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2414 seg_len = 0; 2415 } 2416 } 2417 2418 switch (tcp->tcp_state) { 2419 case TCPS_SYN_SENT: 2420 if (connp->conn_final_sqp == NULL && 2421 tcp_outbound_squeue_switch && sqp != NULL) { 2422 ASSERT(connp->conn_initial_sqp == connp->conn_sqp); 2423 connp->conn_final_sqp = sqp; 2424 if (connp->conn_final_sqp != connp->conn_sqp) { 2425 DTRACE_PROBE1(conn__final__sqp__switch, 2426 conn_t *, connp); 2427 CONN_INC_REF(connp); 2428 SQUEUE_SWITCH(connp, connp->conn_final_sqp); 2429 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 2430 tcp_input_data, connp, ira, ip_squeue_flag, 2431 SQTAG_CONNECT_FINISH); 2432 return; 2433 } 2434 DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp); 2435 } 2436 if (flags & TH_ACK) { 2437 /* 2438 * Note that our stack cannot send data before a 2439 * connection is established, therefore the 2440 * following check is valid. Otherwise, it has 2441 * to be changed. 2442 */ 2443 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 2444 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2445 freemsg(mp); 2446 if (flags & TH_RST) 2447 return; 2448 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 2449 tcp, seg_ack, 0, TH_RST); 2450 return; 2451 } 2452 ASSERT(tcp->tcp_suna + 1 == seg_ack); 2453 } 2454 if (flags & TH_RST) { 2455 if (flags & TH_ACK) { 2456 DTRACE_TCP5(connect__refused, mblk_t *, NULL, 2457 ip_xmit_attr_t *, connp->conn_ixa, 2458 void_ip_t *, iphdr, tcp_t *, tcp, 2459 tcph_t *, tcpha); 2460 (void) tcp_clean_death(tcp, ECONNREFUSED); 2461 } 2462 freemsg(mp); 2463 return; 2464 } 2465 if (!(flags & TH_SYN)) { 2466 freemsg(mp); 2467 return; 2468 } 2469 2470 /* Process all TCP options. */ 2471 tcp_process_options(tcp, tcpha); 2472 /* 2473 * The following changes our rwnd to be a multiple of the 2474 * MIN(peer MSS, our MSS) for performance reason. 2475 */ 2476 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf, 2477 tcp->tcp_mss)); 2478 2479 /* Is the other end ECN capable? 
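 * Per RFC 3168 an ECN-setup SYN-ACK carries ECE with CWR clear; any
 * other combination makes the check below turn ECN off for this
 * connection.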
*/ 2480 if (tcp->tcp_ecn_ok) { 2481 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 2482 tcp->tcp_ecn_ok = B_FALSE; 2483 } 2484 } 2485 /* 2486 * Clear ECN flags because it may interfere with later 2487 * processing. 2488 */ 2489 flags &= ~(TH_ECE|TH_CWR); 2490 2491 tcp->tcp_irs = seg_seq; 2492 tcp->tcp_rack = seg_seq; 2493 tcp->tcp_rnxt = seg_seq + 1; 2494 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2495 if (!TCP_IS_DETACHED(tcp)) { 2496 /* Allocate room for SACK options if needed. */ 2497 connp->conn_wroff = connp->conn_ht_iphc_len; 2498 if (tcp->tcp_snd_sack_ok) 2499 connp->conn_wroff += TCPOPT_MAX_SACK_LEN; 2500 if (!tcp->tcp_loopback) 2501 connp->conn_wroff += tcps->tcps_wroff_xtra; 2502 2503 (void) proto_set_tx_wroff(connp->conn_rq, connp, 2504 connp->conn_wroff); 2505 } 2506 if (flags & TH_ACK) { 2507 /* 2508 * If we can't get the confirmation upstream, pretend 2509 * we didn't even see this one. 2510 * 2511 * XXX: how can we pretend we didn't see it if we 2512 * have updated rnxt et. al. 2513 * 2514 * For loopback we defer sending up the T_CONN_CON 2515 * until after some checks below. 2516 */ 2517 mp1 = NULL; 2518 /* 2519 * tcp_sendmsg() checks tcp_state without entering 2520 * the squeue so tcp_state should be updated before 2521 * sending up connection confirmation. Probe the 2522 * state change below when we are sure the connection 2523 * confirmation has been sent. 2524 */ 2525 tcp->tcp_state = TCPS_ESTABLISHED; 2526 if (!tcp_conn_con(tcp, iphdr, mp, 2527 tcp->tcp_loopback ? &mp1 : NULL, ira)) { 2528 tcp->tcp_state = TCPS_SYN_SENT; 2529 freemsg(mp); 2530 return; 2531 } 2532 TCPS_CONN_INC(tcps); 2533 /* SYN was acked - making progress */ 2534 tcp->tcp_ip_forward_progress = B_TRUE; 2535 2536 /* One for the SYN */ 2537 tcp->tcp_suna = tcp->tcp_iss + 1; 2538 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 2539 2540 /* 2541 * If SYN was retransmitted, need to reset all 2542 * retransmission info. This is because this 2543 * segment will be treated as a dup ACK. 2544 */ 2545 if (tcp->tcp_rexmit) { 2546 tcp->tcp_rexmit = B_FALSE; 2547 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 2548 tcp->tcp_rexmit_max = tcp->tcp_snxt; 2549 tcp->tcp_snd_burst = tcp->tcp_localnet ? 2550 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 2551 tcp->tcp_ms_we_have_waited = 0; 2552 2553 /* 2554 * Set tcp_cwnd back to 1 MSS, per 2555 * recommendation from 2556 * draft-floyd-incr-init-win-01.txt, 2557 * Increasing TCP's Initial Window. 2558 */ 2559 tcp->tcp_cwnd = tcp->tcp_mss; 2560 } 2561 2562 tcp->tcp_swl1 = seg_seq; 2563 tcp->tcp_swl2 = seg_ack; 2564 2565 new_swnd = ntohs(tcpha->tha_win); 2566 tcp->tcp_swnd = new_swnd; 2567 if (new_swnd > tcp->tcp_max_swnd) 2568 tcp->tcp_max_swnd = new_swnd; 2569 2570 /* 2571 * Always send the three-way handshake ack immediately 2572 * in order to make the connection complete as soon as 2573 * possible on the accepting host. 2574 */ 2575 flags |= TH_ACK_NEEDED; 2576 2577 /* 2578 * Trace connect-established here. 2579 */ 2580 DTRACE_TCP5(connect__established, mblk_t *, NULL, 2581 ip_xmit_attr_t *, tcp->tcp_connp->conn_ixa, 2582 void_ip_t *, iphdr, tcp_t *, tcp, tcph_t *, tcpha); 2583 2584 /* Trace change from SYN_SENT -> ESTABLISHED here */ 2585 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2586 connp->conn_ixa, void, NULL, tcp_t *, tcp, 2587 void, NULL, int32_t, TCPS_SYN_SENT); 2588 2589 /* 2590 * Special case for loopback. At this point we have 2591 * received SYN-ACK from the remote endpoint. 
In 2592 * order to ensure that both endpoints reach the 2593 * fused state prior to any data exchange, the final 2594 * ACK needs to be sent before we indicate T_CONN_CON 2595 * to the module upstream. 2596 */ 2597 if (tcp->tcp_loopback) { 2598 mblk_t *ack_mp; 2599 2600 ASSERT(!tcp->tcp_unfusable); 2601 ASSERT(mp1 != NULL); 2602 /* 2603 * For loopback, we always get a pure SYN-ACK 2604 * and only need to send back the final ACK 2605 * with no data (this is because the other 2606 * tcp is ours and we don't do T/TCP). This 2607 * final ACK triggers the passive side to 2608 * perform fusion in ESTABLISHED state. 2609 */ 2610 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 2611 if (tcp->tcp_ack_tid != 0) { 2612 (void) TCP_TIMER_CANCEL(tcp, 2613 tcp->tcp_ack_tid); 2614 tcp->tcp_ack_tid = 0; 2615 } 2616 tcp_send_data(tcp, ack_mp); 2617 BUMP_LOCAL(tcp->tcp_obsegs); 2618 TCPS_BUMP_MIB(tcps, tcpOutAck); 2619 2620 if (!IPCL_IS_NONSTR(connp)) { 2621 /* Send up T_CONN_CON */ 2622 if (ira->ira_cred != NULL) { 2623 mblk_setcred(mp1, 2624 ira->ira_cred, 2625 ira->ira_cpid); 2626 } 2627 putnext(connp->conn_rq, mp1); 2628 } else { 2629 (*connp->conn_upcalls-> 2630 su_connected) 2631 (connp->conn_upper_handle, 2632 tcp->tcp_connid, 2633 ira->ira_cred, 2634 ira->ira_cpid); 2635 freemsg(mp1); 2636 } 2637 2638 freemsg(mp); 2639 return; 2640 } 2641 /* 2642 * Forget fusion; we need to handle more 2643 * complex cases below. Send the deferred 2644 * T_CONN_CON message upstream and proceed 2645 * as usual. Mark this tcp as not capable 2646 * of fusion. 2647 */ 2648 TCP_STAT(tcps, tcp_fusion_unfusable); 2649 tcp->tcp_unfusable = B_TRUE; 2650 if (!IPCL_IS_NONSTR(connp)) { 2651 if (ira->ira_cred != NULL) { 2652 mblk_setcred(mp1, ira->ira_cred, 2653 ira->ira_cpid); 2654 } 2655 putnext(connp->conn_rq, mp1); 2656 } else { 2657 (*connp->conn_upcalls->su_connected) 2658 (connp->conn_upper_handle, 2659 tcp->tcp_connid, ira->ira_cred, 2660 ira->ira_cpid); 2661 freemsg(mp1); 2662 } 2663 } 2664 2665 /* 2666 * Check to see if there is data to be sent. If 2667 * yes, set the transmit flag. Then check to see 2668 * if received data processing needs to be done. 2669 * If not, go straight to xmit_check. This short 2670 * cut is OK as we don't support T/TCP. 2671 */ 2672 if (tcp->tcp_unsent) 2673 flags |= TH_XMIT_NEEDED; 2674 2675 if (seg_len == 0 && !(flags & TH_URG)) { 2676 freemsg(mp); 2677 goto xmit_check; 2678 } 2679 2680 flags &= ~TH_SYN; 2681 seg_seq++; 2682 break; 2683 } 2684 tcp->tcp_state = TCPS_SYN_RCVD; 2685 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2686 connp->conn_ixa, void_ip_t *, NULL, tcp_t *, tcp, 2687 tcph_t *, NULL, int32_t, TCPS_SYN_SENT); 2688 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss, 2689 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 2690 if (mp1 != NULL) { 2691 tcp_send_data(tcp, mp1); 2692 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 2693 } 2694 freemsg(mp); 2695 return; 2696 case TCPS_SYN_RCVD: 2697 if (flags & TH_ACK) { 2698 /* 2699 * In this state, a SYN|ACK packet is either bogus 2700 * because the other side must be ACKing our SYN which 2701 * indicates it has seen the ACK for their SYN and 2702 * shouldn't retransmit it or we're crossing SYNs 2703 * on active open. 2704 */ 2705 if ((flags & TH_SYN) && !tcp->tcp_active_open) { 2706 freemsg(mp); 2707 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn", 2708 tcp, seg_ack, 0, TH_RST); 2709 return; 2710 } 2711 /* 2712 * NOTE: RFC 793 pg. 
72 says this should be 2713 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt 2714 * but that would mean we have an ack that ignored 2715 * our SYN. 2716 */ 2717 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) || 2718 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2719 freemsg(mp); 2720 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack", 2721 tcp, seg_ack, 0, TH_RST); 2722 return; 2723 } 2724 /* 2725 * No sane TCP stack will send such a small window 2726 * without receiving any data. Just drop this invalid 2727 * ACK. We also shorten the abort timeout in case 2728 * this is an attack. 2729 */ 2730 if ((ntohs(tcpha->tha_win) << tcp->tcp_snd_ws) < 2731 (tcp->tcp_mss >> tcp_init_wnd_shft)) { 2732 freemsg(mp); 2733 TCP_STAT(tcps, tcp_zwin_ack_syn); 2734 tcp->tcp_second_ctimer_threshold = 2735 tcp_early_abort * SECONDS; 2736 return; 2737 } 2738 } 2739 break; 2740 case TCPS_LISTEN: 2741 /* 2742 * Only a TLI listener can come through this path when a 2743 * acceptor is going back to be a listener and a packet 2744 * for the acceptor hits the classifier. For a socket 2745 * listener, this can never happen because a listener 2746 * can never accept connection on itself and hence a 2747 * socket acceptor can not go back to being a listener. 2748 */ 2749 ASSERT(!TCP_IS_SOCKET(tcp)); 2750 /*FALLTHRU*/ 2751 case TCPS_CLOSED: 2752 case TCPS_BOUND: { 2753 conn_t *new_connp; 2754 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2755 2756 /* 2757 * Don't accept any input on a closed tcp as this TCP logically 2758 * does not exist on the system. Don't proceed further with 2759 * this TCP. For instance, this packet could trigger another 2760 * close of this tcp which would be disastrous for tcp_refcnt. 2761 * tcp_close_detached / tcp_clean_death / tcp_closei_local must 2762 * be called at most once on a TCP. In this case we need to 2763 * refeed the packet into the classifier and figure out where 2764 * the packet should go. 2765 */ 2766 new_connp = ipcl_classify(mp, ira, ipst); 2767 if (new_connp != NULL) { 2768 /* Drops ref on new_connp */ 2769 tcp_reinput(new_connp, mp, ira, ipst); 2770 return; 2771 } 2772 /* We failed to classify. For now just drop the packet */ 2773 freemsg(mp); 2774 return; 2775 } 2776 case TCPS_IDLE: 2777 /* 2778 * Handle the case where the tcp_clean_death() has happened 2779 * on a connection (application hasn't closed yet) but a packet 2780 * was already queued on squeue before tcp_clean_death() 2781 * was processed. Calling tcp_clean_death() twice on same 2782 * connection can result in weird behaviour. 2783 */ 2784 freemsg(mp); 2785 return; 2786 default: 2787 break; 2788 } 2789 2790 /* 2791 * Already on the correct queue/perimeter. 2792 * If this is a detached connection and not an eager 2793 * connection hanging off a listener then new data 2794 * (past the FIN) will cause a reset. 2795 * We do a special check here where it 2796 * is out of the main line, rather than check 2797 * if we are detached every time we see new 2798 * data down below. 2799 */ 2800 if (TCP_IS_DETACHED_NONEAGER(tcp) && 2801 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) { 2802 TCPS_BUMP_MIB(tcps, tcpInClosed); 2803 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2804 2805 freemsg(mp); 2806 /* 2807 * This could be an SSL closure alert. We're detached so just 2808 * acknowledge it this last time. 
2809 */ 2810 if (tcp->tcp_kssl_ctx != NULL) { 2811 kssl_release_ctx(tcp->tcp_kssl_ctx); 2812 tcp->tcp_kssl_ctx = NULL; 2813 2814 tcp->tcp_rnxt += seg_len; 2815 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2816 flags |= TH_ACK_NEEDED; 2817 goto ack_check; 2818 } 2819 2820 tcp_xmit_ctl("new data when detached", tcp, 2821 tcp->tcp_snxt, 0, TH_RST); 2822 (void) tcp_clean_death(tcp, EPROTO); 2823 return; 2824 } 2825 2826 mp->b_rptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2827 urp = ntohs(tcpha->tha_urp) - TCP_OLD_URP_INTERPRETATION; 2828 new_swnd = ntohs(tcpha->tha_win) << 2829 ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws); 2830 2831 if (tcp->tcp_snd_ts_ok) { 2832 if (!tcp_paws_check(tcp, tcpha, &tcpopt)) { 2833 /* 2834 * This segment is not acceptable. 2835 * Drop it and send back an ACK. 2836 */ 2837 freemsg(mp); 2838 flags |= TH_ACK_NEEDED; 2839 goto ack_check; 2840 } 2841 } else if (tcp->tcp_snd_sack_ok) { 2842 tcpopt.tcp = tcp; 2843 /* 2844 * SACK info in already updated in tcp_parse_options. Ignore 2845 * all other TCP options... 2846 */ 2847 (void) tcp_parse_options(tcpha, &tcpopt); 2848 } 2849 try_again:; 2850 mss = tcp->tcp_mss; 2851 gap = seg_seq - tcp->tcp_rnxt; 2852 rgap = tcp->tcp_rwnd - (gap + seg_len); 2853 /* 2854 * gap is the amount of sequence space between what we expect to see 2855 * and what we got for seg_seq. A positive value for gap means 2856 * something got lost. A negative value means we got some old stuff. 2857 */ 2858 if (gap < 0) { 2859 /* Old stuff present. Is the SYN in there? */ 2860 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 2861 (seg_len != 0)) { 2862 flags &= ~TH_SYN; 2863 seg_seq++; 2864 urp--; 2865 /* Recompute the gaps after noting the SYN. */ 2866 goto try_again; 2867 } 2868 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs); 2869 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, 2870 (seg_len > -gap ? -gap : seg_len)); 2871 /* Remove the old stuff from seg_len. */ 2872 seg_len += gap; 2873 /* 2874 * Anything left? 2875 * Make sure to check for unack'd FIN when rest of data 2876 * has been previously ack'd. 2877 */ 2878 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 2879 /* 2880 * Resets are only valid if they lie within our offered 2881 * window. If the RST bit is set, we just ignore this 2882 * segment. 2883 */ 2884 if (flags & TH_RST) { 2885 freemsg(mp); 2886 return; 2887 } 2888 2889 /* 2890 * The arriving of dup data packets indicate that we 2891 * may have postponed an ack for too long, or the other 2892 * side's RTT estimate is out of shape. Start acking 2893 * more often. 2894 */ 2895 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) && 2896 tcp->tcp_rack_cnt >= 1 && 2897 tcp->tcp_rack_abs_max > 2) { 2898 tcp->tcp_rack_abs_max--; 2899 } 2900 tcp->tcp_rack_cur_max = 1; 2901 2902 /* 2903 * This segment is "unacceptable". None of its 2904 * sequence space lies within our advertized window. 2905 * 2906 * Adjust seg_len to the original value for tracing. 2907 */ 2908 seg_len -= gap; 2909 if (connp->conn_debug) { 2910 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 2911 "tcp_rput: unacceptable, gap %d, rgap %d, " 2912 "flags 0x%x, seg_seq %u, seg_ack %u, " 2913 "seg_len %d, rnxt %u, snxt %u, %s", 2914 gap, rgap, flags, seg_seq, seg_ack, 2915 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 2916 tcp_display(tcp, NULL, 2917 DISP_ADDR_AND_PORT)); 2918 } 2919 2920 /* 2921 * Arrange to send an ACK in response to the 2922 * unacceptable segment per RFC 793 page 69. 
There 2923 * is only one small difference between ours and the 2924 * acceptability test in the RFC - we accept ACK-only 2925 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 2926 * will be generated. 2927 * 2928 * Note that we have to ACK an ACK-only packet at least 2929 * for stacks that send 0-length keep-alives with 2930 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 2931 * section 4.2.3.6. As long as we don't ever generate 2932 * an unacceptable packet in response to an incoming 2933 * packet that is unacceptable, it should not cause 2934 * "ACK wars". 2935 */ 2936 flags |= TH_ACK_NEEDED; 2937 2938 /* 2939 * Continue processing this segment in order to use the 2940 * ACK information it contains, but skip all other 2941 * sequence-number processing. Processing the ACK 2942 * information is necessary in order to 2943 * re-synchronize connections that may have lost 2944 * synchronization. 2945 * 2946 * We clear seg_len and flag fields related to 2947 * sequence number processing as they are not 2948 * to be trusted for an unacceptable segment. 2949 */ 2950 seg_len = 0; 2951 flags &= ~(TH_SYN | TH_FIN | TH_URG); 2952 goto process_ack; 2953 } 2954 2955 /* Fix seg_seq, and chew the gap off the front. */ 2956 seg_seq = tcp->tcp_rnxt; 2957 urp += gap; 2958 do { 2959 mblk_t *mp2; 2960 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 2961 (uintptr_t)UINT_MAX); 2962 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 2963 if (gap > 0) { 2964 mp->b_rptr = mp->b_wptr - gap; 2965 break; 2966 } 2967 mp2 = mp; 2968 mp = mp->b_cont; 2969 freeb(mp2); 2970 } while (gap < 0); 2971 /* 2972 * If the urgent data has already been acknowledged, we 2973 * should ignore TH_URG below 2974 */ 2975 if (urp < 0) 2976 flags &= ~TH_URG; 2977 } 2978 /* 2979 * rgap is the amount of stuff received out of window. A negative 2980 * value is the amount out of window. 2981 */ 2982 if (rgap < 0) { 2983 mblk_t *mp2; 2984 2985 if (tcp->tcp_rwnd == 0) { 2986 TCPS_BUMP_MIB(tcps, tcpInWinProbe); 2987 } else { 2988 TCPS_BUMP_MIB(tcps, tcpInDataPastWinSegs); 2989 TCPS_UPDATE_MIB(tcps, tcpInDataPastWinBytes, -rgap); 2990 } 2991 2992 /* 2993 * seg_len does not include the FIN, so if more than 2994 * just the FIN is out of window, we act like we don't 2995 * see it. (If just the FIN is out of window, rgap 2996 * will be zero and we will go ahead and acknowledge 2997 * the FIN.) 2998 */ 2999 flags &= ~TH_FIN; 3000 3001 /* Fix seg_len and make sure there is something left. */ 3002 seg_len += rgap; 3003 if (seg_len <= 0) { 3004 /* 3005 * Resets are only valid if they lie within our offered 3006 * window. If the RST bit is set, we just ignore this 3007 * segment. 3008 */ 3009 if (flags & TH_RST) { 3010 freemsg(mp); 3011 return; 3012 } 3013 3014 /* Per RFC 793, we need to send back an ACK. */ 3015 flags |= TH_ACK_NEEDED; 3016 3017 /* 3018 * Send SIGURG as soon as possible i.e. even 3019 * if the TH_URG was delivered in a window probe 3020 * packet (which will be unacceptable). 3021 * 3022 * We generate a signal if none has been generated 3023 * for this connection or if this is a new urgent 3024 * byte. Also send a zero-length "unmarked" message 3025 * to inform SIOCATMARK that this is not the mark. 3026 * 3027 * tcp_urp_last_valid is cleared when the T_exdata_ind 3028 * is sent up. This plus the check for old data 3029 * (gap >= 0) handles the wraparound of the sequence 3030 * number space without having to always track the 3031 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 3032 * this max in its rcv_up variable). 
3033 * 3034 * This prevents duplicate SIGURGS due to a "late" 3035 * zero-window probe when the T_EXDATA_IND has already 3036 * been sent up. 3037 */ 3038 if ((flags & TH_URG) && 3039 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 3040 tcp->tcp_urp_last))) { 3041 if (IPCL_IS_NONSTR(connp)) { 3042 if (!TCP_IS_DETACHED(tcp)) { 3043 (*connp->conn_upcalls-> 3044 su_signal_oob) 3045 (connp->conn_upper_handle, 3046 urp); 3047 } 3048 } else { 3049 mp1 = allocb(0, BPRI_MED); 3050 if (mp1 == NULL) { 3051 freemsg(mp); 3052 return; 3053 } 3054 if (!TCP_IS_DETACHED(tcp) && 3055 !putnextctl1(connp->conn_rq, 3056 M_PCSIG, SIGURG)) { 3057 /* Try again on the rexmit. */ 3058 freemsg(mp1); 3059 freemsg(mp); 3060 return; 3061 } 3062 /* 3063 * If the next byte would be the mark 3064 * then mark with MARKNEXT else mark 3065 * with NOTMARKNEXT. 3066 */ 3067 if (gap == 0 && urp == 0) 3068 mp1->b_flag |= MSGMARKNEXT; 3069 else 3070 mp1->b_flag |= MSGNOTMARKNEXT; 3071 freemsg(tcp->tcp_urp_mark_mp); 3072 tcp->tcp_urp_mark_mp = mp1; 3073 flags |= TH_SEND_URP_MARK; 3074 } 3075 tcp->tcp_urp_last_valid = B_TRUE; 3076 tcp->tcp_urp_last = urp + seg_seq; 3077 } 3078 /* 3079 * If this is a zero window probe, continue to 3080 * process the ACK part. But we need to set seg_len 3081 * to 0 to avoid data processing. Otherwise just 3082 * drop the segment and send back an ACK. 3083 */ 3084 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 3085 flags &= ~(TH_SYN | TH_URG); 3086 seg_len = 0; 3087 goto process_ack; 3088 } else { 3089 freemsg(mp); 3090 goto ack_check; 3091 } 3092 } 3093 /* Pitch out of window stuff off the end. */ 3094 rgap = seg_len; 3095 mp2 = mp; 3096 do { 3097 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 3098 (uintptr_t)INT_MAX); 3099 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 3100 if (rgap < 0) { 3101 mp2->b_wptr += rgap; 3102 if ((mp1 = mp2->b_cont) != NULL) { 3103 mp2->b_cont = NULL; 3104 freemsg(mp1); 3105 } 3106 break; 3107 } 3108 } while ((mp2 = mp2->b_cont) != NULL); 3109 } 3110 ok:; 3111 /* 3112 * TCP should check ECN info for segments inside the window only. 3113 * Therefore the check should be done here. 3114 */ 3115 if (tcp->tcp_ecn_ok) { 3116 if (flags & TH_CWR) { 3117 tcp->tcp_ecn_echo_on = B_FALSE; 3118 } 3119 /* 3120 * Note that both ECN_CE and CWR can be set in the 3121 * same segment. In this case, we once again turn 3122 * on ECN_ECHO. 3123 */ 3124 if (connp->conn_ipversion == IPV4_VERSION) { 3125 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 3126 3127 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 3128 tcp->tcp_ecn_echo_on = B_TRUE; 3129 } 3130 } else { 3131 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 3132 3133 if ((vcf & htonl(IPH_ECN_CE << 20)) == 3134 htonl(IPH_ECN_CE << 20)) { 3135 tcp->tcp_ecn_echo_on = B_TRUE; 3136 } 3137 } 3138 } 3139 3140 /* 3141 * Check whether we can update tcp_ts_recent. This test is 3142 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 3143 * Extensions for High Performance: An Update", Internet Draft. 3144 */ 3145 if (tcp->tcp_snd_ts_ok && 3146 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 3147 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 3148 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 3149 tcp->tcp_last_rcv_lbolt = LBOLT_FASTPATH64; 3150 } 3151 3152 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 3153 /* 3154 * FIN in an out of order segment. We record this in 3155 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 3156 * Clear the FIN so that any check on FIN flag will fail. 
3157 * Remember that FIN also counts in the sequence number 3158 * space. So we need to ack out of order FIN only segments. 3159 */ 3160 if (flags & TH_FIN) { 3161 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 3162 tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 3163 flags &= ~TH_FIN; 3164 flags |= TH_ACK_NEEDED; 3165 } 3166 if (seg_len > 0) { 3167 /* Fill in the SACK blk list. */ 3168 if (tcp->tcp_snd_sack_ok) { 3169 tcp_sack_insert(tcp->tcp_sack_list, 3170 seg_seq, seg_seq + seg_len, 3171 &(tcp->tcp_num_sack_blk)); 3172 } 3173 3174 /* 3175 * Attempt reassembly and see if we have something 3176 * ready to go. 3177 */ 3178 mp = tcp_reass(tcp, mp, seg_seq); 3179 /* Always ack out of order packets */ 3180 flags |= TH_ACK_NEEDED | TH_PUSH; 3181 if (mp) { 3182 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 3183 (uintptr_t)INT_MAX); 3184 seg_len = mp->b_cont ? msgdsize(mp) : 3185 (int)(mp->b_wptr - mp->b_rptr); 3186 seg_seq = tcp->tcp_rnxt; 3187 /* 3188 * A gap is filled and the seq num and len 3189 * of the gap match that of a previously 3190 * received FIN, put the FIN flag back in. 3191 */ 3192 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3193 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3194 flags |= TH_FIN; 3195 tcp->tcp_valid_bits &= 3196 ~TCP_OFO_FIN_VALID; 3197 } 3198 if (tcp->tcp_reass_tid != 0) { 3199 (void) TCP_TIMER_CANCEL(tcp, 3200 tcp->tcp_reass_tid); 3201 /* 3202 * Restart the timer if there is still 3203 * data in the reassembly queue. 3204 */ 3205 if (tcp->tcp_reass_head != NULL) { 3206 tcp->tcp_reass_tid = TCP_TIMER( 3207 tcp, tcp_reass_timer, 3208 tcps->tcps_reass_timeout); 3209 } else { 3210 tcp->tcp_reass_tid = 0; 3211 } 3212 } 3213 } else { 3214 /* 3215 * Keep going even with NULL mp. 3216 * There may be a useful ACK or something else 3217 * we don't want to miss. 3218 * 3219 * But TCP should not perform fast retransmit 3220 * because of the ack number. TCP uses 3221 * seg_len == 0 to determine if it is a pure 3222 * ACK. And this is not a pure ACK. 3223 */ 3224 seg_len = 0; 3225 ofo_seg = B_TRUE; 3226 3227 if (tcps->tcps_reass_timeout != 0 && 3228 tcp->tcp_reass_tid == 0) { 3229 tcp->tcp_reass_tid = TCP_TIMER(tcp, 3230 tcp_reass_timer, 3231 tcps->tcps_reass_timeout); 3232 } 3233 } 3234 } 3235 } else if (seg_len > 0) { 3236 TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs); 3237 TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len); 3238 /* 3239 * If an out of order FIN was received before, and the seq 3240 * num and len of the new segment match that of the FIN, 3241 * put the FIN flag back in. 3242 */ 3243 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3244 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3245 flags |= TH_FIN; 3246 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 3247 } 3248 } 3249 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 3250 if (flags & TH_RST) { 3251 freemsg(mp); 3252 switch (tcp->tcp_state) { 3253 case TCPS_SYN_RCVD: 3254 (void) tcp_clean_death(tcp, ECONNREFUSED); 3255 break; 3256 case TCPS_ESTABLISHED: 3257 case TCPS_FIN_WAIT_1: 3258 case TCPS_FIN_WAIT_2: 3259 case TCPS_CLOSE_WAIT: 3260 (void) tcp_clean_death(tcp, ECONNRESET); 3261 break; 3262 case TCPS_CLOSING: 3263 case TCPS_LAST_ACK: 3264 (void) tcp_clean_death(tcp, 0); 3265 break; 3266 default: 3267 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3268 (void) tcp_clean_death(tcp, ENXIO); 3269 break; 3270 } 3271 return; 3272 } 3273 if (flags & TH_SYN) { 3274 /* 3275 * See RFC 793, Page 71 3276 * 3277 * The seq number must be in the window as it should 3278 * be "fixed" above. 
If it is outside window, it should 3279 * be already rejected. Note that we allow seg_seq to be 3280 * rnxt + rwnd because we want to accept 0 window probe. 3281 */ 3282 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 3283 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 3284 freemsg(mp); 3285 /* 3286 * If the ACK flag is not set, just use our snxt as the 3287 * seq number of the RST segment. 3288 */ 3289 if (!(flags & TH_ACK)) { 3290 seg_ack = tcp->tcp_snxt; 3291 } 3292 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 3293 TH_RST|TH_ACK); 3294 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3295 (void) tcp_clean_death(tcp, ECONNRESET); 3296 return; 3297 } 3298 /* 3299 * urp could be -1 when the urp field in the packet is 0 3300 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 3301 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 3302 */ 3303 if (flags & TH_URG && urp >= 0) { 3304 if (!tcp->tcp_urp_last_valid || 3305 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 3306 /* 3307 * Non-STREAMS sockets handle the urgent data a litte 3308 * differently from STREAMS based sockets. There is no 3309 * need to mark any mblks with the MSG{NOT,}MARKNEXT 3310 * flags to keep SIOCATMARK happy. Instead a 3311 * su_signal_oob upcall is made to update the mark. 3312 * Neither is a T_EXDATA_IND mblk needed to be 3313 * prepended to the urgent data. The urgent data is 3314 * delivered using the su_recv upcall, where we set 3315 * the MSG_OOB flag to indicate that it is urg data. 3316 * 3317 * Neither TH_SEND_URP_MARK nor TH_MARKNEXT_NEEDED 3318 * are used by non-STREAMS sockets. 3319 */ 3320 if (IPCL_IS_NONSTR(connp)) { 3321 if (!TCP_IS_DETACHED(tcp)) { 3322 (*connp->conn_upcalls->su_signal_oob) 3323 (connp->conn_upper_handle, urp); 3324 } 3325 } else { 3326 /* 3327 * If we haven't generated the signal yet for 3328 * this urgent pointer value, do it now. Also, 3329 * send up a zero-length M_DATA indicating 3330 * whether or not this is the mark. The latter 3331 * is not needed when a T_EXDATA_IND is sent up. 3332 * However, if there are allocation failures 3333 * this code relies on the sender retransmitting 3334 * and the socket code for determining the mark 3335 * should not block waiting for the peer to 3336 * transmit. Thus, for simplicity we always 3337 * send up the mark indication. 3338 */ 3339 mp1 = allocb(0, BPRI_MED); 3340 if (mp1 == NULL) { 3341 freemsg(mp); 3342 return; 3343 } 3344 if (!TCP_IS_DETACHED(tcp) && 3345 !putnextctl1(connp->conn_rq, M_PCSIG, 3346 SIGURG)) { 3347 /* Try again on the rexmit. */ 3348 freemsg(mp1); 3349 freemsg(mp); 3350 return; 3351 } 3352 /* 3353 * Mark with NOTMARKNEXT for now. 3354 * The code below will change this to MARKNEXT 3355 * if we are at the mark. 3356 * 3357 * If there are allocation failures (e.g. in 3358 * dupmsg below) the next time tcp_input_data 3359 * sees the urgent segment it will send up the 3360 * MSGMARKNEXT message. 
3361 */ 3362 mp1->b_flag |= MSGNOTMARKNEXT; 3363 freemsg(tcp->tcp_urp_mark_mp); 3364 tcp->tcp_urp_mark_mp = mp1; 3365 flags |= TH_SEND_URP_MARK; 3366 #ifdef DEBUG 3367 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3368 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 3369 "last %x, %s", 3370 seg_seq, urp, tcp->tcp_urp_last, 3371 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3372 #endif /* DEBUG */ 3373 } 3374 tcp->tcp_urp_last_valid = B_TRUE; 3375 tcp->tcp_urp_last = urp + seg_seq; 3376 } else if (tcp->tcp_urp_mark_mp != NULL) { 3377 /* 3378 * An allocation failure prevented the previous 3379 * tcp_input_data from sending up the allocated 3380 * MSG*MARKNEXT message - send it up this time 3381 * around. 3382 */ 3383 flags |= TH_SEND_URP_MARK; 3384 } 3385 3386 /* 3387 * If the urgent byte is in this segment, make sure that it is 3388 * all by itself. This makes it much easier to deal with the 3389 * possibility of an allocation failure on the T_exdata_ind. 3390 * Note that seg_len is the number of bytes in the segment, and 3391 * urp is the offset into the segment of the urgent byte. 3392 * urp < seg_len means that the urgent byte is in this segment. 3393 */ 3394 if (urp < seg_len) { 3395 if (seg_len != 1) { 3396 uint32_t tmp_rnxt; 3397 /* 3398 * Break it up and feed it back in. 3399 * Re-attach the IP header. 3400 */ 3401 mp->b_rptr = iphdr; 3402 if (urp > 0) { 3403 /* 3404 * There is stuff before the urgent 3405 * byte. 3406 */ 3407 mp1 = dupmsg(mp); 3408 if (!mp1) { 3409 /* 3410 * Trim from urgent byte on. 3411 * The rest will come back. 3412 */ 3413 (void) adjmsg(mp, 3414 urp - seg_len); 3415 tcp_input_data(connp, 3416 mp, NULL, ira); 3417 return; 3418 } 3419 (void) adjmsg(mp1, urp - seg_len); 3420 /* Feed this piece back in. */ 3421 tmp_rnxt = tcp->tcp_rnxt; 3422 tcp_input_data(connp, mp1, NULL, ira); 3423 /* 3424 * If the data passed back in was not 3425 * processed (ie: bad ACK) sending 3426 * the remainder back in will cause a 3427 * loop. In this case, drop the 3428 * packet and let the sender try 3429 * sending a good packet. 3430 */ 3431 if (tmp_rnxt == tcp->tcp_rnxt) { 3432 freemsg(mp); 3433 return; 3434 } 3435 } 3436 if (urp != seg_len - 1) { 3437 uint32_t tmp_rnxt; 3438 /* 3439 * There is stuff after the urgent 3440 * byte. 3441 */ 3442 mp1 = dupmsg(mp); 3443 if (!mp1) { 3444 /* 3445 * Trim everything beyond the 3446 * urgent byte. The rest will 3447 * come back. 3448 */ 3449 (void) adjmsg(mp, 3450 urp + 1 - seg_len); 3451 tcp_input_data(connp, 3452 mp, NULL, ira); 3453 return; 3454 } 3455 (void) adjmsg(mp1, urp + 1 - seg_len); 3456 tmp_rnxt = tcp->tcp_rnxt; 3457 tcp_input_data(connp, mp1, NULL, ira); 3458 /* 3459 * If the data passed back in was not 3460 * processed (ie: bad ACK) sending 3461 * the remainder back in will cause a 3462 * loop. In this case, drop the 3463 * packet and let the sender try 3464 * sending a good packet. 3465 */ 3466 if (tmp_rnxt == tcp->tcp_rnxt) { 3467 freemsg(mp); 3468 return; 3469 } 3470 } 3471 tcp_input_data(connp, mp, NULL, ira); 3472 return; 3473 } 3474 /* 3475 * This segment contains only the urgent byte. We 3476 * have to allocate the T_exdata_ind, if we can. 3477 */ 3478 if (IPCL_IS_NONSTR(connp)) { 3479 int error; 3480 3481 (*connp->conn_upcalls->su_recv) 3482 (connp->conn_upper_handle, mp, seg_len, 3483 MSG_OOB, &error, NULL); 3484 /* 3485 * We should never be in middle of a 3486 * fallback, the squeue guarantees that. 
3487 */ 3488 ASSERT(error != EOPNOTSUPP); 3489 mp = NULL; 3490 goto update_ack; 3491 } else if (!tcp->tcp_urp_mp) { 3492 struct T_exdata_ind *tei; 3493 mp1 = allocb(sizeof (struct T_exdata_ind), 3494 BPRI_MED); 3495 if (!mp1) { 3496 /* 3497 * Sigh... It'll be back. 3498 * Generate any MSG*MARK message now. 3499 */ 3500 freemsg(mp); 3501 seg_len = 0; 3502 if (flags & TH_SEND_URP_MARK) { 3503 3504 3505 ASSERT(tcp->tcp_urp_mark_mp); 3506 tcp->tcp_urp_mark_mp->b_flag &= 3507 ~MSGNOTMARKNEXT; 3508 tcp->tcp_urp_mark_mp->b_flag |= 3509 MSGMARKNEXT; 3510 } 3511 goto ack_check; 3512 } 3513 mp1->b_datap->db_type = M_PROTO; 3514 tei = (struct T_exdata_ind *)mp1->b_rptr; 3515 tei->PRIM_type = T_EXDATA_IND; 3516 tei->MORE_flag = 0; 3517 mp1->b_wptr = (uchar_t *)&tei[1]; 3518 tcp->tcp_urp_mp = mp1; 3519 #ifdef DEBUG 3520 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3521 "tcp_rput: allocated exdata_ind %s", 3522 tcp_display(tcp, NULL, 3523 DISP_PORT_ONLY)); 3524 #endif /* DEBUG */ 3525 /* 3526 * There is no need to send a separate MSG*MARK 3527 * message since the T_EXDATA_IND will be sent 3528 * now. 3529 */ 3530 flags &= ~TH_SEND_URP_MARK; 3531 freemsg(tcp->tcp_urp_mark_mp); 3532 tcp->tcp_urp_mark_mp = NULL; 3533 } 3534 /* 3535 * Now we are all set. On the next putnext upstream, 3536 * tcp_urp_mp will be non-NULL and will get prepended 3537 * to what has to be this piece containing the urgent 3538 * byte. If for any reason we abort this segment below, 3539 * if it comes back, we will have this ready, or it 3540 * will get blown off in close. 3541 */ 3542 } else if (urp == seg_len) { 3543 /* 3544 * The urgent byte is the next byte after this sequence 3545 * number. If this endpoint is non-STREAMS, then there 3546 * is nothing to do here since the socket has already 3547 * been notified about the urg pointer by the 3548 * su_signal_oob call above. 3549 * 3550 * In case of STREAMS, some more work might be needed. 3551 * If there is data it is marked with MSGMARKNEXT and 3552 * and any tcp_urp_mark_mp is discarded since it is not 3553 * needed. Otherwise, if the code above just allocated 3554 * a zero-length tcp_urp_mark_mp message, that message 3555 * is tagged with MSGMARKNEXT. Sending up these 3556 * MSGMARKNEXT messages makes SIOCATMARK work correctly 3557 * even though the T_EXDATA_IND will not be sent up 3558 * until the urgent byte arrives. 
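 *
 * As a minimal usage sketch (application-side, illustrative only), a
 * STREAMS consumer typically does something like
 *
 *	for (;;) {
 *		if (ioctl(fd, SIOCATMARK, &atmark) < 0 || atmark)
 *			break;
 *		(void) read(fd, buf, sizeof (buf));
 *	}
 *	(void) recv(fd, &oobbyte, 1, MSG_OOB);
 *
 * and depends on these mark indications for SIOCATMARK to report the
 * mark correctly.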
*/
3560 if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
3561 if (seg_len != 0) {
3562 flags |= TH_MARKNEXT_NEEDED;
3563 freemsg(tcp->tcp_urp_mark_mp);
3564 tcp->tcp_urp_mark_mp = NULL;
3565 flags &= ~TH_SEND_URP_MARK;
3566 } else if (tcp->tcp_urp_mark_mp != NULL) {
3567 flags |= TH_SEND_URP_MARK;
3568 tcp->tcp_urp_mark_mp->b_flag &=
3569 ~MSGNOTMARKNEXT;
3570 tcp->tcp_urp_mark_mp->b_flag |=
3571 MSGMARKNEXT;
3572 }
3573 }
3574 #ifdef DEBUG
3575 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3576 "tcp_rput: AT MARK, len %d, flags 0x%x, %s",
3577 seg_len, flags,
3578 tcp_display(tcp, NULL, DISP_PORT_ONLY));
3579 #endif /* DEBUG */
3580 }
3581 #ifdef DEBUG
3582 else {
3583 /* Data left until we hit mark */
3584 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3585 "tcp_rput: URP %d bytes left, %s",
3586 urp - seg_len, tcp_display(tcp, NULL,
3587 DISP_PORT_ONLY));
3588 }
3589 #endif /* DEBUG */
3590 }
3591
3592 process_ack:
3593 if (!(flags & TH_ACK)) {
3594 freemsg(mp);
3595 goto xmit_check;
3596 }
3597 }
3598 bytes_acked = (int)(seg_ack - tcp->tcp_suna);
3599
3600 if (bytes_acked > 0)
3601 tcp->tcp_ip_forward_progress = B_TRUE;
3602 if (tcp->tcp_state == TCPS_SYN_RCVD) {
3603 if ((tcp->tcp_conn.tcp_eager_conn_ind != NULL) &&
3604 ((tcp->tcp_kssl_ent == NULL) || !tcp->tcp_kssl_pending)) {
3605 /* 3-way handshake complete - pass up the T_CONN_IND */
3606 tcp_t *listener = tcp->tcp_listener;
3607 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind;
3608
3609 tcp->tcp_tconnind_started = B_TRUE;
3610 tcp->tcp_conn.tcp_eager_conn_ind = NULL;
3611 /*
3612 * Being here means the eager is fine, but it can
3613 * still get a TH_RST at any point between now and
3614 * when accept completes, and then disappear. We need
3615 * to ensure that our reference to the eager stays
3616 * valid after we leave the eager's perimeter, so we
3617 * do an extra refhold.
3618 */
3619 CONN_INC_REF(connp);
3620
3621 /*
3622 * The listener also exists because of the refhold
3623 * done in tcp_input_listener. It is possible that it
3624 * might have closed. We will check that once we
3625 * get inside the listener's context.
3626 */
3627 CONN_INC_REF(listener->tcp_connp);
3628 if (listener->tcp_connp->conn_sqp ==
3629 connp->conn_sqp) {
3630 /*
3631 * We optimize by not calling an SQUEUE_ENTER
3632 * on the listener since we know that the
3633 * listener and eager squeues are the same.
3634 * We are able to make this check safely only
3635 * because neither the eager nor the listener
3636 * can change its squeue. Only an active connect
3637 * can change its squeue.
3638 */
3639 tcp_send_conn_ind(listener->tcp_connp, mp,
3640 listener->tcp_connp->conn_sqp);
3641 CONN_DEC_REF(listener->tcp_connp);
3642 } else if (!tcp->tcp_loopback) {
3643 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
3644 mp, tcp_send_conn_ind,
3645 listener->tcp_connp, NULL, SQ_FILL,
3646 SQTAG_TCP_CONN_IND);
3647 } else {
3648 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
3649 mp, tcp_send_conn_ind,
3650 listener->tcp_connp, NULL, SQ_NODRAIN,
3651 SQTAG_TCP_CONN_IND);
3652 }
3653 }
3654
3655 /*
3656 * We are seeing the final ACK in the three-way
3657 * handshake of an actively opened connection,
3658 * so we must send up a T_CONN_CON.
3659 *
3660 * tcp_sendmsg() checks tcp_state without entering
3661 * the squeue so tcp_state should be updated before
3662 * sending up connection confirmation. Probe the state
3663 * change below when we are sure sending of the confirmation
3664 * has succeeded.
3665 */ 3666 tcp->tcp_state = TCPS_ESTABLISHED; 3667 3668 if (tcp->tcp_active_open) { 3669 if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) { 3670 freemsg(mp); 3671 tcp->tcp_state = TCPS_SYN_RCVD; 3672 return; 3673 } 3674 /* 3675 * Don't fuse the loopback endpoints for 3676 * simultaneous active opens. 3677 */ 3678 if (tcp->tcp_loopback) { 3679 TCP_STAT(tcps, tcp_fusion_unfusable); 3680 tcp->tcp_unfusable = B_TRUE; 3681 } 3682 /* 3683 * For simultaneous active open, trace receipt of final 3684 * ACK as tcp:::connect-established. 3685 */ 3686 DTRACE_TCP5(connect__established, mblk_t *, NULL, 3687 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3688 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3689 } else { 3690 /* 3691 * For passive open, trace receipt of final ACK as 3692 * tcp:::accept-established. 3693 */ 3694 DTRACE_TCP5(accept__established, mblk_t *, NULL, 3695 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3696 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3697 } 3698 TCPS_CONN_INC(tcps); 3699 3700 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */ 3701 bytes_acked--; 3702 /* SYN was acked - making progress */ 3703 tcp->tcp_ip_forward_progress = B_TRUE; 3704 3705 /* 3706 * If SYN was retransmitted, need to reset all 3707 * retransmission info as this segment will be 3708 * treated as a dup ACK. 3709 */ 3710 if (tcp->tcp_rexmit) { 3711 tcp->tcp_rexmit = B_FALSE; 3712 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 3713 tcp->tcp_rexmit_max = tcp->tcp_snxt; 3714 tcp->tcp_snd_burst = tcp->tcp_localnet ? 3715 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 3716 tcp->tcp_ms_we_have_waited = 0; 3717 tcp->tcp_cwnd = mss; 3718 } 3719 3720 /* 3721 * We set the send window to zero here. 3722 * This is needed if there is data to be 3723 * processed already on the queue. 3724 * Later (at the swnd_update label), when the 3725 * "new_swnd > tcp_swnd" condition is satisfied, 3726 * the XMIT_NEEDED flag is set in the current 3727 * (SYN_RCVD) state. This ensures tcp_wput_data() is 3728 * called if there is already data on queue in 3729 * this state. 3730 */ 3731 tcp->tcp_swnd = 0; 3732 3733 if (new_swnd > tcp->tcp_max_swnd) 3734 tcp->tcp_max_swnd = new_swnd; 3735 tcp->tcp_swl1 = seg_seq; 3736 tcp->tcp_swl2 = seg_ack; 3737 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 3738 3739 /* Trace change from SYN_RCVD -> ESTABLISHED here */ 3740 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 3741 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL, 3742 int32_t, TCPS_SYN_RCVD); 3743 3744 /* Fuse when both sides are in ESTABLISHED state */ 3745 if (tcp->tcp_loopback && do_tcp_fusion) 3746 tcp_fuse(tcp, iphdr, tcpha); 3747 3748 } 3749 /* This code follows 4.4BSD-Lite2 mostly. */ 3750 if (bytes_acked < 0) 3751 goto est; 3752 3753 /* 3754 * If TCP is ECN capable and the congestion experience bit is 3755 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be 3756 * done once per window (or more loosely, per RTT). 3757 */ 3758 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max)) 3759 tcp->tcp_cwr = B_FALSE; 3760 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) { 3761 if (!tcp->tcp_cwr) { 3762 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss; 3763 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss; 3764 tcp->tcp_cwnd = npkt * mss; 3765 /* 3766 * If the cwnd is 0, use the timer to clock out 3767 * new segments. This is required by the ECN spec. 3768 */ 3769 if (npkt == 0) { 3770 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 3771 /* 3772 * This makes sure that when the ACK comes 3773 * back, we will increase tcp_cwnd by 1 MSS.
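 * As an illustrative example of the ECE handling above (numbers
 * are made up): with 10 * mss of unacknowledged data in flight,
 * npkt is 5, so tcp_cwnd_ssthresh and tcp_cwnd both become
 * 5 * mss; with less than 2 * mss in flight, npkt is 0, tcp_cwnd
 * collapses to zero, and the retransmit timer restarted above is
 * what clocks the next segment out.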
3774 */ 3775 tcp->tcp_cwnd_cnt = 0; 3776 } 3777 tcp->tcp_cwr = B_TRUE; 3778 /* 3779 * This marks the end of the current window of in 3780 * flight data. That is why we don't use 3781 * tcp_suna + tcp_swnd. Only data in flight can 3782 * provide ECN info. 3783 */ 3784 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3785 tcp->tcp_ecn_cwr_sent = B_FALSE; 3786 } 3787 } 3788 3789 mp1 = tcp->tcp_xmit_head; 3790 if (bytes_acked == 0) { 3791 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) { 3792 int dupack_cnt; 3793 3794 TCPS_BUMP_MIB(tcps, tcpInDupAck); 3795 /* 3796 * Fast retransmit. When we have seen exactly three 3797 * identical ACKs while we have unacked data 3798 * outstanding we take it as a hint that our peer 3799 * dropped something. 3800 * 3801 * If TCP is retransmitting, don't do fast retransmit. 3802 */ 3803 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt && 3804 ! tcp->tcp_rexmit) { 3805 /* Do Limited Transmit */ 3806 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) < 3807 tcps->tcps_dupack_fast_retransmit) { 3808 /* 3809 * RFC 3042 3810 * 3811 * What we need to do is temporarily 3812 * increase tcp_cwnd so that new 3813 * data can be sent if it is allowed 3814 * by the receive window (tcp_rwnd). 3815 * tcp_wput_data() will take care of 3816 * the rest. 3817 * 3818 * If the connection is SACK capable, 3819 * only do limited xmit when there 3820 * is SACK info. 3821 * 3822 * Note how tcp_cwnd is incremented. 3823 * The first dup ACK will increase 3824 * it by 1 MSS. The second dup ACK 3825 * will increase it by 2 MSS. This 3826 * means that only 1 new segment will 3827 * be sent for each dup ACK. 3828 */ 3829 if (tcp->tcp_unsent > 0 && 3830 (!tcp->tcp_snd_sack_ok || 3831 (tcp->tcp_snd_sack_ok && 3832 tcp->tcp_notsack_list != NULL))) { 3833 tcp->tcp_cwnd += mss << 3834 (tcp->tcp_dupack_cnt - 1); 3835 flags |= TH_LIMIT_XMIT; 3836 } 3837 } else if (dupack_cnt == 3838 tcps->tcps_dupack_fast_retransmit) { 3839 3840 /* 3841 * If we have reduced tcp_ssthresh 3842 * because of ECN, do not reduce it again 3843 * unless it is already one window of data 3844 * away. After one window of data, tcp_cwr 3845 * should then be cleared. Note that 3846 * for non ECN capable connection, tcp_cwr 3847 * should always be false. 3848 * 3849 * Adjust cwnd since the duplicate 3850 * ack indicates that a packet was 3851 * dropped (due to congestion.) 3852 */ 3853 if (!tcp->tcp_cwr) { 3854 npkt = ((tcp->tcp_snxt - 3855 tcp->tcp_suna) >> 1) / mss; 3856 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 3857 mss; 3858 tcp->tcp_cwnd = (npkt + 3859 tcp->tcp_dupack_cnt) * mss; 3860 } 3861 if (tcp->tcp_ecn_ok) { 3862 tcp->tcp_cwr = B_TRUE; 3863 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3864 tcp->tcp_ecn_cwr_sent = B_FALSE; 3865 } 3866 3867 /* 3868 * We do Hoe's algorithm. Refer to her 3869 * paper "Improving the Start-up Behavior 3870 * of a Congestion Control Scheme for TCP," 3871 * appeared in SIGCOMM'96. 3872 * 3873 * Save highest seq no we have sent so far. 3874 * Be careful about the invisible FIN byte. 3875 */ 3876 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 3877 (tcp->tcp_unsent == 0)) { 3878 tcp->tcp_rexmit_max = tcp->tcp_fss; 3879 } else { 3880 tcp->tcp_rexmit_max = tcp->tcp_snxt; 3881 } 3882 3883 /* 3884 * Do not allow bursty traffic during. 3885 * fast recovery. Refer to Fall and Floyd's 3886 * paper "Simulation-based Comparisons of 3887 * Tahoe, Reno and SACK TCP" (in CCR?) 3888 * This is a best current practise. 
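 * As an illustrative example of the numbers above (made up): with
 * 8 * mss outstanding and no prior ECN cwnd reduction, npkt is 4,
 * so tcp_cwnd_ssthresh becomes 4 * mss and tcp_cwnd is inflated to
 * (4 + 3) * mss on the third dup ACK; each further dup ACK during
 * recovery inflates tcp_cwnd by one more mss, letting roughly one
 * new segment out, while the burst limit set below keeps those
 * segments from going out all at once.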
3889 */ 3890 tcp->tcp_snd_burst = TCP_CWND_SS; 3891 3892 /* 3893 * For SACK: 3894 * Calculate tcp_pipe, which is the 3895 * estimated number of bytes in 3896 * network. 3897 * 3898 * tcp_fack is the highest sack'ed seq num 3899 * TCP has received. 3900 * 3901 * tcp_pipe is explained in the above quoted 3902 * Fall and Floyd's paper. tcp_fack is 3903 * explained in Mathis and Mahdavi's 3904 * "Forward Acknowledgment: Refining TCP 3905 * Congestion Control" in SIGCOMM '96. 3906 */ 3907 if (tcp->tcp_snd_sack_ok) { 3908 if (tcp->tcp_notsack_list != NULL) { 3909 tcp->tcp_pipe = tcp->tcp_snxt - 3910 tcp->tcp_fack; 3911 tcp->tcp_sack_snxt = seg_ack; 3912 flags |= TH_NEED_SACK_REXMIT; 3913 } else { 3914 /* 3915 * Always initialize tcp_pipe 3916 * even though we don't have 3917 * any SACK info. If later 3918 * we get SACK info and 3919 * tcp_pipe is not initialized, 3920 * funny things will happen. 3921 */ 3922 tcp->tcp_pipe = 3923 tcp->tcp_cwnd_ssthresh; 3924 } 3925 } else { 3926 flags |= TH_REXMIT_NEEDED; 3927 } /* tcp_snd_sack_ok */ 3928 3929 } else { 3930 /* 3931 * Here we perform congestion 3932 * avoidance, but NOT slow start. 3933 * This is known as the Fast 3934 * Recovery Algorithm. 3935 */ 3936 if (tcp->tcp_snd_sack_ok && 3937 tcp->tcp_notsack_list != NULL) { 3938 flags |= TH_NEED_SACK_REXMIT; 3939 tcp->tcp_pipe -= mss; 3940 if (tcp->tcp_pipe < 0) 3941 tcp->tcp_pipe = 0; 3942 } else { 3943 /* 3944 * We know that one more packet has 3945 * left the pipe thus we can update 3946 * cwnd. 3947 */ 3948 cwnd = tcp->tcp_cwnd + mss; 3949 if (cwnd > tcp->tcp_cwnd_max) 3950 cwnd = tcp->tcp_cwnd_max; 3951 tcp->tcp_cwnd = cwnd; 3952 if (tcp->tcp_unsent > 0) 3953 flags |= TH_XMIT_NEEDED; 3954 } 3955 } 3956 } 3957 } else if (tcp->tcp_zero_win_probe) { 3958 /* 3959 * If the window has opened, need to arrange 3960 * to send additional data. 3961 */ 3962 if (new_swnd != 0) { 3963 /* tcp_suna != tcp_snxt */ 3964 /* Packet contains a window update */ 3965 TCPS_BUMP_MIB(tcps, tcpInWinUpdate); 3966 tcp->tcp_zero_win_probe = 0; 3967 tcp->tcp_timer_backoff = 0; 3968 tcp->tcp_ms_we_have_waited = 0; 3969 3970 /* 3971 * Transmit starting with tcp_suna since 3972 * the one byte probe is not ack'ed. 3973 * If TCP has sent more than one identical 3974 * probe, tcp_rexmit will be set. That means 3975 * tcp_ss_rexmit() will send out the one 3976 * byte along with new data. Otherwise, 3977 * fake the retransmission. 3978 */ 3979 flags |= TH_XMIT_NEEDED; 3980 if (!tcp->tcp_rexmit) { 3981 tcp->tcp_rexmit = B_TRUE; 3982 tcp->tcp_dupack_cnt = 0; 3983 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 3984 tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 3985 } 3986 } 3987 } 3988 goto swnd_update; 3989 } 3990 3991 /* 3992 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 3993 * If the ACK value acks something that we have not yet sent, it might 3994 * be an old duplicate segment. Send an ACK to re-synchronize the 3995 * other side. 3996 * Note: reset in response to unacceptable ACK in SYN_RECEIVE 3997 * state is handled above, so we can always just drop the segment and 3998 * send an ACK here. 3999 * 4000 * In the case where the peer shrinks the window, we see the new window 4001 * update, but all the data sent previously is queued up by the peer. 4002 * To account for this, in tcp_process_shrunk_swnd(), the sequence 4003 * number, which was already sent, and within window, is recorded. 4004 * tcp_snxt is then updated. 
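 * As an illustrative example (sequence numbers made up): suppose
 * we had sent up to sequence number 15000 when the peer shrank its
 * window so that only data up to 12000 still fits. 15000 is
 * recorded in tcp_snxt_shrunk and tcp_snxt is pulled back to
 * 12000; a later ACK for 14000 then appears to ack unsent data,
 * although it really acks bytes that were sent before the shrink.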
4005 * 4006 * If the window has previously shrunk, and an ACK for data not yet 4007 * sent according to tcp_snxt is received, it may still be valid. If 4008 * the ACK is for data within the window at the time the window was 4009 * shrunk, then the ACK is acceptable. In this case tcp_snxt is set to 4010 * the sequence number ACK'ed. 4011 * 4012 * If the ACK covers all the data sent at the time the window was 4013 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE. 4014 * 4015 * Should we send ACKs in response to ACK only segments? 4016 */ 4017 4018 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 4019 if ((tcp->tcp_is_wnd_shrnk) && 4020 (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) { 4021 uint32_t data_acked_ahead_snxt; 4022 4023 data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt; 4024 tcp_update_xmit_tail(tcp, seg_ack); 4025 tcp->tcp_unsent -= data_acked_ahead_snxt; 4026 } else { 4027 TCPS_BUMP_MIB(tcps, tcpInAckUnsent); 4028 /* drop the received segment */ 4029 freemsg(mp); 4030 4031 /* 4032 * Send back an ACK. If tcp_drop_ack_unsent_cnt is 4033 * greater than 0, check if the number of such 4034 * bogus ACKs is greater than that count. If yes, 4035 * don't send back any ACK. This prevents TCP from 4036 * getting into an ACK storm if somehow an attacker 4037 * successfully spoofs an acceptable segment to our 4038 * peer. If this continues (count > 2 X threshold), 4039 * we should abort this connection. 4040 */ 4041 if (tcp_drop_ack_unsent_cnt > 0 && 4042 ++tcp->tcp_in_ack_unsent > 4043 tcp_drop_ack_unsent_cnt) { 4044 TCP_STAT(tcps, tcp_in_ack_unsent_drop); 4045 if (tcp->tcp_in_ack_unsent > 2 * 4046 tcp_drop_ack_unsent_cnt) { 4047 (void) tcp_clean_death(tcp, EPROTO); 4048 } 4049 return; 4050 } 4051 mp = tcp_ack_mp(tcp); 4052 if (mp != NULL) { 4053 BUMP_LOCAL(tcp->tcp_obsegs); 4054 TCPS_BUMP_MIB(tcps, tcpOutAck); 4055 tcp_send_data(tcp, mp); 4056 } 4057 return; 4058 } 4059 } else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack, 4060 tcp->tcp_snxt_shrunk)) { 4061 tcp->tcp_is_wnd_shrnk = B_FALSE; 4062 } 4063 4064 /* 4065 * TCP gets a new ACK, update the notsack'ed list to delete those 4066 * blocks that are covered by this ACK. 4067 */ 4068 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 4069 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack, 4070 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list)); 4071 } 4072 4073 /* 4074 * If we got an ACK after fast retransmit, check to see 4075 * if it is a partial ACK. If it is not and the congestion 4076 * window was inflated to account for the other side's 4077 * cached packets, retract it. If it is, do Hoe's algorithm. 4078 */ 4079 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) { 4080 ASSERT(tcp->tcp_rexmit == B_FALSE); 4081 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) { 4082 tcp->tcp_dupack_cnt = 0; 4083 /* 4084 * Restore the orig tcp_cwnd_ssthresh after 4085 * fast retransmit phase. 4086 */ 4087 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) { 4088 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh; 4089 } 4090 tcp->tcp_rexmit_max = seg_ack; 4091 tcp->tcp_cwnd_cnt = 0; 4092 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4093 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4094 4095 /* 4096 * Remove all notsack info to avoid confusion with 4097 * the next fast retransmit/recovery phase.
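 * In other words, an ACK at or beyond tcp_rexmit_max means the
 * entire window that was outstanding when fast retransmit started
 * has been accounted for, so tcp_cwnd is deflated back to
 * tcp_cwnd_ssthresh above and recovery ends; a smaller (partial)
 * ACK falls into the else branch below and keeps recovery going
 * along the lines of Hoe's algorithm.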
4098 */ 4099 if (tcp->tcp_snd_sack_ok) { 4100 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, 4101 tcp); 4102 } 4103 } else { 4104 if (tcp->tcp_snd_sack_ok && 4105 tcp->tcp_notsack_list != NULL) { 4106 flags |= TH_NEED_SACK_REXMIT; 4107 tcp->tcp_pipe -= mss; 4108 if (tcp->tcp_pipe < 0) 4109 tcp->tcp_pipe = 0; 4110 } else { 4111 /* 4112 * Hoe's algorithm: 4113 * 4114 * Retransmit the unack'ed segment and 4115 * restart fast recovery. Note that we 4116 * need to scale back tcp_cwnd to the 4117 * original value when we started fast 4118 * recovery. This is to prevent overly 4119 * aggressive behaviour in sending new 4120 * segments. 4121 */ 4122 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh + 4123 tcps->tcps_dupack_fast_retransmit * mss; 4124 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd; 4125 flags |= TH_REXMIT_NEEDED; 4126 } 4127 } 4128 } else { 4129 tcp->tcp_dupack_cnt = 0; 4130 if (tcp->tcp_rexmit) { 4131 /* 4132 * TCP is retranmitting. If the ACK ack's all 4133 * outstanding data, update tcp_rexmit_max and 4134 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt 4135 * to the correct value. 4136 * 4137 * Note that SEQ_LEQ() is used. This is to avoid 4138 * unnecessary fast retransmit caused by dup ACKs 4139 * received when TCP does slow start retransmission 4140 * after a time out. During this phase, TCP may 4141 * send out segments which are already received. 4142 * This causes dup ACKs to be sent back. 4143 */ 4144 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) { 4145 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) { 4146 tcp->tcp_rexmit_nxt = seg_ack; 4147 } 4148 if (seg_ack != tcp->tcp_rexmit_max) { 4149 flags |= TH_XMIT_NEEDED; 4150 } 4151 } else { 4152 tcp->tcp_rexmit = B_FALSE; 4153 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 4154 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4155 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4156 } 4157 tcp->tcp_ms_we_have_waited = 0; 4158 } 4159 } 4160 4161 TCPS_BUMP_MIB(tcps, tcpInAckSegs); 4162 TCPS_UPDATE_MIB(tcps, tcpInAckBytes, bytes_acked); 4163 tcp->tcp_suna = seg_ack; 4164 if (tcp->tcp_zero_win_probe != 0) { 4165 tcp->tcp_zero_win_probe = 0; 4166 tcp->tcp_timer_backoff = 0; 4167 } 4168 4169 /* 4170 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed. 4171 * Note that it cannot be the SYN being ack'ed. The code flow 4172 * will not reach here. 4173 */ 4174 if (mp1 == NULL) { 4175 goto fin_acked; 4176 } 4177 4178 /* 4179 * Update the congestion window. 4180 * 4181 * If TCP is not ECN capable or TCP is ECN capable but the 4182 * congestion experience bit is not set, increase the tcp_cwnd as 4183 * usual. 4184 */ 4185 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) { 4186 cwnd = tcp->tcp_cwnd; 4187 add = mss; 4188 4189 if (cwnd >= tcp->tcp_cwnd_ssthresh) { 4190 /* 4191 * This is to prevent an increase of less than 1 MSS of 4192 * tcp_cwnd. With partial increase, tcp_wput_data() 4193 * may send out tinygrams in order to preserve mblk 4194 * boundaries. 4195 * 4196 * By initializing tcp_cwnd_cnt to new tcp_cwnd and 4197 * decrementing it by 1 MSS for every ACKs, tcp_cwnd is 4198 * increased by 1 MSS for every RTTs. 4199 */ 4200 if (tcp->tcp_cwnd_cnt <= 0) { 4201 tcp->tcp_cwnd_cnt = cwnd + add; 4202 } else { 4203 tcp->tcp_cwnd_cnt -= add; 4204 add = 0; 4205 } 4206 } 4207 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max); 4208 } 4209 4210 /* See if the latest urgent data has been acknowledged */ 4211 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && 4212 SEQ_GT(seg_ack, tcp->tcp_urg)) 4213 tcp->tcp_valid_bits &= ~TCP_URG_VALID; 4214 4215 /* Can we update the RTT estimates? 
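 * With the timestamp option in use, every ACK echoing a non-zero
 * TSecr (tcp_opt_ts_ecr) yields a sample. Without timestamps we
 * fall back to the send time stashed in b_prev of the mblk at the
 * head of the transmit queue, and only accept a sample when the
 * ACK advances past tcp_csuna; since tcp_csuna is pushed to
 * tcp_snxt whenever we retransmit, retransmitted data never feeds
 * the estimator (Karn's rule).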
*/ 4216 if (tcp->tcp_snd_ts_ok) { 4217 /* Ignore zero timestamp echo-reply. */ 4218 if (tcpopt.tcp_opt_ts_ecr != 0) { 4219 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4220 (int32_t)tcpopt.tcp_opt_ts_ecr); 4221 } 4222 4223 /* If needed, restart the timer. */ 4224 if (tcp->tcp_set_timer == 1) { 4225 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4226 tcp->tcp_set_timer = 0; 4227 } 4228 /* 4229 * Update tcp_csuna in case the other side stops sending 4230 * us timestamps. 4231 */ 4232 tcp->tcp_csuna = tcp->tcp_snxt; 4233 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) { 4234 /* 4235 * An ACK sequence we haven't seen before, so get the RTT 4236 * and update the RTO. But first check if the timestamp is 4237 * valid to use. 4238 */ 4239 if ((mp1->b_next != NULL) && 4240 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next))) 4241 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4242 (int32_t)(intptr_t)mp1->b_prev); 4243 else 4244 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4245 4246 /* Remeber the last sequence to be ACKed */ 4247 tcp->tcp_csuna = seg_ack; 4248 if (tcp->tcp_set_timer == 1) { 4249 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4250 tcp->tcp_set_timer = 0; 4251 } 4252 } else { 4253 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4254 } 4255 4256 /* Eat acknowledged bytes off the xmit queue. */ 4257 for (;;) { 4258 mblk_t *mp2; 4259 uchar_t *wptr; 4260 4261 wptr = mp1->b_wptr; 4262 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX); 4263 bytes_acked -= (int)(wptr - mp1->b_rptr); 4264 if (bytes_acked < 0) { 4265 mp1->b_rptr = wptr + bytes_acked; 4266 /* 4267 * Set a new timestamp if all the bytes timed by the 4268 * old timestamp have been ack'ed. 4269 */ 4270 if (SEQ_GT(seg_ack, 4271 (uint32_t)(uintptr_t)(mp1->b_next))) { 4272 mp1->b_prev = 4273 (mblk_t *)(uintptr_t)LBOLT_FASTPATH; 4274 mp1->b_next = NULL; 4275 } 4276 break; 4277 } 4278 mp1->b_next = NULL; 4279 mp1->b_prev = NULL; 4280 mp2 = mp1; 4281 mp1 = mp1->b_cont; 4282 4283 /* 4284 * This notification is required for some zero-copy 4285 * clients to maintain a copy semantic. After the data 4286 * is ack'ed, client is safe to modify or reuse the buffer. 4287 */ 4288 if (tcp->tcp_snd_zcopy_aware && 4289 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY)) 4290 tcp_zcopy_notify(tcp); 4291 freeb(mp2); 4292 if (bytes_acked == 0) { 4293 if (mp1 == NULL) { 4294 /* Everything is ack'ed, clear the tail. */ 4295 tcp->tcp_xmit_tail = NULL; 4296 /* 4297 * Cancel the timer unless we are still 4298 * waiting for an ACK for the FIN packet. 4299 */ 4300 if (tcp->tcp_timer_tid != 0 && 4301 tcp->tcp_snxt == tcp->tcp_suna) { 4302 (void) TCP_TIMER_CANCEL(tcp, 4303 tcp->tcp_timer_tid); 4304 tcp->tcp_timer_tid = 0; 4305 } 4306 goto pre_swnd_update; 4307 } 4308 if (mp2 != tcp->tcp_xmit_tail) 4309 break; 4310 tcp->tcp_xmit_tail = mp1; 4311 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 4312 (uintptr_t)INT_MAX); 4313 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr - 4314 mp1->b_rptr); 4315 break; 4316 } 4317 if (mp1 == NULL) { 4318 /* 4319 * More was acked but there is nothing more 4320 * outstanding. This means that the FIN was 4321 * just acked or that we're talking to a clown. 
4322 */ 4323 fin_acked: 4324 ASSERT(tcp->tcp_fin_sent); 4325 tcp->tcp_xmit_tail = NULL; 4326 if (tcp->tcp_fin_sent) { 4327 /* FIN was acked - making progress */ 4328 if (!tcp->tcp_fin_acked) 4329 tcp->tcp_ip_forward_progress = B_TRUE; 4330 tcp->tcp_fin_acked = B_TRUE; 4331 if (tcp->tcp_linger_tid != 0 && 4332 TCP_TIMER_CANCEL(tcp, 4333 tcp->tcp_linger_tid) >= 0) { 4334 tcp_stop_lingering(tcp); 4335 freemsg(mp); 4336 mp = NULL; 4337 } 4338 } else { 4339 /* 4340 * We should never get here because 4341 * we have already checked that the 4342 * number of bytes ack'ed should be 4343 * smaller than or equal to what we 4344 * have sent so far (it is the 4345 * acceptability check of the ACK). 4346 * We can only get here if the send 4347 * queue is corrupted. 4348 * 4349 * Terminate the connection and 4350 * panic the system. It is better 4351 * for us to panic instead of 4352 * continuing to avoid other disaster. 4353 */ 4354 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 4355 tcp->tcp_rnxt, TH_RST|TH_ACK); 4356 panic("Memory corruption " 4357 "detected for connection %s.", 4358 tcp_display(tcp, NULL, 4359 DISP_ADDR_AND_PORT)); 4360 /*NOTREACHED*/ 4361 } 4362 goto pre_swnd_update; 4363 } 4364 ASSERT(mp2 != tcp->tcp_xmit_tail); 4365 } 4366 if (tcp->tcp_unsent) { 4367 flags |= TH_XMIT_NEEDED; 4368 } 4369 pre_swnd_update: 4370 tcp->tcp_xmit_head = mp1; 4371 swnd_update: 4372 /* 4373 * The following check is different from most other implementations. 4374 * For bi-directional transfer, when segments are dropped, the 4375 * "normal" check will not accept a window update in those 4376 * retransmitted segemnts. Failing to do that, TCP may send out 4377 * segments which are outside receiver's window. As TCP accepts 4378 * the ack in those retransmitted segments, if the window update in 4379 * the same segment is not accepted, TCP will incorrectly calculates 4380 * that it can send more segments. This can create a deadlock 4381 * with the receiver if its window becomes zero. 4382 */ 4383 if (SEQ_LT(tcp->tcp_swl2, seg_ack) || 4384 SEQ_LT(tcp->tcp_swl1, seg_seq) || 4385 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) { 4386 /* 4387 * The criteria for update is: 4388 * 4389 * 1. the segment acknowledges some data. Or 4390 * 2. the segment is new, i.e. it has a higher seq num. Or 4391 * 3. the segment is not old and the advertised window is 4392 * larger than the previous advertised window. 4393 */ 4394 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd) 4395 flags |= TH_XMIT_NEEDED; 4396 tcp->tcp_swnd = new_swnd; 4397 if (new_swnd > tcp->tcp_max_swnd) 4398 tcp->tcp_max_swnd = new_swnd; 4399 tcp->tcp_swl1 = seg_seq; 4400 tcp->tcp_swl2 = seg_ack; 4401 } 4402 est: 4403 if (tcp->tcp_state > TCPS_ESTABLISHED) { 4404 4405 switch (tcp->tcp_state) { 4406 case TCPS_FIN_WAIT_1: 4407 if (tcp->tcp_fin_acked) { 4408 tcp->tcp_state = TCPS_FIN_WAIT_2; 4409 DTRACE_TCP6(state__change, void, NULL, 4410 ip_xmit_attr_t *, connp->conn_ixa, 4411 void, NULL, tcp_t *, tcp, void, NULL, 4412 int32_t, TCPS_FIN_WAIT_1); 4413 /* 4414 * We implement the non-standard BSD/SunOS 4415 * FIN_WAIT_2 flushing algorithm. 4416 * If there is no user attached to this 4417 * TCP endpoint, then this TCP struct 4418 * could hang around forever in FIN_WAIT_2 4419 * state if the peer forgets to send us 4420 * a FIN. To prevent this, we wait only 4421 * 2*MSL (a convenient time value) for 4422 * the FIN to arrive. If it doesn't show up, 4423 * we flush the TCP endpoint. 
This algorithm, 4424 * though a violation of RFC-793, has worked 4425 * for over 10 years in BSD systems. 4426 * Note: SunOS 4.x waits 675 seconds before 4427 * flushing the FIN_WAIT_2 connection. 4428 */ 4429 TCP_TIMER_RESTART(tcp, 4430 tcps->tcps_fin_wait_2_flush_interval); 4431 } 4432 break; 4433 case TCPS_FIN_WAIT_2: 4434 break; /* Shutdown hook? */ 4435 case TCPS_LAST_ACK: 4436 freemsg(mp); 4437 if (tcp->tcp_fin_acked) { 4438 (void) tcp_clean_death(tcp, 0); 4439 return; 4440 } 4441 goto xmit_check; 4442 case TCPS_CLOSING: 4443 if (tcp->tcp_fin_acked) { 4444 SET_TIME_WAIT(tcps, tcp, connp); 4445 DTRACE_TCP6(state__change, void, NULL, 4446 ip_xmit_attr_t *, connp->conn_ixa, void, 4447 NULL, tcp_t *, tcp, void, NULL, int32_t, 4448 TCPS_CLOSING); 4449 } 4450 /*FALLTHRU*/ 4451 case TCPS_CLOSE_WAIT: 4452 freemsg(mp); 4453 goto xmit_check; 4454 default: 4455 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 4456 break; 4457 } 4458 } 4459 if (flags & TH_FIN) { 4460 /* Make sure we ack the fin */ 4461 flags |= TH_ACK_NEEDED; 4462 if (!tcp->tcp_fin_rcvd) { 4463 tcp->tcp_fin_rcvd = B_TRUE; 4464 tcp->tcp_rnxt++; 4465 tcpha = tcp->tcp_tcpha; 4466 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4467 4468 /* 4469 * Generate the ordrel_ind at the end unless we 4470 * are an eager guy. 4471 * In the eager case tcp_rsrv will do this when run 4472 * after tcp_accept is done. 4473 */ 4474 if (tcp->tcp_listener == NULL && 4475 !TCP_IS_DETACHED(tcp) && !tcp->tcp_hard_binding) 4476 flags |= TH_ORDREL_NEEDED; 4477 switch (tcp->tcp_state) { 4478 case TCPS_SYN_RCVD: 4479 tcp->tcp_state = TCPS_CLOSE_WAIT; 4480 DTRACE_TCP6(state__change, void, NULL, 4481 ip_xmit_attr_t *, connp->conn_ixa, 4482 void, NULL, tcp_t *, tcp, void, NULL, 4483 int32_t, TCPS_SYN_RCVD); 4484 /* Keepalive? */ 4485 break; 4486 case TCPS_ESTABLISHED: 4487 tcp->tcp_state = TCPS_CLOSE_WAIT; 4488 DTRACE_TCP6(state__change, void, NULL, 4489 ip_xmit_attr_t *, connp->conn_ixa, 4490 void, NULL, tcp_t *, tcp, void, NULL, 4491 int32_t, TCPS_ESTABLISHED); 4492 /* Keepalive? */ 4493 break; 4494 case TCPS_FIN_WAIT_1: 4495 if (!tcp->tcp_fin_acked) { 4496 tcp->tcp_state = TCPS_CLOSING; 4497 DTRACE_TCP6(state__change, void, NULL, 4498 ip_xmit_attr_t *, connp->conn_ixa, 4499 void, NULL, tcp_t *, tcp, void, 4500 NULL, int32_t, TCPS_FIN_WAIT_1); 4501 break; 4502 } 4503 /* FALLTHRU */ 4504 case TCPS_FIN_WAIT_2: 4505 SET_TIME_WAIT(tcps, tcp, connp); 4506 DTRACE_TCP6(state__change, void, NULL, 4507 ip_xmit_attr_t *, connp->conn_ixa, void, 4508 NULL, tcp_t *, tcp, void, NULL, int32_t, 4509 TCPS_FIN_WAIT_2); 4510 if (seg_len) { 4511 /* 4512 * implies data piggybacked on FIN. 4513 * break to handle data. 4514 */ 4515 break; 4516 } 4517 freemsg(mp); 4518 goto ack_check; 4519 } 4520 } 4521 } 4522 if (mp == NULL) 4523 goto xmit_check; 4524 if (seg_len == 0) { 4525 freemsg(mp); 4526 goto xmit_check; 4527 } 4528 if (mp->b_rptr == mp->b_wptr) { 4529 /* 4530 * The header has been consumed, so we remove the 4531 * zero-length mblk here. 4532 */ 4533 mp1 = mp; 4534 mp = mp->b_cont; 4535 freeb(mp1); 4536 } 4537 update_ack: 4538 tcpha = tcp->tcp_tcpha; 4539 tcp->tcp_rack_cnt++; 4540 { 4541 uint32_t cur_max; 4542 4543 cur_max = tcp->tcp_rack_cur_max; 4544 if (tcp->tcp_rack_cnt >= cur_max) { 4545 /* 4546 * We have more unacked data than we should - send 4547 * an ACK now. 
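 * Note that each time this limit trips, tcp_rack_cur_max is nudged
 * up by one below (capped at tcp_rack_abs_max), so a connection
 * that keeps receiving back-to-back segments gradually tolerates
 * more unacked data before forcing an immediate ACK, while the
 * delayed ACK timer still bounds how long any ACK is held back.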
4548 */ 4549 flags |= TH_ACK_NEEDED; 4550 cur_max++; 4551 if (cur_max > tcp->tcp_rack_abs_max) 4552 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 4553 else 4554 tcp->tcp_rack_cur_max = cur_max; 4555 } else if (TCP_IS_DETACHED(tcp)) { 4556 /* We don't have an ACK timer for detached TCP. */ 4557 flags |= TH_ACK_NEEDED; 4558 } else if (seg_len < mss) { 4559 /* 4560 * If we get a segment that is less than an mss, and we 4561 * already have unacknowledged data, and the amount 4562 * unacknowledged is not a multiple of mss, then we 4563 * better generate an ACK now. Otherwise, this may be 4564 * the tail piece of a transaction, and we would rather 4565 * wait for the response. 4566 */ 4567 uint32_t udif; 4568 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <= 4569 (uintptr_t)INT_MAX); 4570 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack); 4571 if (udif && (udif % mss)) 4572 flags |= TH_ACK_NEEDED; 4573 else 4574 flags |= TH_ACK_TIMER_NEEDED; 4575 } else { 4576 /* Start delayed ack timer */ 4577 flags |= TH_ACK_TIMER_NEEDED; 4578 } 4579 } 4580 tcp->tcp_rnxt += seg_len; 4581 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4582 4583 if (mp == NULL) 4584 goto xmit_check; 4585 4586 /* Update SACK list */ 4587 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 4588 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt, 4589 &(tcp->tcp_num_sack_blk)); 4590 } 4591 4592 if (tcp->tcp_urp_mp) { 4593 tcp->tcp_urp_mp->b_cont = mp; 4594 mp = tcp->tcp_urp_mp; 4595 tcp->tcp_urp_mp = NULL; 4596 /* Ready for a new signal. */ 4597 tcp->tcp_urp_last_valid = B_FALSE; 4598 #ifdef DEBUG 4599 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4600 "tcp_rput: sending exdata_ind %s", 4601 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4602 #endif /* DEBUG */ 4603 } 4604 4605 /* 4606 * Check for ancillary data changes compared to last segment. 4607 */ 4608 if (connp->conn_recv_ancillary.crb_all != 0) { 4609 mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira); 4610 if (mp == NULL) 4611 return; 4612 } 4613 4614 if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) { 4615 /* 4616 * Side queue inbound data until the accept happens. 4617 * tcp_accept/tcp_rput drains this when the accept happens. 4618 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or 4619 * T_EXDATA_IND) it is queued on b_next. 4620 * XXX Make urgent data use this. Requires: 4621 * Removing tcp_listener check for TH_URG 4622 * Making M_PCPROTO and MARK messages skip the eager case 4623 */ 4624 4625 if (tcp->tcp_kssl_pending) { 4626 DTRACE_PROBE1(kssl_mblk__ksslinput_pending, 4627 mblk_t *, mp); 4628 tcp_kssl_input(tcp, mp, ira->ira_cred); 4629 } else { 4630 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4631 } 4632 } else if (IPCL_IS_NONSTR(connp)) { 4633 /* 4634 * Non-STREAMS socket 4635 * 4636 * Note that no KSSL processing is done here, because 4637 * KSSL is not supported for non-STREAMS sockets. 4638 */ 4639 boolean_t push = flags & (TH_PUSH|TH_FIN); 4640 int error; 4641 4642 if ((*connp->conn_upcalls->su_recv)( 4643 connp->conn_upper_handle, 4644 mp, seg_len, 0, &error, &push) <= 0) { 4645 /* 4646 * We should never be in middle of a 4647 * fallback, the squeue guarantees that. 
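 * (If the upcall instead reports ENOSPC, the socket is flow
 * controlled; the code below charges seg_len against tcp_rwnd so
 * the advertised window shrinks, and tcp_rwnd_reopen() opens it
 * back up once enough receive space is available again.)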
4648 */ 4649 ASSERT(error != EOPNOTSUPP); 4650 if (error == ENOSPC) 4651 tcp->tcp_rwnd -= seg_len; 4652 } else if (push) { 4653 /* PUSH bit set and sockfs is not flow controlled */ 4654 flags |= tcp_rwnd_reopen(tcp); 4655 } 4656 } else { 4657 /* STREAMS socket */ 4658 if (mp->b_datap->db_type != M_DATA || 4659 (flags & TH_MARKNEXT_NEEDED)) { 4660 if (tcp->tcp_rcv_list != NULL) { 4661 flags |= tcp_rcv_drain(tcp); 4662 } 4663 ASSERT(tcp->tcp_rcv_list == NULL || 4664 tcp->tcp_fused_sigurg); 4665 4666 if (flags & TH_MARKNEXT_NEEDED) { 4667 #ifdef DEBUG 4668 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4669 "tcp_rput: sending MSGMARKNEXT %s", 4670 tcp_display(tcp, NULL, 4671 DISP_PORT_ONLY)); 4672 #endif /* DEBUG */ 4673 mp->b_flag |= MSGMARKNEXT; 4674 flags &= ~TH_MARKNEXT_NEEDED; 4675 } 4676 4677 /* Does this need SSL processing first? */ 4678 if ((tcp->tcp_kssl_ctx != NULL) && 4679 (DB_TYPE(mp) == M_DATA)) { 4680 DTRACE_PROBE1(kssl_mblk__ksslinput_data1, 4681 mblk_t *, mp); 4682 tcp_kssl_input(tcp, mp, ira->ira_cred); 4683 } else { 4684 if (is_system_labeled()) 4685 tcp_setcred_data(mp, ira); 4686 4687 putnext(connp->conn_rq, mp); 4688 if (!canputnext(connp->conn_rq)) 4689 tcp->tcp_rwnd -= seg_len; 4690 } 4691 } else if ((tcp->tcp_kssl_ctx != NULL) && 4692 (DB_TYPE(mp) == M_DATA)) { 4693 /* Does this need SSL processing first? */ 4694 DTRACE_PROBE1(kssl_mblk__ksslinput_data2, mblk_t *, mp); 4695 tcp_kssl_input(tcp, mp, ira->ira_cred); 4696 } else if ((flags & (TH_PUSH|TH_FIN)) || 4697 tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) { 4698 if (tcp->tcp_rcv_list != NULL) { 4699 /* 4700 * Enqueue the new segment first and then 4701 * call tcp_rcv_drain() to send all data 4702 * up. The other way to do this is to 4703 * send all queued data up and then call 4704 * putnext() to send the new segment up. 4705 * This way can remove the else part later 4706 * on. 4707 * 4708 * We don't do this to avoid one more call to 4709 * canputnext() as tcp_rcv_drain() needs to 4710 * call canputnext(). 4711 */ 4712 tcp_rcv_enqueue(tcp, mp, seg_len, 4713 ira->ira_cred); 4714 flags |= tcp_rcv_drain(tcp); 4715 } else { 4716 if (is_system_labeled()) 4717 tcp_setcred_data(mp, ira); 4718 4719 putnext(connp->conn_rq, mp); 4720 if (!canputnext(connp->conn_rq)) 4721 tcp->tcp_rwnd -= seg_len; 4722 } 4723 } else { 4724 /* 4725 * Enqueue all packets when processing an mblk 4726 * from the co queue and also enqueue normal packets. 4727 */ 4728 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4729 } 4730 /* 4731 * Make sure the timer is running if we have data waiting 4732 * for a push bit. This provides resiliency against 4733 * implementations that do not correctly generate push bits. 4734 */ 4735 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 4736 /* 4737 * The connection may be closed at this point, so don't 4738 * do anything for a detached tcp. 4739 */ 4740 if (!TCP_IS_DETACHED(tcp)) 4741 tcp->tcp_push_tid = TCP_TIMER(tcp, 4742 tcp_push_timer, 4743 tcps->tcps_push_timer_interval); 4744 } 4745 } 4746 4747 xmit_check: 4748 /* Is there anything left to do? */ 4749 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4750 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 4751 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 4752 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4753 goto done; 4754 4755 /* Any transmit work to do and a non-zero window? 
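 * Roughly: TH_REXMIT_NEEDED retransmits a single segment of at
 * most one MSS (and no more than the send window) starting at
 * tcp_suna; TH_NEED_SACK_REXMIT lets tcp_sack_rexmit() fill the
 * holes recorded in the notsack list; TH_XMIT_NEEDED and
 * TH_LIMIT_XMIT hand control to tcp_wput_data(), or to
 * tcp_ss_rexmit() when we are in slow start retransmission, to
 * push out whatever the windows allow.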
*/ 4756 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 4757 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 4758 if (flags & TH_REXMIT_NEEDED) { 4759 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 4760 4761 TCPS_BUMP_MIB(tcps, tcpOutFastRetrans); 4762 if (snd_size > mss) 4763 snd_size = mss; 4764 if (snd_size > tcp->tcp_swnd) 4765 snd_size = tcp->tcp_swnd; 4766 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 4767 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 4768 B_TRUE); 4769 4770 if (mp1 != NULL) { 4771 tcp->tcp_xmit_head->b_prev = 4772 (mblk_t *)LBOLT_FASTPATH; 4773 tcp->tcp_csuna = tcp->tcp_snxt; 4774 TCPS_BUMP_MIB(tcps, tcpRetransSegs); 4775 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, 4776 snd_size); 4777 tcp_send_data(tcp, mp1); 4778 } 4779 } 4780 if (flags & TH_NEED_SACK_REXMIT) { 4781 tcp_sack_rexmit(tcp, &flags); 4782 } 4783 /* 4784 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 4785 * out new segment. Note that tcp_rexmit should not be 4786 * set, otherwise TH_LIMIT_XMIT should not be set. 4787 */ 4788 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 4789 if (!tcp->tcp_rexmit) { 4790 tcp_wput_data(tcp, NULL, B_FALSE); 4791 } else { 4792 tcp_ss_rexmit(tcp); 4793 } 4794 } 4795 /* 4796 * Adjust tcp_cwnd back to normal value after sending 4797 * new data segments. 4798 */ 4799 if (flags & TH_LIMIT_XMIT) { 4800 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 4801 /* 4802 * This will restart the timer. Restarting the 4803 * timer is used to avoid a timeout before the 4804 * limited transmitted segment's ACK gets back. 4805 */ 4806 if (tcp->tcp_xmit_head != NULL) 4807 tcp->tcp_xmit_head->b_prev = 4808 (mblk_t *)LBOLT_FASTPATH; 4809 } 4810 4811 /* Anything more to do? */ 4812 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 4813 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4814 goto done; 4815 } 4816 ack_check: 4817 if (flags & TH_SEND_URP_MARK) { 4818 ASSERT(tcp->tcp_urp_mark_mp); 4819 ASSERT(!IPCL_IS_NONSTR(connp)); 4820 /* 4821 * Send up any queued data and then send the mark message 4822 */ 4823 if (tcp->tcp_rcv_list != NULL) { 4824 flags |= tcp_rcv_drain(tcp); 4825 4826 } 4827 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4828 mp1 = tcp->tcp_urp_mark_mp; 4829 tcp->tcp_urp_mark_mp = NULL; 4830 if (is_system_labeled()) 4831 tcp_setcred_data(mp1, ira); 4832 4833 putnext(connp->conn_rq, mp1); 4834 #ifdef DEBUG 4835 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4836 "tcp_rput: sending zero-length %s %s", 4837 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 4838 "MSGNOTMARKNEXT"), 4839 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4840 #endif /* DEBUG */ 4841 flags &= ~TH_SEND_URP_MARK; 4842 } 4843 if (flags & TH_ACK_NEEDED) { 4844 /* 4845 * Time to send an ack for some reason. 4846 */ 4847 mp1 = tcp_ack_mp(tcp); 4848 4849 if (mp1 != NULL) { 4850 tcp_send_data(tcp, mp1); 4851 BUMP_LOCAL(tcp->tcp_obsegs); 4852 TCPS_BUMP_MIB(tcps, tcpOutAck); 4853 } 4854 if (tcp->tcp_ack_tid != 0) { 4855 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4856 tcp->tcp_ack_tid = 0; 4857 } 4858 } 4859 if (flags & TH_ACK_TIMER_NEEDED) { 4860 /* 4861 * Arrange for deferred ACK or push wait timeout. 4862 * Start timer if it is not already running. 4863 */ 4864 if (tcp->tcp_ack_tid == 0) { 4865 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 4866 tcp->tcp_localnet ? 4867 tcps->tcps_local_dack_interval : 4868 tcps->tcps_deferred_ack_interval); 4869 } 4870 } 4871 if (flags & TH_ORDREL_NEEDED) { 4872 /* 4873 * Send up the ordrel_ind unless we are an eager guy. 
4874 * In the eager case tcp_rsrv will do this when run 4875 * after tcp_accept is done. 4876 */ 4877 ASSERT(tcp->tcp_listener == NULL); 4878 ASSERT(!tcp->tcp_detached); 4879 4880 if (IPCL_IS_NONSTR(connp)) { 4881 ASSERT(tcp->tcp_ordrel_mp == NULL); 4882 tcp->tcp_ordrel_done = B_TRUE; 4883 (*connp->conn_upcalls->su_opctl) 4884 (connp->conn_upper_handle, SOCK_OPCTL_SHUT_RECV, 0); 4885 goto done; 4886 } 4887 4888 if (tcp->tcp_rcv_list != NULL) { 4889 /* 4890 * Push any mblk(s) enqueued from co processing. 4891 */ 4892 flags |= tcp_rcv_drain(tcp); 4893 } 4894 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4895 4896 mp1 = tcp->tcp_ordrel_mp; 4897 tcp->tcp_ordrel_mp = NULL; 4898 tcp->tcp_ordrel_done = B_TRUE; 4899 putnext(connp->conn_rq, mp1); 4900 } 4901 done: 4902 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4903 } 4904 4905 /* 4906 * Attach ancillary data to a received TCP segments for the 4907 * ancillary pieces requested by the application that are 4908 * different than they were in the previous data segment. 4909 * 4910 * Save the "current" values once memory allocation is ok so that 4911 * when memory allocation fails we can just wait for the next data segment. 4912 */ 4913 static mblk_t * 4914 tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp, 4915 ip_recv_attr_t *ira) 4916 { 4917 struct T_optdata_ind *todi; 4918 int optlen; 4919 uchar_t *optptr; 4920 struct T_opthdr *toh; 4921 crb_t addflag; /* Which pieces to add */ 4922 mblk_t *mp1; 4923 conn_t *connp = tcp->tcp_connp; 4924 4925 optlen = 0; 4926 addflag.crb_all = 0; 4927 /* If app asked for pktinfo and the index has changed ... */ 4928 if (connp->conn_recv_ancillary.crb_ip_recvpktinfo && 4929 ira->ira_ruifindex != tcp->tcp_recvifindex) { 4930 optlen += sizeof (struct T_opthdr) + 4931 sizeof (struct in6_pktinfo); 4932 addflag.crb_ip_recvpktinfo = 1; 4933 } 4934 /* If app asked for hoplimit and it has changed ... */ 4935 if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit && 4936 ipp->ipp_hoplimit != tcp->tcp_recvhops) { 4937 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 4938 addflag.crb_ipv6_recvhoplimit = 1; 4939 } 4940 /* If app asked for tclass and it has changed ... */ 4941 if (connp->conn_recv_ancillary.crb_ipv6_recvtclass && 4942 ipp->ipp_tclass != tcp->tcp_recvtclass) { 4943 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 4944 addflag.crb_ipv6_recvtclass = 1; 4945 } 4946 /* 4947 * If app asked for hopbyhop headers and it has changed ... 4948 * For security labels, note that (1) security labels can't change on 4949 * a connected socket at all, (2) we're connected to at most one peer, 4950 * (3) if anything changes, then it must be some other extra option. 4951 */ 4952 if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts && 4953 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 4954 (ipp->ipp_fields & IPPF_HOPOPTS), 4955 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 4956 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen; 4957 addflag.crb_ipv6_recvhopopts = 1; 4958 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 4959 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 4960 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 4961 return (mp); 4962 } 4963 /* If app asked for dst headers before routing headers ... 
*/ 4964 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts && 4965 ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen, 4966 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 4967 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) { 4968 optlen += sizeof (struct T_opthdr) + 4969 ipp->ipp_rthdrdstoptslen; 4970 addflag.crb_ipv6_recvrthdrdstopts = 1; 4971 if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts, 4972 &tcp->tcp_rthdrdstoptslen, 4973 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 4974 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) 4975 return (mp); 4976 } 4977 /* If app asked for routing headers and it has changed ... */ 4978 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr && 4979 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 4980 (ipp->ipp_fields & IPPF_RTHDR), 4981 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 4982 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 4983 addflag.crb_ipv6_recvrthdr = 1; 4984 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 4985 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 4986 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 4987 return (mp); 4988 } 4989 /* If app asked for dest headers and it has changed ... */ 4990 if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts || 4991 connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) && 4992 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 4993 (ipp->ipp_fields & IPPF_DSTOPTS), 4994 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 4995 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 4996 addflag.crb_ipv6_recvdstopts = 1; 4997 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 4998 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 4999 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 5000 return (mp); 5001 } 5002 5003 if (optlen == 0) { 5004 /* Nothing to add */ 5005 return (mp); 5006 } 5007 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 5008 if (mp1 == NULL) { 5009 /* 5010 * Defer sending ancillary data until the next TCP segment 5011 * arrives. 5012 */ 5013 return (mp); 5014 } 5015 mp1->b_cont = mp; 5016 mp = mp1; 5017 mp->b_wptr += sizeof (*todi) + optlen; 5018 mp->b_datap->db_type = M_PROTO; 5019 todi = (struct T_optdata_ind *)mp->b_rptr; 5020 todi->PRIM_type = T_OPTDATA_IND; 5021 todi->DATA_flag = 1; /* MORE data */ 5022 todi->OPT_length = optlen; 5023 todi->OPT_offset = sizeof (*todi); 5024 optptr = (uchar_t *)&todi[1]; 5025 /* 5026 * If app asked for pktinfo and the index has changed ... 5027 * Note that the local address never changes for the connection. 5028 */ 5029 if (addflag.crb_ip_recvpktinfo) { 5030 struct in6_pktinfo *pkti; 5031 uint_t ifindex; 5032 5033 ifindex = ira->ira_ruifindex; 5034 toh = (struct T_opthdr *)optptr; 5035 toh->level = IPPROTO_IPV6; 5036 toh->name = IPV6_PKTINFO; 5037 toh->len = sizeof (*toh) + sizeof (*pkti); 5038 toh->status = 0; 5039 optptr += sizeof (*toh); 5040 pkti = (struct in6_pktinfo *)optptr; 5041 pkti->ipi6_addr = connp->conn_laddr_v6; 5042 pkti->ipi6_ifindex = ifindex; 5043 optptr += sizeof (*pkti); 5044 ASSERT(OK_32PTR(optptr)); 5045 /* Save as "last" value */ 5046 tcp->tcp_recvifindex = ifindex; 5047 } 5048 /* If app asked for hoplimit and it has changed ... 
*/ 5049 if (addflag.crb_ipv6_recvhoplimit) { 5050 toh = (struct T_opthdr *)optptr; 5051 toh->level = IPPROTO_IPV6; 5052 toh->name = IPV6_HOPLIMIT; 5053 toh->len = sizeof (*toh) + sizeof (uint_t); 5054 toh->status = 0; 5055 optptr += sizeof (*toh); 5056 *(uint_t *)optptr = ipp->ipp_hoplimit; 5057 optptr += sizeof (uint_t); 5058 ASSERT(OK_32PTR(optptr)); 5059 /* Save as "last" value */ 5060 tcp->tcp_recvhops = ipp->ipp_hoplimit; 5061 } 5062 /* If app asked for tclass and it has changed ... */ 5063 if (addflag.crb_ipv6_recvtclass) { 5064 toh = (struct T_opthdr *)optptr; 5065 toh->level = IPPROTO_IPV6; 5066 toh->name = IPV6_TCLASS; 5067 toh->len = sizeof (*toh) + sizeof (uint_t); 5068 toh->status = 0; 5069 optptr += sizeof (*toh); 5070 *(uint_t *)optptr = ipp->ipp_tclass; 5071 optptr += sizeof (uint_t); 5072 ASSERT(OK_32PTR(optptr)); 5073 /* Save as "last" value */ 5074 tcp->tcp_recvtclass = ipp->ipp_tclass; 5075 } 5076 if (addflag.crb_ipv6_recvhopopts) { 5077 toh = (struct T_opthdr *)optptr; 5078 toh->level = IPPROTO_IPV6; 5079 toh->name = IPV6_HOPOPTS; 5080 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen; 5081 toh->status = 0; 5082 optptr += sizeof (*toh); 5083 bcopy((uchar_t *)ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen); 5084 optptr += ipp->ipp_hopoptslen; 5085 ASSERT(OK_32PTR(optptr)); 5086 /* Save as last value */ 5087 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 5088 (ipp->ipp_fields & IPPF_HOPOPTS), 5089 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 5090 } 5091 if (addflag.crb_ipv6_recvrthdrdstopts) { 5092 toh = (struct T_opthdr *)optptr; 5093 toh->level = IPPROTO_IPV6; 5094 toh->name = IPV6_RTHDRDSTOPTS; 5095 toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen; 5096 toh->status = 0; 5097 optptr += sizeof (*toh); 5098 bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen); 5099 optptr += ipp->ipp_rthdrdstoptslen; 5100 ASSERT(OK_32PTR(optptr)); 5101 /* Save as last value */ 5102 ip_savebuf((void **)&tcp->tcp_rthdrdstopts, 5103 &tcp->tcp_rthdrdstoptslen, 5104 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5105 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen); 5106 } 5107 if (addflag.crb_ipv6_recvrthdr) { 5108 toh = (struct T_opthdr *)optptr; 5109 toh->level = IPPROTO_IPV6; 5110 toh->name = IPV6_RTHDR; 5111 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 5112 toh->status = 0; 5113 optptr += sizeof (*toh); 5114 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 5115 optptr += ipp->ipp_rthdrlen; 5116 ASSERT(OK_32PTR(optptr)); 5117 /* Save as last value */ 5118 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 5119 (ipp->ipp_fields & IPPF_RTHDR), 5120 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 5121 } 5122 if (addflag.crb_ipv6_recvdstopts) { 5123 toh = (struct T_opthdr *)optptr; 5124 toh->level = IPPROTO_IPV6; 5125 toh->name = IPV6_DSTOPTS; 5126 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 5127 toh->status = 0; 5128 optptr += sizeof (*toh); 5129 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 5130 optptr += ipp->ipp_dstoptslen; 5131 ASSERT(OK_32PTR(optptr)); 5132 /* Save as last value */ 5133 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 5134 (ipp->ipp_fields & IPPF_DSTOPTS), 5135 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 5136 } 5137 ASSERT(optptr == mp->b_wptr); 5138 return (mp); 5139 } 5140 5141 /* The minimum of smoothed mean deviation in RTO calculation. */ 5142 #define TCP_SD_MIN 400 5143 5144 /* 5145 * Set RTO for this connection. The formula is from Jacobson and Karels' 5146 * "Congestion Avoidance and Control" in SIGCOMM '88. 
The variable names 5147 * are the same as those in Appendix A.2 of that paper. 5148 * 5149 * m = new measurement 5150 * sa = smoothed RTT average (8 * average estimates). 5151 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates). 5152 */ 5153 static void 5154 tcp_set_rto(tcp_t *tcp, clock_t rtt) 5155 { 5156 long m = TICK_TO_MSEC(rtt); 5157 clock_t sa = tcp->tcp_rtt_sa; 5158 clock_t sv = tcp->tcp_rtt_sd; 5159 clock_t rto; 5160 tcp_stack_t *tcps = tcp->tcp_tcps; 5161 5162 TCPS_BUMP_MIB(tcps, tcpRttUpdate); 5163 tcp->tcp_rtt_update++; 5164 5165 /* A non-zero tcp_rtt_sa means there is an existing estimate to update. */ 5166 if (sa != 0) { 5167 /* 5168 * Update average estimator: 5169 * new rtt = 7/8 old rtt + 1/8 Error 5170 */ 5171 5172 /* m is now Error in estimate. */ 5173 m -= sa >> 3; 5174 if ((sa += m) <= 0) { 5175 /* 5176 * Don't allow the smoothed average to be negative. 5177 * We use 0 to denote reinitialization of the 5178 * variables. 5179 */ 5180 sa = 1; 5181 } 5182 5183 /* 5184 * Update deviation estimator: 5185 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev) 5186 */ 5187 if (m < 0) 5188 m = -m; 5189 m -= sv >> 2; 5190 sv += m; 5191 } else { 5192 /* 5193 * This follows BSD's implementation, so the reinitialized 5194 * RTO is 3 * m. We cannot go lower than 2 * m because if the 5195 * link is bandwidth dominated, doubling the window size 5196 * during slow start means doubling the RTT. We want to be 5197 * more conservative when we reinitialize our estimates. 3 5198 * is just a convenient number. 5199 */ 5200 sa = m << 3; 5201 sv = m << 1; 5202 } 5203 if (sv < TCP_SD_MIN) { 5204 /* 5205 * We do not know whether sa captures the delayed ACK 5206 * effect; in a long train of segments, a receiver 5207 * does not delay its ACKs. So set the minimum of sv 5208 * to TCP_SD_MIN, which defaults to 400 ms, twice 5209 * the BSD DATO. That means the minimum 5210 * mean deviation is 100 ms. 5211 * 5212 */ 5213 sv = TCP_SD_MIN; 5214 } 5215 tcp->tcp_rtt_sa = sa; 5216 tcp->tcp_rtt_sd = sv; 5217 /* 5218 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv) 5219 * 5220 * Add tcp_rexmit_interval_extra in case of an extreme environment 5221 * where the algorithm fails to work. The default value of 5222 * tcp_rexmit_interval_extra should be 0. 5223 * 5224 * As we use a finer grained clock than BSD and update 5225 * RTO for every ACK, add in another .25 of RTT to the 5226 * deviation of RTO to accommodate burstiness of 1/4 of 5227 * window size. (For example, a steady 100 ms RTT gives sa of about 800 and sv clamped to TCP_SD_MIN, so rto is roughly 100 + 400 + 0 + 25 = 525 ms before the min/max clamping below.) 5228 */ 5229 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5); 5230 5231 if (rto > tcps->tcps_rexmit_interval_max) { 5232 tcp->tcp_rto = tcps->tcps_rexmit_interval_max; 5233 } else if (rto < tcps->tcps_rexmit_interval_min) { 5234 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 5235 } else { 5236 tcp->tcp_rto = rto; 5237 } 5238 5239 /* Now, we can reset tcp_timer_backoff to use the new RTO... */ 5240 tcp->tcp_timer_backoff = 0; 5241 } 5242 5243 /* 5244 * On a labeled system we have some protocols above TCP, such as RPC, which 5245 * appear to assume that every mblk in a chain has a db_credp.
5246 */ 5247 static void 5248 tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira) 5249 { 5250 ASSERT(is_system_labeled()); 5251 ASSERT(ira->ira_cred != NULL); 5252 5253 while (mp != NULL) { 5254 mblk_setcred(mp, ira->ira_cred, NOPID); 5255 mp = mp->b_cont; 5256 } 5257 } 5258 5259 uint_t 5260 tcp_rwnd_reopen(tcp_t *tcp) 5261 { 5262 uint_t ret = 0; 5263 uint_t thwin; 5264 conn_t *connp = tcp->tcp_connp; 5265 5266 /* Learn the latest rwnd information that we sent to the other side. */ 5267 thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win)) 5268 << tcp->tcp_rcv_ws; 5269 /* This is peer's calculated send window (our receive window). */ 5270 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 5271 /* 5272 * Increase the receive window to max. But we need to do receiver 5273 * SWS avoidance. This means that we need to check the increase of 5274 * of receive window is at least 1 MSS. 5275 */ 5276 if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) { 5277 /* 5278 * If the window that the other side knows is less than max 5279 * deferred acks segments, send an update immediately. 5280 */ 5281 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) { 5282 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutWinUpdate); 5283 ret = TH_ACK_NEEDED; 5284 } 5285 tcp->tcp_rwnd = connp->conn_rcvbuf; 5286 } 5287 return (ret); 5288 } 5289 5290 /* 5291 * Handle a packet that has been reclassified by TCP. 5292 * This function drops the ref on connp that the caller had. 5293 */ 5294 void 5295 tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst) 5296 { 5297 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 5298 5299 if (connp->conn_incoming_ifindex != 0 && 5300 connp->conn_incoming_ifindex != ira->ira_ruifindex) { 5301 freemsg(mp); 5302 CONN_DEC_REF(connp); 5303 return; 5304 } 5305 5306 if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) || 5307 (ira->ira_flags & IRAF_IPSEC_SECURE)) { 5308 ip6_t *ip6h; 5309 ipha_t *ipha; 5310 5311 if (ira->ira_flags & IRAF_IS_IPV4) { 5312 ipha = (ipha_t *)mp->b_rptr; 5313 ip6h = NULL; 5314 } else { 5315 ipha = NULL; 5316 ip6h = (ip6_t *)mp->b_rptr; 5317 } 5318 mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira); 5319 if (mp == NULL) { 5320 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 5321 /* Note that mp is NULL */ 5322 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 5323 CONN_DEC_REF(connp); 5324 return; 5325 } 5326 } 5327 5328 if (IPCL_IS_TCP(connp)) { 5329 /* 5330 * do not drain, certain use cases can blow 5331 * the stack 5332 */ 5333 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 5334 connp->conn_recv, connp, ira, 5335 SQ_NODRAIN, SQTAG_IP_TCP_INPUT); 5336 } else { 5337 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */ 5338 (connp->conn_recv)(connp, mp, NULL, 5339 ira); 5340 CONN_DEC_REF(connp); 5341 } 5342 5343 } 5344 5345 /* ARGSUSED */ 5346 static void 5347 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy) 5348 { 5349 conn_t *connp = (conn_t *)arg; 5350 tcp_t *tcp = connp->conn_tcp; 5351 queue_t *q = connp->conn_rq; 5352 5353 ASSERT(!IPCL_IS_NONSTR(connp)); 5354 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5355 tcp->tcp_rsrv_mp = mp; 5356 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5357 5358 if (TCP_IS_DETACHED(tcp) || q == NULL) { 5359 return; 5360 } 5361 5362 if (tcp->tcp_fused) { 5363 tcp_fuse_backenable(tcp); 5364 return; 5365 } 5366 5367 if (canputnext(q)) { 5368 /* Not flow-controlled, open rwnd */ 5369 tcp->tcp_rwnd = connp->conn_rcvbuf; 5370 5371 /* 5372 * Send back a window update immediately if TCP is above 5373 * ESTABLISHED state and the increase of the rcv window 5374 
* that the other side knows is at least 1 MSS after flow 5375 * control is lifted. 5376 */ 5377 if (tcp->tcp_state >= TCPS_ESTABLISHED && 5378 tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) { 5379 tcp_xmit_ctl(NULL, tcp, 5380 (tcp->tcp_swnd == 0) ? tcp->tcp_suna : 5381 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 5382 } 5383 } 5384 } 5385 5386 /* 5387 * The read side service routine is called mostly when we get back-enabled as a 5388 * result of flow control relief. Since we don't actually queue anything in 5389 * TCP, we have no data to send out of here. What we do is clear the receive 5390 * window, and send out a window update. 5391 */ 5392 void 5393 tcp_rsrv(queue_t *q) 5394 { 5395 conn_t *connp = Q_TO_CONN(q); 5396 tcp_t *tcp = connp->conn_tcp; 5397 mblk_t *mp; 5398 5399 /* No code does a putq on the read side */ 5400 ASSERT(q->q_first == NULL); 5401 5402 /* 5403 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already 5404 * been run. So just return. 5405 */ 5406 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5407 if ((mp = tcp->tcp_rsrv_mp) == NULL) { 5408 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5409 return; 5410 } 5411 tcp->tcp_rsrv_mp = NULL; 5412 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5413 5414 CONN_INC_REF(connp); 5415 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp, 5416 NULL, SQ_PROCESS, SQTAG_TCP_RSRV); 5417 } 5418 5419 /* At minimum we need 8 bytes in the TCP header for the lookup */ 5420 #define ICMP_MIN_TCP_HDR 8 5421 5422 /* 5423 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages 5424 * passed up by IP. The message is always received on the correct tcp_t. 5425 * Assumes that IP has pulled up everything up to and including the ICMP header. 5426 */ 5427 /* ARGSUSED2 */ 5428 void 5429 tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 5430 { 5431 conn_t *connp = (conn_t *)arg1; 5432 icmph_t *icmph; 5433 ipha_t *ipha; 5434 int iph_hdr_length; 5435 tcpha_t *tcpha; 5436 uint32_t seg_seq; 5437 tcp_t *tcp = connp->conn_tcp; 5438 5439 /* Assume IP provides aligned packets */ 5440 ASSERT(OK_32PTR(mp->b_rptr)); 5441 ASSERT((MBLKL(mp) >= sizeof (ipha_t))); 5442 5443 /* 5444 * Verify IP version. Anything other than IPv4 or IPv6 packet is sent 5445 * upstream. ICMPv6 is handled in tcp_icmp_error_ipv6. 5446 */ 5447 if (!(ira->ira_flags & IRAF_IS_IPV4)) { 5448 tcp_icmp_error_ipv6(tcp, mp, ira); 5449 return; 5450 } 5451 5452 /* Skip past the outer IP and ICMP headers */ 5453 iph_hdr_length = ira->ira_ip_hdr_length; 5454 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 5455 /* 5456 * If we don't have the correct outer IP header length 5457 * or if we don't have a complete inner IP header 5458 * drop it. 5459 */ 5460 if (iph_hdr_length < sizeof (ipha_t) || 5461 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 5462 noticmpv4: 5463 freemsg(mp); 5464 return; 5465 } 5466 ipha = (ipha_t *)&icmph[1]; 5467 5468 /* Skip past the inner IP and find the ULP header */ 5469 iph_hdr_length = IPH_HDR_LENGTH(ipha); 5470 tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length); 5471 /* 5472 * If we don't have the correct inner IP header length or if the ULP 5473 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR 5474 * bytes of TCP header, drop it. 
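 * (ICMP_MIN_TCP_HDR is 8 because an ICMP error is only guaranteed
 * to quote the offending datagram's IP header plus its first 8
 * bytes, which for TCP is exactly the source port, destination
 * port and sequence number - enough to match the connection and
 * run the sequence number checks done here.)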
/*
 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages
 * passed up by IP.  The message is always received on the correct tcp_t.
 * Assumes that IP has pulled up everything up to and including the ICMP
 * header.
 */
/* ARGSUSED2 */
void
tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	conn_t		*connp = (conn_t *)arg1;
	icmph_t		*icmph;
	ipha_t		*ipha;
	int		iph_hdr_length;
	tcpha_t		*tcpha;
	uint32_t	seg_seq;
	tcp_t		*tcp = connp->conn_tcp;

	/* Assume IP provides aligned packets */
	ASSERT(OK_32PTR(mp->b_rptr));
	ASSERT((MBLKL(mp) >= sizeof (ipha_t)));

	/*
	 * Verify the IP version.  Anything other than an IPv4 or IPv6 packet
	 * is sent upstream.  ICMPv6 is handled in tcp_icmp_error_ipv6.
	 */
	if (!(ira->ira_flags & IRAF_IS_IPV4)) {
		tcp_icmp_error_ipv6(tcp, mp, ira);
		return;
	}

	/* Skip past the outer IP and ICMP headers */
	iph_hdr_length = ira->ira_ip_hdr_length;
	icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
	/*
	 * If we don't have the correct outer IP header length
	 * or if we don't have a complete inner IP header,
	 * drop it.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) {
noticmpv4:
		freemsg(mp);
		return;
	}
	ipha = (ipha_t *)&icmph[1];

	/* Skip past the inner IP and find the ULP header */
	iph_hdr_length = IPH_HDR_LENGTH(ipha);
	tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length);
	/*
	 * If we don't have the correct inner IP header length or if the ULP
	 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR
	 * bytes of TCP header, drop it.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    ipha->ipha_protocol != IPPROTO_TCP ||
	    (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) {
		goto noticmpv4;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	switch (icmph->icmph_type) {
	case ICMP_DEST_UNREACHABLE:
		switch (icmph->icmph_code) {
		case ICMP_FRAGMENTATION_NEEDED:
			/*
			 * Update Path MTU, then try to send something out.
			 */
			tcp_update_pmtu(tcp, B_TRUE);
			tcp_rexmit_after_error(tcp);
			break;
		case ICMP_PORT_UNREACHABLE:
		case ICMP_PROTOCOL_UNREACHABLE:
			switch (tcp->tcp_state) {
			case TCPS_SYN_SENT:
			case TCPS_SYN_RCVD:
				/*
				 * ICMP can snipe away incipient
				 * TCP connections as long as
				 * the seq number is the same as the
				 * initial send seq number.
				 */
				if (seg_seq == tcp->tcp_iss) {
					(void) tcp_clean_death(tcp,
					    ECONNREFUSED);
				}
				break;
			}
			break;
		case ICMP_HOST_UNREACHABLE:
		case ICMP_NET_UNREACHABLE:
			/* Record the error in case we finally time out. */
			if (icmph->icmph_code == ICMP_HOST_UNREACHABLE)
				tcp->tcp_client_errno = EHOSTUNREACH;
			else
				tcp->tcp_client_errno = ENETUNREACH;
			if (tcp->tcp_state == TCPS_SYN_RCVD) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno);
				}
			}
			break;
		default:
			break;
		}
		break;
	case ICMP_SOURCE_QUENCH: {
		/*
		 * Use a global boolean to control whether TCP should respond
		 * to ICMP_SOURCE_QUENCH.  The default is false.
		 */
		if (tcp_icmp_source_quench) {
			/*
			 * Reduce the sending rate as if we got a
			 * retransmit timeout.
			 */
			uint32_t npkt;

			npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) /
			    tcp->tcp_mss;
			tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss;
			tcp->tcp_cwnd = tcp->tcp_mss;
			tcp->tcp_cwnd_cnt = 0;
		}
		break;
	}
	}
	freemsg(mp);
}
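
/*
 * For reference, the source-quench branch above shrinks the congestion
 * window the same way a retransmission timeout does: ssthresh drops to half
 * the data in flight (at least two segments) and cwnd collapses to one MSS.
 * The guarded sketch below works that arithmetic through with plain
 * integers; snxt, suna and mss are illustrative stand-ins for the tcp_t
 * fields, not kernel variables.
 */
#if 0	/* illustrative sketch only; not part of the build */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t snxt = 200000, suna = 100000, mss = 1460;
	uint32_t npkt, ssthresh, cwnd;

	npkt = ((snxt - suna) >> 1) / mss;	/* half the flight, in segments */
	ssthresh = (npkt > 2 ? npkt : 2) * mss;
	cwnd = mss;

	/* 100000 bytes in flight -> npkt = 34, ssthresh = 49640, cwnd = 1460 */
	printf("npkt=%u ssthresh=%u cwnd=%u\n", npkt, ssthresh, cwnd);
	return (0);
}
#endif
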
/*
 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6
 * error messages passed up by IP.
 * Assumes that IP has pulled up all the extension headers as well
 * as the ICMPv6 header.
 */
static void
tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira)
{
	icmp6_t		*icmp6;
	ip6_t		*ip6h;
	uint16_t	iph_hdr_length = ira->ira_ip_hdr_length;
	tcpha_t		*tcpha;
	uint8_t		*nexthdrp;
	uint32_t	seg_seq;

	/*
	 * Verify that we have a complete IP header.
	 */
	ASSERT((MBLKL(mp) >= sizeof (ip6_t)));

	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
	ip6h = (ip6_t *)&icmp6[1];
	/*
	 * Verify that we have a complete ICMP and inner IP header.
	 */
	if ((uchar_t *)&ip6h[1] > mp->b_wptr) {
noticmpv6:
		freemsg(mp);
		return;
	}

	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
		goto noticmpv6;
	tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
	/*
	 * Validate the inner header.  If the ULP is not IPPROTO_TCP or if we
	 * don't have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop the
	 * packet.
	 */
	if ((*nexthdrp != IPPROTO_TCP) ||
	    ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
		goto noticmpv6;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	switch (icmp6->icmp6_type) {
	case ICMP6_PACKET_TOO_BIG:
		/*
		 * Update Path MTU, then try to send something out.
		 */
		tcp_update_pmtu(tcp, B_TRUE);
		tcp_rexmit_after_error(tcp);
		break;
	case ICMP6_DST_UNREACH:
		switch (icmp6->icmp6_code) {
		case ICMP6_DST_UNREACH_NOPORT:
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			break;
		case ICMP6_DST_UNREACH_ADMIN:
		case ICMP6_DST_UNREACH_NOROUTE:
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
		case ICMP6_DST_UNREACH_ADDR:
			/* Record the error in case we finally time out. */
			tcp->tcp_client_errno = EHOSTUNREACH;
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno);
				}
			}
			break;
		default:
			break;
		}
		break;
	case ICMP6_PARAM_PROB:
		/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
		    (uchar_t *)nexthdrp) {
			if (tcp->tcp_state == TCPS_SYN_SENT ||
			    tcp->tcp_state == TCPS_SYN_RCVD) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			break;
		}
		break;

	case ICMP6_TIME_EXCEEDED:
	default:
		break;
	}
	freemsg(mp);
}

/*
 * CALLED OUTSIDE OF SQUEUE!  It cannot follow any pointers that tcp might
 * change.  But it can refer to fields like tcp_suna and tcp_snxt.
 *
 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP
 * error messages received by IP.  The message is always received on the
 * correct tcp_t.
 */
/* ARGSUSED */
boolean_t
tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6,
    ip_recv_attr_t *ira)
{
	tcpha_t		*tcpha = (tcpha_t *)arg2;
	uint32_t	seq = ntohl(tcpha->tha_seq);
	tcp_t		*tcp = connp->conn_tcp;

	/*
	 * The TCP sequence number contained in the payload of the ICMP error
	 * message should be within the range SND.UNA <= SEG.SEQ < SND.NXT.
	 * Otherwise, the message is either a stale ICMP error, or an attack
	 * from the network.  Fail the verification.
	 */
	if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
		return (B_FALSE);

	/* For "too big" we also check the ignore flag */
	if (ira->ira_flags & IRAF_IS_IPV4) {
		ASSERT(icmph != NULL);
		if (icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
		    icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	} else {
		ASSERT(icmp6 != NULL);
		if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	}
	return (B_TRUE);
}
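
/*
 * The SND.UNA <= SEG.SEQ < SND.NXT test above has to hold modulo 2^32,
 * which is why it uses SEQ_LT()/SEQ_GEQ() rather than ordinary comparisons.
 * The guarded sketch below shows the usual wraparound-safe idiom behind
 * such macros; seq_lt(), seq_geq() and icmp_seq_ok() are illustrative local
 * helpers, not the kernel's definitions.
 */
#if 0	/* illustrative sketch only; not part of the build */
#include <stdio.h>
#include <stdint.h>

/* Interpret the unsigned difference as a signed 32-bit value. */
static int seq_lt(uint32_t a, uint32_t b)  { return ((int32_t)(a - b) < 0); }
static int seq_geq(uint32_t a, uint32_t b) { return ((int32_t)(a - b) >= 0); }

/* Accept the ICMP error only if SND.UNA <= SEG.SEQ < SND.NXT. */
static int
icmp_seq_ok(uint32_t seq, uint32_t suna, uint32_t snxt)
{
	return (!(seq_lt(seq, suna) || seq_geq(seq, snxt)));
}

int
main(void)
{
	/* A send window that straddles the 2^32 wrap point still works. */
	uint32_t suna = 0xfffffff0U, snxt = 0x00000010U;

	printf("in window: %d, stale: %d\n",
	    icmp_seq_ok(0xfffffff8U, suna, snxt),	/* 1: inside the window */
	    icmp_seq_ok(0x00000020U, suna, snxt));	/* 0: beyond SND.NXT */
	return (0);
}
#endif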