/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* This file contains all TCP input processing functions. */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define _SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_cluster.h>
#include <inet/proto_set.h>
#include <inet/ipsec_impl.h>

/*
 * RFC 1323-recommended phrasing of the TSTAMP option, for easier parsing.
 */
#ifdef _BIG_ENDIAN
#define TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
    (TCPOPT_TSTAMP << 8) | 10)
#else
#define TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
    (TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif

/*
 * Flags returned from tcp_parse_options.
 */
#define TCP_OPT_MSS_PRESENT     1
#define TCP_OPT_WSCALE_PRESENT  2
#define TCP_OPT_TSTAMP_PRESENT  4
#define TCP_OPT_SACK_OK_PRESENT 8
#define TCP_OPT_SACK_PRESENT    16

/*
 * PAWS needs a timer for 24 days. This is the number of ticks in 24 days.
 */
#define PAWS_TIMEOUT    ((clock_t)(24*24*60*60*hz))

/*
 * Since tcp_listener is not cleared atomically with tcp_detached
 * being cleared we need this extra bit to tell a detached connection
 * apart from one that is in the process of being accepted.
 */
#define TCP_IS_DETACHED_NONEAGER(tcp)   \
    (TCP_IS_DETACHED(tcp) &&            \
    (!(tcp)->tcp_hard_binding))
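/*
 * Illustrative sketch (not part of this file): PAWS, and the SEQ_LT/TSTMP_LT
 * style macros used throughout this file, rely on wraparound-safe "serial
 * number" comparison: subtract as unsigned 32-bit, then test the sign of the
 * signed result. The standalone user-space demo below (all demo_* names are
 * local to the sketch) shows why this keeps working across a 2^32 wrap.
 */
#if 0   /* standalone user-space demo; compile separately */
#include <stdint.h>
#include <assert.h>

#define DEMO_TSTMP_LT(a, b)     ((int32_t)((a) - (b)) < 0)

int
main(void)
{
    uint32_t old = 0xfffffff0U; /* just before the wrap */
    uint32_t new = 0x00000010U; /* just after the wrap */

    /* "old" is earlier than "new" even though old > new numerically */
    assert(DEMO_TSTMP_LT(old, new));
    assert(!DEMO_TSTMP_LT(new, old));
    return (0);
}
#endif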
/*
 * Steps to do when a tcp_t moves to TIME-WAIT state.
 *
 * This connection is done, we don't need to account for it. Decrement
 * the listener connection counter if needed.
 *
 * Decrement the connection counter of the stack. Note that this counter
 * is per CPU. So the total number of connections in a stack is the sum of all
 * of them. Since there is no lock for handling all of them exclusively, the
 * resulting sum is only an approximation.
 *
 * Unconditionally clear the exclusive binding bit so this TIME-WAIT
 * connection won't interfere with new ones.
 *
 * Start the TIME-WAIT timer. If upper layer has not closed the connection,
 * the timer is handled within the context of this tcp_t. When the timer
 * fires, tcp_clean_death() is called. If upper layer closes the connection
 * during this period, tcp_time_wait_append() will be called to add this
 * tcp_t to the global TIME-WAIT list. Note that this means that the
 * actual wait time in TIME-WAIT state will be longer than the
 * tcps_time_wait_interval since the period before upper layer closes the
 * connection is not accounted for when tcp_time_wait_append() is called.
 *
 * If upper layer has closed the connection, call tcp_time_wait_append()
 * directly.
 */
#define SET_TIME_WAIT(tcps, tcp, connp)                         \
{                                                               \
    (tcp)->tcp_state = TCPS_TIME_WAIT;                          \
    if ((tcp)->tcp_listen_cnt != NULL)                          \
        TCP_DECR_LISTEN_CNT(tcp);                               \
    atomic_dec_64(                                              \
        (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt); \
    (connp)->conn_exclbind = 0;                                 \
    if (!TCP_IS_DETACHED(tcp)) {                                \
        TCP_TIMER_RESTART(tcp, (tcps)->tcps_time_wait_interval); \
    } else {                                                    \
        tcp_time_wait_append(tcp);                              \
        TCP_DBGSTAT(tcps, tcp_rput_time_wait);                  \
    }                                                           \
}

/*
 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more
 * than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent
 * data, TCP will not respond with an ACK. RFC 793 requires that
 * TCP responds with an ACK for such a bogus ACK. By not following
 * the RFC, we prevent TCP from getting into an ACK storm if somehow
 * an attacker successfully spoofs an acceptable segment to our
 * peer; or when our peer is "confused."
 */
static uint32_t tcp_drop_ack_unsent_cnt = 10;

/*
 * The shift factor applied to tcp_mss to decide if the peer sends us a
 * valid initial receive window. By default, if the peer receive window
 * is smaller than 1 MSS (shift factor is 0), it is considered invalid.
 */
static uint32_t tcp_init_wnd_shft = 0;

/* Process ICMP source quench message or not. */
static boolean_t tcp_icmp_source_quench = B_FALSE;

static boolean_t tcp_outbound_squeue_switch = B_FALSE;

static mblk_t   *tcp_conn_create_v4(conn_t *, conn_t *, mblk_t *,
                    ip_recv_attr_t *);
static mblk_t   *tcp_conn_create_v6(conn_t *, conn_t *, mblk_t *,
                    ip_recv_attr_t *);
static boolean_t tcp_drop_q0(tcp_t *);
static void     tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *);
static mblk_t   *tcp_input_add_ancillary(tcp_t *, mblk_t *, ip_pkt_t *,
                    ip_recv_attr_t *);
static void     tcp_input_listener(void *, mblk_t *, void *, ip_recv_attr_t *);
static int      tcp_parse_options(tcpha_t *, tcp_opt_t *);
static void     tcp_process_options(tcp_t *, tcpha_t *);
static mblk_t   *tcp_reass(tcp_t *, mblk_t *, uint32_t);
static void     tcp_reass_elim_overlap(tcp_t *, mblk_t *);
static void     tcp_rsrv_input(void *, mblk_t *, void *, ip_recv_attr_t *);
static void     tcp_set_rto(tcp_t *, time_t);
static void     tcp_setcred_data(mblk_t *, ip_recv_attr_t *);

extern void     tcp_kssl_input(tcp_t *, mblk_t *, cred_t *);
/*
 * Set the MSS associated with a particular tcp based on its current value,
 * and a new one passed in. Observe minimums and maximums, and reset other
 * state variables that we want to view as multiples of MSS.
 *
 * The value of MSS could be either increased or decreased.
 */
void
tcp_mss_set(tcp_t *tcp, uint32_t mss)
{
    uint32_t    mss_max;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    conn_t      *connp = tcp->tcp_connp;

    if (connp->conn_ipversion == IPV4_VERSION)
        mss_max = tcps->tcps_mss_max_ipv4;
    else
        mss_max = tcps->tcps_mss_max_ipv6;

    if (mss < tcps->tcps_mss_min)
        mss = tcps->tcps_mss_min;
    if (mss > mss_max)
        mss = mss_max;
    /*
     * Unless naglim has been set by our client to
     * a non-mss value, force naglim to track mss.
     * This can help to aggregate small writes.
     */
    if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
        tcp->tcp_naglim = mss;
    /*
     * TCP should be able to buffer at least 4 MSS data for obvious
     * performance reasons.
     */
    if ((mss << 2) > connp->conn_sndbuf)
        connp->conn_sndbuf = mss << 2;

    /*
     * Set the send lowater to at least twice of MSS.
     */
    if ((mss << 1) > connp->conn_sndlowat)
        connp->conn_sndlowat = mss << 1;

    /*
     * Update tcp_cwnd according to the new value of MSS. Keep the
     * previous ratio to preserve the transmit rate.
     */
    tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss;
    tcp->tcp_cwnd_cnt = 0;

    tcp->tcp_mss = mss;
    (void) tcp_maxpsz_set(tcp, B_TRUE);
}
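/*
 * Illustrative sketch (not part of this file): the cwnd rescaling above keeps
 * the congestion window a constant number of segments when the MSS changes,
 * rather than a constant number of bytes. A standalone user-space rendering
 * of that arithmetic (demo_* names are local to the sketch):
 */
#if 0   /* standalone user-space demo; compile separately */
#include <stdint.h>
#include <assert.h>

static uint32_t
demo_rescale_cwnd(uint32_t cwnd, uint32_t old_mss, uint32_t new_mss)
{
    /* Preserve the cwnd/mss ratio (number of segments in flight). */
    return ((cwnd / old_mss) * new_mss);
}

int
main(void)
{
    /* 10 segments of 1460 bytes stay 10 segments of 1448 bytes. */
    assert(demo_rescale_cwnd(14600, 1460, 1448) == 14480);
    return (0);
}
#endif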
/*
 * Extract option values from a tcp header. We put any found values into the
 * tcpopt struct and return a bitmask saying which options were found.
 */
static int
tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt)
{
    uchar_t     *endp;
    int         len;
    uint32_t    mss;
    uchar_t     *up = (uchar_t *)tcpha;
    int         found = 0;
    int32_t     sack_len;
    tcp_seq     sack_begin, sack_end;
    tcp_t       *tcp;

    endp = up + TCP_HDR_LENGTH(tcpha);
    up += TCP_MIN_HEADER_LENGTH;
    while (up < endp) {
        len = endp - up;
        switch (*up) {
        case TCPOPT_EOL:
            break;

        case TCPOPT_NOP:
            up++;
            continue;

        case TCPOPT_MAXSEG:
            if (len < TCPOPT_MAXSEG_LEN ||
                up[1] != TCPOPT_MAXSEG_LEN)
                break;

            mss = BE16_TO_U16(up+2);
            /* Caller must handle tcp_mss_min and tcp_mss_max_* */
            tcpopt->tcp_opt_mss = mss;
            found |= TCP_OPT_MSS_PRESENT;

            up += TCPOPT_MAXSEG_LEN;
            continue;

        case TCPOPT_WSCALE:
            if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN)
                break;

            if (up[2] > TCP_MAX_WINSHIFT)
                tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT;
            else
                tcpopt->tcp_opt_wscale = up[2];
            found |= TCP_OPT_WSCALE_PRESENT;

            up += TCPOPT_WS_LEN;
            continue;

        case TCPOPT_SACK_PERMITTED:
            if (len < TCPOPT_SACK_OK_LEN ||
                up[1] != TCPOPT_SACK_OK_LEN)
                break;
            found |= TCP_OPT_SACK_OK_PRESENT;
            up += TCPOPT_SACK_OK_LEN;
            continue;

        case TCPOPT_SACK:
            if (len <= 2 || up[1] <= 2 || len < up[1])
                break;

            /* If TCP is not interested in SACK blks... */
            if ((tcp = tcpopt->tcp) == NULL) {
                up += up[1];
                continue;
            }
            sack_len = up[1] - TCPOPT_HEADER_LEN;
            up += TCPOPT_HEADER_LEN;

            /*
             * If the list is empty, allocate one and assume
             * nothing is sack'ed.
             */
            if (tcp->tcp_notsack_list == NULL) {
                tcp_notsack_update(&(tcp->tcp_notsack_list),
                    tcp->tcp_suna, tcp->tcp_snxt,
                    &(tcp->tcp_num_notsack_blk),
                    &(tcp->tcp_cnt_notsack_list));

                /*
                 * Make sure tcp_notsack_list is not NULL.
                 * This happens when kmem_alloc(KM_NOSLEEP)
                 * returns NULL.
                 */
                if (tcp->tcp_notsack_list == NULL) {
                    up += sack_len;
                    continue;
                }
                tcp->tcp_fack = tcp->tcp_suna;
            }

            while (sack_len > 0) {
                if (up + 8 > endp) {
                    up = endp;
                    break;
                }
                sack_begin = BE32_TO_U32(up);
                up += 4;
                sack_end = BE32_TO_U32(up);
                up += 4;
                sack_len -= 8;
                /*
                 * Bounds checking. Make sure the SACK
                 * info is within tcp_suna and tcp_snxt.
                 * If this SACK blk is out of bound, ignore
                 * it but continue to parse the following
                 * blks.
                 */
                if (SEQ_LEQ(sack_end, sack_begin) ||
                    SEQ_LT(sack_begin, tcp->tcp_suna) ||
                    SEQ_GT(sack_end, tcp->tcp_snxt)) {
                    continue;
                }
                tcp_notsack_insert(&(tcp->tcp_notsack_list),
                    sack_begin, sack_end,
                    &(tcp->tcp_num_notsack_blk),
                    &(tcp->tcp_cnt_notsack_list));
                if (SEQ_GT(sack_end, tcp->tcp_fack)) {
                    tcp->tcp_fack = sack_end;
                }
            }
            found |= TCP_OPT_SACK_PRESENT;
            continue;

        case TCPOPT_TSTAMP:
            if (len < TCPOPT_TSTAMP_LEN ||
                up[1] != TCPOPT_TSTAMP_LEN)
                break;

            tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
            tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);

            found |= TCP_OPT_TSTAMP_PRESENT;

            up += TCPOPT_TSTAMP_LEN;
            continue;

        default:
            if (len <= 1 || len < (int)up[1] || up[1] == 0)
                break;
            up += up[1];
            continue;
        }
        break;
    }
    return (found);
}
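/*
 * Illustrative sketch (not part of this file): how a caller consumes the
 * bitmask returned by an option parser like tcp_parse_options() above. This
 * is a standalone user-space demo with a reduced parser that only understands
 * the MSS option; all demo_* names are local to the sketch.
 */
#if 0   /* standalone user-space demo; compile separately */
#include <stdint.h>
#include <stdio.h>

#define DEMO_OPT_MSS_PRESENT    1

static int
demo_parse(const uint8_t *opt, int len, uint16_t *mssp)
{
    int found = 0;

    while (len > 0) {
        if (opt[0] == 0)        /* EOL: end of option list */
            break;
        if (opt[0] == 1) {      /* NOP: one byte, no length field */
            opt++; len--;
            continue;
        }
        if (len < 2 || opt[1] < 2 || opt[1] > len)
            break;              /* malformed option */
        if (opt[0] == 2 && opt[1] == 4) {       /* MSS, kind 2, len 4 */
            *mssp = (uint16_t)((opt[2] << 8) | opt[3]);
            found |= DEMO_OPT_MSS_PRESENT;
        }
        len -= opt[1];
        opt += opt[1];
    }
    return (found);
}

int
main(void)
{
    uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };        /* NOP NOP MSS 1460 */
    uint16_t mss = 0;

    if (demo_parse(opts, sizeof (opts), &mss) & DEMO_OPT_MSS_PRESENT)
        printf("MSS option: %u\n", mss);        /* prints 1460 */
    return (0);
}
#endif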
/*
 * Process all TCP options in the SYN segment. Note that this function should
 * be called after tcp_set_destination() is called so that the necessary info
 * from IRE is already set in the tcp structure.
 *
 * This function sets up the correct tcp_mss value according to the
 * MSS option value and our header size. It also sets up the window scale
 * and timestamp values, and initializes SACK info blocks. But it does not
 * change the receive window size after setting the tcp_mss value. The caller
 * should do the appropriate change.
 */
static void
tcp_process_options(tcp_t *tcp, tcpha_t *tcpha)
{
    int         options;
    tcp_opt_t   tcpopt;
    uint32_t    mss_max;
    char        *tmp_tcph;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    conn_t      *connp = tcp->tcp_connp;

    tcpopt.tcp = NULL;
    options = tcp_parse_options(tcpha, &tcpopt);

    /*
     * Process MSS option. Note that MSS option value does not account
     * for IP or TCP options. This means that it is equal to MTU - minimum
     * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for
     * IPv6.
     */
    if (!(options & TCP_OPT_MSS_PRESENT)) {
        if (connp->conn_ipversion == IPV4_VERSION)
            tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
        else
            tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
    } else {
        if (connp->conn_ipversion == IPV4_VERSION)
            mss_max = tcps->tcps_mss_max_ipv4;
        else
            mss_max = tcps->tcps_mss_max_ipv6;
        if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
            tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
        else if (tcpopt.tcp_opt_mss > mss_max)
            tcpopt.tcp_opt_mss = mss_max;
    }

    /* Process Window Scale option. */
    if (options & TCP_OPT_WSCALE_PRESENT) {
        tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
        tcp->tcp_snd_ws_ok = B_TRUE;
    } else {
        tcp->tcp_snd_ws = B_FALSE;
        tcp->tcp_snd_ws_ok = B_FALSE;
        tcp->tcp_rcv_ws = B_FALSE;
    }

    /* Process Timestamp option. */
    if ((options & TCP_OPT_TSTAMP_PRESENT) &&
        (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
        tmp_tcph = (char *)tcp->tcp_tcpha;

        tcp->tcp_snd_ts_ok = B_TRUE;
        tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
        tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
        ASSERT(OK_32PTR(tmp_tcph));
        ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);

        /* Fill in our template header with basic timestamp option. */
        tmp_tcph += connp->conn_ht_ulp_len;
        tmp_tcph[0] = TCPOPT_NOP;
        tmp_tcph[1] = TCPOPT_NOP;
        tmp_tcph[2] = TCPOPT_TSTAMP;
        tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
        connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN;
        connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN;
        tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4);
    } else {
        tcp->tcp_snd_ts_ok = B_FALSE;
    }

    /*
     * Process SACK options. If SACK is enabled for this connection,
     * then allocate the SACK info structure. Note the following places
     * where tcp_snd_sack_ok is set to B_TRUE.
     *
     * For active connection: in tcp_set_destination() called in
     * tcp_connect().
     *
     * For passive connection: in tcp_set_destination() called in
     * tcp_input_listener().
     *
     * That's the reason why the extra TCP_IS_DETACHED() check is there.
     * That check makes sure that if we did not send a SACK OK option,
     * we will not enable SACK for this connection even though the other
     * side sends us the SACK OK option. For active connection, the SACK
     * info structure has already been allocated. So we need to free
     * it if SACK is disabled.
     */
    if ((options & TCP_OPT_SACK_OK_PRESENT) &&
        (tcp->tcp_snd_sack_ok ||
        (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) {
        ASSERT(tcp->tcp_num_sack_blk == 0);
        ASSERT(tcp->tcp_notsack_list == NULL);

        tcp->tcp_snd_sack_ok = B_TRUE;
        if (tcp->tcp_snd_ts_ok) {
            tcp->tcp_max_sack_blk = 3;
        } else {
            tcp->tcp_max_sack_blk = 4;
        }
    } else if (tcp->tcp_snd_sack_ok) {
        /*
         * Resetting tcp_snd_sack_ok to B_FALSE so that
         * no SACK info will be used for this
         * connection. This assumes that SACK usage
         * permission is negotiated. This may need
         * to be changed once this is clarified.
         */
        ASSERT(tcp->tcp_num_sack_blk == 0);
        ASSERT(tcp->tcp_notsack_list == NULL);
        tcp->tcp_snd_sack_ok = B_FALSE;
    }

    /*
     * Now we know the exact TCP/IP header length, subtract
     * that from tcp_mss to get our side's MSS.
     */
    tcp->tcp_mss -= connp->conn_ht_iphc_len;

    /*
     * Here we assume that the other side's header size will be equal to
     * our header size. We calculate the real MSS accordingly. We also
     * need to take into account the additional overhead IPsec adds.
     *
     * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header)
     */
    tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len +
        tcp->tcp_ipsec_overhead -
        ((connp->conn_ipversion == IPV4_VERSION ?
        IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH);

    /*
     * Set MSS to the smaller one of both ends of the connection.
     * We should not have called tcp_mss_set() before, but our
     * side of the MSS should have been set to a proper value
     * by tcp_set_destination(). tcp_mss_set() will also set up the
     * STREAM head parameters properly.
     *
     * If we have a larger-than-16-bit window but the other side
     * didn't want to do window scale, tcp_rwnd_set() will take
     * care of that.
     */
    tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss));

    /*
     * Initialize tcp_cwnd value. After tcp_mss_set(), tcp_mss has been
     * updated properly.
     */
    TCP_SET_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial);
}
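/*
 * Illustrative sketch (not part of this file): a worked instance of the
 * "Real MSS" formula above, assuming plain IPv4, a 20-byte TCP header plus
 * the 12-byte NOP/NOP/timestamp option in our template header, and no IPsec
 * overhead.
 */
#if 0   /* standalone user-space demo; compile separately */
#include <assert.h>

int
main(void)
{
    int opt_mss = 1460;                 /* peer's MSS option */
    int our_iphc_len = 20 + 20 + 12;    /* IPv4 + TCP + timestamp option */
    int ipsec_overhead = 0;             /* assumption: no IPsec */
    int min_hdrs = 20 + 20;             /* min IPv4 + min TCP header */

    /* Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) */
    int real_mss = opt_mss - (our_iphc_len + ipsec_overhead - min_hdrs);

    assert(real_mss == 1448);           /* 12 bytes lost to the TS option */
    return (0);
}
#endif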
/*
 * Add a new piece to the tcp reassembly queue. If the gap at the beginning
 * is filled, return as much as we can. The message passed in may be
 * multi-part, chained using b_cont. "start" is the starting sequence
 * number for this piece.
 */
static mblk_t *
tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start)
{
    uint32_t    end;
    mblk_t      *mp1;
    mblk_t      *mp2;
    mblk_t      *next_mp;
    uint32_t    u1;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    /* Walk through all the new pieces. */
    do {
        ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
            (uintptr_t)INT_MAX);
        end = start + (int)(mp->b_wptr - mp->b_rptr);
        next_mp = mp->b_cont;
        if (start == end) {
            /* Empty. Blast it. */
            freeb(mp);
            continue;
        }
        mp->b_cont = NULL;
        TCP_REASS_SET_SEQ(mp, start);
        TCP_REASS_SET_END(mp, end);
        mp1 = tcp->tcp_reass_tail;
        if (!mp1) {
            tcp->tcp_reass_tail = mp;
            tcp->tcp_reass_head = mp;
            TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
            TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
                end - start);
            continue;
        }
        /* New stuff completely beyond tail? */
        if (SEQ_GEQ(start, TCP_REASS_END(mp1))) {
            /* Link it on end. */
            mp1->b_cont = mp;
            tcp->tcp_reass_tail = mp;
            TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
            TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
                end - start);
            continue;
        }
        mp1 = tcp->tcp_reass_head;
        u1 = TCP_REASS_SEQ(mp1);
        /* New stuff at the front? */
        if (SEQ_LT(start, u1)) {
            /* Yes... Check for overlap. */
            mp->b_cont = mp1;
            tcp->tcp_reass_head = mp;
            tcp_reass_elim_overlap(tcp, mp);
            continue;
        }
        /*
         * The new piece fits somewhere between the head and tail.
         * We find our slot, where mp1 precedes us and mp2 trails.
         */
        for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) {
            u1 = TCP_REASS_SEQ(mp2);
            if (SEQ_LEQ(start, u1))
                break;
        }
        /* Link ourselves in */
        mp->b_cont = mp2;
        mp1->b_cont = mp;

        /* Trim overlap with following mblk(s) first */
        tcp_reass_elim_overlap(tcp, mp);

        /* Trim overlap with preceding mblk */
        tcp_reass_elim_overlap(tcp, mp1);

    } while (start = end, mp = next_mp);
    mp1 = tcp->tcp_reass_head;
    /* Anything ready to go? */
    if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt)
        return (NULL);
    /* Eat what we can off the queue */
    for (;;) {
        mp = mp1->b_cont;
        end = TCP_REASS_END(mp1);
        TCP_REASS_SET_SEQ(mp1, 0);
        TCP_REASS_SET_END(mp1, 0);
        if (!mp) {
            tcp->tcp_reass_tail = NULL;
            break;
        }
        if (end != TCP_REASS_SEQ(mp)) {
            mp1->b_cont = NULL;
            break;
        }
        mp1 = mp;
    }
    mp1 = tcp->tcp_reass_head;
    tcp->tcp_reass_head = mp;
    return (mp1);
}
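/*
 * Illustrative sketch (not part of this file): the "eat what we can" drain
 * loop above only releases the prefix of the queue that is contiguous, i.e.
 * each piece must end exactly where the next begins, and the first piece
 * must start at rnxt. A standalone user-space rendering of that rule over a
 * simple array (demo_* names are local to the sketch):
 */
#if 0   /* standalone user-space demo; compile separately */
#include <stdint.h>
#include <assert.h>

struct demo_seg { uint32_t seq; uint32_t len; };

/* Return how many leading segments are deliverable, advancing *rnxt. */
static int
demo_drain(const struct demo_seg *q, int n, uint32_t *rnxt)
{
    int i;

    for (i = 0; i < n && q[i].seq == *rnxt; i++)
        *rnxt = q[i].seq + q[i].len;    /* consume this piece */
    return (i);
}

int
main(void)
{
    /* 100..200 and 200..250 are contiguous; 300..350 is not. */
    struct demo_seg q[] = { { 100, 100 }, { 200, 50 }, { 300, 50 } };
    uint32_t rnxt = 100;

    assert(demo_drain(q, 3, &rnxt) == 2);
    assert(rnxt == 250);        /* still waiting for 250..300 */
    return (0);
}
#endif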
/* Eliminate any overlap that mp may have over later mblks */
static void
tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp)
{
    uint32_t    end;
    mblk_t      *mp1;
    uint32_t    u1;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    end = TCP_REASS_END(mp);
    while ((mp1 = mp->b_cont) != NULL) {
        u1 = TCP_REASS_SEQ(mp1);
        if (!SEQ_GT(end, u1))
            break;
        if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) {
            mp->b_wptr -= end - u1;
            TCP_REASS_SET_END(mp, u1);
            TCPS_BUMP_MIB(tcps, tcpInDataPartDupSegs);
            TCPS_UPDATE_MIB(tcps, tcpInDataPartDupBytes,
                end - u1);
            break;
        }
        mp->b_cont = mp1->b_cont;
        TCP_REASS_SET_SEQ(mp1, 0);
        TCP_REASS_SET_END(mp1, 0);
        freeb(mp1);
        TCPS_BUMP_MIB(tcps, tcpInDataDupSegs);
        TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, end - u1);
    }
    if (!mp1)
        tcp->tcp_reass_tail = mp;
}
/*
 * This function does PAWS protection check. Returns B_TRUE if the
 * segment passes the PAWS test, else returns B_FALSE.
 */
boolean_t
tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp)
{
    uint8_t flags;
    int     options;
    uint8_t *up;
    conn_t  *connp = tcp->tcp_connp;

    flags = (unsigned int)tcpha->tha_flags & 0xFF;
    /*
     * If the timestamp option is aligned nicely, get the values inline,
     * otherwise call the general routine to parse. Use the inline path
     * only when the timestamp is the only option.
     */
    if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH +
        TCPOPT_REAL_TS_LEN &&
        OK_32PTR((up = ((uint8_t *)tcpha) +
        TCP_MIN_HEADER_LENGTH)) &&
        *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
        tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
        tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));

        options = TCP_OPT_TSTAMP_PRESENT;
    } else {
        if (tcp->tcp_snd_sack_ok) {
            tcpoptp->tcp = tcp;
        } else {
            tcpoptp->tcp = NULL;
        }
        options = tcp_parse_options(tcpha, tcpoptp);
    }

    if (options & TCP_OPT_TSTAMP_PRESENT) {
        /*
         * Do PAWS per RFC 1323 section 4.2. Accept RST
         * regardless of the timestamp, page 18 RFC 1323.bis.
         */
        if ((flags & TH_RST) == 0 &&
            TSTMP_LT(tcpoptp->tcp_opt_ts_val,
            tcp->tcp_ts_recent)) {
            if (TSTMP_LT(LBOLT_FASTPATH64,
                tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) {
                /* This segment is not acceptable. */
                return (B_FALSE);
            } else {
                /*
                 * Connection has been idle for
                 * too long. Reset the timestamp
                 * and assume the segment is valid.
                 */
                tcp->tcp_ts_recent =
                    tcpoptp->tcp_opt_ts_val;
            }
        }
    } else {
        /*
         * If we don't get a timestamp on every packet, we
         * figure we can't really trust 'em, so we stop sending
         * and parsing them.
         */
        tcp->tcp_snd_ts_ok = B_FALSE;

        connp->conn_ht_iphc_len -= TCPOPT_REAL_TS_LEN;
        connp->conn_ht_ulp_len -= TCPOPT_REAL_TS_LEN;
        tcp->tcp_tcpha->tha_offset_and_reserved -= (3 << 4);
        /*
         * Adjust the tcp_mss and tcp_cwnd accordingly. We avoid
         * doing a slow start here so as not to lose the
         * transfer rate built up so far.
         */
        tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN);
        if (tcp->tcp_snd_sack_ok)
            tcp->tcp_max_sack_blk = 4;
    }
    return (B_TRUE);
}
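/*
 * Illustrative sketch (not part of this file): the fast path above recognizes
 * the RFC 1323-recommended option layout (NOP, NOP, TSTAMP, length 10) with a
 * single aligned 32-bit compare instead of a byte-wise option walk. This is a
 * standalone user-space demo that assumes a little-endian host (the kernel's
 * TCPOPT_NOP_NOP_TSTAMP macro handles both byte orders):
 */
#if 0   /* standalone user-space demo; compile separately */
#include <stdint.h>
#include <string.h>
#include <assert.h>

int
main(void)
{
    /* NOP(1) NOP(1) TSTAMP(8) len(10), as it appears on the wire */
    uint8_t opts[12] = { 1, 1, 8, 10 };
    uint32_t word, pattern;

    /* Little-endian equivalent of TCPOPT_NOP_NOP_TSTAMP above */
    pattern = (10U << 24) | (8U << 16) | (1U << 8) | 1U;

    memcpy(&word, opts, sizeof (word)); /* one aligned 32-bit load */
    assert(word == pattern);
    return (0);
}
#endif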
/*
 * Defense for the SYN attack -
 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
 *    one from the list of droppable eagers. This list is a subset of q0.
 *    See comments before the definition of MAKE_DROPPABLE().
 * 2. Don't drop a SYN request before its first timeout. This gives every
 *    request at least until the first timeout to complete its 3-way
 *    handshake.
 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
 *    requests currently on the queue have timed out. This will be used
 *    as an indicator of whether an attack is under way, so that appropriate
 *    actions can be taken. (It's incremented in tcp_timer() and decremented
 *    either when the eager goes into ESTABLISHED, or when it is freed.)
 * 4. The current threshold is - # of timeouts > q0len/4 => SYN alert on;
 *    # of timeouts drops back to <= q0len/32 => SYN alert off.
 */
static boolean_t
tcp_drop_q0(tcp_t *tcp)
{
    tcp_t       *eager;
    mblk_t      *mp;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock));
    ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0);

    /* Pick oldest eager from the list of droppable eagers */
    eager = tcp->tcp_eager_prev_drop_q0;

    /* If the list is empty, return B_FALSE */
    if (eager == tcp) {
        return (B_FALSE);
    }

    /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
    if ((mp = allocb(0, BPRI_HI)) == NULL)
        return (B_FALSE);

    /*
     * Take this eager out from the list of droppable eagers since we are
     * going to drop it.
     */
    MAKE_UNDROPPABLE(eager);

    if (tcp->tcp_connp->conn_debug) {
        (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
            "tcp_drop_q0: listen half-open queue (max=%d) overflow"
            " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0,
            tcp->tcp_conn_req_cnt_q0,
            tcp_display(tcp, NULL, DISP_PORT_ONLY));
    }

    TCPS_BUMP_MIB(tcps, tcpHalfOpenDrop);

    /* Put a reference on the conn as we are enqueueing it in the squeue */
    CONN_INC_REF(eager->tcp_connp);

    SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
        tcp_clean_death_wrapper, eager->tcp_connp, NULL,
        SQ_FILL, SQTAG_TCP_DROP_Q0);

    return (B_TRUE);
}
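/*
 * Illustrative sketch (not part of this file): the on/off thresholds in
 * item 4 of the comment above form a hysteresis band, so the defense does
 * not flap when the timeout count hovers near a single threshold. A
 * standalone user-space rendering of that rule (demo_* names are local to
 * the sketch):
 */
#if 0   /* standalone user-space demo; compile separately */
#include <assert.h>

static int
demo_syn_alert(int on, int timeouts, int q0len)
{
    if (!on && timeouts > q0len / 4)
        return (1);             /* alert turns on */
    if (on && timeouts <= q0len / 32)
        return (0);             /* alert turns off */
    return (on);                /* otherwise unchanged */
}

int
main(void)
{
    int on = 0;

    on = demo_syn_alert(on, 40, 128);   /* 40 > 128/4: turns on */
    assert(on);
    on = demo_syn_alert(on, 10, 128);   /* 10 > 128/32: stays on */
    assert(on);
    on = demo_syn_alert(on, 4, 128);    /* 4 <= 128/32: turns off */
    assert(!on);
    return (0);
}
#endif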
/*
 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6
 */
static mblk_t *
tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
    tcp_t       *ltcp = lconnp->conn_tcp;
    tcp_t       *tcp = connp->conn_tcp;
    mblk_t      *tpi_mp;
    ipha_t      *ipha;
    ip6_t       *ip6h;
    sin6_t      sin6;
    uint_t      ifindex = ira->ira_ruifindex;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    if (ira->ira_flags & IRAF_IS_IPV4) {
        ipha = (ipha_t *)mp->b_rptr;

        connp->conn_ipversion = IPV4_VERSION;
        IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
        IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
        connp->conn_saddr_v6 = connp->conn_laddr_v6;

        sin6 = sin6_null;
        sin6.sin6_addr = connp->conn_faddr_v6;
        sin6.sin6_port = connp->conn_fport;
        sin6.sin6_family = AF_INET6;
        sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
            IPCL_ZONEID(lconnp), tcps->tcps_netstack);

        if (connp->conn_recv_ancillary.crb_recvdstaddr) {
            sin6_t  sin6d;

            sin6d = sin6_null;
            sin6d.sin6_addr = connp->conn_laddr_v6;
            sin6d.sin6_port = connp->conn_lport;
            sin6d.sin6_family = AF_INET;
            tpi_mp = mi_tpi_extconn_ind(NULL,
                (char *)&sin6d, sizeof (sin6_t),
                (char *)&tcp,
                (t_scalar_t)sizeof (intptr_t),
                (char *)&sin6d, sizeof (sin6_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        } else {
            tpi_mp = mi_tpi_conn_ind(NULL,
                (char *)&sin6, sizeof (sin6_t),
                (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        }
    } else {
        ip6h = (ip6_t *)mp->b_rptr;

        connp->conn_ipversion = IPV6_VERSION;
        connp->conn_laddr_v6 = ip6h->ip6_dst;
        connp->conn_faddr_v6 = ip6h->ip6_src;
        connp->conn_saddr_v6 = connp->conn_laddr_v6;

        sin6 = sin6_null;
        sin6.sin6_addr = connp->conn_faddr_v6;
        sin6.sin6_port = connp->conn_fport;
        sin6.sin6_family = AF_INET6;
        sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
        sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
            IPCL_ZONEID(lconnp), tcps->tcps_netstack);

        if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) {
            /* Pass up the scope_id of remote addr */
            sin6.sin6_scope_id = ifindex;
        } else {
            sin6.sin6_scope_id = 0;
        }
        if (connp->conn_recv_ancillary.crb_recvdstaddr) {
            sin6_t  sin6d;

            sin6d = sin6_null;
            sin6d.sin6_addr = connp->conn_laddr_v6;
            sin6d.sin6_port = connp->conn_lport;
            sin6d.sin6_family = AF_INET6;
            if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6))
                sin6d.sin6_scope_id = ifindex;

            tpi_mp = mi_tpi_extconn_ind(NULL,
                (char *)&sin6d, sizeof (sin6_t),
                (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
                (char *)&sin6d, sizeof (sin6_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        } else {
            tpi_mp = mi_tpi_conn_ind(NULL,
                (char *)&sin6, sizeof (sin6_t),
                (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
                (t_scalar_t)ltcp->tcp_conn_req_seqnum);
        }
    }

    tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
    return (tpi_mp);
}

/* Handle a SYN on an AF_INET socket */
static mblk_t *
tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira)
{
    tcp_t       *ltcp = lconnp->conn_tcp;
    tcp_t       *tcp = connp->conn_tcp;
    sin_t       sin;
    mblk_t      *tpi_mp = NULL;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    ipha_t      *ipha;

    ASSERT(ira->ira_flags & IRAF_IS_IPV4);
    ipha = (ipha_t *)mp->b_rptr;

    connp->conn_ipversion = IPV4_VERSION;
    IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
    IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
    connp->conn_saddr_v6 = connp->conn_laddr_v6;

    sin = sin_null;
    sin.sin_addr.s_addr = connp->conn_faddr_v4;
    sin.sin_port = connp->conn_fport;
    sin.sin_family = AF_INET;
    if (lconnp->conn_recv_ancillary.crb_recvdstaddr) {
        sin_t   sind;

        sind = sin_null;
        sind.sin_addr.s_addr = connp->conn_laddr_v4;
        sind.sin_port = connp->conn_lport;
        sind.sin_family = AF_INET;
        tpi_mp = mi_tpi_extconn_ind(NULL,
            (char *)&sind, sizeof (sin_t), (char *)&tcp,
            (t_scalar_t)sizeof (intptr_t), (char *)&sind,
            sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum);
    } else {
        tpi_mp = mi_tpi_conn_ind(NULL,
            (char *)&sin, sizeof (sin_t),
            (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
            (t_scalar_t)ltcp->tcp_conn_req_seqnum);
    }

    tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
    return (tpi_mp);
}
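/*
 * Illustrative sketch (not part of this file): AF_INET6 listeners carry IPv4
 * peers as "v4-mapped" IPv6 addresses (::ffff:a.b.c.d), which is what
 * IN6_IPADDR_TO_V4MAPPED() produces above. Standalone user-space demo of
 * that mapping:
 */
#if 0   /* standalone user-space demo; compile separately */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int
main(void)
{
    struct in_addr v4;
    struct in6_addr v6;
    char buf[INET6_ADDRSTRLEN];

    inet_pton(AF_INET, "192.0.2.1", &v4);

    /* ::ffff:192.0.2.1 - 80 zero bits, 16 one bits, 32 IPv4 bits */
    memset(&v6, 0, sizeof (v6));
    v6.s6_addr[10] = 0xff;
    v6.s6_addr[11] = 0xff;
    memcpy(&v6.s6_addr[12], &v4, sizeof (v4));

    printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof (buf)));
    /* prints ::ffff:192.0.2.1 */
    return (0);
}
#endif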
/*
 * Called via squeue to get on to eager's perimeter. It sends a
 * TH_RST if eager is in the fanout table. The listener wants the
 * eager to disappear either by means of tcp_eager_blowoff() or
 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be
 * called (via squeue) if the eager cannot be inserted in the
 * fanout table in tcp_input_listener().
 */
/* ARGSUSED */
void
tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
    conn_t  *econnp = (conn_t *)arg;
    tcp_t   *eager = econnp->conn_tcp;
    tcp_t   *listener = eager->tcp_listener;

    /*
     * We could be called because listener is closing. Since
     * the eager was using the listener's queues, we avoid
     * using them from now on.
     */
    ASSERT(eager->tcp_detached);
    econnp->conn_rq = NULL;
    econnp->conn_wq = NULL;

    /*
     * An eager's conn_fanout will be NULL if it's a duplicate
     * for an existing 4-tuple in the conn fanout table.
     * We don't want to send an RST out in such case.
     */
    if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) {
        tcp_xmit_ctl("tcp_eager_kill, can't wait",
            eager, eager->tcp_snxt, 0, TH_RST);
    }

    /* We are here because listener wants this eager gone */
    if (listener != NULL) {
        mutex_enter(&listener->tcp_eager_lock);
        tcp_eager_unlink(eager);
        if (eager->tcp_tconnind_started) {
            /*
             * The eager has sent a conn_ind up to the
             * listener but listener decides to close
             * instead. We need to drop the extra ref
             * placed on eager in tcp_input_data() before
             * sending the conn_ind to listener.
             */
            CONN_DEC_REF(econnp);
        }
        mutex_exit(&listener->tcp_eager_lock);
        CONN_DEC_REF(listener->tcp_connp);
    }

    if (eager->tcp_state != TCPS_CLOSED)
        tcp_close_detached(eager);
}

/*
 * Reset any eager connection hanging off this listener marked
 * with 'seqnum' and then reclaim its resources.
 */
boolean_t
tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum)
{
    tcp_t   *eager;
    mblk_t  *mp;

    eager = listener;
    mutex_enter(&listener->tcp_eager_lock);
    do {
        eager = eager->tcp_eager_next_q;
        if (eager == NULL) {
            mutex_exit(&listener->tcp_eager_lock);
            return (B_FALSE);
        }
    } while (eager->tcp_conn_req_seqnum != seqnum);

    if (eager->tcp_closemp_used) {
        mutex_exit(&listener->tcp_eager_lock);
        return (B_TRUE);
    }
    eager->tcp_closemp_used = B_TRUE;
    TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
    CONN_INC_REF(eager->tcp_connp);
    mutex_exit(&listener->tcp_eager_lock);
    mp = &eager->tcp_closemp;
    SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill,
        eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF);
    return (B_TRUE);
}

/*
 * Reset any eager connection hanging off this listener
 * and then reclaim its resources.
 */
void
tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only)
{
    tcp_t       *eager;
    mblk_t      *mp;
    tcp_stack_t *tcps = listener->tcp_tcps;

    ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));

    if (!q0_only) {
        /* First cleanup q */
        TCP_STAT(tcps, tcp_eager_blowoff_q);
        eager = listener->tcp_eager_next_q;
        while (eager != NULL) {
            if (!eager->tcp_closemp_used) {
                eager->tcp_closemp_used = B_TRUE;
                TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
                CONN_INC_REF(eager->tcp_connp);
                mp = &eager->tcp_closemp;
                SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
                    tcp_eager_kill, eager->tcp_connp, NULL,
                    SQ_FILL, SQTAG_TCP_EAGER_CLEANUP);
            }
            eager = eager->tcp_eager_next_q;
        }
    }
    /* Then cleanup q0 */
    TCP_STAT(tcps, tcp_eager_blowoff_q0);
    eager = listener->tcp_eager_next_q0;
    while (eager != listener) {
        if (!eager->tcp_closemp_used) {
            eager->tcp_closemp_used = B_TRUE;
            TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
            CONN_INC_REF(eager->tcp_connp);
            mp = &eager->tcp_closemp;
            SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
                tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL,
                SQTAG_TCP_EAGER_CLEANUP_Q0);
        }
        eager = eager->tcp_eager_next_q0;
    }
}
/*
 * If we are an eager connection hanging off a listener that hasn't
 * formally accepted the connection yet, get off its list and blow off
 * any data that we have accumulated.
 */
void
tcp_eager_unlink(tcp_t *tcp)
{
    tcp_t   *listener = tcp->tcp_listener;

    ASSERT(listener != NULL);
    ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
    if (tcp->tcp_eager_next_q0 != NULL) {
        ASSERT(tcp->tcp_eager_prev_q0 != NULL);

        /* Remove the eager tcp from q0 */
        tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
            tcp->tcp_eager_prev_q0;
        tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
            tcp->tcp_eager_next_q0;
        ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
        listener->tcp_conn_req_cnt_q0--;

        tcp->tcp_eager_next_q0 = NULL;
        tcp->tcp_eager_prev_q0 = NULL;

        /*
         * Take the eager out, if it is in the list of droppable
         * eagers.
         */
        MAKE_UNDROPPABLE(tcp);

        if (tcp->tcp_syn_rcvd_timeout != 0) {
            /* we have timed out before */
            ASSERT(listener->tcp_syn_rcvd_timeout > 0);
            listener->tcp_syn_rcvd_timeout--;
        }
    } else {
        tcp_t **tcpp = &listener->tcp_eager_next_q;
        tcp_t *prev = NULL;

        for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) {
            if (tcpp[0] == tcp) {
                if (listener->tcp_eager_last_q == tcp) {
                    /*
                     * If we are unlinking the last
                     * element on the list, adjust
                     * tail pointer. Set tail pointer
                     * to nil when list is empty.
                     */
                    ASSERT(tcp->tcp_eager_next_q == NULL);
                    if (listener->tcp_eager_last_q ==
                        listener->tcp_eager_next_q) {
                        listener->tcp_eager_last_q =
                            NULL;
                    } else {
                        /*
                         * We won't get here if there
                         * is only one eager in the
                         * list.
                         */
                        ASSERT(prev != NULL);
                        listener->tcp_eager_last_q =
                            prev;
                    }
                }
                tcpp[0] = tcp->tcp_eager_next_q;
                tcp->tcp_eager_next_q = NULL;
                tcp->tcp_eager_last_q = NULL;
                ASSERT(listener->tcp_conn_req_cnt_q > 0);
                listener->tcp_conn_req_cnt_q--;
                break;
            }
            prev = tcpp[0];
        }
    }
    tcp->tcp_listener = NULL;
}
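/*
 * Illustrative sketch (not part of this file): q0 above is a circular
 * doubly-linked list with the listener itself acting as the sentinel node,
 * so unlinking needs no NULL checks and an empty list is simply
 * "next == self" (which is why tcp_drop_q0() tests eager == tcp). A
 * standalone user-space rendering of that structure:
 */
#if 0   /* standalone user-space demo; compile separately */
#include <stddef.h>
#include <assert.h>

struct demo_node {
    struct demo_node *next;
    struct demo_node *prev;
};

static void
demo_insert_after(struct demo_node *where, struct demo_node *n)
{
    n->next = where->next;
    n->prev = where;
    where->next->prev = n;
    where->next = n;
}

static void
demo_unlink(struct demo_node *n)
{
    n->next->prev = n->prev;
    n->prev->next = n->next;
    n->next = n->prev = NULL;
}

int
main(void)
{
    struct demo_node listener, eager;

    listener.next = listener.prev = &listener;  /* empty q0 */
    demo_insert_after(&listener, &eager);
    assert(listener.next == &eager && eager.next == &listener);
    demo_unlink(&eager);
    assert(listener.next == &listener);         /* empty again */
    return (0);
}
#endif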
1218 * 1219 * tcp_tli_accept() calls tcp_accept_finish() on eagers perimeter to finish 1220 * accept. 1221 * 1222 * TLI/XTI client ACCEPT path: 1223 * --------------------------- 1224 * 1225 * soaccept() sends T_CONN_RES on the listener STREAM. 1226 * 1227 * tcp_tli_accept() -> tcp_accept_swap() complete the processing and send 1228 * a M_SETOPS mblk to eager perimeter to finish accept (tcp_accept_finish()). 1229 * 1230 * Locks: 1231 * ====== 1232 * 1233 * listener->tcp_eager_lock protects the listeners->tcp_eager_next_q0 and 1234 * and listeners->tcp_eager_next_q. 1235 * 1236 * Referencing: 1237 * ============ 1238 * 1239 * 1) We start out in tcp_input_listener by eager placing a ref on 1240 * listener and listener adding eager to listeners->tcp_eager_next_q0. 1241 * 1242 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before 1243 * doing so we place a ref on the eager. This ref is finally dropped at the 1244 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the 1245 * reference is dropped by the squeue framework. 1246 * 1247 * 3) The ref on listener placed in 1 above is dropped in tcp_accept_finish 1248 * 1249 * The reference must be released by the same entity that added the reference 1250 * In the above scheme, the eager is the entity that adds and releases the 1251 * references. Note that tcp_accept_finish executes in the squeue of the eager 1252 * (albeit after it is attached to the acceptor stream). Though 1. executes 1253 * in the listener's squeue, the eager is nascent at this point and the 1254 * reference can be considered to have been added on behalf of the eager. 1255 * 1256 * Eager getting a Reset or listener closing: 1257 * ========================================== 1258 * 1259 * Once the listener and eager are linked, the listener never does the unlink. 1260 * If the listener needs to close, tcp_eager_cleanup() is called which queues 1261 * a message on all eager perimeter. The eager then does the unlink, clears 1262 * any pointers to the listener's queue and drops the reference to the 1263 * listener. The listener waits in tcp_close outside the squeue until its 1264 * refcount has dropped to 1. This ensures that the listener has waited for 1265 * all eagers to clear their association with the listener. 1266 * 1267 * Similarly, if eager decides to go away, it can unlink itself and close. 1268 * When the T_CONN_RES comes down, we check if eager has closed. Note that 1269 * the reference to eager is still valid because of the extra ref we put 1270 * in tcp_send_conn_ind. 1271 * 1272 * Listener can always locate the eager under the protection 1273 * of the listener->tcp_eager_lock, and then do a refhold 1274 * on the eager during the accept processing. 1275 * 1276 * The acceptor stream accesses the eager in the accept processing 1277 * based on the ref placed on eager before sending T_conn_ind. 1278 * The only entity that can negate this refhold is a listener close 1279 * which is mutually exclusive with an active acceptor stream. 1280 * 1281 * Eager's reference on the listener 1282 * =================================== 1283 * 1284 * If the accept happens (even on a closed eager) the eager drops its 1285 * reference on the listener at the start of tcp_accept_finish. If the 1286 * eager is killed due to an incoming RST before the T_conn_ind is sent up, 1287 * the reference is dropped in tcp_closei_local. If the listener closes, 1288 * the reference is dropped in tcp_eager_kill. 
 * In all cases the reference
 * is dropped while executing in the eager's context (squeue).
 */
/* END CSTYLED */

/* Process the SYN packet, mp, directed at the listener 'tcp' */

/*
 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
 * tcp_input_data will not see any packets for listeners since the listener
 * has conn_recv set to tcp_input_listener.
 */
/* ARGSUSED */
static void
tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
    tcpha_t     *tcpha;
    uint32_t    seg_seq;
    tcp_t       *eager;
    int         err;
    conn_t      *econnp = NULL;
    squeue_t    *new_sqp;
    mblk_t      *mp1;
    uint_t      ip_hdr_len;
    conn_t      *lconnp = (conn_t *)arg;
    tcp_t       *listener = lconnp->conn_tcp;
    tcp_stack_t *tcps = listener->tcp_tcps;
    ip_stack_t  *ipst = tcps->tcps_netstack->netstack_ip;
    uint_t      flags;
    mblk_t      *tpi_mp;
    uint_t      ifindex = ira->ira_ruifindex;
    boolean_t   tlc_set = B_FALSE;

    ip_hdr_len = ira->ira_ip_hdr_length;
    tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
    flags = (unsigned int)tcpha->tha_flags & 0xFF;

    DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, lconnp->conn_ixa,
        __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, listener,
        __dtrace_tcp_tcph_t *, tcpha);

    if (!(flags & TH_SYN)) {
        if ((flags & TH_RST) || (flags & TH_URG)) {
            freemsg(mp);
            return;
        }
        if (flags & TH_ACK) {
            /* Note this executes in listener's squeue */
            tcp_xmit_listeners_reset(mp, ira, ipst, lconnp);
            return;
        }

        freemsg(mp);
        return;
    }

    if (listener->tcp_state != TCPS_LISTEN)
        goto error2;

    ASSERT(IPCL_IS_BOUND(lconnp));

    mutex_enter(&listener->tcp_eager_lock);

    /*
     * When the system is under memory pressure, we need to do our part
     * to relieve the pressure. So we only accept new requests if there
     * is nothing waiting to be accepted or waiting to complete the 3-way
     * handshake. This means that a busy listener will not get too many
     * new requests which it cannot handle in time, while a non-busy
     * listener is still functioning properly.
     */
    if (tcps->tcps_reclaim && (listener->tcp_conn_req_cnt_q > 0 ||
        listener->tcp_conn_req_cnt_q0 > 0)) {
        mutex_exit(&listener->tcp_eager_lock);
        TCP_STAT(tcps, tcp_listen_mem_drop);
        goto error2;
    }

    if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) {
        mutex_exit(&listener->tcp_eager_lock);
        TCP_STAT(tcps, tcp_listendrop);
        TCPS_BUMP_MIB(tcps, tcpListenDrop);
        if (lconnp->conn_debug) {
            (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
                "tcp_input_listener: listen backlog (max=%d) "
                "overflow (%d pending) on %s",
                listener->tcp_conn_req_max,
                listener->tcp_conn_req_cnt_q,
                tcp_display(listener, NULL, DISP_PORT_ONLY));
        }
        goto error2;
    }

    if (listener->tcp_conn_req_cnt_q0 >=
        listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) {
        /*
         * Q0 is full. Drop a pending half-open req from the queue
         * to make room for the new SYN req. Also mark the time we
         * drop a SYN.
         *
         * A more aggressive defense against SYN attack would
         * be to set the "tcp_syn_defense" flag now.
         */
        TCP_STAT(tcps, tcp_listendropq0);
        listener->tcp_last_rcv_lbolt = ddi_get_lbolt64();
        if (!tcp_drop_q0(listener)) {
            mutex_exit(&listener->tcp_eager_lock);
            TCPS_BUMP_MIB(tcps, tcpListenDropQ0);
            if (lconnp->conn_debug) {
                (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
                    "tcp_input_listener: listen half-open "
                    "queue (max=%d) full (%d pending) on %s",
                    tcps->tcps_conn_req_max_q0,
                    listener->tcp_conn_req_cnt_q0,
                    tcp_display(listener, NULL,
                    DISP_PORT_ONLY));
            }
            goto error2;
        }
    }

    /*
     * Enforce the limit set on the number of connections per listener.
     * Note that tlc_cnt starts at 1, so we need to add 1 to tlc_max
     * for the comparison.
     */
    if (listener->tcp_listen_cnt != NULL) {
        tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
        int64_t now;

        if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) {
            mutex_exit(&listener->tcp_eager_lock);
            now = ddi_get_lbolt64();
            atomic_add_32(&tlc->tlc_cnt, -1);
            TCP_STAT(tcps, tcp_listen_cnt_drop);
            tlc->tlc_drop++;
            if (now - tlc->tlc_report_time >
                MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) {
                zcmn_err(lconnp->conn_zoneid, CE_WARN,
                    "Listener (port %d) connection max (%u) "
                    "reached: %u attempts dropped total\n",
                    ntohs(listener->tcp_connp->conn_lport),
                    tlc->tlc_max, tlc->tlc_drop);
                tlc->tlc_report_time = now;
            }
            goto error2;
        }
        tlc_set = B_TRUE;
    }

    mutex_exit(&listener->tcp_eager_lock);

    /*
     * IP sets ira_sqp to either the sender's conn_sqp (for loopback)
     * or based on the ring (for packets from GLD). Otherwise it is
     * set based on lbolt i.e., a somewhat random number.
     */
    ASSERT(ira->ira_sqp != NULL);
    new_sqp = ira->ira_sqp;

    econnp = (conn_t *)tcp_get_conn(arg2, tcps);
    if (econnp == NULL)
        goto error2;

    ASSERT(econnp->conn_netstack == lconnp->conn_netstack);
    econnp->conn_sqp = new_sqp;
    econnp->conn_initial_sqp = new_sqp;
    econnp->conn_ixa->ixa_sqp = new_sqp;

    econnp->conn_fport = tcpha->tha_lport;
    econnp->conn_lport = tcpha->tha_fport;

    err = conn_inherit_parent(lconnp, econnp);
    if (err != 0)
        goto error3;

    /* We already know the laddr of the new connection is ours */
    econnp->conn_ixa->ixa_src_generation = ipst->ips_src_generation;

    ASSERT(OK_32PTR(mp->b_rptr));
    ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION ||
        IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);

    if (lconnp->conn_family == AF_INET) {
        ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
        tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira);
    } else {
        tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira);
    }

    if (tpi_mp == NULL)
        goto error3;

    eager = econnp->conn_tcp;
    eager->tcp_detached = B_TRUE;
    SOCK_CONNID_INIT(eager->tcp_connid);
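#if 0
    /*
     * Illustrative aside (not part of this function): the per-listener
     * limit above uses the lock-free "increment, test, and back out"
     * idiom - atomically bump the counter, and if the new value exceeds
     * the limit, atomically decrement it again and fail. A standalone
     * user-space rendering, using C11 atomics in place of the kernel's
     * atomic_add_32_nv() (demo_* names are local to the sketch):
     */
#include <stdatomic.h>
#include <assert.h>

    static atomic_uint demo_cnt = 1;    /* starts at 1, like tlc_cnt */

    static int
    demo_try_admit(unsigned int max)
    {
        /* fetch_add returns the old value; +1 gives the new value */
        if (atomic_fetch_add(&demo_cnt, 1) + 1 > max + 1) {
            atomic_fetch_sub(&demo_cnt, 1);     /* back out */
            return (0);
        }
        return (1);
    }

    int
    main(void)
    {
        assert(demo_try_admit(1));      /* first connection fits */
        assert(!demo_try_admit(1));     /* second exceeds max=1 */
        return (0);
    }
#endif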
    /*
     * Initialize the eager's tcp_t and inherit some parameters from
     * the listener.
     */
    tcp_init_values(eager, listener);

    ASSERT((econnp->conn_ixa->ixa_flags &
        (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
        IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) ==
        (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
        IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO));

    if (!tcps->tcps_dev_flow_ctl)
        econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

    /* Prepare for diffing against previous packets */
    eager->tcp_recvifindex = 0;
    eager->tcp_recvhops = 0xffffffffU;

    if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) {
        if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) ||
            IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) {
            econnp->conn_incoming_ifindex = ifindex;
            econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
            econnp->conn_ixa->ixa_scopeid = ifindex;
        }
    }

    if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) ==
        (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) &&
        tcps->tcps_rev_src_routes) {
        ipha_t *ipha = (ipha_t *)mp->b_rptr;
        ip_pkt_t *ipp = &econnp->conn_xmit_ipp;

        /* Source routing option copyover (reverse it) */
        err = ip_find_hdr_v4(ipha, ipp, B_TRUE);
        if (err != 0) {
            freemsg(tpi_mp);
            goto error3;
        }
        ip_pkt_source_route_reverse_v4(ipp);
    }

    ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL);
    ASSERT(!eager->tcp_tconnind_started);
    /*
     * If the SYN came with a credential, it's a loopback packet or a
     * labeled packet; attach the credential to the TPI message.
     */
    if (ira->ira_cred != NULL)
        mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid);

    eager->tcp_conn.tcp_eager_conn_ind = tpi_mp;

    /* Inherit the listener's SSL protection state */
    if ((eager->tcp_kssl_ent = listener->tcp_kssl_ent) != NULL) {
        kssl_hold_ent(eager->tcp_kssl_ent);
        eager->tcp_kssl_pending = B_TRUE;
    }

    ASSERT(eager->tcp_ordrel_mp == NULL);

    /* Inherit the listener's non-STREAMS flag */
    if (IPCL_IS_NONSTR(lconnp)) {
        econnp->conn_flags |= IPCL_NONSTR;
        /* All non-STREAMS tcp_ts are sockets */
        eager->tcp_issocket = B_TRUE;
    } else {
        /*
         * Pre-allocate the T_ordrel_ind mblk for TPI socket so that
         * at close time, we will always have that to send up.
         * Otherwise, we need to do special handling in case the
         * allocation fails at that time.
         */
        if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL)
            goto error3;
    }
    /*
     * Now that the IP addresses and ports are set up in econnp we
     * can do the IPsec policy work.
     */
    if (ira->ira_flags & IRAF_IPSEC_SECURE) {
        if (lconnp->conn_policy != NULL) {
            /*
             * Inherit the policy from the listener; use
             * actions from ira
             */
            if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) {
                CONN_DEC_REF(econnp);
                freemsg(mp);
                goto error3;
            }
        }
    }
    /*
     * tcp_set_destination() may set tcp_rwnd according to the route
     * metrics. If it does not, the eager's receive window will be set
     * to the listener's receive window later in this function.
     */
    eager->tcp_rwnd = 0;

    if (is_system_labeled()) {
        ip_xmit_attr_t *ixa = econnp->conn_ixa;

        ASSERT(ira->ira_tsl != NULL);
        /* Discard any old label */
        if (ixa->ixa_free_flags & IXA_FREE_TSL) {
            ASSERT(ixa->ixa_tsl != NULL);
            label_rele(ixa->ixa_tsl);
            ixa->ixa_free_flags &= ~IXA_FREE_TSL;
            ixa->ixa_tsl = NULL;
        }
        if ((lconnp->conn_mlp_type != mlptSingle ||
            lconnp->conn_mac_mode != CONN_MAC_DEFAULT) &&
            ira->ira_tsl != NULL) {
            /*
             * If this is an MLP connection or a MAC-Exempt
             * connection with an unlabeled node, packets are to be
             * exchanged using the security label of the received
             * SYN packet instead of the server application's label.
             * tsol_check_dest called from ip_set_destination
             * might later update TSF_UNLABELED by replacing
             * ixa_tsl with a new label.
             */
            label_hold(ira->ira_tsl);
            ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
            DTRACE_PROBE2(mlp_syn_accept, conn_t *,
                econnp, ts_label_t *, ixa->ixa_tsl)
        } else {
            ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
            DTRACE_PROBE2(syn_accept, conn_t *,
                econnp, ts_label_t *, ixa->ixa_tsl)
        }
        /*
         * conn_connect() called from tcp_set_destination will verify
         * the destination is allowed to receive packets at the
         * security label of the SYN-ACK we are generating. As part of
         * that, tsol_check_dest() may create a new effective label for
         * this connection.
         * Finally conn_connect() will call conn_update_label.
         * All that remains for TCP to do is to call
         * conn_build_hdr_template which is done as part of
         * tcp_set_destination.
         */
    }

    /*
     * Since we will clear tcp_listener before we clear tcp_detached
     * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress
     * so we can tell a TCP_IS_DETACHED_NONEAGER apart.
     */
    eager->tcp_hard_binding = B_TRUE;

    tcp_bind_hash_insert(&tcps->tcps_bind_fanout[
        TCP_BIND_HASH(econnp->conn_lport)], eager, 0);

    CL_INET_CONNECT(econnp, B_FALSE, err);
    if (err != 0) {
        tcp_bind_hash_remove(eager);
        goto error3;
    }

    SOCK_CONNID_BUMP(eager->tcp_connid);

    /*
     * Adapt our mss, ttl, ... based on the remote address.
     */

    if (tcp_set_destination(eager) != 0) {
        TCPS_BUMP_MIB(tcps, tcpAttemptFails);
        /* Undo the bind_hash_insert */
        tcp_bind_hash_remove(eager);
        goto error3;
    }

    /* Process all TCP options. */
    tcp_process_options(eager, tcpha);

    /* Is the other end ECN capable? */
    if (tcps->tcps_ecn_permitted >= 1 &&
        (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) {
        eager->tcp_ecn_ok = B_TRUE;
    }
    /*
     * The listener's conn_rcvbuf should be the default window size or a
     * window size changed via SO_RCVBUF option. First round up the
     * eager's tcp_rwnd to the nearest MSS. Then find out the window
     * scale option value if needed. Call tcp_rwnd_set() to finish the
     * setting.
     *
     * Note that if there is an rpipe metric associated with the remote
     * host, we should not inherit the receive window size from the
     * listener.
     */
    eager->tcp_rwnd = MSS_ROUNDUP(
        (eager->tcp_rwnd == 0 ? econnp->conn_rcvbuf :
        eager->tcp_rwnd), eager->tcp_mss);
    if (eager->tcp_snd_ws_ok)
        tcp_set_ws_value(eager);
    /*
     * Note that this is the only place tcp_rwnd_set() is called for
     * accepting a connection. We need to call it here instead of
     * after the 3-way handshake because we need to tell the other
     * side our rwnd in the SYN-ACK segment.
     */
    (void) tcp_rwnd_set(eager, eager->tcp_rwnd);

    ASSERT(eager->tcp_connp->conn_rcvbuf != 0 &&
        eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd);

    ASSERT(econnp->conn_rcvbuf != 0 &&
        econnp->conn_rcvbuf == eager->tcp_rwnd);

    /* Put a ref on the listener for the eager. */
    CONN_INC_REF(lconnp);
    mutex_enter(&listener->tcp_eager_lock);
    listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
    eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0;
    listener->tcp_eager_next_q0 = eager;
    eager->tcp_eager_prev_q0 = listener;

    /* Set tcp_listener before adding it to tcp_conn_fanout */
    eager->tcp_listener = listener;
    eager->tcp_saved_listener = listener;

    /*
     * Set tcp_listen_cnt so that when the connection is done, the counter
     * is decremented.
     */
    eager->tcp_listen_cnt = listener->tcp_listen_cnt;

    /*
     * Tag this detached tcp vector for later retrieval
     * by our listener client in tcp_accept().
     */
    eager->tcp_conn_req_seqnum = listener->tcp_conn_req_seqnum;
    listener->tcp_conn_req_cnt_q0++;
    if (++listener->tcp_conn_req_seqnum == -1) {
        /*
         * -1 is "special" and defined in TPI as something
         * that should never be used in T_CONN_IND
         */
        ++listener->tcp_conn_req_seqnum;
    }
    mutex_exit(&listener->tcp_eager_lock);

    if (listener->tcp_syn_defense) {
        /* Don't drop the SYN that comes from a good IP source */
        ipaddr_t *addr_cache;

        addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
        if (addr_cache != NULL && econnp->conn_faddr_v4 ==
            addr_cache[IP_ADDR_CACHE_HASH(econnp->conn_faddr_v4)]) {
            eager->tcp_dontdrop = B_TRUE;
        }
    }

    /*
     * We need to insert the eager in its own perimeter but as soon
     * as we do that, we expose the eager to the classifier and
     * should not touch any field outside the eager's perimeter.
     * So do all the work necessary before inserting the eager
     * in its own perimeter. Be optimistic that conn_connect()
     * will succeed but undo everything if it fails.
     */
    seg_seq = ntohl(tcpha->tha_seq);
    eager->tcp_irs = seg_seq;
    eager->tcp_rack = seg_seq;
    eager->tcp_rnxt = seg_seq + 1;
    eager->tcp_tcpha->tha_ack = htonl(eager->tcp_rnxt);
    TCPS_BUMP_MIB(tcps, tcpPassiveOpens);
    eager->tcp_state = TCPS_SYN_RCVD;
    DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
        econnp->conn_ixa, void, NULL, tcp_t *, eager, void, NULL,
        int32_t, TCPS_LISTEN);

    mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss,
        NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE);
    if (mp1 == NULL) {
        /*
         * Increment the ref count as we are going to
         * enqueue an mp in the squeue.
         */
        CONN_INC_REF(econnp);
        goto error;
    }
Since this is the first packet 1778 * being sent on the wire for the eager, our initial tcp_rto 1779 * is at least tcp_rexmit_interval_min which is a fairly 1780 * large value to allow the algorithm to adjust slowly to large 1781 * fluctuations of RTT during the first few transmissions. 1782 * 1783 * Starting the timer first and then sending the packet in this 1784 * case shouldn't make much difference since tcp_rexmit_interval_min 1785 * is of the order of several 100ms and starting the timer 1786 * first and then sending the packet will result in a difference 1787 * of a few microseconds. 1788 * 1789 * Without this optimization, we are forced to hold the fanout 1790 * lock across the ipcl_bind_insert() and sending the packet 1791 * so that we don't race against an incoming packet (maybe RST) 1792 * for this eager. 1793 * 1794 * It is necessary to acquire an extra reference on the eager 1795 * at this point and hold it until after tcp_send_data() to 1796 * ensure against an eager close race. 1797 */ 1798 1799 CONN_INC_REF(econnp); 1800 1801 TCP_TIMER_RESTART(eager, eager->tcp_rto); 1802 1803 /* 1804 * Insert the eager in its own perimeter now. We are ready to deal 1805 * with any packets on eager. 1806 */ 1807 if (ipcl_conn_insert(econnp) != 0) 1808 goto error; 1809 1810 ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp); 1811 freemsg(mp); 1812 /* 1813 * Send the SYN-ACK. Use the right squeue so that conn_ixa is 1814 * only used by one thread at a time. 1815 */ 1816 if (econnp->conn_sqp == lconnp->conn_sqp) { 1817 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, 1818 econnp->conn_ixa, __dtrace_tcp_void_ip_t *, mp1->b_rptr, 1819 tcp_t *, eager, __dtrace_tcp_tcph_t *, 1820 &mp1->b_rptr[econnp->conn_ixa->ixa_ip_hdr_length]); 1821 (void) conn_ip_output(mp1, econnp->conn_ixa); 1822 CONN_DEC_REF(econnp); 1823 } else { 1824 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_send_synack, 1825 econnp, NULL, SQ_PROCESS, SQTAG_TCP_SEND_SYNACK); 1826 } 1827 return; 1828 error: 1829 freemsg(mp1); 1830 eager->tcp_closemp_used = B_TRUE; 1831 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1832 mp1 = &eager->tcp_closemp; 1833 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill, 1834 econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2); 1835 1836 /* 1837 * If a connection already exists, send the mp to that connection so 1838 * that it can be appropriately dealt with. 1839 */ 1840 ipst = tcps->tcps_netstack->netstack_ip; 1841 1842 if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) { 1843 if (!IPCL_IS_CONNECTED(econnp)) { 1844 /* 1845 * Something bad happened. ipcl_conn_insert() 1846 * failed because a connection already existed 1847 * in the connected hash but we can't find it 1848 * anymore (someone blew it away). Just 1849 * free this message and hopefully the remote 1850 * will retransmit, at which time the SYN can be 1851 * treated as a new connection or dealt with via 1852 * a TH_RST if a connection already exists.
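 * (An illustrative recovery timeline, not part of the original
 * comment: the peer's SYN retransmit typically shows up about one
 * RTO later, is classified afresh, and then either creates a new
 * eager or is answered by the owner of the existing connection.)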
1853 */ 1854 CONN_DEC_REF(econnp); 1855 freemsg(mp); 1856 } else { 1857 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data, 1858 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1); 1859 } 1860 } else { 1861 /* Nobody wants this packet */ 1862 freemsg(mp); 1863 } 1864 return; 1865 error3: 1866 CONN_DEC_REF(econnp); 1867 error2: 1868 freemsg(mp); 1869 if (tlc_set) 1870 atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1); 1871 } 1872 1873 /* 1874 * In an ideal case of vertical partition in a NUMA architecture, it's 1875 * beneficial to have the listener and all the incoming connections 1876 * tied to the same squeue. The other constraint is that incoming 1877 * connections should be tied to the squeue attached to the interrupted 1878 * CPU for obvious locality reasons, so this leaves the listener to 1879 * be tied to the same squeue. Our only problem is that when the listener 1880 * is binding, the CPU that will get interrupted by the NIC whose 1881 * IP address the listener is binding to is not even known. So 1882 * the code below allows us to change that binding at the time the 1883 * CPU is interrupted by virtue of the incoming connection's squeue. 1884 * 1885 * This is useful only in the case of a listener bound to a specific IP 1886 * address. Other kinds of listeners get bound the 1887 * very first time and there is no attempt to rebind them. 1888 */ 1889 void 1890 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2, 1891 ip_recv_attr_t *ira) 1892 { 1893 conn_t *connp = (conn_t *)arg; 1894 squeue_t *sqp = (squeue_t *)arg2; 1895 squeue_t *new_sqp; 1896 uint32_t conn_flags; 1897 1898 /* 1899 * IP sets ira_sqp to either the sender's conn_sqp (for loopback) 1900 * or based on the ring (for packets from GLD). Otherwise it is 1901 * set based on lbolt, i.e., a somewhat random number. 1902 */ 1903 ASSERT(ira->ira_sqp != NULL); 1904 new_sqp = ira->ira_sqp; 1905 1906 if (connp->conn_fanout == NULL) 1907 goto done; 1908 1909 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) { 1910 mutex_enter(&connp->conn_fanout->connf_lock); 1911 mutex_enter(&connp->conn_lock); 1912 /* 1913 * No one from the read or write side can access us now 1914 * except for already queued packets on this squeue. 1915 * But since we haven't changed the squeue yet, they 1916 * can't execute. If they are processed after we have 1917 * changed the squeue, they are sent back to the 1918 * correct squeue down below. 1919 * But a listener close can race with the processing of an 1920 * incoming SYN. If incoming SYN processing changes 1921 * the squeue then the listener close which is waiting 1922 * to enter the squeue would operate on the wrong 1923 * squeue. Hence we don't change the squeue here unless 1924 * the refcount is exactly the minimum refcount. The 1925 * minimum refcount of 4 is counted as - 1 each for 1926 * TCP and IP, 1 for being in the classifier hash, and 1927 * 1 for the mblk being processed.
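 * For illustration: a concurrent closer would hold an additional
 * reference, making conn_ref exceed 4, so the conn_ref != 4 test
 * below leaves the squeue binding untouched whenever such a race
 * is possible.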
1928 */ 1929 1930 if (connp->conn_ref != 4 || 1931 connp->conn_tcp->tcp_state != TCPS_LISTEN) { 1932 mutex_exit(&connp->conn_lock); 1933 mutex_exit(&connp->conn_fanout->connf_lock); 1934 goto done; 1935 } 1936 if (connp->conn_sqp != new_sqp) { 1937 while (connp->conn_sqp != new_sqp) 1938 (void) casptr(&connp->conn_sqp, sqp, new_sqp); 1939 /* No special MT issues for outbound ixa_sqp hint */ 1940 connp->conn_ixa->ixa_sqp = new_sqp; 1941 } 1942 1943 do { 1944 conn_flags = connp->conn_flags; 1945 conn_flags |= IPCL_FULLY_BOUND; 1946 (void) cas32(&connp->conn_flags, connp->conn_flags, 1947 conn_flags); 1948 } while (!(connp->conn_flags & IPCL_FULLY_BOUND)); 1949 1950 mutex_exit(&connp->conn_fanout->connf_lock); 1951 mutex_exit(&connp->conn_lock); 1952 1953 /* 1954 * Assume we have picked a good squeue for the listener. Make 1955 * subsequent SYNs not try to change the squeue. 1956 */ 1957 connp->conn_recv = tcp_input_listener; 1958 } 1959 1960 done: 1961 if (connp->conn_sqp != sqp) { 1962 CONN_INC_REF(connp); 1963 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp, 1964 ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND); 1965 } else { 1966 tcp_input_listener(connp, mp, sqp, ira); 1967 } 1968 } 1969 1970 /* 1971 * Send up all messages queued on tcp_rcv_list. 1972 */ 1973 uint_t 1974 tcp_rcv_drain(tcp_t *tcp) 1975 { 1976 mblk_t *mp; 1977 uint_t ret = 0; 1978 #ifdef DEBUG 1979 uint_t cnt = 0; 1980 #endif 1981 queue_t *q = tcp->tcp_connp->conn_rq; 1982 1983 /* Can't drain on an eager connection */ 1984 if (tcp->tcp_listener != NULL) 1985 return (ret); 1986 1987 /* Can't be a non-STREAMS connection */ 1988 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp)); 1989 1990 /* No need for the push timer now. */ 1991 if (tcp->tcp_push_tid != 0) { 1992 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 1993 tcp->tcp_push_tid = 0; 1994 } 1995 1996 /* 1997 * Handle two cases here: we are currently fused or we were 1998 * previously fused and have some urgent data to be delivered 1999 * upstream. The latter happens because we either ran out of 2000 * memory or were detached and therefore sending the SIGURG was 2001 * deferred until this point. In either case we pass control 2002 * over to tcp_fuse_rcv_drain() since it may need to complete 2003 * some work. 2004 */ 2005 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) { 2006 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL : 2007 &tcp->tcp_fused_sigurg_mp)) 2008 return (ret); 2009 } 2010 2011 while ((mp = tcp->tcp_rcv_list) != NULL) { 2012 tcp->tcp_rcv_list = mp->b_next; 2013 mp->b_next = NULL; 2014 #ifdef DEBUG 2015 cnt += msgdsize(mp); 2016 #endif 2017 /* Does this need SSL processing first? */ 2018 if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) { 2019 DTRACE_PROBE1(kssl_mblk__ksslinput_rcvdrain, 2020 mblk_t *, mp); 2021 tcp_kssl_input(tcp, mp, NULL); 2022 continue; 2023 } 2024 putnext(q, mp); 2025 } 2026 #ifdef DEBUG 2027 ASSERT(cnt == tcp->tcp_rcv_cnt); 2028 #endif 2029 tcp->tcp_rcv_last_head = NULL; 2030 tcp->tcp_rcv_last_tail = NULL; 2031 tcp->tcp_rcv_cnt = 0; 2032 2033 if (canputnext(q)) 2034 return (tcp_rwnd_reopen(tcp)); 2035 2036 return (ret); 2037 } 2038 2039 /* 2040 * Queue data on tcp_rcv_list which is a b_next chain. 2041 * tcp_rcv_last_head/tail is the last element of this chain. 2042 * Each element of the chain is a b_cont chain. 2043 * 2044 * M_DATA messages are added to the current element. 2045 * Other messages are added as new (b_next) elements. 
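 * An illustrative picture (not in the original comment): with three
 * queued messages M1..M3,
 *
 *   tcp_rcv_list -> M1 --b_next--> M2 --b_next--> M3  <- tcp_rcv_last_head
 *                   |              |              |
 *                 b_cont...      b_cont...      b_cont...
 *                                                 ^- tcp_rcv_last_tail is
 *                                                    the last mblk of M3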
2046 */ 2047 void 2048 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr) 2049 { 2050 ASSERT(seg_len == msgdsize(mp)); 2051 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 2052 2053 if (is_system_labeled()) { 2054 ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL); 2055 /* 2056 * Provide for protocols above TCP such as RPC. NOPID leaves 2057 * db_cpid unchanged. 2058 * The cred could have already been set. 2059 */ 2060 if (cr != NULL) 2061 mblk_setcred(mp, cr, NOPID); 2062 } 2063 2064 if (tcp->tcp_rcv_list == NULL) { 2065 ASSERT(tcp->tcp_rcv_last_head == NULL); 2066 tcp->tcp_rcv_list = mp; 2067 tcp->tcp_rcv_last_head = mp; 2068 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 2069 tcp->tcp_rcv_last_tail->b_cont = mp; 2070 } else { 2071 tcp->tcp_rcv_last_head->b_next = mp; 2072 tcp->tcp_rcv_last_head = mp; 2073 } 2074 2075 while (mp->b_cont) 2076 mp = mp->b_cont; 2077 2078 tcp->tcp_rcv_last_tail = mp; 2079 tcp->tcp_rcv_cnt += seg_len; 2080 tcp->tcp_rwnd -= seg_len; 2081 } 2082 2083 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 2084 mblk_t * 2085 tcp_ack_mp(tcp_t *tcp) 2086 { 2087 uint32_t seq_no; 2088 tcp_stack_t *tcps = tcp->tcp_tcps; 2089 conn_t *connp = tcp->tcp_connp; 2090 2091 /* 2092 * There are a few cases to be considered while setting the sequence no. 2093 * Essentially, we can come here while processing an unacceptable pkt 2094 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 2095 * to snxt (per RFC 793); note the swnd wouldn't have been set yet. 2096 * If we are here for a zero window probe, stick with suna. In all 2097 * other cases, we check if suna + swnd encompasses snxt and set 2098 * the sequence number to snxt, if so. If snxt falls outside the 2099 * window (the receiver probably shrank its window), we will go with 2100 * suna + swnd, as otherwise the sequence no would be unacceptable to the 2101 * receiver. 2102 */ 2103 if (tcp->tcp_zero_win_probe) { 2104 seq_no = tcp->tcp_suna; 2105 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 2106 ASSERT(tcp->tcp_swnd == 0); 2107 seq_no = tcp->tcp_snxt; 2108 } else { 2109 seq_no = SEQ_GT(tcp->tcp_snxt, 2110 (tcp->tcp_suna + tcp->tcp_swnd)) ? 2111 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 2112 } 2113 2114 if (tcp->tcp_valid_bits) { 2115 /* 2116 * For the complex case where we have to send some 2117 * controls (FIN or SYN), let tcp_xmit_mp do it. 2118 */ 2119 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 2120 NULL, B_FALSE)); 2121 } else { 2122 /* Generate a simple ACK */ 2123 int data_length; 2124 uchar_t *rptr; 2125 tcpha_t *tcpha; 2126 mblk_t *mp1; 2127 int32_t total_hdr_len; 2128 int32_t tcp_hdr_len; 2129 int32_t num_sack_blk = 0; 2130 int32_t sack_opt_len; 2131 ip_xmit_attr_t *ixa = connp->conn_ixa; 2132 2133 /* 2134 * Allocate space for TCP + IP headers 2135 * and link-level header 2136 */ 2137 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 2138 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 2139 tcp->tcp_num_sack_blk); 2140 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 2141 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 2142 total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len; 2143 tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len; 2144 } else { 2145 total_hdr_len = connp->conn_ht_iphc_len; 2146 tcp_hdr_len = connp->conn_ht_ulp_len; 2147 } 2148 mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 2149 if (!mp1) 2150 return (NULL); 2151 2152 /* Update the latest receive window size in TCP header.
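 * (A worked example with made-up numbers: with window scaling in
 * effect, an rwnd of 256 KB and tcp_rcv_ws = 3 puts 262144 >> 3 =
 * 32768 into tha_win below.)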
*/ 2153 tcp->tcp_tcpha->tha_win = 2154 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws); 2155 /* copy in prototype TCP + IP header */ 2156 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 2157 mp1->b_rptr = rptr; 2158 mp1->b_wptr = rptr + total_hdr_len; 2159 bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len); 2160 2161 tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length]; 2162 2163 /* Set the TCP sequence number. */ 2164 tcpha->tha_seq = htonl(seq_no); 2165 2166 /* Set up the TCP flag field. */ 2167 tcpha->tha_flags = (uchar_t)TH_ACK; 2168 if (tcp->tcp_ecn_echo_on) 2169 tcpha->tha_flags |= TH_ECE; 2170 2171 tcp->tcp_rack = tcp->tcp_rnxt; 2172 tcp->tcp_rack_cnt = 0; 2173 2174 /* fill in timestamp option if in use */ 2175 if (tcp->tcp_snd_ts_ok) { 2176 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH; 2177 2178 U32_TO_BE32(llbolt, 2179 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4); 2180 U32_TO_BE32(tcp->tcp_ts_recent, 2181 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8); 2182 } 2183 2184 /* Fill in SACK options */ 2185 if (num_sack_blk > 0) { 2186 uchar_t *wptr = (uchar_t *)tcpha + 2187 connp->conn_ht_ulp_len; 2188 sack_blk_t *tmp; 2189 int32_t i; 2190 2191 wptr[0] = TCPOPT_NOP; 2192 wptr[1] = TCPOPT_NOP; 2193 wptr[2] = TCPOPT_SACK; 2194 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 2195 sizeof (sack_blk_t); 2196 wptr += TCPOPT_REAL_SACK_LEN; 2197 2198 tmp = tcp->tcp_sack_list; 2199 for (i = 0; i < num_sack_blk; i++) { 2200 U32_TO_BE32(tmp[i].begin, wptr); 2201 wptr += sizeof (tcp_seq); 2202 U32_TO_BE32(tmp[i].end, wptr); 2203 wptr += sizeof (tcp_seq); 2204 } 2205 tcpha->tha_offset_and_reserved += 2206 ((num_sack_blk * 2 + 1) << 4); 2207 } 2208 2209 ixa->ixa_pktlen = total_hdr_len; 2210 2211 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2212 ((ipha_t *)rptr)->ipha_length = htons(total_hdr_len); 2213 } else { 2214 ip6_t *ip6 = (ip6_t *)rptr; 2215 2216 ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN); 2217 } 2218 2219 /* 2220 * Prime pump for checksum calculation in IP. Include the 2221 * adjustment for a source route if any. 2222 */ 2223 data_length = tcp_hdr_len + connp->conn_sum; 2224 data_length = (data_length >> 16) + (data_length & 0xFFFF); 2225 tcpha->tha_sum = htons(data_length); 2226 2227 if (tcp->tcp_ip_forward_progress) { 2228 tcp->tcp_ip_forward_progress = B_FALSE; 2229 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF; 2230 } else { 2231 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF; 2232 } 2233 return (mp1); 2234 } 2235 } 2236 2237 /* 2238 * Handle M_DATA messages from IP. It's called directly from IP via 2239 * squeue for received IP packets. 2240 * 2241 * The first argument is always the connp/tcp to which the mp belongs. 2242 * There are no exceptions to this rule. The caller has already put 2243 * a reference on this connp/tcp and once tcp_input_data() returns, 2244 * the squeue will do the refrele. 2245 * 2246 * TH_SYN segments for the listener go directly to tcp_input_listener via 2247 * squeue. ICMP errors go directly to tcp_icmp_input().
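 * A rough sketch of the usual dispatch path implied by the above
 * (illustrative, not a literal trace): IP classifies the packet to
 * a connp, takes a reference, and calls
 * SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_input_data, connp,
 * ira, ...); the squeue drops that reference once tcp_input_data()
 * returns.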
2248 * 2249 * sqp: NULL = recursive, sqp != NULL means called from squeue 2250 */ 2251 void 2252 tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 2253 { 2254 int32_t bytes_acked; 2255 int32_t gap; 2256 mblk_t *mp1; 2257 uint_t flags; 2258 uint32_t new_swnd = 0; 2259 uchar_t *iphdr; 2260 uchar_t *rptr; 2261 int32_t rgap; 2262 uint32_t seg_ack; 2263 int seg_len; 2264 uint_t ip_hdr_len; 2265 uint32_t seg_seq; 2266 tcpha_t *tcpha; 2267 int urp; 2268 tcp_opt_t tcpopt; 2269 ip_pkt_t ipp; 2270 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 2271 uint32_t cwnd; 2272 uint32_t add; 2273 int npkt; 2274 int mss; 2275 conn_t *connp = (conn_t *)arg; 2276 squeue_t *sqp = (squeue_t *)arg2; 2277 tcp_t *tcp = connp->conn_tcp; 2278 tcp_stack_t *tcps = tcp->tcp_tcps; 2279 2280 /* 2281 * RST from fused tcp loopback peer should trigger an unfuse. 2282 */ 2283 if (tcp->tcp_fused) { 2284 TCP_STAT(tcps, tcp_fusion_aborted); 2285 tcp_unfuse(tcp); 2286 } 2287 2288 iphdr = mp->b_rptr; 2289 rptr = mp->b_rptr; 2290 ASSERT(OK_32PTR(rptr)); 2291 2292 ip_hdr_len = ira->ira_ip_hdr_length; 2293 if (connp->conn_recv_ancillary.crb_all != 0) { 2294 /* 2295 * Record packet information in the ip_pkt_t 2296 */ 2297 ipp.ipp_fields = 0; 2298 if (ira->ira_flags & IRAF_IS_IPV4) { 2299 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipp, 2300 B_FALSE); 2301 } else { 2302 uint8_t nexthdrp; 2303 2304 /* 2305 * IPv6 packets can only be received by applications 2306 * that are prepared to receive IPv6 addresses. 2307 * The IP fanout must ensure this. 2308 */ 2309 ASSERT(connp->conn_family == AF_INET6); 2310 2311 (void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp, 2312 &nexthdrp); 2313 ASSERT(nexthdrp == IPPROTO_TCP); 2314 2315 /* Could have caused a pullup? */ 2316 iphdr = mp->b_rptr; 2317 rptr = mp->b_rptr; 2318 } 2319 } 2320 ASSERT(DB_TYPE(mp) == M_DATA); 2321 ASSERT(mp->b_next == NULL); 2322 2323 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2324 seg_seq = ntohl(tcpha->tha_seq); 2325 seg_ack = ntohl(tcpha->tha_ack); 2326 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 2327 seg_len = (int)(mp->b_wptr - rptr) - 2328 (ip_hdr_len + TCP_HDR_LENGTH(tcpha)); 2329 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 2330 do { 2331 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 2332 (uintptr_t)INT_MAX); 2333 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 2334 } while ((mp1 = mp1->b_cont) != NULL && 2335 mp1->b_datap->db_type == M_DATA); 2336 } 2337 2338 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa, 2339 __dtrace_tcp_void_ip_t *, iphdr, tcp_t *, tcp, 2340 __dtrace_tcp_tcph_t *, tcpha); 2341 2342 if (tcp->tcp_state == TCPS_TIME_WAIT) { 2343 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 2344 seg_len, tcpha, ira); 2345 return; 2346 } 2347 2348 if (sqp != NULL) { 2349 /* 2350 * This is the correct place to update tcp_last_recv_time. Note 2351 * that it is also updated for tcp structures that belong to 2352 * the global and listener queues, which do not really need updating. 2353 * But that should not cause any harm. And it is updated for 2354 * all kinds of incoming segments, not only for data segments.
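 * (sqp is NULL only for recursive calls that tcp_input_data makes
 * into itself, e.g. while splitting a segment around an urgent byte
 * later in this function; in that case the time was already noted
 * when the segment first entered the squeue.)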
2355 */ 2356 tcp->tcp_last_recv_time = LBOLT_FASTPATH; 2357 } 2358 2359 flags = (unsigned int)tcpha->tha_flags & 0xFF; 2360 2361 BUMP_LOCAL(tcp->tcp_ibsegs); 2362 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2363 2364 if ((flags & TH_URG) && sqp != NULL) { 2365 /* 2366 * TCP can't handle urgent pointers that arrive before 2367 * the connection has been accept()ed since it can't 2368 * buffer OOB data. Discard segment if this happens. 2369 * 2370 * We can't just rely on a non-null tcp_listener to indicate 2371 * that the accept() has completed since unlinking of the 2372 * eager and completion of the accept are not atomic. 2373 * tcp_detached, when it is not set (B_FALSE), indicates 2374 * that the accept() has completed. 2375 * 2376 * Nor can it reassemble urgent pointers, so discard 2377 * if it's not the next segment expected. 2378 * 2379 * Otherwise, collapse chain into one mblk (discard if 2380 * that fails). This makes sure the headers, retransmitted 2381 * data, and new data all are in the same mblk. 2382 */ 2383 ASSERT(mp != NULL); 2384 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 2385 freemsg(mp); 2386 return; 2387 } 2388 /* Update pointers into message */ 2389 iphdr = rptr = mp->b_rptr; 2390 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2391 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 2392 /* 2393 * Since we can't handle any data with this urgent 2394 * pointer that is out of sequence, we expunge 2395 * the data. This allows us to still register 2396 * the urgent mark and generate the M_PCSIG, 2397 * which we can do. 2398 */ 2399 mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2400 seg_len = 0; 2401 } 2402 } 2403 2404 switch (tcp->tcp_state) { 2405 case TCPS_SYN_SENT: 2406 if (connp->conn_final_sqp == NULL && 2407 tcp_outbound_squeue_switch && sqp != NULL) { 2408 ASSERT(connp->conn_initial_sqp == connp->conn_sqp); 2409 connp->conn_final_sqp = sqp; 2410 if (connp->conn_final_sqp != connp->conn_sqp) { 2411 DTRACE_PROBE1(conn__final__sqp__switch, 2412 conn_t *, connp); 2413 CONN_INC_REF(connp); 2414 SQUEUE_SWITCH(connp, connp->conn_final_sqp); 2415 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 2416 tcp_input_data, connp, ira, ip_squeue_flag, 2417 SQTAG_CONNECT_FINISH); 2418 return; 2419 } 2420 DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp); 2421 } 2422 if (flags & TH_ACK) { 2423 /* 2424 * Note that our stack cannot send data before a 2425 * connection is established; therefore, the 2426 * following check is valid. Otherwise, it has 2427 * to be changed. 2428 */ 2429 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 2430 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2431 freemsg(mp); 2432 if (flags & TH_RST) 2433 return; 2434 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 2435 tcp, seg_ack, 0, TH_RST); 2436 return; 2437 } 2438 ASSERT(tcp->tcp_suna + 1 == seg_ack); 2439 } 2440 if (flags & TH_RST) { 2441 if (flags & TH_ACK) { 2442 DTRACE_TCP5(connect__refused, mblk_t *, NULL, 2443 ip_xmit_attr_t *, connp->conn_ixa, 2444 void_ip_t *, iphdr, tcp_t *, tcp, 2445 tcph_t *, tcpha); 2446 (void) tcp_clean_death(tcp, ECONNREFUSED); 2447 } 2448 freemsg(mp); 2449 return; 2450 } 2451 if (!(flags & TH_SYN)) { 2452 freemsg(mp); 2453 return; 2454 } 2455 2456 /* Process all TCP options. */ 2457 tcp_process_options(tcp, tcpha); 2458 /* 2459 * The following changes our rwnd to be a multiple of the 2460 * MIN(peer MSS, our MSS) for performance reasons. 2461 */ 2462 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf, 2463 tcp->tcp_mss)); 2464 2465 /* Is the other end ECN capable?
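 * Per RFC 3168, an ECN-capable peer answers our ECN-setup SYN with
 * a SYN-ACK carrying ECE set and CWR clear; any other combination
 * means ECN cannot be used on this connection.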
*/ 2466 if (tcp->tcp_ecn_ok) { 2467 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 2468 tcp->tcp_ecn_ok = B_FALSE; 2469 } 2470 } 2471 /* 2472 * Clear ECN flags because they may interfere with later 2473 * processing. 2474 */ 2475 flags &= ~(TH_ECE|TH_CWR); 2476 2477 tcp->tcp_irs = seg_seq; 2478 tcp->tcp_rack = seg_seq; 2479 tcp->tcp_rnxt = seg_seq + 1; 2480 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2481 if (!TCP_IS_DETACHED(tcp)) { 2482 /* Allocate room for SACK options if needed. */ 2483 connp->conn_wroff = connp->conn_ht_iphc_len; 2484 if (tcp->tcp_snd_sack_ok) 2485 connp->conn_wroff += TCPOPT_MAX_SACK_LEN; 2486 if (!tcp->tcp_loopback) 2487 connp->conn_wroff += tcps->tcps_wroff_xtra; 2488 2489 (void) proto_set_tx_wroff(connp->conn_rq, connp, 2490 connp->conn_wroff); 2491 } 2492 if (flags & TH_ACK) { 2493 /* 2494 * If we can't get the confirmation upstream, pretend 2495 * we didn't even see this one. 2496 * 2497 * XXX: how can we pretend we didn't see it if we 2498 * have updated rnxt et al.? 2499 * 2500 * For loopback we defer sending up the T_CONN_CON 2501 * until after some checks below. 2502 */ 2503 mp1 = NULL; 2504 /* 2505 * tcp_sendmsg() checks tcp_state without entering 2506 * the squeue so tcp_state should be updated before 2507 * sending up the connection confirmation. Probe the 2508 * state change below when we are sure the connection 2509 * confirmation has been sent. 2510 */ 2511 tcp->tcp_state = TCPS_ESTABLISHED; 2512 if (!tcp_conn_con(tcp, iphdr, mp, 2513 tcp->tcp_loopback ? &mp1 : NULL, ira)) { 2514 tcp->tcp_state = TCPS_SYN_SENT; 2515 freemsg(mp); 2516 return; 2517 } 2518 TCPS_CONN_INC(tcps); 2519 /* SYN was acked - making progress */ 2520 tcp->tcp_ip_forward_progress = B_TRUE; 2521 2522 /* One for the SYN */ 2523 tcp->tcp_suna = tcp->tcp_iss + 1; 2524 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 2525 2526 /* 2527 * If SYN was retransmitted, need to reset all 2528 * retransmission info. This is because this 2529 * segment will be treated as a dup ACK. 2530 */ 2531 if (tcp->tcp_rexmit) { 2532 tcp->tcp_rexmit = B_FALSE; 2533 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 2534 tcp->tcp_rexmit_max = tcp->tcp_snxt; 2535 tcp->tcp_snd_burst = tcp->tcp_localnet ? 2536 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 2537 tcp->tcp_ms_we_have_waited = 0; 2538 2539 /* 2540 * Set tcp_cwnd back to 1 MSS, per 2541 * recommendation from 2542 * draft-floyd-incr-init-win-01.txt, 2543 * Increasing TCP's Initial Window. 2544 */ 2545 tcp->tcp_cwnd = tcp->tcp_mss; 2546 } 2547 2548 tcp->tcp_swl1 = seg_seq; 2549 tcp->tcp_swl2 = seg_ack; 2550 2551 new_swnd = ntohs(tcpha->tha_win); 2552 tcp->tcp_swnd = new_swnd; 2553 if (new_swnd > tcp->tcp_max_swnd) 2554 tcp->tcp_max_swnd = new_swnd; 2555 2556 /* 2557 * Always send the three-way handshake ack immediately 2558 * in order to make the connection complete as soon as 2559 * possible on the accepting host. 2560 */ 2561 flags |= TH_ACK_NEEDED; 2562 2563 /* 2564 * Trace connect-established here. 2565 */ 2566 DTRACE_TCP5(connect__established, mblk_t *, NULL, 2567 ip_xmit_attr_t *, tcp->tcp_connp->conn_ixa, 2568 void_ip_t *, iphdr, tcp_t *, tcp, tcph_t *, tcpha); 2569 2570 /* Trace change from SYN_SENT -> ESTABLISHED here */ 2571 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2572 connp->conn_ixa, void, NULL, tcp_t *, tcp, 2573 void, NULL, int32_t, TCPS_SYN_SENT); 2574 2575 /* 2576 * Special case for loopback. At this point we have 2577 * received the SYN-ACK from the remote endpoint.
In 2578 * order to ensure that both endpoints reach the 2579 * fused state prior to any data exchange, the final 2580 * ACK needs to be sent before we indicate T_CONN_CON 2581 * to the module upstream. 2582 */ 2583 if (tcp->tcp_loopback) { 2584 mblk_t *ack_mp; 2585 2586 ASSERT(!tcp->tcp_unfusable); 2587 ASSERT(mp1 != NULL); 2588 /* 2589 * For loopback, we always get a pure SYN-ACK 2590 * and only need to send back the final ACK 2591 * with no data (this is because the other 2592 * tcp is ours and we don't do T/TCP). This 2593 * final ACK triggers the passive side to 2594 * perform fusion in ESTABLISHED state. 2595 */ 2596 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 2597 if (tcp->tcp_ack_tid != 0) { 2598 (void) TCP_TIMER_CANCEL(tcp, 2599 tcp->tcp_ack_tid); 2600 tcp->tcp_ack_tid = 0; 2601 } 2602 tcp_send_data(tcp, ack_mp); 2603 BUMP_LOCAL(tcp->tcp_obsegs); 2604 TCPS_BUMP_MIB(tcps, tcpOutAck); 2605 2606 if (!IPCL_IS_NONSTR(connp)) { 2607 /* Send up T_CONN_CON */ 2608 if (ira->ira_cred != NULL) { 2609 mblk_setcred(mp1, 2610 ira->ira_cred, 2611 ira->ira_cpid); 2612 } 2613 putnext(connp->conn_rq, mp1); 2614 } else { 2615 (*connp->conn_upcalls-> 2616 su_connected) 2617 (connp->conn_upper_handle, 2618 tcp->tcp_connid, 2619 ira->ira_cred, 2620 ira->ira_cpid); 2621 freemsg(mp1); 2622 } 2623 2624 freemsg(mp); 2625 return; 2626 } 2627 /* 2628 * Forget fusion; we need to handle more 2629 * complex cases below. Send the deferred 2630 * T_CONN_CON message upstream and proceed 2631 * as usual. Mark this tcp as not capable 2632 * of fusion. 2633 */ 2634 TCP_STAT(tcps, tcp_fusion_unfusable); 2635 tcp->tcp_unfusable = B_TRUE; 2636 if (!IPCL_IS_NONSTR(connp)) { 2637 if (ira->ira_cred != NULL) { 2638 mblk_setcred(mp1, ira->ira_cred, 2639 ira->ira_cpid); 2640 } 2641 putnext(connp->conn_rq, mp1); 2642 } else { 2643 (*connp->conn_upcalls->su_connected) 2644 (connp->conn_upper_handle, 2645 tcp->tcp_connid, ira->ira_cred, 2646 ira->ira_cpid); 2647 freemsg(mp1); 2648 } 2649 } 2650 2651 /* 2652 * Check to see if there is data to be sent. If 2653 * yes, set the transmit flag. Then check to see 2654 * if received data processing needs to be done. 2655 * If not, go straight to xmit_check. This shortcut 2656 * is OK as we don't support T/TCP. 2657 */ 2658 if (tcp->tcp_unsent) 2659 flags |= TH_XMIT_NEEDED; 2660 2661 if (seg_len == 0 && !(flags & TH_URG)) { 2662 freemsg(mp); 2663 goto xmit_check; 2664 } 2665 2666 flags &= ~TH_SYN; 2667 seg_seq++; 2668 break; 2669 } 2670 tcp->tcp_state = TCPS_SYN_RCVD; 2671 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2672 connp->conn_ixa, void_ip_t *, NULL, tcp_t *, tcp, 2673 tcph_t *, NULL, int32_t, TCPS_SYN_SENT); 2674 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss, 2675 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 2676 if (mp1 != NULL) { 2677 tcp_send_data(tcp, mp1); 2678 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 2679 } 2680 freemsg(mp); 2681 return; 2682 case TCPS_SYN_RCVD: 2683 if (flags & TH_ACK) { 2684 /* 2685 * In this state, a SYN|ACK packet is either bogus 2686 * (the other side must be ACKing our SYN, which 2687 * indicates it has seen the ACK for their SYN and 2688 * shouldn't retransmit it) or we're crossing SYNs 2689 * on an active open. 2690 */ 2691 if ((flags & TH_SYN) && !tcp->tcp_active_open) { 2692 freemsg(mp); 2693 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn", 2694 tcp, seg_ack, 0, TH_RST); 2695 return; 2696 } 2697 /* 2698 * NOTE: RFC 793 pg.
72 says this should be 2699 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt 2700 * but that would mean we have an ack that ignored 2701 * our SYN. 2702 */ 2703 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) || 2704 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2705 freemsg(mp); 2706 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack", 2707 tcp, seg_ack, 0, TH_RST); 2708 return; 2709 } 2710 /* 2711 * No sane TCP stack will send such a small window 2712 * without receiving any data. Just drop this invalid 2713 * ACK. We also shorten the abort timeout in case 2714 * this is an attack. 2715 */ 2716 if ((ntohs(tcpha->tha_win) << tcp->tcp_snd_ws) < 2717 (tcp->tcp_mss >> tcp_init_wnd_shft)) { 2718 freemsg(mp); 2719 TCP_STAT(tcps, tcp_zwin_ack_syn); 2720 tcp->tcp_second_ctimer_threshold = 2721 tcp_early_abort * SECONDS; 2722 return; 2723 } 2724 } 2725 break; 2726 case TCPS_LISTEN: 2727 /* 2728 * Only a TLI listener can come through this path when an 2729 * acceptor is going back to be a listener and a packet 2730 * for the acceptor hits the classifier. For a socket 2731 * listener, this can never happen because a listener 2732 * can never accept a connection on itself and hence a 2733 * socket acceptor cannot go back to being a listener. 2734 */ 2735 ASSERT(!TCP_IS_SOCKET(tcp)); 2736 /*FALLTHRU*/ 2737 case TCPS_CLOSED: 2738 case TCPS_BOUND: { 2739 conn_t *new_connp; 2740 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2741 2742 /* 2743 * Don't accept any input on a closed tcp as this TCP logically 2744 * does not exist on the system. Don't proceed further with 2745 * this TCP. For instance, this packet could trigger another 2746 * close of this tcp which would be disastrous for tcp_refcnt. 2747 * tcp_close_detached / tcp_clean_death / tcp_closei_local must 2748 * be called at most once on a TCP. In this case we need to 2749 * refeed the packet into the classifier and figure out where 2750 * the packet should go. 2751 */ 2752 new_connp = ipcl_classify(mp, ira, ipst); 2753 if (new_connp != NULL) { 2754 /* Drops ref on new_connp */ 2755 tcp_reinput(new_connp, mp, ira, ipst); 2756 return; 2757 } 2758 /* We failed to classify. For now just drop the packet */ 2759 freemsg(mp); 2760 return; 2761 } 2762 case TCPS_IDLE: 2763 /* 2764 * Handle the case where tcp_clean_death() has happened 2765 * on a connection (application hasn't closed yet) but a packet 2766 * was already queued on the squeue before tcp_clean_death() 2767 * was processed. Calling tcp_clean_death() twice on the same 2768 * connection can result in weird behaviour. 2769 */ 2770 freemsg(mp); 2771 return; 2772 default: 2773 break; 2774 } 2775 2776 /* 2777 * Already on the correct queue/perimeter. 2778 * If this is a detached connection and not an eager 2779 * connection hanging off a listener then new data 2780 * (past the FIN) will cause a reset. 2781 * We do a special check here where it 2782 * is out of the main line, rather than check 2783 * if we are detached every time we see new 2784 * data down below. 2785 */ 2786 if (TCP_IS_DETACHED_NONEAGER(tcp) && 2787 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) { 2788 TCPS_BUMP_MIB(tcps, tcpInClosed); 2789 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2790 2791 freemsg(mp); 2792 /* 2793 * This could be an SSL closure alert. We're detached so just 2794 * acknowledge it this last time.
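 * (Advancing tcp_rnxt over the alert bytes below keeps the ACK we
 * schedule here consistent with what was actually received.)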
2795 */ 2796 if (tcp->tcp_kssl_ctx != NULL) { 2797 kssl_release_ctx(tcp->tcp_kssl_ctx); 2798 tcp->tcp_kssl_ctx = NULL; 2799 2800 tcp->tcp_rnxt += seg_len; 2801 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2802 flags |= TH_ACK_NEEDED; 2803 goto ack_check; 2804 } 2805 2806 tcp_xmit_ctl("new data when detached", tcp, 2807 tcp->tcp_snxt, 0, TH_RST); 2808 (void) tcp_clean_death(tcp, EPROTO); 2809 return; 2810 } 2811 2812 mp->b_rptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2813 urp = ntohs(tcpha->tha_urp) - TCP_OLD_URP_INTERPRETATION; 2814 new_swnd = ntohs(tcpha->tha_win) << 2815 ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws); 2816 2817 if (tcp->tcp_snd_ts_ok) { 2818 if (!tcp_paws_check(tcp, tcpha, &tcpopt)) { 2819 /* 2820 * This segment is not acceptable. 2821 * Drop it and send back an ACK. 2822 */ 2823 freemsg(mp); 2824 flags |= TH_ACK_NEEDED; 2825 goto ack_check; 2826 } 2827 } else if (tcp->tcp_snd_sack_ok) { 2828 tcpopt.tcp = tcp; 2829 /* 2830 * SACK info is already updated in tcp_parse_options. Ignore 2831 * all other TCP options... 2832 */ 2833 (void) tcp_parse_options(tcpha, &tcpopt); 2834 } 2835 try_again:; 2836 mss = tcp->tcp_mss; 2837 gap = seg_seq - tcp->tcp_rnxt; 2838 rgap = tcp->tcp_rwnd - (gap + seg_len); 2839 /* 2840 * gap is the amount of sequence space between what we expect to see 2841 * and what we got for seg_seq. A positive value for gap means 2842 * something got lost. A negative value means we got some old stuff. 2843 */ 2844 if (gap < 0) { 2845 /* Old stuff present. Is the SYN in there? */ 2846 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 2847 (seg_len != 0)) { 2848 flags &= ~TH_SYN; 2849 seg_seq++; 2850 urp--; 2851 /* Recompute the gaps after noting the SYN. */ 2852 goto try_again; 2853 } 2854 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs); 2855 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, 2856 (seg_len > -gap ? -gap : seg_len)); 2857 /* Remove the old stuff from seg_len. */ 2858 seg_len += gap; 2859 /* 2860 * Anything left? 2861 * Make sure to check for unack'd FIN when rest of data 2862 * has been previously ack'd. 2863 */ 2864 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 2865 /* 2866 * Resets are only valid if they lie within our offered 2867 * window. If the RST bit is set, we just ignore this 2868 * segment. 2869 */ 2870 if (flags & TH_RST) { 2871 freemsg(mp); 2872 return; 2873 } 2874 2875 /* 2876 * The arrival of dup data packets indicates that we 2877 * may have postponed an ack for too long, or the other 2878 * side's RTT estimate is out of shape. Start acking 2879 * more often. 2880 */ 2881 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) && 2882 tcp->tcp_rack_cnt >= 1 && 2883 tcp->tcp_rack_abs_max > 2) { 2884 tcp->tcp_rack_abs_max--; 2885 } 2886 tcp->tcp_rack_cur_max = 1; 2887 2888 /* 2889 * This segment is "unacceptable". None of its 2890 * sequence space lies within our advertised window. 2891 * 2892 * Adjust seg_len to the original value for tracing. 2893 */ 2894 seg_len -= gap; 2895 if (connp->conn_debug) { 2896 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 2897 "tcp_rput: unacceptable, gap %d, rgap %d, " 2898 "flags 0x%x, seg_seq %u, seg_ack %u, " 2899 "seg_len %d, rnxt %u, snxt %u, %s", 2900 gap, rgap, flags, seg_seq, seg_ack, 2901 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 2902 tcp_display(tcp, NULL, 2903 DISP_ADDR_AND_PORT)); 2904 } 2905 2906 /* 2907 * Arrange to send an ACK in response to the 2908 * unacceptable segment per RFC 793 page 69.
There 2909 * is only one small difference between ours and the 2910 * acceptability test in the RFC - we accept an ACK-only 2911 * packet with SEG.SEQ = RCV.NXT+RCV.WND, and no ACK 2912 * will be generated for it. 2913 * 2914 * Note that we have to ACK an ACK-only packet at least 2915 * for stacks that send 0-length keep-alives with 2916 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 2917 * section 4.2.3.6. As long as we don't ever generate 2918 * an unacceptable packet in response to an incoming 2919 * packet that is unacceptable, it should not cause 2920 * "ACK wars". 2921 */ 2922 flags |= TH_ACK_NEEDED; 2923 2924 /* 2925 * Continue processing this segment in order to use the 2926 * ACK information it contains, but skip all other 2927 * sequence-number processing. Processing the ACK 2928 * information is necessary in order to 2929 * re-synchronize connections that may have lost 2930 * synchronization. 2931 * 2932 * We clear seg_len and flag fields related to 2933 * sequence number processing as they are not 2934 * to be trusted for an unacceptable segment. 2935 */ 2936 seg_len = 0; 2937 flags &= ~(TH_SYN | TH_FIN | TH_URG); 2938 goto process_ack; 2939 } 2940 2941 /* Fix seg_seq, and chew the gap off the front. */ 2942 seg_seq = tcp->tcp_rnxt; 2943 urp += gap; 2944 do { 2945 mblk_t *mp2; 2946 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 2947 (uintptr_t)UINT_MAX); 2948 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 2949 if (gap > 0) { 2950 mp->b_rptr = mp->b_wptr - gap; 2951 break; 2952 } 2953 mp2 = mp; 2954 mp = mp->b_cont; 2955 freeb(mp2); 2956 } while (gap < 0); 2957 /* 2958 * If the urgent data has already been acknowledged, we 2959 * should ignore TH_URG below 2960 */ 2961 if (urp < 0) 2962 flags &= ~TH_URG; 2963 } 2964 /* 2965 * rgap measures how the segment fits in the receive window. A negative 2966 * value means that -rgap bytes of it are beyond the window's right edge. 2967 */ 2968 if (rgap < 0) { 2969 mblk_t *mp2; 2970 2971 if (tcp->tcp_rwnd == 0) { 2972 TCPS_BUMP_MIB(tcps, tcpInWinProbe); 2973 } else { 2974 TCPS_BUMP_MIB(tcps, tcpInDataPastWinSegs); 2975 TCPS_UPDATE_MIB(tcps, tcpInDataPastWinBytes, -rgap); 2976 } 2977 2978 /* 2979 * seg_len does not include the FIN, so if more than 2980 * just the FIN is out of window, we act like we don't 2981 * see it. (If just the FIN is out of window, rgap 2982 * will be zero and we will go ahead and acknowledge 2983 * the FIN.) 2984 */ 2985 flags &= ~TH_FIN; 2986 2987 /* Fix seg_len and make sure there is something left. */ 2988 seg_len += rgap; 2989 if (seg_len <= 0) { 2990 /* 2991 * Resets are only valid if they lie within our offered 2992 * window. If the RST bit is set, we just ignore this 2993 * segment. 2994 */ 2995 if (flags & TH_RST) { 2996 freemsg(mp); 2997 return; 2998 } 2999 3000 /* Per RFC 793, we need to send back an ACK. */ 3001 flags |= TH_ACK_NEEDED; 3002 3003 /* 3004 * Send SIGURG as soon as possible, i.e. even 3005 * if the TH_URG was delivered in a window probe 3006 * packet (which will be unacceptable). 3007 * 3008 * We generate a signal if none has been generated 3009 * for this connection or if this is a new urgent 3010 * byte. Also send a zero-length "unmarked" message 3011 * to inform SIOCATMARK that this is not the mark. 3012 * 3013 * tcp_urp_last_valid is cleared when the T_exdata_ind 3014 * is sent up. This plus the check for old data 3015 * (gap >= 0) handles the wraparound of the sequence 3016 * number space without having to always track the 3017 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 3018 * this max in its rcv_up variable).
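 * (A worked example with made-up numbers: if seg_seq = 1000 and
 * urp = 3, the urgent mark is at sequence 1003, and it is
 * SEQ_GT(urp + seg_seq, tcp_urp_last), checked just below, that
 * decides whether this counts as a new urgent byte.)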
3019 * 3020 * This prevents duplicate SIGURGS due to a "late" 3021 * zero-window probe when the T_EXDATA_IND has already 3022 * been sent up. 3023 */ 3024 if ((flags & TH_URG) && 3025 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 3026 tcp->tcp_urp_last))) { 3027 if (IPCL_IS_NONSTR(connp)) { 3028 if (!TCP_IS_DETACHED(tcp)) { 3029 (*connp->conn_upcalls-> 3030 su_signal_oob) 3031 (connp->conn_upper_handle, 3032 urp); 3033 } 3034 } else { 3035 mp1 = allocb(0, BPRI_MED); 3036 if (mp1 == NULL) { 3037 freemsg(mp); 3038 return; 3039 } 3040 if (!TCP_IS_DETACHED(tcp) && 3041 !putnextctl1(connp->conn_rq, 3042 M_PCSIG, SIGURG)) { 3043 /* Try again on the rexmit. */ 3044 freemsg(mp1); 3045 freemsg(mp); 3046 return; 3047 } 3048 /* 3049 * If the next byte would be the mark 3050 * then mark with MARKNEXT else mark 3051 * with NOTMARKNEXT. 3052 */ 3053 if (gap == 0 && urp == 0) 3054 mp1->b_flag |= MSGMARKNEXT; 3055 else 3056 mp1->b_flag |= MSGNOTMARKNEXT; 3057 freemsg(tcp->tcp_urp_mark_mp); 3058 tcp->tcp_urp_mark_mp = mp1; 3059 flags |= TH_SEND_URP_MARK; 3060 } 3061 tcp->tcp_urp_last_valid = B_TRUE; 3062 tcp->tcp_urp_last = urp + seg_seq; 3063 } 3064 /* 3065 * If this is a zero window probe, continue to 3066 * process the ACK part. But we need to set seg_len 3067 * to 0 to avoid data processing. Otherwise just 3068 * drop the segment and send back an ACK. 3069 */ 3070 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 3071 flags &= ~(TH_SYN | TH_URG); 3072 seg_len = 0; 3073 goto process_ack; 3074 } else { 3075 freemsg(mp); 3076 goto ack_check; 3077 } 3078 } 3079 /* Pitch out of window stuff off the end. */ 3080 rgap = seg_len; 3081 mp2 = mp; 3082 do { 3083 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 3084 (uintptr_t)INT_MAX); 3085 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 3086 if (rgap < 0) { 3087 mp2->b_wptr += rgap; 3088 if ((mp1 = mp2->b_cont) != NULL) { 3089 mp2->b_cont = NULL; 3090 freemsg(mp1); 3091 } 3092 break; 3093 } 3094 } while ((mp2 = mp2->b_cont) != NULL); 3095 } 3096 ok:; 3097 /* 3098 * TCP should check ECN info for segments inside the window only. 3099 * Therefore the check should be done here. 3100 */ 3101 if (tcp->tcp_ecn_ok) { 3102 if (flags & TH_CWR) { 3103 tcp->tcp_ecn_echo_on = B_FALSE; 3104 } 3105 /* 3106 * Note that both ECN_CE and CWR can be set in the 3107 * same segment. In this case, we once again turn 3108 * on ECN_ECHO. 3109 */ 3110 if (connp->conn_ipversion == IPV4_VERSION) { 3111 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 3112 3113 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 3114 tcp->tcp_ecn_echo_on = B_TRUE; 3115 } 3116 } else { 3117 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 3118 3119 if ((vcf & htonl(IPH_ECN_CE << 20)) == 3120 htonl(IPH_ECN_CE << 20)) { 3121 tcp->tcp_ecn_echo_on = B_TRUE; 3122 } 3123 } 3124 } 3125 3126 /* 3127 * Check whether we can update tcp_ts_recent. This test is 3128 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 3129 * Extensions for High Performance: An Update", Internet Draft. 3130 */ 3131 if (tcp->tcp_snd_ts_ok && 3132 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 3133 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 3134 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 3135 tcp->tcp_last_rcv_lbolt = LBOLT_FASTPATH64; 3136 } 3137 3138 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 3139 /* 3140 * FIN in an out of order segment. We record this in 3141 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 3142 * Clear the FIN so that any check on FIN flag will fail. 
3143 * Remember that FIN also counts in the sequence number 3144 * space. So we need to ack out-of-order FIN-only segments. 3145 */ 3146 if (flags & TH_FIN) { 3147 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 3148 tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 3149 flags &= ~TH_FIN; 3150 flags |= TH_ACK_NEEDED; 3151 } 3152 if (seg_len > 0) { 3153 /* Fill in the SACK blk list. */ 3154 if (tcp->tcp_snd_sack_ok) { 3155 tcp_sack_insert(tcp->tcp_sack_list, 3156 seg_seq, seg_seq + seg_len, 3157 &(tcp->tcp_num_sack_blk)); 3158 } 3159 3160 /* 3161 * Attempt reassembly and see if we have something 3162 * ready to go. 3163 */ 3164 mp = tcp_reass(tcp, mp, seg_seq); 3165 /* Always ack out of order packets */ 3166 flags |= TH_ACK_NEEDED | TH_PUSH; 3167 if (mp) { 3168 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 3169 (uintptr_t)INT_MAX); 3170 seg_len = mp->b_cont ? msgdsize(mp) : 3171 (int)(mp->b_wptr - mp->b_rptr); 3172 seg_seq = tcp->tcp_rnxt; 3173 /* 3174 * If a gap is filled and the seq num and len 3175 * of the gap match those of a previously 3176 * received FIN, put the FIN flag back in. 3177 */ 3178 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3179 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3180 flags |= TH_FIN; 3181 tcp->tcp_valid_bits &= 3182 ~TCP_OFO_FIN_VALID; 3183 } 3184 if (tcp->tcp_reass_tid != 0) { 3185 (void) TCP_TIMER_CANCEL(tcp, 3186 tcp->tcp_reass_tid); 3187 /* 3188 * Restart the timer if there is still 3189 * data in the reassembly queue. 3190 */ 3191 if (tcp->tcp_reass_head != NULL) { 3192 tcp->tcp_reass_tid = TCP_TIMER( 3193 tcp, tcp_reass_timer, 3194 tcps->tcps_reass_timeout); 3195 } else { 3196 tcp->tcp_reass_tid = 0; 3197 } 3198 } 3199 } else { 3200 /* 3201 * Keep going even with NULL mp. 3202 * There may be a useful ACK or something else 3203 * we don't want to miss. 3204 * 3205 * But TCP should not perform fast retransmit 3206 * because of the ack number. TCP uses 3207 * seg_len == 0 to determine if it is a pure 3208 * ACK. And this is not a pure ACK. 3209 */ 3210 seg_len = 0; 3211 ofo_seg = B_TRUE; 3212 3213 if (tcps->tcps_reass_timeout != 0 && 3214 tcp->tcp_reass_tid == 0) { 3215 tcp->tcp_reass_tid = TCP_TIMER(tcp, 3216 tcp_reass_timer, 3217 tcps->tcps_reass_timeout); 3218 } 3219 } 3220 } 3221 } else if (seg_len > 0) { 3222 TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs); 3223 TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len); 3224 /* 3225 * If an out of order FIN was received before, and the seq 3226 * num and len of the new segment match that of the FIN, 3227 * put the FIN flag back in. 3228 */ 3229 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3230 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3231 flags |= TH_FIN; 3232 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 3233 } 3234 } 3235 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 3236 if (flags & TH_RST) { 3237 freemsg(mp); 3238 switch (tcp->tcp_state) { 3239 case TCPS_SYN_RCVD: 3240 (void) tcp_clean_death(tcp, ECONNREFUSED); 3241 break; 3242 case TCPS_ESTABLISHED: 3243 case TCPS_FIN_WAIT_1: 3244 case TCPS_FIN_WAIT_2: 3245 case TCPS_CLOSE_WAIT: 3246 (void) tcp_clean_death(tcp, ECONNRESET); 3247 break; 3248 case TCPS_CLOSING: 3249 case TCPS_LAST_ACK: 3250 (void) tcp_clean_death(tcp, 0); 3251 break; 3252 default: 3253 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3254 (void) tcp_clean_death(tcp, ENXIO); 3255 break; 3256 } 3257 return; 3258 } 3259 if (flags & TH_SYN) { 3260 /* 3261 * See RFC 793, Page 71 3262 * 3263 * The seq number must be in the window as it should 3264 * be "fixed" above.
If it is outside the window, it should 3265 * already have been rejected. Note that we allow seg_seq to be 3266 * rnxt + rwnd because we want to accept a 0 window probe. 3267 */ 3268 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 3269 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 3270 freemsg(mp); 3271 /* 3272 * If the ACK flag is not set, just use our snxt as the 3273 * seq number of the RST segment. 3274 */ 3275 if (!(flags & TH_ACK)) { 3276 seg_ack = tcp->tcp_snxt; 3277 } 3278 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 3279 TH_RST|TH_ACK); 3280 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3281 (void) tcp_clean_death(tcp, ECONNRESET); 3282 return; 3283 } 3284 /* 3285 * urp could be -1 when the urp field in the packet is 0 3286 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 3287 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 3288 */ 3289 if (flags & TH_URG && urp >= 0) { 3290 if (!tcp->tcp_urp_last_valid || 3291 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 3292 /* 3293 * Non-STREAMS sockets handle the urgent data a little 3294 * differently from STREAMS based sockets. There is no 3295 * need to mark any mblks with the MSG{NOT,}MARKNEXT 3296 * flags to keep SIOCATMARK happy. Instead a 3297 * su_signal_oob upcall is made to update the mark. 3298 * Nor does a T_EXDATA_IND mblk need to be 3299 * prepended to the urgent data. The urgent data is 3300 * delivered using the su_recv upcall, where we set 3301 * the MSG_OOB flag to indicate that it is urg data. 3302 * 3303 * Neither TH_SEND_URP_MARK nor TH_MARKNEXT_NEEDED 3304 * are used by non-STREAMS sockets. 3305 */ 3306 if (IPCL_IS_NONSTR(connp)) { 3307 if (!TCP_IS_DETACHED(tcp)) { 3308 (*connp->conn_upcalls->su_signal_oob) 3309 (connp->conn_upper_handle, urp); 3310 } 3311 } else { 3312 /* 3313 * If we haven't generated the signal yet for 3314 * this urgent pointer value, do it now. Also, 3315 * send up a zero-length M_DATA indicating 3316 * whether or not this is the mark. The latter 3317 * is not needed when a T_EXDATA_IND is sent up. 3318 * However, if there are allocation failures 3319 * this code relies on the sender retransmitting 3320 * and the socket code for determining the mark 3321 * should not block waiting for the peer to 3322 * transmit. Thus, for simplicity we always 3323 * send up the mark indication. 3324 */ 3325 mp1 = allocb(0, BPRI_MED); 3326 if (mp1 == NULL) { 3327 freemsg(mp); 3328 return; 3329 } 3330 if (!TCP_IS_DETACHED(tcp) && 3331 !putnextctl1(connp->conn_rq, M_PCSIG, 3332 SIGURG)) { 3333 /* Try again on the rexmit. */ 3334 freemsg(mp1); 3335 freemsg(mp); 3336 return; 3337 } 3338 /* 3339 * Mark with NOTMARKNEXT for now. 3340 * The code below will change this to MARKNEXT 3341 * if we are at the mark. 3342 * 3343 * If there are allocation failures (e.g. in 3344 * dupmsg below) the next time tcp_input_data 3345 * sees the urgent segment it will send up the 3346 * MSGMARKNEXT message.
3347 */ 3348 mp1->b_flag |= MSGNOTMARKNEXT; 3349 freemsg(tcp->tcp_urp_mark_mp); 3350 tcp->tcp_urp_mark_mp = mp1; 3351 flags |= TH_SEND_URP_MARK; 3352 #ifdef DEBUG 3353 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3354 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 3355 "last %x, %s", 3356 seg_seq, urp, tcp->tcp_urp_last, 3357 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3358 #endif /* DEBUG */ 3359 } 3360 tcp->tcp_urp_last_valid = B_TRUE; 3361 tcp->tcp_urp_last = urp + seg_seq; 3362 } else if (tcp->tcp_urp_mark_mp != NULL) { 3363 /* 3364 * An allocation failure prevented the previous 3365 * tcp_input_data from sending up the allocated 3366 * MSG*MARKNEXT message - send it up this time 3367 * around. 3368 */ 3369 flags |= TH_SEND_URP_MARK; 3370 } 3371 3372 /* 3373 * If the urgent byte is in this segment, make sure that it is 3374 * all by itself. This makes it much easier to deal with the 3375 * possibility of an allocation failure on the T_exdata_ind. 3376 * Note that seg_len is the number of bytes in the segment, and 3377 * urp is the offset into the segment of the urgent byte. 3378 * urp < seg_len means that the urgent byte is in this segment. 3379 */ 3380 if (urp < seg_len) { 3381 if (seg_len != 1) { 3382 uint32_t tmp_rnxt; 3383 /* 3384 * Break it up and feed it back in. 3385 * Re-attach the IP header. 3386 */ 3387 mp->b_rptr = iphdr; 3388 if (urp > 0) { 3389 /* 3390 * There is stuff before the urgent 3391 * byte. 3392 */ 3393 mp1 = dupmsg(mp); 3394 if (!mp1) { 3395 /* 3396 * Trim from urgent byte on. 3397 * The rest will come back. 3398 */ 3399 (void) adjmsg(mp, 3400 urp - seg_len); 3401 tcp_input_data(connp, 3402 mp, NULL, ira); 3403 return; 3404 } 3405 (void) adjmsg(mp1, urp - seg_len); 3406 /* Feed this piece back in. */ 3407 tmp_rnxt = tcp->tcp_rnxt; 3408 tcp_input_data(connp, mp1, NULL, ira); 3409 /* 3410 * If the data passed back in was not 3411 * processed (ie: bad ACK) sending 3412 * the remainder back in will cause a 3413 * loop. In this case, drop the 3414 * packet and let the sender try 3415 * sending a good packet. 3416 */ 3417 if (tmp_rnxt == tcp->tcp_rnxt) { 3418 freemsg(mp); 3419 return; 3420 } 3421 } 3422 if (urp != seg_len - 1) { 3423 uint32_t tmp_rnxt; 3424 /* 3425 * There is stuff after the urgent 3426 * byte. 3427 */ 3428 mp1 = dupmsg(mp); 3429 if (!mp1) { 3430 /* 3431 * Trim everything beyond the 3432 * urgent byte. The rest will 3433 * come back. 3434 */ 3435 (void) adjmsg(mp, 3436 urp + 1 - seg_len); 3437 tcp_input_data(connp, 3438 mp, NULL, ira); 3439 return; 3440 } 3441 (void) adjmsg(mp1, urp + 1 - seg_len); 3442 tmp_rnxt = tcp->tcp_rnxt; 3443 tcp_input_data(connp, mp1, NULL, ira); 3444 /* 3445 * If the data passed back in was not 3446 * processed (ie: bad ACK) sending 3447 * the remainder back in will cause a 3448 * loop. In this case, drop the 3449 * packet and let the sender try 3450 * sending a good packet. 3451 */ 3452 if (tmp_rnxt == tcp->tcp_rnxt) { 3453 freemsg(mp); 3454 return; 3455 } 3456 } 3457 tcp_input_data(connp, mp, NULL, ira); 3458 return; 3459 } 3460 /* 3461 * This segment contains only the urgent byte. We 3462 * have to allocate the T_exdata_ind, if we can. 3463 */ 3464 if (IPCL_IS_NONSTR(connp)) { 3465 int error; 3466 3467 (*connp->conn_upcalls->su_recv) 3468 (connp->conn_upper_handle, mp, seg_len, 3469 MSG_OOB, &error, NULL); 3470 /* 3471 * We should never be in the middle of a 3472 * fallback; the squeue guarantees that.
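 * (A fallback in progress would surface as EOPNOTSUPP from the
 * su_recv upcall above, which is exactly what the assertion below
 * rules out.)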
3473 */ 3474 ASSERT(error != EOPNOTSUPP); 3475 mp = NULL; 3476 goto update_ack; 3477 } else if (!tcp->tcp_urp_mp) { 3478 struct T_exdata_ind *tei; 3479 mp1 = allocb(sizeof (struct T_exdata_ind), 3480 BPRI_MED); 3481 if (!mp1) { 3482 /* 3483 * Sigh... It'll be back. 3484 * Generate any MSG*MARK message now. 3485 */ 3486 freemsg(mp); 3487 seg_len = 0; 3488 if (flags & TH_SEND_URP_MARK) { 3489 3490 3491 ASSERT(tcp->tcp_urp_mark_mp); 3492 tcp->tcp_urp_mark_mp->b_flag &= 3493 ~MSGNOTMARKNEXT; 3494 tcp->tcp_urp_mark_mp->b_flag |= 3495 MSGMARKNEXT; 3496 } 3497 goto ack_check; 3498 } 3499 mp1->b_datap->db_type = M_PROTO; 3500 tei = (struct T_exdata_ind *)mp1->b_rptr; 3501 tei->PRIM_type = T_EXDATA_IND; 3502 tei->MORE_flag = 0; 3503 mp1->b_wptr = (uchar_t *)&tei[1]; 3504 tcp->tcp_urp_mp = mp1; 3505 #ifdef DEBUG 3506 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3507 "tcp_rput: allocated exdata_ind %s", 3508 tcp_display(tcp, NULL, 3509 DISP_PORT_ONLY)); 3510 #endif /* DEBUG */ 3511 /* 3512 * There is no need to send a separate MSG*MARK 3513 * message since the T_EXDATA_IND will be sent 3514 * now. 3515 */ 3516 flags &= ~TH_SEND_URP_MARK; 3517 freemsg(tcp->tcp_urp_mark_mp); 3518 tcp->tcp_urp_mark_mp = NULL; 3519 } 3520 /* 3521 * Now we are all set. On the next putnext upstream, 3522 * tcp_urp_mp will be non-NULL and will get prepended 3523 * to what has to be this piece containing the urgent 3524 * byte. If for any reason we abort this segment below, 3525 * if it comes back, we will have this ready, or it 3526 * will get blown off in close. 3527 */ 3528 } else if (urp == seg_len) { 3529 /* 3530 * The urgent byte is the next byte after this sequence 3531 * number. If this endpoint is non-STREAMS, then there 3532 * is nothing to do here since the socket has already 3533 * been notified about the urg pointer by the 3534 * su_signal_oob call above. 3535 * 3536 * In case of STREAMS, some more work might be needed. 3537 * If there is data, it is marked with MSGMARKNEXT 3538 * and any tcp_urp_mark_mp is discarded since it is not 3539 * needed. Otherwise, if the code above just allocated 3540 * a zero-length tcp_urp_mark_mp message, that message 3541 * is tagged with MSGMARKNEXT. Sending up these 3542 * MSGMARKNEXT messages makes SIOCATMARK work correctly 3543 * even though the T_EXDATA_IND will not be sent up 3544 * until the urgent byte arrives.
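 * (Illustration: an application polling ioctl(fd, SIOCATMARK,
 * &atmark) is answered from the stream head's mark state, which
 * these MSGMARKNEXT messages keep current while the urgent byte
 * itself is still in flight.)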
3545 */ 3546 if (!IPCL_IS_NONSTR(tcp->tcp_connp)) { 3547 if (seg_len != 0) { 3548 flags |= TH_MARKNEXT_NEEDED; 3549 freemsg(tcp->tcp_urp_mark_mp); 3550 tcp->tcp_urp_mark_mp = NULL; 3551 flags &= ~TH_SEND_URP_MARK; 3552 } else if (tcp->tcp_urp_mark_mp != NULL) { 3553 flags |= TH_SEND_URP_MARK; 3554 tcp->tcp_urp_mark_mp->b_flag &= 3555 ~MSGNOTMARKNEXT; 3556 tcp->tcp_urp_mark_mp->b_flag |= 3557 MSGMARKNEXT; 3558 } 3559 } 3560 #ifdef DEBUG 3561 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3562 "tcp_rput: AT MARK, len %d, flags 0x%x, %s", 3563 seg_len, flags, 3564 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3565 #endif /* DEBUG */ 3566 } 3567 #ifdef DEBUG 3568 else { 3569 /* Data left until we hit mark */ 3570 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3571 "tcp_rput: URP %d bytes left, %s", 3572 urp - seg_len, tcp_display(tcp, NULL, 3573 DISP_PORT_ONLY)); 3574 } 3575 #endif /* DEBUG */ 3576 } 3577 3578 process_ack: 3579 if (!(flags & TH_ACK)) { 3580 freemsg(mp); 3581 goto xmit_check; 3582 } 3583 } 3584 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 3585 3586 if (bytes_acked > 0) 3587 tcp->tcp_ip_forward_progress = B_TRUE; 3588 if (tcp->tcp_state == TCPS_SYN_RCVD) { 3589 /* 3590 * tcp_sendmsg() checks tcp_state without entering 3591 * the squeue so tcp_state should be updated before 3592 * sending up a connection confirmation or a new 3593 * connection indication. 3594 */ 3595 tcp->tcp_state = TCPS_ESTABLISHED; 3596 3597 /* 3598 * We are seeing the final ack in the three-way 3599 * handshake of an actively opened connection, 3600 * so we must send up a T_CONN_CON. 3601 */ 3602 if (tcp->tcp_active_open) { 3603 if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) { 3604 freemsg(mp); 3605 tcp->tcp_state = TCPS_SYN_RCVD; 3606 return; 3607 } 3608 /* 3609 * Don't fuse the loopback endpoints for 3610 * simultaneous active opens. 3611 */ 3612 if (tcp->tcp_loopback) { 3613 TCP_STAT(tcps, tcp_fusion_unfusable); 3614 tcp->tcp_unfusable = B_TRUE; 3615 } 3616 /* 3617 * For simultaneous active open, trace receipt of final 3618 * ACK as tcp:::connect-established. 3619 */ 3620 DTRACE_TCP5(connect__established, mblk_t *, NULL, 3621 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3622 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3623 } else if (IPCL_IS_NONSTR(connp)) { 3624 /* 3625 * The 3-way handshake has completed, so notify the 3626 * socket of the new connection. 3627 * 3628 * Being here means the eager is fine, but it can 3629 * get a TH_RST at any point between now and when 3630 * accept completes, and then disappear. We need to 3631 * ensure that the reference to the eager is valid after 3632 * we get out of the eager's perimeter. So we do 3633 * an extra refhold. 3634 */ 3635 CONN_INC_REF(connp); 3636 3637 if (!tcp_newconn_notify(tcp, ira)) { 3638 freemsg(mp); 3639 /* notification did not go up, so drop ref */ 3640 CONN_DEC_REF(connp); 3641 return; 3642 } 3643 /* 3644 * For passive open, trace receipt of final ACK as 3645 * tcp:::accept-established. 3646 */ 3647 DTRACE_TCP5(accept__established, mblk_t *, NULL, 3648 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3649 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3650 } else if (((tcp->tcp_kssl_ent == NULL) || 3651 !tcp->tcp_kssl_pending)) { 3652 /* 3653 * 3-way handshake complete - this is a STREAMS based 3654 * socket, so pass up the T_CONN_IND.
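 * (The T_CONN_IND mblk itself was allocated when the SYN arrived
 * and parked in tcp_conn.tcp_eager_conn_ind; the code below only
 * hands it to the listener's squeue.)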
3655 */ 3656 tcp_t *listener = tcp->tcp_listener; 3657 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind; 3658 3659 tcp->tcp_tconnind_started = B_TRUE; 3660 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 3661 ASSERT(mp != NULL); 3662 /* 3663 * Being here means the eager is fine, but it can 3664 * still get a TH_RST at any point between now and 3665 * when the accept completes, and then disappear. 3666 * We need to ensure that the reference to the eager 3667 * remains valid after we leave the eager's perimeter, 3668 * so we do an extra refhold. 3669 */ 3670 CONN_INC_REF(connp); 3671 3672 /* 3673 * The listener also exists because of the refhold 3674 * done in tcp_input_listener. It's possible that it 3675 * has closed; we will check that once we 3676 * get inside the listener's context. 3677 */ 3678 CONN_INC_REF(listener->tcp_connp); 3679 if (listener->tcp_connp->conn_sqp == 3680 connp->conn_sqp) { 3681 /* 3682 * We optimize by not calling an SQUEUE_ENTER 3683 * on the listener since we know that the 3684 * listener and eager squeues are the same. 3685 * We are able to make this check safely only 3686 * because neither the eager nor the listener 3687 * can change its squeue. Only an active connect 3688 * can change its squeue. 3689 */ 3690 tcp_send_conn_ind(listener->tcp_connp, mp, 3691 listener->tcp_connp->conn_sqp); 3692 CONN_DEC_REF(listener->tcp_connp); 3693 } else if (!tcp->tcp_loopback) { 3694 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp, 3695 mp, tcp_send_conn_ind, 3696 listener->tcp_connp, NULL, SQ_FILL, 3697 SQTAG_TCP_CONN_IND); 3698 } else { 3699 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp, 3700 mp, tcp_send_conn_ind, 3701 listener->tcp_connp, NULL, SQ_NODRAIN, 3702 SQTAG_TCP_CONN_IND); 3703 } 3704 /* 3705 * For passive open, trace receipt of final ACK as 3706 * tcp:::accept-established. 3707 */ 3708 DTRACE_TCP5(accept__established, mblk_t *, NULL, 3709 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3710 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3711 } 3712 TCPS_CONN_INC(tcps); 3713 3714 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */ 3715 bytes_acked--; 3716 /* SYN was acked - making progress */ 3717 tcp->tcp_ip_forward_progress = B_TRUE; 3718 3719 /* 3720 * If SYN was retransmitted, need to reset all 3721 * retransmission info as this segment will be 3722 * treated as a dup ACK. 3723 */ 3724 if (tcp->tcp_rexmit) { 3725 tcp->tcp_rexmit = B_FALSE; 3726 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 3727 tcp->tcp_rexmit_max = tcp->tcp_snxt; 3728 tcp->tcp_snd_burst = tcp->tcp_localnet ? 3729 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 3730 tcp->tcp_ms_we_have_waited = 0; 3731 tcp->tcp_cwnd = mss; 3732 } 3733 3734 /* 3735 * We set the send window to zero here. 3736 * This is needed if there is data to be 3737 * processed already on the queue. 3738 * Later (at the swnd_update label), the 3739 * "new_swnd > tcp_swnd" condition is satisfied 3740 * and the XMIT_NEEDED flag is set in the current 3741 * (SYN_RCVD) state. This ensures tcp_wput_data() is 3742 * called if there is already data on the queue in 3743 * this state.
3744 */ 3745 tcp->tcp_swnd = 0; 3746 3747 if (new_swnd > tcp->tcp_max_swnd) 3748 tcp->tcp_max_swnd = new_swnd; 3749 tcp->tcp_swl1 = seg_seq; 3750 tcp->tcp_swl2 = seg_ack; 3751 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 3752 3753 /* Trace change from SYN_RCVD -> ESTABLISHED here */ 3754 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 3755 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL, 3756 int32_t, TCPS_SYN_RCVD); 3757 3758 /* Fuse when both sides are in ESTABLISHED state */ 3759 if (tcp->tcp_loopback && do_tcp_fusion) 3760 tcp_fuse(tcp, iphdr, tcpha); 3761 3762 } 3763 /* This code follows 4.4BSD-Lite2 mostly. */ 3764 if (bytes_acked < 0) 3765 goto est; 3766 3767 /* 3768 * If TCP is ECN capable and the congestion experience bit is 3769 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be 3770 * done once per window (or more loosely, per RTT). 3771 */ 3772 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max)) 3773 tcp->tcp_cwr = B_FALSE; 3774 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) { 3775 if (!tcp->tcp_cwr) { 3776 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss; 3777 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss; 3778 tcp->tcp_cwnd = npkt * mss; 3779 /* 3780 * If the cwnd is 0, use the timer to clock out 3781 * new segments. This is required by the ECN spec. 3782 */ 3783 if (npkt == 0) { 3784 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 3785 /* 3786 * This makes sure that when the ACK comes 3787 * back, we will increase tcp_cwnd by 1 MSS. 3788 */ 3789 tcp->tcp_cwnd_cnt = 0; 3790 } 3791 tcp->tcp_cwr = B_TRUE; 3792 /* 3793 * This marks the end of the current window of in 3794 * flight data. That is why we don't use 3795 * tcp_suna + tcp_swnd. Only data in flight can 3796 * provide ECN info. 3797 */ 3798 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3799 tcp->tcp_ecn_cwr_sent = B_FALSE; 3800 } 3801 } 3802 3803 mp1 = tcp->tcp_xmit_head; 3804 if (bytes_acked == 0) { 3805 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) { 3806 int dupack_cnt; 3807 3808 TCPS_BUMP_MIB(tcps, tcpInDupAck); 3809 /* 3810 * Fast retransmit. When we have seen exactly three 3811 * identical ACKs while we have unacked data 3812 * outstanding we take it as a hint that our peer 3813 * dropped something. 3814 * 3815 * If TCP is retransmitting, don't do fast retransmit. 3816 */ 3817 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt && 3818 ! tcp->tcp_rexmit) { 3819 /* Do Limited Transmit */ 3820 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) < 3821 tcps->tcps_dupack_fast_retransmit) { 3822 /* 3823 * RFC 3042 3824 * 3825 * What we need to do is temporarily 3826 * increase tcp_cwnd so that new 3827 * data can be sent if it is allowed 3828 * by the receive window (tcp_rwnd). 3829 * tcp_wput_data() will take care of 3830 * the rest. 3831 * 3832 * If the connection is SACK capable, 3833 * only do limited xmit when there 3834 * is SACK info. 3835 * 3836 * Note how tcp_cwnd is incremented. 3837 * The first dup ACK will increase 3838 * it by 1 MSS. The second dup ACK 3839 * will increase it by 2 MSS. This 3840 * means that only 1 new segment will 3841 * be sent for each dup ACK. 
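* * A worked instance of the arithmetic above (illustrative numbers only): assume cwnd = 4 * MSS with 4 segments outstanding. The first dup ACK adds mss << 0, so cwnd becomes 5 * MSS and one new segment may go out; the TH_LIMIT_XMIT cleanup near xmit_check later subtracts the same amount. The second dup ACK adds mss << 1 to the restored cwnd, which again permits exactly one additional new segment. The ACK clock keeps ticking without permanently inflating cwnd.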
3842 */ 3843 if (tcp->tcp_unsent > 0 && 3844 (!tcp->tcp_snd_sack_ok || 3845 (tcp->tcp_snd_sack_ok && 3846 tcp->tcp_notsack_list != NULL))) { 3847 tcp->tcp_cwnd += mss << 3848 (tcp->tcp_dupack_cnt - 1); 3849 flags |= TH_LIMIT_XMIT; 3850 } 3851 } else if (dupack_cnt == 3852 tcps->tcps_dupack_fast_retransmit) { 3853 3854 /* 3855 * If we have reduced tcp_ssthresh 3856 * because of ECN, do not reduce it again 3857 * unless it is already one window of data 3858 * away. After one window of data, tcp_cwr 3859 * should then be cleared. Note that 3860 * for a non-ECN-capable connection, tcp_cwr 3861 * should always be false. 3862 * 3863 * Adjust cwnd since the duplicate 3864 * ack indicates that a packet was 3865 * dropped (due to congestion.) 3866 */ 3867 if (!tcp->tcp_cwr) { 3868 npkt = ((tcp->tcp_snxt - 3869 tcp->tcp_suna) >> 1) / mss; 3870 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 3871 mss; 3872 tcp->tcp_cwnd = (npkt + 3873 tcp->tcp_dupack_cnt) * mss; 3874 } 3875 if (tcp->tcp_ecn_ok) { 3876 tcp->tcp_cwr = B_TRUE; 3877 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3878 tcp->tcp_ecn_cwr_sent = B_FALSE; 3879 } 3880 3881 /* 3882 * We do Hoe's algorithm. Refer to her 3883 * paper "Improving the Start-up Behavior 3884 * of a Congestion Control Scheme for TCP," 3885 * which appeared in SIGCOMM '96. 3886 * 3887 * Save the highest seq no we have sent so far. 3888 * Be careful about the invisible FIN byte. 3889 */ 3890 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 3891 (tcp->tcp_unsent == 0)) { 3892 tcp->tcp_rexmit_max = tcp->tcp_fss; 3893 } else { 3894 tcp->tcp_rexmit_max = tcp->tcp_snxt; 3895 } 3896 3897 /* 3898 * Do not allow bursty traffic during 3899 * fast recovery. Refer to Fall and Floyd's 3900 * paper "Simulation-based Comparisons of 3901 * Tahoe, Reno and SACK TCP" (in CCR?). 3902 * This is a best current practice. 3903 */ 3904 tcp->tcp_snd_burst = TCP_CWND_SS; 3905 3906 /* 3907 * For SACK: 3908 * Calculate tcp_pipe, which is the 3909 * estimated number of bytes in the 3910 * network. 3911 * 3912 * tcp_fack is the highest sack'ed seq num 3913 * TCP has received. 3914 * 3915 * tcp_pipe is explained in the above quoted 3916 * Fall and Floyd's paper. tcp_fack is 3917 * explained in Mathis and Mahdavi's 3918 * "Forward Acknowledgment: Refining TCP 3919 * Congestion Control" in SIGCOMM '96. 3920 */ 3921 if (tcp->tcp_snd_sack_ok) { 3922 if (tcp->tcp_notsack_list != NULL) { 3923 tcp->tcp_pipe = tcp->tcp_snxt - 3924 tcp->tcp_fack; 3925 tcp->tcp_sack_snxt = seg_ack; 3926 flags |= TH_NEED_SACK_REXMIT; 3927 } else { 3928 /* 3929 * Always initialize tcp_pipe 3930 * even though we don't have 3931 * any SACK info. If later 3932 * we get SACK info and 3933 * tcp_pipe is not initialized, 3934 * funny things will happen. 3935 */ 3936 tcp->tcp_pipe = 3937 tcp->tcp_cwnd_ssthresh; 3938 } 3939 } else { 3940 flags |= TH_REXMIT_NEEDED; 3941 } /* tcp_snd_sack_ok */ 3942 3943 } else { 3944 /* 3945 * Here we perform congestion 3946 * avoidance, but NOT slow start. 3947 * This is known as the Fast 3948 * Recovery Algorithm. 3949 */ 3950 if (tcp->tcp_snd_sack_ok && 3951 tcp->tcp_notsack_list != NULL) { 3952 flags |= TH_NEED_SACK_REXMIT; 3953 tcp->tcp_pipe -= mss; 3954 if (tcp->tcp_pipe < 0) 3955 tcp->tcp_pipe = 0; 3956 } else { 3957 /* 3958 * We know that one more packet has 3959 * left the pipe, thus we can update 3960 * cwnd.
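* * For illustration (example numbers only): if the third dup ACK set ssthresh to 4 * MSS and cwnd to (4 + 3) * MSS, every later dup ACK lands here and grows cwnd by one more MSS (capped at tcp_cwnd_max below) to reflect the segment that just left the network; when that opens room beyond what is outstanding, TH_XMIT_NEEDED clocks out new data.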
3961 */ 3962 cwnd = tcp->tcp_cwnd + mss; 3963 if (cwnd > tcp->tcp_cwnd_max) 3964 cwnd = tcp->tcp_cwnd_max; 3965 tcp->tcp_cwnd = cwnd; 3966 if (tcp->tcp_unsent > 0) 3967 flags |= TH_XMIT_NEEDED; 3968 } 3969 } 3970 } 3971 } else if (tcp->tcp_zero_win_probe) { 3972 /* 3973 * If the window has opened, need to arrange 3974 * to send additional data. 3975 */ 3976 if (new_swnd != 0) { 3977 /* tcp_suna != tcp_snxt */ 3978 /* Packet contains a window update */ 3979 TCPS_BUMP_MIB(tcps, tcpInWinUpdate); 3980 tcp->tcp_zero_win_probe = 0; 3981 tcp->tcp_timer_backoff = 0; 3982 tcp->tcp_ms_we_have_waited = 0; 3983 3984 /* 3985 * Transmit starting with tcp_suna since 3986 * the one byte probe is not ack'ed. 3987 * If TCP has sent more than one identical 3988 * probe, tcp_rexmit will be set. That means 3989 * tcp_ss_rexmit() will send out the one 3990 * byte along with new data. Otherwise, 3991 * fake the retransmission. 3992 */ 3993 flags |= TH_XMIT_NEEDED; 3994 if (!tcp->tcp_rexmit) { 3995 tcp->tcp_rexmit = B_TRUE; 3996 tcp->tcp_dupack_cnt = 0; 3997 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 3998 tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 3999 } 4000 } 4001 } 4002 goto swnd_update; 4003 } 4004 4005 /* 4006 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 4007 * If the ACK value acks something that we have not yet sent, it might 4008 * be an old duplicate segment. Send an ACK to re-synchronize the 4009 * other side. 4010 * Note: reset in response to an unacceptable ACK in SYN_RCVD 4011 * state is handled above, so we can always just drop the segment and 4012 * send an ACK here. 4013 * 4014 * In the case where the peer shrinks the window, we see the new window 4015 * update, but all the data sent previously is queued up by the peer. 4016 * To account for this, in tcp_process_shrunk_swnd(), the sequence 4017 * number that was already sent and within the window is recorded, 4018 * and tcp_snxt is then updated. 4019 * 4020 * If the window has previously shrunk and an ACK is received for data 4021 * not yet sent according to tcp_snxt, it may still be valid. If 4022 * the ACK is for data within the window at the time the window was 4023 * shrunk, then the ACK is acceptable. In this case tcp_snxt is set to 4024 * the sequence number ACK'ed. 4025 * 4026 * If the ACK covers all the data sent at the time the window was 4027 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE. 4028 * 4029 * Should we send ACKs in response to ACK-only segments? 4030 */ 4031 4032 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 4033 if ((tcp->tcp_is_wnd_shrnk) && 4034 (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) { 4035 uint32_t data_acked_ahead_snxt; 4036 4037 data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt; 4038 tcp_update_xmit_tail(tcp, seg_ack); 4039 tcp->tcp_unsent -= data_acked_ahead_snxt; 4040 } else { 4041 TCPS_BUMP_MIB(tcps, tcpInAckUnsent); 4042 /* drop the received segment */ 4043 freemsg(mp); 4044 4045 /* 4046 * Send back an ACK. If tcp_drop_ack_unsent_cnt is 4047 * greater than 0, check if the number of such 4048 * bogus ACKs is greater than that count. If yes, 4049 * don't send back any ACK. This prevents TCP from 4050 * getting into an ACK storm if somehow an attacker 4051 * successfully spoofs an acceptable segment to our 4052 * peer. If this continues (count > 2 X threshold), 4053 * we should abort this connection.
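* * Concretely, with tcp_drop_ack_unsent_cnt set to, say, 10: the first 10 bogus ACKs each draw an ACK in response, the 11th through 20th are dropped silently, and the 21st (counter above 2 x 10) tears the connection down with EPROTO. (A worked pass through the checks below; the threshold value is only an example.)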
4054 */ 4055 if (tcp_drop_ack_unsent_cnt > 0 && 4056 ++tcp->tcp_in_ack_unsent > 4057 tcp_drop_ack_unsent_cnt) { 4058 TCP_STAT(tcps, tcp_in_ack_unsent_drop); 4059 if (tcp->tcp_in_ack_unsent > 2 * 4060 tcp_drop_ack_unsent_cnt) { 4061 (void) tcp_clean_death(tcp, EPROTO); 4062 } 4063 return; 4064 } 4065 mp = tcp_ack_mp(tcp); 4066 if (mp != NULL) { 4067 BUMP_LOCAL(tcp->tcp_obsegs); 4068 TCPS_BUMP_MIB(tcps, tcpOutAck); 4069 tcp_send_data(tcp, mp); 4070 } 4071 return; 4072 } 4073 } else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack, 4074 tcp->tcp_snxt_shrunk)) { 4075 tcp->tcp_is_wnd_shrnk = B_FALSE; 4076 } 4077 4078 /* 4079 * TCP gets a new ACK, update the notsack'ed list to delete those 4080 * blocks that are covered by this ACK. 4081 */ 4082 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 4083 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack, 4084 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list)); 4085 } 4086 4087 /* 4088 * If we got an ACK after fast retransmit, check to see 4089 * if it is a partial ACK. If it is not and the congestion 4090 * window was inflated to account for the other side's 4091 * cached packets, retract it. If it is, do Hoe's algorithm. 4092 */ 4093 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) { 4094 ASSERT(tcp->tcp_rexmit == B_FALSE); 4095 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) { 4096 tcp->tcp_dupack_cnt = 0; 4097 /* 4098 * Restore the orig tcp_cwnd_ssthresh after 4099 * fast retransmit phase. 4100 */ 4101 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) { 4102 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh; 4103 } 4104 tcp->tcp_rexmit_max = seg_ack; 4105 tcp->tcp_cwnd_cnt = 0; 4106 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4107 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4108 4109 /* 4110 * Remove all notsack info to avoid confusion with 4111 * the next fast retransmit/recovery phase. 4112 */ 4113 if (tcp->tcp_snd_sack_ok) { 4114 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, 4115 tcp); 4116 } 4117 } else { 4118 if (tcp->tcp_snd_sack_ok && 4119 tcp->tcp_notsack_list != NULL) { 4120 flags |= TH_NEED_SACK_REXMIT; 4121 tcp->tcp_pipe -= mss; 4122 if (tcp->tcp_pipe < 0) 4123 tcp->tcp_pipe = 0; 4124 } else { 4125 /* 4126 * Hoe's algorithm: 4127 * 4128 * Retransmit the unack'ed segment and 4129 * restart fast recovery. Note that we 4130 * need to scale back tcp_cwnd to the 4131 * original value when we started fast 4132 * recovery. This is to prevent overly 4133 * aggressive behaviour in sending new 4134 * segments. 4135 */ 4136 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh + 4137 tcps->tcps_dupack_fast_retransmit * mss; 4138 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd; 4139 flags |= TH_REXMIT_NEEDED; 4140 } 4141 } 4142 } else { 4143 tcp->tcp_dupack_cnt = 0; 4144 if (tcp->tcp_rexmit) { 4145 /* 4146 * TCP is retransmitting. If the ACK acks all 4147 * outstanding data, update tcp_rexmit_max and 4148 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt 4149 * to the correct value. 4150 * 4151 * Note that SEQ_LEQ() is used. This is to avoid 4152 * unnecessary fast retransmit caused by dup ACKs 4153 * received when TCP does slow start retransmission 4154 * after a time out. During this phase, TCP may 4155 * send out segments which are already received. 4156 * This causes dup ACKs to be sent back.
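* * Example with illustrative sequence numbers: suppose a timeout fired with suna = 1000 and snxt = rexmit_max = 4000. An ACK for 2500 satisfies SEQ_LEQ(2500, 4000), so we stay in the retransmission phase, pull tcp_rexmit_nxt forward and keep transmitting; an ACK beyond 4000 takes the else branch below and turns tcp_rexmit off.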
4157 */ 4158 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) { 4159 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) { 4160 tcp->tcp_rexmit_nxt = seg_ack; 4161 } 4162 if (seg_ack != tcp->tcp_rexmit_max) { 4163 flags |= TH_XMIT_NEEDED; 4164 } 4165 } else { 4166 tcp->tcp_rexmit = B_FALSE; 4167 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 4168 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4169 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4170 } 4171 tcp->tcp_ms_we_have_waited = 0; 4172 } 4173 } 4174 4175 TCPS_BUMP_MIB(tcps, tcpInAckSegs); 4176 TCPS_UPDATE_MIB(tcps, tcpInAckBytes, bytes_acked); 4177 tcp->tcp_suna = seg_ack; 4178 if (tcp->tcp_zero_win_probe != 0) { 4179 tcp->tcp_zero_win_probe = 0; 4180 tcp->tcp_timer_backoff = 0; 4181 } 4182 4183 /* 4184 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed. 4185 * Note that it cannot be the SYN being ack'ed; the code flow 4186 * would not reach here. 4187 */ 4188 if (mp1 == NULL) { 4189 goto fin_acked; 4190 } 4191 4192 /* 4193 * Update the congestion window. 4194 * 4195 * If TCP is not ECN capable or TCP is ECN capable but the 4196 * congestion experience bit is not set, increase the tcp_cwnd as 4197 * usual. 4198 */ 4199 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) { 4200 cwnd = tcp->tcp_cwnd; 4201 add = mss; 4202 4203 if (cwnd >= tcp->tcp_cwnd_ssthresh) { 4204 /* 4205 * This is to prevent an increase of less than 1 MSS of 4206 * tcp_cwnd. With partial increase, tcp_wput_data() 4207 * may send out tinygrams in order to preserve mblk 4208 * boundaries. 4209 * 4210 * By initializing tcp_cwnd_cnt to new tcp_cwnd and 4211 * decrementing it by 1 MSS for every ACK, tcp_cwnd is 4212 * increased by 1 MSS for every RTT. 4213 */ 4214 if (tcp->tcp_cwnd_cnt <= 0) { 4215 tcp->tcp_cwnd_cnt = cwnd + add; 4216 } else { 4217 tcp->tcp_cwnd_cnt -= add; 4218 add = 0; 4219 } 4220 } 4221 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max); 4222 } 4223 4224 /* See if the latest urgent data has been acknowledged */ 4225 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && 4226 SEQ_GT(seg_ack, tcp->tcp_urg)) 4227 tcp->tcp_valid_bits &= ~TCP_URG_VALID; 4228 4229 /* Can we update the RTT estimates? */ 4230 if (tcp->tcp_snd_ts_ok) { 4231 /* Ignore zero timestamp echo-reply. */ 4232 if (tcpopt.tcp_opt_ts_ecr != 0) { 4233 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4234 (int32_t)tcpopt.tcp_opt_ts_ecr); 4235 } 4236 4237 /* If needed, restart the timer. */ 4238 if (tcp->tcp_set_timer == 1) { 4239 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4240 tcp->tcp_set_timer = 0; 4241 } 4242 /* 4243 * Update tcp_csuna in case the other side stops sending 4244 * us timestamps. 4245 */ 4246 tcp->tcp_csuna = tcp->tcp_snxt; 4247 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) { 4248 /* 4249 * An ACK sequence we haven't seen before, so get the RTT 4250 * and update the RTO. But first check if the timestamp is 4251 * valid to use. 4252 */ 4253 if ((mp1->b_next != NULL) && 4254 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next))) 4255 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4256 (int32_t)(intptr_t)mp1->b_prev); 4257 else 4258 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4259 4260 /* Remember the last sequence to be ACKed */ 4261 tcp->tcp_csuna = seg_ack; 4262 if (tcp->tcp_set_timer == 1) { 4263 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4264 tcp->tcp_set_timer = 0; 4265 } 4266 } else { 4267 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4268 } 4269 4270 /* Eat acknowledged bytes off the xmit queue.
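* (A sketch of the loop below with made-up sizes: if bytes_acked is 1000 and the head mblk holds 700 bytes, that mblk is freed and the walk continues; the next mblk, say 500 bytes, drives bytes_acked to -200, so its b_rptr is advanced to wptr - 200 and the loop stops, keeping the 200 not-yet-acked bytes on the queue.)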
*/ 4271 for (;;) { 4272 mblk_t *mp2; 4273 uchar_t *wptr; 4274 4275 wptr = mp1->b_wptr; 4276 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX); 4277 bytes_acked -= (int)(wptr - mp1->b_rptr); 4278 if (bytes_acked < 0) { 4279 mp1->b_rptr = wptr + bytes_acked; 4280 /* 4281 * Set a new timestamp if all the bytes timed by the 4282 * old timestamp have been ack'ed. 4283 */ 4284 if (SEQ_GT(seg_ack, 4285 (uint32_t)(uintptr_t)(mp1->b_next))) { 4286 mp1->b_prev = 4287 (mblk_t *)(uintptr_t)LBOLT_FASTPATH; 4288 mp1->b_next = NULL; 4289 } 4290 break; 4291 } 4292 mp1->b_next = NULL; 4293 mp1->b_prev = NULL; 4294 mp2 = mp1; 4295 mp1 = mp1->b_cont; 4296 4297 /* 4298 * This notification is required for some zero-copy 4299 * clients to maintain a copy semantic. After the data 4300 * is ack'ed, the client is safe to modify or reuse the buffer. 4301 */ 4302 if (tcp->tcp_snd_zcopy_aware && 4303 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY)) 4304 tcp_zcopy_notify(tcp); 4305 freeb(mp2); 4306 if (bytes_acked == 0) { 4307 if (mp1 == NULL) { 4308 /* Everything is ack'ed, clear the tail. */ 4309 tcp->tcp_xmit_tail = NULL; 4310 /* 4311 * Cancel the timer unless we are still 4312 * waiting for an ACK for the FIN packet. 4313 */ 4314 if (tcp->tcp_timer_tid != 0 && 4315 tcp->tcp_snxt == tcp->tcp_suna) { 4316 (void) TCP_TIMER_CANCEL(tcp, 4317 tcp->tcp_timer_tid); 4318 tcp->tcp_timer_tid = 0; 4319 } 4320 goto pre_swnd_update; 4321 } 4322 if (mp2 != tcp->tcp_xmit_tail) 4323 break; 4324 tcp->tcp_xmit_tail = mp1; 4325 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 4326 (uintptr_t)INT_MAX); 4327 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr - 4328 mp1->b_rptr); 4329 break; 4330 } 4331 if (mp1 == NULL) { 4332 /* 4333 * More was acked but there is nothing more 4334 * outstanding. This means that the FIN was 4335 * just acked or that we're talking to a clown. 4336 */ 4337 fin_acked: 4338 ASSERT(tcp->tcp_fin_sent); 4339 tcp->tcp_xmit_tail = NULL; 4340 if (tcp->tcp_fin_sent) { 4341 /* FIN was acked - making progress */ 4342 if (!tcp->tcp_fin_acked) 4343 tcp->tcp_ip_forward_progress = B_TRUE; 4344 tcp->tcp_fin_acked = B_TRUE; 4345 if (tcp->tcp_linger_tid != 0 && 4346 TCP_TIMER_CANCEL(tcp, 4347 tcp->tcp_linger_tid) >= 0) { 4348 tcp_stop_lingering(tcp); 4349 freemsg(mp); 4350 mp = NULL; 4351 } 4352 } else { 4353 /* 4354 * We should never get here because 4355 * we have already checked that the 4356 * number of bytes ack'ed should be 4357 * smaller than or equal to what we 4358 * have sent so far (it is the 4359 * acceptability check of the ACK). 4360 * We can only get here if the send 4361 * queue is corrupted. 4362 * 4363 * Terminate the connection and 4364 * panic the system. It is better 4365 * for us to panic than to continue 4366 * and risk further disaster. 4367 */ 4368 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 4369 tcp->tcp_rnxt, TH_RST|TH_ACK); 4370 panic("Memory corruption " 4371 "detected for connection %s.", 4372 tcp_display(tcp, NULL, 4373 DISP_ADDR_AND_PORT)); 4374 /*NOTREACHED*/ 4375 } 4376 goto pre_swnd_update; 4377 } 4378 ASSERT(mp2 != tcp->tcp_xmit_tail); 4379 } 4380 if (tcp->tcp_unsent) { 4381 flags |= TH_XMIT_NEEDED; 4382 } 4383 pre_swnd_update: 4384 tcp->tcp_xmit_head = mp1; 4385 swnd_update: 4386 /* 4387 * The following check is different from most other implementations. 4388 * For bi-directional transfer, when segments are dropped, the 4389 * "normal" check will not accept a window update in those 4390 * retransmitted segments.
Failing to do that, TCP may send out 4391 * segments which are outside the receiver's window. As TCP accepts 4392 * the ACK in those retransmitted segments, if the window update in 4393 * the same segment is not accepted, TCP will incorrectly calculate 4394 * that it can send more segments. This can create a deadlock 4395 * with the receiver if its window becomes zero. 4396 */ 4397 if (SEQ_LT(tcp->tcp_swl2, seg_ack) || 4398 SEQ_LT(tcp->tcp_swl1, seg_seq) || 4399 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) { 4400 /* 4401 * The criteria for an update are: 4402 * 4403 * 1. the segment acknowledges some data. Or 4404 * 2. the segment is new, i.e. it has a higher seq num. Or 4405 * 3. the segment is not old and the advertised window is 4406 * larger than the previous advertised window. 4407 */ 4408 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd) 4409 flags |= TH_XMIT_NEEDED; 4410 tcp->tcp_swnd = new_swnd; 4411 if (new_swnd > tcp->tcp_max_swnd) 4412 tcp->tcp_max_swnd = new_swnd; 4413 tcp->tcp_swl1 = seg_seq; 4414 tcp->tcp_swl2 = seg_ack; 4415 } 4416 est: 4417 if (tcp->tcp_state > TCPS_ESTABLISHED) { 4418 4419 switch (tcp->tcp_state) { 4420 case TCPS_FIN_WAIT_1: 4421 if (tcp->tcp_fin_acked) { 4422 tcp->tcp_state = TCPS_FIN_WAIT_2; 4423 DTRACE_TCP6(state__change, void, NULL, 4424 ip_xmit_attr_t *, connp->conn_ixa, 4425 void, NULL, tcp_t *, tcp, void, NULL, 4426 int32_t, TCPS_FIN_WAIT_1); 4427 /* 4428 * We implement the non-standard BSD/SunOS 4429 * FIN_WAIT_2 flushing algorithm. 4430 * If there is no user attached to this 4431 * TCP endpoint, then this TCP struct 4432 * could hang around forever in FIN_WAIT_2 4433 * state if the peer forgets to send us 4434 * a FIN. To prevent this, we wait only 4435 * 2*MSL (a convenient time value) for 4436 * the FIN to arrive. If it doesn't show up, 4437 * we flush the TCP endpoint. This algorithm, 4438 * though a violation of RFC-793, has worked 4439 * for over 10 years in BSD systems. 4440 * Note: SunOS 4.x waits 675 seconds before 4441 * flushing the FIN_WAIT_2 connection. 4442 */ 4443 TCP_TIMER_RESTART(tcp, 4444 tcp->tcp_fin_wait_2_flush_interval); 4445 } 4446 break; 4447 case TCPS_FIN_WAIT_2: 4448 break; /* Shutdown hook? */ 4449 case TCPS_LAST_ACK: 4450 freemsg(mp); 4451 if (tcp->tcp_fin_acked) { 4452 (void) tcp_clean_death(tcp, 0); 4453 return; 4454 } 4455 goto xmit_check; 4456 case TCPS_CLOSING: 4457 if (tcp->tcp_fin_acked) { 4458 SET_TIME_WAIT(tcps, tcp, connp); 4459 DTRACE_TCP6(state__change, void, NULL, 4460 ip_xmit_attr_t *, connp->conn_ixa, void, 4461 NULL, tcp_t *, tcp, void, NULL, int32_t, 4462 TCPS_CLOSING); 4463 } 4464 /*FALLTHRU*/ 4465 case TCPS_CLOSE_WAIT: 4466 freemsg(mp); 4467 goto xmit_check; 4468 default: 4469 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 4470 break; 4471 } 4472 } 4473 if (flags & TH_FIN) { 4474 /* Make sure we ack the fin */ 4475 flags |= TH_ACK_NEEDED; 4476 if (!tcp->tcp_fin_rcvd) { 4477 tcp->tcp_fin_rcvd = B_TRUE; 4478 tcp->tcp_rnxt++; 4479 tcpha = tcp->tcp_tcpha; 4480 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4481 4482 /* 4483 * Generate the ordrel_ind at the end unless the 4484 * conn is detached or it is a STREAMS based eager. 4485 * In the eager case we defer the notification until 4486 * tcp_accept_finish has run.
4487 */ 4488 if (!TCP_IS_DETACHED(tcp) && (IPCL_IS_NONSTR(connp) || 4489 (tcp->tcp_listener == NULL && 4490 !tcp->tcp_hard_binding))) 4491 flags |= TH_ORDREL_NEEDED; 4492 switch (tcp->tcp_state) { 4493 case TCPS_SYN_RCVD: 4494 tcp->tcp_state = TCPS_CLOSE_WAIT; 4495 DTRACE_TCP6(state__change, void, NULL, 4496 ip_xmit_attr_t *, connp->conn_ixa, 4497 void, NULL, tcp_t *, tcp, void, NULL, 4498 int32_t, TCPS_SYN_RCVD); 4499 /* Keepalive? */ 4500 break; 4501 case TCPS_ESTABLISHED: 4502 tcp->tcp_state = TCPS_CLOSE_WAIT; 4503 DTRACE_TCP6(state__change, void, NULL, 4504 ip_xmit_attr_t *, connp->conn_ixa, 4505 void, NULL, tcp_t *, tcp, void, NULL, 4506 int32_t, TCPS_ESTABLISHED); 4507 /* Keepalive? */ 4508 break; 4509 case TCPS_FIN_WAIT_1: 4510 if (!tcp->tcp_fin_acked) { 4511 tcp->tcp_state = TCPS_CLOSING; 4512 DTRACE_TCP6(state__change, void, NULL, 4513 ip_xmit_attr_t *, connp->conn_ixa, 4514 void, NULL, tcp_t *, tcp, void, 4515 NULL, int32_t, TCPS_FIN_WAIT_1); 4516 break; 4517 } 4518 /* FALLTHRU */ 4519 case TCPS_FIN_WAIT_2: 4520 SET_TIME_WAIT(tcps, tcp, connp); 4521 DTRACE_TCP6(state__change, void, NULL, 4522 ip_xmit_attr_t *, connp->conn_ixa, void, 4523 NULL, tcp_t *, tcp, void, NULL, int32_t, 4524 TCPS_FIN_WAIT_2); 4525 if (seg_len) { 4526 /* 4527 * Data is piggybacked on the FIN; 4528 * break to handle the data. 4529 */ 4530 break; 4531 } 4532 freemsg(mp); 4533 goto ack_check; 4534 } 4535 } 4536 } 4537 if (mp == NULL) 4538 goto xmit_check; 4539 if (seg_len == 0) { 4540 freemsg(mp); 4541 goto xmit_check; 4542 } 4543 if (mp->b_rptr == mp->b_wptr) { 4544 /* 4545 * The header has been consumed, so we remove the 4546 * zero-length mblk here. 4547 */ 4548 mp1 = mp; 4549 mp = mp->b_cont; 4550 freeb(mp1); 4551 } 4552 update_ack: 4553 tcpha = tcp->tcp_tcpha; 4554 tcp->tcp_rack_cnt++; 4555 { 4556 uint32_t cur_max; 4557 4558 cur_max = tcp->tcp_rack_cur_max; 4559 if (tcp->tcp_rack_cnt >= cur_max) { 4560 /* 4561 * We have more unacked data than we should - send 4562 * an ACK now. 4563 */ 4564 flags |= TH_ACK_NEEDED; 4565 cur_max++; 4566 if (cur_max > tcp->tcp_rack_abs_max) 4567 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 4568 else 4569 tcp->tcp_rack_cur_max = cur_max; 4570 } else if (TCP_IS_DETACHED(tcp)) { 4571 /* We don't have an ACK timer for detached TCP. */ 4572 flags |= TH_ACK_NEEDED; 4573 } else if (seg_len < mss) { 4574 /* 4575 * If we get a segment that is less than an mss, and we 4576 * already have unacknowledged data, and the amount 4577 * unacknowledged is not a multiple of mss, then we'd 4578 * better generate an ACK now. Otherwise, this may be 4579 * the tail piece of a transaction, and we would rather 4580 * wait for the response. 4581 */ 4582 uint32_t udif; 4583 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <= 4584 (uintptr_t)INT_MAX); 4585 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack); 4586 if (udif && (udif % mss)) 4587 flags |= TH_ACK_NEEDED; 4588 else 4589 flags |= TH_ACK_TIMER_NEEDED; 4590 } else { 4591 /* Start delayed ack timer */ 4592 flags |= TH_ACK_TIMER_NEEDED; 4593 } 4594 } 4595 tcp->tcp_rnxt += seg_len; 4596 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4597 4598 if (mp == NULL) 4599 goto xmit_check; 4600 4601 /* Update SACK list */ 4602 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 4603 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt, 4604 &(tcp->tcp_num_sack_blk)); 4605 } 4606 4607 if (tcp->tcp_urp_mp) { 4608 tcp->tcp_urp_mp->b_cont = mp; 4609 mp = tcp->tcp_urp_mp; 4610 tcp->tcp_urp_mp = NULL; 4611 /* Ready for a new signal.
*/ 4612 tcp->tcp_urp_last_valid = B_FALSE; 4613 #ifdef DEBUG 4614 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4615 "tcp_rput: sending exdata_ind %s", 4616 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4617 #endif /* DEBUG */ 4618 } 4619 4620 /* 4621 * Check for ancillary data changes compared to last segment. 4622 */ 4623 if (connp->conn_recv_ancillary.crb_all != 0) { 4624 mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira); 4625 if (mp == NULL) 4626 return; 4627 } 4628 4629 if (IPCL_IS_NONSTR(connp)) { 4630 /* 4631 * Non-STREAMS socket 4632 * 4633 * Note that no KSSL processing is done here, because 4634 * KSSL is not supported for non-STREAMS sockets. 4635 */ 4636 boolean_t push = flags & (TH_PUSH|TH_FIN); 4637 int error; 4638 4639 if ((*connp->conn_upcalls->su_recv)( 4640 connp->conn_upper_handle, 4641 mp, seg_len, 0, &error, &push) <= 0) { 4642 /* 4643 * We should never be in the middle of a 4644 * fallback; the squeue guarantees that. 4645 */ 4646 ASSERT(error != EOPNOTSUPP); 4647 if (error == ENOSPC) 4648 tcp->tcp_rwnd -= seg_len; 4649 } else if (push) { 4650 /* PUSH bit set and sockfs is not flow controlled */ 4651 flags |= tcp_rwnd_reopen(tcp); 4652 } 4653 } else if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) { 4654 /* 4655 * Side queue inbound data until the accept happens. 4656 * tcp_accept/tcp_rput drains this when the accept happens. 4657 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or 4658 * T_EXDATA_IND) it is queued on b_next. 4659 * XXX Make urgent data use this. Requires: 4660 * Removing tcp_listener check for TH_URG 4661 * Making M_PCPROTO and MARK messages skip the eager case 4662 */ 4663 4664 if (tcp->tcp_kssl_pending) { 4665 DTRACE_PROBE1(kssl_mblk__ksslinput_pending, 4666 mblk_t *, mp); 4667 tcp_kssl_input(tcp, mp, ira->ira_cred); 4668 } else { 4669 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4670 } 4671 } else { 4672 /* Active STREAMS socket */ 4673 if (mp->b_datap->db_type != M_DATA || 4674 (flags & TH_MARKNEXT_NEEDED)) { 4675 if (tcp->tcp_rcv_list != NULL) { 4676 flags |= tcp_rcv_drain(tcp); 4677 } 4678 ASSERT(tcp->tcp_rcv_list == NULL || 4679 tcp->tcp_fused_sigurg); 4680 4681 if (flags & TH_MARKNEXT_NEEDED) { 4682 #ifdef DEBUG 4683 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4684 "tcp_rput: sending MSGMARKNEXT %s", 4685 tcp_display(tcp, NULL, 4686 DISP_PORT_ONLY)); 4687 #endif /* DEBUG */ 4688 mp->b_flag |= MSGMARKNEXT; 4689 flags &= ~TH_MARKNEXT_NEEDED; 4690 } 4691 4692 /* Does this need SSL processing first? */ 4693 if ((tcp->tcp_kssl_ctx != NULL) && 4694 (DB_TYPE(mp) == M_DATA)) { 4695 DTRACE_PROBE1(kssl_mblk__ksslinput_data1, 4696 mblk_t *, mp); 4697 tcp_kssl_input(tcp, mp, ira->ira_cred); 4698 } else { 4699 if (is_system_labeled()) 4700 tcp_setcred_data(mp, ira); 4701 4702 putnext(connp->conn_rq, mp); 4703 if (!canputnext(connp->conn_rq)) 4704 tcp->tcp_rwnd -= seg_len; 4705 } 4706 } else if ((tcp->tcp_kssl_ctx != NULL) && 4707 (DB_TYPE(mp) == M_DATA)) { 4708 /* Does this need SSL processing first? */ 4709 DTRACE_PROBE1(kssl_mblk__ksslinput_data2, mblk_t *, mp); 4710 tcp_kssl_input(tcp, mp, ira->ira_cred); 4711 } else if ((flags & (TH_PUSH|TH_FIN)) || 4712 tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) { 4713 if (tcp->tcp_rcv_list != NULL) { 4714 /* 4715 * Enqueue the new segment first and then 4716 * call tcp_rcv_drain() to send all data 4717 * up. The other way to do this is to 4718 * send all queued data up and then call 4719 * putnext() to send the new segment up. 4720 * That way the else part below could be 4721 * removed.
4722 * 4723 * We don't do this, to avoid one more call to 4724 * canputnext() as tcp_rcv_drain() needs to 4725 * call canputnext(). 4726 */ 4727 tcp_rcv_enqueue(tcp, mp, seg_len, 4728 ira->ira_cred); 4729 flags |= tcp_rcv_drain(tcp); 4730 } else { 4731 if (is_system_labeled()) 4732 tcp_setcred_data(mp, ira); 4733 4734 putnext(connp->conn_rq, mp); 4735 if (!canputnext(connp->conn_rq)) 4736 tcp->tcp_rwnd -= seg_len; 4737 } 4738 } else { 4739 /* 4740 * Enqueue all packets when processing an mblk 4741 * from the co queue and also enqueue normal packets. 4742 */ 4743 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4744 } 4745 /* 4746 * Make sure the timer is running if we have data waiting 4747 * for a push bit. This provides resiliency against 4748 * implementations that do not correctly generate push bits. 4749 */ 4750 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 4751 /* 4752 * The connection may be closed at this point, so don't 4753 * do anything for a detached tcp. 4754 */ 4755 if (!TCP_IS_DETACHED(tcp)) 4756 tcp->tcp_push_tid = TCP_TIMER(tcp, 4757 tcp_push_timer, 4758 tcps->tcps_push_timer_interval); 4759 } 4760 } 4761 4762 xmit_check: 4763 /* Is there anything left to do? */ 4764 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4765 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 4766 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 4767 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4768 goto done; 4769 4770 /* Any transmit work to do and a non-zero window? */ 4771 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 4772 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 4773 if (flags & TH_REXMIT_NEEDED) { 4774 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 4775 4776 TCPS_BUMP_MIB(tcps, tcpOutFastRetrans); 4777 if (snd_size > mss) 4778 snd_size = mss; 4779 if (snd_size > tcp->tcp_swnd) 4780 snd_size = tcp->tcp_swnd; 4781 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 4782 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 4783 B_TRUE); 4784 4785 if (mp1 != NULL) { 4786 tcp->tcp_xmit_head->b_prev = 4787 (mblk_t *)LBOLT_FASTPATH; 4788 tcp->tcp_csuna = tcp->tcp_snxt; 4789 TCPS_BUMP_MIB(tcps, tcpRetransSegs); 4790 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, 4791 snd_size); 4792 tcp_send_data(tcp, mp1); 4793 } 4794 } 4795 if (flags & TH_NEED_SACK_REXMIT) { 4796 tcp_sack_rexmit(tcp, &flags); 4797 } 4798 /* 4799 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 4800 * out a new segment. Note that tcp_rexmit should not be 4801 * set; if it were, TH_LIMIT_XMIT would not have been set. 4802 */ 4803 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 4804 if (!tcp->tcp_rexmit) { 4805 tcp_wput_data(tcp, NULL, B_FALSE); 4806 } else { 4807 tcp_ss_rexmit(tcp); 4808 } 4809 } 4810 /* 4811 * Adjust tcp_cwnd back to normal value after sending 4812 * new data segments. 4813 */ 4814 if (flags & TH_LIMIT_XMIT) { 4815 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 4816 /* 4817 * This will restart the timer. Restarting the 4818 * timer avoids a timeout before the ACK for the 4819 * limited-transmit segment gets back. 4820 */ 4821 if (tcp->tcp_xmit_head != NULL) 4822 tcp->tcp_xmit_head->b_prev = 4823 (mblk_t *)LBOLT_FASTPATH; 4824 } 4825 4826 /* Anything more to do?
*/ 4827 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 4828 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4829 goto done; 4830 } 4831 ack_check: 4832 if (flags & TH_SEND_URP_MARK) { 4833 ASSERT(tcp->tcp_urp_mark_mp); 4834 ASSERT(!IPCL_IS_NONSTR(connp)); 4835 /* 4836 * Send up any queued data and then send the mark message. 4837 */ 4838 if (tcp->tcp_rcv_list != NULL) { 4839 flags |= tcp_rcv_drain(tcp); 4840 4841 } 4842 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4843 mp1 = tcp->tcp_urp_mark_mp; 4844 tcp->tcp_urp_mark_mp = NULL; 4845 if (is_system_labeled()) 4846 tcp_setcred_data(mp1, ira); 4847 4848 putnext(connp->conn_rq, mp1); 4849 #ifdef DEBUG 4850 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4851 "tcp_rput: sending zero-length %s %s", 4852 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 4853 "MSGNOTMARKNEXT"), 4854 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4855 #endif /* DEBUG */ 4856 flags &= ~TH_SEND_URP_MARK; 4857 } 4858 if (flags & TH_ACK_NEEDED) { 4859 /* 4860 * Time to send an ACK for some reason. 4861 */ 4862 mp1 = tcp_ack_mp(tcp); 4863 4864 if (mp1 != NULL) { 4865 tcp_send_data(tcp, mp1); 4866 BUMP_LOCAL(tcp->tcp_obsegs); 4867 TCPS_BUMP_MIB(tcps, tcpOutAck); 4868 } 4869 if (tcp->tcp_ack_tid != 0) { 4870 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4871 tcp->tcp_ack_tid = 0; 4872 } 4873 } 4874 if (flags & TH_ACK_TIMER_NEEDED) { 4875 /* 4876 * Arrange for deferred ACK or push wait timeout. 4877 * Start the timer if it is not already running. 4878 */ 4879 if (tcp->tcp_ack_tid == 0) { 4880 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 4881 tcp->tcp_localnet ? 4882 tcps->tcps_local_dack_interval : 4883 tcps->tcps_deferred_ack_interval); 4884 } 4885 } 4886 if (flags & TH_ORDREL_NEEDED) { 4887 /* 4888 * Notify the upper layer about an orderly release. If this is 4889 * a non-STREAMS socket, then just make an upcall. For STREAMS 4890 * we send up an ordrel_ind, unless this is an eager, in which 4891 * case the ordrel will be sent when tcp_accept_finish runs. 4892 * Note that for non-STREAMS we make an upcall even if it is an 4893 * eager, because we have an upper handle to send it to. 4894 */ 4895 ASSERT(IPCL_IS_NONSTR(connp) || tcp->tcp_listener == NULL); 4896 ASSERT(!tcp->tcp_detached); 4897 4898 if (IPCL_IS_NONSTR(connp)) { 4899 ASSERT(tcp->tcp_ordrel_mp == NULL); 4900 tcp->tcp_ordrel_done = B_TRUE; 4901 (*connp->conn_upcalls->su_opctl) 4902 (connp->conn_upper_handle, SOCK_OPCTL_SHUT_RECV, 0); 4903 goto done; 4904 } 4905 4906 if (tcp->tcp_rcv_list != NULL) { 4907 /* 4908 * Push any mblk(s) enqueued from co processing. 4909 */ 4910 flags |= tcp_rcv_drain(tcp); 4911 } 4912 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4913 4914 mp1 = tcp->tcp_ordrel_mp; 4915 tcp->tcp_ordrel_mp = NULL; 4916 tcp->tcp_ordrel_done = B_TRUE; 4917 putnext(connp->conn_rq, mp1); 4918 } 4919 done: 4920 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4921 } 4922 4923 /* 4924 * Attach ancillary data to a received TCP segment for the 4925 * ancillary pieces requested by the application that are 4926 * different from what they were in the previous data segment. 4927 * 4928 * Save the "current" values once memory allocation is ok so that 4929 * when memory allocation fails we can just wait for the next data segment.
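* * When something did change, the message built below looks like this (the option shown is only an example): an M_PROTO mblk containing [ T_optdata_ind | T_opthdr(IPPROTO_IPV6, IPV6_HOPLIMIT) | uint_t ] with b_cont pointing at the original data mblk. If allocb() fails, the data is passed up unchanged and the new values are not saved, so the same comparison simply fires again on the next segment.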
4930 */ 4931 static mblk_t * 4932 tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp, 4933 ip_recv_attr_t *ira) 4934 { 4935 struct T_optdata_ind *todi; 4936 int optlen; 4937 uchar_t *optptr; 4938 struct T_opthdr *toh; 4939 crb_t addflag; /* Which pieces to add */ 4940 mblk_t *mp1; 4941 conn_t *connp = tcp->tcp_connp; 4942 4943 optlen = 0; 4944 addflag.crb_all = 0; 4945 /* If app asked for pktinfo and the index has changed ... */ 4946 if (connp->conn_recv_ancillary.crb_ip_recvpktinfo && 4947 ira->ira_ruifindex != tcp->tcp_recvifindex) { 4948 optlen += sizeof (struct T_opthdr) + 4949 sizeof (struct in6_pktinfo); 4950 addflag.crb_ip_recvpktinfo = 1; 4951 } 4952 /* If app asked for hoplimit and it has changed ... */ 4953 if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit && 4954 ipp->ipp_hoplimit != tcp->tcp_recvhops) { 4955 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 4956 addflag.crb_ipv6_recvhoplimit = 1; 4957 } 4958 /* If app asked for tclass and it has changed ... */ 4959 if (connp->conn_recv_ancillary.crb_ipv6_recvtclass && 4960 ipp->ipp_tclass != tcp->tcp_recvtclass) { 4961 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 4962 addflag.crb_ipv6_recvtclass = 1; 4963 } 4964 /* 4965 * If app asked for hopbyhop headers and it has changed ... 4966 * For security labels, note that (1) security labels can't change on 4967 * a connected socket at all, (2) we're connected to at most one peer, 4968 * (3) if anything changes, then it must be some other extra option. 4969 */ 4970 if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts && 4971 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 4972 (ipp->ipp_fields & IPPF_HOPOPTS), 4973 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 4974 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen; 4975 addflag.crb_ipv6_recvhopopts = 1; 4976 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 4977 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 4978 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 4979 return (mp); 4980 } 4981 /* If app asked for dst headers before routing headers ... */ 4982 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts && 4983 ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen, 4984 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 4985 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) { 4986 optlen += sizeof (struct T_opthdr) + 4987 ipp->ipp_rthdrdstoptslen; 4988 addflag.crb_ipv6_recvrthdrdstopts = 1; 4989 if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts, 4990 &tcp->tcp_rthdrdstoptslen, 4991 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 4992 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) 4993 return (mp); 4994 } 4995 /* If app asked for routing headers and it has changed ... */ 4996 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr && 4997 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 4998 (ipp->ipp_fields & IPPF_RTHDR), 4999 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 5000 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 5001 addflag.crb_ipv6_recvrthdr = 1; 5002 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 5003 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 5004 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 5005 return (mp); 5006 } 5007 /* If app asked for dest headers and it has changed ... 
*/ 5008 if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts || 5009 connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) && 5010 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 5011 (ipp->ipp_fields & IPPF_DSTOPTS), 5012 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 5013 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 5014 addflag.crb_ipv6_recvdstopts = 1; 5015 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 5016 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 5017 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 5018 return (mp); 5019 } 5020 5021 if (optlen == 0) { 5022 /* Nothing to add */ 5023 return (mp); 5024 } 5025 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 5026 if (mp1 == NULL) { 5027 /* 5028 * Defer sending ancillary data until the next TCP segment 5029 * arrives. 5030 */ 5031 return (mp); 5032 } 5033 mp1->b_cont = mp; 5034 mp = mp1; 5035 mp->b_wptr += sizeof (*todi) + optlen; 5036 mp->b_datap->db_type = M_PROTO; 5037 todi = (struct T_optdata_ind *)mp->b_rptr; 5038 todi->PRIM_type = T_OPTDATA_IND; 5039 todi->DATA_flag = 1; /* MORE data */ 5040 todi->OPT_length = optlen; 5041 todi->OPT_offset = sizeof (*todi); 5042 optptr = (uchar_t *)&todi[1]; 5043 /* 5044 * If app asked for pktinfo and the index has changed ... 5045 * Note that the local address never changes for the connection. 5046 */ 5047 if (addflag.crb_ip_recvpktinfo) { 5048 struct in6_pktinfo *pkti; 5049 uint_t ifindex; 5050 5051 ifindex = ira->ira_ruifindex; 5052 toh = (struct T_opthdr *)optptr; 5053 toh->level = IPPROTO_IPV6; 5054 toh->name = IPV6_PKTINFO; 5055 toh->len = sizeof (*toh) + sizeof (*pkti); 5056 toh->status = 0; 5057 optptr += sizeof (*toh); 5058 pkti = (struct in6_pktinfo *)optptr; 5059 pkti->ipi6_addr = connp->conn_laddr_v6; 5060 pkti->ipi6_ifindex = ifindex; 5061 optptr += sizeof (*pkti); 5062 ASSERT(OK_32PTR(optptr)); 5063 /* Save as "last" value */ 5064 tcp->tcp_recvifindex = ifindex; 5065 } 5066 /* If app asked for hoplimit and it has changed ... */ 5067 if (addflag.crb_ipv6_recvhoplimit) { 5068 toh = (struct T_opthdr *)optptr; 5069 toh->level = IPPROTO_IPV6; 5070 toh->name = IPV6_HOPLIMIT; 5071 toh->len = sizeof (*toh) + sizeof (uint_t); 5072 toh->status = 0; 5073 optptr += sizeof (*toh); 5074 *(uint_t *)optptr = ipp->ipp_hoplimit; 5075 optptr += sizeof (uint_t); 5076 ASSERT(OK_32PTR(optptr)); 5077 /* Save as "last" value */ 5078 tcp->tcp_recvhops = ipp->ipp_hoplimit; 5079 } 5080 /* If app asked for tclass and it has changed ... 
*/ 5081 if (addflag.crb_ipv6_recvtclass) { 5082 toh = (struct T_opthdr *)optptr; 5083 toh->level = IPPROTO_IPV6; 5084 toh->name = IPV6_TCLASS; 5085 toh->len = sizeof (*toh) + sizeof (uint_t); 5086 toh->status = 0; 5087 optptr += sizeof (*toh); 5088 *(uint_t *)optptr = ipp->ipp_tclass; 5089 optptr += sizeof (uint_t); 5090 ASSERT(OK_32PTR(optptr)); 5091 /* Save as "last" value */ 5092 tcp->tcp_recvtclass = ipp->ipp_tclass; 5093 } 5094 if (addflag.crb_ipv6_recvhopopts) { 5095 toh = (struct T_opthdr *)optptr; 5096 toh->level = IPPROTO_IPV6; 5097 toh->name = IPV6_HOPOPTS; 5098 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen; 5099 toh->status = 0; 5100 optptr += sizeof (*toh); 5101 bcopy((uchar_t *)ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen); 5102 optptr += ipp->ipp_hopoptslen; 5103 ASSERT(OK_32PTR(optptr)); 5104 /* Save as last value */ 5105 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 5106 (ipp->ipp_fields & IPPF_HOPOPTS), 5107 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 5108 } 5109 if (addflag.crb_ipv6_recvrthdrdstopts) { 5110 toh = (struct T_opthdr *)optptr; 5111 toh->level = IPPROTO_IPV6; 5112 toh->name = IPV6_RTHDRDSTOPTS; 5113 toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen; 5114 toh->status = 0; 5115 optptr += sizeof (*toh); 5116 bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen); 5117 optptr += ipp->ipp_rthdrdstoptslen; 5118 ASSERT(OK_32PTR(optptr)); 5119 /* Save as last value */ 5120 ip_savebuf((void **)&tcp->tcp_rthdrdstopts, 5121 &tcp->tcp_rthdrdstoptslen, 5122 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5123 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen); 5124 } 5125 if (addflag.crb_ipv6_recvrthdr) { 5126 toh = (struct T_opthdr *)optptr; 5127 toh->level = IPPROTO_IPV6; 5128 toh->name = IPV6_RTHDR; 5129 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 5130 toh->status = 0; 5131 optptr += sizeof (*toh); 5132 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 5133 optptr += ipp->ipp_rthdrlen; 5134 ASSERT(OK_32PTR(optptr)); 5135 /* Save as last value */ 5136 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 5137 (ipp->ipp_fields & IPPF_RTHDR), 5138 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 5139 } 5140 if (addflag.crb_ipv6_recvdstopts) { 5141 toh = (struct T_opthdr *)optptr; 5142 toh->level = IPPROTO_IPV6; 5143 toh->name = IPV6_DSTOPTS; 5144 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 5145 toh->status = 0; 5146 optptr += sizeof (*toh); 5147 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 5148 optptr += ipp->ipp_dstoptslen; 5149 ASSERT(OK_32PTR(optptr)); 5150 /* Save as last value */ 5151 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 5152 (ipp->ipp_fields & IPPF_DSTOPTS), 5153 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 5154 } 5155 ASSERT(optptr == mp->b_wptr); 5156 return (mp); 5157 } 5158 5159 /* The minimum of smoothed mean deviation in RTO calculation. */ 5160 #define TCP_SD_MIN 400 5161 5162 /* 5163 * Set RTO for this connection. The formula is from Jacobson and Karels' 5164 * "Congestion Avoidance and Control" in SIGCOMM '88. The variable names 5165 * are the same as those in Appendix A.2 of that paper. 5166 * 5167 * m = new measurement 5168 * sa = smoothed RTT average (8 * average estimates). 5169 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates). 
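* * A worked pass through the update (example numbers): with sa = 800 (a 100 ms average, since sa is scaled by 8) and sv = 600, a new measurement m = 120 ms gives Error = 120 - (800 >> 3) = 20, so sa becomes 820; the deviation term is |20| - (600 >> 2) = -130, so sv drops to 470, still above TCP_SD_MIN. The RTO computed below is then (820 >> 3) + 470 + (820 >> 5) = 102 + 470 + 25 = 597 ms, plus any tcps_rexmit_interval_extra.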
5170 */ 5171 static void 5172 tcp_set_rto(tcp_t *tcp, clock_t rtt) 5173 { 5174 long m = TICK_TO_MSEC(rtt); 5175 clock_t sa = tcp->tcp_rtt_sa; 5176 clock_t sv = tcp->tcp_rtt_sd; 5177 clock_t rto; 5178 tcp_stack_t *tcps = tcp->tcp_tcps; 5179 5180 TCPS_BUMP_MIB(tcps, tcpRttUpdate); 5181 tcp->tcp_rtt_update++; 5182 5183 /* A non-zero tcp_rtt_sa means this is not the first sample. */ 5184 if (sa != 0) { 5185 /* 5186 * Update average estimator: 5187 * new rtt = 7/8 old rtt + 1/8 Error 5188 */ 5189 5190 /* m is now Error in estimate. */ 5191 m -= sa >> 3; 5192 if ((sa += m) <= 0) { 5193 /* 5194 * Don't allow the smoothed average to be negative. 5195 * We use 0 to denote reinitialization of the 5196 * variables. 5197 */ 5198 sa = 1; 5199 } 5200 5201 /* 5202 * Update deviation estimator: 5203 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev) 5204 */ 5205 if (m < 0) 5206 m = -m; 5207 m -= sv >> 2; 5208 sv += m; 5209 } else { 5210 /* 5211 * This follows BSD's implementation. So the reinitialized 5212 * RTO is 3 * m. We cannot go less than 2 because if the 5213 * link is bandwidth dominated, doubling the window size 5214 * during slow start means doubling the RTT. We want to be 5215 * more conservative when we reinitialize our estimates. 3 5216 * is just a convenient number. 5217 */ 5218 sa = m << 3; 5219 sv = m << 1; 5220 } 5221 if (sv < TCP_SD_MIN) { 5222 /* 5223 * We do not know whether sa captures the delayed ACK 5224 * effect, since in a long train of segments a receiver 5225 * does not delay its ACKs. So set the minimum of sv 5226 * to TCP_SD_MIN, which defaults to 400 ms, twice 5227 * the BSD DATO. That means the minimum of the mean 5228 * deviation is 100 ms. 5229 * 5230 */ 5231 sv = TCP_SD_MIN; 5232 } 5233 tcp->tcp_rtt_sa = sa; 5234 tcp->tcp_rtt_sd = sv; 5235 /* 5236 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv) 5237 * 5238 * Add tcp_rexmit_interval_extra in case of an extreme environment 5239 * where the algorithm fails to work. The default value of 5240 * tcp_rexmit_interval_extra should be 0. 5241 * 5242 * As we use a finer grained clock than BSD and update the 5243 * RTO for every ACK, add in another .25 of RTT to the 5244 * deviation of RTO to accommodate burstiness of 1/4 of 5245 * window size. 5246 */ 5247 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5); 5248 5249 TCP_SET_RTO(tcp, rto); 5250 5251 /* Now, we can reset tcp_timer_backoff to use the new RTO... */ 5252 tcp->tcp_timer_backoff = 0; 5253 } 5254 5255 /* 5256 * On a labeled system we have some protocols above TCP, such as RPC, which 5257 * appear to assume that every mblk in a chain has a db_credp. 5258 */ 5259 static void 5260 tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira) 5261 { 5262 ASSERT(is_system_labeled()); 5263 ASSERT(ira->ira_cred != NULL); 5264 5265 while (mp != NULL) { 5266 mblk_setcred(mp, ira->ira_cred, NOPID); 5267 mp = mp->b_cont; 5268 } 5269 } 5270 5271 uint_t 5272 tcp_rwnd_reopen(tcp_t *tcp) 5273 { 5274 uint_t ret = 0; 5275 uint_t thwin; 5276 conn_t *connp = tcp->tcp_connp; 5277 5278 /* Learn the latest rwnd information that we sent to the other side. */ 5279 thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win)) 5280 << tcp->tcp_rcv_ws; 5281 /* This is the peer's calculated send window (our receive window). */ 5282 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 5283 /* 5284 * Increase the receive window to the max. But we need to do receiver 5285 * SWS avoidance. This means that we need to check that the increase 5286 * of the receive window is at least 1 MSS.
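* * Example with made-up numbers: conn_rcvbuf = 64240 and MSS = 1460. If the peer still thinks the window is thwin = 63500, the difference (740) is under one MSS, so nothing changes - the receiver-side SWS guard. At thwin = 62000 the difference (2240) clears the bar, tcp_rwnd is reopened to the full buffer, and if thwin has also fallen below tcp_rack_cur_max * MSS an immediate window update (TH_ACK_NEEDED) is requested.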
5287 */ 5288 if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) { 5289 /* 5290 * If the window that the other side knows is less than the max 5291 * deferred-ACK count of segments, send an update immediately. 5292 */ 5293 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) { 5294 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutWinUpdate); 5295 ret = TH_ACK_NEEDED; 5296 } 5297 tcp->tcp_rwnd = connp->conn_rcvbuf; 5298 } 5299 return (ret); 5300 } 5301 5302 /* 5303 * Handle a packet that has been reclassified by TCP. 5304 * This function drops the ref on connp that the caller had. 5305 */ 5306 void 5307 tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst) 5308 { 5309 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 5310 5311 if (connp->conn_incoming_ifindex != 0 && 5312 connp->conn_incoming_ifindex != ira->ira_ruifindex) { 5313 freemsg(mp); 5314 CONN_DEC_REF(connp); 5315 return; 5316 } 5317 5318 if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) || 5319 (ira->ira_flags & IRAF_IPSEC_SECURE)) { 5320 ip6_t *ip6h; 5321 ipha_t *ipha; 5322 5323 if (ira->ira_flags & IRAF_IS_IPV4) { 5324 ipha = (ipha_t *)mp->b_rptr; 5325 ip6h = NULL; 5326 } else { 5327 ipha = NULL; 5328 ip6h = (ip6_t *)mp->b_rptr; 5329 } 5330 mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira); 5331 if (mp == NULL) { 5332 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 5333 /* Note that mp is NULL */ 5334 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 5335 CONN_DEC_REF(connp); 5336 return; 5337 } 5338 } 5339 5340 if (IPCL_IS_TCP(connp)) { 5341 /* 5342 * Do not drain; certain use cases can blow 5343 * the stack. 5344 */ 5345 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 5346 connp->conn_recv, connp, ira, 5347 SQ_NODRAIN, SQTAG_IP_TCP_INPUT); 5348 } else { 5349 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */ 5350 (connp->conn_recv)(connp, mp, NULL, 5351 ira); 5352 CONN_DEC_REF(connp); 5353 } 5354 5355 } 5356 5357 /* ARGSUSED */ 5358 static void 5359 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy) 5360 { 5361 conn_t *connp = (conn_t *)arg; 5362 tcp_t *tcp = connp->conn_tcp; 5363 queue_t *q = connp->conn_rq; 5364 5365 ASSERT(!IPCL_IS_NONSTR(connp)); 5366 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5367 tcp->tcp_rsrv_mp = mp; 5368 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5369 5370 if (TCP_IS_DETACHED(tcp) || q == NULL) { 5371 return; 5372 } 5373 5374 if (tcp->tcp_fused) { 5375 tcp_fuse_backenable(tcp); 5376 return; 5377 } 5378 5379 if (canputnext(q)) { 5380 /* Not flow-controlled, open rwnd */ 5381 tcp->tcp_rwnd = connp->conn_rcvbuf; 5382 5383 /* 5384 * Send back a window update immediately if TCP is above 5385 * ESTABLISHED state and the increase of the rcv window 5386 * that the other side knows is at least 1 MSS after flow 5387 * control is lifted. 5388 */ 5389 if (tcp->tcp_state >= TCPS_ESTABLISHED && 5390 tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) { 5391 tcp_xmit_ctl(NULL, tcp, 5392 (tcp->tcp_swnd == 0) ? tcp->tcp_suna : 5393 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 5394 } 5395 } 5396 } 5397 5398 /* 5399 * The read side service routine is called mostly when we get back-enabled as a 5400 * result of flow control relief. Since we don't actually queue anything in 5401 * TCP, we have no data to send out of here. What we do is reopen the receive 5402 * window, and send out a window update.
5403 */ 5404 void 5405 tcp_rsrv(queue_t *q) 5406 { 5407 conn_t *connp = Q_TO_CONN(q); 5408 tcp_t *tcp = connp->conn_tcp; 5409 mblk_t *mp; 5410 5411 /* No code does a putq on the read side */ 5412 ASSERT(q->q_first == NULL); 5413 5414 /* 5415 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already 5416 * been run. So just return. 5417 */ 5418 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5419 if ((mp = tcp->tcp_rsrv_mp) == NULL) { 5420 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5421 return; 5422 } 5423 tcp->tcp_rsrv_mp = NULL; 5424 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5425 5426 CONN_INC_REF(connp); 5427 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp, 5428 NULL, SQ_PROCESS, SQTAG_TCP_RSRV); 5429 } 5430 5431 /* At minimum we need 8 bytes in the TCP header for the lookup */ 5432 #define ICMP_MIN_TCP_HDR 8 5433 5434 /* 5435 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages 5436 * passed up by IP. The message is always received on the correct tcp_t. 5437 * Assumes that IP has pulled up everything up to and including the ICMP header. 5438 */ 5439 /* ARGSUSED2 */ 5440 void 5441 tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 5442 { 5443 conn_t *connp = (conn_t *)arg1; 5444 icmph_t *icmph; 5445 ipha_t *ipha; 5446 int iph_hdr_length; 5447 tcpha_t *tcpha; 5448 uint32_t seg_seq; 5449 tcp_t *tcp = connp->conn_tcp; 5450 5451 /* Assume IP provides aligned packets */ 5452 ASSERT(OK_32PTR(mp->b_rptr)); 5453 ASSERT((MBLKL(mp) >= sizeof (ipha_t))); 5454 5455 /* 5456 * Verify the IP version. If the message is not IPv4, it must be 5457 * ICMPv6, which is handled in tcp_icmp_error_ipv6. 5458 */ 5459 if (!(ira->ira_flags & IRAF_IS_IPV4)) { 5460 tcp_icmp_error_ipv6(tcp, mp, ira); 5461 return; 5462 } 5463 5464 /* Skip past the outer IP and ICMP headers */ 5465 iph_hdr_length = ira->ira_ip_hdr_length; 5466 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 5467 /* 5468 * If we don't have the correct outer IP header length, 5469 * or if we don't have a complete inner IP header, 5470 * drop it. 5471 */ 5472 if (iph_hdr_length < sizeof (ipha_t) || 5473 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 5474 noticmpv4: 5475 freemsg(mp); 5476 return; 5477 } 5478 ipha = (ipha_t *)&icmph[1]; 5479 5480 /* Skip past the inner IP and find the ULP header */ 5481 iph_hdr_length = IPH_HDR_LENGTH(ipha); 5482 tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length); 5483 /* 5484 * If we don't have the correct inner IP header length, or if the ULP 5485 * is not IPPROTO_TCP, or if we don't have at least ICMP_MIN_TCP_HDR 5486 * bytes of TCP header, drop it. 5487 */ 5488 if (iph_hdr_length < sizeof (ipha_t) || 5489 ipha->ipha_protocol != IPPROTO_TCP || 5490 (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) { 5491 goto noticmpv4; 5492 } 5493 5494 seg_seq = ntohl(tcpha->tha_seq); 5495 switch (icmph->icmph_type) { 5496 case ICMP_DEST_UNREACHABLE: 5497 switch (icmph->icmph_code) { 5498 case ICMP_FRAGMENTATION_NEEDED: 5499 /* 5500 * Update the Path MTU, then try to send something out. 5501 */ 5502 tcp_update_pmtu(tcp, B_TRUE); 5503 tcp_rexmit_after_error(tcp); 5504 break; 5505 case ICMP_PORT_UNREACHABLE: 5506 case ICMP_PROTOCOL_UNREACHABLE: 5507 switch (tcp->tcp_state) { 5508 case TCPS_SYN_SENT: 5509 case TCPS_SYN_RCVD: 5510 /* 5511 * ICMP can snipe away incipient 5512 * TCP connections as long as the 5513 * sequence number is the same as the 5514 * initial send sequence number.
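* * Put differently (restating the check below): a forged ICMP port- or * protocol-unreachable can only kill a connection in SYN_SENT or SYN_RCVD if the attacker also knows the exact ISS quoted in the embedded TCP header; any other seg_seq is ignored.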
/*
 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6
 * error messages passed up by IP.
 * Assumes that IP has pulled up all the extension headers as well
 * as the ICMPv6 header.
 */
static void
tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira)
{
	icmp6_t		*icmp6;
	ip6_t		*ip6h;
	uint16_t	iph_hdr_length = ira->ira_ip_hdr_length;
	tcpha_t		*tcpha;
	uint8_t		*nexthdrp;
	uint32_t	seg_seq;

	/*
	 * Verify that we have a complete IP header.
	 */
	ASSERT((MBLKL(mp) >= sizeof (ip6_t)));

	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
	ip6h = (ip6_t *)&icmp6[1];
	/*
	 * Verify that we have a complete ICMP and inner IP header.
	 */
	if ((uchar_t *)&ip6h[1] > mp->b_wptr) {
noticmpv6:
		freemsg(mp);
		return;
	}

	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
		goto noticmpv6;
	tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
	/*
	 * Validate the inner header.  If the ULP is not IPPROTO_TCP, or if
	 * we don't have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop
	 * the packet.
	 */
	if ((*nexthdrp != IPPROTO_TCP) ||
	    ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
		goto noticmpv6;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	switch (icmp6->icmp6_type) {
	case ICMP6_PACKET_TOO_BIG:
		/*
		 * Update the path MTU, then try to send something out.
		 */
		tcp_update_pmtu(tcp, B_TRUE);
		tcp_rexmit_after_error(tcp);
		break;
	case ICMP6_DST_UNREACH:
		switch (icmp6->icmp6_code) {
		case ICMP6_DST_UNREACH_NOPORT:
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			break;
		case ICMP6_DST_UNREACH_ADMIN:
		case ICMP6_DST_UNREACH_NOROUTE:
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
		case ICMP6_DST_UNREACH_ADDR:
			/* Record the error in case we finally time out. */
			tcp->tcp_client_errno = EHOSTUNREACH;
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno);
				}
			}
			break;
		default:
			break;
		}
		break;
	case ICMP6_PARAM_PROB:
		/* Does this correspond to an ICMP_PROTOCOL_UNREACHABLE? */
		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
		    (uchar_t *)nexthdrp) {
			if (tcp->tcp_state == TCPS_SYN_SENT ||
			    tcp->tcp_state == TCPS_SYN_RCVD) {
				(void) tcp_clean_death(tcp, ECONNREFUSED);
			}
			break;
		}
		break;
	case ICMP6_TIME_EXCEEDED:
	default:
		break;
	}
	freemsg(mp);
}
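/*
 * Both ICMP handlers above validate the packet embedded in the error
 * message before trusting it: the outer IP header, the ICMP header, a
 * complete inner IP header, and at least ICMP_MIN_TCP_HDR (8) bytes of
 * the inner TCP header -- enough to read the ports and sequence number.
 * A minimal model of that bounds check, as an illustrative sketch with
 * hypothetical names under the TCP_INPUT_EXAMPLES guard:
 */
#ifdef TCP_INPUT_EXAMPLES
static boolean_t
icmp_payload_ok(const uchar_t *inner_ip, const uchar_t *wptr,
    uint_t inner_ip_hdr_len)
{
	const uchar_t *inner_tcp = inner_ip + inner_ip_hdr_len;

	/* The complete inner IP header must fit in the message ... */
	if (inner_tcp > wptr)
		return (B_FALSE);
	/* ... along with the first 8 bytes of the inner TCP header. */
	return (inner_tcp + ICMP_MIN_TCP_HDR <= wptr);
}
#endif	/* TCP_INPUT_EXAMPLES */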
/*
 * CALLED OUTSIDE OF SQUEUE!  It cannot follow any pointers that tcp might
 * change, but it can refer to fields like tcp_suna and tcp_snxt.
 *
 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP
 * error messages received by IP.  The message is always received on the
 * correct tcp_t.
 */
/* ARGSUSED */
boolean_t
tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6,
    ip_recv_attr_t *ira)
{
	tcpha_t		*tcpha = (tcpha_t *)arg2;
	uint32_t	seq = ntohl(tcpha->tha_seq);
	tcp_t		*tcp = connp->conn_tcp;

	/*
	 * The TCP sequence number contained in the payload of the ICMP error
	 * message should be within the range SND.UNA <= SEG.SEQ < SND.NXT.
	 * Otherwise the message is either a stale ICMP error or an attack
	 * from the network.  Fail the verification.
	 */
	if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
		return (B_FALSE);

	/* For "too big" we also check the ignore flag */
	if (ira->ira_flags & IRAF_IS_IPV4) {
		ASSERT(icmph != NULL);
		if (icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
		    icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	} else {
		ASSERT(icmp6 != NULL);
		if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG &&
		    tcp->tcp_tcps->tcps_ignore_path_mtu)
			return (B_FALSE);
	}
	return (B_TRUE);
}
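/*
 * The SND.UNA <= SEG.SEQ < SND.NXT test above relies on the SEQ_LT and
 * SEQ_GEQ macros, which compare in 32-bit sequence space so the test
 * stays correct across sequence-number wraparound.  The standard
 * technique is a signed interpretation of the unsigned difference; an
 * illustrative sketch, compiled out under the hypothetical
 * TCP_INPUT_EXAMPLES guard:
 */
#ifdef TCP_INPUT_EXAMPLES
#define	EX_SEQ_LT(a, b)		((int32_t)((a) - (b)) < 0)
#define	EX_SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

/*
 * Returns B_TRUE when seq falls in [suna, snxt), even when the window
 * straddles the 2^32 wrap: e.g. suna = 0xfffffff0, snxt = 0x10 accepts
 * seq = 0x4, because (0x4 - 0xfffffff0) = 0x14 is positive as int32_t.
 */
static boolean_t
seq_in_send_window(uint32_t seq, uint32_t suna, uint32_t snxt)
{
	return (!EX_SEQ_LT(seq, suna) && !EX_SEQ_GEQ(seq, snxt));
}
#endif	/* TCP_INPUT_EXAMPLES */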