1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 */ 26 27 /* This file contains all TCP input processing functions. */ 28 29 #include <sys/types.h> 30 #include <sys/stream.h> 31 #include <sys/strsun.h> 32 #include <sys/strsubr.h> 33 #include <sys/stropts.h> 34 #include <sys/strlog.h> 35 #define _SUN_TPI_VERSION 2 36 #include <sys/tihdr.h> 37 #include <sys/suntpi.h> 38 #include <sys/xti_inet.h> 39 #include <sys/squeue_impl.h> 40 #include <sys/squeue.h> 41 #include <sys/tsol/tnet.h> 42 43 #include <inet/common.h> 44 #include <inet/ip.h> 45 #include <inet/tcp.h> 46 #include <inet/tcp_impl.h> 47 #include <inet/tcp_cluster.h> 48 #include <inet/proto_set.h> 49 #include <inet/ipsec_impl.h> 50 51 /* 52 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing 53 */ 54 55 #ifdef _BIG_ENDIAN 56 #define TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 57 (TCPOPT_TSTAMP << 8) | 10) 58 #else 59 #define TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \ 60 (TCPOPT_NOP << 8) | TCPOPT_NOP) 61 #endif 62 63 /* 64 * Flags returned from tcp_parse_options. 65 */ 66 #define TCP_OPT_MSS_PRESENT 1 67 #define TCP_OPT_WSCALE_PRESENT 2 68 #define TCP_OPT_TSTAMP_PRESENT 4 69 #define TCP_OPT_SACK_OK_PRESENT 8 70 #define TCP_OPT_SACK_PRESENT 16 71 72 /* 73 * PAWS needs a timer for 24 days. This is the number of ticks in 24 days 74 */ 75 #define PAWS_TIMEOUT ((clock_t)(24*24*60*60*hz)) 76 77 /* 78 * Since tcp_listener is not cleared atomically with tcp_detached 79 * being cleared we need this extra bit to tell a detached connection 80 * apart from one that is in the process of being accepted. 81 */ 82 #define TCP_IS_DETACHED_NONEAGER(tcp) \ 83 (TCP_IS_DETACHED(tcp) && \ 84 (!(tcp)->tcp_hard_binding)) 85 86 /* 87 * Steps to do when a tcp_t moves to TIME-WAIT state. 88 * 89 * This connection is done, we don't need to account for it. Decrement 90 * the listener connection counter if needed. 91 * 92 * Decrement the connection counter of the stack. Note that this counter 93 * is per CPU. So the total number of connections in a stack is the sum of all 94 * of them. Since there is no lock for handling all of them exclusively, the 95 * resulting sum is only an approximation. 96 * 97 * Unconditionally clear the exclusive binding bit so this TIME-WAIT 98 * connection won't interfere with new ones. 99 * 100 * Start the TIME-WAIT timer. If upper layer has not closed the connection, 101 * the timer is handled within the context of this tcp_t. When the timer 102 * fires, tcp_clean_death() is called. If upper layer closes the connection 103 * during this period, tcp_time_wait_append() will be called to add this 104 * tcp_t to the global TIME-WAIT list. Note that this means that the 105 * actual wait time in TIME-WAIT state will be longer than the 106 * tcps_time_wait_interval since the period before upper layer closes the 107 * connection is not accounted for when tcp_time_wait_append() is called. 108 * 109 * If uppser layer has closed the connection, call tcp_time_wait_append() 110 * directly. 111 * 112 */ 113 #define SET_TIME_WAIT(tcps, tcp, connp) \ 114 { \ 115 (tcp)->tcp_state = TCPS_TIME_WAIT; \ 116 if ((tcp)->tcp_listen_cnt != NULL) \ 117 TCP_DECR_LISTEN_CNT(tcp); \ 118 atomic_dec_64( \ 119 (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt); \ 120 (connp)->conn_exclbind = 0; \ 121 if (!TCP_IS_DETACHED(tcp)) { \ 122 TCP_TIMER_RESTART(tcp, (tcps)->tcps_time_wait_interval); \ 123 } else { \ 124 tcp_time_wait_append(tcp); \ 125 TCP_DBGSTAT(tcps, tcp_rput_time_wait); \ 126 } \ 127 } 128 129 /* 130 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more 131 * than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent 132 * data, TCP will not respond with an ACK. RFC 793 requires that 133 * TCP responds with an ACK for such a bogus ACK. By not following 134 * the RFC, we prevent TCP from getting into an ACK storm if somehow 135 * an attacker successfully spoofs an acceptable segment to our 136 * peer; or when our peer is "confused." 137 */ 138 static uint32_t tcp_drop_ack_unsent_cnt = 10; 139 140 /* 141 * To protect TCP against attacker using a small window and requesting 142 * large amount of data (DoS attack by conuming memory), TCP checks the 143 * window advertised in the last ACK of the 3-way handshake. TCP uses 144 * the tcp_mss (the size of one packet) value for comparion. The window 145 * should be larger than tcp_mss. But while a sane TCP should advertise 146 * a receive window larger than or equal to 4*MSS to avoid stop and go 147 * tarrfic, not all TCP stacks do that. This is especially true when 148 * tcp_mss is a big value. 149 * 150 * To work around this issue, an additional fixed value for comparison 151 * is also used. If the advertised window is smaller than both tcp_mss 152 * and tcp_init_wnd_chk, the ACK is considered as invalid. So for large 153 * tcp_mss value (say, 8K), a window larger than tcp_init_wnd_chk but 154 * smaller than 8K is considered to be OK. 155 */ 156 static uint32_t tcp_init_wnd_chk = 4096; 157 158 /* Process ICMP source quench message or not. */ 159 static boolean_t tcp_icmp_source_quench = B_FALSE; 160 161 static boolean_t tcp_outbound_squeue_switch = B_FALSE; 162 163 static mblk_t *tcp_conn_create_v4(conn_t *, conn_t *, mblk_t *, 164 ip_recv_attr_t *); 165 static mblk_t *tcp_conn_create_v6(conn_t *, conn_t *, mblk_t *, 166 ip_recv_attr_t *); 167 static boolean_t tcp_drop_q0(tcp_t *); 168 static void tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *); 169 static mblk_t *tcp_input_add_ancillary(tcp_t *, mblk_t *, ip_pkt_t *, 170 ip_recv_attr_t *); 171 static void tcp_input_listener(void *, mblk_t *, void *, ip_recv_attr_t *); 172 static int tcp_parse_options(tcpha_t *, tcp_opt_t *); 173 static void tcp_process_options(tcp_t *, tcpha_t *); 174 static mblk_t *tcp_reass(tcp_t *, mblk_t *, uint32_t); 175 static void tcp_reass_elim_overlap(tcp_t *, mblk_t *); 176 static void tcp_rsrv_input(void *, mblk_t *, void *, ip_recv_attr_t *); 177 static void tcp_set_rto(tcp_t *, time_t); 178 static void tcp_setcred_data(mblk_t *, ip_recv_attr_t *); 179 180 /* 181 * Set the MSS associated with a particular tcp based on its current value, 182 * and a new one passed in. Observe minimums and maximums, and reset other 183 * state variables that we want to view as multiples of MSS. 184 * 185 * The value of MSS could be either increased or descreased. 186 */ 187 void 188 tcp_mss_set(tcp_t *tcp, uint32_t mss) 189 { 190 uint32_t mss_max; 191 tcp_stack_t *tcps = tcp->tcp_tcps; 192 conn_t *connp = tcp->tcp_connp; 193 194 if (connp->conn_ipversion == IPV4_VERSION) 195 mss_max = tcps->tcps_mss_max_ipv4; 196 else 197 mss_max = tcps->tcps_mss_max_ipv6; 198 199 if (mss < tcps->tcps_mss_min) 200 mss = tcps->tcps_mss_min; 201 if (mss > mss_max) 202 mss = mss_max; 203 /* 204 * Unless naglim has been set by our client to 205 * a non-mss value, force naglim to track mss. 206 * This can help to aggregate small writes. 207 */ 208 if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim) 209 tcp->tcp_naglim = mss; 210 /* 211 * TCP should be able to buffer at least 4 MSS data for obvious 212 * performance reason. 213 */ 214 if ((mss << 2) > connp->conn_sndbuf) 215 connp->conn_sndbuf = mss << 2; 216 217 /* 218 * Set the send lowater to at least twice of MSS. 219 */ 220 if ((mss << 1) > connp->conn_sndlowat) 221 connp->conn_sndlowat = mss << 1; 222 223 /* 224 * Update tcp_cwnd according to the new value of MSS. Keep the 225 * previous ratio to preserve the transmit rate. 226 */ 227 tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss; 228 tcp->tcp_cwnd_cnt = 0; 229 230 tcp->tcp_mss = mss; 231 (void) tcp_maxpsz_set(tcp, B_TRUE); 232 } 233 234 /* 235 * Extract option values from a tcp header. We put any found values into the 236 * tcpopt struct and return a bitmask saying which options were found. 237 */ 238 static int 239 tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt) 240 { 241 uchar_t *endp; 242 int len; 243 uint32_t mss; 244 uchar_t *up = (uchar_t *)tcpha; 245 int found = 0; 246 int32_t sack_len; 247 tcp_seq sack_begin, sack_end; 248 tcp_t *tcp; 249 250 endp = up + TCP_HDR_LENGTH(tcpha); 251 up += TCP_MIN_HEADER_LENGTH; 252 while (up < endp) { 253 len = endp - up; 254 switch (*up) { 255 case TCPOPT_EOL: 256 break; 257 258 case TCPOPT_NOP: 259 up++; 260 continue; 261 262 case TCPOPT_MAXSEG: 263 if (len < TCPOPT_MAXSEG_LEN || 264 up[1] != TCPOPT_MAXSEG_LEN) 265 break; 266 267 mss = BE16_TO_U16(up+2); 268 /* Caller must handle tcp_mss_min and tcp_mss_max_* */ 269 tcpopt->tcp_opt_mss = mss; 270 found |= TCP_OPT_MSS_PRESENT; 271 272 up += TCPOPT_MAXSEG_LEN; 273 continue; 274 275 case TCPOPT_WSCALE: 276 if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN) 277 break; 278 279 if (up[2] > TCP_MAX_WINSHIFT) 280 tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT; 281 else 282 tcpopt->tcp_opt_wscale = up[2]; 283 found |= TCP_OPT_WSCALE_PRESENT; 284 285 up += TCPOPT_WS_LEN; 286 continue; 287 288 case TCPOPT_SACK_PERMITTED: 289 if (len < TCPOPT_SACK_OK_LEN || 290 up[1] != TCPOPT_SACK_OK_LEN) 291 break; 292 found |= TCP_OPT_SACK_OK_PRESENT; 293 up += TCPOPT_SACK_OK_LEN; 294 continue; 295 296 case TCPOPT_SACK: 297 if (len <= 2 || up[1] <= 2 || len < up[1]) 298 break; 299 300 /* If TCP is not interested in SACK blks... */ 301 if ((tcp = tcpopt->tcp) == NULL) { 302 up += up[1]; 303 continue; 304 } 305 sack_len = up[1] - TCPOPT_HEADER_LEN; 306 up += TCPOPT_HEADER_LEN; 307 308 /* 309 * If the list is empty, allocate one and assume 310 * nothing is sack'ed. 311 */ 312 if (tcp->tcp_notsack_list == NULL) { 313 tcp_notsack_update(&(tcp->tcp_notsack_list), 314 tcp->tcp_suna, tcp->tcp_snxt, 315 &(tcp->tcp_num_notsack_blk), 316 &(tcp->tcp_cnt_notsack_list)); 317 318 /* 319 * Make sure tcp_notsack_list is not NULL. 320 * This happens when kmem_alloc(KM_NOSLEEP) 321 * returns NULL. 322 */ 323 if (tcp->tcp_notsack_list == NULL) { 324 up += sack_len; 325 continue; 326 } 327 tcp->tcp_fack = tcp->tcp_suna; 328 } 329 330 while (sack_len > 0) { 331 if (up + 8 > endp) { 332 up = endp; 333 break; 334 } 335 sack_begin = BE32_TO_U32(up); 336 up += 4; 337 sack_end = BE32_TO_U32(up); 338 up += 4; 339 sack_len -= 8; 340 /* 341 * Bounds checking. Make sure the SACK 342 * info is within tcp_suna and tcp_snxt. 343 * If this SACK blk is out of bound, ignore 344 * it but continue to parse the following 345 * blks. 346 */ 347 if (SEQ_LEQ(sack_end, sack_begin) || 348 SEQ_LT(sack_begin, tcp->tcp_suna) || 349 SEQ_GT(sack_end, tcp->tcp_snxt)) { 350 continue; 351 } 352 tcp_notsack_insert(&(tcp->tcp_notsack_list), 353 sack_begin, sack_end, 354 &(tcp->tcp_num_notsack_blk), 355 &(tcp->tcp_cnt_notsack_list)); 356 if (SEQ_GT(sack_end, tcp->tcp_fack)) { 357 tcp->tcp_fack = sack_end; 358 } 359 } 360 found |= TCP_OPT_SACK_PRESENT; 361 continue; 362 363 case TCPOPT_TSTAMP: 364 if (len < TCPOPT_TSTAMP_LEN || 365 up[1] != TCPOPT_TSTAMP_LEN) 366 break; 367 368 tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2); 369 tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6); 370 371 found |= TCP_OPT_TSTAMP_PRESENT; 372 373 up += TCPOPT_TSTAMP_LEN; 374 continue; 375 376 default: 377 if (len <= 1 || len < (int)up[1] || up[1] == 0) 378 break; 379 up += up[1]; 380 continue; 381 } 382 break; 383 } 384 return (found); 385 } 386 387 /* 388 * Process all TCP option in SYN segment. Note that this function should 389 * be called after tcp_set_destination() is called so that the necessary info 390 * from IRE is already set in the tcp structure. 391 * 392 * This function sets up the correct tcp_mss value according to the 393 * MSS option value and our header size. It also sets up the window scale 394 * and timestamp values, and initialize SACK info blocks. But it does not 395 * change receive window size after setting the tcp_mss value. The caller 396 * should do the appropriate change. 397 */ 398 static void 399 tcp_process_options(tcp_t *tcp, tcpha_t *tcpha) 400 { 401 int options; 402 tcp_opt_t tcpopt; 403 uint32_t mss_max; 404 char *tmp_tcph; 405 tcp_stack_t *tcps = tcp->tcp_tcps; 406 conn_t *connp = tcp->tcp_connp; 407 408 tcpopt.tcp = NULL; 409 options = tcp_parse_options(tcpha, &tcpopt); 410 411 /* 412 * Process MSS option. Note that MSS option value does not account 413 * for IP or TCP options. This means that it is equal to MTU - minimum 414 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for 415 * IPv6. 416 */ 417 if (!(options & TCP_OPT_MSS_PRESENT)) { 418 if (connp->conn_ipversion == IPV4_VERSION) 419 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4; 420 else 421 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6; 422 } else { 423 if (connp->conn_ipversion == IPV4_VERSION) 424 mss_max = tcps->tcps_mss_max_ipv4; 425 else 426 mss_max = tcps->tcps_mss_max_ipv6; 427 if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min) 428 tcpopt.tcp_opt_mss = tcps->tcps_mss_min; 429 else if (tcpopt.tcp_opt_mss > mss_max) 430 tcpopt.tcp_opt_mss = mss_max; 431 } 432 433 /* Process Window Scale option. */ 434 if (options & TCP_OPT_WSCALE_PRESENT) { 435 tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale; 436 tcp->tcp_snd_ws_ok = B_TRUE; 437 } else { 438 tcp->tcp_snd_ws = B_FALSE; 439 tcp->tcp_snd_ws_ok = B_FALSE; 440 tcp->tcp_rcv_ws = B_FALSE; 441 } 442 443 /* Process Timestamp option. */ 444 if ((options & TCP_OPT_TSTAMP_PRESENT) && 445 (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) { 446 tmp_tcph = (char *)tcp->tcp_tcpha; 447 448 tcp->tcp_snd_ts_ok = B_TRUE; 449 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 450 tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64(); 451 ASSERT(OK_32PTR(tmp_tcph)); 452 ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH); 453 454 /* Fill in our template header with basic timestamp option. */ 455 tmp_tcph += connp->conn_ht_ulp_len; 456 tmp_tcph[0] = TCPOPT_NOP; 457 tmp_tcph[1] = TCPOPT_NOP; 458 tmp_tcph[2] = TCPOPT_TSTAMP; 459 tmp_tcph[3] = TCPOPT_TSTAMP_LEN; 460 connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN; 461 connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN; 462 tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4); 463 } else { 464 tcp->tcp_snd_ts_ok = B_FALSE; 465 } 466 467 /* 468 * Process SACK options. If SACK is enabled for this connection, 469 * then allocate the SACK info structure. Note the following ways 470 * when tcp_snd_sack_ok is set to true. 471 * 472 * For active connection: in tcp_set_destination() called in 473 * tcp_connect(). 474 * 475 * For passive connection: in tcp_set_destination() called in 476 * tcp_input_listener(). 477 * 478 * That's the reason why the extra TCP_IS_DETACHED() check is there. 479 * That check makes sure that if we did not send a SACK OK option, 480 * we will not enable SACK for this connection even though the other 481 * side sends us SACK OK option. For active connection, the SACK 482 * info structure has already been allocated. So we need to free 483 * it if SACK is disabled. 484 */ 485 if ((options & TCP_OPT_SACK_OK_PRESENT) && 486 (tcp->tcp_snd_sack_ok || 487 (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) { 488 ASSERT(tcp->tcp_num_sack_blk == 0); 489 ASSERT(tcp->tcp_notsack_list == NULL); 490 491 tcp->tcp_snd_sack_ok = B_TRUE; 492 if (tcp->tcp_snd_ts_ok) { 493 tcp->tcp_max_sack_blk = 3; 494 } else { 495 tcp->tcp_max_sack_blk = 4; 496 } 497 } else if (tcp->tcp_snd_sack_ok) { 498 /* 499 * Resetting tcp_snd_sack_ok to B_FALSE so that 500 * no SACK info will be used for this 501 * connection. This assumes that SACK usage 502 * permission is negotiated. This may need 503 * to be changed once this is clarified. 504 */ 505 ASSERT(tcp->tcp_num_sack_blk == 0); 506 ASSERT(tcp->tcp_notsack_list == NULL); 507 tcp->tcp_snd_sack_ok = B_FALSE; 508 } 509 510 /* 511 * Now we know the exact TCP/IP header length, subtract 512 * that from tcp_mss to get our side's MSS. 513 */ 514 tcp->tcp_mss -= connp->conn_ht_iphc_len; 515 516 /* 517 * Here we assume that the other side's header size will be equal to 518 * our header size. We calculate the real MSS accordingly. Need to 519 * take into additional stuffs IPsec puts in. 520 * 521 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) 522 */ 523 tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len + 524 tcp->tcp_ipsec_overhead - 525 ((connp->conn_ipversion == IPV4_VERSION ? 526 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH); 527 528 /* 529 * Set MSS to the smaller one of both ends of the connection. 530 * We should not have called tcp_mss_set() before, but our 531 * side of the MSS should have been set to a proper value 532 * by tcp_set_destination(). tcp_mss_set() will also set up the 533 * STREAM head parameters properly. 534 * 535 * If we have a larger-than-16-bit window but the other side 536 * didn't want to do window scale, tcp_rwnd_set() will take 537 * care of that. 538 */ 539 tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss)); 540 541 /* 542 * Initialize tcp_cwnd value. After tcp_mss_set(), tcp_mss has been 543 * updated properly. 544 */ 545 TCP_SET_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial); 546 } 547 548 /* 549 * Add a new piece to the tcp reassembly queue. If the gap at the beginning 550 * is filled, return as much as we can. The message passed in may be 551 * multi-part, chained using b_cont. "start" is the starting sequence 552 * number for this piece. 553 */ 554 static mblk_t * 555 tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start) 556 { 557 uint32_t end; 558 mblk_t *mp1; 559 mblk_t *mp2; 560 mblk_t *next_mp; 561 uint32_t u1; 562 tcp_stack_t *tcps = tcp->tcp_tcps; 563 564 565 /* Walk through all the new pieces. */ 566 do { 567 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 568 (uintptr_t)INT_MAX); 569 end = start + (int)(mp->b_wptr - mp->b_rptr); 570 next_mp = mp->b_cont; 571 if (start == end) { 572 /* Empty. Blast it. */ 573 freeb(mp); 574 continue; 575 } 576 mp->b_cont = NULL; 577 TCP_REASS_SET_SEQ(mp, start); 578 TCP_REASS_SET_END(mp, end); 579 mp1 = tcp->tcp_reass_tail; 580 if (!mp1) { 581 tcp->tcp_reass_tail = mp; 582 tcp->tcp_reass_head = mp; 583 TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs); 584 TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes, 585 end - start); 586 continue; 587 } 588 /* New stuff completely beyond tail? */ 589 if (SEQ_GEQ(start, TCP_REASS_END(mp1))) { 590 /* Link it on end. */ 591 mp1->b_cont = mp; 592 tcp->tcp_reass_tail = mp; 593 TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs); 594 TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes, 595 end - start); 596 continue; 597 } 598 mp1 = tcp->tcp_reass_head; 599 u1 = TCP_REASS_SEQ(mp1); 600 /* New stuff at the front? */ 601 if (SEQ_LT(start, u1)) { 602 /* Yes... Check for overlap. */ 603 mp->b_cont = mp1; 604 tcp->tcp_reass_head = mp; 605 tcp_reass_elim_overlap(tcp, mp); 606 continue; 607 } 608 /* 609 * The new piece fits somewhere between the head and tail. 610 * We find our slot, where mp1 precedes us and mp2 trails. 611 */ 612 for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) { 613 u1 = TCP_REASS_SEQ(mp2); 614 if (SEQ_LEQ(start, u1)) 615 break; 616 } 617 /* Link ourselves in */ 618 mp->b_cont = mp2; 619 mp1->b_cont = mp; 620 621 /* Trim overlap with following mblk(s) first */ 622 tcp_reass_elim_overlap(tcp, mp); 623 624 /* Trim overlap with preceding mblk */ 625 tcp_reass_elim_overlap(tcp, mp1); 626 627 } while (start = end, mp = next_mp); 628 mp1 = tcp->tcp_reass_head; 629 /* Anything ready to go? */ 630 if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt) 631 return (NULL); 632 /* Eat what we can off the queue */ 633 for (;;) { 634 mp = mp1->b_cont; 635 end = TCP_REASS_END(mp1); 636 TCP_REASS_SET_SEQ(mp1, 0); 637 TCP_REASS_SET_END(mp1, 0); 638 if (!mp) { 639 tcp->tcp_reass_tail = NULL; 640 break; 641 } 642 if (end != TCP_REASS_SEQ(mp)) { 643 mp1->b_cont = NULL; 644 break; 645 } 646 mp1 = mp; 647 } 648 mp1 = tcp->tcp_reass_head; 649 tcp->tcp_reass_head = mp; 650 return (mp1); 651 } 652 653 /* Eliminate any overlap that mp may have over later mblks */ 654 static void 655 tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp) 656 { 657 uint32_t end; 658 mblk_t *mp1; 659 uint32_t u1; 660 tcp_stack_t *tcps = tcp->tcp_tcps; 661 662 end = TCP_REASS_END(mp); 663 while ((mp1 = mp->b_cont) != NULL) { 664 u1 = TCP_REASS_SEQ(mp1); 665 if (!SEQ_GT(end, u1)) 666 break; 667 if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) { 668 mp->b_wptr -= end - u1; 669 TCP_REASS_SET_END(mp, u1); 670 TCPS_BUMP_MIB(tcps, tcpInDataPartDupSegs); 671 TCPS_UPDATE_MIB(tcps, tcpInDataPartDupBytes, 672 end - u1); 673 break; 674 } 675 mp->b_cont = mp1->b_cont; 676 TCP_REASS_SET_SEQ(mp1, 0); 677 TCP_REASS_SET_END(mp1, 0); 678 freeb(mp1); 679 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs); 680 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, end - u1); 681 } 682 if (!mp1) 683 tcp->tcp_reass_tail = mp; 684 } 685 686 /* 687 * This function does PAWS protection check. Returns B_TRUE if the 688 * segment passes the PAWS test, else returns B_FALSE. 689 */ 690 boolean_t 691 tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp) 692 { 693 uint8_t flags; 694 int options; 695 uint8_t *up; 696 conn_t *connp = tcp->tcp_connp; 697 698 flags = (unsigned int)tcpha->tha_flags & 0xFF; 699 /* 700 * If timestamp option is aligned nicely, get values inline, 701 * otherwise call general routine to parse. Only do that 702 * if timestamp is the only option. 703 */ 704 if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH + 705 TCPOPT_REAL_TS_LEN && 706 OK_32PTR((up = ((uint8_t *)tcpha) + 707 TCP_MIN_HEADER_LENGTH)) && 708 *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) { 709 tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4)); 710 tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8)); 711 712 options = TCP_OPT_TSTAMP_PRESENT; 713 } else { 714 if (tcp->tcp_snd_sack_ok) { 715 tcpoptp->tcp = tcp; 716 } else { 717 tcpoptp->tcp = NULL; 718 } 719 options = tcp_parse_options(tcpha, tcpoptp); 720 } 721 722 if (options & TCP_OPT_TSTAMP_PRESENT) { 723 /* 724 * Do PAWS per RFC 1323 section 4.2. Accept RST 725 * regardless of the timestamp, page 18 RFC 1323.bis. 726 */ 727 if ((flags & TH_RST) == 0 && 728 TSTMP_LT(tcpoptp->tcp_opt_ts_val, 729 tcp->tcp_ts_recent)) { 730 if (LBOLT_FASTPATH64 < 731 (tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) { 732 /* This segment is not acceptable. */ 733 return (B_FALSE); 734 } else { 735 /* 736 * Connection has been idle for 737 * too long. Reset the timestamp 738 * and assume the segment is valid. 739 */ 740 tcp->tcp_ts_recent = 741 tcpoptp->tcp_opt_ts_val; 742 } 743 } 744 } else { 745 /* 746 * If we don't get a timestamp on every packet, we 747 * figure we can't really trust 'em, so we stop sending 748 * and parsing them. 749 */ 750 tcp->tcp_snd_ts_ok = B_FALSE; 751 752 connp->conn_ht_iphc_len -= TCPOPT_REAL_TS_LEN; 753 connp->conn_ht_ulp_len -= TCPOPT_REAL_TS_LEN; 754 tcp->tcp_tcpha->tha_offset_and_reserved -= (3 << 4); 755 /* 756 * Adjust the tcp_mss and tcp_cwnd accordingly. We avoid 757 * doing a slow start here so as to not to lose on the 758 * transfer rate built up so far. 759 */ 760 tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN); 761 if (tcp->tcp_snd_sack_ok) 762 tcp->tcp_max_sack_blk = 4; 763 } 764 return (B_TRUE); 765 } 766 767 /* 768 * Defense for the SYN attack - 769 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest 770 * one from the list of droppable eagers. This list is a subset of q0. 771 * see comments before the definition of MAKE_DROPPABLE(). 772 * 2. Don't drop a SYN request before its first timeout. This gives every 773 * request at least til the first timeout to complete its 3-way handshake. 774 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many 775 * requests currently on the queue that has timed out. This will be used 776 * as an indicator of whether an attack is under way, so that appropriate 777 * actions can be taken. (It's incremented in tcp_timer() and decremented 778 * either when eager goes into ESTABLISHED, or gets freed up.) 779 * 4. The current threshold is - # of timeout > q0len/4 => SYN alert on 780 * # of timeout drops back to <= q0len/32 => SYN alert off 781 */ 782 static boolean_t 783 tcp_drop_q0(tcp_t *tcp) 784 { 785 tcp_t *eager; 786 mblk_t *mp; 787 tcp_stack_t *tcps = tcp->tcp_tcps; 788 789 ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock)); 790 ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0); 791 792 /* Pick oldest eager from the list of droppable eagers */ 793 eager = tcp->tcp_eager_prev_drop_q0; 794 795 /* If list is empty. return B_FALSE */ 796 if (eager == tcp) { 797 return (B_FALSE); 798 } 799 800 /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */ 801 if ((mp = allocb(0, BPRI_HI)) == NULL) 802 return (B_FALSE); 803 804 /* 805 * Take this eager out from the list of droppable eagers since we are 806 * going to drop it. 807 */ 808 MAKE_UNDROPPABLE(eager); 809 810 if (tcp->tcp_connp->conn_debug) { 811 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 812 "tcp_drop_q0: listen half-open queue (max=%d) overflow" 813 " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0, 814 tcp->tcp_conn_req_cnt_q0, 815 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 816 } 817 818 TCPS_BUMP_MIB(tcps, tcpHalfOpenDrop); 819 820 /* Put a reference on the conn as we are enqueueing it in the sqeue */ 821 CONN_INC_REF(eager->tcp_connp); 822 823 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, 824 tcp_clean_death_wrapper, eager->tcp_connp, NULL, 825 SQ_FILL, SQTAG_TCP_DROP_Q0); 826 827 return (B_TRUE); 828 } 829 830 /* 831 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6 832 */ 833 static mblk_t * 834 tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp, 835 ip_recv_attr_t *ira) 836 { 837 tcp_t *ltcp = lconnp->conn_tcp; 838 tcp_t *tcp = connp->conn_tcp; 839 mblk_t *tpi_mp; 840 ipha_t *ipha; 841 ip6_t *ip6h; 842 sin6_t sin6; 843 uint_t ifindex = ira->ira_ruifindex; 844 tcp_stack_t *tcps = tcp->tcp_tcps; 845 846 if (ira->ira_flags & IRAF_IS_IPV4) { 847 ipha = (ipha_t *)mp->b_rptr; 848 849 connp->conn_ipversion = IPV4_VERSION; 850 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6); 851 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6); 852 connp->conn_saddr_v6 = connp->conn_laddr_v6; 853 854 sin6 = sin6_null; 855 sin6.sin6_addr = connp->conn_faddr_v6; 856 sin6.sin6_port = connp->conn_fport; 857 sin6.sin6_family = AF_INET6; 858 sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6, 859 IPCL_ZONEID(lconnp), tcps->tcps_netstack); 860 861 if (connp->conn_recv_ancillary.crb_recvdstaddr) { 862 sin6_t sin6d; 863 864 sin6d = sin6_null; 865 sin6d.sin6_addr = connp->conn_laddr_v6; 866 sin6d.sin6_port = connp->conn_lport; 867 sin6d.sin6_family = AF_INET; 868 tpi_mp = mi_tpi_extconn_ind(NULL, 869 (char *)&sin6d, sizeof (sin6_t), 870 (char *)&tcp, 871 (t_scalar_t)sizeof (intptr_t), 872 (char *)&sin6d, sizeof (sin6_t), 873 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 874 } else { 875 tpi_mp = mi_tpi_conn_ind(NULL, 876 (char *)&sin6, sizeof (sin6_t), 877 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 878 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 879 } 880 } else { 881 ip6h = (ip6_t *)mp->b_rptr; 882 883 connp->conn_ipversion = IPV6_VERSION; 884 connp->conn_laddr_v6 = ip6h->ip6_dst; 885 connp->conn_faddr_v6 = ip6h->ip6_src; 886 connp->conn_saddr_v6 = connp->conn_laddr_v6; 887 888 sin6 = sin6_null; 889 sin6.sin6_addr = connp->conn_faddr_v6; 890 sin6.sin6_port = connp->conn_fport; 891 sin6.sin6_family = AF_INET6; 892 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 893 sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6, 894 IPCL_ZONEID(lconnp), tcps->tcps_netstack); 895 896 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 897 /* Pass up the scope_id of remote addr */ 898 sin6.sin6_scope_id = ifindex; 899 } else { 900 sin6.sin6_scope_id = 0; 901 } 902 if (connp->conn_recv_ancillary.crb_recvdstaddr) { 903 sin6_t sin6d; 904 905 sin6d = sin6_null; 906 sin6.sin6_addr = connp->conn_laddr_v6; 907 sin6d.sin6_port = connp->conn_lport; 908 sin6d.sin6_family = AF_INET6; 909 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6)) 910 sin6d.sin6_scope_id = ifindex; 911 912 tpi_mp = mi_tpi_extconn_ind(NULL, 913 (char *)&sin6d, sizeof (sin6_t), 914 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 915 (char *)&sin6d, sizeof (sin6_t), 916 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 917 } else { 918 tpi_mp = mi_tpi_conn_ind(NULL, 919 (char *)&sin6, sizeof (sin6_t), 920 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 921 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 922 } 923 } 924 925 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 926 return (tpi_mp); 927 } 928 929 /* Handle a SYN on an AF_INET socket */ 930 static mblk_t * 931 tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp, 932 ip_recv_attr_t *ira) 933 { 934 tcp_t *ltcp = lconnp->conn_tcp; 935 tcp_t *tcp = connp->conn_tcp; 936 sin_t sin; 937 mblk_t *tpi_mp = NULL; 938 tcp_stack_t *tcps = tcp->tcp_tcps; 939 ipha_t *ipha; 940 941 ASSERT(ira->ira_flags & IRAF_IS_IPV4); 942 ipha = (ipha_t *)mp->b_rptr; 943 944 connp->conn_ipversion = IPV4_VERSION; 945 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6); 946 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6); 947 connp->conn_saddr_v6 = connp->conn_laddr_v6; 948 949 sin = sin_null; 950 sin.sin_addr.s_addr = connp->conn_faddr_v4; 951 sin.sin_port = connp->conn_fport; 952 sin.sin_family = AF_INET; 953 if (lconnp->conn_recv_ancillary.crb_recvdstaddr) { 954 sin_t sind; 955 956 sind = sin_null; 957 sind.sin_addr.s_addr = connp->conn_laddr_v4; 958 sind.sin_port = connp->conn_lport; 959 sind.sin_family = AF_INET; 960 tpi_mp = mi_tpi_extconn_ind(NULL, 961 (char *)&sind, sizeof (sin_t), (char *)&tcp, 962 (t_scalar_t)sizeof (intptr_t), (char *)&sind, 963 sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum); 964 } else { 965 tpi_mp = mi_tpi_conn_ind(NULL, 966 (char *)&sin, sizeof (sin_t), 967 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 968 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 969 } 970 971 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 972 return (tpi_mp); 973 } 974 975 /* 976 * Called via squeue to get on to eager's perimeter. It sends a 977 * TH_RST if eager is in the fanout table. The listener wants the 978 * eager to disappear either by means of tcp_eager_blowoff() or 979 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be 980 * called (via squeue) if the eager cannot be inserted in the 981 * fanout table in tcp_input_listener(). 982 */ 983 /* ARGSUSED */ 984 void 985 tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy) 986 { 987 conn_t *econnp = (conn_t *)arg; 988 tcp_t *eager = econnp->conn_tcp; 989 tcp_t *listener = eager->tcp_listener; 990 991 /* 992 * We could be called because listener is closing. Since 993 * the eager was using listener's queue's, we avoid 994 * using the listeners queues from now on. 995 */ 996 ASSERT(eager->tcp_detached); 997 econnp->conn_rq = NULL; 998 econnp->conn_wq = NULL; 999 1000 /* 1001 * An eager's conn_fanout will be NULL if it's a duplicate 1002 * for an existing 4-tuples in the conn fanout table. 1003 * We don't want to send an RST out in such case. 1004 */ 1005 if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) { 1006 tcp_xmit_ctl("tcp_eager_kill, can't wait", 1007 eager, eager->tcp_snxt, 0, TH_RST); 1008 } 1009 1010 /* We are here because listener wants this eager gone */ 1011 if (listener != NULL) { 1012 mutex_enter(&listener->tcp_eager_lock); 1013 tcp_eager_unlink(eager); 1014 if (eager->tcp_tconnind_started) { 1015 /* 1016 * The eager has sent a conn_ind up to the 1017 * listener but listener decides to close 1018 * instead. We need to drop the extra ref 1019 * placed on eager in tcp_input_data() before 1020 * sending the conn_ind to listener. 1021 */ 1022 CONN_DEC_REF(econnp); 1023 } 1024 mutex_exit(&listener->tcp_eager_lock); 1025 CONN_DEC_REF(listener->tcp_connp); 1026 } 1027 1028 if (eager->tcp_state != TCPS_CLOSED) 1029 tcp_close_detached(eager); 1030 } 1031 1032 /* 1033 * Reset any eager connection hanging off this listener marked 1034 * with 'seqnum' and then reclaim it's resources. 1035 */ 1036 boolean_t 1037 tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum) 1038 { 1039 tcp_t *eager; 1040 mblk_t *mp; 1041 1042 eager = listener; 1043 mutex_enter(&listener->tcp_eager_lock); 1044 do { 1045 eager = eager->tcp_eager_next_q; 1046 if (eager == NULL) { 1047 mutex_exit(&listener->tcp_eager_lock); 1048 return (B_FALSE); 1049 } 1050 } while (eager->tcp_conn_req_seqnum != seqnum); 1051 1052 if (eager->tcp_closemp_used) { 1053 mutex_exit(&listener->tcp_eager_lock); 1054 return (B_TRUE); 1055 } 1056 eager->tcp_closemp_used = B_TRUE; 1057 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1058 CONN_INC_REF(eager->tcp_connp); 1059 mutex_exit(&listener->tcp_eager_lock); 1060 mp = &eager->tcp_closemp; 1061 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill, 1062 eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF); 1063 return (B_TRUE); 1064 } 1065 1066 /* 1067 * Reset any eager connection hanging off this listener 1068 * and then reclaim it's resources. 1069 */ 1070 void 1071 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only) 1072 { 1073 tcp_t *eager; 1074 mblk_t *mp; 1075 tcp_stack_t *tcps = listener->tcp_tcps; 1076 1077 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 1078 1079 if (!q0_only) { 1080 /* First cleanup q */ 1081 TCP_STAT(tcps, tcp_eager_blowoff_q); 1082 eager = listener->tcp_eager_next_q; 1083 while (eager != NULL) { 1084 if (!eager->tcp_closemp_used) { 1085 eager->tcp_closemp_used = B_TRUE; 1086 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1087 CONN_INC_REF(eager->tcp_connp); 1088 mp = &eager->tcp_closemp; 1089 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, 1090 tcp_eager_kill, eager->tcp_connp, NULL, 1091 SQ_FILL, SQTAG_TCP_EAGER_CLEANUP); 1092 } 1093 eager = eager->tcp_eager_next_q; 1094 } 1095 } 1096 /* Then cleanup q0 */ 1097 TCP_STAT(tcps, tcp_eager_blowoff_q0); 1098 eager = listener->tcp_eager_next_q0; 1099 while (eager != listener) { 1100 if (!eager->tcp_closemp_used) { 1101 eager->tcp_closemp_used = B_TRUE; 1102 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1103 CONN_INC_REF(eager->tcp_connp); 1104 mp = &eager->tcp_closemp; 1105 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, 1106 tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL, 1107 SQTAG_TCP_EAGER_CLEANUP_Q0); 1108 } 1109 eager = eager->tcp_eager_next_q0; 1110 } 1111 } 1112 1113 /* 1114 * If we are an eager connection hanging off a listener that hasn't 1115 * formally accepted the connection yet, get off his list and blow off 1116 * any data that we have accumulated. 1117 */ 1118 void 1119 tcp_eager_unlink(tcp_t *tcp) 1120 { 1121 tcp_t *listener = tcp->tcp_listener; 1122 1123 ASSERT(listener != NULL); 1124 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 1125 if (tcp->tcp_eager_next_q0 != NULL) { 1126 ASSERT(tcp->tcp_eager_prev_q0 != NULL); 1127 1128 /* Remove the eager tcp from q0 */ 1129 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 1130 tcp->tcp_eager_prev_q0; 1131 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 1132 tcp->tcp_eager_next_q0; 1133 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 1134 listener->tcp_conn_req_cnt_q0--; 1135 1136 tcp->tcp_eager_next_q0 = NULL; 1137 tcp->tcp_eager_prev_q0 = NULL; 1138 1139 /* 1140 * Take the eager out, if it is in the list of droppable 1141 * eagers. 1142 */ 1143 MAKE_UNDROPPABLE(tcp); 1144 1145 if (tcp->tcp_syn_rcvd_timeout != 0) { 1146 /* we have timed out before */ 1147 ASSERT(listener->tcp_syn_rcvd_timeout > 0); 1148 listener->tcp_syn_rcvd_timeout--; 1149 } 1150 } else { 1151 tcp_t **tcpp = &listener->tcp_eager_next_q; 1152 tcp_t *prev = NULL; 1153 1154 for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) { 1155 if (tcpp[0] == tcp) { 1156 if (listener->tcp_eager_last_q == tcp) { 1157 /* 1158 * If we are unlinking the last 1159 * element on the list, adjust 1160 * tail pointer. Set tail pointer 1161 * to nil when list is empty. 1162 */ 1163 ASSERT(tcp->tcp_eager_next_q == NULL); 1164 if (listener->tcp_eager_last_q == 1165 listener->tcp_eager_next_q) { 1166 listener->tcp_eager_last_q = 1167 NULL; 1168 } else { 1169 /* 1170 * We won't get here if there 1171 * is only one eager in the 1172 * list. 1173 */ 1174 ASSERT(prev != NULL); 1175 listener->tcp_eager_last_q = 1176 prev; 1177 } 1178 } 1179 tcpp[0] = tcp->tcp_eager_next_q; 1180 tcp->tcp_eager_next_q = NULL; 1181 tcp->tcp_eager_last_q = NULL; 1182 ASSERT(listener->tcp_conn_req_cnt_q > 0); 1183 listener->tcp_conn_req_cnt_q--; 1184 break; 1185 } 1186 prev = tcpp[0]; 1187 } 1188 } 1189 tcp->tcp_listener = NULL; 1190 } 1191 1192 /* BEGIN CSTYLED */ 1193 /* 1194 * 1195 * The sockfs ACCEPT path: 1196 * ======================= 1197 * 1198 * The eager is now established in its own perimeter as soon as SYN is 1199 * received in tcp_input_listener(). When sockfs receives conn_ind, it 1200 * completes the accept processing on the acceptor STREAM. The sending 1201 * of conn_ind part is common for both sockfs listener and a TLI/XTI 1202 * listener but a TLI/XTI listener completes the accept processing 1203 * on the listener perimeter. 1204 * 1205 * Common control flow for 3 way handshake: 1206 * ---------------------------------------- 1207 * 1208 * incoming SYN (listener perimeter) -> tcp_input_listener() 1209 * 1210 * incoming SYN-ACK-ACK (eager perim) -> tcp_input_data() 1211 * send T_CONN_IND (listener perim) -> tcp_send_conn_ind() 1212 * 1213 * Sockfs ACCEPT Path: 1214 * ------------------- 1215 * 1216 * open acceptor stream (tcp_open allocates tcp_tli_accept() 1217 * as STREAM entry point) 1218 * 1219 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_tli_accept() 1220 * 1221 * tcp_tli_accept() extracts the eager and makes the q->q_ptr <-> eager 1222 * association (we are not behind eager's squeue but sockfs is protecting us 1223 * and no one knows about this stream yet. The STREAMS entry point q->q_info 1224 * is changed to point at tcp_wput(). 1225 * 1226 * tcp_accept_common() sends any deferred eagers via tcp_send_pending() to 1227 * listener (done on listener's perimeter). 1228 * 1229 * tcp_tli_accept() calls tcp_accept_finish() on eagers perimeter to finish 1230 * accept. 1231 * 1232 * TLI/XTI client ACCEPT path: 1233 * --------------------------- 1234 * 1235 * soaccept() sends T_CONN_RES on the listener STREAM. 1236 * 1237 * tcp_tli_accept() -> tcp_accept_swap() complete the processing and send 1238 * a M_SETOPS mblk to eager perimeter to finish accept (tcp_accept_finish()). 1239 * 1240 * Locks: 1241 * ====== 1242 * 1243 * listener->tcp_eager_lock protects the listeners->tcp_eager_next_q0 and 1244 * and listeners->tcp_eager_next_q. 1245 * 1246 * Referencing: 1247 * ============ 1248 * 1249 * 1) We start out in tcp_input_listener by eager placing a ref on 1250 * listener and listener adding eager to listeners->tcp_eager_next_q0. 1251 * 1252 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before 1253 * doing so we place a ref on the eager. This ref is finally dropped at the 1254 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the 1255 * reference is dropped by the squeue framework. 1256 * 1257 * 3) The ref on listener placed in 1 above is dropped in tcp_accept_finish 1258 * 1259 * The reference must be released by the same entity that added the reference 1260 * In the above scheme, the eager is the entity that adds and releases the 1261 * references. Note that tcp_accept_finish executes in the squeue of the eager 1262 * (albeit after it is attached to the acceptor stream). Though 1. executes 1263 * in the listener's squeue, the eager is nascent at this point and the 1264 * reference can be considered to have been added on behalf of the eager. 1265 * 1266 * Eager getting a Reset or listener closing: 1267 * ========================================== 1268 * 1269 * Once the listener and eager are linked, the listener never does the unlink. 1270 * If the listener needs to close, tcp_eager_cleanup() is called which queues 1271 * a message on all eager perimeter. The eager then does the unlink, clears 1272 * any pointers to the listener's queue and drops the reference to the 1273 * listener. The listener waits in tcp_close outside the squeue until its 1274 * refcount has dropped to 1. This ensures that the listener has waited for 1275 * all eagers to clear their association with the listener. 1276 * 1277 * Similarly, if eager decides to go away, it can unlink itself and close. 1278 * When the T_CONN_RES comes down, we check if eager has closed. Note that 1279 * the reference to eager is still valid because of the extra ref we put 1280 * in tcp_send_conn_ind. 1281 * 1282 * Listener can always locate the eager under the protection 1283 * of the listener->tcp_eager_lock, and then do a refhold 1284 * on the eager during the accept processing. 1285 * 1286 * The acceptor stream accesses the eager in the accept processing 1287 * based on the ref placed on eager before sending T_conn_ind. 1288 * The only entity that can negate this refhold is a listener close 1289 * which is mutually exclusive with an active acceptor stream. 1290 * 1291 * Eager's reference on the listener 1292 * =================================== 1293 * 1294 * If the accept happens (even on a closed eager) the eager drops its 1295 * reference on the listener at the start of tcp_accept_finish. If the 1296 * eager is killed due to an incoming RST before the T_conn_ind is sent up, 1297 * the reference is dropped in tcp_closei_local. If the listener closes, 1298 * the reference is dropped in tcp_eager_kill. In all cases the reference 1299 * is dropped while executing in the eager's context (squeue). 1300 */ 1301 /* END CSTYLED */ 1302 1303 /* Process the SYN packet, mp, directed at the listener 'tcp' */ 1304 1305 /* 1306 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN. 1307 * tcp_input_data will not see any packets for listeners since the listener 1308 * has conn_recv set to tcp_input_listener. 1309 */ 1310 /* ARGSUSED */ 1311 static void 1312 tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 1313 { 1314 tcpha_t *tcpha; 1315 uint32_t seg_seq; 1316 tcp_t *eager; 1317 int err; 1318 conn_t *econnp = NULL; 1319 squeue_t *new_sqp; 1320 mblk_t *mp1; 1321 uint_t ip_hdr_len; 1322 conn_t *lconnp = (conn_t *)arg; 1323 tcp_t *listener = lconnp->conn_tcp; 1324 tcp_stack_t *tcps = listener->tcp_tcps; 1325 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 1326 uint_t flags; 1327 mblk_t *tpi_mp; 1328 uint_t ifindex = ira->ira_ruifindex; 1329 boolean_t tlc_set = B_FALSE; 1330 1331 ip_hdr_len = ira->ira_ip_hdr_length; 1332 tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len]; 1333 flags = (unsigned int)tcpha->tha_flags & 0xFF; 1334 1335 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, lconnp->conn_ixa, 1336 __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, listener, 1337 __dtrace_tcp_tcph_t *, tcpha); 1338 1339 if (!(flags & TH_SYN)) { 1340 if ((flags & TH_RST) || (flags & TH_URG)) { 1341 freemsg(mp); 1342 return; 1343 } 1344 if (flags & TH_ACK) { 1345 /* Note this executes in listener's squeue */ 1346 tcp_xmit_listeners_reset(mp, ira, ipst, lconnp); 1347 return; 1348 } 1349 1350 freemsg(mp); 1351 return; 1352 } 1353 1354 if (listener->tcp_state != TCPS_LISTEN) 1355 goto error2; 1356 1357 ASSERT(IPCL_IS_BOUND(lconnp)); 1358 1359 mutex_enter(&listener->tcp_eager_lock); 1360 1361 /* 1362 * The system is under memory pressure, so we need to do our part 1363 * to relieve the pressure. So we only accept new request if there 1364 * is nothing waiting to be accepted or waiting to complete the 3-way 1365 * handshake. This means that busy listener will not get too many 1366 * new requests which they cannot handle in time while non-busy 1367 * listener is still functioning properly. 1368 */ 1369 if (tcps->tcps_reclaim && (listener->tcp_conn_req_cnt_q > 0 || 1370 listener->tcp_conn_req_cnt_q0 > 0)) { 1371 mutex_exit(&listener->tcp_eager_lock); 1372 TCP_STAT(tcps, tcp_listen_mem_drop); 1373 goto error2; 1374 } 1375 1376 if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) { 1377 mutex_exit(&listener->tcp_eager_lock); 1378 TCP_STAT(tcps, tcp_listendrop); 1379 TCPS_BUMP_MIB(tcps, tcpListenDrop); 1380 if (lconnp->conn_debug) { 1381 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 1382 "tcp_input_listener: listen backlog (max=%d) " 1383 "overflow (%d pending) on %s", 1384 listener->tcp_conn_req_max, 1385 listener->tcp_conn_req_cnt_q, 1386 tcp_display(listener, NULL, DISP_PORT_ONLY)); 1387 } 1388 goto error2; 1389 } 1390 1391 if (listener->tcp_conn_req_cnt_q0 >= 1392 listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) { 1393 /* 1394 * Q0 is full. Drop a pending half-open req from the queue 1395 * to make room for the new SYN req. Also mark the time we 1396 * drop a SYN. 1397 * 1398 * A more aggressive defense against SYN attack will 1399 * be to set the "tcp_syn_defense" flag now. 1400 */ 1401 TCP_STAT(tcps, tcp_listendropq0); 1402 listener->tcp_last_rcv_lbolt = ddi_get_lbolt64(); 1403 if (!tcp_drop_q0(listener)) { 1404 mutex_exit(&listener->tcp_eager_lock); 1405 TCPS_BUMP_MIB(tcps, tcpListenDropQ0); 1406 if (lconnp->conn_debug) { 1407 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 1408 "tcp_input_listener: listen half-open " 1409 "queue (max=%d) full (%d pending) on %s", 1410 tcps->tcps_conn_req_max_q0, 1411 listener->tcp_conn_req_cnt_q0, 1412 tcp_display(listener, NULL, 1413 DISP_PORT_ONLY)); 1414 } 1415 goto error2; 1416 } 1417 } 1418 1419 /* 1420 * Enforce the limit set on the number of connections per listener. 1421 * Note that tlc_cnt starts with 1. So need to add 1 to tlc_max 1422 * for comparison. 1423 */ 1424 if (listener->tcp_listen_cnt != NULL) { 1425 tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt; 1426 int64_t now; 1427 1428 if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) { 1429 mutex_exit(&listener->tcp_eager_lock); 1430 now = ddi_get_lbolt64(); 1431 atomic_add_32(&tlc->tlc_cnt, -1); 1432 TCP_STAT(tcps, tcp_listen_cnt_drop); 1433 tlc->tlc_drop++; 1434 if (now - tlc->tlc_report_time > 1435 MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) { 1436 zcmn_err(lconnp->conn_zoneid, CE_WARN, 1437 "Listener (port %d) connection max (%u) " 1438 "reached: %u attempts dropped total\n", 1439 ntohs(listener->tcp_connp->conn_lport), 1440 tlc->tlc_max, tlc->tlc_drop); 1441 tlc->tlc_report_time = now; 1442 } 1443 goto error2; 1444 } 1445 tlc_set = B_TRUE; 1446 } 1447 1448 mutex_exit(&listener->tcp_eager_lock); 1449 1450 /* 1451 * IP sets ira_sqp to either the senders conn_sqp (for loopback) 1452 * or based on the ring (for packets from GLD). Otherwise it is 1453 * set based on lbolt i.e., a somewhat random number. 1454 */ 1455 ASSERT(ira->ira_sqp != NULL); 1456 new_sqp = ira->ira_sqp; 1457 1458 econnp = (conn_t *)tcp_get_conn(arg2, tcps); 1459 if (econnp == NULL) 1460 goto error2; 1461 1462 ASSERT(econnp->conn_netstack == lconnp->conn_netstack); 1463 econnp->conn_sqp = new_sqp; 1464 econnp->conn_initial_sqp = new_sqp; 1465 econnp->conn_ixa->ixa_sqp = new_sqp; 1466 1467 econnp->conn_fport = tcpha->tha_lport; 1468 econnp->conn_lport = tcpha->tha_fport; 1469 1470 err = conn_inherit_parent(lconnp, econnp); 1471 if (err != 0) 1472 goto error3; 1473 1474 /* We already know the laddr of the new connection is ours */ 1475 econnp->conn_ixa->ixa_src_generation = ipst->ips_src_generation; 1476 1477 ASSERT(OK_32PTR(mp->b_rptr)); 1478 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION || 1479 IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION); 1480 1481 if (lconnp->conn_family == AF_INET) { 1482 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION); 1483 tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira); 1484 } else { 1485 tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira); 1486 } 1487 1488 if (tpi_mp == NULL) 1489 goto error3; 1490 1491 eager = econnp->conn_tcp; 1492 eager->tcp_detached = B_TRUE; 1493 SOCK_CONNID_INIT(eager->tcp_connid); 1494 1495 /* 1496 * Initialize the eager's tcp_t and inherit some parameters from 1497 * the listener. 1498 */ 1499 tcp_init_values(eager, listener); 1500 1501 ASSERT((econnp->conn_ixa->ixa_flags & 1502 (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE | 1503 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) == 1504 (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE | 1505 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)); 1506 1507 if (!tcps->tcps_dev_flow_ctl) 1508 econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL; 1509 1510 /* Prepare for diffing against previous packets */ 1511 eager->tcp_recvifindex = 0; 1512 eager->tcp_recvhops = 0xffffffffU; 1513 1514 if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) { 1515 if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) || 1516 IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) { 1517 econnp->conn_incoming_ifindex = ifindex; 1518 econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET; 1519 econnp->conn_ixa->ixa_scopeid = ifindex; 1520 } 1521 } 1522 1523 if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) == 1524 (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) && 1525 tcps->tcps_rev_src_routes) { 1526 ipha_t *ipha = (ipha_t *)mp->b_rptr; 1527 ip_pkt_t *ipp = &econnp->conn_xmit_ipp; 1528 1529 /* Source routing option copyover (reverse it) */ 1530 err = ip_find_hdr_v4(ipha, ipp, B_TRUE); 1531 if (err != 0) { 1532 freemsg(tpi_mp); 1533 goto error3; 1534 } 1535 ip_pkt_source_route_reverse_v4(ipp); 1536 } 1537 1538 ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL); 1539 ASSERT(!eager->tcp_tconnind_started); 1540 /* 1541 * If the SYN came with a credential, it's a loopback packet or a 1542 * labeled packet; attach the credential to the TPI message. 1543 */ 1544 if (ira->ira_cred != NULL) 1545 mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid); 1546 1547 eager->tcp_conn.tcp_eager_conn_ind = tpi_mp; 1548 ASSERT(eager->tcp_ordrel_mp == NULL); 1549 1550 /* Inherit the listener's non-STREAMS flag */ 1551 if (IPCL_IS_NONSTR(lconnp)) { 1552 econnp->conn_flags |= IPCL_NONSTR; 1553 /* All non-STREAMS tcp_ts are sockets */ 1554 eager->tcp_issocket = B_TRUE; 1555 } else { 1556 /* 1557 * Pre-allocate the T_ordrel_ind mblk for TPI socket so that 1558 * at close time, we will always have that to send up. 1559 * Otherwise, we need to do special handling in case the 1560 * allocation fails at that time. 1561 */ 1562 if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL) 1563 goto error3; 1564 } 1565 /* 1566 * Now that the IP addresses and ports are setup in econnp we 1567 * can do the IPsec policy work. 1568 */ 1569 if (ira->ira_flags & IRAF_IPSEC_SECURE) { 1570 if (lconnp->conn_policy != NULL) { 1571 /* 1572 * Inherit the policy from the listener; use 1573 * actions from ira 1574 */ 1575 if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) { 1576 CONN_DEC_REF(econnp); 1577 freemsg(mp); 1578 goto error3; 1579 } 1580 } 1581 } 1582 1583 /* 1584 * tcp_set_destination() may set tcp_rwnd according to the route 1585 * metrics. If it does not, the eager's receive window will be set 1586 * to the listener's receive window later in this function. 1587 */ 1588 eager->tcp_rwnd = 0; 1589 1590 if (is_system_labeled()) { 1591 ip_xmit_attr_t *ixa = econnp->conn_ixa; 1592 1593 ASSERT(ira->ira_tsl != NULL); 1594 /* Discard any old label */ 1595 if (ixa->ixa_free_flags & IXA_FREE_TSL) { 1596 ASSERT(ixa->ixa_tsl != NULL); 1597 label_rele(ixa->ixa_tsl); 1598 ixa->ixa_free_flags &= ~IXA_FREE_TSL; 1599 ixa->ixa_tsl = NULL; 1600 } 1601 if ((lconnp->conn_mlp_type != mlptSingle || 1602 lconnp->conn_mac_mode != CONN_MAC_DEFAULT) && 1603 ira->ira_tsl != NULL) { 1604 /* 1605 * If this is an MLP connection or a MAC-Exempt 1606 * connection with an unlabeled node, packets are to be 1607 * exchanged using the security label of the received 1608 * SYN packet instead of the server application's label. 1609 * tsol_check_dest called from ip_set_destination 1610 * might later update TSF_UNLABELED by replacing 1611 * ixa_tsl with a new label. 1612 */ 1613 label_hold(ira->ira_tsl); 1614 ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl); 1615 DTRACE_PROBE2(mlp_syn_accept, conn_t *, 1616 econnp, ts_label_t *, ixa->ixa_tsl) 1617 } else { 1618 ixa->ixa_tsl = crgetlabel(econnp->conn_cred); 1619 DTRACE_PROBE2(syn_accept, conn_t *, 1620 econnp, ts_label_t *, ixa->ixa_tsl) 1621 } 1622 /* 1623 * conn_connect() called from tcp_set_destination will verify 1624 * the destination is allowed to receive packets at the 1625 * security label of the SYN-ACK we are generating. As part of 1626 * that, tsol_check_dest() may create a new effective label for 1627 * this connection. 1628 * Finally conn_connect() will call conn_update_label. 1629 * All that remains for TCP to do is to call 1630 * conn_build_hdr_template which is done as part of 1631 * tcp_set_destination. 1632 */ 1633 } 1634 1635 /* 1636 * Since we will clear tcp_listener before we clear tcp_detached 1637 * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress 1638 * so we can tell a TCP_IS_DETACHED_NONEAGER apart. 1639 */ 1640 eager->tcp_hard_binding = B_TRUE; 1641 1642 tcp_bind_hash_insert(&tcps->tcps_bind_fanout[ 1643 TCP_BIND_HASH(econnp->conn_lport)], eager, 0); 1644 1645 CL_INET_CONNECT(econnp, B_FALSE, err); 1646 if (err != 0) { 1647 tcp_bind_hash_remove(eager); 1648 goto error3; 1649 } 1650 1651 SOCK_CONNID_BUMP(eager->tcp_connid); 1652 1653 /* 1654 * Adapt our mss, ttl, ... based on the remote address. 1655 */ 1656 1657 if (tcp_set_destination(eager) != 0) { 1658 TCPS_BUMP_MIB(tcps, tcpAttemptFails); 1659 /* Undo the bind_hash_insert */ 1660 tcp_bind_hash_remove(eager); 1661 goto error3; 1662 } 1663 1664 /* Process all TCP options. */ 1665 tcp_process_options(eager, tcpha); 1666 1667 /* Is the other end ECN capable? */ 1668 if (tcps->tcps_ecn_permitted >= 1 && 1669 (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) { 1670 eager->tcp_ecn_ok = B_TRUE; 1671 } 1672 1673 /* 1674 * The listener's conn_rcvbuf should be the default window size or a 1675 * window size changed via SO_RCVBUF option. First round up the 1676 * eager's tcp_rwnd to the nearest MSS. Then find out the window 1677 * scale option value if needed. Call tcp_rwnd_set() to finish the 1678 * setting. 1679 * 1680 * Note if there is a rpipe metric associated with the remote host, 1681 * we should not inherit receive window size from listener. 1682 */ 1683 eager->tcp_rwnd = MSS_ROUNDUP( 1684 (eager->tcp_rwnd == 0 ? econnp->conn_rcvbuf : 1685 eager->tcp_rwnd), eager->tcp_mss); 1686 if (eager->tcp_snd_ws_ok) 1687 tcp_set_ws_value(eager); 1688 /* 1689 * Note that this is the only place tcp_rwnd_set() is called for 1690 * accepting a connection. We need to call it here instead of 1691 * after the 3-way handshake because we need to tell the other 1692 * side our rwnd in the SYN-ACK segment. 1693 */ 1694 (void) tcp_rwnd_set(eager, eager->tcp_rwnd); 1695 1696 ASSERT(eager->tcp_connp->conn_rcvbuf != 0 && 1697 eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd); 1698 1699 ASSERT(econnp->conn_rcvbuf != 0 && 1700 econnp->conn_rcvbuf == eager->tcp_rwnd); 1701 1702 /* Put a ref on the listener for the eager. */ 1703 CONN_INC_REF(lconnp); 1704 mutex_enter(&listener->tcp_eager_lock); 1705 listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager; 1706 eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0; 1707 listener->tcp_eager_next_q0 = eager; 1708 eager->tcp_eager_prev_q0 = listener; 1709 1710 /* Set tcp_listener before adding it to tcp_conn_fanout */ 1711 eager->tcp_listener = listener; 1712 eager->tcp_saved_listener = listener; 1713 1714 /* 1715 * Set tcp_listen_cnt so that when the connection is done, the counter 1716 * is decremented. 1717 */ 1718 eager->tcp_listen_cnt = listener->tcp_listen_cnt; 1719 1720 /* 1721 * Tag this detached tcp vector for later retrieval 1722 * by our listener client in tcp_accept(). 1723 */ 1724 eager->tcp_conn_req_seqnum = listener->tcp_conn_req_seqnum; 1725 listener->tcp_conn_req_cnt_q0++; 1726 if (++listener->tcp_conn_req_seqnum == -1) { 1727 /* 1728 * -1 is "special" and defined in TPI as something 1729 * that should never be used in T_CONN_IND 1730 */ 1731 ++listener->tcp_conn_req_seqnum; 1732 } 1733 mutex_exit(&listener->tcp_eager_lock); 1734 1735 if (listener->tcp_syn_defense) { 1736 /* Don't drop the SYN that comes from a good IP source */ 1737 ipaddr_t *addr_cache; 1738 1739 addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache); 1740 if (addr_cache != NULL && econnp->conn_faddr_v4 == 1741 addr_cache[IP_ADDR_CACHE_HASH(econnp->conn_faddr_v4)]) { 1742 eager->tcp_dontdrop = B_TRUE; 1743 } 1744 } 1745 1746 /* 1747 * We need to insert the eager in its own perimeter but as soon 1748 * as we do that, we expose the eager to the classifier and 1749 * should not touch any field outside the eager's perimeter. 1750 * So do all the work necessary before inserting the eager 1751 * in its own perimeter. Be optimistic that conn_connect() 1752 * will succeed but undo everything if it fails. 1753 */ 1754 seg_seq = ntohl(tcpha->tha_seq); 1755 eager->tcp_irs = seg_seq; 1756 eager->tcp_rack = seg_seq; 1757 eager->tcp_rnxt = seg_seq + 1; 1758 eager->tcp_tcpha->tha_ack = htonl(eager->tcp_rnxt); 1759 TCPS_BUMP_MIB(tcps, tcpPassiveOpens); 1760 eager->tcp_state = TCPS_SYN_RCVD; 1761 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 1762 econnp->conn_ixa, void, NULL, tcp_t *, eager, void, NULL, 1763 int32_t, TCPS_LISTEN); 1764 1765 mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss, 1766 NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE); 1767 if (mp1 == NULL) { 1768 /* 1769 * Increment the ref count as we are going to 1770 * enqueueing an mp in squeue 1771 */ 1772 CONN_INC_REF(econnp); 1773 goto error; 1774 } 1775 1776 /* 1777 * We need to start the rto timer. In normal case, we start 1778 * the timer after sending the packet on the wire (or at 1779 * least believing that packet was sent by waiting for 1780 * conn_ip_output() to return). Since this is the first packet 1781 * being sent on the wire for the eager, our initial tcp_rto 1782 * is at least tcp_rexmit_interval_min which is a fairly 1783 * large value to allow the algorithm to adjust slowly to large 1784 * fluctuations of RTT during first few transmissions. 1785 * 1786 * Starting the timer first and then sending the packet in this 1787 * case shouldn't make much difference since tcp_rexmit_interval_min 1788 * is of the order of several 100ms and starting the timer 1789 * first and then sending the packet will result in difference 1790 * of few micro seconds. 1791 * 1792 * Without this optimization, we are forced to hold the fanout 1793 * lock across the ipcl_bind_insert() and sending the packet 1794 * so that we don't race against an incoming packet (maybe RST) 1795 * for this eager. 1796 * 1797 * It is necessary to acquire an extra reference on the eager 1798 * at this point and hold it until after tcp_send_data() to 1799 * ensure against an eager close race. 1800 */ 1801 1802 CONN_INC_REF(econnp); 1803 1804 TCP_TIMER_RESTART(eager, eager->tcp_rto); 1805 1806 /* 1807 * Insert the eager in its own perimeter now. We are ready to deal 1808 * with any packets on eager. 1809 */ 1810 if (ipcl_conn_insert(econnp) != 0) 1811 goto error; 1812 1813 ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp); 1814 freemsg(mp); 1815 /* 1816 * Send the SYN-ACK. Use the right squeue so that conn_ixa is 1817 * only used by one thread at a time. 1818 */ 1819 if (econnp->conn_sqp == lconnp->conn_sqp) { 1820 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, 1821 econnp->conn_ixa, __dtrace_tcp_void_ip_t *, mp1->b_rptr, 1822 tcp_t *, eager, __dtrace_tcp_tcph_t *, 1823 &mp1->b_rptr[econnp->conn_ixa->ixa_ip_hdr_length]); 1824 (void) conn_ip_output(mp1, econnp->conn_ixa); 1825 CONN_DEC_REF(econnp); 1826 } else { 1827 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_send_synack, 1828 econnp, NULL, SQ_PROCESS, SQTAG_TCP_SEND_SYNACK); 1829 } 1830 return; 1831 error: 1832 freemsg(mp1); 1833 eager->tcp_closemp_used = B_TRUE; 1834 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 1835 mp1 = &eager->tcp_closemp; 1836 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill, 1837 econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2); 1838 1839 /* 1840 * If a connection already exists, send the mp to that connections so 1841 * that it can be appropriately dealt with. 1842 */ 1843 ipst = tcps->tcps_netstack->netstack_ip; 1844 1845 if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) { 1846 if (!IPCL_IS_CONNECTED(econnp)) { 1847 /* 1848 * Something bad happened. ipcl_conn_insert() 1849 * failed because a connection already existed 1850 * in connected hash but we can't find it 1851 * anymore (someone blew it away). Just 1852 * free this message and hopefully remote 1853 * will retransmit at which time the SYN can be 1854 * treated as a new connection or dealth with 1855 * a TH_RST if a connection already exists. 1856 */ 1857 CONN_DEC_REF(econnp); 1858 freemsg(mp); 1859 } else { 1860 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data, 1861 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1); 1862 } 1863 } else { 1864 /* Nobody wants this packet */ 1865 freemsg(mp); 1866 } 1867 return; 1868 error3: 1869 CONN_DEC_REF(econnp); 1870 error2: 1871 freemsg(mp); 1872 if (tlc_set) 1873 atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1); 1874 } 1875 1876 /* 1877 * In an ideal case of vertical partition in NUMA architecture, its 1878 * beneficial to have the listener and all the incoming connections 1879 * tied to the same squeue. The other constraint is that incoming 1880 * connections should be tied to the squeue attached to interrupted 1881 * CPU for obvious locality reason so this leaves the listener to 1882 * be tied to the same squeue. Our only problem is that when listener 1883 * is binding, the CPU that will get interrupted by the NIC whose 1884 * IP address the listener is binding to is not even known. So 1885 * the code below allows us to change that binding at the time the 1886 * CPU is interrupted by virtue of incoming connection's squeue. 1887 * 1888 * This is usefull only in case of a listener bound to a specific IP 1889 * address. For other kind of listeners, they get bound the 1890 * very first time and there is no attempt to rebind them. 1891 */ 1892 void 1893 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2, 1894 ip_recv_attr_t *ira) 1895 { 1896 conn_t *connp = (conn_t *)arg; 1897 squeue_t *sqp = (squeue_t *)arg2; 1898 squeue_t *new_sqp; 1899 uint32_t conn_flags; 1900 1901 /* 1902 * IP sets ira_sqp to either the senders conn_sqp (for loopback) 1903 * or based on the ring (for packets from GLD). Otherwise it is 1904 * set based on lbolt i.e., a somewhat random number. 1905 */ 1906 ASSERT(ira->ira_sqp != NULL); 1907 new_sqp = ira->ira_sqp; 1908 1909 if (connp->conn_fanout == NULL) 1910 goto done; 1911 1912 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) { 1913 mutex_enter(&connp->conn_fanout->connf_lock); 1914 mutex_enter(&connp->conn_lock); 1915 /* 1916 * No one from read or write side can access us now 1917 * except for already queued packets on this squeue. 1918 * But since we haven't changed the squeue yet, they 1919 * can't execute. If they are processed after we have 1920 * changed the squeue, they are sent back to the 1921 * correct squeue down below. 1922 * But a listner close can race with processing of 1923 * incoming SYN. If incoming SYN processing changes 1924 * the squeue then the listener close which is waiting 1925 * to enter the squeue would operate on the wrong 1926 * squeue. Hence we don't change the squeue here unless 1927 * the refcount is exactly the minimum refcount. The 1928 * minimum refcount of 4 is counted as - 1 each for 1929 * TCP and IP, 1 for being in the classifier hash, and 1930 * 1 for the mblk being processed. 1931 */ 1932 1933 if (connp->conn_ref != 4 || 1934 connp->conn_tcp->tcp_state != TCPS_LISTEN) { 1935 mutex_exit(&connp->conn_lock); 1936 mutex_exit(&connp->conn_fanout->connf_lock); 1937 goto done; 1938 } 1939 if (connp->conn_sqp != new_sqp) { 1940 while (connp->conn_sqp != new_sqp) 1941 (void) casptr(&connp->conn_sqp, sqp, new_sqp); 1942 /* No special MT issues for outbound ixa_sqp hint */ 1943 connp->conn_ixa->ixa_sqp = new_sqp; 1944 } 1945 1946 do { 1947 conn_flags = connp->conn_flags; 1948 conn_flags |= IPCL_FULLY_BOUND; 1949 (void) cas32(&connp->conn_flags, connp->conn_flags, 1950 conn_flags); 1951 } while (!(connp->conn_flags & IPCL_FULLY_BOUND)); 1952 1953 mutex_exit(&connp->conn_fanout->connf_lock); 1954 mutex_exit(&connp->conn_lock); 1955 1956 /* 1957 * Assume we have picked a good squeue for the listener. Make 1958 * subsequent SYNs not try to change the squeue. 1959 */ 1960 connp->conn_recv = tcp_input_listener; 1961 } 1962 1963 done: 1964 if (connp->conn_sqp != sqp) { 1965 CONN_INC_REF(connp); 1966 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp, 1967 ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND); 1968 } else { 1969 tcp_input_listener(connp, mp, sqp, ira); 1970 } 1971 } 1972 1973 /* 1974 * Send up all messages queued on tcp_rcv_list. 1975 */ 1976 uint_t 1977 tcp_rcv_drain(tcp_t *tcp) 1978 { 1979 mblk_t *mp; 1980 uint_t ret = 0; 1981 #ifdef DEBUG 1982 uint_t cnt = 0; 1983 #endif 1984 queue_t *q = tcp->tcp_connp->conn_rq; 1985 1986 /* Can't drain on an eager connection */ 1987 if (tcp->tcp_listener != NULL) 1988 return (ret); 1989 1990 /* Can't be a non-STREAMS connection */ 1991 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp)); 1992 1993 /* No need for the push timer now. */ 1994 if (tcp->tcp_push_tid != 0) { 1995 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 1996 tcp->tcp_push_tid = 0; 1997 } 1998 1999 /* 2000 * Handle two cases here: we are currently fused or we were 2001 * previously fused and have some urgent data to be delivered 2002 * upstream. The latter happens because we either ran out of 2003 * memory or were detached and therefore sending the SIGURG was 2004 * deferred until this point. In either case we pass control 2005 * over to tcp_fuse_rcv_drain() since it may need to complete 2006 * some work. 2007 */ 2008 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) { 2009 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL : 2010 &tcp->tcp_fused_sigurg_mp)) 2011 return (ret); 2012 } 2013 2014 while ((mp = tcp->tcp_rcv_list) != NULL) { 2015 tcp->tcp_rcv_list = mp->b_next; 2016 mp->b_next = NULL; 2017 #ifdef DEBUG 2018 cnt += msgdsize(mp); 2019 #endif 2020 putnext(q, mp); 2021 } 2022 #ifdef DEBUG 2023 ASSERT(cnt == tcp->tcp_rcv_cnt); 2024 #endif 2025 tcp->tcp_rcv_last_head = NULL; 2026 tcp->tcp_rcv_last_tail = NULL; 2027 tcp->tcp_rcv_cnt = 0; 2028 2029 if (canputnext(q)) 2030 return (tcp_rwnd_reopen(tcp)); 2031 2032 return (ret); 2033 } 2034 2035 /* 2036 * Queue data on tcp_rcv_list which is a b_next chain. 2037 * tcp_rcv_last_head/tail is the last element of this chain. 2038 * Each element of the chain is a b_cont chain. 2039 * 2040 * M_DATA messages are added to the current element. 2041 * Other messages are added as new (b_next) elements. 2042 */ 2043 void 2044 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr) 2045 { 2046 ASSERT(seg_len == msgdsize(mp)); 2047 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 2048 2049 if (is_system_labeled()) { 2050 ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL); 2051 /* 2052 * Provide for protocols above TCP such as RPC. NOPID leaves 2053 * db_cpid unchanged. 2054 * The cred could have already been set. 2055 */ 2056 if (cr != NULL) 2057 mblk_setcred(mp, cr, NOPID); 2058 } 2059 2060 if (tcp->tcp_rcv_list == NULL) { 2061 ASSERT(tcp->tcp_rcv_last_head == NULL); 2062 tcp->tcp_rcv_list = mp; 2063 tcp->tcp_rcv_last_head = mp; 2064 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 2065 tcp->tcp_rcv_last_tail->b_cont = mp; 2066 } else { 2067 tcp->tcp_rcv_last_head->b_next = mp; 2068 tcp->tcp_rcv_last_head = mp; 2069 } 2070 2071 while (mp->b_cont) 2072 mp = mp->b_cont; 2073 2074 tcp->tcp_rcv_last_tail = mp; 2075 tcp->tcp_rcv_cnt += seg_len; 2076 tcp->tcp_rwnd -= seg_len; 2077 } 2078 2079 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 2080 mblk_t * 2081 tcp_ack_mp(tcp_t *tcp) 2082 { 2083 uint32_t seq_no; 2084 tcp_stack_t *tcps = tcp->tcp_tcps; 2085 conn_t *connp = tcp->tcp_connp; 2086 2087 /* 2088 * There are a few cases to be considered while setting the sequence no. 2089 * Essentially, we can come here while processing an unacceptable pkt 2090 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 2091 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 2092 * If we are here for a zero window probe, stick with suna. In all 2093 * other cases, we check if suna + swnd encompasses snxt and set 2094 * the sequence number to snxt, if so. If snxt falls outside the 2095 * window (the receiver probably shrunk its window), we will go with 2096 * suna + swnd, otherwise the sequence no will be unacceptable to the 2097 * receiver. 2098 */ 2099 if (tcp->tcp_zero_win_probe) { 2100 seq_no = tcp->tcp_suna; 2101 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 2102 ASSERT(tcp->tcp_swnd == 0); 2103 seq_no = tcp->tcp_snxt; 2104 } else { 2105 seq_no = SEQ_GT(tcp->tcp_snxt, 2106 (tcp->tcp_suna + tcp->tcp_swnd)) ? 2107 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 2108 } 2109 2110 if (tcp->tcp_valid_bits) { 2111 /* 2112 * For the complex case where we have to send some 2113 * controls (FIN or SYN), let tcp_xmit_mp do it. 2114 */ 2115 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 2116 NULL, B_FALSE)); 2117 } else { 2118 /* Generate a simple ACK */ 2119 int data_length; 2120 uchar_t *rptr; 2121 tcpha_t *tcpha; 2122 mblk_t *mp1; 2123 int32_t total_hdr_len; 2124 int32_t tcp_hdr_len; 2125 int32_t num_sack_blk = 0; 2126 int32_t sack_opt_len; 2127 ip_xmit_attr_t *ixa = connp->conn_ixa; 2128 2129 /* 2130 * Allocate space for TCP + IP headers 2131 * and link-level header 2132 */ 2133 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 2134 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 2135 tcp->tcp_num_sack_blk); 2136 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 2137 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 2138 total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len; 2139 tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len; 2140 } else { 2141 total_hdr_len = connp->conn_ht_iphc_len; 2142 tcp_hdr_len = connp->conn_ht_ulp_len; 2143 } 2144 mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 2145 if (!mp1) 2146 return (NULL); 2147 2148 /* Update the latest receive window size in TCP header. */ 2149 tcp->tcp_tcpha->tha_win = 2150 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws); 2151 /* copy in prototype TCP + IP header */ 2152 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 2153 mp1->b_rptr = rptr; 2154 mp1->b_wptr = rptr + total_hdr_len; 2155 bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len); 2156 2157 tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length]; 2158 2159 /* Set the TCP sequence number. */ 2160 tcpha->tha_seq = htonl(seq_no); 2161 2162 /* Set up the TCP flag field. */ 2163 tcpha->tha_flags = (uchar_t)TH_ACK; 2164 if (tcp->tcp_ecn_echo_on) 2165 tcpha->tha_flags |= TH_ECE; 2166 2167 tcp->tcp_rack = tcp->tcp_rnxt; 2168 tcp->tcp_rack_cnt = 0; 2169 2170 /* fill in timestamp option if in use */ 2171 if (tcp->tcp_snd_ts_ok) { 2172 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH; 2173 2174 U32_TO_BE32(llbolt, 2175 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4); 2176 U32_TO_BE32(tcp->tcp_ts_recent, 2177 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8); 2178 } 2179 2180 /* Fill in SACK options */ 2181 if (num_sack_blk > 0) { 2182 uchar_t *wptr = (uchar_t *)tcpha + 2183 connp->conn_ht_ulp_len; 2184 sack_blk_t *tmp; 2185 int32_t i; 2186 2187 wptr[0] = TCPOPT_NOP; 2188 wptr[1] = TCPOPT_NOP; 2189 wptr[2] = TCPOPT_SACK; 2190 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 2191 sizeof (sack_blk_t); 2192 wptr += TCPOPT_REAL_SACK_LEN; 2193 2194 tmp = tcp->tcp_sack_list; 2195 for (i = 0; i < num_sack_blk; i++) { 2196 U32_TO_BE32(tmp[i].begin, wptr); 2197 wptr += sizeof (tcp_seq); 2198 U32_TO_BE32(tmp[i].end, wptr); 2199 wptr += sizeof (tcp_seq); 2200 } 2201 tcpha->tha_offset_and_reserved += 2202 ((num_sack_blk * 2 + 1) << 4); 2203 } 2204 2205 ixa->ixa_pktlen = total_hdr_len; 2206 2207 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2208 ((ipha_t *)rptr)->ipha_length = htons(total_hdr_len); 2209 } else { 2210 ip6_t *ip6 = (ip6_t *)rptr; 2211 2212 ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN); 2213 } 2214 2215 /* 2216 * Prime pump for checksum calculation in IP. Include the 2217 * adjustment for a source route if any. 2218 */ 2219 data_length = tcp_hdr_len + connp->conn_sum; 2220 data_length = (data_length >> 16) + (data_length & 0xFFFF); 2221 tcpha->tha_sum = htons(data_length); 2222 2223 if (tcp->tcp_ip_forward_progress) { 2224 tcp->tcp_ip_forward_progress = B_FALSE; 2225 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF; 2226 } else { 2227 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF; 2228 } 2229 return (mp1); 2230 } 2231 } 2232 2233 /* 2234 * Dummy socket upcalls for if/when the conn_t gets detached from a 2235 * direct-callback sonode via a user-driven close(). Easy to catch with 2236 * DTrace FBT, and should be mostly harmless. 2237 */ 2238 2239 /* ARGSUSED */ 2240 static sock_upper_handle_t 2241 tcp_dummy_newconn(sock_upper_handle_t x, sock_lower_handle_t y, 2242 sock_downcalls_t *z, cred_t *cr, pid_t pid, sock_upcalls_t **ignored) 2243 { 2244 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2245 return (NULL); 2246 } 2247 2248 /* ARGSUSED */ 2249 static void 2250 tcp_dummy_connected(sock_upper_handle_t x, sock_connid_t y, cred_t *cr, 2251 pid_t pid) 2252 { 2253 ASSERT(x == NULL); 2254 /* Normally we'd crhold(cr) and attach it to socket state. */ 2255 /* LINTED */ 2256 } 2257 2258 /* ARGSUSED */ 2259 static int 2260 tcp_dummy_disconnected(sock_upper_handle_t x, sock_connid_t y, int blah) 2261 { 2262 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2263 return (-1); 2264 } 2265 2266 /* ARGSUSED */ 2267 static void 2268 tcp_dummy_opctl(sock_upper_handle_t x, sock_opctl_action_t y, uintptr_t blah) 2269 { 2270 ASSERT(x == NULL); 2271 /* We really want this one to be a harmless NOP for now. */ 2272 /* LINTED */ 2273 } 2274 2275 /* ARGSUSED */ 2276 static ssize_t 2277 tcp_dummy_recv(sock_upper_handle_t x, mblk_t *mp, size_t len, int flags, 2278 int *error, boolean_t *push) 2279 { 2280 ASSERT(x == NULL); 2281 2282 /* 2283 * Consume the message, set ESHUTDOWN, and return an error. 2284 * Nobody's home! 2285 */ 2286 freemsg(mp); 2287 *error = ESHUTDOWN; 2288 return (-1); 2289 } 2290 2291 /* ARGSUSED */ 2292 static void 2293 tcp_dummy_set_proto_props(sock_upper_handle_t x, struct sock_proto_props *y) 2294 { 2295 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2296 } 2297 2298 /* ARGSUSED */ 2299 static void 2300 tcp_dummy_txq_full(sock_upper_handle_t x, boolean_t y) 2301 { 2302 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2303 } 2304 2305 /* ARGSUSED */ 2306 static void 2307 tcp_dummy_signal_oob(sock_upper_handle_t x, ssize_t len) 2308 { 2309 ASSERT(x == NULL); 2310 /* Otherwise, this would signal socket state about OOB data. */ 2311 } 2312 2313 /* ARGSUSED */ 2314 static void 2315 tcp_dummy_set_error(sock_upper_handle_t x, int err) 2316 { 2317 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2318 } 2319 2320 /* ARGSUSED */ 2321 static void 2322 tcp_dummy_onearg(sock_upper_handle_t x) 2323 { 2324 ASSERT(0); /* Panic in debug, otherwise ignore. */ 2325 } 2326 2327 static sock_upcalls_t tcp_dummy_upcalls = { 2328 tcp_dummy_newconn, 2329 tcp_dummy_connected, 2330 tcp_dummy_disconnected, 2331 tcp_dummy_opctl, 2332 tcp_dummy_recv, 2333 tcp_dummy_set_proto_props, 2334 tcp_dummy_txq_full, 2335 tcp_dummy_signal_oob, 2336 tcp_dummy_onearg, 2337 tcp_dummy_set_error, 2338 tcp_dummy_onearg 2339 }; 2340 2341 /* 2342 * Handle M_DATA messages from IP. Its called directly from IP via 2343 * squeue for received IP packets. 2344 * 2345 * The first argument is always the connp/tcp to which the mp belongs. 2346 * There are no exceptions to this rule. The caller has already put 2347 * a reference on this connp/tcp and once tcp_input_data() returns, 2348 * the squeue will do the refrele. 2349 * 2350 * The TH_SYN for the listener directly go to tcp_input_listener via 2351 * squeue. ICMP errors go directly to tcp_icmp_input(). 2352 * 2353 * sqp: NULL = recursive, sqp != NULL means called from squeue 2354 */ 2355 void 2356 tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 2357 { 2358 int32_t bytes_acked; 2359 int32_t gap; 2360 mblk_t *mp1; 2361 uint_t flags; 2362 uint32_t new_swnd = 0; 2363 uchar_t *iphdr; 2364 uchar_t *rptr; 2365 int32_t rgap; 2366 uint32_t seg_ack; 2367 int seg_len; 2368 uint_t ip_hdr_len; 2369 uint32_t seg_seq; 2370 tcpha_t *tcpha; 2371 int urp; 2372 tcp_opt_t tcpopt; 2373 ip_pkt_t ipp; 2374 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 2375 uint32_t cwnd; 2376 uint32_t add; 2377 int npkt; 2378 int mss; 2379 conn_t *connp = (conn_t *)arg; 2380 squeue_t *sqp = (squeue_t *)arg2; 2381 tcp_t *tcp = connp->conn_tcp; 2382 tcp_stack_t *tcps = tcp->tcp_tcps; 2383 sock_upcalls_t *sockupcalls; 2384 2385 /* 2386 * RST from fused tcp loopback peer should trigger an unfuse. 2387 */ 2388 if (tcp->tcp_fused) { 2389 TCP_STAT(tcps, tcp_fusion_aborted); 2390 tcp_unfuse(tcp); 2391 } 2392 2393 iphdr = mp->b_rptr; 2394 rptr = mp->b_rptr; 2395 ASSERT(OK_32PTR(rptr)); 2396 2397 ip_hdr_len = ira->ira_ip_hdr_length; 2398 if (connp->conn_recv_ancillary.crb_all != 0) { 2399 /* 2400 * Record packet information in the ip_pkt_t 2401 */ 2402 ipp.ipp_fields = 0; 2403 if (ira->ira_flags & IRAF_IS_IPV4) { 2404 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipp, 2405 B_FALSE); 2406 } else { 2407 uint8_t nexthdrp; 2408 2409 /* 2410 * IPv6 packets can only be received by applications 2411 * that are prepared to receive IPv6 addresses. 2412 * The IP fanout must ensure this. 2413 */ 2414 ASSERT(connp->conn_family == AF_INET6); 2415 2416 (void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp, 2417 &nexthdrp); 2418 ASSERT(nexthdrp == IPPROTO_TCP); 2419 2420 /* Could have caused a pullup? */ 2421 iphdr = mp->b_rptr; 2422 rptr = mp->b_rptr; 2423 } 2424 } 2425 ASSERT(DB_TYPE(mp) == M_DATA); 2426 ASSERT(mp->b_next == NULL); 2427 2428 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2429 seg_seq = ntohl(tcpha->tha_seq); 2430 seg_ack = ntohl(tcpha->tha_ack); 2431 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 2432 seg_len = (int)(mp->b_wptr - rptr) - 2433 (ip_hdr_len + TCP_HDR_LENGTH(tcpha)); 2434 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 2435 do { 2436 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 2437 (uintptr_t)INT_MAX); 2438 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 2439 } while ((mp1 = mp1->b_cont) != NULL && 2440 mp1->b_datap->db_type == M_DATA); 2441 } 2442 2443 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa, 2444 __dtrace_tcp_void_ip_t *, iphdr, tcp_t *, tcp, 2445 __dtrace_tcp_tcph_t *, tcpha); 2446 2447 if (tcp->tcp_state == TCPS_TIME_WAIT) { 2448 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 2449 seg_len, tcpha, ira); 2450 return; 2451 } 2452 2453 if (sqp != NULL) { 2454 /* 2455 * This is the correct place to update tcp_last_recv_time. Note 2456 * that it is also updated for tcp structure that belongs to 2457 * global and listener queues which do not really need updating. 2458 * But that should not cause any harm. And it is updated for 2459 * all kinds of incoming segments, not only for data segments. 2460 */ 2461 tcp->tcp_last_recv_time = LBOLT_FASTPATH; 2462 } 2463 2464 flags = (unsigned int)tcpha->tha_flags & 0xFF; 2465 2466 BUMP_LOCAL(tcp->tcp_ibsegs); 2467 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2468 2469 if ((flags & TH_URG) && sqp != NULL) { 2470 /* 2471 * TCP can't handle urgent pointers that arrive before 2472 * the connection has been accept()ed since it can't 2473 * buffer OOB data. Discard segment if this happens. 2474 * 2475 * We can't just rely on a non-null tcp_listener to indicate 2476 * that the accept() has completed since unlinking of the 2477 * eager and completion of the accept are not atomic. 2478 * tcp_detached, when it is not set (B_FALSE) indicates 2479 * that the accept() has completed. 2480 * 2481 * Nor can it reassemble urgent pointers, so discard 2482 * if it's not the next segment expected. 2483 * 2484 * Otherwise, collapse chain into one mblk (discard if 2485 * that fails). This makes sure the headers, retransmitted 2486 * data, and new data all are in the same mblk. 2487 */ 2488 ASSERT(mp != NULL); 2489 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 2490 freemsg(mp); 2491 return; 2492 } 2493 /* Update pointers into message */ 2494 iphdr = rptr = mp->b_rptr; 2495 tcpha = (tcpha_t *)&rptr[ip_hdr_len]; 2496 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 2497 /* 2498 * Since we can't handle any data with this urgent 2499 * pointer that is out of sequence, we expunge 2500 * the data. This allows us to still register 2501 * the urgent mark and generate the M_PCSIG, 2502 * which we can do. 2503 */ 2504 mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2505 seg_len = 0; 2506 } 2507 } 2508 2509 sockupcalls = connp->conn_upcalls; 2510 /* A conn_t may have belonged to a now-closed socket. Be careful. */ 2511 if (sockupcalls == NULL) 2512 sockupcalls = &tcp_dummy_upcalls; 2513 2514 switch (tcp->tcp_state) { 2515 case TCPS_SYN_SENT: 2516 if (connp->conn_final_sqp == NULL && 2517 tcp_outbound_squeue_switch && sqp != NULL) { 2518 ASSERT(connp->conn_initial_sqp == connp->conn_sqp); 2519 connp->conn_final_sqp = sqp; 2520 if (connp->conn_final_sqp != connp->conn_sqp) { 2521 DTRACE_PROBE1(conn__final__sqp__switch, 2522 conn_t *, connp); 2523 CONN_INC_REF(connp); 2524 SQUEUE_SWITCH(connp, connp->conn_final_sqp); 2525 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 2526 tcp_input_data, connp, ira, ip_squeue_flag, 2527 SQTAG_CONNECT_FINISH); 2528 return; 2529 } 2530 DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp); 2531 } 2532 if (flags & TH_ACK) { 2533 /* 2534 * Note that our stack cannot send data before a 2535 * connection is established, therefore the 2536 * following check is valid. Otherwise, it has 2537 * to be changed. 2538 */ 2539 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 2540 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2541 freemsg(mp); 2542 if (flags & TH_RST) 2543 return; 2544 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 2545 tcp, seg_ack, 0, TH_RST); 2546 return; 2547 } 2548 ASSERT(tcp->tcp_suna + 1 == seg_ack); 2549 } 2550 if (flags & TH_RST) { 2551 if (flags & TH_ACK) { 2552 DTRACE_TCP5(connect__refused, mblk_t *, NULL, 2553 ip_xmit_attr_t *, connp->conn_ixa, 2554 void_ip_t *, iphdr, tcp_t *, tcp, 2555 tcph_t *, tcpha); 2556 (void) tcp_clean_death(tcp, ECONNREFUSED); 2557 } 2558 freemsg(mp); 2559 return; 2560 } 2561 if (!(flags & TH_SYN)) { 2562 freemsg(mp); 2563 return; 2564 } 2565 2566 /* Process all TCP options. */ 2567 tcp_process_options(tcp, tcpha); 2568 /* 2569 * The following changes our rwnd to be a multiple of the 2570 * MIN(peer MSS, our MSS) for performance reason. 2571 */ 2572 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf, 2573 tcp->tcp_mss)); 2574 2575 /* Is the other end ECN capable? */ 2576 if (tcp->tcp_ecn_ok) { 2577 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 2578 tcp->tcp_ecn_ok = B_FALSE; 2579 } 2580 } 2581 /* 2582 * Clear ECN flags because it may interfere with later 2583 * processing. 2584 */ 2585 flags &= ~(TH_ECE|TH_CWR); 2586 2587 tcp->tcp_irs = seg_seq; 2588 tcp->tcp_rack = seg_seq; 2589 tcp->tcp_rnxt = seg_seq + 1; 2590 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt); 2591 if (!TCP_IS_DETACHED(tcp)) { 2592 /* Allocate room for SACK options if needed. */ 2593 connp->conn_wroff = connp->conn_ht_iphc_len; 2594 if (tcp->tcp_snd_sack_ok) 2595 connp->conn_wroff += TCPOPT_MAX_SACK_LEN; 2596 if (!tcp->tcp_loopback) 2597 connp->conn_wroff += tcps->tcps_wroff_xtra; 2598 2599 (void) proto_set_tx_wroff(connp->conn_rq, connp, 2600 connp->conn_wroff); 2601 } 2602 if (flags & TH_ACK) { 2603 /* 2604 * If we can't get the confirmation upstream, pretend 2605 * we didn't even see this one. 2606 * 2607 * XXX: how can we pretend we didn't see it if we 2608 * have updated rnxt et. al. 2609 * 2610 * For loopback we defer sending up the T_CONN_CON 2611 * until after some checks below. 2612 */ 2613 mp1 = NULL; 2614 /* 2615 * tcp_sendmsg() checks tcp_state without entering 2616 * the squeue so tcp_state should be updated before 2617 * sending up connection confirmation. Probe the 2618 * state change below when we are sure the connection 2619 * confirmation has been sent. 2620 */ 2621 tcp->tcp_state = TCPS_ESTABLISHED; 2622 if (!tcp_conn_con(tcp, iphdr, mp, 2623 tcp->tcp_loopback ? &mp1 : NULL, ira)) { 2624 tcp->tcp_state = TCPS_SYN_SENT; 2625 freemsg(mp); 2626 return; 2627 } 2628 TCPS_CONN_INC(tcps); 2629 /* SYN was acked - making progress */ 2630 tcp->tcp_ip_forward_progress = B_TRUE; 2631 2632 /* One for the SYN */ 2633 tcp->tcp_suna = tcp->tcp_iss + 1; 2634 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 2635 2636 /* 2637 * If SYN was retransmitted, need to reset all 2638 * retransmission info. This is because this 2639 * segment will be treated as a dup ACK. 2640 */ 2641 if (tcp->tcp_rexmit) { 2642 tcp->tcp_rexmit = B_FALSE; 2643 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 2644 tcp->tcp_rexmit_max = tcp->tcp_snxt; 2645 tcp->tcp_snd_burst = tcp->tcp_localnet ? 2646 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 2647 tcp->tcp_ms_we_have_waited = 0; 2648 2649 /* 2650 * Set tcp_cwnd back to 1 MSS, per 2651 * recommendation from 2652 * draft-floyd-incr-init-win-01.txt, 2653 * Increasing TCP's Initial Window. 2654 */ 2655 tcp->tcp_cwnd = tcp->tcp_mss; 2656 } 2657 2658 tcp->tcp_swl1 = seg_seq; 2659 tcp->tcp_swl2 = seg_ack; 2660 2661 new_swnd = ntohs(tcpha->tha_win); 2662 tcp->tcp_swnd = new_swnd; 2663 if (new_swnd > tcp->tcp_max_swnd) 2664 tcp->tcp_max_swnd = new_swnd; 2665 2666 /* 2667 * Always send the three-way handshake ack immediately 2668 * in order to make the connection complete as soon as 2669 * possible on the accepting host. 2670 */ 2671 flags |= TH_ACK_NEEDED; 2672 2673 /* 2674 * Trace connect-established here. 2675 */ 2676 DTRACE_TCP5(connect__established, mblk_t *, NULL, 2677 ip_xmit_attr_t *, tcp->tcp_connp->conn_ixa, 2678 void_ip_t *, iphdr, tcp_t *, tcp, tcph_t *, tcpha); 2679 2680 /* Trace change from SYN_SENT -> ESTABLISHED here */ 2681 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2682 connp->conn_ixa, void, NULL, tcp_t *, tcp, 2683 void, NULL, int32_t, TCPS_SYN_SENT); 2684 2685 /* 2686 * Special case for loopback. At this point we have 2687 * received SYN-ACK from the remote endpoint. In 2688 * order to ensure that both endpoints reach the 2689 * fused state prior to any data exchange, the final 2690 * ACK needs to be sent before we indicate T_CONN_CON 2691 * to the module upstream. 2692 */ 2693 if (tcp->tcp_loopback) { 2694 mblk_t *ack_mp; 2695 2696 ASSERT(!tcp->tcp_unfusable); 2697 ASSERT(mp1 != NULL); 2698 /* 2699 * For loopback, we always get a pure SYN-ACK 2700 * and only need to send back the final ACK 2701 * with no data (this is because the other 2702 * tcp is ours and we don't do T/TCP). This 2703 * final ACK triggers the passive side to 2704 * perform fusion in ESTABLISHED state. 2705 */ 2706 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 2707 if (tcp->tcp_ack_tid != 0) { 2708 (void) TCP_TIMER_CANCEL(tcp, 2709 tcp->tcp_ack_tid); 2710 tcp->tcp_ack_tid = 0; 2711 } 2712 tcp_send_data(tcp, ack_mp); 2713 BUMP_LOCAL(tcp->tcp_obsegs); 2714 TCPS_BUMP_MIB(tcps, tcpOutAck); 2715 2716 if (!IPCL_IS_NONSTR(connp)) { 2717 /* Send up T_CONN_CON */ 2718 if (ira->ira_cred != NULL) { 2719 mblk_setcred(mp1, 2720 ira->ira_cred, 2721 ira->ira_cpid); 2722 } 2723 putnext(connp->conn_rq, mp1); 2724 } else { 2725 (*sockupcalls->su_connected) 2726 (connp->conn_upper_handle, 2727 tcp->tcp_connid, 2728 ira->ira_cred, 2729 ira->ira_cpid); 2730 freemsg(mp1); 2731 } 2732 2733 freemsg(mp); 2734 return; 2735 } 2736 /* 2737 * Forget fusion; we need to handle more 2738 * complex cases below. Send the deferred 2739 * T_CONN_CON message upstream and proceed 2740 * as usual. Mark this tcp as not capable 2741 * of fusion. 2742 */ 2743 TCP_STAT(tcps, tcp_fusion_unfusable); 2744 tcp->tcp_unfusable = B_TRUE; 2745 if (!IPCL_IS_NONSTR(connp)) { 2746 if (ira->ira_cred != NULL) { 2747 mblk_setcred(mp1, ira->ira_cred, 2748 ira->ira_cpid); 2749 } 2750 putnext(connp->conn_rq, mp1); 2751 } else { 2752 (*sockupcalls->su_connected) 2753 (connp->conn_upper_handle, 2754 tcp->tcp_connid, ira->ira_cred, 2755 ira->ira_cpid); 2756 freemsg(mp1); 2757 } 2758 } 2759 2760 /* 2761 * Check to see if there is data to be sent. If 2762 * yes, set the transmit flag. Then check to see 2763 * if received data processing needs to be done. 2764 * If not, go straight to xmit_check. This short 2765 * cut is OK as we don't support T/TCP. 2766 */ 2767 if (tcp->tcp_unsent) 2768 flags |= TH_XMIT_NEEDED; 2769 2770 if (seg_len == 0 && !(flags & TH_URG)) { 2771 freemsg(mp); 2772 goto xmit_check; 2773 } 2774 2775 flags &= ~TH_SYN; 2776 seg_seq++; 2777 break; 2778 } 2779 tcp->tcp_state = TCPS_SYN_RCVD; 2780 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 2781 connp->conn_ixa, void_ip_t *, NULL, tcp_t *, tcp, 2782 tcph_t *, NULL, int32_t, TCPS_SYN_SENT); 2783 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss, 2784 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 2785 if (mp1 != NULL) { 2786 tcp_send_data(tcp, mp1); 2787 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 2788 } 2789 freemsg(mp); 2790 return; 2791 case TCPS_SYN_RCVD: 2792 if (flags & TH_ACK) { 2793 uint32_t pinit_wnd; 2794 2795 /* 2796 * In this state, a SYN|ACK packet is either bogus 2797 * because the other side must be ACKing our SYN which 2798 * indicates it has seen the ACK for their SYN and 2799 * shouldn't retransmit it or we're crossing SYNs 2800 * on active open. 2801 */ 2802 if ((flags & TH_SYN) && !tcp->tcp_active_open) { 2803 freemsg(mp); 2804 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn", 2805 tcp, seg_ack, 0, TH_RST); 2806 return; 2807 } 2808 /* 2809 * NOTE: RFC 793 pg. 72 says this should be 2810 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt 2811 * but that would mean we have an ack that ignored 2812 * our SYN. 2813 */ 2814 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) || 2815 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 2816 freemsg(mp); 2817 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack", 2818 tcp, seg_ack, 0, TH_RST); 2819 return; 2820 } 2821 /* 2822 * No sane TCP stack will send such a small window 2823 * without receiving any data. Just drop this invalid 2824 * ACK. We also shorten the abort timeout in case 2825 * this is an attack. 2826 */ 2827 pinit_wnd = ntohs(tcpha->tha_win) << tcp->tcp_snd_ws; 2828 if (pinit_wnd < tcp->tcp_mss && 2829 pinit_wnd < tcp_init_wnd_chk) { 2830 freemsg(mp); 2831 TCP_STAT(tcps, tcp_zwin_ack_syn); 2832 tcp->tcp_second_ctimer_threshold = 2833 tcp_early_abort * SECONDS; 2834 return; 2835 } 2836 } 2837 break; 2838 case TCPS_LISTEN: 2839 /* 2840 * Only a TLI listener can come through this path when a 2841 * acceptor is going back to be a listener and a packet 2842 * for the acceptor hits the classifier. For a socket 2843 * listener, this can never happen because a listener 2844 * can never accept connection on itself and hence a 2845 * socket acceptor can not go back to being a listener. 2846 */ 2847 ASSERT(!TCP_IS_SOCKET(tcp)); 2848 /*FALLTHRU*/ 2849 case TCPS_CLOSED: 2850 case TCPS_BOUND: { 2851 conn_t *new_connp; 2852 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2853 2854 /* 2855 * Don't accept any input on a closed tcp as this TCP logically 2856 * does not exist on the system. Don't proceed further with 2857 * this TCP. For instance, this packet could trigger another 2858 * close of this tcp which would be disastrous for tcp_refcnt. 2859 * tcp_close_detached / tcp_clean_death / tcp_closei_local must 2860 * be called at most once on a TCP. In this case we need to 2861 * refeed the packet into the classifier and figure out where 2862 * the packet should go. 2863 */ 2864 new_connp = ipcl_classify(mp, ira, ipst); 2865 if (new_connp != NULL) { 2866 /* Drops ref on new_connp */ 2867 tcp_reinput(new_connp, mp, ira, ipst); 2868 return; 2869 } 2870 /* We failed to classify. For now just drop the packet */ 2871 freemsg(mp); 2872 return; 2873 } 2874 case TCPS_IDLE: 2875 /* 2876 * Handle the case where the tcp_clean_death() has happened 2877 * on a connection (application hasn't closed yet) but a packet 2878 * was already queued on squeue before tcp_clean_death() 2879 * was processed. Calling tcp_clean_death() twice on same 2880 * connection can result in weird behaviour. 2881 */ 2882 freemsg(mp); 2883 return; 2884 default: 2885 break; 2886 } 2887 2888 /* 2889 * Already on the correct queue/perimeter. 2890 * If this is a detached connection and not an eager 2891 * connection hanging off a listener then new data 2892 * (past the FIN) will cause a reset. 2893 * We do a special check here where it 2894 * is out of the main line, rather than check 2895 * if we are detached every time we see new 2896 * data down below. 2897 */ 2898 if (TCP_IS_DETACHED_NONEAGER(tcp) && 2899 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) { 2900 TCPS_BUMP_MIB(tcps, tcpInClosed); 2901 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp); 2902 freemsg(mp); 2903 tcp_xmit_ctl("new data when detached", tcp, 2904 tcp->tcp_snxt, 0, TH_RST); 2905 (void) tcp_clean_death(tcp, EPROTO); 2906 return; 2907 } 2908 2909 mp->b_rptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha); 2910 urp = ntohs(tcpha->tha_urp) - TCP_OLD_URP_INTERPRETATION; 2911 new_swnd = ntohs(tcpha->tha_win) << 2912 ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws); 2913 2914 if (tcp->tcp_snd_ts_ok) { 2915 if (!tcp_paws_check(tcp, tcpha, &tcpopt)) { 2916 /* 2917 * This segment is not acceptable. 2918 * Drop it and send back an ACK. 2919 */ 2920 freemsg(mp); 2921 flags |= TH_ACK_NEEDED; 2922 goto ack_check; 2923 } 2924 } else if (tcp->tcp_snd_sack_ok) { 2925 tcpopt.tcp = tcp; 2926 /* 2927 * SACK info in already updated in tcp_parse_options. Ignore 2928 * all other TCP options... 2929 */ 2930 (void) tcp_parse_options(tcpha, &tcpopt); 2931 } 2932 try_again:; 2933 mss = tcp->tcp_mss; 2934 gap = seg_seq - tcp->tcp_rnxt; 2935 rgap = tcp->tcp_rwnd - (gap + seg_len); 2936 /* 2937 * gap is the amount of sequence space between what we expect to see 2938 * and what we got for seg_seq. A positive value for gap means 2939 * something got lost. A negative value means we got some old stuff. 2940 */ 2941 if (gap < 0) { 2942 /* Old stuff present. Is the SYN in there? */ 2943 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 2944 (seg_len != 0)) { 2945 flags &= ~TH_SYN; 2946 seg_seq++; 2947 urp--; 2948 /* Recompute the gaps after noting the SYN. */ 2949 goto try_again; 2950 } 2951 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs); 2952 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, 2953 (seg_len > -gap ? -gap : seg_len)); 2954 /* Remove the old stuff from seg_len. */ 2955 seg_len += gap; 2956 /* 2957 * Anything left? 2958 * Make sure to check for unack'd FIN when rest of data 2959 * has been previously ack'd. 2960 */ 2961 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 2962 /* 2963 * Resets are only valid if they lie within our offered 2964 * window. If the RST bit is set, we just ignore this 2965 * segment. 2966 */ 2967 if (flags & TH_RST) { 2968 freemsg(mp); 2969 return; 2970 } 2971 2972 /* 2973 * The arriving of dup data packets indicate that we 2974 * may have postponed an ack for too long, or the other 2975 * side's RTT estimate is out of shape. Start acking 2976 * more often. 2977 */ 2978 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) && 2979 tcp->tcp_rack_cnt >= 1 && 2980 tcp->tcp_rack_abs_max > 2) { 2981 tcp->tcp_rack_abs_max--; 2982 } 2983 tcp->tcp_rack_cur_max = 1; 2984 2985 /* 2986 * This segment is "unacceptable". None of its 2987 * sequence space lies within our advertized window. 2988 * 2989 * Adjust seg_len to the original value for tracing. 2990 */ 2991 seg_len -= gap; 2992 if (connp->conn_debug) { 2993 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 2994 "tcp_rput: unacceptable, gap %d, rgap %d, " 2995 "flags 0x%x, seg_seq %u, seg_ack %u, " 2996 "seg_len %d, rnxt %u, snxt %u, %s", 2997 gap, rgap, flags, seg_seq, seg_ack, 2998 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 2999 tcp_display(tcp, NULL, 3000 DISP_ADDR_AND_PORT)); 3001 } 3002 3003 /* 3004 * Arrange to send an ACK in response to the 3005 * unacceptable segment per RFC 793 page 69. There 3006 * is only one small difference between ours and the 3007 * acceptability test in the RFC - we accept ACK-only 3008 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 3009 * will be generated. 3010 * 3011 * Note that we have to ACK an ACK-only packet at least 3012 * for stacks that send 0-length keep-alives with 3013 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 3014 * section 4.2.3.6. As long as we don't ever generate 3015 * an unacceptable packet in response to an incoming 3016 * packet that is unacceptable, it should not cause 3017 * "ACK wars". 3018 */ 3019 flags |= TH_ACK_NEEDED; 3020 3021 /* 3022 * Continue processing this segment in order to use the 3023 * ACK information it contains, but skip all other 3024 * sequence-number processing. Processing the ACK 3025 * information is necessary in order to 3026 * re-synchronize connections that may have lost 3027 * synchronization. 3028 * 3029 * We clear seg_len and flag fields related to 3030 * sequence number processing as they are not 3031 * to be trusted for an unacceptable segment. 3032 */ 3033 seg_len = 0; 3034 flags &= ~(TH_SYN | TH_FIN | TH_URG); 3035 goto process_ack; 3036 } 3037 3038 /* Fix seg_seq, and chew the gap off the front. */ 3039 seg_seq = tcp->tcp_rnxt; 3040 urp += gap; 3041 do { 3042 mblk_t *mp2; 3043 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 3044 (uintptr_t)UINT_MAX); 3045 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 3046 if (gap > 0) { 3047 mp->b_rptr = mp->b_wptr - gap; 3048 break; 3049 } 3050 mp2 = mp; 3051 mp = mp->b_cont; 3052 freeb(mp2); 3053 } while (gap < 0); 3054 /* 3055 * If the urgent data has already been acknowledged, we 3056 * should ignore TH_URG below 3057 */ 3058 if (urp < 0) 3059 flags &= ~TH_URG; 3060 } 3061 /* 3062 * rgap is the amount of stuff received out of window. A negative 3063 * value is the amount out of window. 3064 */ 3065 if (rgap < 0) { 3066 mblk_t *mp2; 3067 3068 if (tcp->tcp_rwnd == 0) { 3069 TCPS_BUMP_MIB(tcps, tcpInWinProbe); 3070 } else { 3071 TCPS_BUMP_MIB(tcps, tcpInDataPastWinSegs); 3072 TCPS_UPDATE_MIB(tcps, tcpInDataPastWinBytes, -rgap); 3073 } 3074 3075 /* 3076 * seg_len does not include the FIN, so if more than 3077 * just the FIN is out of window, we act like we don't 3078 * see it. (If just the FIN is out of window, rgap 3079 * will be zero and we will go ahead and acknowledge 3080 * the FIN.) 3081 */ 3082 flags &= ~TH_FIN; 3083 3084 /* Fix seg_len and make sure there is something left. */ 3085 seg_len += rgap; 3086 if (seg_len <= 0) { 3087 /* 3088 * Resets are only valid if they lie within our offered 3089 * window. If the RST bit is set, we just ignore this 3090 * segment. 3091 */ 3092 if (flags & TH_RST) { 3093 freemsg(mp); 3094 return; 3095 } 3096 3097 /* Per RFC 793, we need to send back an ACK. */ 3098 flags |= TH_ACK_NEEDED; 3099 3100 /* 3101 * Send SIGURG as soon as possible i.e. even 3102 * if the TH_URG was delivered in a window probe 3103 * packet (which will be unacceptable). 3104 * 3105 * We generate a signal if none has been generated 3106 * for this connection or if this is a new urgent 3107 * byte. Also send a zero-length "unmarked" message 3108 * to inform SIOCATMARK that this is not the mark. 3109 * 3110 * tcp_urp_last_valid is cleared when the T_exdata_ind 3111 * is sent up. This plus the check for old data 3112 * (gap >= 0) handles the wraparound of the sequence 3113 * number space without having to always track the 3114 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 3115 * this max in its rcv_up variable). 3116 * 3117 * This prevents duplicate SIGURGS due to a "late" 3118 * zero-window probe when the T_EXDATA_IND has already 3119 * been sent up. 3120 */ 3121 if ((flags & TH_URG) && 3122 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 3123 tcp->tcp_urp_last))) { 3124 if (IPCL_IS_NONSTR(connp)) { 3125 if (!TCP_IS_DETACHED(tcp)) { 3126 (*sockupcalls->su_signal_oob) 3127 (connp->conn_upper_handle, 3128 urp); 3129 } 3130 } else { 3131 mp1 = allocb(0, BPRI_MED); 3132 if (mp1 == NULL) { 3133 freemsg(mp); 3134 return; 3135 } 3136 if (!TCP_IS_DETACHED(tcp) && 3137 !putnextctl1(connp->conn_rq, 3138 M_PCSIG, SIGURG)) { 3139 /* Try again on the rexmit. */ 3140 freemsg(mp1); 3141 freemsg(mp); 3142 return; 3143 } 3144 /* 3145 * If the next byte would be the mark 3146 * then mark with MARKNEXT else mark 3147 * with NOTMARKNEXT. 3148 */ 3149 if (gap == 0 && urp == 0) 3150 mp1->b_flag |= MSGMARKNEXT; 3151 else 3152 mp1->b_flag |= MSGNOTMARKNEXT; 3153 freemsg(tcp->tcp_urp_mark_mp); 3154 tcp->tcp_urp_mark_mp = mp1; 3155 flags |= TH_SEND_URP_MARK; 3156 } 3157 tcp->tcp_urp_last_valid = B_TRUE; 3158 tcp->tcp_urp_last = urp + seg_seq; 3159 } 3160 /* 3161 * If this is a zero window probe, continue to 3162 * process the ACK part. But we need to set seg_len 3163 * to 0 to avoid data processing. Otherwise just 3164 * drop the segment and send back an ACK. 3165 */ 3166 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 3167 flags &= ~(TH_SYN | TH_URG); 3168 seg_len = 0; 3169 goto process_ack; 3170 } else { 3171 freemsg(mp); 3172 goto ack_check; 3173 } 3174 } 3175 /* Pitch out of window stuff off the end. */ 3176 rgap = seg_len; 3177 mp2 = mp; 3178 do { 3179 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 3180 (uintptr_t)INT_MAX); 3181 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 3182 if (rgap < 0) { 3183 mp2->b_wptr += rgap; 3184 if ((mp1 = mp2->b_cont) != NULL) { 3185 mp2->b_cont = NULL; 3186 freemsg(mp1); 3187 } 3188 break; 3189 } 3190 } while ((mp2 = mp2->b_cont) != NULL); 3191 } 3192 ok:; 3193 /* 3194 * TCP should check ECN info for segments inside the window only. 3195 * Therefore the check should be done here. 3196 */ 3197 if (tcp->tcp_ecn_ok) { 3198 if (flags & TH_CWR) { 3199 tcp->tcp_ecn_echo_on = B_FALSE; 3200 } 3201 /* 3202 * Note that both ECN_CE and CWR can be set in the 3203 * same segment. In this case, we once again turn 3204 * on ECN_ECHO. 3205 */ 3206 if (connp->conn_ipversion == IPV4_VERSION) { 3207 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 3208 3209 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 3210 tcp->tcp_ecn_echo_on = B_TRUE; 3211 } 3212 } else { 3213 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 3214 3215 if ((vcf & htonl(IPH_ECN_CE << 20)) == 3216 htonl(IPH_ECN_CE << 20)) { 3217 tcp->tcp_ecn_echo_on = B_TRUE; 3218 } 3219 } 3220 } 3221 3222 /* 3223 * Check whether we can update tcp_ts_recent. This test is 3224 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 3225 * Extensions for High Performance: An Update", Internet Draft. 3226 */ 3227 if (tcp->tcp_snd_ts_ok && 3228 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 3229 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 3230 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 3231 tcp->tcp_last_rcv_lbolt = LBOLT_FASTPATH64; 3232 } 3233 3234 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 3235 /* 3236 * FIN in an out of order segment. We record this in 3237 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 3238 * Clear the FIN so that any check on FIN flag will fail. 3239 * Remember that FIN also counts in the sequence number 3240 * space. So we need to ack out of order FIN only segments. 3241 */ 3242 if (flags & TH_FIN) { 3243 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 3244 tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 3245 flags &= ~TH_FIN; 3246 flags |= TH_ACK_NEEDED; 3247 } 3248 if (seg_len > 0) { 3249 /* Fill in the SACK blk list. */ 3250 if (tcp->tcp_snd_sack_ok) { 3251 tcp_sack_insert(tcp->tcp_sack_list, 3252 seg_seq, seg_seq + seg_len, 3253 &(tcp->tcp_num_sack_blk)); 3254 } 3255 3256 /* 3257 * Attempt reassembly and see if we have something 3258 * ready to go. 3259 */ 3260 mp = tcp_reass(tcp, mp, seg_seq); 3261 /* Always ack out of order packets */ 3262 flags |= TH_ACK_NEEDED | TH_PUSH; 3263 if (mp) { 3264 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 3265 (uintptr_t)INT_MAX); 3266 seg_len = mp->b_cont ? msgdsize(mp) : 3267 (int)(mp->b_wptr - mp->b_rptr); 3268 seg_seq = tcp->tcp_rnxt; 3269 /* 3270 * A gap is filled and the seq num and len 3271 * of the gap match that of a previously 3272 * received FIN, put the FIN flag back in. 3273 */ 3274 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3275 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3276 flags |= TH_FIN; 3277 tcp->tcp_valid_bits &= 3278 ~TCP_OFO_FIN_VALID; 3279 } 3280 if (tcp->tcp_reass_tid != 0) { 3281 (void) TCP_TIMER_CANCEL(tcp, 3282 tcp->tcp_reass_tid); 3283 /* 3284 * Restart the timer if there is still 3285 * data in the reassembly queue. 3286 */ 3287 if (tcp->tcp_reass_head != NULL) { 3288 tcp->tcp_reass_tid = TCP_TIMER( 3289 tcp, tcp_reass_timer, 3290 tcps->tcps_reass_timeout); 3291 } else { 3292 tcp->tcp_reass_tid = 0; 3293 } 3294 } 3295 } else { 3296 /* 3297 * Keep going even with NULL mp. 3298 * There may be a useful ACK or something else 3299 * we don't want to miss. 3300 * 3301 * But TCP should not perform fast retransmit 3302 * because of the ack number. TCP uses 3303 * seg_len == 0 to determine if it is a pure 3304 * ACK. And this is not a pure ACK. 3305 */ 3306 seg_len = 0; 3307 ofo_seg = B_TRUE; 3308 3309 if (tcps->tcps_reass_timeout != 0 && 3310 tcp->tcp_reass_tid == 0) { 3311 tcp->tcp_reass_tid = TCP_TIMER(tcp, 3312 tcp_reass_timer, 3313 tcps->tcps_reass_timeout); 3314 } 3315 } 3316 } 3317 } else if (seg_len > 0) { 3318 TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs); 3319 TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len); 3320 /* 3321 * If an out of order FIN was received before, and the seq 3322 * num and len of the new segment match that of the FIN, 3323 * put the FIN flag back in. 3324 */ 3325 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 3326 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 3327 flags |= TH_FIN; 3328 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 3329 } 3330 } 3331 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 3332 if (flags & TH_RST) { 3333 freemsg(mp); 3334 switch (tcp->tcp_state) { 3335 case TCPS_SYN_RCVD: 3336 (void) tcp_clean_death(tcp, ECONNREFUSED); 3337 break; 3338 case TCPS_ESTABLISHED: 3339 case TCPS_FIN_WAIT_1: 3340 case TCPS_FIN_WAIT_2: 3341 case TCPS_CLOSE_WAIT: 3342 (void) tcp_clean_death(tcp, ECONNRESET); 3343 break; 3344 case TCPS_CLOSING: 3345 case TCPS_LAST_ACK: 3346 (void) tcp_clean_death(tcp, 0); 3347 break; 3348 default: 3349 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3350 (void) tcp_clean_death(tcp, ENXIO); 3351 break; 3352 } 3353 return; 3354 } 3355 if (flags & TH_SYN) { 3356 /* 3357 * See RFC 793, Page 71 3358 * 3359 * The seq number must be in the window as it should 3360 * be "fixed" above. If it is outside window, it should 3361 * be already rejected. Note that we allow seg_seq to be 3362 * rnxt + rwnd because we want to accept 0 window probe. 3363 */ 3364 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 3365 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 3366 freemsg(mp); 3367 /* 3368 * If the ACK flag is not set, just use our snxt as the 3369 * seq number of the RST segment. 3370 */ 3371 if (!(flags & TH_ACK)) { 3372 seg_ack = tcp->tcp_snxt; 3373 } 3374 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 3375 TH_RST|TH_ACK); 3376 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 3377 (void) tcp_clean_death(tcp, ECONNRESET); 3378 return; 3379 } 3380 /* 3381 * urp could be -1 when the urp field in the packet is 0 3382 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 3383 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 3384 */ 3385 if (flags & TH_URG && urp >= 0) { 3386 if (!tcp->tcp_urp_last_valid || 3387 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 3388 /* 3389 * Non-STREAMS sockets handle the urgent data a litte 3390 * differently from STREAMS based sockets. There is no 3391 * need to mark any mblks with the MSG{NOT,}MARKNEXT 3392 * flags to keep SIOCATMARK happy. Instead a 3393 * su_signal_oob upcall is made to update the mark. 3394 * Neither is a T_EXDATA_IND mblk needed to be 3395 * prepended to the urgent data. The urgent data is 3396 * delivered using the su_recv upcall, where we set 3397 * the MSG_OOB flag to indicate that it is urg data. 3398 * 3399 * Neither TH_SEND_URP_MARK nor TH_MARKNEXT_NEEDED 3400 * are used by non-STREAMS sockets. 3401 */ 3402 if (IPCL_IS_NONSTR(connp)) { 3403 if (!TCP_IS_DETACHED(tcp)) { 3404 (*sockupcalls->su_signal_oob) 3405 (connp->conn_upper_handle, urp); 3406 } 3407 } else { 3408 /* 3409 * If we haven't generated the signal yet for 3410 * this urgent pointer value, do it now. Also, 3411 * send up a zero-length M_DATA indicating 3412 * whether or not this is the mark. The latter 3413 * is not needed when a T_EXDATA_IND is sent up. 3414 * However, if there are allocation failures 3415 * this code relies on the sender retransmitting 3416 * and the socket code for determining the mark 3417 * should not block waiting for the peer to 3418 * transmit. Thus, for simplicity we always 3419 * send up the mark indication. 3420 */ 3421 mp1 = allocb(0, BPRI_MED); 3422 if (mp1 == NULL) { 3423 freemsg(mp); 3424 return; 3425 } 3426 if (!TCP_IS_DETACHED(tcp) && 3427 !putnextctl1(connp->conn_rq, M_PCSIG, 3428 SIGURG)) { 3429 /* Try again on the rexmit. */ 3430 freemsg(mp1); 3431 freemsg(mp); 3432 return; 3433 } 3434 /* 3435 * Mark with NOTMARKNEXT for now. 3436 * The code below will change this to MARKNEXT 3437 * if we are at the mark. 3438 * 3439 * If there are allocation failures (e.g. in 3440 * dupmsg below) the next time tcp_input_data 3441 * sees the urgent segment it will send up the 3442 * MSGMARKNEXT message. 3443 */ 3444 mp1->b_flag |= MSGNOTMARKNEXT; 3445 freemsg(tcp->tcp_urp_mark_mp); 3446 tcp->tcp_urp_mark_mp = mp1; 3447 flags |= TH_SEND_URP_MARK; 3448 #ifdef DEBUG 3449 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3450 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 3451 "last %x, %s", 3452 seg_seq, urp, tcp->tcp_urp_last, 3453 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3454 #endif /* DEBUG */ 3455 } 3456 tcp->tcp_urp_last_valid = B_TRUE; 3457 tcp->tcp_urp_last = urp + seg_seq; 3458 } else if (tcp->tcp_urp_mark_mp != NULL) { 3459 /* 3460 * An allocation failure prevented the previous 3461 * tcp_input_data from sending up the allocated 3462 * MSG*MARKNEXT message - send it up this time 3463 * around. 3464 */ 3465 flags |= TH_SEND_URP_MARK; 3466 } 3467 3468 /* 3469 * If the urgent byte is in this segment, make sure that it is 3470 * all by itself. This makes it much easier to deal with the 3471 * possibility of an allocation failure on the T_exdata_ind. 3472 * Note that seg_len is the number of bytes in the segment, and 3473 * urp is the offset into the segment of the urgent byte. 3474 * urp < seg_len means that the urgent byte is in this segment. 3475 */ 3476 if (urp < seg_len) { 3477 if (seg_len != 1) { 3478 uint32_t tmp_rnxt; 3479 /* 3480 * Break it up and feed it back in. 3481 * Re-attach the IP header. 3482 */ 3483 mp->b_rptr = iphdr; 3484 if (urp > 0) { 3485 /* 3486 * There is stuff before the urgent 3487 * byte. 3488 */ 3489 mp1 = dupmsg(mp); 3490 if (!mp1) { 3491 /* 3492 * Trim from urgent byte on. 3493 * The rest will come back. 3494 */ 3495 (void) adjmsg(mp, 3496 urp - seg_len); 3497 tcp_input_data(connp, 3498 mp, NULL, ira); 3499 return; 3500 } 3501 (void) adjmsg(mp1, urp - seg_len); 3502 /* Feed this piece back in. */ 3503 tmp_rnxt = tcp->tcp_rnxt; 3504 tcp_input_data(connp, mp1, NULL, ira); 3505 /* 3506 * If the data passed back in was not 3507 * processed (ie: bad ACK) sending 3508 * the remainder back in will cause a 3509 * loop. In this case, drop the 3510 * packet and let the sender try 3511 * sending a good packet. 3512 */ 3513 if (tmp_rnxt == tcp->tcp_rnxt) { 3514 freemsg(mp); 3515 return; 3516 } 3517 } 3518 if (urp != seg_len - 1) { 3519 uint32_t tmp_rnxt; 3520 /* 3521 * There is stuff after the urgent 3522 * byte. 3523 */ 3524 mp1 = dupmsg(mp); 3525 if (!mp1) { 3526 /* 3527 * Trim everything beyond the 3528 * urgent byte. The rest will 3529 * come back. 3530 */ 3531 (void) adjmsg(mp, 3532 urp + 1 - seg_len); 3533 tcp_input_data(connp, 3534 mp, NULL, ira); 3535 return; 3536 } 3537 (void) adjmsg(mp1, urp + 1 - seg_len); 3538 tmp_rnxt = tcp->tcp_rnxt; 3539 tcp_input_data(connp, mp1, NULL, ira); 3540 /* 3541 * If the data passed back in was not 3542 * processed (ie: bad ACK) sending 3543 * the remainder back in will cause a 3544 * loop. In this case, drop the 3545 * packet and let the sender try 3546 * sending a good packet. 3547 */ 3548 if (tmp_rnxt == tcp->tcp_rnxt) { 3549 freemsg(mp); 3550 return; 3551 } 3552 } 3553 tcp_input_data(connp, mp, NULL, ira); 3554 return; 3555 } 3556 /* 3557 * This segment contains only the urgent byte. We 3558 * have to allocate the T_exdata_ind, if we can. 3559 */ 3560 if (IPCL_IS_NONSTR(connp)) { 3561 int error; 3562 3563 (*sockupcalls->su_recv) 3564 (connp->conn_upper_handle, mp, seg_len, 3565 MSG_OOB, &error, NULL); 3566 /* 3567 * We should never be in middle of a 3568 * fallback, the squeue guarantees that. 3569 */ 3570 ASSERT(error != EOPNOTSUPP); 3571 mp = NULL; 3572 goto update_ack; 3573 } else if (!tcp->tcp_urp_mp) { 3574 struct T_exdata_ind *tei; 3575 mp1 = allocb(sizeof (struct T_exdata_ind), 3576 BPRI_MED); 3577 if (!mp1) { 3578 /* 3579 * Sigh... It'll be back. 3580 * Generate any MSG*MARK message now. 3581 */ 3582 freemsg(mp); 3583 seg_len = 0; 3584 if (flags & TH_SEND_URP_MARK) { 3585 3586 3587 ASSERT(tcp->tcp_urp_mark_mp); 3588 tcp->tcp_urp_mark_mp->b_flag &= 3589 ~MSGNOTMARKNEXT; 3590 tcp->tcp_urp_mark_mp->b_flag |= 3591 MSGMARKNEXT; 3592 } 3593 goto ack_check; 3594 } 3595 mp1->b_datap->db_type = M_PROTO; 3596 tei = (struct T_exdata_ind *)mp1->b_rptr; 3597 tei->PRIM_type = T_EXDATA_IND; 3598 tei->MORE_flag = 0; 3599 mp1->b_wptr = (uchar_t *)&tei[1]; 3600 tcp->tcp_urp_mp = mp1; 3601 #ifdef DEBUG 3602 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3603 "tcp_rput: allocated exdata_ind %s", 3604 tcp_display(tcp, NULL, 3605 DISP_PORT_ONLY)); 3606 #endif /* DEBUG */ 3607 /* 3608 * There is no need to send a separate MSG*MARK 3609 * message since the T_EXDATA_IND will be sent 3610 * now. 3611 */ 3612 flags &= ~TH_SEND_URP_MARK; 3613 freemsg(tcp->tcp_urp_mark_mp); 3614 tcp->tcp_urp_mark_mp = NULL; 3615 } 3616 /* 3617 * Now we are all set. On the next putnext upstream, 3618 * tcp_urp_mp will be non-NULL and will get prepended 3619 * to what has to be this piece containing the urgent 3620 * byte. If for any reason we abort this segment below, 3621 * if it comes back, we will have this ready, or it 3622 * will get blown off in close. 3623 */ 3624 } else if (urp == seg_len) { 3625 /* 3626 * The urgent byte is the next byte after this sequence 3627 * number. If this endpoint is non-STREAMS, then there 3628 * is nothing to do here since the socket has already 3629 * been notified about the urg pointer by the 3630 * su_signal_oob call above. 3631 * 3632 * In case of STREAMS, some more work might be needed. 3633 * If there is data it is marked with MSGMARKNEXT and 3634 * and any tcp_urp_mark_mp is discarded since it is not 3635 * needed. Otherwise, if the code above just allocated 3636 * a zero-length tcp_urp_mark_mp message, that message 3637 * is tagged with MSGMARKNEXT. Sending up these 3638 * MSGMARKNEXT messages makes SIOCATMARK work correctly 3639 * even though the T_EXDATA_IND will not be sent up 3640 * until the urgent byte arrives. 3641 */ 3642 if (!IPCL_IS_NONSTR(tcp->tcp_connp)) { 3643 if (seg_len != 0) { 3644 flags |= TH_MARKNEXT_NEEDED; 3645 freemsg(tcp->tcp_urp_mark_mp); 3646 tcp->tcp_urp_mark_mp = NULL; 3647 flags &= ~TH_SEND_URP_MARK; 3648 } else if (tcp->tcp_urp_mark_mp != NULL) { 3649 flags |= TH_SEND_URP_MARK; 3650 tcp->tcp_urp_mark_mp->b_flag &= 3651 ~MSGNOTMARKNEXT; 3652 tcp->tcp_urp_mark_mp->b_flag |= 3653 MSGMARKNEXT; 3654 } 3655 } 3656 #ifdef DEBUG 3657 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3658 "tcp_rput: AT MARK, len %d, flags 0x%x, %s", 3659 seg_len, flags, 3660 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 3661 #endif /* DEBUG */ 3662 } 3663 #ifdef DEBUG 3664 else { 3665 /* Data left until we hit mark */ 3666 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 3667 "tcp_rput: URP %d bytes left, %s", 3668 urp - seg_len, tcp_display(tcp, NULL, 3669 DISP_PORT_ONLY)); 3670 } 3671 #endif /* DEBUG */ 3672 } 3673 3674 process_ack: 3675 if (!(flags & TH_ACK)) { 3676 freemsg(mp); 3677 goto xmit_check; 3678 } 3679 } 3680 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 3681 3682 if (bytes_acked > 0) 3683 tcp->tcp_ip_forward_progress = B_TRUE; 3684 if (tcp->tcp_state == TCPS_SYN_RCVD) { 3685 /* 3686 * tcp_sendmsg() checks tcp_state without entering 3687 * the squeue so tcp_state should be updated before 3688 * sending up a connection confirmation or a new 3689 * connection indication. 3690 */ 3691 tcp->tcp_state = TCPS_ESTABLISHED; 3692 3693 /* 3694 * We are seeing the final ack in the three way 3695 * hand shake of a active open'ed connection 3696 * so we must send up a T_CONN_CON 3697 */ 3698 if (tcp->tcp_active_open) { 3699 if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) { 3700 freemsg(mp); 3701 tcp->tcp_state = TCPS_SYN_RCVD; 3702 return; 3703 } 3704 /* 3705 * Don't fuse the loopback endpoints for 3706 * simultaneous active opens. 3707 */ 3708 if (tcp->tcp_loopback) { 3709 TCP_STAT(tcps, tcp_fusion_unfusable); 3710 tcp->tcp_unfusable = B_TRUE; 3711 } 3712 /* 3713 * For simultaneous active open, trace receipt of final 3714 * ACK as tcp:::connect-established. 3715 */ 3716 DTRACE_TCP5(connect__established, mblk_t *, NULL, 3717 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3718 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3719 } else if (IPCL_IS_NONSTR(connp)) { 3720 /* 3721 * 3-way handshake has completed, so notify socket 3722 * of the new connection. 3723 * 3724 * We are here means eager is fine but it can 3725 * get a TH_RST at any point between now and till 3726 * accept completes and disappear. We need to 3727 * ensure that reference to eager is valid after 3728 * we get out of eager's perimeter. So we do 3729 * an extra refhold. 3730 */ 3731 CONN_INC_REF(connp); 3732 3733 if (!tcp_newconn_notify(tcp, ira)) { 3734 /* 3735 * The state-change probe for SYN_RCVD -> 3736 * ESTABLISHED has not fired yet. We reset 3737 * the state to SYN_RCVD so that future 3738 * state-change probes report correct state 3739 * transistions. 3740 */ 3741 tcp->tcp_state = TCPS_SYN_RCVD; 3742 freemsg(mp); 3743 /* notification did not go up, so drop ref */ 3744 CONN_DEC_REF(connp); 3745 /* ... and close the eager */ 3746 ASSERT(TCP_IS_DETACHED(tcp)); 3747 (void) tcp_close_detached(tcp); 3748 return; 3749 } 3750 /* 3751 * For passive open, trace receipt of final ACK as 3752 * tcp:::accept-established. 3753 */ 3754 DTRACE_TCP5(accept__established, mlbk_t *, NULL, 3755 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3756 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3757 } else { 3758 /* 3759 * 3-way handshake complete - this is a STREAMS based 3760 * socket, so pass up the T_CONN_IND. 3761 */ 3762 tcp_t *listener = tcp->tcp_listener; 3763 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind; 3764 3765 tcp->tcp_tconnind_started = B_TRUE; 3766 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 3767 ASSERT(mp != NULL); 3768 /* 3769 * We are here means eager is fine but it can 3770 * get a TH_RST at any point between now and till 3771 * accept completes and disappear. We need to 3772 * ensure that reference to eager is valid after 3773 * we get out of eager's perimeter. So we do 3774 * an extra refhold. 3775 */ 3776 CONN_INC_REF(connp); 3777 3778 /* 3779 * The listener also exists because of the refhold 3780 * done in tcp_input_listener. Its possible that it 3781 * might have closed. We will check that once we 3782 * get inside listeners context. 3783 */ 3784 CONN_INC_REF(listener->tcp_connp); 3785 if (listener->tcp_connp->conn_sqp == 3786 connp->conn_sqp) { 3787 /* 3788 * We optimize by not calling an SQUEUE_ENTER 3789 * on the listener since we know that the 3790 * listener and eager squeues are the same. 3791 * We are able to make this check safely only 3792 * because neither the eager nor the listener 3793 * can change its squeue. Only an active connect 3794 * can change its squeue 3795 */ 3796 tcp_send_conn_ind(listener->tcp_connp, mp, 3797 listener->tcp_connp->conn_sqp); 3798 CONN_DEC_REF(listener->tcp_connp); 3799 } else if (!tcp->tcp_loopback) { 3800 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp, 3801 mp, tcp_send_conn_ind, 3802 listener->tcp_connp, NULL, SQ_FILL, 3803 SQTAG_TCP_CONN_IND); 3804 } else { 3805 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp, 3806 mp, tcp_send_conn_ind, 3807 listener->tcp_connp, NULL, SQ_NODRAIN, 3808 SQTAG_TCP_CONN_IND); 3809 } 3810 /* 3811 * For passive open, trace receipt of final ACK as 3812 * tcp:::accept-established. 3813 */ 3814 DTRACE_TCP5(accept__established, mlbk_t *, NULL, 3815 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *, 3816 iphdr, tcp_t *, tcp, tcph_t *, tcpha); 3817 } 3818 TCPS_CONN_INC(tcps); 3819 3820 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */ 3821 bytes_acked--; 3822 /* SYN was acked - making progress */ 3823 tcp->tcp_ip_forward_progress = B_TRUE; 3824 3825 /* 3826 * If SYN was retransmitted, need to reset all 3827 * retransmission info as this segment will be 3828 * treated as a dup ACK. 3829 */ 3830 if (tcp->tcp_rexmit) { 3831 tcp->tcp_rexmit = B_FALSE; 3832 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 3833 tcp->tcp_rexmit_max = tcp->tcp_snxt; 3834 tcp->tcp_snd_burst = tcp->tcp_localnet ? 3835 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 3836 tcp->tcp_ms_we_have_waited = 0; 3837 tcp->tcp_cwnd = mss; 3838 } 3839 3840 /* 3841 * We set the send window to zero here. 3842 * This is needed if there is data to be 3843 * processed already on the queue. 3844 * Later (at swnd_update label), the 3845 * "new_swnd > tcp_swnd" condition is satisfied 3846 * the XMIT_NEEDED flag is set in the current 3847 * (SYN_RCVD) state. This ensures tcp_wput_data() is 3848 * called if there is already data on queue in 3849 * this state. 3850 */ 3851 tcp->tcp_swnd = 0; 3852 3853 if (new_swnd > tcp->tcp_max_swnd) 3854 tcp->tcp_max_swnd = new_swnd; 3855 tcp->tcp_swl1 = seg_seq; 3856 tcp->tcp_swl2 = seg_ack; 3857 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 3858 3859 /* Trace change from SYN_RCVD -> ESTABLISHED here */ 3860 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *, 3861 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL, 3862 int32_t, TCPS_SYN_RCVD); 3863 3864 /* Fuse when both sides are in ESTABLISHED state */ 3865 if (tcp->tcp_loopback && do_tcp_fusion) 3866 tcp_fuse(tcp, iphdr, tcpha); 3867 3868 } 3869 /* This code follows 4.4BSD-Lite2 mostly. */ 3870 if (bytes_acked < 0) 3871 goto est; 3872 3873 /* 3874 * If TCP is ECN capable and the congestion experience bit is 3875 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be 3876 * done once per window (or more loosely, per RTT). 3877 */ 3878 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max)) 3879 tcp->tcp_cwr = B_FALSE; 3880 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) { 3881 if (!tcp->tcp_cwr) { 3882 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss; 3883 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss; 3884 tcp->tcp_cwnd = npkt * mss; 3885 /* 3886 * If the cwnd is 0, use the timer to clock out 3887 * new segments. This is required by the ECN spec. 3888 */ 3889 if (npkt == 0) { 3890 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 3891 /* 3892 * This makes sure that when the ACK comes 3893 * back, we will increase tcp_cwnd by 1 MSS. 3894 */ 3895 tcp->tcp_cwnd_cnt = 0; 3896 } 3897 tcp->tcp_cwr = B_TRUE; 3898 /* 3899 * This marks the end of the current window of in 3900 * flight data. That is why we don't use 3901 * tcp_suna + tcp_swnd. Only data in flight can 3902 * provide ECN info. 3903 */ 3904 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3905 tcp->tcp_ecn_cwr_sent = B_FALSE; 3906 } 3907 } 3908 3909 mp1 = tcp->tcp_xmit_head; 3910 if (bytes_acked == 0) { 3911 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) { 3912 int dupack_cnt; 3913 3914 TCPS_BUMP_MIB(tcps, tcpInDupAck); 3915 /* 3916 * Fast retransmit. When we have seen exactly three 3917 * identical ACKs while we have unacked data 3918 * outstanding we take it as a hint that our peer 3919 * dropped something. 3920 * 3921 * If TCP is retransmitting, don't do fast retransmit. 3922 */ 3923 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt && 3924 ! tcp->tcp_rexmit) { 3925 /* Do Limited Transmit */ 3926 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) < 3927 tcps->tcps_dupack_fast_retransmit) { 3928 /* 3929 * RFC 3042 3930 * 3931 * What we need to do is temporarily 3932 * increase tcp_cwnd so that new 3933 * data can be sent if it is allowed 3934 * by the receive window (tcp_rwnd). 3935 * tcp_wput_data() will take care of 3936 * the rest. 3937 * 3938 * If the connection is SACK capable, 3939 * only do limited xmit when there 3940 * is SACK info. 3941 * 3942 * Note how tcp_cwnd is incremented. 3943 * The first dup ACK will increase 3944 * it by 1 MSS. The second dup ACK 3945 * will increase it by 2 MSS. This 3946 * means that only 1 new segment will 3947 * be sent for each dup ACK. 3948 */ 3949 if (tcp->tcp_unsent > 0 && 3950 (!tcp->tcp_snd_sack_ok || 3951 (tcp->tcp_snd_sack_ok && 3952 tcp->tcp_notsack_list != NULL))) { 3953 tcp->tcp_cwnd += mss << 3954 (tcp->tcp_dupack_cnt - 1); 3955 flags |= TH_LIMIT_XMIT; 3956 } 3957 } else if (dupack_cnt == 3958 tcps->tcps_dupack_fast_retransmit) { 3959 3960 /* 3961 * If we have reduced tcp_ssthresh 3962 * because of ECN, do not reduce it again 3963 * unless it is already one window of data 3964 * away. After one window of data, tcp_cwr 3965 * should then be cleared. Note that 3966 * for non ECN capable connection, tcp_cwr 3967 * should always be false. 3968 * 3969 * Adjust cwnd since the duplicate 3970 * ack indicates that a packet was 3971 * dropped (due to congestion.) 3972 */ 3973 if (!tcp->tcp_cwr) { 3974 npkt = ((tcp->tcp_snxt - 3975 tcp->tcp_suna) >> 1) / mss; 3976 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 3977 mss; 3978 tcp->tcp_cwnd = (npkt + 3979 tcp->tcp_dupack_cnt) * mss; 3980 } 3981 if (tcp->tcp_ecn_ok) { 3982 tcp->tcp_cwr = B_TRUE; 3983 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 3984 tcp->tcp_ecn_cwr_sent = B_FALSE; 3985 } 3986 3987 /* 3988 * We do Hoe's algorithm. Refer to her 3989 * paper "Improving the Start-up Behavior 3990 * of a Congestion Control Scheme for TCP," 3991 * appeared in SIGCOMM'96. 3992 * 3993 * Save highest seq no we have sent so far. 3994 * Be careful about the invisible FIN byte. 3995 */ 3996 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 3997 (tcp->tcp_unsent == 0)) { 3998 tcp->tcp_rexmit_max = tcp->tcp_fss; 3999 } else { 4000 tcp->tcp_rexmit_max = tcp->tcp_snxt; 4001 } 4002 4003 /* 4004 * Do not allow bursty traffic during. 4005 * fast recovery. Refer to Fall and Floyd's 4006 * paper "Simulation-based Comparisons of 4007 * Tahoe, Reno and SACK TCP" (in CCR?) 4008 * This is a best current practise. 4009 */ 4010 tcp->tcp_snd_burst = TCP_CWND_SS; 4011 4012 /* 4013 * For SACK: 4014 * Calculate tcp_pipe, which is the 4015 * estimated number of bytes in 4016 * network. 4017 * 4018 * tcp_fack is the highest sack'ed seq num 4019 * TCP has received. 4020 * 4021 * tcp_pipe is explained in the above quoted 4022 * Fall and Floyd's paper. tcp_fack is 4023 * explained in Mathis and Mahdavi's 4024 * "Forward Acknowledgment: Refining TCP 4025 * Congestion Control" in SIGCOMM '96. 4026 */ 4027 if (tcp->tcp_snd_sack_ok) { 4028 if (tcp->tcp_notsack_list != NULL) { 4029 tcp->tcp_pipe = tcp->tcp_snxt - 4030 tcp->tcp_fack; 4031 tcp->tcp_sack_snxt = seg_ack; 4032 flags |= TH_NEED_SACK_REXMIT; 4033 } else { 4034 /* 4035 * Always initialize tcp_pipe 4036 * even though we don't have 4037 * any SACK info. If later 4038 * we get SACK info and 4039 * tcp_pipe is not initialized, 4040 * funny things will happen. 4041 */ 4042 tcp->tcp_pipe = 4043 tcp->tcp_cwnd_ssthresh; 4044 } 4045 } else { 4046 flags |= TH_REXMIT_NEEDED; 4047 } /* tcp_snd_sack_ok */ 4048 4049 } else { 4050 /* 4051 * Here we perform congestion 4052 * avoidance, but NOT slow start. 4053 * This is known as the Fast 4054 * Recovery Algorithm. 4055 */ 4056 if (tcp->tcp_snd_sack_ok && 4057 tcp->tcp_notsack_list != NULL) { 4058 flags |= TH_NEED_SACK_REXMIT; 4059 tcp->tcp_pipe -= mss; 4060 if (tcp->tcp_pipe < 0) 4061 tcp->tcp_pipe = 0; 4062 } else { 4063 /* 4064 * We know that one more packet has 4065 * left the pipe thus we can update 4066 * cwnd. 4067 */ 4068 cwnd = tcp->tcp_cwnd + mss; 4069 if (cwnd > tcp->tcp_cwnd_max) 4070 cwnd = tcp->tcp_cwnd_max; 4071 tcp->tcp_cwnd = cwnd; 4072 if (tcp->tcp_unsent > 0) 4073 flags |= TH_XMIT_NEEDED; 4074 } 4075 } 4076 } 4077 } else if (tcp->tcp_zero_win_probe) { 4078 /* 4079 * If the window has opened, need to arrange 4080 * to send additional data. 4081 */ 4082 if (new_swnd != 0) { 4083 /* tcp_suna != tcp_snxt */ 4084 /* Packet contains a window update */ 4085 TCPS_BUMP_MIB(tcps, tcpInWinUpdate); 4086 tcp->tcp_zero_win_probe = 0; 4087 tcp->tcp_timer_backoff = 0; 4088 tcp->tcp_ms_we_have_waited = 0; 4089 4090 /* 4091 * Transmit starting with tcp_suna since 4092 * the one byte probe is not ack'ed. 4093 * If TCP has sent more than one identical 4094 * probe, tcp_rexmit will be set. That means 4095 * tcp_ss_rexmit() will send out the one 4096 * byte along with new data. Otherwise, 4097 * fake the retransmission. 4098 */ 4099 flags |= TH_XMIT_NEEDED; 4100 if (!tcp->tcp_rexmit) { 4101 tcp->tcp_rexmit = B_TRUE; 4102 tcp->tcp_dupack_cnt = 0; 4103 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 4104 tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 4105 } 4106 } 4107 } 4108 goto swnd_update; 4109 } 4110 4111 /* 4112 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 4113 * If the ACK value acks something that we have not yet sent, it might 4114 * be an old duplicate segment. Send an ACK to re-synchronize the 4115 * other side. 4116 * Note: reset in response to unacceptable ACK in SYN_RECEIVE 4117 * state is handled above, so we can always just drop the segment and 4118 * send an ACK here. 4119 * 4120 * In the case where the peer shrinks the window, we see the new window 4121 * update, but all the data sent previously is queued up by the peer. 4122 * To account for this, in tcp_process_shrunk_swnd(), the sequence 4123 * number, which was already sent, and within window, is recorded. 4124 * tcp_snxt is then updated. 4125 * 4126 * If the window has previously shrunk, and an ACK for data not yet 4127 * sent, according to tcp_snxt is recieved, it may still be valid. If 4128 * the ACK is for data within the window at the time the window was 4129 * shrunk, then the ACK is acceptable. In this case tcp_snxt is set to 4130 * the sequence number ACK'ed. 4131 * 4132 * If the ACK covers all the data sent at the time the window was 4133 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE. 4134 * 4135 * Should we send ACKs in response to ACK only segments? 4136 */ 4137 4138 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 4139 if ((tcp->tcp_is_wnd_shrnk) && 4140 (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) { 4141 uint32_t data_acked_ahead_snxt; 4142 4143 data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt; 4144 tcp_update_xmit_tail(tcp, seg_ack); 4145 tcp->tcp_unsent -= data_acked_ahead_snxt; 4146 } else { 4147 TCPS_BUMP_MIB(tcps, tcpInAckUnsent); 4148 /* drop the received segment */ 4149 freemsg(mp); 4150 4151 /* 4152 * Send back an ACK. If tcp_drop_ack_unsent_cnt is 4153 * greater than 0, check if the number of such 4154 * bogus ACks is greater than that count. If yes, 4155 * don't send back any ACK. This prevents TCP from 4156 * getting into an ACK storm if somehow an attacker 4157 * successfully spoofs an acceptable segment to our 4158 * peer. If this continues (count > 2 X threshold), 4159 * we should abort this connection. 4160 */ 4161 if (tcp_drop_ack_unsent_cnt > 0 && 4162 ++tcp->tcp_in_ack_unsent > 4163 tcp_drop_ack_unsent_cnt) { 4164 TCP_STAT(tcps, tcp_in_ack_unsent_drop); 4165 if (tcp->tcp_in_ack_unsent > 2 * 4166 tcp_drop_ack_unsent_cnt) { 4167 (void) tcp_clean_death(tcp, EPROTO); 4168 } 4169 return; 4170 } 4171 mp = tcp_ack_mp(tcp); 4172 if (mp != NULL) { 4173 BUMP_LOCAL(tcp->tcp_obsegs); 4174 TCPS_BUMP_MIB(tcps, tcpOutAck); 4175 tcp_send_data(tcp, mp); 4176 } 4177 return; 4178 } 4179 } else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack, 4180 tcp->tcp_snxt_shrunk)) { 4181 tcp->tcp_is_wnd_shrnk = B_FALSE; 4182 } 4183 4184 /* 4185 * TCP gets a new ACK, update the notsack'ed list to delete those 4186 * blocks that are covered by this ACK. 4187 */ 4188 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 4189 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack, 4190 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list)); 4191 } 4192 4193 /* 4194 * If we got an ACK after fast retransmit, check to see 4195 * if it is a partial ACK. If it is not and the congestion 4196 * window was inflated to account for the other side's 4197 * cached packets, retract it. If it is, do Hoe's algorithm. 4198 */ 4199 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) { 4200 ASSERT(tcp->tcp_rexmit == B_FALSE); 4201 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) { 4202 tcp->tcp_dupack_cnt = 0; 4203 /* 4204 * Restore the orig tcp_cwnd_ssthresh after 4205 * fast retransmit phase. 4206 */ 4207 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) { 4208 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh; 4209 } 4210 tcp->tcp_rexmit_max = seg_ack; 4211 tcp->tcp_cwnd_cnt = 0; 4212 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4213 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4214 4215 /* 4216 * Remove all notsack info to avoid confusion with 4217 * the next fast retrasnmit/recovery phase. 4218 */ 4219 if (tcp->tcp_snd_sack_ok) { 4220 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, 4221 tcp); 4222 } 4223 } else { 4224 if (tcp->tcp_snd_sack_ok && 4225 tcp->tcp_notsack_list != NULL) { 4226 flags |= TH_NEED_SACK_REXMIT; 4227 tcp->tcp_pipe -= mss; 4228 if (tcp->tcp_pipe < 0) 4229 tcp->tcp_pipe = 0; 4230 } else { 4231 /* 4232 * Hoe's algorithm: 4233 * 4234 * Retransmit the unack'ed segment and 4235 * restart fast recovery. Note that we 4236 * need to scale back tcp_cwnd to the 4237 * original value when we started fast 4238 * recovery. This is to prevent overly 4239 * aggressive behaviour in sending new 4240 * segments. 4241 */ 4242 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh + 4243 tcps->tcps_dupack_fast_retransmit * mss; 4244 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd; 4245 flags |= TH_REXMIT_NEEDED; 4246 } 4247 } 4248 } else { 4249 tcp->tcp_dupack_cnt = 0; 4250 if (tcp->tcp_rexmit) { 4251 /* 4252 * TCP is retranmitting. If the ACK ack's all 4253 * outstanding data, update tcp_rexmit_max and 4254 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt 4255 * to the correct value. 4256 * 4257 * Note that SEQ_LEQ() is used. This is to avoid 4258 * unnecessary fast retransmit caused by dup ACKs 4259 * received when TCP does slow start retransmission 4260 * after a time out. During this phase, TCP may 4261 * send out segments which are already received. 4262 * This causes dup ACKs to be sent back. 4263 */ 4264 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) { 4265 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) { 4266 tcp->tcp_rexmit_nxt = seg_ack; 4267 } 4268 if (seg_ack != tcp->tcp_rexmit_max) { 4269 flags |= TH_XMIT_NEEDED; 4270 } 4271 } else { 4272 tcp->tcp_rexmit = B_FALSE; 4273 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 4274 tcp->tcp_snd_burst = tcp->tcp_localnet ? 4275 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 4276 } 4277 tcp->tcp_ms_we_have_waited = 0; 4278 } 4279 } 4280 4281 TCPS_BUMP_MIB(tcps, tcpInAckSegs); 4282 TCPS_UPDATE_MIB(tcps, tcpInAckBytes, bytes_acked); 4283 tcp->tcp_suna = seg_ack; 4284 if (tcp->tcp_zero_win_probe != 0) { 4285 tcp->tcp_zero_win_probe = 0; 4286 tcp->tcp_timer_backoff = 0; 4287 } 4288 4289 /* 4290 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed. 4291 * Note that it cannot be the SYN being ack'ed. The code flow 4292 * will not reach here. 4293 */ 4294 if (mp1 == NULL) { 4295 goto fin_acked; 4296 } 4297 4298 /* 4299 * Update the congestion window. 4300 * 4301 * If TCP is not ECN capable or TCP is ECN capable but the 4302 * congestion experience bit is not set, increase the tcp_cwnd as 4303 * usual. 4304 */ 4305 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) { 4306 cwnd = tcp->tcp_cwnd; 4307 add = mss; 4308 4309 if (cwnd >= tcp->tcp_cwnd_ssthresh) { 4310 /* 4311 * This is to prevent an increase of less than 1 MSS of 4312 * tcp_cwnd. With partial increase, tcp_wput_data() 4313 * may send out tinygrams in order to preserve mblk 4314 * boundaries. 4315 * 4316 * By initializing tcp_cwnd_cnt to new tcp_cwnd and 4317 * decrementing it by 1 MSS for every ACKs, tcp_cwnd is 4318 * increased by 1 MSS for every RTTs. 4319 */ 4320 if (tcp->tcp_cwnd_cnt <= 0) { 4321 tcp->tcp_cwnd_cnt = cwnd + add; 4322 } else { 4323 tcp->tcp_cwnd_cnt -= add; 4324 add = 0; 4325 } 4326 } 4327 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max); 4328 } 4329 4330 /* See if the latest urgent data has been acknowledged */ 4331 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && 4332 SEQ_GT(seg_ack, tcp->tcp_urg)) 4333 tcp->tcp_valid_bits &= ~TCP_URG_VALID; 4334 4335 /* Can we update the RTT estimates? */ 4336 if (tcp->tcp_snd_ts_ok) { 4337 /* Ignore zero timestamp echo-reply. */ 4338 if (tcpopt.tcp_opt_ts_ecr != 0) { 4339 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4340 (int32_t)tcpopt.tcp_opt_ts_ecr); 4341 } 4342 4343 /* If needed, restart the timer. */ 4344 if (tcp->tcp_set_timer == 1) { 4345 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4346 tcp->tcp_set_timer = 0; 4347 } 4348 /* 4349 * Update tcp_csuna in case the other side stops sending 4350 * us timestamps. 4351 */ 4352 tcp->tcp_csuna = tcp->tcp_snxt; 4353 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) { 4354 /* 4355 * An ACK sequence we haven't seen before, so get the RTT 4356 * and update the RTO. But first check if the timestamp is 4357 * valid to use. 4358 */ 4359 if ((mp1->b_next != NULL) && 4360 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next))) 4361 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH - 4362 (int32_t)(intptr_t)mp1->b_prev); 4363 else 4364 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4365 4366 /* Remeber the last sequence to be ACKed */ 4367 tcp->tcp_csuna = seg_ack; 4368 if (tcp->tcp_set_timer == 1) { 4369 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 4370 tcp->tcp_set_timer = 0; 4371 } 4372 } else { 4373 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate); 4374 } 4375 4376 /* Eat acknowledged bytes off the xmit queue. */ 4377 for (;;) { 4378 mblk_t *mp2; 4379 uchar_t *wptr; 4380 4381 wptr = mp1->b_wptr; 4382 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX); 4383 bytes_acked -= (int)(wptr - mp1->b_rptr); 4384 if (bytes_acked < 0) { 4385 mp1->b_rptr = wptr + bytes_acked; 4386 /* 4387 * Set a new timestamp if all the bytes timed by the 4388 * old timestamp have been ack'ed. 4389 */ 4390 if (SEQ_GT(seg_ack, 4391 (uint32_t)(uintptr_t)(mp1->b_next))) { 4392 mp1->b_prev = 4393 (mblk_t *)(uintptr_t)LBOLT_FASTPATH; 4394 mp1->b_next = NULL; 4395 } 4396 break; 4397 } 4398 mp1->b_next = NULL; 4399 mp1->b_prev = NULL; 4400 mp2 = mp1; 4401 mp1 = mp1->b_cont; 4402 4403 /* 4404 * This notification is required for some zero-copy 4405 * clients to maintain a copy semantic. After the data 4406 * is ack'ed, client is safe to modify or reuse the buffer. 4407 */ 4408 if (tcp->tcp_snd_zcopy_aware && 4409 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY)) 4410 tcp_zcopy_notify(tcp); 4411 freeb(mp2); 4412 if (bytes_acked == 0) { 4413 if (mp1 == NULL) { 4414 /* Everything is ack'ed, clear the tail. */ 4415 tcp->tcp_xmit_tail = NULL; 4416 /* 4417 * Cancel the timer unless we are still 4418 * waiting for an ACK for the FIN packet. 4419 */ 4420 if (tcp->tcp_timer_tid != 0 && 4421 tcp->tcp_snxt == tcp->tcp_suna) { 4422 (void) TCP_TIMER_CANCEL(tcp, 4423 tcp->tcp_timer_tid); 4424 tcp->tcp_timer_tid = 0; 4425 } 4426 goto pre_swnd_update; 4427 } 4428 if (mp2 != tcp->tcp_xmit_tail) 4429 break; 4430 tcp->tcp_xmit_tail = mp1; 4431 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 4432 (uintptr_t)INT_MAX); 4433 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr - 4434 mp1->b_rptr); 4435 break; 4436 } 4437 if (mp1 == NULL) { 4438 /* 4439 * More was acked but there is nothing more 4440 * outstanding. This means that the FIN was 4441 * just acked or that we're talking to a clown. 4442 */ 4443 fin_acked: 4444 ASSERT(tcp->tcp_fin_sent); 4445 tcp->tcp_xmit_tail = NULL; 4446 if (tcp->tcp_fin_sent) { 4447 /* FIN was acked - making progress */ 4448 if (!tcp->tcp_fin_acked) 4449 tcp->tcp_ip_forward_progress = B_TRUE; 4450 tcp->tcp_fin_acked = B_TRUE; 4451 if (tcp->tcp_linger_tid != 0 && 4452 TCP_TIMER_CANCEL(tcp, 4453 tcp->tcp_linger_tid) >= 0) { 4454 tcp_stop_lingering(tcp); 4455 freemsg(mp); 4456 mp = NULL; 4457 } 4458 } else { 4459 /* 4460 * We should never get here because 4461 * we have already checked that the 4462 * number of bytes ack'ed should be 4463 * smaller than or equal to what we 4464 * have sent so far (it is the 4465 * acceptability check of the ACK). 4466 * We can only get here if the send 4467 * queue is corrupted. 4468 * 4469 * Terminate the connection and 4470 * panic the system. It is better 4471 * for us to panic instead of 4472 * continuing to avoid other disaster. 4473 */ 4474 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 4475 tcp->tcp_rnxt, TH_RST|TH_ACK); 4476 panic("Memory corruption " 4477 "detected for connection %s.", 4478 tcp_display(tcp, NULL, 4479 DISP_ADDR_AND_PORT)); 4480 /*NOTREACHED*/ 4481 } 4482 goto pre_swnd_update; 4483 } 4484 ASSERT(mp2 != tcp->tcp_xmit_tail); 4485 } 4486 if (tcp->tcp_unsent) { 4487 flags |= TH_XMIT_NEEDED; 4488 } 4489 pre_swnd_update: 4490 tcp->tcp_xmit_head = mp1; 4491 swnd_update: 4492 /* 4493 * The following check is different from most other implementations. 4494 * For bi-directional transfer, when segments are dropped, the 4495 * "normal" check will not accept a window update in those 4496 * retransmitted segemnts. Failing to do that, TCP may send out 4497 * segments which are outside receiver's window. As TCP accepts 4498 * the ack in those retransmitted segments, if the window update in 4499 * the same segment is not accepted, TCP will incorrectly calculates 4500 * that it can send more segments. This can create a deadlock 4501 * with the receiver if its window becomes zero. 4502 */ 4503 if (SEQ_LT(tcp->tcp_swl2, seg_ack) || 4504 SEQ_LT(tcp->tcp_swl1, seg_seq) || 4505 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) { 4506 /* 4507 * The criteria for update is: 4508 * 4509 * 1. the segment acknowledges some data. Or 4510 * 2. the segment is new, i.e. it has a higher seq num. Or 4511 * 3. the segment is not old and the advertised window is 4512 * larger than the previous advertised window. 4513 */ 4514 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd) 4515 flags |= TH_XMIT_NEEDED; 4516 tcp->tcp_swnd = new_swnd; 4517 if (new_swnd > tcp->tcp_max_swnd) 4518 tcp->tcp_max_swnd = new_swnd; 4519 tcp->tcp_swl1 = seg_seq; 4520 tcp->tcp_swl2 = seg_ack; 4521 } 4522 est: 4523 if (tcp->tcp_state > TCPS_ESTABLISHED) { 4524 4525 switch (tcp->tcp_state) { 4526 case TCPS_FIN_WAIT_1: 4527 if (tcp->tcp_fin_acked) { 4528 tcp->tcp_state = TCPS_FIN_WAIT_2; 4529 DTRACE_TCP6(state__change, void, NULL, 4530 ip_xmit_attr_t *, connp->conn_ixa, 4531 void, NULL, tcp_t *, tcp, void, NULL, 4532 int32_t, TCPS_FIN_WAIT_1); 4533 /* 4534 * We implement the non-standard BSD/SunOS 4535 * FIN_WAIT_2 flushing algorithm. 4536 * If there is no user attached to this 4537 * TCP endpoint, then this TCP struct 4538 * could hang around forever in FIN_WAIT_2 4539 * state if the peer forgets to send us 4540 * a FIN. To prevent this, we wait only 4541 * 2*MSL (a convenient time value) for 4542 * the FIN to arrive. If it doesn't show up, 4543 * we flush the TCP endpoint. This algorithm, 4544 * though a violation of RFC-793, has worked 4545 * for over 10 years in BSD systems. 4546 * Note: SunOS 4.x waits 675 seconds before 4547 * flushing the FIN_WAIT_2 connection. 4548 */ 4549 TCP_TIMER_RESTART(tcp, 4550 tcp->tcp_fin_wait_2_flush_interval); 4551 } 4552 break; 4553 case TCPS_FIN_WAIT_2: 4554 break; /* Shutdown hook? */ 4555 case TCPS_LAST_ACK: 4556 freemsg(mp); 4557 if (tcp->tcp_fin_acked) { 4558 (void) tcp_clean_death(tcp, 0); 4559 return; 4560 } 4561 goto xmit_check; 4562 case TCPS_CLOSING: 4563 if (tcp->tcp_fin_acked) { 4564 SET_TIME_WAIT(tcps, tcp, connp); 4565 DTRACE_TCP6(state__change, void, NULL, 4566 ip_xmit_attr_t *, connp->conn_ixa, void, 4567 NULL, tcp_t *, tcp, void, NULL, int32_t, 4568 TCPS_CLOSING); 4569 } 4570 /*FALLTHRU*/ 4571 case TCPS_CLOSE_WAIT: 4572 freemsg(mp); 4573 goto xmit_check; 4574 default: 4575 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 4576 break; 4577 } 4578 } 4579 if (flags & TH_FIN) { 4580 /* Make sure we ack the fin */ 4581 flags |= TH_ACK_NEEDED; 4582 if (!tcp->tcp_fin_rcvd) { 4583 tcp->tcp_fin_rcvd = B_TRUE; 4584 tcp->tcp_rnxt++; 4585 tcpha = tcp->tcp_tcpha; 4586 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4587 4588 /* 4589 * Generate the ordrel_ind at the end unless the 4590 * conn is detached or it is a STREAMS based eager. 4591 * In the eager case we defer the notification until 4592 * tcp_accept_finish has run. 4593 */ 4594 if (!TCP_IS_DETACHED(tcp) && (IPCL_IS_NONSTR(connp) || 4595 (tcp->tcp_listener == NULL && 4596 !tcp->tcp_hard_binding))) 4597 flags |= TH_ORDREL_NEEDED; 4598 switch (tcp->tcp_state) { 4599 case TCPS_SYN_RCVD: 4600 tcp->tcp_state = TCPS_CLOSE_WAIT; 4601 DTRACE_TCP6(state__change, void, NULL, 4602 ip_xmit_attr_t *, connp->conn_ixa, 4603 void, NULL, tcp_t *, tcp, void, NULL, 4604 int32_t, TCPS_SYN_RCVD); 4605 /* Keepalive? */ 4606 break; 4607 case TCPS_ESTABLISHED: 4608 tcp->tcp_state = TCPS_CLOSE_WAIT; 4609 DTRACE_TCP6(state__change, void, NULL, 4610 ip_xmit_attr_t *, connp->conn_ixa, 4611 void, NULL, tcp_t *, tcp, void, NULL, 4612 int32_t, TCPS_ESTABLISHED); 4613 /* Keepalive? */ 4614 break; 4615 case TCPS_FIN_WAIT_1: 4616 if (!tcp->tcp_fin_acked) { 4617 tcp->tcp_state = TCPS_CLOSING; 4618 DTRACE_TCP6(state__change, void, NULL, 4619 ip_xmit_attr_t *, connp->conn_ixa, 4620 void, NULL, tcp_t *, tcp, void, 4621 NULL, int32_t, TCPS_FIN_WAIT_1); 4622 break; 4623 } 4624 /* FALLTHRU */ 4625 case TCPS_FIN_WAIT_2: 4626 SET_TIME_WAIT(tcps, tcp, connp); 4627 DTRACE_TCP6(state__change, void, NULL, 4628 ip_xmit_attr_t *, connp->conn_ixa, void, 4629 NULL, tcp_t *, tcp, void, NULL, int32_t, 4630 TCPS_FIN_WAIT_2); 4631 if (seg_len) { 4632 /* 4633 * implies data piggybacked on FIN. 4634 * break to handle data. 4635 */ 4636 break; 4637 } 4638 freemsg(mp); 4639 goto ack_check; 4640 } 4641 } 4642 } 4643 if (mp == NULL) 4644 goto xmit_check; 4645 if (seg_len == 0) { 4646 freemsg(mp); 4647 goto xmit_check; 4648 } 4649 if (mp->b_rptr == mp->b_wptr) { 4650 /* 4651 * The header has been consumed, so we remove the 4652 * zero-length mblk here. 4653 */ 4654 mp1 = mp; 4655 mp = mp->b_cont; 4656 freeb(mp1); 4657 } 4658 update_ack: 4659 tcpha = tcp->tcp_tcpha; 4660 tcp->tcp_rack_cnt++; 4661 { 4662 uint32_t cur_max; 4663 4664 cur_max = tcp->tcp_rack_cur_max; 4665 if (tcp->tcp_rack_cnt >= cur_max) { 4666 /* 4667 * We have more unacked data than we should - send 4668 * an ACK now. 4669 */ 4670 flags |= TH_ACK_NEEDED; 4671 cur_max++; 4672 if (cur_max > tcp->tcp_rack_abs_max) 4673 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 4674 else 4675 tcp->tcp_rack_cur_max = cur_max; 4676 } else if (TCP_IS_DETACHED(tcp)) { 4677 /* We don't have an ACK timer for detached TCP. */ 4678 flags |= TH_ACK_NEEDED; 4679 } else if (seg_len < mss) { 4680 /* 4681 * If we get a segment that is less than an mss, and we 4682 * already have unacknowledged data, and the amount 4683 * unacknowledged is not a multiple of mss, then we 4684 * better generate an ACK now. Otherwise, this may be 4685 * the tail piece of a transaction, and we would rather 4686 * wait for the response. 4687 */ 4688 uint32_t udif; 4689 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <= 4690 (uintptr_t)INT_MAX); 4691 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack); 4692 if (udif && (udif % mss)) 4693 flags |= TH_ACK_NEEDED; 4694 else 4695 flags |= TH_ACK_TIMER_NEEDED; 4696 } else { 4697 /* Start delayed ack timer */ 4698 flags |= TH_ACK_TIMER_NEEDED; 4699 } 4700 } 4701 tcp->tcp_rnxt += seg_len; 4702 tcpha->tha_ack = htonl(tcp->tcp_rnxt); 4703 4704 if (mp == NULL) 4705 goto xmit_check; 4706 4707 /* Update SACK list */ 4708 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 4709 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt, 4710 &(tcp->tcp_num_sack_blk)); 4711 } 4712 4713 if (tcp->tcp_urp_mp) { 4714 tcp->tcp_urp_mp->b_cont = mp; 4715 mp = tcp->tcp_urp_mp; 4716 tcp->tcp_urp_mp = NULL; 4717 /* Ready for a new signal. */ 4718 tcp->tcp_urp_last_valid = B_FALSE; 4719 #ifdef DEBUG 4720 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4721 "tcp_rput: sending exdata_ind %s", 4722 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4723 #endif /* DEBUG */ 4724 } 4725 4726 /* 4727 * Check for ancillary data changes compared to last segment. 4728 */ 4729 if (connp->conn_recv_ancillary.crb_all != 0) { 4730 mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira); 4731 if (mp == NULL) 4732 return; 4733 } 4734 4735 if (IPCL_IS_NONSTR(connp)) { 4736 /* 4737 * Non-STREAMS socket 4738 */ 4739 boolean_t push = flags & (TH_PUSH|TH_FIN); 4740 int error; 4741 4742 if ((*sockupcalls->su_recv)(connp->conn_upper_handle, 4743 mp, seg_len, 0, &error, &push) <= 0) { 4744 /* 4745 * We should never be in middle of a 4746 * fallback, the squeue guarantees that. 4747 */ 4748 ASSERT(error != EOPNOTSUPP); 4749 if (error == ENOSPC) 4750 tcp->tcp_rwnd -= seg_len; 4751 } else if (push) { 4752 /* PUSH bit set and sockfs is not flow controlled */ 4753 flags |= tcp_rwnd_reopen(tcp); 4754 } 4755 } else if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) { 4756 /* 4757 * Side queue inbound data until the accept happens. 4758 * tcp_accept/tcp_rput drains this when the accept happens. 4759 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or 4760 * T_EXDATA_IND) it is queued on b_next. 4761 * XXX Make urgent data use this. Requires: 4762 * Removing tcp_listener check for TH_URG 4763 * Making M_PCPROTO and MARK messages skip the eager case 4764 */ 4765 4766 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4767 } else { 4768 /* Active STREAMS socket */ 4769 if (mp->b_datap->db_type != M_DATA || 4770 (flags & TH_MARKNEXT_NEEDED)) { 4771 if (tcp->tcp_rcv_list != NULL) { 4772 flags |= tcp_rcv_drain(tcp); 4773 } 4774 ASSERT(tcp->tcp_rcv_list == NULL || 4775 tcp->tcp_fused_sigurg); 4776 4777 if (flags & TH_MARKNEXT_NEEDED) { 4778 #ifdef DEBUG 4779 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4780 "tcp_rput: sending MSGMARKNEXT %s", 4781 tcp_display(tcp, NULL, 4782 DISP_PORT_ONLY)); 4783 #endif /* DEBUG */ 4784 mp->b_flag |= MSGMARKNEXT; 4785 flags &= ~TH_MARKNEXT_NEEDED; 4786 } 4787 4788 if (is_system_labeled()) 4789 tcp_setcred_data(mp, ira); 4790 4791 putnext(connp->conn_rq, mp); 4792 if (!canputnext(connp->conn_rq)) 4793 tcp->tcp_rwnd -= seg_len; 4794 } else if ((flags & (TH_PUSH|TH_FIN)) || 4795 tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) { 4796 if (tcp->tcp_rcv_list != NULL) { 4797 /* 4798 * Enqueue the new segment first and then 4799 * call tcp_rcv_drain() to send all data 4800 * up. The other way to do this is to 4801 * send all queued data up and then call 4802 * putnext() to send the new segment up. 4803 * This way can remove the else part later 4804 * on. 4805 * 4806 * We don't do this to avoid one more call to 4807 * canputnext() as tcp_rcv_drain() needs to 4808 * call canputnext(). 4809 */ 4810 tcp_rcv_enqueue(tcp, mp, seg_len, 4811 ira->ira_cred); 4812 flags |= tcp_rcv_drain(tcp); 4813 } else { 4814 if (is_system_labeled()) 4815 tcp_setcred_data(mp, ira); 4816 4817 putnext(connp->conn_rq, mp); 4818 if (!canputnext(connp->conn_rq)) 4819 tcp->tcp_rwnd -= seg_len; 4820 } 4821 } else { 4822 /* 4823 * Enqueue all packets when processing an mblk 4824 * from the co queue and also enqueue normal packets. 4825 */ 4826 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred); 4827 } 4828 /* 4829 * Make sure the timer is running if we have data waiting 4830 * for a push bit. This provides resiliency against 4831 * implementations that do not correctly generate push bits. 4832 */ 4833 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 4834 /* 4835 * The connection may be closed at this point, so don't 4836 * do anything for a detached tcp. 4837 */ 4838 if (!TCP_IS_DETACHED(tcp)) 4839 tcp->tcp_push_tid = TCP_TIMER(tcp, 4840 tcp_push_timer, 4841 tcps->tcps_push_timer_interval); 4842 } 4843 } 4844 4845 xmit_check: 4846 /* Is there anything left to do? */ 4847 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 4848 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 4849 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 4850 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4851 goto done; 4852 4853 /* Any transmit work to do and a non-zero window? */ 4854 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 4855 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 4856 if (flags & TH_REXMIT_NEEDED) { 4857 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 4858 4859 TCPS_BUMP_MIB(tcps, tcpOutFastRetrans); 4860 if (snd_size > mss) 4861 snd_size = mss; 4862 if (snd_size > tcp->tcp_swnd) 4863 snd_size = tcp->tcp_swnd; 4864 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 4865 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 4866 B_TRUE); 4867 4868 if (mp1 != NULL) { 4869 tcp->tcp_xmit_head->b_prev = 4870 (mblk_t *)LBOLT_FASTPATH; 4871 tcp->tcp_csuna = tcp->tcp_snxt; 4872 TCPS_BUMP_MIB(tcps, tcpRetransSegs); 4873 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, 4874 snd_size); 4875 tcp_send_data(tcp, mp1); 4876 } 4877 } 4878 if (flags & TH_NEED_SACK_REXMIT) { 4879 tcp_sack_rexmit(tcp, &flags); 4880 } 4881 /* 4882 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 4883 * out new segment. Note that tcp_rexmit should not be 4884 * set, otherwise TH_LIMIT_XMIT should not be set. 4885 */ 4886 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 4887 if (!tcp->tcp_rexmit) { 4888 tcp_wput_data(tcp, NULL, B_FALSE); 4889 } else { 4890 tcp_ss_rexmit(tcp); 4891 } 4892 } 4893 /* 4894 * Adjust tcp_cwnd back to normal value after sending 4895 * new data segments. 4896 */ 4897 if (flags & TH_LIMIT_XMIT) { 4898 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 4899 /* 4900 * This will restart the timer. Restarting the 4901 * timer is used to avoid a timeout before the 4902 * limited transmitted segment's ACK gets back. 4903 */ 4904 if (tcp->tcp_xmit_head != NULL) 4905 tcp->tcp_xmit_head->b_prev = 4906 (mblk_t *)LBOLT_FASTPATH; 4907 } 4908 4909 /* Anything more to do? */ 4910 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 4911 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 4912 goto done; 4913 } 4914 ack_check: 4915 if (flags & TH_SEND_URP_MARK) { 4916 ASSERT(tcp->tcp_urp_mark_mp); 4917 ASSERT(!IPCL_IS_NONSTR(connp)); 4918 /* 4919 * Send up any queued data and then send the mark message 4920 */ 4921 if (tcp->tcp_rcv_list != NULL) { 4922 flags |= tcp_rcv_drain(tcp); 4923 4924 } 4925 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4926 mp1 = tcp->tcp_urp_mark_mp; 4927 tcp->tcp_urp_mark_mp = NULL; 4928 if (is_system_labeled()) 4929 tcp_setcred_data(mp1, ira); 4930 4931 putnext(connp->conn_rq, mp1); 4932 #ifdef DEBUG 4933 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 4934 "tcp_rput: sending zero-length %s %s", 4935 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 4936 "MSGNOTMARKNEXT"), 4937 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4938 #endif /* DEBUG */ 4939 flags &= ~TH_SEND_URP_MARK; 4940 } 4941 if (flags & TH_ACK_NEEDED) { 4942 /* 4943 * Time to send an ack for some reason. 4944 */ 4945 mp1 = tcp_ack_mp(tcp); 4946 4947 if (mp1 != NULL) { 4948 tcp_send_data(tcp, mp1); 4949 BUMP_LOCAL(tcp->tcp_obsegs); 4950 TCPS_BUMP_MIB(tcps, tcpOutAck); 4951 } 4952 if (tcp->tcp_ack_tid != 0) { 4953 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4954 tcp->tcp_ack_tid = 0; 4955 } 4956 } 4957 if (flags & TH_ACK_TIMER_NEEDED) { 4958 /* 4959 * Arrange for deferred ACK or push wait timeout. 4960 * Start timer if it is not already running. 4961 */ 4962 if (tcp->tcp_ack_tid == 0) { 4963 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 4964 tcp->tcp_localnet ? 4965 tcps->tcps_local_dack_interval : 4966 tcps->tcps_deferred_ack_interval); 4967 } 4968 } 4969 if (flags & TH_ORDREL_NEEDED) { 4970 /* 4971 * Notify upper layer about an orderly release. If this is 4972 * a non-STREAMS socket, then just make an upcall. For STREAMS 4973 * we send up an ordrel_ind, unless this is an eager, in which 4974 * case the ordrel will be sent when tcp_accept_finish runs. 4975 * Note that for non-STREAMS we make an upcall even if it is an 4976 * eager, because we have an upper handle to send it to. 4977 */ 4978 ASSERT(IPCL_IS_NONSTR(connp) || tcp->tcp_listener == NULL); 4979 ASSERT(!tcp->tcp_detached); 4980 4981 if (IPCL_IS_NONSTR(connp)) { 4982 ASSERT(tcp->tcp_ordrel_mp == NULL); 4983 tcp->tcp_ordrel_done = B_TRUE; 4984 (*sockupcalls->su_opctl)(connp->conn_upper_handle, 4985 SOCK_OPCTL_SHUT_RECV, 0); 4986 goto done; 4987 } 4988 4989 if (tcp->tcp_rcv_list != NULL) { 4990 /* 4991 * Push any mblk(s) enqueued from co processing. 4992 */ 4993 flags |= tcp_rcv_drain(tcp); 4994 } 4995 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 4996 4997 mp1 = tcp->tcp_ordrel_mp; 4998 tcp->tcp_ordrel_mp = NULL; 4999 tcp->tcp_ordrel_done = B_TRUE; 5000 putnext(connp->conn_rq, mp1); 5001 } 5002 done: 5003 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 5004 } 5005 5006 /* 5007 * Attach ancillary data to a received TCP segments for the 5008 * ancillary pieces requested by the application that are 5009 * different than they were in the previous data segment. 5010 * 5011 * Save the "current" values once memory allocation is ok so that 5012 * when memory allocation fails we can just wait for the next data segment. 5013 */ 5014 static mblk_t * 5015 tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp, 5016 ip_recv_attr_t *ira) 5017 { 5018 struct T_optdata_ind *todi; 5019 int optlen; 5020 uchar_t *optptr; 5021 struct T_opthdr *toh; 5022 crb_t addflag; /* Which pieces to add */ 5023 mblk_t *mp1; 5024 conn_t *connp = tcp->tcp_connp; 5025 5026 optlen = 0; 5027 addflag.crb_all = 0; 5028 /* If app asked for pktinfo and the index has changed ... */ 5029 if (connp->conn_recv_ancillary.crb_ip_recvpktinfo && 5030 ira->ira_ruifindex != tcp->tcp_recvifindex) { 5031 optlen += sizeof (struct T_opthdr) + 5032 sizeof (struct in6_pktinfo); 5033 addflag.crb_ip_recvpktinfo = 1; 5034 } 5035 /* If app asked for hoplimit and it has changed ... */ 5036 if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit && 5037 ipp->ipp_hoplimit != tcp->tcp_recvhops) { 5038 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 5039 addflag.crb_ipv6_recvhoplimit = 1; 5040 } 5041 /* If app asked for tclass and it has changed ... */ 5042 if (connp->conn_recv_ancillary.crb_ipv6_recvtclass && 5043 ipp->ipp_tclass != tcp->tcp_recvtclass) { 5044 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 5045 addflag.crb_ipv6_recvtclass = 1; 5046 } 5047 /* 5048 * If app asked for hopbyhop headers and it has changed ... 5049 * For security labels, note that (1) security labels can't change on 5050 * a connected socket at all, (2) we're connected to at most one peer, 5051 * (3) if anything changes, then it must be some other extra option. 5052 */ 5053 if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts && 5054 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 5055 (ipp->ipp_fields & IPPF_HOPOPTS), 5056 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 5057 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen; 5058 addflag.crb_ipv6_recvhopopts = 1; 5059 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 5060 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 5061 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 5062 return (mp); 5063 } 5064 /* If app asked for dst headers before routing headers ... */ 5065 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts && 5066 ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen, 5067 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5068 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) { 5069 optlen += sizeof (struct T_opthdr) + 5070 ipp->ipp_rthdrdstoptslen; 5071 addflag.crb_ipv6_recvrthdrdstopts = 1; 5072 if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts, 5073 &tcp->tcp_rthdrdstoptslen, 5074 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5075 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) 5076 return (mp); 5077 } 5078 /* If app asked for routing headers and it has changed ... */ 5079 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr && 5080 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 5081 (ipp->ipp_fields & IPPF_RTHDR), 5082 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 5083 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 5084 addflag.crb_ipv6_recvrthdr = 1; 5085 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 5086 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 5087 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 5088 return (mp); 5089 } 5090 /* If app asked for dest headers and it has changed ... */ 5091 if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts || 5092 connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) && 5093 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 5094 (ipp->ipp_fields & IPPF_DSTOPTS), 5095 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 5096 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 5097 addflag.crb_ipv6_recvdstopts = 1; 5098 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 5099 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 5100 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 5101 return (mp); 5102 } 5103 5104 if (optlen == 0) { 5105 /* Nothing to add */ 5106 return (mp); 5107 } 5108 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 5109 if (mp1 == NULL) { 5110 /* 5111 * Defer sending ancillary data until the next TCP segment 5112 * arrives. 5113 */ 5114 return (mp); 5115 } 5116 mp1->b_cont = mp; 5117 mp = mp1; 5118 mp->b_wptr += sizeof (*todi) + optlen; 5119 mp->b_datap->db_type = M_PROTO; 5120 todi = (struct T_optdata_ind *)mp->b_rptr; 5121 todi->PRIM_type = T_OPTDATA_IND; 5122 todi->DATA_flag = 1; /* MORE data */ 5123 todi->OPT_length = optlen; 5124 todi->OPT_offset = sizeof (*todi); 5125 optptr = (uchar_t *)&todi[1]; 5126 /* 5127 * If app asked for pktinfo and the index has changed ... 5128 * Note that the local address never changes for the connection. 5129 */ 5130 if (addflag.crb_ip_recvpktinfo) { 5131 struct in6_pktinfo *pkti; 5132 uint_t ifindex; 5133 5134 ifindex = ira->ira_ruifindex; 5135 toh = (struct T_opthdr *)optptr; 5136 toh->level = IPPROTO_IPV6; 5137 toh->name = IPV6_PKTINFO; 5138 toh->len = sizeof (*toh) + sizeof (*pkti); 5139 toh->status = 0; 5140 optptr += sizeof (*toh); 5141 pkti = (struct in6_pktinfo *)optptr; 5142 pkti->ipi6_addr = connp->conn_laddr_v6; 5143 pkti->ipi6_ifindex = ifindex; 5144 optptr += sizeof (*pkti); 5145 ASSERT(OK_32PTR(optptr)); 5146 /* Save as "last" value */ 5147 tcp->tcp_recvifindex = ifindex; 5148 } 5149 /* If app asked for hoplimit and it has changed ... */ 5150 if (addflag.crb_ipv6_recvhoplimit) { 5151 toh = (struct T_opthdr *)optptr; 5152 toh->level = IPPROTO_IPV6; 5153 toh->name = IPV6_HOPLIMIT; 5154 toh->len = sizeof (*toh) + sizeof (uint_t); 5155 toh->status = 0; 5156 optptr += sizeof (*toh); 5157 *(uint_t *)optptr = ipp->ipp_hoplimit; 5158 optptr += sizeof (uint_t); 5159 ASSERT(OK_32PTR(optptr)); 5160 /* Save as "last" value */ 5161 tcp->tcp_recvhops = ipp->ipp_hoplimit; 5162 } 5163 /* If app asked for tclass and it has changed ... */ 5164 if (addflag.crb_ipv6_recvtclass) { 5165 toh = (struct T_opthdr *)optptr; 5166 toh->level = IPPROTO_IPV6; 5167 toh->name = IPV6_TCLASS; 5168 toh->len = sizeof (*toh) + sizeof (uint_t); 5169 toh->status = 0; 5170 optptr += sizeof (*toh); 5171 *(uint_t *)optptr = ipp->ipp_tclass; 5172 optptr += sizeof (uint_t); 5173 ASSERT(OK_32PTR(optptr)); 5174 /* Save as "last" value */ 5175 tcp->tcp_recvtclass = ipp->ipp_tclass; 5176 } 5177 if (addflag.crb_ipv6_recvhopopts) { 5178 toh = (struct T_opthdr *)optptr; 5179 toh->level = IPPROTO_IPV6; 5180 toh->name = IPV6_HOPOPTS; 5181 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen; 5182 toh->status = 0; 5183 optptr += sizeof (*toh); 5184 bcopy((uchar_t *)ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen); 5185 optptr += ipp->ipp_hopoptslen; 5186 ASSERT(OK_32PTR(optptr)); 5187 /* Save as last value */ 5188 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 5189 (ipp->ipp_fields & IPPF_HOPOPTS), 5190 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 5191 } 5192 if (addflag.crb_ipv6_recvrthdrdstopts) { 5193 toh = (struct T_opthdr *)optptr; 5194 toh->level = IPPROTO_IPV6; 5195 toh->name = IPV6_RTHDRDSTOPTS; 5196 toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen; 5197 toh->status = 0; 5198 optptr += sizeof (*toh); 5199 bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen); 5200 optptr += ipp->ipp_rthdrdstoptslen; 5201 ASSERT(OK_32PTR(optptr)); 5202 /* Save as last value */ 5203 ip_savebuf((void **)&tcp->tcp_rthdrdstopts, 5204 &tcp->tcp_rthdrdstoptslen, 5205 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS), 5206 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen); 5207 } 5208 if (addflag.crb_ipv6_recvrthdr) { 5209 toh = (struct T_opthdr *)optptr; 5210 toh->level = IPPROTO_IPV6; 5211 toh->name = IPV6_RTHDR; 5212 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 5213 toh->status = 0; 5214 optptr += sizeof (*toh); 5215 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 5216 optptr += ipp->ipp_rthdrlen; 5217 ASSERT(OK_32PTR(optptr)); 5218 /* Save as last value */ 5219 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 5220 (ipp->ipp_fields & IPPF_RTHDR), 5221 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 5222 } 5223 if (addflag.crb_ipv6_recvdstopts) { 5224 toh = (struct T_opthdr *)optptr; 5225 toh->level = IPPROTO_IPV6; 5226 toh->name = IPV6_DSTOPTS; 5227 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 5228 toh->status = 0; 5229 optptr += sizeof (*toh); 5230 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 5231 optptr += ipp->ipp_dstoptslen; 5232 ASSERT(OK_32PTR(optptr)); 5233 /* Save as last value */ 5234 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 5235 (ipp->ipp_fields & IPPF_DSTOPTS), 5236 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 5237 } 5238 ASSERT(optptr == mp->b_wptr); 5239 return (mp); 5240 } 5241 5242 /* The minimum of smoothed mean deviation in RTO calculation. */ 5243 #define TCP_SD_MIN 400 5244 5245 /* 5246 * Set RTO for this connection. The formula is from Jacobson and Karels' 5247 * "Congestion Avoidance and Control" in SIGCOMM '88. The variable names 5248 * are the same as those in Appendix A.2 of that paper. 5249 * 5250 * m = new measurement 5251 * sa = smoothed RTT average (8 * average estimates). 5252 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates). 5253 */ 5254 static void 5255 tcp_set_rto(tcp_t *tcp, clock_t rtt) 5256 { 5257 long m = TICK_TO_MSEC(rtt); 5258 clock_t sa = tcp->tcp_rtt_sa; 5259 clock_t sv = tcp->tcp_rtt_sd; 5260 clock_t rto; 5261 tcp_stack_t *tcps = tcp->tcp_tcps; 5262 5263 TCPS_BUMP_MIB(tcps, tcpRttUpdate); 5264 tcp->tcp_rtt_update++; 5265 5266 /* tcp_rtt_sa is not 0 means this is a new sample. */ 5267 if (sa != 0) { 5268 /* 5269 * Update average estimator: 5270 * new rtt = 7/8 old rtt + 1/8 Error 5271 */ 5272 5273 /* m is now Error in estimate. */ 5274 m -= sa >> 3; 5275 if ((sa += m) <= 0) { 5276 /* 5277 * Don't allow the smoothed average to be negative. 5278 * We use 0 to denote reinitialization of the 5279 * variables. 5280 */ 5281 sa = 1; 5282 } 5283 5284 /* 5285 * Update deviation estimator: 5286 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev) 5287 */ 5288 if (m < 0) 5289 m = -m; 5290 m -= sv >> 2; 5291 sv += m; 5292 } else { 5293 /* 5294 * This follows BSD's implementation. So the reinitialized 5295 * RTO is 3 * m. We cannot go less than 2 because if the 5296 * link is bandwidth dominated, doubling the window size 5297 * during slow start means doubling the RTT. We want to be 5298 * more conservative when we reinitialize our estimates. 3 5299 * is just a convenient number. 5300 */ 5301 sa = m << 3; 5302 sv = m << 1; 5303 } 5304 if (sv < TCP_SD_MIN) { 5305 /* 5306 * We do not know that if sa captures the delay ACK 5307 * effect as in a long train of segments, a receiver 5308 * does not delay its ACKs. So set the minimum of sv 5309 * to be TCP_SD_MIN, which is default to 400 ms, twice 5310 * of BSD DATO. That means the minimum of mean 5311 * deviation is 100 ms. 5312 * 5313 */ 5314 sv = TCP_SD_MIN; 5315 } 5316 tcp->tcp_rtt_sa = sa; 5317 tcp->tcp_rtt_sd = sv; 5318 /* 5319 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv) 5320 * 5321 * Add tcp_rexmit_interval extra in case of extreme environment 5322 * where the algorithm fails to work. The default value of 5323 * tcp_rexmit_interval_extra should be 0. 5324 * 5325 * As we use a finer grained clock than BSD and update 5326 * RTO for every ACKs, add in another .25 of RTT to the 5327 * deviation of RTO to accomodate burstiness of 1/4 of 5328 * window size. 5329 */ 5330 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5); 5331 5332 TCP_SET_RTO(tcp, rto); 5333 5334 /* Now, we can reset tcp_timer_backoff to use the new RTO... */ 5335 tcp->tcp_timer_backoff = 0; 5336 } 5337 5338 /* 5339 * On a labeled system we have some protocols above TCP, such as RPC, which 5340 * appear to assume that every mblk in a chain has a db_credp. 5341 */ 5342 static void 5343 tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira) 5344 { 5345 ASSERT(is_system_labeled()); 5346 ASSERT(ira->ira_cred != NULL); 5347 5348 while (mp != NULL) { 5349 mblk_setcred(mp, ira->ira_cred, NOPID); 5350 mp = mp->b_cont; 5351 } 5352 } 5353 5354 uint_t 5355 tcp_rwnd_reopen(tcp_t *tcp) 5356 { 5357 uint_t ret = 0; 5358 uint_t thwin; 5359 conn_t *connp = tcp->tcp_connp; 5360 5361 /* Learn the latest rwnd information that we sent to the other side. */ 5362 thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win)) 5363 << tcp->tcp_rcv_ws; 5364 /* This is peer's calculated send window (our receive window). */ 5365 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 5366 /* 5367 * Increase the receive window to max. But we need to do receiver 5368 * SWS avoidance. This means that we need to check the increase of 5369 * of receive window is at least 1 MSS. 5370 */ 5371 if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) { 5372 /* 5373 * If the window that the other side knows is less than max 5374 * deferred acks segments, send an update immediately. 5375 */ 5376 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) { 5377 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutWinUpdate); 5378 ret = TH_ACK_NEEDED; 5379 } 5380 tcp->tcp_rwnd = connp->conn_rcvbuf; 5381 } 5382 return (ret); 5383 } 5384 5385 /* 5386 * Handle a packet that has been reclassified by TCP. 5387 * This function drops the ref on connp that the caller had. 5388 */ 5389 void 5390 tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst) 5391 { 5392 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 5393 5394 if (connp->conn_incoming_ifindex != 0 && 5395 connp->conn_incoming_ifindex != ira->ira_ruifindex) { 5396 freemsg(mp); 5397 CONN_DEC_REF(connp); 5398 return; 5399 } 5400 5401 if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) || 5402 (ira->ira_flags & IRAF_IPSEC_SECURE)) { 5403 ip6_t *ip6h; 5404 ipha_t *ipha; 5405 5406 if (ira->ira_flags & IRAF_IS_IPV4) { 5407 ipha = (ipha_t *)mp->b_rptr; 5408 ip6h = NULL; 5409 } else { 5410 ipha = NULL; 5411 ip6h = (ip6_t *)mp->b_rptr; 5412 } 5413 mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira); 5414 if (mp == NULL) { 5415 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 5416 /* Note that mp is NULL */ 5417 ip_drop_input("ipIfStatsInDiscards", mp, NULL); 5418 CONN_DEC_REF(connp); 5419 return; 5420 } 5421 } 5422 5423 if (IPCL_IS_TCP(connp)) { 5424 /* 5425 * do not drain, certain use cases can blow 5426 * the stack 5427 */ 5428 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 5429 connp->conn_recv, connp, ira, 5430 SQ_NODRAIN, SQTAG_IP_TCP_INPUT); 5431 } else { 5432 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */ 5433 (connp->conn_recv)(connp, mp, NULL, 5434 ira); 5435 CONN_DEC_REF(connp); 5436 } 5437 5438 } 5439 5440 /* ARGSUSED */ 5441 static void 5442 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy) 5443 { 5444 conn_t *connp = (conn_t *)arg; 5445 tcp_t *tcp = connp->conn_tcp; 5446 queue_t *q = connp->conn_rq; 5447 5448 ASSERT(!IPCL_IS_NONSTR(connp)); 5449 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5450 tcp->tcp_rsrv_mp = mp; 5451 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5452 5453 if (TCP_IS_DETACHED(tcp) || q == NULL) { 5454 return; 5455 } 5456 5457 if (tcp->tcp_fused) { 5458 tcp_fuse_backenable(tcp); 5459 return; 5460 } 5461 5462 if (canputnext(q)) { 5463 /* Not flow-controlled, open rwnd */ 5464 tcp->tcp_rwnd = connp->conn_rcvbuf; 5465 5466 /* 5467 * Send back a window update immediately if TCP is above 5468 * ESTABLISHED state and the increase of the rcv window 5469 * that the other side knows is at least 1 MSS after flow 5470 * control is lifted. 5471 */ 5472 if (tcp->tcp_state >= TCPS_ESTABLISHED && 5473 tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) { 5474 tcp_xmit_ctl(NULL, tcp, 5475 (tcp->tcp_swnd == 0) ? tcp->tcp_suna : 5476 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 5477 } 5478 } 5479 } 5480 5481 /* 5482 * The read side service routine is called mostly when we get back-enabled as a 5483 * result of flow control relief. Since we don't actually queue anything in 5484 * TCP, we have no data to send out of here. What we do is clear the receive 5485 * window, and send out a window update. 5486 */ 5487 void 5488 tcp_rsrv(queue_t *q) 5489 { 5490 conn_t *connp = Q_TO_CONN(q); 5491 tcp_t *tcp = connp->conn_tcp; 5492 mblk_t *mp; 5493 5494 /* No code does a putq on the read side */ 5495 ASSERT(q->q_first == NULL); 5496 5497 /* 5498 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already 5499 * been run. So just return. 5500 */ 5501 mutex_enter(&tcp->tcp_rsrv_mp_lock); 5502 if ((mp = tcp->tcp_rsrv_mp) == NULL) { 5503 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5504 return; 5505 } 5506 tcp->tcp_rsrv_mp = NULL; 5507 mutex_exit(&tcp->tcp_rsrv_mp_lock); 5508 5509 CONN_INC_REF(connp); 5510 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp, 5511 NULL, SQ_PROCESS, SQTAG_TCP_RSRV); 5512 } 5513 5514 /* At minimum we need 8 bytes in the TCP header for the lookup */ 5515 #define ICMP_MIN_TCP_HDR 8 5516 5517 /* 5518 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages 5519 * passed up by IP. The message is always received on the correct tcp_t. 5520 * Assumes that IP has pulled up everything up to and including the ICMP header. 5521 */ 5522 /* ARGSUSED2 */ 5523 void 5524 tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 5525 { 5526 conn_t *connp = (conn_t *)arg1; 5527 icmph_t *icmph; 5528 ipha_t *ipha; 5529 int iph_hdr_length; 5530 tcpha_t *tcpha; 5531 uint32_t seg_seq; 5532 tcp_t *tcp = connp->conn_tcp; 5533 5534 /* Assume IP provides aligned packets */ 5535 ASSERT(OK_32PTR(mp->b_rptr)); 5536 ASSERT((MBLKL(mp) >= sizeof (ipha_t))); 5537 5538 /* 5539 * Verify IP version. Anything other than IPv4 or IPv6 packet is sent 5540 * upstream. ICMPv6 is handled in tcp_icmp_error_ipv6. 5541 */ 5542 if (!(ira->ira_flags & IRAF_IS_IPV4)) { 5543 tcp_icmp_error_ipv6(tcp, mp, ira); 5544 return; 5545 } 5546 5547 /* Skip past the outer IP and ICMP headers */ 5548 iph_hdr_length = ira->ira_ip_hdr_length; 5549 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 5550 /* 5551 * If we don't have the correct outer IP header length 5552 * or if we don't have a complete inner IP header 5553 * drop it. 5554 */ 5555 if (iph_hdr_length < sizeof (ipha_t) || 5556 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 5557 noticmpv4: 5558 freemsg(mp); 5559 return; 5560 } 5561 ipha = (ipha_t *)&icmph[1]; 5562 5563 /* Skip past the inner IP and find the ULP header */ 5564 iph_hdr_length = IPH_HDR_LENGTH(ipha); 5565 tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length); 5566 /* 5567 * If we don't have the correct inner IP header length or if the ULP 5568 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR 5569 * bytes of TCP header, drop it. 5570 */ 5571 if (iph_hdr_length < sizeof (ipha_t) || 5572 ipha->ipha_protocol != IPPROTO_TCP || 5573 (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) { 5574 goto noticmpv4; 5575 } 5576 5577 seg_seq = ntohl(tcpha->tha_seq); 5578 switch (icmph->icmph_type) { 5579 case ICMP_DEST_UNREACHABLE: 5580 switch (icmph->icmph_code) { 5581 case ICMP_FRAGMENTATION_NEEDED: 5582 /* 5583 * Update Path MTU, then try to send something out. 5584 */ 5585 tcp_update_pmtu(tcp, B_TRUE); 5586 tcp_rexmit_after_error(tcp); 5587 break; 5588 case ICMP_PORT_UNREACHABLE: 5589 case ICMP_PROTOCOL_UNREACHABLE: 5590 switch (tcp->tcp_state) { 5591 case TCPS_SYN_SENT: 5592 case TCPS_SYN_RCVD: 5593 /* 5594 * ICMP can snipe away incipient 5595 * TCP connections as long as 5596 * seq number is same as initial 5597 * send seq number. 5598 */ 5599 if (seg_seq == tcp->tcp_iss) { 5600 (void) tcp_clean_death(tcp, 5601 ECONNREFUSED); 5602 } 5603 break; 5604 } 5605 break; 5606 case ICMP_HOST_UNREACHABLE: 5607 case ICMP_NET_UNREACHABLE: 5608 /* Record the error in case we finally time out. */ 5609 if (icmph->icmph_code == ICMP_HOST_UNREACHABLE) 5610 tcp->tcp_client_errno = EHOSTUNREACH; 5611 else 5612 tcp->tcp_client_errno = ENETUNREACH; 5613 if (tcp->tcp_state == TCPS_SYN_RCVD) { 5614 if (tcp->tcp_listener != NULL && 5615 tcp->tcp_listener->tcp_syn_defense) { 5616 /* 5617 * Ditch the half-open connection if we 5618 * suspect a SYN attack is under way. 5619 */ 5620 (void) tcp_clean_death(tcp, 5621 tcp->tcp_client_errno); 5622 } 5623 } 5624 break; 5625 default: 5626 break; 5627 } 5628 break; 5629 case ICMP_SOURCE_QUENCH: { 5630 /* 5631 * use a global boolean to control 5632 * whether TCP should respond to ICMP_SOURCE_QUENCH. 5633 * The default is false. 5634 */ 5635 if (tcp_icmp_source_quench) { 5636 /* 5637 * Reduce the sending rate as if we got a 5638 * retransmit timeout 5639 */ 5640 uint32_t npkt; 5641 5642 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / 5643 tcp->tcp_mss; 5644 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss; 5645 tcp->tcp_cwnd = tcp->tcp_mss; 5646 tcp->tcp_cwnd_cnt = 0; 5647 } 5648 break; 5649 } 5650 } 5651 freemsg(mp); 5652 } 5653 5654 /* 5655 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6 5656 * error messages passed up by IP. 5657 * Assumes that IP has pulled up all the extension headers as well 5658 * as the ICMPv6 header. 5659 */ 5660 static void 5661 tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira) 5662 { 5663 icmp6_t *icmp6; 5664 ip6_t *ip6h; 5665 uint16_t iph_hdr_length = ira->ira_ip_hdr_length; 5666 tcpha_t *tcpha; 5667 uint8_t *nexthdrp; 5668 uint32_t seg_seq; 5669 5670 /* 5671 * Verify that we have a complete IP header. 5672 */ 5673 ASSERT((MBLKL(mp) >= sizeof (ip6_t))); 5674 5675 icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length]; 5676 ip6h = (ip6_t *)&icmp6[1]; 5677 /* 5678 * Verify if we have a complete ICMP and inner IP header. 5679 */ 5680 if ((uchar_t *)&ip6h[1] > mp->b_wptr) { 5681 noticmpv6: 5682 freemsg(mp); 5683 return; 5684 } 5685 5686 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp)) 5687 goto noticmpv6; 5688 tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length); 5689 /* 5690 * Validate inner header. If the ULP is not IPPROTO_TCP or if we don't 5691 * have at least ICMP_MIN_TCP_HDR bytes of TCP header drop the 5692 * packet. 5693 */ 5694 if ((*nexthdrp != IPPROTO_TCP) || 5695 ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) { 5696 goto noticmpv6; 5697 } 5698 5699 seg_seq = ntohl(tcpha->tha_seq); 5700 switch (icmp6->icmp6_type) { 5701 case ICMP6_PACKET_TOO_BIG: 5702 /* 5703 * Update Path MTU, then try to send something out. 5704 */ 5705 tcp_update_pmtu(tcp, B_TRUE); 5706 tcp_rexmit_after_error(tcp); 5707 break; 5708 case ICMP6_DST_UNREACH: 5709 switch (icmp6->icmp6_code) { 5710 case ICMP6_DST_UNREACH_NOPORT: 5711 if (((tcp->tcp_state == TCPS_SYN_SENT) || 5712 (tcp->tcp_state == TCPS_SYN_RCVD)) && 5713 (seg_seq == tcp->tcp_iss)) { 5714 (void) tcp_clean_death(tcp, ECONNREFUSED); 5715 } 5716 break; 5717 case ICMP6_DST_UNREACH_ADMIN: 5718 case ICMP6_DST_UNREACH_NOROUTE: 5719 case ICMP6_DST_UNREACH_BEYONDSCOPE: 5720 case ICMP6_DST_UNREACH_ADDR: 5721 /* Record the error in case we finally time out. */ 5722 tcp->tcp_client_errno = EHOSTUNREACH; 5723 if (((tcp->tcp_state == TCPS_SYN_SENT) || 5724 (tcp->tcp_state == TCPS_SYN_RCVD)) && 5725 (seg_seq == tcp->tcp_iss)) { 5726 if (tcp->tcp_listener != NULL && 5727 tcp->tcp_listener->tcp_syn_defense) { 5728 /* 5729 * Ditch the half-open connection if we 5730 * suspect a SYN attack is under way. 5731 */ 5732 (void) tcp_clean_death(tcp, 5733 tcp->tcp_client_errno); 5734 } 5735 } 5736 5737 5738 break; 5739 default: 5740 break; 5741 } 5742 break; 5743 case ICMP6_PARAM_PROB: 5744 /* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */ 5745 if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER && 5746 (uchar_t *)ip6h + icmp6->icmp6_pptr == 5747 (uchar_t *)nexthdrp) { 5748 if (tcp->tcp_state == TCPS_SYN_SENT || 5749 tcp->tcp_state == TCPS_SYN_RCVD) { 5750 (void) tcp_clean_death(tcp, ECONNREFUSED); 5751 } 5752 break; 5753 } 5754 break; 5755 5756 case ICMP6_TIME_EXCEEDED: 5757 default: 5758 break; 5759 } 5760 freemsg(mp); 5761 } 5762 5763 /* 5764 * CALLED OUTSIDE OF SQUEUE! It can not follow any pointers that tcp might 5765 * change. But it can refer to fields like tcp_suna and tcp_snxt. 5766 * 5767 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP 5768 * error messages received by IP. The message is always received on the correct 5769 * tcp_t. 5770 */ 5771 /* ARGSUSED */ 5772 boolean_t 5773 tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6, 5774 ip_recv_attr_t *ira) 5775 { 5776 tcpha_t *tcpha = (tcpha_t *)arg2; 5777 uint32_t seq = ntohl(tcpha->tha_seq); 5778 tcp_t *tcp = connp->conn_tcp; 5779 5780 /* 5781 * TCP sequence number contained in payload of the ICMP error message 5782 * should be within the range SND.UNA <= SEG.SEQ < SND.NXT. Otherwise, 5783 * the message is either a stale ICMP error, or an attack from the 5784 * network. Fail the verification. 5785 */ 5786 if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt)) 5787 return (B_FALSE); 5788 5789 /* For "too big" we also check the ignore flag */ 5790 if (ira->ira_flags & IRAF_IS_IPV4) { 5791 ASSERT(icmph != NULL); 5792 if (icmph->icmph_type == ICMP_DEST_UNREACHABLE && 5793 icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED && 5794 tcp->tcp_tcps->tcps_ignore_path_mtu) 5795 return (B_FALSE); 5796 } else { 5797 ASSERT(icmp6 != NULL); 5798 if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG && 5799 tcp->tcp_tcps->tcps_ignore_path_mtu) 5800 return (B_FALSE); 5801 } 5802 return (B_TRUE); 5803 } 5804