/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/optcom.h>
#include <inet/ip.h>
#include <inet/ip_if.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>
#include <inet/ip_if.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections. The fusion of two local TCP endpoints occurs
 * at connection establishment time. Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful. If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.
 *
 * Synchronization is handled by squeue and the mutex tcp_non_sq_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue. This ensures that neither side
 * can disappear while the other side is still sending data. Flow
 * control information is manipulated outside the squeue, so the
 * tcp_non_sq_lock must be held when touching tcp_flow_stopped.
 */

/*
 * Setting this to false disables fusion altogether; loopback
 * connections will then go through the regular protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;

/*
 * Return true if this connection needs some IP functionality
 */
static boolean_t
tcp_loopback_needs_ip(tcp_t *tcp, netstack_t *ns)
{
	ipsec_stack_t	*ipss = ns->netstack_ipsec;

	/*
	 * If ire is not cached, do not use fusion
	 */
	if (tcp->tcp_connp->conn_ire_cache == NULL) {
		/*
		 * There is no need to hold conn_lock here because when called
		 * from tcp_fuse() there can be no window where conn_ire_cache
		 * can change. This is not true when called from
		 * tcp_fuse_output() as conn_ire_cache can become null just
		 * after the check. It will be necessary to recheck for a NULL
		 * conn_ire_cache in tcp_fuse_output() to avoid passing a
		 * stale ill pointer to FW_HOOKS.
		 */
		return (B_TRUE);
	}
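	/*
	 * IP options (a non-simple header) or IPsec policy in either
	 * direction means this connection needs full IP processing,
	 * as does a connection outside the LSO/MDT fast path.
	 */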
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		if (tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
	} else {
		if (tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
	}
	if (!CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED. It fuses a direct path between itself
 * and the active connect tcp such that the regular tcp processing
 * may be bypassed under allowable circumstances. Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns;
	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit q_hiwat of the listener tcp, but we can't
	 * really use tcp_listener since we get here after sending up
	 * T_CONN_IND and tcp_wput_accept() may be called independently,
	 * at which point tcp_listener is cleared; this is why we use
	 * tcp_saved_listener. The listener itself is guaranteed to be
	 * around until tcp_accept_finish() is called on this eager --
	 * this won't happen until we're done since we're inside the
	 * eager's perimeter now.
	 *
	 * We can also get called in the case where a connection needs
	 * to be re-fused. In this case tcp_saved_listener will be
	 * NULL but tcp_refuse will be true.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL || tcp->tcp_refuse);
	/*
	 * Lookup peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system. Zone check
	 * is applied accordingly for loopback address, but not for
	 * local address since we want fusion to happen across Zones.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcph, ipst);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcph, ipst);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same squeue
	 * as our conn and is not a raw socket. We also restrict fusion to
	 * endpoints of the same type (STREAMS or non-STREAMS). The squeue
	 * assignment of this eager tcp was done earlier at the time of SYN
	 * processing in ip_fanout_tcp{_v6}. Note that being on the same
	 * squeue by itself doesn't guarantee a safe condition to fuse, hence
	 * we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp) ||
	    IPCL_IS_NONSTR(connp) != IPCL_IS_NONSTR(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcps, tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Due to IRE changes, we and the peer might not agree on
	 * tcp_loopback. We bail in that case.
	 */
	if (!peer_tcp->tcp_loopback) {
		TCP_STAT(tcps, tcp_fusion_unqualified);
		CONN_DEC_REF(peer_connp);
		return;
	}
	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 * In particular we bail out for non-simple TCP/IP or if IPsec/
	 * IPQoS policy/kernel SSL exists. We also need to check if
	 * the connection is quiescent to cover the case when we are
	 * trying to re-enable fusion after IP observability is turned off.
	 */
	ns = tcps->tcps_netstack;
	ipst = ns->netstack_ip;

	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    !tcp_loopback_needs_ip(tcp, ns) &&
	    !tcp_loopback_needs_ip(peer_tcp, ns) &&
	    tcp->tcp_kssl_ent == NULL &&
	    tcp->tcp_xmit_head == NULL && peer_tcp->tcp_xmit_head == NULL &&
	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
		mblk_t *mp;
		queue_t *peer_rq = peer_tcp->tcp_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp));
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(tcp->tcp_kssl_ctx == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 */
		if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
			ASSERT(!IPCL_IS_NONSTR(peer_tcp->tcp_connp));

			if ((mp = allocb(1, BPRI_HI)) == NULL)
				goto failed;
			tcp->tcp_fused_sigurg_mp = mp;

			if ((mp = allocb(1, BPRI_HI)) == NULL)
				goto failed;
			peer_tcp->tcp_fused_sigurg_mp = mp;

			if ((mp = allocb(sizeof (struct stroptions),
			    BPRI_HI)) == NULL)
				goto failed;
		}

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints. Having
		 * them set to non-zero values means asking for trouble
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly setup.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * At this point we are a detached eager tcp and therefore
		 * don't have a queue assigned to us until accept happens.
		 * In the meantime the peer endpoint may immediately send
		 * us data as soon as fusion is finished, and we need to be
		 * able to flow control it in case it sends down a huge
		 * amount of data while we're still detached.
		 * To prevent that, we inherit the listener's recv_hiwater
		 * value; this is temporary since we'll repeat the process in
		 * tcp_accept_finish().
		 */
		if (!tcp->tcp_refuse) {
			(void) tcp_fuse_set_rcv_hiwat(tcp,
			    tcp->tcp_saved_listener->tcp_recv_hiwater);

			/*
			 * Set the stream head's write offset value to zero
			 * since we won't be needing any room for TCP/IP
			 * headers; tell it to not break up the writes (this
			 * would reduce the amount of work done by kmem); and
			 * configure our receive buffer. Note that we can only
			 * do this for the active connect tcp since our eager
			 * is still detached; it will be dealt with later in
			 * tcp_accept_finish().
			 */
			if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp)) {
				struct stroptions *stropt;

				DB_TYPE(mp) = M_SETOPTS;
				mp->b_wptr += sizeof (*stropt);

				stropt = (struct stroptions *)mp->b_rptr;
				stropt->so_flags = SO_MAXBLK|SO_WROFF|SO_HIWAT;
				stropt->so_maxblk = tcp_maxpsz_set(peer_tcp,
				    B_FALSE);
				stropt->so_wroff = 0;

				/*
				 * Record the stream head's high water mark
				 * for the peer endpoint; this is used for
				 * flow-control purposes in tcp_fuse_output().
				 */
				stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(
				    peer_tcp, peer_rq->q_hiwat);

				tcp->tcp_refuse = B_FALSE;
				peer_tcp->tcp_refuse = B_FALSE;
				/* Send the options up */
				putnext(peer_rq, mp);
			} else {
				struct sock_proto_props sopp;

				/* The peer is a non-STREAMS end point */
				ASSERT(IPCL_IS_TCP(peer_connp));

				(void) tcp_fuse_set_rcv_hiwat(tcp,
				    tcp->tcp_saved_listener->tcp_recv_hiwater);

				sopp.sopp_flags = SOCKOPT_MAXBLK |
				    SOCKOPT_WROFF | SOCKOPT_RCVHIWAT;
				sopp.sopp_maxblk = tcp_maxpsz_set(peer_tcp,
				    B_FALSE);
				sopp.sopp_wroff = 0;
				sopp.sopp_rxhiwat = tcp_fuse_set_rcv_hiwat(
				    peer_tcp, peer_tcp->tcp_recv_hiwater);
				(*peer_connp->conn_upcalls->su_set_proto_props)
				    (peer_connp->conn_upper_handle, &sopp);
			}
		}
		tcp->tcp_refuse = B_FALSE;
		peer_tcp->tcp_refuse = B_FALSE;
	} else {
		TCP_STAT(tcps, tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);

	/*
	 * Cancel any pending push timers.
	 */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	if (peer_tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(peer_tcp, peer_tcp->tcp_push_tid);
		peer_tcp->tcp_push_tid = 0;
	}

	/*
	 * Drain any pending data; note that in the case of a detached tcp,
	 * the draining will happen later after the tcp is unfused. For
	 * non-urgent data, this can be handled by the regular
	 * tcp_rcv_drain().
	 * If we have urgent data sitting in the receive list, we will
	 * need to send up a SIGURG signal first before draining the data.
	 * All of these will be handled by the code in tcp_fuse_rcv_drain()
	 * when called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
		    &tcp->tcp_fused_sigurg_mp);
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
		    &peer_tcp->tcp_fused_sigurg_mp);
	}

	/* Lift up any flow-control conditions */
	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&tcp->tcp_non_sq_lock);

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	/*
	 * Update th_seq and th_ack in the header template
	 */
	U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
	U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
	if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp)) {
		ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;

		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
}

/*
 * Fusion output routine used to handle urgent data sent by STREAMS based
 * endpoints. This routine is called by tcp_fuse_output() for handling
 * non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer. For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode. This is similar to the
	 * urgent data handling in the regular tcp. We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app. There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp. If the new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost.
	 * This behavior is emulated accordingly below, by removing any
	 * existing T_EXDATA_IND messages and essentially converting old
	 * urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcps, tcp_fusion_urg);
	BUMP_MIB(&tcps->tcps_mib, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove existing T_EXDATA_IND, keep the data which follows
		 * it and relink our list. Note that we don't modify the
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 * If we are modifying any member that can be changed outside the squeue,
 * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	boolean_t flow_stopped, peer_data_queued = B_FALSE;
	boolean_t urgent = (DB_TYPE(mp) != M_DATA);
	boolean_t push = B_TRUE;
	mblk_t *mp1 = mp;
	ill_t *ilp, *olp;
	ipif_t *iifp, *oifp;
	ipha_t *ipha;
	ip6_t *ip6h;
	tcph_t *tcph;
	uint_t ip_hdr_len;
	uint32_t seq;
	uint32_t recv_size = send_size;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	ip_stack_t	*ipst = ns->netstack_ip;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);

	/* If this connection requires IP, unfuse and use regular path */
	if (tcp_loopback_needs_ip(tcp, ns) ||
	    tcp_loopback_needs_ip(peer_tcp, ns) ||
	    IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst) ||
	    list_head(&ipst->ips_ipobs_cb_list) != NULL) {
		TCP_STAT(tcps, tcp_fusion_aborted);
		tcp->tcp_refuse = B_TRUE;
		peer_tcp->tcp_refuse = B_TRUE;

		bcopy(peer_tcp->tcp_tcph, &tcp->tcp_saved_tcph,
		    sizeof (tcph_t));
		bcopy(tcp->tcp_tcph, &peer_tcp->tcp_saved_tcph,
		    sizeof (tcph_t));
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			bcopy(peer_tcp->tcp_ipha, &tcp->tcp_saved_ipha,
			    sizeof (ipha_t));
			bcopy(tcp->tcp_ipha, &peer_tcp->tcp_saved_ipha,
			    sizeof (ipha_t));
		} else {
			bcopy(peer_tcp->tcp_ip6h, &tcp->tcp_saved_ip6h,
			    sizeof (ip6_t));
			bcopy(tcp->tcp_ip6h, &peer_tcp->tcp_saved_ip6h,
			    sizeof (ip6_t));
		}
		goto unfuse;
	}

	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for the M_PCSIG mblk.
	 */
	if (urgent) {
		tcp_fuse_output_urg(tcp, mp);

		mp1 = mp->b_cont;
	}

	if (tcp->tcp_ipversion == IPV4_VERSION &&
	    (HOOKS4_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS4_INTERESTED_LOOPBACK_OUT(ipst)) ||
	    tcp->tcp_ipversion == IPV6_VERSION &&
	    (HOOKS6_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS6_INTERESTED_LOOPBACK_OUT(ipst))) {
		/*
		 * Build the ip and tcp header to satisfy FW_HOOKS.
		 * We only build it when a hook is present.
		 */
		if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
		    tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
			/* If tcp_xmit_mp fails, use regular path */
			goto unfuse;

		/*
		 * The ipif and ill can be safely referenced under the
		 * protection of conn_lock - see head of function comment for
		 * conn_get_held_ipif(). It is necessary to check that both
		 * the ipif and ill can be looked up (i.e. not condemned). If
		 * not, bail out and unfuse this connection.
		 */
		mutex_enter(&peer_tcp->tcp_connp->conn_lock);
		if ((peer_tcp->tcp_connp->conn_ire_cache == NULL) ||
		    (peer_tcp->tcp_connp->conn_ire_cache->ire_marks &
		    IRE_MARK_CONDEMNED) ||
		    ((oifp = peer_tcp->tcp_connp->conn_ire_cache->ire_ipif)
		    == NULL) ||
		    (!IPIF_CAN_LOOKUP(oifp)) ||
		    ((olp = oifp->ipif_ill) == NULL) ||
		    (ill_check_and_refhold(olp) != 0)) {
			mutex_exit(&peer_tcp->tcp_connp->conn_lock);
			goto unfuse;
		}
		mutex_exit(&peer_tcp->tcp_connp->conn_lock);

		/* PFHooks: LOOPBACK_OUT */
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			ipha = (ipha_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip4__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_out_event,
			    ipst->ips_ipv4firewall_loopback_out,
			    NULL, olp, ipha, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip4__loopback__out__end, mblk_t *, mp1);
		} else {
			ip6h = (ip6_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip6__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_out_event,
			    ipst->ips_ipv6firewall_loopback_out,
			    NULL, olp, ip6h, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip6__loopback__out__end, mblk_t *, mp1);
		}
		ill_refrele(olp);

		if (mp1 == NULL)
			goto unfuse;

		/*
		 * The ipif and ill can be safely referenced under the
		 * protection of conn_lock - see head of function comment for
		 * conn_get_held_ipif(). It is necessary to check that both
		 * the ipif and ill can be looked up (i.e. not condemned). If
		 * not, bail out and unfuse this connection.
		 */
		mutex_enter(&tcp->tcp_connp->conn_lock);
		if ((tcp->tcp_connp->conn_ire_cache == NULL) ||
		    (tcp->tcp_connp->conn_ire_cache->ire_marks &
		    IRE_MARK_CONDEMNED) ||
		    ((iifp = tcp->tcp_connp->conn_ire_cache->ire_ipif)
		    == NULL) ||
		    (!IPIF_CAN_LOOKUP(iifp)) ||
		    ((ilp = iifp->ipif_ill) == NULL) ||
		    (ill_check_and_refhold(ilp) != 0)) {
			mutex_exit(&tcp->tcp_connp->conn_lock);
			goto unfuse;
		}
		mutex_exit(&tcp->tcp_connp->conn_lock);

		/* PFHooks: LOOPBACK_IN */
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			DTRACE_PROBE4(ip4__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_in_event,
			    ipst->ips_ipv4firewall_loopback_in,
			    ilp, NULL, ipha, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip4__loopback__in__end, mblk_t *, mp1);
			ill_refrele(ilp);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = IPH_HDR_LENGTH(ipha);
		} else {
			DTRACE_PROBE4(ip6__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_in_event,
			    ipst->ips_ipv6firewall_loopback_in,
			    ilp, NULL, ip6h, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip6__loopback__in__end, mblk_t *, mp1);
			ill_refrele(ilp);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = ip_hdr_length_v6(mp1, ip6h);
		}

		/* Data length might be changed by FW_HOOKS */
		tcph = (tcph_t *)&mp1->b_rptr[ip_hdr_len];
		seq = ABE32_TO_U32(tcph->th_seq);
		recv_size += seq - tcp->tcp_snxt;

		/*
		 * The message duplicated by tcp_xmit_mp is freed.
		 * Note: the original message passed in remains unchanged.
		 */
		freemsg(mp1);
	}

	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 *
	 * For non-STREAMS sockets we normally queue data directly in the
	 * socket by calling the su_recv upcall. However, if the peer is
	 * detached we use tcp_rcv_enqueue() instead. Queued data will be
	 * drained when the accept completes (in tcp_accept_finish()).
	 */
	if (IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		int error;
		int flags = 0;
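
		/*
		 * Urgent data: notify the peer socket through the
		 * su_signal_oob upcall and pass the data up with MSG_OOB set.
		 */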
		if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
		    (tcp->tcp_urg == tcp->tcp_snxt)) {
			flags = MSG_OOB;
			(*peer_tcp->tcp_connp->conn_upcalls->su_signal_oob)
			    (peer_tcp->tcp_connp->conn_upper_handle, 0);
			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
		}
		if ((*peer_tcp->tcp_connp->conn_upcalls->su_recv)(
		    peer_tcp->tcp_connp->conn_upper_handle, mp, recv_size,
		    flags, &error, &push) < 0) {
			ASSERT(error != EOPNOTSUPP);
			peer_data_queued = B_TRUE;
		}
	} else {
		if (IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
		    (tcp->tcp_valid_bits & TCP_URG_VALID) &&
		    (tcp->tcp_urg == tcp->tcp_snxt)) {
			/*
			 * Cannot deal with urgent pointers
			 * that arrive before the connection has been
			 * accept()ed.
			 */
			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
			freemsg(mp);
			return (B_TRUE);
		}

		tcp_rcv_enqueue(peer_tcp, mp, recv_size);

		/* In case it wrapped around and also to keep it constant */
		peer_tcp->tcp_rwnd += recv_size;
	}

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in either tcp_accept_finish(), tcp_unfuse(), or when data is
	 * consumed. If the peer endpoint is detached, we emulate streams
	 * flow control by checking the peer's queue size and high water
	 * mark; otherwise we simply use canputnext() to decide if we need
	 * to stop our flow.
	 *
	 * Since we are accessing our tcp_flow_stopped and might modify it,
	 * we need to take tcp->tcp_non_sq_lock.
	 */
	mutex_enter(&tcp->tcp_non_sq_lock);
	flow_stopped = tcp->tcp_flow_stopped;
	if ((TCP_IS_DETACHED(peer_tcp) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater)) ||
	    (!TCP_IS_DETACHED(peer_tcp) &&
	    !IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
	    !canputnext(peer_tcp->tcp_rq))) {
		peer_data_queued = B_TRUE;
	}

	if (!flow_stopped && (peer_data_queued ||
	    (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcps, tcp_fusion_flowctl);
		DTRACE_PROBE3(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt);
	} else if (flow_stopped && !peer_data_queued &&
	    (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater)) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
		flow_stopped = B_FALSE;
	}
	mutex_exit(&tcp->tcp_non_sq_lock);

	ipst->ips_loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += recv_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, send_size);

	BUMP_MIB(&tcps->tcps_mib, tcpInSegs);
	BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, send_size);

	BUMP_LOCAL(tcp->tcp_obsegs);
	BUMP_LOCAL(peer_tcp->tcp_ibsegs);

	DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t, send_size);

	if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or if
		 * we're not flow-controlled.
		 */
		if (urgent || !flow_stopped) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			/*
			 * For TLI-based streams, a thread in tcp_accept_swap()
			 * can race with us. That thread will ensure that the
			 * correct peer_tcp->tcp_rq is globally visible before
			 * peer_tcp->tcp_detached is visible as clear, but we
			 * must also ensure that the load of tcp_rq cannot be
			 * reordered to be before the tcp_detached check.
			 */
			membar_consumer();
			(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
			    NULL);
		}
	}
	return (B_TRUE);
unfuse:
	tcp_unfuse(tcp);
	return (B_FALSE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that couldn't
 * be sent upstream in the past.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
	conn_t	*connp = tcp->tcp_connp;

#ifdef DEBUG
	uint_t cnt = 0;
#endif
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	tcp_t		*peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(IPCL_IS_NONSTR(connp) || sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in the receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));

		tcp->tcp_fused_sigurg = B_FALSE;
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp,
			    tcp_push_timer,
			    MSEC_TO_TICK(
			    tcps->tcps_push_timer_interval));
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);

		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/* Drain the data */
	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		ASSERT(!IPCL_IS_NONSTR(connp));
		putnext(q, mp);
		TCP_STAT(tcps, tcp_fusion_putnext);
	}

#ifdef DEBUG
	ASSERT(cnt == tcp->tcp_rcv_cnt);
#endif
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;
	tcp->tcp_rwnd = tcp->tcp_recv_hiwater;

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped && (TCP_UNSENT_BYTES(peer_tcp) <=
	    peer_tcp->tcp_xmit_lowater)) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	return (B_TRUE);
}

/*
 * Calculate the size of the receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);

	/* Ensure that value is within the maximum upper bound */
	if (rwnd > tcps->tcps_max_buf)
		rwnd = tcps->tcps_max_buf;

	/* Obey the absolute minimum tcp receive high water mark */
	if (rwnd < tcps->tcps_sth_rcv_hiwat)
		rwnd = tcps->tcps_sth_rcv_hiwat;

	/*
	 * Round up to system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	tcp->tcp_fuse_rcv_hiwater = rwnd;
	return (rwnd);
}

/*
 * Calculate the maximum outstanding unread data block for a fused
 * tcp endpoint.
 */
int
tcp_fuse_maxpsz_set(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t sndbuf = tcp->tcp_xmit_hiwater;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for a more accurate flow-
	 * control accounting. Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to system page size due to the lack of
	 * TCP MSS concept in Fusion.
	 */
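	/*
	 * For example, an 8K send buffer paired with a larger peer receive
	 * buffer yields a maxpsz of 4K on a system with 4K or 8K pages.
	 */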
	if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
		maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

	return (maxpsz);
}

/*
 * Called to release flow control.
 */
void
tcp_fuse_backenable(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(!TCP_IS_DETACHED(tcp));
	ASSERT(tcp->tcp_connp->conn_sqp ==
	    peer_tcp->tcp_connp->conn_sqp);

	if (tcp->tcp_rcv_list != NULL)
		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp, NULL);

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped &&
	    (TCP_UNSENT_BYTES(peer_tcp) <=
	    peer_tcp->tcp_xmit_lowater)) {
		tcp_clrqfull(peer_tcp);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	TCP_STAT(tcp->tcp_tcps, tcp_fusion_backenabled);
}