/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/optcom.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>
#include <inet/ip_if.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections. The fusion of two local TCP endpoints occurs
 * at connection establishment time. Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful. If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved. After enqueueing
 * the data, the sender can either push (putnext) data up the receiver's
 * read queue; or the sender can simply return and let the receiver
 * retrieve the enqueued data via the synchronous streams entry point
 * tcp_fuse_rrw(). The latter path is taken if synchronous streams is
 * enabled (the default). It is disabled if sockfs no longer resides
 * directly on top of the tcp module due to a module insertion or removal.
 * It also needs to be temporarily disabled when sending urgent data
 * because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
 * by the strsock_proto() hook.
 *
 * Synchronization is handled by squeue and the mutex tcp_non_sq_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue. This ensures that neither side
 * can disappear while the other side is still sending data. By itself,
 * squeue is not sufficient for guaranteeing safety when synchronous
 * streams is enabled. The reason is that tcp_fuse_rrw() doesn't enter
 * the squeue and its access to tcp_rcv_list and other fusion-related
 * fields needs to be synchronized with the sender. tcp_non_sq_lock is
 * used for this purpose. When there is urgent data, the sender needs
 * to push the data up the receiver's streams read queue.
 * In order to avoid holding the tcp_non_sq_lock across putnext(), the
 * sender sets the peer tcp's tcp_fuse_syncstr_plugged bit and releases
 * tcp_non_sq_lock (see macro TCP_FUSE_SYNCSTR_PLUG_DRAIN()). If
 * tcp_fuse_rrw() enters after this point, it will see that synchronous
 * streams is plugged and will wait on tcp_fuse_plugcv. After the sender
 * has finished pushing up all urgent data, it will clear the
 * tcp_fuse_syncstr_plugged bit using TCP_FUSE_SYNCSTR_UNPLUG_DRAIN().
 * This will cause any threads waiting on tcp_fuse_plugcv to return
 * EBUSY, and in turn cause strget() to call getq_noenab() to dequeue
 * data from the stream head instead. Once the data on the stream head
 * has been consumed, tcp_fuse_rrw() may again be used to process
 * tcp_rcv_list. However, if TCP_FUSE_SYNCSTR_STOP() has been called,
 * all future calls to tcp_fuse_rrw() will return EBUSY, effectively
 * disabling synchronous streams.
 *
 * The following note applies only to the synchronous streams mode.
 *
 * Flow control is done by checking the size of receive buffer and
 * the number of data blocks, both set to different limits. This is
 * different from regular streams flow control where cumulative size
 * check dominates block count check -- streams queue high water mark
 * typically represents bytes. Each enqueue triggers notifications
 * to the receiving process; a build up of data blocks indicates a
 * slow receiver and the sender should be blocked or informed at the
 * earliest moment instead of further wasting system resources. In
 * effect, this is equivalent to limiting the number of outstanding
 * segments in flight.
 */

/*
 * Setting this to false means we disable fusion altogether and
 * loopback connections would go through the protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;

/*
 * Enabling this flag allows sockfs to retrieve data directly
 * from a fused tcp endpoint using the synchronous streams interface.
 */
boolean_t do_tcp_direct_sockfs = B_TRUE;

/*
 * This is the minimum number of outstanding writes allowed on
 * a synchronous streams-enabled receiving endpoint before the
 * sender gets flow-controlled. Setting this value to 0 means
 * that the data block limit is equivalent to the byte count
 * limit, which essentially disables the check.
 */
#define TCP_FUSION_RCV_UNREAD_MIN 8
uint_t tcp_fusion_rcv_unread_min = TCP_FUSION_RCV_UNREAD_MIN;

static void tcp_fuse_syncstr_enable(tcp_t *);
static void tcp_fuse_syncstr_disable(tcp_t *);
static boolean_t strrput_sig(queue_t *, boolean_t);

/*
 * Return true if this connection needs some IP functionality.
 */
static boolean_t
tcp_loopback_needs_ip(tcp_t *tcp, netstack_t *ns)
{
    ipsec_stack_t *ipss = ns->netstack_ipsec;

    /*
     * If the ire is not cached, do not use fusion.
     */
    if (tcp->tcp_connp->conn_ire_cache == NULL) {
        /*
         * There is no need to hold conn_lock here because when called
         * from tcp_fuse() there can be no window where conn_ire_cache
         * can change. This is not true when called from
         * tcp_fuse_output() as conn_ire_cache can become null just
         * after the check. It will be necessary to recheck for a NULL
         * conn_ire_cache in tcp_fuse_output() to avoid passing a
         * stale ill pointer to FW_HOOKS.
         */
        return (B_TRUE);
    }
    if (tcp->tcp_ipversion == IPV4_VERSION) {
        if (tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH)
            return (B_TRUE);
        if (CONN_OUTBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
            return (B_TRUE);
        if (CONN_INBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
            return (B_TRUE);
    } else {
        if (tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)
            return (B_TRUE);
        if (CONN_OUTBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
            return (B_TRUE);
        if (CONN_INBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
            return (B_TRUE);
    }
    if (!CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp))
        return (B_TRUE);
    return (B_FALSE);
}

/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED. It fuses a direct path between itself
 * and the active connect tcp such that regular tcp processing
 * may be bypassed under allowable circumstances. Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
    conn_t *peer_connp, *connp = tcp->tcp_connp;
    tcp_t *peer_tcp;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    netstack_t *ns;
    ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

    ASSERT(!tcp->tcp_fused);
    ASSERT(tcp->tcp_loopback);
    ASSERT(tcp->tcp_loopback_peer == NULL);
    /*
     * We need to inherit q_hiwat of the listener tcp, but we can't
     * really use tcp_listener since we get here after sending up
     * T_CONN_IND and tcp_wput_accept() may be called independently,
     * at which point tcp_listener is cleared; this is why we use
     * tcp_saved_listener. The listener itself is guaranteed to be
     * around until tcp_accept_finish() is called on this eager --
     * this won't happen until we're done since we're inside the
     * eager's perimeter now.
     *
     * We can also get called in the case where a connection needs
     * to be re-fused. In this case tcp_saved_listener will be
     * NULL but tcp_refuse will be true.
     */
    ASSERT(tcp->tcp_saved_listener != NULL || tcp->tcp_refuse);
    /*
     * Lookup peer endpoint; search for the remote endpoint having
     * the reversed address-port quadruplet in ESTABLISHED state,
     * which is guaranteed to be unique in the system. Zone check
     * is applied accordingly for loopback address, but not for
     * local address since we want fusion to happen across Zones.
     */
    if (tcp->tcp_ipversion == IPV4_VERSION) {
        peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
            (ipha_t *)iphdr, tcph, ipst);
    } else {
        peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
            (ip6_t *)iphdr, tcph, ipst);
    }

    /*
     * We can only proceed if the peer exists, resides in the same
     * squeue as our conn and is not a raw socket. The squeue assignment
     * of this eager tcp was done earlier at the time of SYN processing
     * in ip_fanout_tcp{_v6}. Note that sharing the same squeue by
     * itself doesn't guarantee a safe condition to fuse, hence we
     * perform additional tests below.
     */
    ASSERT(peer_connp == NULL || peer_connp != connp);
    if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
        !IPCL_IS_TCP(peer_connp)) {
        if (peer_connp != NULL) {
            TCP_STAT(tcps, tcp_fusion_unqualified);
            CONN_DEC_REF(peer_connp);
        }
        return;
    }
    peer_tcp = peer_connp->conn_tcp;    /* active connect tcp */

    ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
    ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
    ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

    /*
     * Fuse the endpoints; we perform further checks against both
     * tcp endpoints to ensure that a fusion is allowed to happen.
     * In particular we bail out for non-simple TCP/IP or if IPsec/
     * IPQoS policy/kernel SSL exists.
     */
    ns = tcps->tcps_netstack;
    ipst = ns->netstack_ip;

    if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
        !tcp_loopback_needs_ip(tcp, ns) &&
        !tcp_loopback_needs_ip(peer_tcp, ns) &&
        tcp->tcp_kssl_ent == NULL &&
        !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
        mblk_t *mp;
        struct stroptions *stropt;
        queue_t *peer_rq = peer_tcp->tcp_rq;

        ASSERT(!TCP_IS_DETACHED(peer_tcp) && peer_rq != NULL);
        ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
        ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
        ASSERT(tcp->tcp_kssl_ctx == NULL);

        /*
         * We need to drain data on both endpoints during unfuse.
         * If we need to send up SIGURG at the time of draining,
         * we want to be sure that an mblk is readily available.
         * This is why we pre-allocate the M_PCSIG mblks for both
         * endpoints which will only be used during/after unfuse.
         */
        if ((mp = allocb(1, BPRI_HI)) == NULL)
            goto failed;

        tcp->tcp_fused_sigurg_mp = mp;

        if ((mp = allocb(1, BPRI_HI)) == NULL)
            goto failed;

        peer_tcp->tcp_fused_sigurg_mp = mp;

        /* Allocate M_SETOPTS mblk */
        if ((mp = allocb(sizeof (*stropt), BPRI_HI)) == NULL)
            goto failed;

        /* If sodirect is enabled on either tcp or peer_tcp, disable it */
        if (tcp->tcp_sodirect != NULL) {
            mutex_enter(tcp->tcp_sodirect->sod_lockp);
            SOD_DISABLE(tcp->tcp_sodirect);
            mutex_exit(tcp->tcp_sodirect->sod_lockp);
            tcp->tcp_sodirect = NULL;
        }
        if (peer_tcp->tcp_sodirect != NULL) {
            mutex_enter(peer_tcp->tcp_sodirect->sod_lockp);
            SOD_DISABLE(peer_tcp->tcp_sodirect);
            mutex_exit(peer_tcp->tcp_sodirect->sod_lockp);
            peer_tcp->tcp_sodirect = NULL;
        }

        /* Fuse both endpoints */
        peer_tcp->tcp_loopback_peer = tcp;
        tcp->tcp_loopback_peer = peer_tcp;
        peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

        /*
         * We never use regular tcp paths in fusion and should
         * therefore clear tcp_unsent on both endpoints. Having
         * them set to non-zero values means asking for trouble
         * especially after unfuse, where we may end up sending
         * through regular tcp paths which expect xmit_list and
         * friends to be correctly setup.
         */
        peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

        tcp_timers_stop(tcp);
        tcp_timers_stop(peer_tcp);

        /*
         * At this point we are a detached eager tcp and therefore
         * don't have a queue assigned to us until accept happens.
         * In the mean time the peer endpoint may immediately send
         * us data as soon as fusion is finished, and we need to be
         * able to flow control it in case it sends down a huge amount
         * of data while we're still detached.
         * To prevent that we inherit the listener's q_hiwat value;
         * this is temporary since we'll repeat the process in
         * tcp_accept_finish().
         */
        if (!tcp->tcp_refuse) {
            (void) tcp_fuse_set_rcv_hiwat(tcp,
                tcp->tcp_saved_listener->tcp_rq->q_hiwat);

            /*
             * Set the stream head's write offset value to zero
             * since we won't be needing any room for TCP/IP
             * headers; tell it to not break up the writes (this
             * would reduce the amount of work done by kmem); and
             * configure our receive buffer. Note that we can only
             * do this for the active connect tcp since our eager
             * is still detached; it will be dealt with later in
             * tcp_accept_finish().
             */
            DB_TYPE(mp) = M_SETOPTS;
            mp->b_wptr += sizeof (*stropt);

            stropt = (struct stroptions *)mp->b_rptr;
            stropt->so_flags = SO_MAXBLK | SO_WROFF | SO_HIWAT;
            stropt->so_maxblk = tcp_maxpsz_set(peer_tcp, B_FALSE);
            stropt->so_wroff = 0;

            /*
             * Record the stream head's high water mark for
             * peer endpoint; this is used for flow-control
             * purposes in tcp_fuse_output().
             */
            stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(peer_tcp,
                peer_rq->q_hiwat);

            tcp->tcp_refuse = B_FALSE;
            peer_tcp->tcp_refuse = B_FALSE;
            /* Send the options up */
            putnext(peer_rq, mp);
        }
        tcp->tcp_refuse = B_FALSE;
        peer_tcp->tcp_refuse = B_FALSE;
    } else {
        TCP_STAT(tcps, tcp_fusion_unqualified);
    }
    CONN_DEC_REF(peer_connp);
    return;

failed:
    if (tcp->tcp_fused_sigurg_mp != NULL) {
        freeb(tcp->tcp_fused_sigurg_mp);
        tcp->tcp_fused_sigurg_mp = NULL;
    }
    if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
        freeb(peer_tcp->tcp_fused_sigurg_mp);
        peer_tcp->tcp_fused_sigurg_mp = NULL;
    }
    CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
    tcp_t *peer_tcp = tcp->tcp_loopback_peer;

    ASSERT(tcp->tcp_fused && peer_tcp != NULL);
    ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
    ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
    ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
    ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
    ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);

    /*
     * We disable synchronous streams, drain any queued data and
     * clear tcp_direct_sockfs. The synchronous streams entry
     * points will become no-ops after this point.
     */
    tcp_fuse_disable_pair(tcp, B_TRUE);

    /*
     * Update th_seq and th_ack in the header template.
     */
    U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
    U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
    U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
    U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);

    /* Unfuse the endpoints */
    peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
    peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
    freeb(peer_tcp->tcp_fused_sigurg_mp);
    freeb(tcp->tcp_fused_sigurg_mp);
    peer_tcp->tcp_fused_sigurg_mp = NULL;
    tcp->tcp_fused_sigurg_mp = NULL;
}

/*
 * Fusion output routine for urgent data. This routine is called by
 * tcp_fuse_output() for handling non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
    mblk_t *mp1;
    struct T_exdata_ind *tei;
    tcp_t *peer_tcp = tcp->tcp_loopback_peer;
    mblk_t *head, *prev_head = NULL;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    ASSERT(tcp->tcp_fused);
    ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
    ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
    ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
    ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

    /*
     * Urgent data arrives in the form of T_EXDATA_REQ from above.
     * Each occurrence denotes a new urgent pointer. For each new
     * urgent pointer we signal (SIGURG) the receiving app to indicate
     * that it needs to go into urgent mode. This is similar to the
     * urgent data handling in the regular tcp. We don't need to keep
     * track of where the urgent pointer is, because each T_EXDATA_REQ
     * "advances" the urgent pointer for us.
     *
     * The actual urgent data carried by T_EXDATA_REQ is then prepended
     * by a T_EXDATA_IND before being enqueued behind any existing data
     * destined for the receiving app. There is only a single urgent
     * pointer (out-of-band mark) for a given tcp. If the new urgent
     * data arrives before the receiving app reads some existing urgent
     * data, the previous marker is lost. This behavior is emulated
     * accordingly below, by removing any existing T_EXDATA_IND messages
     * and essentially converting old urgent data into non-urgent.
     */
    ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
    /* Let sender get out of urgent mode */
    tcp->tcp_valid_bits &= ~TCP_URG_VALID;

    /*
     * This flag indicates that a signal needs to be sent up.
     * This flag will only get cleared once SIGURG is delivered and
     * is not affected by the tcp_fused flag -- delivery will still
     * happen even after an endpoint is unfused, to handle the case
     * where the sending endpoint immediately closes/unfuses after
     * sending urgent data and the accept is not yet finished.
     */
    peer_tcp->tcp_fused_sigurg = B_TRUE;

    /* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
    DB_TYPE(mp) = M_PROTO;
    tei = (struct T_exdata_ind *)mp->b_rptr;
    tei->PRIM_type = T_EXDATA_IND;
    tei->MORE_flag = 0;
    mp->b_wptr = (uchar_t *)&tei[1];

    TCP_STAT(tcps, tcp_fusion_urg);
    BUMP_MIB(&tcps->tcps_mib, tcpOutUrg);

    head = peer_tcp->tcp_rcv_list;
    while (head != NULL) {
        /*
         * Remove existing T_EXDATA_IND, keep the data which follows
         * it and relink our list. Note that we don't modify the
         * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
         */
        if (DB_TYPE(head) != M_DATA) {
            mp1 = head;

            ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
            head = mp1->b_cont;
            mp1->b_cont = NULL;
            head->b_next = mp1->b_next;
            mp1->b_next = NULL;
            if (prev_head != NULL)
                prev_head->b_next = head;
            if (peer_tcp->tcp_rcv_list == mp1)
                peer_tcp->tcp_rcv_list = head;
            if (peer_tcp->tcp_rcv_last_head == mp1)
                peer_tcp->tcp_rcv_last_head = head;
            freeb(mp1);
        }
        prev_head = head;
        head = head->b_next;
    }
}

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 * If we are modifying any member that can be changed outside the squeue,
 * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
    tcp_t *peer_tcp = tcp->tcp_loopback_peer;
    uint_t max_unread;
    boolean_t flow_stopped, peer_data_queued = B_FALSE;
    boolean_t urgent = (DB_TYPE(mp) != M_DATA);
    mblk_t *mp1 = mp;
    ill_t *ilp, *olp;
    ipif_t *iifp, *oifp;
    ipha_t *ipha;
    ip6_t *ip6h;
    tcph_t *tcph;
    uint_t ip_hdr_len;
    uint32_t seq;
    uint32_t recv_size = send_size;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    netstack_t *ns = tcps->tcps_netstack;
    ip_stack_t *ipst = ns->netstack_ip;

    ASSERT(tcp->tcp_fused);
    ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
    ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
    ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
        DB_TYPE(mp) == M_PCPROTO);

    /* If this connection requires IP, unfuse and use regular path */
    if (tcp_loopback_needs_ip(tcp, ns) ||
        tcp_loopback_needs_ip(peer_tcp, ns) ||
        IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst) ||
        list_head(&ipst->ips_ipobs_cb_list) != NULL) {
        TCP_STAT(tcps, tcp_fusion_aborted);
        tcp->tcp_refuse = B_TRUE;
        peer_tcp->tcp_refuse = B_TRUE;

        bcopy(peer_tcp->tcp_tcph, &tcp->tcp_saved_tcph,
            sizeof (tcph_t));
        bcopy(tcp->tcp_tcph, &peer_tcp->tcp_saved_tcph,
            sizeof (tcph_t));
        if (tcp->tcp_ipversion == IPV4_VERSION) {
            bcopy(peer_tcp->tcp_ipha, &tcp->tcp_saved_ipha,
                sizeof (ipha_t));
            bcopy(tcp->tcp_ipha, &peer_tcp->tcp_saved_ipha,
                sizeof (ipha_t));
        } else {
            bcopy(peer_tcp->tcp_ip6h, &tcp->tcp_saved_ip6h,
                sizeof (ip6_t));
            bcopy(tcp->tcp_ip6h, &peer_tcp->tcp_saved_ip6h,
                sizeof (ip6_t));
        }
        goto unfuse;
    }

    if (send_size == 0) {
        freemsg(mp);
        return (B_TRUE);
    }
    max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;

    /*
     * Handle urgent data; we either send up SIGURG to the peer now
     * or do it later when we drain, in case the peer is detached
     * or if we're short of memory for M_PCSIG mblk.
     */
    if (urgent) {
        /*
         * We stop synchronous streams when we have urgent data
         * queued to prevent tcp_fuse_rrw() from pulling it. If
         * for some reason the urgent data can't be delivered
         * below, synchronous streams will remain stopped until
         * someone drains the tcp_rcv_list.
         */
        TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);
        tcp_fuse_output_urg(tcp, mp);

        mp1 = mp->b_cont;
    }

    if ((tcp->tcp_ipversion == IPV4_VERSION &&
        (HOOKS4_INTERESTED_LOOPBACK_IN(ipst) ||
        HOOKS4_INTERESTED_LOOPBACK_OUT(ipst))) ||
        (tcp->tcp_ipversion == IPV6_VERSION &&
        (HOOKS6_INTERESTED_LOOPBACK_IN(ipst) ||
        HOOKS6_INTERESTED_LOOPBACK_OUT(ipst)))) {
        /*
         * Build the ip and tcp header to satisfy FW_HOOKS.
         * We only build it when any hook is present.
         */
        if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
            tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
            /* If tcp_xmit_mp fails, use regular path */
            goto unfuse;

        /*
         * The ipif and ill can be safely referenced under the
         * protection of conn_lock - see head of function comment for
         * conn_get_held_ipif(). It is necessary to check that both
         * the ipif and ill can be looked up (i.e. not condemned). If
         * not, bail out and unfuse this connection.
         */
        mutex_enter(&peer_tcp->tcp_connp->conn_lock);
        if ((peer_tcp->tcp_connp->conn_ire_cache == NULL) ||
            (peer_tcp->tcp_connp->conn_ire_cache->ire_marks &
            IRE_MARK_CONDEMNED) ||
            ((oifp = peer_tcp->tcp_connp->conn_ire_cache->ire_ipif)
            == NULL) ||
            (!IPIF_CAN_LOOKUP(oifp)) ||
            ((olp = oifp->ipif_ill) == NULL) ||
            (ill_check_and_refhold(olp) != 0)) {
            mutex_exit(&peer_tcp->tcp_connp->conn_lock);
            goto unfuse;
        }
        mutex_exit(&peer_tcp->tcp_connp->conn_lock);

        /* PFHooks: LOOPBACK_OUT */
        if (tcp->tcp_ipversion == IPV4_VERSION) {
            ipha = (ipha_t *)mp1->b_rptr;

            DTRACE_PROBE4(ip4__loopback__out__start,
                ill_t *, NULL, ill_t *, olp,
                ipha_t *, ipha, mblk_t *, mp1);
            FW_HOOKS(ipst->ips_ip4_loopback_out_event,
                ipst->ips_ipv4firewall_loopback_out,
                NULL, olp, ipha, mp1, mp1, 0, ipst);
            DTRACE_PROBE1(ip4__loopback__out__end, mblk_t *, mp1);
        } else {
            ip6h = (ip6_t *)mp1->b_rptr;

            DTRACE_PROBE4(ip6__loopback__out__start,
                ill_t *, NULL, ill_t *, olp,
                ip6_t *, ip6h, mblk_t *, mp1);
            FW_HOOKS6(ipst->ips_ip6_loopback_out_event,
                ipst->ips_ipv6firewall_loopback_out,
                NULL, olp, ip6h, mp1, mp1, 0, ipst);
            DTRACE_PROBE1(ip6__loopback__out__end, mblk_t *, mp1);
        }
        ill_refrele(olp);

        if (mp1 == NULL)
            goto unfuse;

        /*
         * The ipif and ill can be safely referenced under the
         * protection of conn_lock - see head of function comment for
         * conn_get_held_ipif(). It is necessary to check that both
         * the ipif and ill can be looked up (i.e. not condemned). If
         * not, bail out and unfuse this connection.
         */
        mutex_enter(&tcp->tcp_connp->conn_lock);
        if ((tcp->tcp_connp->conn_ire_cache == NULL) ||
            (tcp->tcp_connp->conn_ire_cache->ire_marks &
            IRE_MARK_CONDEMNED) ||
            ((iifp = tcp->tcp_connp->conn_ire_cache->ire_ipif)
            == NULL) ||
            (!IPIF_CAN_LOOKUP(iifp)) ||
            ((ilp = iifp->ipif_ill) == NULL) ||
            (ill_check_and_refhold(ilp) != 0)) {
            mutex_exit(&tcp->tcp_connp->conn_lock);
            goto unfuse;
        }
        mutex_exit(&tcp->tcp_connp->conn_lock);

        /* PFHooks: LOOPBACK_IN */
        if (tcp->tcp_ipversion == IPV4_VERSION) {
            DTRACE_PROBE4(ip4__loopback__in__start,
                ill_t *, ilp, ill_t *, NULL,
                ipha_t *, ipha, mblk_t *, mp1);
            FW_HOOKS(ipst->ips_ip4_loopback_in_event,
                ipst->ips_ipv4firewall_loopback_in,
                ilp, NULL, ipha, mp1, mp1, 0, ipst);
            DTRACE_PROBE1(ip4__loopback__in__end, mblk_t *, mp1);
            ill_refrele(ilp);
            if (mp1 == NULL)
                goto unfuse;

            ip_hdr_len = IPH_HDR_LENGTH(ipha);
        } else {
            DTRACE_PROBE4(ip6__loopback__in__start,
                ill_t *, ilp, ill_t *, NULL,
                ip6_t *, ip6h, mblk_t *, mp1);
            FW_HOOKS6(ipst->ips_ip6_loopback_in_event,
                ipst->ips_ipv6firewall_loopback_in,
                ilp, NULL, ip6h, mp1, mp1, 0, ipst);
            DTRACE_PROBE1(ip6__loopback__in__end, mblk_t *, mp1);
            ill_refrele(ilp);
            if (mp1 == NULL)
                goto unfuse;

            ip_hdr_len = ip_hdr_length_v6(mp1, ip6h);
        }

        /* Data length might be changed by FW_HOOKS */
        tcph = (tcph_t *)&mp1->b_rptr[ip_hdr_len];
        seq = ABE32_TO_U32(tcph->th_seq);
        recv_size += seq - tcp->tcp_snxt;

        /*
         * The message duplicated by tcp_xmit_mp is freed.
         * Note: the original message passed in remains unchanged.
         */
        freemsg(mp1);
    }

    mutex_enter(&peer_tcp->tcp_non_sq_lock);
    /*
     * Wake up and signal the peer; it is okay to do this before
     * enqueueing because we are holding the lock.
     * One of the advantages of synchronous streams is the ability for
     * us to find out when the application performs a read on the
     * socket, by way of the tcp_fuse_rrw() entry point being called.
     * All data that gets enqueued onto the receiver is treated as if
     * it has arrived at the receiving endpoint, thus generating
     * SIGPOLL/SIGIO for an asynchronous socket just as in the
     * strrput() case. However, we only wake up the application when
     * necessary, i.e. during the first enqueue. When tcp_fuse_rrw()
     * is called it will send everything upstream.
     */
    if (peer_tcp->tcp_direct_sockfs && !urgent &&
        !TCP_IS_DETACHED(peer_tcp)) {
        /* Update poll events and send SIGPOLL/SIGIO if necessary */
        STR_WAKEUP_SENDSIG(STREAM(peer_tcp->tcp_rq),
            peer_tcp->tcp_rcv_list);
    }

    /*
     * Enqueue data into the peer's receive list; we may or may not
     * drain the contents depending on the conditions below.
     */
    tcp_rcv_enqueue(peer_tcp, mp, recv_size);

    /* In case it wrapped around and also to keep it constant */
    peer_tcp->tcp_rwnd += recv_size;
    /*
     * We increase the peer's unread message count here whilst still
     * holding its tcp_non_sq_lock. This ensures that the increment
     * occurs in the same lock acquisition perimeter as the enqueue.
     * Depending on lock hierarchy, we may release these locks, which
     * creates a window in which we can race with tcp_fuse_rrw().
     */
    peer_tcp->tcp_fuse_rcv_unread_cnt++;

    /*
     * Exercise flow-control when needed; we will get back-enabled
     * in either tcp_accept_finish(), tcp_unfuse(), or tcp_fuse_rrw().
     * If tcp_direct_sockfs is on or if the peer endpoint is detached,
     * we emulate streams flow control by checking the peer's queue
     * size and high water mark; otherwise we simply use canputnext()
     * to decide if we need to stop our flow.
     *
     * The outstanding unread data block check does not apply for a
     * detached receiver; this is to avoid unnecessary blocking of the
     * sender while the accept is currently in progress and is quite
     * similar to the regular tcp.
     */
    if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
        max_unread = UINT_MAX;

    /*
     * Since we are accessing our tcp_flow_stopped and might modify it,
     * we need to take tcp->tcp_non_sq_lock. The lock for the highest
     * address is held first. Dropping peer_tcp->tcp_non_sq_lock should
     * not be an issue here since we are within the squeue and the peer
     * won't disappear.
     */
    if (tcp > peer_tcp) {
        mutex_exit(&peer_tcp->tcp_non_sq_lock);
        mutex_enter(&tcp->tcp_non_sq_lock);
        mutex_enter(&peer_tcp->tcp_non_sq_lock);
    } else {
        mutex_enter(&tcp->tcp_non_sq_lock);
    }
    flow_stopped = tcp->tcp_flow_stopped;
    if (((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
        (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater ||
        peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread)) ||
        (!peer_tcp->tcp_direct_sockfs && !TCP_IS_DETACHED(peer_tcp) &&
        !canputnext(peer_tcp->tcp_rq))) {
        peer_data_queued = B_TRUE;
    }

    if (!flow_stopped && (peer_data_queued ||
        (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater))) {
        tcp_setqfull(tcp);
        flow_stopped = B_TRUE;
        TCP_STAT(tcps, tcp_fusion_flowctl);
        DTRACE_PROBE4(tcp__fuse__output__flowctl, tcp_t *, tcp,
            uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt,
            uint_t, peer_tcp->tcp_fuse_rcv_unread_cnt);
    } else if (flow_stopped && !peer_data_queued &&
        (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater)) {
        tcp_clrqfull(tcp);
        TCP_STAT(tcps, tcp_fusion_backenabled);
        flow_stopped = B_FALSE;
    }
    mutex_exit(&tcp->tcp_non_sq_lock);

    /*
     * If we are in synchronous streams mode and the peer read queue is
     * not full then schedule a push timer if one is not scheduled
     * already. This is needed for applications which use MSG_PEEK to
     * determine the number of bytes available before issuing a 'real'
     * read. It also makes flow control more deterministic, particularly
     * for smaller message sizes.
     */
    if (!urgent && peer_tcp->tcp_direct_sockfs &&
        peer_tcp->tcp_push_tid == 0 && !TCP_IS_DETACHED(peer_tcp) &&
        canputnext(peer_tcp->tcp_rq)) {
        peer_tcp->tcp_push_tid = TCP_TIMER(peer_tcp, tcp_push_timer,
            MSEC_TO_TICK(tcps->tcps_push_timer_interval));
    }
    mutex_exit(&peer_tcp->tcp_non_sq_lock);
    ipst->ips_loopback_packets++;
    tcp->tcp_last_sent_len = send_size;

    /* Need to adjust the following SNMP MIB-related variables */
    tcp->tcp_snxt += send_size;
    tcp->tcp_suna = tcp->tcp_snxt;
    peer_tcp->tcp_rnxt += recv_size;
    peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

    BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
    UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, send_size);

    BUMP_MIB(&tcps->tcps_mib, tcpInSegs);
    BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
    UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, send_size);

    BUMP_LOCAL(tcp->tcp_obsegs);
    BUMP_LOCAL(peer_tcp->tcp_ibsegs);

    DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t, send_size);

    if (!TCP_IS_DETACHED(peer_tcp)) {
        /*
         * Drain the peer's receive queue if it has urgent data or if
         * we're not flow-controlled. There is no need for draining
         * normal data when tcp_direct_sockfs is on because the peer
         * will pull the data via tcp_fuse_rrw().
         */
        if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
            ASSERT(peer_tcp->tcp_rcv_list != NULL);
            /*
             * For TLI-based streams, a thread in tcp_accept_swap()
             * can race with us. That thread will ensure that the
             * correct peer_tcp->tcp_rq is globally visible before
             * peer_tcp->tcp_detached is visible as clear, but we
             * must also ensure that the load of tcp_rq cannot be
             * reordered to be before the tcp_detached check.
             */
            membar_consumer();
            (void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
                NULL);
            /*
             * If synchronous streams was stopped above due
             * to the presence of urgent data, re-enable it.
             */
            if (urgent)
                TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);
        }
    }
    return (B_TRUE);
unfuse:
    tcp_unfuse(tcp);
    return (B_FALSE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that
 * couldn't be sent upstream earlier.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
    mblk_t *mp;
#ifdef DEBUG
    uint_t cnt = 0;
#endif
    tcp_stack_t *tcps = tcp->tcp_tcps;
    tcp_t *peer_tcp = tcp->tcp_loopback_peer;
    boolean_t sd_rd_eof = B_FALSE;

    ASSERT(tcp->tcp_loopback);
    ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
    ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
    ASSERT(sigurg_mpp != NULL || tcp->tcp_fused);

    /* No need for the push timer now, in case it was scheduled */
    if (tcp->tcp_push_tid != 0) {
        (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
        tcp->tcp_push_tid = 0;
    }
    /*
     * If there's urgent data sitting in the receive list and we didn't
     * get a chance to send up a SIGURG signal, make sure we send
     * it first before draining in order to ensure that SIOCATMARK
     * works properly.
     */
    if (tcp->tcp_fused_sigurg) {
        /*
         * sigurg_mpp is normally NULL, i.e. when we're still
         * fused and didn't get here because of tcp_unfuse().
         * In this case try hard to allocate the M_PCSIG mblk.
         */
        if (sigurg_mpp == NULL &&
            (mp = allocb(1, BPRI_HI)) == NULL &&
            (mp = allocb_tryhard(1)) == NULL) {
            /* Alloc failed; try again next time */
            tcp->tcp_push_tid = TCP_TIMER(tcp, tcp_push_timer,
                MSEC_TO_TICK(tcps->tcps_push_timer_interval));
            return (B_TRUE);
        } else if (sigurg_mpp != NULL) {
            /*
             * Use the supplied M_PCSIG mblk; it means we're
             * either unfused or in the process of unfusing,
             * and the drain must happen now.
             */
            mp = *sigurg_mpp;
            *sigurg_mpp = NULL;
        }
        ASSERT(mp != NULL);

        tcp->tcp_fused_sigurg = B_FALSE;
        /* Send up the signal */
        DB_TYPE(mp) = M_PCSIG;
        *mp->b_wptr++ = (uchar_t)SIGURG;
        putnext(q, mp);
        /*
         * Let the regular tcp_rcv_drain() path handle
         * draining the data if we're no longer fused.
         */
        if (!tcp->tcp_fused)
            return (B_FALSE);
    }

    /*
     * In the synchronous streams case, we generate SIGPOLL/SIGIO for
     * each M_DATA that gets enqueued onto the receiver. At this point
     * we are about to drain any queued data via putnext(). In order
     * to avoid extraneous signal generation from strrput(), we set
     * the STRGETINPROG flag at the stream head prior to the draining
     * and restore it afterwards. This masks out signal generation only
     * for M_DATA messages and does not affect urgent data. We only do
     * this if the STREOF flag is not set, which can happen if the
     * application shuts down the read side of a stream. In this case
     * we simply free these messages to approximate the flushq behavior
     * which normally occurs when STREOF is on the stream head read queue.
     */
    if (tcp->tcp_direct_sockfs)
        sd_rd_eof = strrput_sig(q, B_FALSE);

    /* Drain the data */
    while ((mp = tcp->tcp_rcv_list) != NULL) {
        tcp->tcp_rcv_list = mp->b_next;
        mp->b_next = NULL;
#ifdef DEBUG
        cnt += msgdsize(mp);
#endif
        if (sd_rd_eof) {
            freemsg(mp);
        } else {
            putnext(q, mp);
            TCP_STAT(tcps, tcp_fusion_putnext);
        }
    }

    if (tcp->tcp_direct_sockfs && !sd_rd_eof)
        (void) strrput_sig(q, B_TRUE);

    ASSERT(cnt == tcp->tcp_rcv_cnt);
    tcp->tcp_rcv_last_head = NULL;
    tcp->tcp_rcv_last_tail = NULL;
    tcp->tcp_rcv_cnt = 0;
    tcp->tcp_fuse_rcv_unread_cnt = 0;
    tcp->tcp_rwnd = q->q_hiwat;

    if (peer_tcp->tcp_flow_stopped && (TCP_UNSENT_BYTES(peer_tcp) <=
        peer_tcp->tcp_xmit_lowater)) {
        tcp_clrqfull(peer_tcp);
        TCP_STAT(tcps, tcp_fusion_backenabled);
    }

    return (B_TRUE);
}

/*
 * Synchronous stream entry point for sockfs to retrieve
 * data directly from tcp_rcv_list.
 * tcp_fuse_rrw() might end up modifying the peer's tcp_flow_stopped,
 * for which it must take the peer's tcp_non_sq_lock as well before
 * making any change. The order of taking the locks is based on
 * the TCP pointer itself. Before we get the peer we need to take
 * our tcp_non_sq_lock so that the peer doesn't disappear. However,
 * we cannot drop the lock if we have to grab the peer's lock (because
 * of ordering), since the peer might disappear in the interim. So,
 * we take our tcp_non_sq_lock, get the peer, increment the ref on the
 * peer's conn, drop all the locks and then take the tcp_non_sq_lock in
 * the desired order. Incrementing the conn ref on the peer means that
 * the peer won't disappear when we drop our tcp_non_sq_lock.
 */
int
tcp_fuse_rrw(queue_t *q, struiod_t *dp)
{
    tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
    mblk_t *mp;
    tcp_t *peer_tcp;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    mutex_enter(&tcp->tcp_non_sq_lock);

    /*
     * If tcp_fuse_syncstr_plugged is set, then another thread is moving
     * the underlying data to the stream head. We need to wait until it's
     * done, then return EBUSY so that strget() will dequeue data from the
     * stream head to ensure data is drained in-order.
     */
plugged:
    if (tcp->tcp_fuse_syncstr_plugged) {
        do {
            cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_non_sq_lock);
        } while (tcp->tcp_fuse_syncstr_plugged);

        mutex_exit(&tcp->tcp_non_sq_lock);
        TCP_STAT(tcps, tcp_fusion_rrw_plugged);
        TCP_STAT(tcps, tcp_fusion_rrw_busy);
        return (EBUSY);
    }

    peer_tcp = tcp->tcp_loopback_peer;

    /*
     * If someone has turned off tcp_direct_sockfs or if synchronous
     * streams is stopped, we return EBUSY. This causes strget() to
     * dequeue data from the stream head instead.
     */
    if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
        mutex_exit(&tcp->tcp_non_sq_lock);
        TCP_STAT(tcps, tcp_fusion_rrw_busy);
        return (EBUSY);
    }

    /*
     * Grab the locks in order. The highest addressed tcp is locked
     * first. We don't do this within the tcp_rcv_list check since if
     * we have to drop the lock, for ordering, then the tcp_rcv_list
     * could change.
     */
    if (peer_tcp > tcp) {
        CONN_INC_REF(peer_tcp->tcp_connp);
        mutex_exit(&tcp->tcp_non_sq_lock);
        mutex_enter(&peer_tcp->tcp_non_sq_lock);
        mutex_enter(&tcp->tcp_non_sq_lock);
        /*
         * This might have changed in the interim.
         * Once the read-side tcp_non_sq_lock is dropped above
         * anything can happen; we need to check all the
         * known conditions again once we reacquire the
         * read-side tcp_non_sq_lock.
         */
        if (tcp->tcp_fuse_syncstr_plugged) {
            mutex_exit(&peer_tcp->tcp_non_sq_lock);
            CONN_DEC_REF(peer_tcp->tcp_connp);
            goto plugged;
        }
        if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
            mutex_exit(&tcp->tcp_non_sq_lock);
            mutex_exit(&peer_tcp->tcp_non_sq_lock);
            CONN_DEC_REF(peer_tcp->tcp_connp);
            TCP_STAT(tcps, tcp_fusion_rrw_busy);
            return (EBUSY);
        }
        CONN_DEC_REF(peer_tcp->tcp_connp);
    } else {
        mutex_enter(&peer_tcp->tcp_non_sq_lock);
    }

    if ((mp = tcp->tcp_rcv_list) != NULL) {

        DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
            uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);

        tcp->tcp_rcv_list = NULL;
        TCP_STAT(tcps, tcp_fusion_rrw_msgcnt);

        /*
         * At this point nothing should be left in tcp_rcv_list.
         * The only possible case where we would have a chain of
         * b_next-linked messages is urgent data, but we wouldn't
         * be here if that's true since urgent data is delivered
         * via putnext() and synchronous streams is stopped until
         * tcp_fuse_rcv_drain() is finished.
         */
        ASSERT(DB_TYPE(mp) == M_DATA && mp->b_next == NULL);

        tcp->tcp_rcv_last_head = NULL;
        tcp->tcp_rcv_last_tail = NULL;
        tcp->tcp_rcv_cnt = 0;
        tcp->tcp_fuse_rcv_unread_cnt = 0;

        if (peer_tcp->tcp_flow_stopped &&
            (TCP_UNSENT_BYTES(peer_tcp) <=
            peer_tcp->tcp_xmit_lowater)) {
            tcp_clrqfull(peer_tcp);
            TCP_STAT(tcps, tcp_fusion_backenabled);
        }
    }
    mutex_exit(&peer_tcp->tcp_non_sq_lock);
    /*
     * Either we just dequeued everything or we get here from sockfs
     * and have nothing to return; in this case clear RSLEEP.
     */
    ASSERT(tcp->tcp_rcv_last_head == NULL);
    ASSERT(tcp->tcp_rcv_last_tail == NULL);
    ASSERT(tcp->tcp_rcv_cnt == 0);
    ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
    STR_WAKEUP_CLEAR(STREAM(q));

    mutex_exit(&tcp->tcp_non_sq_lock);
    dp->d_mp = mp;
    return (0);
}

/*
 * Synchronous stream entry point used by certain ioctls to retrieve
 * information about or peek into the tcp_rcv_list.
 */
int
tcp_fuse_rinfop(queue_t *q, infod_t *dp)
{
    tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
    mblk_t *mp;
    uint_t cmd = dp->d_cmd;
    int res = 0;
    int error = 0;
    struct stdata *stp = STREAM(q);

    mutex_enter(&tcp->tcp_non_sq_lock);
    /* If shutdown on read has happened, return nothing */
    mutex_enter(&stp->sd_lock);
    if (stp->sd_flag & STREOF) {
        mutex_exit(&stp->sd_lock);
        goto done;
    }
    mutex_exit(&stp->sd_lock);

    /*
     * It is OK not to return an answer if tcp_rcv_list is
     * currently not accessible.
     */
    if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped ||
        tcp->tcp_fuse_syncstr_plugged || (mp = tcp->tcp_rcv_list) == NULL)
        goto done;

    if (cmd & INFOD_COUNT) {
        /*
         * We have at least one message and
         * can return only one at a time.
         */
        dp->d_count++;
        res |= INFOD_COUNT;
    }
    if (cmd & INFOD_BYTES) {
        /*
         * Return size of all data messages.
         */
        dp->d_bytes += tcp->tcp_rcv_cnt;
        res |= INFOD_BYTES;
    }
    if (cmd & INFOD_FIRSTBYTES) {
        /*
         * Return size of first data message.
         */
        dp->d_bytes = msgdsize(mp);
        res |= INFOD_FIRSTBYTES;
        dp->d_cmd &= ~INFOD_FIRSTBYTES;
    }
    if (cmd & INFOD_COPYOUT) {
        mblk_t *mp1;
        int n;

        if (DB_TYPE(mp) == M_DATA) {
            mp1 = mp;
        } else {
            mp1 = mp->b_cont;
            ASSERT(mp1 != NULL);
        }

        /*
         * Return data contents of first message.
         */
        ASSERT(DB_TYPE(mp1) == M_DATA);
        while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
            n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
            if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
                UIO_READ, dp->d_uiop)) != 0) {
                goto done;
            }
            mp1 = mp1->b_cont;
        }
        res |= INFOD_COPYOUT;
        dp->d_cmd &= ~INFOD_COPYOUT;
    }
done:
    mutex_exit(&tcp->tcp_non_sq_lock);

    dp->d_res |= res;

    return (error);
}

/*
 * Enable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_enable(tcp_t *tcp)
{
    queue_t *rq = tcp->tcp_rq;
    struct stdata *stp = STREAM(rq);

    /* We can only enable synchronous streams for sockfs mode */
    tcp->tcp_direct_sockfs = tcp->tcp_issocket && do_tcp_direct_sockfs;

    if (!tcp->tcp_direct_sockfs)
        return;

    mutex_enter(&stp->sd_lock);
    mutex_enter(QLOCK(rq));

    /*
     * We replace our q_qinfo with one that has the qi_rwp entry point.
     * Clear SR_SIGALLDATA because we generate the equivalent signal(s)
     * for every enqueued data in tcp_fuse_output().
     */
    rq->q_qinfo = &tcp_loopback_rinit;
    rq->q_struiot = tcp_loopback_rinit.qi_struiot;
    stp->sd_struiordq = rq;
    stp->sd_rput_opt &= ~SR_SIGALLDATA;

    mutex_exit(QLOCK(rq));
    mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_disable(tcp_t *tcp)
{
    queue_t *rq = tcp->tcp_rq;
    struct stdata *stp = STREAM(rq);

    if (!tcp->tcp_direct_sockfs)
        return;

    mutex_enter(&stp->sd_lock);
    mutex_enter(QLOCK(rq));

    /*
     * Reset q_qinfo to point to the default tcp entry points.
     * Also restore SR_SIGALLDATA so that strrput() can generate
     * the signals again for future M_DATA messages.
     */
    rq->q_qinfo = &tcp_rinitv4;    /* No open - same as rinitv6 */
    rq->q_struiot = tcp_rinitv4.qi_struiot;
    stp->sd_struiordq = NULL;
    stp->sd_rput_opt |= SR_SIGALLDATA;
    tcp->tcp_direct_sockfs = B_FALSE;

    mutex_exit(QLOCK(rq));
    mutex_exit(&stp->sd_lock);
}

/*
 * Enable synchronous streams on a pair of fused tcp endpoints.
 */
void
tcp_fuse_syncstr_enable_pair(tcp_t *tcp)
{
    tcp_t *peer_tcp = tcp->tcp_loopback_peer;

    ASSERT(tcp->tcp_fused);
    ASSERT(peer_tcp != NULL);

    tcp_fuse_syncstr_enable(tcp);
    tcp_fuse_syncstr_enable(peer_tcp);
}

/*
 * Used to enable/disable signal generation at the stream head. We already
 * generated the signal(s) for these messages when they were enqueued on the
 * receiver. We also check if STREOF is set here.
 * If it is, we return true and let the caller decide what to do.
 */
static boolean_t
strrput_sig(queue_t *q, boolean_t on)
{
    struct stdata *stp = STREAM(q);

    mutex_enter(&stp->sd_lock);
    if (stp->sd_flag & STREOF) {
        mutex_exit(&stp->sd_lock);
        return (B_TRUE);
    }
    if (on)
        stp->sd_flag &= ~STRGETINPROG;
    else
        stp->sd_flag |= STRGETINPROG;
    mutex_exit(&stp->sd_lock);

    return (B_FALSE);
}

/*
 * Disable synchronous streams on a pair of fused tcp endpoints and drain
 * any queued data; called either during unfuse or upon transitioning from
 * a socket to a stream endpoint due to _SIOCSOCKFALLBACK.
 */
void
tcp_fuse_disable_pair(tcp_t *tcp, boolean_t unfusing)
{
    tcp_t *peer_tcp = tcp->tcp_loopback_peer;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    ASSERT(tcp->tcp_fused);
    ASSERT(peer_tcp != NULL);

    /*
     * Force any tcp_fuse_rrw() calls to block until we've moved the data
     * onto the stream head.
     */
    TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp);
    TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);

    /*
     * Cancel any pending push timers.
     */
    if (tcp->tcp_push_tid != 0) {
        (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
        tcp->tcp_push_tid = 0;
    }
    if (peer_tcp->tcp_push_tid != 0) {
        (void) TCP_TIMER_CANCEL(peer_tcp, peer_tcp->tcp_push_tid);
        peer_tcp->tcp_push_tid = 0;
    }

    /*
     * Drain any pending data; the detached check is needed because
     * we may be called as a result of a tcp_unfuse() triggered by
     * tcp_fuse_output(). Note that in case of a detached tcp, the
     * draining will happen later after the tcp is unfused. For non-
     * urgent data, this can be handled by the regular tcp_rcv_drain().
     * If we have urgent data sitting in the receive list, we will
     * need to send up a SIGURG signal first before draining the data.
     * All of these will be handled by the code in tcp_fuse_rcv_drain()
     * when called from tcp_rcv_drain().
     */
    if (!TCP_IS_DETACHED(tcp)) {
        (void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
            (unfusing ? &tcp->tcp_fused_sigurg_mp : NULL));
    }
    if (!TCP_IS_DETACHED(peer_tcp)) {
        (void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
            (unfusing ? &peer_tcp->tcp_fused_sigurg_mp : NULL));
    }

    /*
     * Make all current and future tcp_fuse_rrw() calls fail with EBUSY.
     * To ensure threads don't sneak past the checks in tcp_fuse_rrw(),
     * a given stream must be stopped prior to being unplugged (but the
     * ordering of operations between the streams is unimportant).
     */
    TCP_FUSE_SYNCSTR_STOP(tcp);
    TCP_FUSE_SYNCSTR_STOP(peer_tcp);
    TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp);
    TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);

    /* Lift up any flow-control conditions */
    if (tcp->tcp_flow_stopped) {
        tcp_clrqfull(tcp);
        TCP_STAT(tcps, tcp_fusion_backenabled);
    }
    if (peer_tcp->tcp_flow_stopped) {
        tcp_clrqfull(peer_tcp);
        TCP_STAT(tcps, tcp_fusion_backenabled);
    }

    /* Disable synchronous streams */
    tcp_fuse_syncstr_disable(tcp);
    tcp_fuse_syncstr_disable(peer_tcp);
}

/*
 * Calculate the size of receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
    tcp_stack_t *tcps = tcp->tcp_tcps;

    ASSERT(tcp->tcp_fused);

    /* Ensure that value is within the maximum upper bound */
    if (rwnd > tcps->tcps_max_buf)
        rwnd = tcps->tcps_max_buf;

    /* Obey the absolute minimum tcp receive high water mark */
    if (rwnd < tcps->tcps_sth_rcv_hiwat)
        rwnd = tcps->tcps_sth_rcv_hiwat;

    /*
     * Round up to system page size in case SO_RCVBUF is modified
     * after SO_SNDBUF; the latter is also similarly rounded up.
     */
    rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
    tcp->tcp_fuse_rcv_hiwater = rwnd;
    return (rwnd);
}

/*
 * Calculate our maxpsz and the peer's limit on outstanding unread
 * data blocks for a fused tcp endpoint.
 */
int
tcp_fuse_maxpsz_set(tcp_t *tcp)
{
    tcp_t *peer_tcp = tcp->tcp_loopback_peer;
    uint_t sndbuf = tcp->tcp_xmit_hiwater;
    uint_t maxpsz = sndbuf;

    ASSERT(tcp->tcp_fused);
    ASSERT(peer_tcp != NULL);
    ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
    /*
     * In the fused loopback case, we want the stream head to split
     * up larger writes into smaller chunks for a more accurate flow-
     * control accounting. Our maxpsz is half of the sender's send
     * buffer or the receiver's receive buffer, whichever is smaller.
     * We round up the buffer to system page size due to the lack of
     * TCP MSS concept in Fusion.
     */
    if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
        maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
    maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

    /*
     * Calculate the peer's limit for the number of outstanding unread
     * data blocks. This is the number of data blocks that are allowed
     * to reside in the receiver's queue before the sender gets flow
     * controlled. It is used only in the synchronous streams mode as
     * a way to throttle the sender when it performs consecutive writes
     * faster than can be read. The value is derived from SO_SNDBUF in
     * order to give the sender some control; we divide it by a large
     * value (16KB) to produce a fairly low initial limit.
     */
    if (tcp_fusion_rcv_unread_min == 0) {
        /* A value of 0 means that we disable the check */
        peer_tcp->tcp_fuse_rcv_unread_hiwater = 0;
    } else {
        peer_tcp->tcp_fuse_rcv_unread_hiwater =
            MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min);
    }
    return (maxpsz);
}