/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections.  The fusion of two local TCP endpoints occurs
 * at connection establishment time.  Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful.  If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.  After enqueueing
 * the data, the sender can either push (putnext) the data up the
 * receiver's read queue, or simply return and let the receiver retrieve
 * the enqueued data via the synchronous streams entry point
 * tcp_fuse_rrw().  The latter path is taken if synchronous streams is
 * enabled (the default).  It is disabled if sockfs no longer resides
 * directly on top of the tcp module due to a module insertion or removal.
 * It also needs to be temporarily disabled when sending urgent data
 * because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
 * by the strsock_proto() hook.
 *
 * Synchronization is handled by squeue and the mutex tcp_non_sq_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue.  This ensures that neither side
 * can disappear while the other side is still sending data.  By itself,
 * squeue is not sufficient for guaranteeing safety when synchronous
 * streams is enabled.  The reason is that tcp_fuse_rrw() doesn't enter
 * the squeue and its access to tcp_rcv_list and other fusion-related
 * fields needs to be synchronized with the sender.  tcp_non_sq_lock is
 * used for this purpose.  When there is urgent data, the sender needs
 * to push the data up the receiver's streams read queue.  In order to
 * avoid holding the tcp_non_sq_lock across putnext(), the sender sets
 * the peer tcp's tcp_fuse_syncstr_plugged bit and releases tcp_non_sq_lock
 * (see macro TCP_FUSE_SYNCSTR_PLUG_DRAIN()).  If tcp_fuse_rrw() enters
 * after this point, it will see that synchronous streams is plugged and
 * will wait on tcp_fuse_plugcv.  After the sender has finished pushing up
 * all urgent data, it will clear the tcp_fuse_syncstr_plugged bit using
 * TCP_FUSE_SYNCSTR_UNPLUG_DRAIN().  This will cause any threads waiting
 * on tcp_fuse_plugcv to return EBUSY, and in turn cause strget() to call
 * getq_noenab() to dequeue data from the stream head instead.  Once the
 * data on the stream head has been consumed, tcp_fuse_rrw() may again
 * be used to process tcp_rcv_list.  However, if TCP_FUSE_SYNCSTR_STOP()
 * has been called, all future calls to tcp_fuse_rrw() will return EBUSY,
 * effectively disabling synchronous streams.
 *
 * The following note applies only to the synchronous streams mode.
 *
 * Flow control is done by checking the size of receive buffer and
 * the number of data blocks, both set to different limits.  This is
 * different from regular streams flow control where cumulative size
 * check dominates block count check -- streams queue high water mark
 * typically represents bytes.  Each enqueue triggers notifications
 * to the receiving process; a build-up of data blocks indicates a
 * slow receiver and the sender should be blocked or informed at the
 * earliest moment instead of further wasting system resources.  In
 * effect, this is equivalent to limiting the number of outstanding
 * segments in flight.
 */
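
/*
 * A worked example of the dual-limit flow control described above (the
 * numbers are illustrative only and are not taken from this file): with
 * tcp_fuse_rcv_hiwater at 128K bytes and tcp_fuse_rcv_unread_hiwater at
 * 8 blocks, a sender issuing back-to-back 100-byte writes that the peer
 * never reads is flow-controlled after the 8th enqueue (only 800 bytes
 * queued), well before the byte limit is reached; a sender issuing 64K
 * writes instead trips the byte limit on the second enqueue.  Either way
 * the sender is stopped early rather than being allowed to pile up mblks
 * on a receiver that isn't reading.
 */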

/*
 * Setting this to false means we disable fusion altogether and
 * loopback connections would go through the protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;

/*
 * Enabling this flag allows sockfs to retrieve data directly
 * from a fused tcp endpoint using the synchronous streams interface.
 */
boolean_t do_tcp_direct_sockfs = B_TRUE;

/*
 * This is the minimum number of outstanding writes allowed on
 * a synchronous streams-enabled receiving endpoint before the
 * sender gets flow-controlled.  Setting this value to 0 means
 * that the data block limit is equivalent to the byte count
 * limit, which essentially disables the check.
 */
#define	TCP_FUSION_RCV_UNREAD_MIN	8
uint_t tcp_fusion_rcv_unread_min = TCP_FUSION_RCV_UNREAD_MIN;
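
/*
 * Tuning note (illustrative only, not part of the original code):
 * assuming this file is built into the ip module, as on
 * OpenSolaris-derived systems, an administrator could turn the
 * features above off from /etc/system, e.g.:
 *
 *	set ip:do_tcp_fusion = 0
 *	set ip:do_tcp_direct_sockfs = 0
 *	set ip:tcp_fusion_rcv_unread_min = 0
 *
 * /etc/system changes take effect at the next boot; a live system
 * can be changed with mdb -kw instead.
 */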

static void	tcp_fuse_syncstr_enable(tcp_t *);
static void	tcp_fuse_syncstr_disable(tcp_t *);
static void	strrput_sig(queue_t *, boolean_t);

/*
 * Return true if this connection needs some IP functionality
 */
static boolean_t
tcp_loopback_needs_ip(tcp_t *tcp, netstack_t *ns)
{
	ipsec_stack_t	*ipss = ns->netstack_ipsec;

	/*
	 * If ire is not cached, do not use fusion
	 */
	if (tcp->tcp_connp->conn_ire_cache == NULL) {
		/*
		 * There is no need to hold conn_lock here because when called
		 * from tcp_fuse() there can be no window where conn_ire_cache
		 * can change.  This is not true when called from
		 * tcp_fuse_output().  conn_ire_cache can become null just
		 * after the check, but it's ok if a few packets are delivered
		 * in the fused state.
		 */
		return (B_TRUE);
	}
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		if (tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
	} else {
		if (tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
	}
	if (!CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp))
		return (B_TRUE);
	return (B_FALSE);
}


/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED.  It fuses a direct path between itself
 * and the active connect tcp such that the regular tcp processing
 * may be bypassed under allowable circumstances.  Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns;
	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit q_hiwat of the listener tcp, but we can't
	 * really use tcp_listener since we get here after sending up
	 * T_CONN_IND and tcp_wput_accept() may be called independently,
	 * at which point tcp_listener is cleared; this is why we use
	 * tcp_saved_listener.  The listener itself is guaranteed to be
	 * around until tcp_accept_finish() is called on this eager --
	 * this won't happen until we're done since we're inside the
	 * eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);

	/*
	 * Lookup peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system.  Zone check
	 * is applied accordingly for loopback address, but not for
	 * local address since we want fusion to happen across Zones.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcph, ipst);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcph, ipst);
	}

	/*
	 * We can only proceed if peer exists, resides in the same squeue
	 * as our conn and is not raw-socket.  The squeue assignment of
	 * this eager tcp was done earlier at the time of SYN processing
	 * in ip_fanout_tcp{_v6}.  Note that sharing the same squeue by
	 * itself doesn't guarantee a safe condition to fuse, hence we
	 * perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcps, tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 * In particular we bail out for non-simple TCP/IP or if IPsec/
	 * IPQoS policy/kernel SSL exists.
	 */
	ns = tcps->tcps_netstack;
	ipst = ns->netstack_ip;

	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    !tcp_loopback_needs_ip(tcp, ns) &&
	    !tcp_loopback_needs_ip(peer_tcp, ns) &&
	    tcp->tcp_kssl_ent == NULL &&
	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
		mblk_t *mp;
		struct stroptions *stropt;
		queue_t *peer_rq = peer_tcp->tcp_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp) && peer_rq != NULL);
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(tcp->tcp_kssl_ctx == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 */
		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		tcp->tcp_fused_sigurg_mp = mp;

		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		peer_tcp->tcp_fused_sigurg_mp = mp;

		/* Allocate M_SETOPTS mblk */
		if ((mp = allocb(sizeof (*stropt), BPRI_HI)) == NULL)
			goto failed;

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints.  Having
		 * them set to non-zero values means asking for trouble
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly setup.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * At this point we are a detached eager tcp and therefore
		 * don't have a queue assigned to us until accept happens.
		 * In the meantime the peer endpoint may immediately send
		 * us data as soon as fusion is finished, and we need to be
		 * able to flow control it in case it sends down a huge
		 * amount of data while we're still detached.  To prevent
		 * that we inherit the listener's q_hiwat value; this is
		 * temporary since we'll repeat the process in
		 * tcp_accept_finish().
		 */
		(void) tcp_fuse_set_rcv_hiwat(tcp,
		    tcp->tcp_saved_listener->tcp_rq->q_hiwat);

		/*
		 * Set the stream head's write offset value to zero since we
		 * won't be needing any room for TCP/IP headers; tell it to
		 * not break up the writes (this would reduce the amount of
		 * work done by kmem); and configure our receive buffer.
		 * Note that we can only do this for the active connect tcp
		 * since our eager is still detached; it will be dealt with
		 * later in tcp_accept_finish().
		 */
		DB_TYPE(mp) = M_SETOPTS;
		mp->b_wptr += sizeof (*stropt);

		stropt = (struct stroptions *)mp->b_rptr;
		stropt->so_flags = SO_MAXBLK | SO_WROFF | SO_HIWAT;
		stropt->so_maxblk = tcp_maxpsz_set(peer_tcp, B_FALSE);
		stropt->so_wroff = 0;

		/*
		 * Record the stream head's high water mark for
		 * peer endpoint; this is used for flow-control
		 * purposes in tcp_fuse_output().
		 */
		stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(peer_tcp,
		    peer_rq->q_hiwat);

		/* Send the options up */
		putnext(peer_rq, mp);
	} else {
		TCP_STAT(tcps, tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
	ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);

	/*
	 * We disable synchronous streams, drain any queued data and
	 * clear tcp_direct_sockfs.  The synchronous streams entry
	 * points will become no-ops after this point.
	 */
	tcp_fuse_disable_pair(tcp, B_TRUE);

	/*
	 * Update th_seq and th_ack in the header template
	 */
	U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
	U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
}

/*
 * Fusion output routine for urgent data.  This routine is called by
 * tcp_fuse_output() for handling non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer.  For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode.  This is similar to the
	 * urgent data handling in the regular tcp.  We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app.  There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp.  If the new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost.  This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcps, tcp_fusion_urg);
	BUMP_MIB(&tcps->tcps_mib, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove existing T_EXDATA_IND, keep the data which follows
		 * it and relink our list.  Note that we don't modify the
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 * If we are modifying any member that can be changed outside the squeue,
 * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t max_unread;
	boolean_t flow_stopped, peer_data_queued = B_FALSE;
	boolean_t urgent = (DB_TYPE(mp) != M_DATA);
	mblk_t *mp1 = mp;
	ill_t *ilp, *olp;
	ipha_t *ipha;
	ip6_t *ip6h;
	tcph_t *tcph;
	uint_t ip_hdr_len;
	uint32_t seq;
	uint32_t recv_size = send_size;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	ip_stack_t	*ipst = ns->netstack_ip;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);

	max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;

	/* If this connection requires IP, unfuse and use regular path */
	if (tcp_loopback_needs_ip(tcp, ns) ||
	    tcp_loopback_needs_ip(peer_tcp, ns) ||
	    IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
		TCP_STAT(tcps, tcp_fusion_aborted);
		goto unfuse;
	}

	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for M_PCSIG mblk.
	 */
	if (urgent) {
		/*
		 * We stop synchronous streams when we have urgent data
		 * queued to prevent tcp_fuse_rrw() from pulling it.  If
		 * for some reason the urgent data can't be delivered
		 * below, synchronous streams will remain stopped until
		 * someone drains the tcp_rcv_list.
		 */
		TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);
		tcp_fuse_output_urg(tcp, mp);

		mp1 = mp->b_cont;
	}

	if (tcp->tcp_ipversion == IPV4_VERSION &&
	    (HOOKS4_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS4_INTERESTED_LOOPBACK_OUT(ipst)) ||
	    tcp->tcp_ipversion == IPV6_VERSION &&
	    (HOOKS6_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS6_INTERESTED_LOOPBACK_OUT(ipst))) {
		/*
		 * Build ip and tcp header to satisfy FW_HOOKS.
		 * We only build it when any hook is present.
		 */
		if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
		    tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
			/* If tcp_xmit_mp fails, use regular path */
			goto unfuse;

		ASSERT(peer_tcp->tcp_connp->conn_ire_cache->ire_ipif != NULL);
		olp = peer_tcp->tcp_connp->conn_ire_cache->ire_ipif->ipif_ill;
		/* PFHooks: LOOPBACK_OUT */
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			ipha = (ipha_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip4__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_out_event,
			    ipst->ips_ipv4firewall_loopback_out,
			    NULL, olp, ipha, mp1, mp1, ipst);
			DTRACE_PROBE1(ip4__loopback__out__end, mblk_t *, mp1);
		} else {
			ip6h = (ip6_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip6__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_out_event,
			    ipst->ips_ipv6firewall_loopback_out,
			    NULL, olp, ip6h, mp1, mp1, ipst);
			DTRACE_PROBE1(ip6__loopback__out__end, mblk_t *, mp1);
		}
		if (mp1 == NULL)
			goto unfuse;


		/* PFHooks: LOOPBACK_IN */
		ASSERT(tcp->tcp_connp->conn_ire_cache->ire_ipif != NULL);
		ilp = tcp->tcp_connp->conn_ire_cache->ire_ipif->ipif_ill;

		if (tcp->tcp_ipversion == IPV4_VERSION) {
			DTRACE_PROBE4(ip4__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_in_event,
			    ipst->ips_ipv4firewall_loopback_in,
			    ilp, NULL, ipha, mp1, mp1, ipst);
			DTRACE_PROBE1(ip4__loopback__in__end, mblk_t *, mp1);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = IPH_HDR_LENGTH(ipha);
		} else {
			DTRACE_PROBE4(ip6__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_in_event,
			    ipst->ips_ipv6firewall_loopback_in,
			    ilp, NULL, ip6h, mp1, mp1, ipst);
			DTRACE_PROBE1(ip6__loopback__in__end, mblk_t *, mp1);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = ip_hdr_length_v6(mp1, ip6h);
		}

		/* Data length might be changed by FW_HOOKS */
		tcph = (tcph_t *)&mp1->b_rptr[ip_hdr_len];
		seq = ABE32_TO_U32(tcph->th_seq);
		recv_size += seq - tcp->tcp_snxt;

		/*
		 * The message duplicated by tcp_xmit_mp is freed.
		 * Note: the original message passed in remains unchanged.
		 */
		freemsg(mp1);
	}

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	/*
	 * Wake up and signal the peer; it is okay to do this before
	 * enqueueing because we are holding the lock.  One of the
	 * advantages of synchronous streams is the ability for us to
	 * find out when the application performs a read on the socket,
	 * by way of the tcp_fuse_rrw() entry point being called.  All
	 * data that gets enqueued onto the receiver is treated as if
	 * it had arrived at the receiving endpoint, thus generating
	 * SIGPOLL/SIGIO for an asynchronous socket just as in the
	 * strrput() case.  However, we only wake up the application when
	 * necessary, i.e. during the first enqueue.  When tcp_fuse_rrw()
	 * is called it will send everything upstream.
	 */
	if (peer_tcp->tcp_direct_sockfs && !urgent &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		if (peer_tcp->tcp_rcv_list == NULL)
			STR_WAKEUP_SET(STREAM(peer_tcp->tcp_rq));
		/* Update poll events and send SIGPOLL/SIGIO if necessary */
		STR_SENDSIG(STREAM(peer_tcp->tcp_rq));
	}

	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 */
	tcp_rcv_enqueue(peer_tcp, mp, recv_size);

	/* In case it wrapped around and also to keep it constant */
	peer_tcp->tcp_rwnd += recv_size;

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in either tcp_accept_finish(), tcp_unfuse(), or tcp_fuse_rrw().
	 * If tcp_direct_sockfs is on or if the peer endpoint is detached,
	 * we emulate streams flow control by checking the peer's queue
	 * size and high water mark; otherwise we simply use canputnext()
	 * to decide if we need to stop our flow.
	 *
	 * The outstanding unread data block check does not apply for a
	 * detached receiver; this is to avoid unnecessary blocking of the
	 * sender while the accept is currently in progress and is quite
	 * similar to the regular tcp.
	 */
	if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
		max_unread = UINT_MAX;

	/*
	 * Since we are accessing our tcp_flow_stopped and might modify it,
	 * we need to take tcp->tcp_non_sq_lock.  The lock for the highest
	 * address is held first.  Dropping peer_tcp->tcp_non_sq_lock should
	 * not be an issue here since we are within the squeue and the peer
	 * won't disappear.
	 */
	if (tcp > peer_tcp) {
		mutex_exit(&peer_tcp->tcp_non_sq_lock);
		mutex_enter(&tcp->tcp_non_sq_lock);
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
	} else {
		mutex_enter(&tcp->tcp_non_sq_lock);
	}
	flow_stopped = tcp->tcp_flow_stopped;
	if (((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater ||
	    ++peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread)) ||
	    (!peer_tcp->tcp_direct_sockfs &&
	    !TCP_IS_DETACHED(peer_tcp) && !canputnext(peer_tcp->tcp_rq))) {
		peer_data_queued = B_TRUE;
	}

	if (!flow_stopped && (peer_data_queued ||
	    (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcps, tcp_fusion_flowctl);
		DTRACE_PROBE4(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt,
		    uint_t, peer_tcp->tcp_fuse_rcv_unread_cnt);
	} else if (flow_stopped && !peer_data_queued &&
	    (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater)) {
		tcp_clrqfull(tcp);
		flow_stopped = B_FALSE;
	}
	mutex_exit(&tcp->tcp_non_sq_lock);
	ipst->ips_loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += recv_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, send_size);

	BUMP_MIB(&tcps->tcps_mib, tcpInSegs);
	BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, send_size);

	BUMP_LOCAL(tcp->tcp_obsegs);
	BUMP_LOCAL(peer_tcp->tcp_ibsegs);

	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t,
	    send_size);

	if (!TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or if
		 * we're not flow-controlled.  There is no need for draining
		 * normal data when tcp_direct_sockfs is on because the peer
		 * will pull the data via tcp_fuse_rrw().
		 */
		if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			/*
			 * For TLI-based streams, a thread in tcp_accept_swap()
			 * can race with us.  That thread will ensure that the
			 * correct peer_tcp->tcp_rq is globally visible before
			 * peer_tcp->tcp_detached is visible as clear, but we
			 * must also ensure that the load of tcp_rq cannot be
			 * reordered to be before the tcp_detached check.
			 */
			membar_consumer();
			(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
			    NULL);
			/*
			 * If synchronous streams was stopped above due
			 * to the presence of urgent data, re-enable it.
			 */
			if (urgent)
				TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);
		}
	}
	return (B_TRUE);
unfuse:
	tcp_unfuse(tcp);
	return (B_FALSE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that could
 * not be sent upstream earlier.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
#ifdef DEBUG
	uint_t cnt = 0;
#endif
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp, tcp_push_timer,
			    MSEC_TO_TICK(tcps->tcps_push_timer_interval));
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		tcp->tcp_fused_sigurg = B_FALSE;
		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);
		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/*
	 * In the synchronous streams case, we generate SIGPOLL/SIGIO for
	 * each M_DATA that gets enqueued onto the receiver.  At this point
	 * we are about to drain any queued data via putnext().  In order
In order 851 * to avoid extraneous signal generation from strrput(), we set 852 * STRGETINPROG flag at the stream head prior to the draining and 853 * restore it afterwards. This masks out signal generation only 854 * for M_DATA messages and does not affect urgent data. 855 */ 856 if (tcp->tcp_direct_sockfs) 857 strrput_sig(q, B_FALSE); 858 859 /* Drain the data */ 860 while ((mp = tcp->tcp_rcv_list) != NULL) { 861 tcp->tcp_rcv_list = mp->b_next; 862 mp->b_next = NULL; 863 #ifdef DEBUG 864 cnt += msgdsize(mp); 865 #endif 866 putnext(q, mp); 867 TCP_STAT(tcps, tcp_fusion_putnext); 868 } 869 870 if (tcp->tcp_direct_sockfs) 871 strrput_sig(q, B_TRUE); 872 873 ASSERT(cnt == tcp->tcp_rcv_cnt); 874 tcp->tcp_rcv_last_head = NULL; 875 tcp->tcp_rcv_last_tail = NULL; 876 tcp->tcp_rcv_cnt = 0; 877 tcp->tcp_fuse_rcv_unread_cnt = 0; 878 tcp->tcp_rwnd = q->q_hiwat; 879 880 return (B_TRUE); 881 } 882 883 /* 884 * Synchronous stream entry point for sockfs to retrieve 885 * data directly from tcp_rcv_list. 886 * tcp_fuse_rrw() might end up modifying the peer's tcp_flow_stopped, 887 * for which it must take the tcp_non_sq_lock of the peer as well 888 * making any change. The order of taking the locks is based on 889 * the TCP pointer itself. Before we get the peer we need to take 890 * our tcp_non_sq_lock so that the peer doesn't disappear. However, 891 * we cannot drop the lock if we have to grab the peer's lock (because 892 * of ordering), since the peer might disappear in the interim. So, 893 * we take our tcp_non_sq_lock, get the peer, increment the ref on the 894 * peer's conn, drop all the locks and then take the tcp_non_sq_lock in the 895 * desired order. Incrementing the conn ref on the peer means that the 896 * peer won't disappear when we drop our tcp_non_sq_lock. 897 */ 898 int 899 tcp_fuse_rrw(queue_t *q, struiod_t *dp) 900 { 901 tcp_t *tcp = Q_TO_CONN(q)->conn_tcp; 902 mblk_t *mp; 903 tcp_t *peer_tcp; 904 tcp_stack_t *tcps = tcp->tcp_tcps; 905 906 mutex_enter(&tcp->tcp_non_sq_lock); 907 908 /* 909 * If tcp_fuse_syncstr_plugged is set, then another thread is moving 910 * the underlying data to the stream head. We need to wait until it's 911 * done, then return EBUSY so that strget() will dequeue data from the 912 * stream head to ensure data is drained in-order. 913 */ 914 plugged: 915 if (tcp->tcp_fuse_syncstr_plugged) { 916 do { 917 cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_non_sq_lock); 918 } while (tcp->tcp_fuse_syncstr_plugged); 919 920 mutex_exit(&tcp->tcp_non_sq_lock); 921 TCP_STAT(tcps, tcp_fusion_rrw_plugged); 922 TCP_STAT(tcps, tcp_fusion_rrw_busy); 923 return (EBUSY); 924 } 925 926 peer_tcp = tcp->tcp_loopback_peer; 927 928 /* 929 * If someone had turned off tcp_direct_sockfs or if synchronous 930 * streams is stopped, we return EBUSY. This causes strget() to 931 * dequeue data from the stream head instead. 932 */ 933 if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) { 934 mutex_exit(&tcp->tcp_non_sq_lock); 935 TCP_STAT(tcps, tcp_fusion_rrw_busy); 936 return (EBUSY); 937 } 938 939 /* 940 * Grab lock in order. The highest addressed tcp is locked first. 941 * We don't do this within the tcp_rcv_list check since if we 942 * have to drop the lock, for ordering, then the tcp_rcv_list 943 * could change. 
	 */
	if (peer_tcp > tcp) {
		CONN_INC_REF(peer_tcp->tcp_connp);
		mutex_exit(&tcp->tcp_non_sq_lock);
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
		mutex_enter(&tcp->tcp_non_sq_lock);
		CONN_DEC_REF(peer_tcp->tcp_connp);
		/* This might have changed in the interim */
		if (tcp->tcp_fuse_syncstr_plugged) {
			mutex_exit(&peer_tcp->tcp_non_sq_lock);
			goto plugged;
		}
	} else {
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
	}

	if ((mp = tcp->tcp_rcv_list) != NULL) {

		DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
		    uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);

		tcp->tcp_rcv_list = NULL;
		TCP_STAT(tcps, tcp_fusion_rrw_msgcnt);

		/*
		 * At this point nothing should be left in tcp_rcv_list.
		 * The only possible case where we would have a chain of
		 * b_next-linked messages is urgent data, but we wouldn't
		 * be here if that's true since urgent data is delivered
		 * via putnext() and synchronous streams is stopped until
		 * tcp_fuse_rcv_drain() is finished.
		 */
		ASSERT(DB_TYPE(mp) == M_DATA && mp->b_next == NULL);

		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
		tcp->tcp_fuse_rcv_unread_cnt = 0;

		if (peer_tcp->tcp_flow_stopped &&
		    (TCP_UNSENT_BYTES(peer_tcp) <=
		    peer_tcp->tcp_xmit_lowater)) {
			tcp_clrqfull(peer_tcp);
			TCP_STAT(tcps, tcp_fusion_backenabled);
		}
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);
	/*
	 * Either we just dequeued everything or we get here from sockfs
	 * and have nothing to return; in this case clear RSLEEP.
	 */
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);
	ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
	STR_WAKEUP_CLEAR(STREAM(q));

	mutex_exit(&tcp->tcp_non_sq_lock);
	dp->d_mp = mp;
	return (0);
}
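
/*
 * Illustrative sketch (not part of the original code) of the lock
 * ordering discipline used by tcp_fuse_rrw() above and tcp_fuse_output():
 * whenever both tcp_non_sq_lock mutexes of a fused pair are needed, the
 * tcp_t with the higher address is locked first, e.g.:
 *
 *	if (tcp > peer_tcp) {
 *		mutex_enter(&tcp->tcp_non_sq_lock);
 *		mutex_enter(&peer_tcp->tcp_non_sq_lock);
 *	} else {
 *		mutex_enter(&peer_tcp->tcp_non_sq_lock);
 *		mutex_enter(&tcp->tcp_non_sq_lock);
 *	}
 *
 * Since both endpoints follow the same rule, the two sides can never
 * acquire the pair in opposite orders and deadlock against each other.
 */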

/*
 * Synchronous stream entry point used by certain ioctls to retrieve
 * information about or peek into the tcp_rcv_list.
 */
int
tcp_fuse_rinfop(queue_t *q, infod_t *dp)
{
	tcp_t	*tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t	*mp;
	uint_t	cmd = dp->d_cmd;
	int	res = 0;
	int	error = 0;
	struct stdata *stp = STREAM(q);

	mutex_enter(&tcp->tcp_non_sq_lock);
	/* If shutdown on read has happened, return nothing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag & STREOF) {
		mutex_exit(&stp->sd_lock);
		goto done;
	}
	mutex_exit(&stp->sd_lock);

	/*
	 * It is OK not to return an answer if tcp_rcv_list is
	 * currently not accessible.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped ||
	    tcp->tcp_fuse_syncstr_plugged || (mp = tcp->tcp_rcv_list) == NULL)
		goto done;

	if (cmd & INFOD_COUNT) {
		/*
		 * We have at least one message and
		 * could return only one at a time.
		 */
		dp->d_count++;
		res |= INFOD_COUNT;
	}
	if (cmd & INFOD_BYTES) {
		/*
		 * Return size of all data messages.
		 */
		dp->d_bytes += tcp->tcp_rcv_cnt;
		res |= INFOD_BYTES;
	}
	if (cmd & INFOD_FIRSTBYTES) {
		/*
		 * Return size of first data message.
		 */
		dp->d_bytes = msgdsize(mp);
		res |= INFOD_FIRSTBYTES;
		dp->d_cmd &= ~INFOD_FIRSTBYTES;
	}
	if (cmd & INFOD_COPYOUT) {
		mblk_t *mp1;
		int n;

		if (DB_TYPE(mp) == M_DATA) {
			mp1 = mp;
		} else {
			mp1 = mp->b_cont;
			ASSERT(mp1 != NULL);
		}

		/*
		 * Return data contents of first message.
		 */
		ASSERT(DB_TYPE(mp1) == M_DATA);
		while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
			n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
			if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
			    UIO_READ, dp->d_uiop)) != 0) {
				goto done;
			}
			mp1 = mp1->b_cont;
		}
		res |= INFOD_COPYOUT;
		dp->d_cmd &= ~INFOD_COPYOUT;
	}
done:
	mutex_exit(&tcp->tcp_non_sq_lock);

	dp->d_res |= res;

	return (error);
}

/*
 * Enable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_enable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	/* We can only enable synchronous streams for sockfs mode */
	tcp->tcp_direct_sockfs = tcp->tcp_issocket && do_tcp_direct_sockfs;

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * We replace our q_qinfo with one that has the qi_rwp entry point.
	 * Clear SR_SIGALLDATA because we generate the equivalent signal(s)
	 * for every enqueued data in tcp_fuse_output().
	 */
	rq->q_qinfo = &tcp_loopback_rinit;
	rq->q_struiot = tcp_loopback_rinit.qi_struiot;
	stp->sd_struiordq = rq;
	stp->sd_rput_opt &= ~SR_SIGALLDATA;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_disable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * Reset q_qinfo to point to the default tcp entry points.
	 * Also restore SR_SIGALLDATA so that strrput() can generate
	 * the signals again for future M_DATA messages.
	 */
	rq->q_qinfo = &tcp_rinit;
	rq->q_struiot = tcp_rinit.qi_struiot;
	stp->sd_struiordq = NULL;
	stp->sd_rput_opt |= SR_SIGALLDATA;
	tcp->tcp_direct_sockfs = B_FALSE;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Enable synchronous streams on a pair of fused tcp endpoints.
 */
void
tcp_fuse_syncstr_enable_pair(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	tcp_fuse_syncstr_enable(tcp);
	tcp_fuse_syncstr_enable(peer_tcp);
}

/*
 * Allow or disallow signals to be generated by strrput().
 */
static void
strrput_sig(queue_t *q, boolean_t on)
{
	struct stdata *stp = STREAM(q);

	mutex_enter(&stp->sd_lock);
	if (on)
		stp->sd_flag &= ~STRGETINPROG;
	else
		stp->sd_flag |= STRGETINPROG;
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a pair of fused tcp endpoints and drain
 * any queued data; called either during unfuse or upon transitioning from
 * a socket to a stream endpoint due to _SIOCSOCKFALLBACK.
 */
void
tcp_fuse_disable_pair(tcp_t *tcp, boolean_t unfusing)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	/*
	 * Force any tcp_fuse_rrw() calls to block until we've moved the data
	 * onto the stream head.
	 */
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);

	/*
	 * Drain any pending data; the detached check is needed because
	 * we may be called as a result of a tcp_unfuse() triggered by
	 * tcp_fuse_output().  Note that in case of a detached tcp, the
	 * draining will happen later after the tcp is unfused.  For non-
	 * urgent data, this can be handled by the regular tcp_rcv_drain().
	 * If we have urgent data sitting in the receive list, we will
	 * need to send up a SIGURG signal first before draining the data.
	 * All of these will be handled by the code in tcp_fuse_rcv_drain()
	 * when called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
		    (unfusing ? &tcp->tcp_fused_sigurg_mp : NULL));
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
		    (unfusing ? &peer_tcp->tcp_fused_sigurg_mp : NULL));
	}

	/*
	 * Make all current and future tcp_fuse_rrw() calls fail with EBUSY.
	 * To ensure threads don't sneak past the checks in tcp_fuse_rrw(),
	 * a given stream must be stopped prior to being unplugged (but the
	 * ordering of operations between the streams is unimportant).
	 */
	TCP_FUSE_SYNCSTR_STOP(tcp);
	TCP_FUSE_SYNCSTR_STOP(peer_tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);

	/* Lift up any flow-control conditions */
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}

	/* Disable synchronous streams */
	tcp_fuse_syncstr_disable(tcp);
	tcp_fuse_syncstr_disable(peer_tcp);
}

/*
 * Calculate the size of the receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);

	/* Ensure that value is within the maximum upper bound */
	if (rwnd > tcps->tcps_max_buf)
		rwnd = tcps->tcps_max_buf;

	/* Obey the absolute minimum tcp receive high water mark */
	if (rwnd < tcps->tcps_sth_rcv_hiwat)
		rwnd = tcps->tcps_sth_rcv_hiwat;

	/*
	 * Round up to system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	tcp->tcp_fuse_rcv_hiwater = rwnd;
	return (rwnd);
}

/*
 * Calculate maxpsz and the peer's limit on outstanding unread data
 * blocks for a fused tcp endpoint.
 */
int
tcp_fuse_maxpsz_set(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t sndbuf = tcp->tcp_xmit_hiwater;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for a more accurate flow-
	 * control accounting.  Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to system page size due to the lack of
	 * TCP MSS concept in Fusion.
	 */
	if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
		maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

	/*
	 * Calculate the peer's limit for the number of outstanding unread
	 * data blocks.  This is the number of data blocks that are allowed
	 * to reside in the receiver's queue before the sender gets flow
	 * controlled.  It is used only in the synchronous streams mode as
	 * a way to throttle the sender when it performs consecutive writes
	 * faster than can be read.  The value is derived from SO_SNDBUF in
	 * order to give the sender some control; we divide it by a large
	 * value (16KB) to produce a fairly low initial limit.
	 */
	if (tcp_fusion_rcv_unread_min == 0) {
		/* A value of 0 means that we disable the check */
		peer_tcp->tcp_fuse_rcv_unread_hiwater = 0;
	} else {
		peer_tcp->tcp_fuse_rcv_unread_hiwater =
		    MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min);
	}
	return (maxpsz);
}
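
/*
 * Worked example for the calculation above (the buffer sizes are
 * illustrative only): with a 48K send buffer and a 64K peer receive
 * buffer on a system with 8K pages, maxpsz is
 * P2ROUNDUP(MIN(48K, 64K), 8K) / 2 = 24K, so the stream head breaks
 * larger writes into 24K chunks.  The peer's unread-block limit is
 * MAX(48K >> 14, tcp_fusion_rcv_unread_min) = MAX(3, 8) = 8 blocks,
 * so for a send buffer of this size the tcp_fusion_rcv_unread_min
 * floor is what takes effect.
 */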