/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/optcom.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections.  The fusion of two local TCP endpoints occurs
 * at connection establishment time.  Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful.  If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.  After enqueueing
 * the data, the sender can either push (putnext) data up the receiver's
 * read queue; or the sender can simply return and let the receiver
 * retrieve the enqueued data via the synchronous streams entry point
 * tcp_fuse_rrw().  The latter path is taken if synchronous streams is
 * enabled (the default).  It is disabled if sockfs no longer resides
 * directly on top of the tcp module due to a module insertion or removal.
 * It also needs to be temporarily disabled when sending urgent data
 * because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
 * by the strsock_proto() hook.
 *
 * Synchronization is handled by squeue and the mutex tcp_non_sq_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue.  This ensures that neither side
 * can disappear while the other side is still sending data.  By itself,
 * squeue is not sufficient for guaranteeing safety when synchronous
 * streams is enabled.  The reason is that tcp_fuse_rrw() doesn't enter
 * the squeue and its access to tcp_rcv_list and other fusion-related
 * fields needs to be synchronized with the sender.  tcp_non_sq_lock is
 * used for this purpose.  When there is urgent data, the sender needs
 * to push the data up the receiver's streams read queue.
 * In order to avoid holding the tcp_non_sq_lock across putnext(), the
 * sender sets the peer tcp's tcp_fuse_syncstr_plugged bit and releases
 * tcp_non_sq_lock (see macro TCP_FUSE_SYNCSTR_PLUG_DRAIN()).  If
 * tcp_fuse_rrw() enters after this point, it will see that synchronous
 * streams is plugged and will wait on tcp_fuse_plugcv.  After the sender
 * has finished pushing up all urgent data, it will clear the
 * tcp_fuse_syncstr_plugged bit using TCP_FUSE_SYNCSTR_UNPLUG_DRAIN().
 * This will cause any threads waiting on tcp_fuse_plugcv to return
 * EBUSY, and in turn cause strget() to call getq_noenab() to dequeue
 * data from the stream head instead.  Once the data on the stream head
 * has been consumed, tcp_fuse_rrw() may again be used to process
 * tcp_rcv_list.  However, if TCP_FUSE_SYNCSTR_STOP() has been called,
 * all future calls to tcp_fuse_rrw() will return EBUSY, effectively
 * disabling synchronous streams.
 *
 * The following note applies only to the synchronous streams mode.
 *
 * Flow control is done by checking the size of the receive buffer and
 * the number of data blocks, both set to different limits.  This is
 * different from regular streams flow control where the cumulative size
 * check dominates the block count check -- streams queue high water mark
 * typically represents bytes.  Each enqueue triggers notifications
 * to the receiving process; a build-up of data blocks indicates a
 * slow receiver and the sender should be blocked or informed at the
 * earliest moment instead of further wasting system resources.  In
 * effect, this is equivalent to limiting the number of outstanding
 * segments in flight.
 */

/*
 * Setting this to false means we disable fusion altogether and
 * loopback connections would go through the protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;

/*
 * Enabling this flag allows sockfs to retrieve data directly
 * from a fused tcp endpoint using the synchronous streams interface.
 */
boolean_t do_tcp_direct_sockfs = B_TRUE;

/*
 * This is the minimum number of outstanding writes allowed on
 * a synchronous streams-enabled receiving endpoint before the
 * sender gets flow-controlled.  Setting this value to 0 means
 * that the data block limit is equivalent to the byte count
 * limit, which essentially disables the check.
 */
#define	TCP_FUSION_RCV_UNREAD_MIN	8
uint_t tcp_fusion_rcv_unread_min = TCP_FUSION_RCV_UNREAD_MIN;

static void	tcp_fuse_syncstr_enable(tcp_t *);
static void	tcp_fuse_syncstr_disable(tcp_t *);
static void	strrput_sig(queue_t *, boolean_t);

/*
 * Return true if this connection needs some IP functionality
 */
static boolean_t
tcp_loopback_needs_ip(tcp_t *tcp, netstack_t *ns)
{
	ipsec_stack_t	*ipss = ns->netstack_ipsec;

	/*
	 * If ire is not cached, do not use fusion
	 */
	if (tcp->tcp_connp->conn_ire_cache == NULL) {
		/*
		 * There is no need to hold conn_lock here because when called
		 * from tcp_fuse() there can be no window where conn_ire_cache
		 * can change.  This is not true when called from
		 * tcp_fuse_output().  conn_ire_cache can become null just
		 * after the check, but it's ok if a few packets are delivered
		 * in the fused state.
		 */
		return (B_TRUE);
	}
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		if (tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
	} else {
		if (tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
	}
	if (!CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp))
		return (B_TRUE);
	return (B_FALSE);
}


/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED.  It fuses a direct path between itself
 * and the active connect tcp such that the regular tcp processing
 * may be bypassed under allowable circumstances.  Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
	conn_t		*peer_connp, *connp = tcp->tcp_connp;
	tcp_t		*peer_tcp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns;
	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit q_hiwat of the listener tcp, but we can't
	 * really use tcp_listener since we get here after sending up
	 * T_CONN_IND and tcp_wput_accept() may be called independently,
	 * at which point tcp_listener is cleared; this is why we use
	 * tcp_saved_listener.  The listener itself is guaranteed to be
	 * around until tcp_accept_finish() is called on this eager --
	 * this won't happen until we're done since we're inside the
	 * eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);

	/*
	 * Lookup peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system.  Zone check
	 * is applied accordingly for loopback address, but not for
	 * local address since we want fusion to happen across Zones.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcph, ipst);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcph, ipst);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same
	 * squeue as our conn and is not a raw socket.  The squeue
	 * assignment of this eager tcp was done earlier at the time of
	 * SYN processing in ip_fanout_tcp{_v6}.  Note that sharing a
	 * squeue by itself doesn't guarantee a safe condition to fuse,
	 * hence we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcps, tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 * In particular we bail out for non-simple TCP/IP or if IPsec/
	 * IPQoS policy/kernel SSL exists.
	 */
	ns = tcps->tcps_netstack;
	ipst = ns->netstack_ip;

	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    !tcp_loopback_needs_ip(tcp, ns) &&
	    !tcp_loopback_needs_ip(peer_tcp, ns) &&
	    tcp->tcp_kssl_ent == NULL &&
	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
		mblk_t *mp;
		struct stroptions *stropt;
		queue_t *peer_rq = peer_tcp->tcp_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp) && peer_rq != NULL);
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(tcp->tcp_kssl_ctx == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 */
		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		tcp->tcp_fused_sigurg_mp = mp;

		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		peer_tcp->tcp_fused_sigurg_mp = mp;

		/* Allocate M_SETOPTS mblk */
		if ((mp = allocb(sizeof (*stropt), BPRI_HI)) == NULL)
			goto failed;

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints.  Having
		 * them set to non-zero values means asking for trouble
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly set up.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * At this point we are a detached eager tcp and therefore
		 * don't have a queue assigned to us until accept happens.
		 * In the meantime the peer endpoint may immediately send
		 * us data as soon as fusion is finished, and we need to be
		 * able to flow control it in case it sends down a huge
		 * amount of data while we're still detached.  To prevent
		 * that we inherit the listener's q_hiwat value; this is
		 * temporary since we'll repeat the process in
		 * tcp_accept_finish().
		 */
		(void) tcp_fuse_set_rcv_hiwat(tcp,
		    tcp->tcp_saved_listener->tcp_rq->q_hiwat);

		/*
		 * Set the stream head's write offset value to zero since we
		 * won't be needing any room for TCP/IP headers; tell it to
		 * not break up the writes (this would reduce the amount of
		 * work done by kmem); and configure our receive buffer.
		 * Note that we can only do this for the active connect tcp
		 * since our eager is still detached; it will be dealt with
		 * later in tcp_accept_finish().
		 */
		DB_TYPE(mp) = M_SETOPTS;
		mp->b_wptr += sizeof (*stropt);

		stropt = (struct stroptions *)mp->b_rptr;
		stropt->so_flags = SO_MAXBLK | SO_WROFF | SO_HIWAT;
		stropt->so_maxblk = tcp_maxpsz_set(peer_tcp, B_FALSE);
		stropt->so_wroff = 0;

		/*
		 * Record the stream head's high water mark for the
		 * peer endpoint; this is used for flow-control
		 * purposes in tcp_fuse_output().
		 */
		stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(peer_tcp,
		    peer_rq->q_hiwat);

		/* Send the options up */
		putnext(peer_rq, mp);
	} else {
		TCP_STAT(tcps, tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
	ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);

	/*
	 * We disable synchronous streams, drain any queued data and
	 * clear tcp_direct_sockfs.  The synchronous streams entry
	 * points will become no-ops after this point.
	 */
	tcp_fuse_disable_pair(tcp, B_TRUE);

	/*
	 * Update th_seq and th_ack in the header template
	 */
	U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
	U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
}

/*
 * Fusion output routine for urgent data.  This routine is called by
 * tcp_fuse_output() for handling non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer.  For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode.  This is similar to the
	 * urgent data handling in the regular tcp.  We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app.  There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp.  If the new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost.  This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcps, tcp_fusion_urg);
	BUMP_MIB(&tcps->tcps_mib, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove existing T_EXDATA_IND, keep the data which follows
		 * it and relink our list.  Note that we don't modify the
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 * If we are modifying any member that can be changed outside the squeue,
 * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	tcp_t		*peer_tcp = tcp->tcp_loopback_peer;
	uint_t		max_unread;
	boolean_t	flow_stopped, peer_data_queued = B_FALSE;
	boolean_t	urgent = (DB_TYPE(mp) != M_DATA);
	mblk_t		*mp1 = mp;
	ill_t		*ilp, *olp;
	ipha_t		*ipha;
	ip6_t		*ip6h;
	tcph_t		*tcph;
	uint_t		ip_hdr_len;
	uint32_t	seq;
	uint32_t	recv_size = send_size;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	ip_stack_t	*ipst = ns->netstack_ip;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);

	max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;

	/* If this connection requires IP, unfuse and use regular path */
	if (tcp_loopback_needs_ip(tcp, ns) ||
	    tcp_loopback_needs_ip(peer_tcp, ns) ||
	    IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
		TCP_STAT(tcps, tcp_fusion_aborted);
		goto unfuse;
	}

	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for the M_PCSIG mblk.
	 */
	if (urgent) {
		/*
		 * We stop synchronous streams when we have urgent data
		 * queued to prevent tcp_fuse_rrw() from pulling it.  If
		 * for some reason the urgent data can't be delivered
		 * below, synchronous streams will remain stopped until
		 * someone drains the tcp_rcv_list.
		 */
		TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);
		tcp_fuse_output_urg(tcp, mp);

		mp1 = mp->b_cont;
	}

	if (tcp->tcp_ipversion == IPV4_VERSION &&
	    (HOOKS4_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS4_INTERESTED_LOOPBACK_OUT(ipst)) ||
	    tcp->tcp_ipversion == IPV6_VERSION &&
	    (HOOKS6_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS6_INTERESTED_LOOPBACK_OUT(ipst))) {
		/*
		 * Build the IP and TCP headers to satisfy FW_HOOKS.
		 * We only build them when a hook is present.
		 */
		if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
		    tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
			/* If tcp_xmit_mp fails, use regular path */
			goto unfuse;

		ASSERT(peer_tcp->tcp_connp->conn_ire_cache->ire_ipif != NULL);
		olp = peer_tcp->tcp_connp->conn_ire_cache->ire_ipif->ipif_ill;
		/* PFHooks: LOOPBACK_OUT */
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			ipha = (ipha_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip4__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_out_event,
			    ipst->ips_ipv4firewall_loopback_out,
			    NULL, olp, ipha, mp1, mp1, ipst);
			DTRACE_PROBE1(ip4__loopback__out__end, mblk_t *, mp1);
		} else {
			ip6h = (ip6_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip6__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_out_event,
			    ipst->ips_ipv6firewall_loopback_out,
			    NULL, olp, ip6h, mp1, mp1, ipst);
			DTRACE_PROBE1(ip6__loopback__out__end, mblk_t *, mp1);
		}
		if (mp1 == NULL)
			goto unfuse;


		/* PFHooks: LOOPBACK_IN */
		ASSERT(tcp->tcp_connp->conn_ire_cache->ire_ipif != NULL);
		ilp = tcp->tcp_connp->conn_ire_cache->ire_ipif->ipif_ill;

		if (tcp->tcp_ipversion == IPV4_VERSION) {
			DTRACE_PROBE4(ip4__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_in_event,
			    ipst->ips_ipv4firewall_loopback_in,
			    ilp, NULL, ipha, mp1, mp1, ipst);
			DTRACE_PROBE1(ip4__loopback__in__end, mblk_t *, mp1);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = IPH_HDR_LENGTH(ipha);
		} else {
			DTRACE_PROBE4(ip6__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_in_event,
			    ipst->ips_ipv6firewall_loopback_in,
			    ilp, NULL, ip6h, mp1, mp1, ipst);
			DTRACE_PROBE1(ip6__loopback__in__end, mblk_t *, mp1);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = ip_hdr_length_v6(mp1, ip6h);
		}

		/* Data length might be changed by FW_HOOKS */
		tcph = (tcph_t *)&mp1->b_rptr[ip_hdr_len];
		seq = ABE32_TO_U32(tcph->th_seq);
		recv_size += seq - tcp->tcp_snxt;

		/*
		 * The message duplicated by tcp_xmit_mp is freed.
		 * Note: the original message passed in remains unchanged.
		 */
		freemsg(mp1);
	}

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	/*
	 * Wake up and signal the peer; it is okay to do this before
	 * enqueueing because we are holding the lock.  One of the
	 * advantages of synchronous streams is the ability for us to
	 * find out when the application performs a read on the socket,
	 * by way of the tcp_fuse_rrw() entry point being called.  Every
	 * piece of data that gets enqueued onto the receiver is treated
	 * as if it has arrived at the receiving endpoint, thus generating
	 * SIGPOLL/SIGIO for an asynchronous socket just as in the
	 * strrput() case.  However, we only wake up the application when
	 * necessary, i.e. during the first enqueue.  When tcp_fuse_rrw()
	 * is called it will send everything upstream.
	 */
	if (peer_tcp->tcp_direct_sockfs && !urgent &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		if (peer_tcp->tcp_rcv_list == NULL)
			STR_WAKEUP_SET(STREAM(peer_tcp->tcp_rq));
		/* Update poll events and send SIGPOLL/SIGIO if necessary */
		STR_SENDSIG(STREAM(peer_tcp->tcp_rq));
	}

	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 */
	tcp_rcv_enqueue(peer_tcp, mp, recv_size);

	/* In case it wrapped around and also to keep it constant */
	peer_tcp->tcp_rwnd += recv_size;

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in tcp_accept_finish(), tcp_unfuse(), or tcp_fuse_rrw().
	 * If tcp_direct_sockfs is on or if the peer endpoint is detached,
	 * we emulate streams flow control by checking the peer's queue
	 * size and high water mark; otherwise we simply use canputnext()
	 * to decide if we need to stop our flow.
	 *
	 * The outstanding unread data block check does not apply to a
	 * detached receiver; this is to avoid unnecessary blocking of the
	 * sender while the accept is currently in progress and is quite
	 * similar to the regular tcp.
	 */
	if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
		max_unread = UINT_MAX;

	/*
	 * Since we are accessing our tcp_flow_stopped and might modify it,
	 * we need to take tcp->tcp_non_sq_lock.  The lock for the highest
	 * address is held first.  Dropping peer_tcp->tcp_non_sq_lock should
	 * not be an issue here since we are within the squeue and the peer
	 * won't disappear.
	 */
	if (tcp > peer_tcp) {
		mutex_exit(&peer_tcp->tcp_non_sq_lock);
		mutex_enter(&tcp->tcp_non_sq_lock);
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
	} else {
		mutex_enter(&tcp->tcp_non_sq_lock);
	}
	flow_stopped = tcp->tcp_flow_stopped;
	if (((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater ||
	    ++peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread)) ||
	    (!peer_tcp->tcp_direct_sockfs &&
	    !TCP_IS_DETACHED(peer_tcp) && !canputnext(peer_tcp->tcp_rq))) {
		peer_data_queued = B_TRUE;
	}

	if (!flow_stopped && (peer_data_queued ||
	    (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcps, tcp_fusion_flowctl);
		DTRACE_PROBE4(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt,
		    uint_t, peer_tcp->tcp_fuse_rcv_unread_cnt);
	} else if (flow_stopped && !peer_data_queued &&
	    (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater)) {
		tcp_clrqfull(tcp);
		flow_stopped = B_FALSE;
	}
	mutex_exit(&tcp->tcp_non_sq_lock);
	ipst->ips_loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += recv_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, send_size);

	BUMP_MIB(&tcps->tcps_mib, tcpInSegs);
	BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, send_size);

	BUMP_LOCAL(tcp->tcp_obsegs);
	BUMP_LOCAL(peer_tcp->tcp_ibsegs);

	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t,
	    send_size);

	if (!TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or if
		 * we're not flow-controlled.  There is no need for draining
		 * normal data when tcp_direct_sockfs is on because the peer
		 * will pull the data via tcp_fuse_rrw().
		 */
		if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			/*
			 * For TLI-based streams, a thread in tcp_accept_swap()
			 * can race with us.  That thread will ensure that the
			 * correct peer_tcp->tcp_rq is globally visible before
			 * peer_tcp->tcp_detached is visible as clear, but we
			 * must also ensure that the load of tcp_rq cannot be
			 * reordered to be before the tcp_detached check.
			 */
			membar_consumer();
			(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
			    NULL);
			/*
			 * If synchronous streams was stopped above due
			 * to the presence of urgent data, re-enable it.
			 */
			if (urgent)
				TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);
		}
	}
	return (B_TRUE);
unfuse:
	tcp_unfuse(tcp);
	return (B_FALSE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that couldn't
 * be sent upstream in the past.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
#ifdef DEBUG
	uint_t cnt = 0;
#endif
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in the receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp, tcp_push_timer,
			    MSEC_TO_TICK(tcps->tcps_push_timer_interval));
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		tcp->tcp_fused_sigurg = B_FALSE;
		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);
		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/*
	 * In the synchronous streams case, we generate SIGPOLL/SIGIO for
	 * each M_DATA that gets enqueued onto the receiver.  At this point
	 * we are about to drain any queued data via putnext().
	 * In order to avoid extraneous signal generation from strrput(), we
	 * set the STRGETINPROG flag at the stream head prior to the draining
	 * and restore it afterwards.  This masks out signal generation only
	 * for M_DATA messages and does not affect urgent data.
	 */
	if (tcp->tcp_direct_sockfs)
		strrput_sig(q, B_FALSE);

	/* Drain the data */
	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		putnext(q, mp);
		TCP_STAT(tcps, tcp_fusion_putnext);
	}

	if (tcp->tcp_direct_sockfs)
		strrput_sig(q, B_TRUE);

	ASSERT(cnt == tcp->tcp_rcv_cnt);
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;
	tcp->tcp_fuse_rcv_unread_cnt = 0;
	tcp->tcp_rwnd = q->q_hiwat;

	return (B_TRUE);
}

/*
 * Synchronous stream entry point for sockfs to retrieve
 * data directly from tcp_rcv_list.
 * tcp_fuse_rrw() might end up modifying the peer's tcp_flow_stopped,
 * for which it must take the tcp_non_sq_lock of the peer as well
 * before making any change.  The order of taking the locks is based on
 * the TCP pointer itself.  Before we get the peer we need to take
 * our tcp_non_sq_lock so that the peer doesn't disappear.  However,
 * we cannot drop the lock if we have to grab the peer's lock (because
 * of ordering), since the peer might disappear in the interim.  So,
 * we take our tcp_non_sq_lock, get the peer, increment the ref on the
 * peer's conn, drop all the locks and then take the tcp_non_sq_lock in the
 * desired order.  Incrementing the conn ref on the peer means that the
 * peer won't disappear when we drop our tcp_non_sq_lock.
 */
int
tcp_fuse_rrw(queue_t *q, struiod_t *dp)
{
	tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t *mp;
	tcp_t *peer_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	mutex_enter(&tcp->tcp_non_sq_lock);

	/*
	 * If tcp_fuse_syncstr_plugged is set, then another thread is moving
	 * the underlying data to the stream head.  We need to wait until it's
	 * done, then return EBUSY so that strget() will dequeue data from the
	 * stream head to ensure data is drained in-order.
	 */
plugged:
	if (tcp->tcp_fuse_syncstr_plugged) {
		do {
			cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_non_sq_lock);
		} while (tcp->tcp_fuse_syncstr_plugged);

		mutex_exit(&tcp->tcp_non_sq_lock);
		TCP_STAT(tcps, tcp_fusion_rrw_plugged);
		TCP_STAT(tcps, tcp_fusion_rrw_busy);
		return (EBUSY);
	}

	peer_tcp = tcp->tcp_loopback_peer;

	/*
	 * If someone had turned off tcp_direct_sockfs or if synchronous
	 * streams is stopped, we return EBUSY.  This causes strget() to
	 * dequeue data from the stream head instead.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
		mutex_exit(&tcp->tcp_non_sq_lock);
		TCP_STAT(tcps, tcp_fusion_rrw_busy);
		return (EBUSY);
	}

	/*
	 * Grab lock in order.  The highest addressed tcp is locked first.
	 * We don't do this within the tcp_rcv_list check since if we
	 * have to drop the lock, for ordering, then the tcp_rcv_list
	 * could change.
	 */
	if (peer_tcp > tcp) {
		CONN_INC_REF(peer_tcp->tcp_connp);
		mutex_exit(&tcp->tcp_non_sq_lock);
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
		mutex_enter(&tcp->tcp_non_sq_lock);
		CONN_DEC_REF(peer_tcp->tcp_connp);
		/* This might have changed in the interim */
		if (tcp->tcp_fuse_syncstr_plugged) {
			mutex_exit(&peer_tcp->tcp_non_sq_lock);
			goto plugged;
		}
	} else {
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
	}

	if ((mp = tcp->tcp_rcv_list) != NULL) {

		DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
		    uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);

		tcp->tcp_rcv_list = NULL;
		TCP_STAT(tcps, tcp_fusion_rrw_msgcnt);

		/*
		 * At this point nothing should be left in tcp_rcv_list.
		 * The only possible case where we would have a chain of
		 * b_next-linked messages is urgent data, but we wouldn't
		 * be here if that's true since urgent data is delivered
		 * via putnext() and synchronous streams is stopped until
		 * tcp_fuse_rcv_drain() is finished.
		 */
		ASSERT(DB_TYPE(mp) == M_DATA && mp->b_next == NULL);

		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
		tcp->tcp_fuse_rcv_unread_cnt = 0;

		if (peer_tcp->tcp_flow_stopped &&
		    (TCP_UNSENT_BYTES(peer_tcp) <=
		    peer_tcp->tcp_xmit_lowater)) {
			tcp_clrqfull(peer_tcp);
			TCP_STAT(tcps, tcp_fusion_backenabled);
		}
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);
	/*
	 * Either we just dequeued everything or we get here from sockfs
	 * and have nothing to return; in this case clear RSLEEP.
	 */
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);
	ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
	STR_WAKEUP_CLEAR(STREAM(q));

	mutex_exit(&tcp->tcp_non_sq_lock);
	dp->d_mp = mp;
	return (0);
}

/*
 * Synchronous stream entry point used by certain ioctls to retrieve
 * information about or peek into the tcp_rcv_list.
 */
int
tcp_fuse_rinfop(queue_t *q, infod_t *dp)
{
	tcp_t	*tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t	*mp;
	uint_t	cmd = dp->d_cmd;
	int	res = 0;
	int	error = 0;
	struct stdata *stp = STREAM(q);

	mutex_enter(&tcp->tcp_non_sq_lock);
	/* If shutdown on read has happened, return nothing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag & STREOF) {
		mutex_exit(&stp->sd_lock);
		goto done;
	}
	mutex_exit(&stp->sd_lock);

	/*
	 * It is OK not to return an answer if tcp_rcv_list is
	 * currently not accessible.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped ||
	    tcp->tcp_fuse_syncstr_plugged || (mp = tcp->tcp_rcv_list) == NULL)
		goto done;

	if (cmd & INFOD_COUNT) {
		/*
		 * We have at least one message and
		 * could return only one at a time.
		 */
		dp->d_count++;
		res |= INFOD_COUNT;
	}
	if (cmd & INFOD_BYTES) {
		/*
		 * Return size of all data messages.
		 */
		dp->d_bytes += tcp->tcp_rcv_cnt;
		res |= INFOD_BYTES;
	}
	if (cmd & INFOD_FIRSTBYTES) {
		/*
		 * Return size of first data message.
		 */
		dp->d_bytes = msgdsize(mp);
		res |= INFOD_FIRSTBYTES;
		dp->d_cmd &= ~INFOD_FIRSTBYTES;
	}
	if (cmd & INFOD_COPYOUT) {
		mblk_t *mp1;
		int n;

		if (DB_TYPE(mp) == M_DATA) {
			mp1 = mp;
		} else {
			mp1 = mp->b_cont;
			ASSERT(mp1 != NULL);
		}

		/*
		 * Return data contents of first message.
		 */
		ASSERT(DB_TYPE(mp1) == M_DATA);
		while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
			n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
			if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
			    UIO_READ, dp->d_uiop)) != 0) {
				goto done;
			}
			mp1 = mp1->b_cont;
		}
		res |= INFOD_COPYOUT;
		dp->d_cmd &= ~INFOD_COPYOUT;
	}
done:
	mutex_exit(&tcp->tcp_non_sq_lock);

	dp->d_res |= res;

	return (error);
}

/*
 * Enable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_enable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	/* We can only enable synchronous streams for sockfs mode */
	tcp->tcp_direct_sockfs = tcp->tcp_issocket && do_tcp_direct_sockfs;

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * We replace our q_qinfo with one that has the qi_rwp entry point.
	 * Clear SR_SIGALLDATA because we generate the equivalent signal(s)
	 * for every enqueued message in tcp_fuse_output().
	 */
	rq->q_qinfo = &tcp_loopback_rinit;
	rq->q_struiot = tcp_loopback_rinit.qi_struiot;
	stp->sd_struiordq = rq;
	stp->sd_rput_opt &= ~SR_SIGALLDATA;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_disable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * Reset q_qinfo to point to the default tcp entry points.
	 * Also restore SR_SIGALLDATA so that strrput() can generate
	 * the signals again for future M_DATA messages.
	 */
	rq->q_qinfo = &tcp_rinitv4;	/* No open - same as rinitv6 */
	rq->q_struiot = tcp_rinitv4.qi_struiot;
	stp->sd_struiordq = NULL;
	stp->sd_rput_opt |= SR_SIGALLDATA;
	tcp->tcp_direct_sockfs = B_FALSE;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Enable synchronous streams on a pair of fused tcp endpoints.
 */
void
tcp_fuse_syncstr_enable_pair(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	tcp_fuse_syncstr_enable(tcp);
	tcp_fuse_syncstr_enable(peer_tcp);
}

/*
 * Allow or disallow signals to be generated by strrput().
 */
static void
strrput_sig(queue_t *q, boolean_t on)
{
	struct stdata *stp = STREAM(q);

	mutex_enter(&stp->sd_lock);
	if (on)
		stp->sd_flag &= ~STRGETINPROG;
	else
		stp->sd_flag |= STRGETINPROG;
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a pair of fused tcp endpoints and drain
 * any queued data; called either during unfuse or upon transitioning from
 * a socket to a stream endpoint due to _SIOCSOCKFALLBACK.
 */
void
tcp_fuse_disable_pair(tcp_t *tcp, boolean_t unfusing)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	/*
	 * Force any tcp_fuse_rrw() calls to block until we've moved the data
	 * onto the stream head.
	 */
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);

	/*
	 * Drain any pending data; the detached check is needed because
	 * we may be called as a result of a tcp_unfuse() triggered by
	 * tcp_fuse_output().  Note that in the case of a detached tcp, the
	 * draining will happen later after the tcp is unfused.  For non-
	 * urgent data, this can be handled by the regular tcp_rcv_drain().
	 * If we have urgent data sitting in the receive list, we will
	 * need to send up a SIGURG signal first before draining the data.
	 * All of these will be handled by the code in tcp_fuse_rcv_drain()
	 * when called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
		    (unfusing ? &tcp->tcp_fused_sigurg_mp : NULL));
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
		    (unfusing ? &peer_tcp->tcp_fused_sigurg_mp : NULL));
	}

	/*
	 * Make all current and future tcp_fuse_rrw() calls fail with EBUSY.
	 * To ensure threads don't sneak past the checks in tcp_fuse_rrw(),
	 * a given stream must be stopped prior to being unplugged (but the
	 * ordering of operations between the streams is unimportant).
	 */
	TCP_FUSE_SYNCSTR_STOP(tcp);
	TCP_FUSE_SYNCSTR_STOP(peer_tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);

	/* Lift up any flow-control conditions */
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}

	/* Disable synchronous streams */
	tcp_fuse_syncstr_disable(tcp);
	tcp_fuse_syncstr_disable(peer_tcp);
}

/*
 * Calculate the size of the receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);

	/* Ensure that value is within the maximum upper bound */
	if (rwnd > tcps->tcps_max_buf)
		rwnd = tcps->tcps_max_buf;

	/* Obey the absolute minimum tcp receive high water mark */
	if (rwnd < tcps->tcps_sth_rcv_hiwat)
		rwnd = tcps->tcps_sth_rcv_hiwat;

	/*
	 * Round up to system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	tcp->tcp_fuse_rcv_hiwater = rwnd;
	return (rwnd);
}

/*
 * Calculate the maxpsz and the limit on outstanding unread data blocks
 * for a fused tcp endpoint.
 */
int
tcp_fuse_maxpsz_set(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t sndbuf = tcp->tcp_xmit_hiwater;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for a more accurate flow-
	 * control accounting.  Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to system page size due to the lack of
	 * a TCP MSS concept in Fusion.
	 */
	if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
		maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

	/*
	 * Calculate the peer's limit for the number of outstanding unread
	 * data blocks.  This is the number of data blocks that are allowed
	 * to reside in the receiver's queue before the sender gets flow
	 * controlled.  It is used only in the synchronous streams mode as
	 * a way to throttle the sender when it performs consecutive writes
	 * faster than can be read.  The value is derived from SO_SNDBUF in
	 * order to give the sender some control; we divide it by a large
	 * value (16KB) to produce a fairly low initial limit.
	 */
	if (tcp_fusion_rcv_unread_min == 0) {
		/* A value of 0 means that we disable the check */
		peer_tcp->tcp_fuse_rcv_unread_hiwater = 0;
	} else {
		peer_tcp->tcp_fuse_rcv_unread_hiwater =
		    MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min);
	}
	return (maxpsz);
}