/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections.  The fusion of two local TCP endpoints occurs
 * at connection establishment time.  Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful.  If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.  After enqueueing
 * the data, the sender can either push (putnext) data up the receiver's
 * read queue, or the sender can simply return and let the receiver
 * retrieve the enqueued data via the synchronous streams entry point
 * tcp_fuse_rrw().  The latter path is taken if synchronous streams is
 * enabled (the default).  It is disabled if sockfs no longer resides
 * directly on top of the tcp module due to a module insertion or removal.
 * It also needs to be temporarily disabled when sending urgent data,
 * because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
 * by the strsock_proto() hook.
 *
 * Synchronization is handled by squeue and the mutex tcp_fuse_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue.  This ensures that neither side
 * can disappear while the other side is still sending data.  By itself,
 * squeue is not sufficient for guaranteeing safety when synchronous
 * streams is enabled.  The reason is that tcp_fuse_rrw() doesn't enter
 * the squeue, and its access to tcp_rcv_list and other fusion-related
 * fields needs to be synchronized with the sender.  tcp_fuse_lock is
 * used for this purpose.  When there is urgent data, the sender needs
 * to push the data up the receiver's streams read queue.  In order to
 * avoid holding the tcp_fuse_lock across putnext(), the sender sets
 * the peer tcp's tcp_fuse_syncstr_stopped bit and releases tcp_fuse_lock
 * (see macro TCP_FUSE_SYNCSTR_STOP()).  If tcp_fuse_rrw() enters after
 * this point, it will see that synchronous streams is temporarily
 * stopped and it will immediately return EBUSY without accessing the
 * tcp_rcv_list or other fields protected by the tcp_fuse_lock.  This
 * will result in strget() calling getq_noenab() to dequeue data from
 * the stream head instead.  After the sender has finished pushing up
 * all urgent data, it will clear the tcp_fuse_syncstr_stopped bit using
 * TCP_FUSE_SYNCSTR_RESUME and the receiver may then resume using
 * tcp_fuse_rrw() to retrieve data from tcp_rcv_list.
 *
 * The following note applies only to the synchronous streams mode.
 *
 * Flow control is done by checking the size of the receive buffer and
 * the number of data blocks, both set to different limits.  This is
 * different from regular streams flow control, where the cumulative
 * size check dominates the block count check -- a streams queue high
 * water mark typically represents bytes.  Each enqueue triggers
 * notifications to the receiving process; a buildup of data blocks
 * indicates a slow receiver, and the sender should be blocked or
 * informed at the earliest moment instead of further wasting system
 * resources.  In effect, this is equivalent to limiting the number
 * of outstanding segments in flight.
 */
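/*
 * The note above describes a dual-limit flow-control check.  As a
 * minimal sketch (a hypothetical helper, not part of the original
 * file and compiled out below), the decision reduces to tripping on
 * either the cumulative byte count or the outstanding block count:
 */
#ifdef TCP_FUSION_EXAMPLE
static boolean_t
tcp_fuse_example_flow_stop(uint_t rcv_cnt, uint_t rcv_hiwater,
    uint_t unread_cnt, uint_t max_unread)
{
	/* Stop the sender when either the byte or the block limit trips */
	return (rcv_cnt >= rcv_hiwater || unread_cnt >= max_unread);
}
#endif	/* TCP_FUSION_EXAMPLE */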
/*
 * Macros that determine whether or not IP processing is needed for TCP.
 */
#define	TCP_IPOPT_POLICY_V4(tcp)					\
	((tcp)->tcp_ipversion == IPV4_VERSION &&			\
	((tcp)->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH ||		\
	CONN_OUTBOUND_POLICY_PRESENT((tcp)->tcp_connp) ||		\
	CONN_INBOUND_POLICY_PRESENT((tcp)->tcp_connp)))

#define	TCP_IPOPT_POLICY_V6(tcp)					\
	((tcp)->tcp_ipversion == IPV6_VERSION &&			\
	((tcp)->tcp_ip_hdr_len != IPV6_HDR_LEN ||			\
	CONN_OUTBOUND_POLICY_PRESENT_V6((tcp)->tcp_connp) ||		\
	CONN_INBOUND_POLICY_PRESENT_V6((tcp)->tcp_connp)))

#define	TCP_LOOPBACK_IP(tcp)						\
	(TCP_IPOPT_POLICY_V4(tcp) || TCP_IPOPT_POLICY_V6(tcp) ||	\
	!CONN_IS_MD_FASTPATH((tcp)->tcp_connp))

/*
 * Setting this to false disables fusion altogether, so that loopback
 * connections go through the regular protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;

/*
 * Enabling this flag allows sockfs to retrieve data directly
 * from a fused tcp endpoint using the synchronous streams interface.
 */
boolean_t do_tcp_direct_sockfs = B_TRUE;

/*
 * This is the minimum number of outstanding writes allowed on
 * a synchronous streams-enabled receiving endpoint before the
 * sender gets flow-controlled.  Setting this value to 0 means
 * that the data block limit is equivalent to the byte count
 * limit, which essentially disables the check.
 */
#define	TCP_FUSION_RCV_UNREAD_MIN	8
uint_t tcp_fusion_rcv_unread_min = TCP_FUSION_RCV_UNREAD_MIN;

static void	tcp_fuse_syncstr_enable(tcp_t *);
static void	tcp_fuse_syncstr_disable(tcp_t *);
static void	strrput_sig(queue_t *, boolean_t);
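/*
 * Operational note (an aside, not from the original source): since
 * do_tcp_fusion and do_tcp_direct_sockfs above are plain kernel
 * variables, they can be tuned without a rebuild -- for instance,
 * fusion can be disabled persistently from /etc/system, e.g.
 *
 *	set ip:do_tcp_fusion = 0
 *
 * assuming this file is built into the "ip" module, or patched at
 * runtime with mdb -kw.
 */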
/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED.  It fuses a direct path between itself
 * and the active connect tcp such that the regular tcp processing
 * may be bypassed under allowable circumstances.  Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects, because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case, where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit q_hiwat of the listener tcp, but we can't
	 * really use tcp_listener since we get here after sending up
	 * T_CONN_IND and tcp_wput_accept() may be called independently,
	 * at which point tcp_listener is cleared; this is why we use
	 * tcp_saved_listener.  The listener itself is guaranteed to be
	 * around until tcp_accept_finish() is called on this eager --
	 * this won't happen until we're done since we're inside the
	 * eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);

	/*
	 * Look up the peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system.  The zone check
	 * is applied accordingly for the loopback address, but not for
	 * the local address, since we want fusion to happen across zones.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcph);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcph);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same
	 * squeue as our conn and is not a raw socket.  The squeue
	 * assignment of this eager tcp was done earlier at the time of
	 * SYN processing in ip_fanout_tcp{_v6}.  Note that sharing a
	 * squeue by itself doesn't guarantee a safe condition to fuse;
	 * hence we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 * In particular we bail out for non-simple TCP/IP or if IPsec/
	 * IPQoS policy/kernel SSL exists.
	 */
	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    !TCP_LOOPBACK_IP(tcp) && !TCP_LOOPBACK_IP(peer_tcp) &&
	    tcp->tcp_kssl_ent == NULL &&
	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN)) {
		mblk_t *mp;
		struct stroptions *stropt;
		queue_t *peer_rq = peer_tcp->tcp_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp) && peer_rq != NULL);
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(tcp->tcp_kssl_ctx == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints, which will only be used during/after unfuse.
		 */
		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		tcp->tcp_fused_sigurg_mp = mp;

		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		peer_tcp->tcp_fused_sigurg_mp = mp;

		/* Allocate M_SETOPTS mblk */
		if ((mp = allocb(sizeof (*stropt), BPRI_HI)) == NULL)
			goto failed;

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use the regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints.  Having
		 * them set to non-zero values means asking for trouble,
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly set up.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * At this point we are a detached eager tcp and therefore
		 * don't have a queue assigned to us until accept happens.
		 * In the meantime the peer endpoint may immediately send
		 * us data as soon as fusion is finished, and we need to be
		 * able to flow control it in case it sends down a huge
		 * amount of data while we're still detached.  To prevent
		 * that we inherit the listener's q_hiwat value; this is
		 * temporary since we'll repeat the process in
		 * tcp_accept_finish().
		 */
		(void) tcp_fuse_set_rcv_hiwat(tcp,
		    tcp->tcp_saved_listener->tcp_rq->q_hiwat);

		/*
		 * Set the stream head's write offset value to zero since we
		 * won't be needing any room for TCP/IP headers; tell it to
		 * not break up the writes (this would reduce the amount of
		 * work done by kmem); and configure our receive buffer.
		 * Note that we can only do this for the active connect tcp
		 * since our eager is still detached; it will be dealt with
		 * later in tcp_accept_finish().
		 */
		DB_TYPE(mp) = M_SETOPTS;
		mp->b_wptr += sizeof (*stropt);

		stropt = (struct stroptions *)mp->b_rptr;
		stropt->so_flags = SO_MAXBLK | SO_WROFF | SO_HIWAT;
		stropt->so_maxblk = tcp_maxpsz_set(peer_tcp, B_FALSE);
		stropt->so_wroff = 0;

		/*
		 * Record the stream head's high water mark for the
		 * peer endpoint; this is used for flow-control
		 * purposes in tcp_fuse_output().
		 */
		stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(peer_tcp,
		    peer_rq->q_hiwat);

		/* Send the options up */
		putnext(peer_rq, mp);
	} else {
		TCP_STAT(tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
	ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);

	/*
	 * We disable synchronous streams, drain any queued data and
	 * clear tcp_direct_sockfs.  The synchronous streams entry
	 * points will become no-ops after this point.
	 */
	tcp_fuse_disable_pair(tcp, B_TRUE);

	/*
	 * Update th_seq and th_ack in the header template
	 */
	U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
	U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
}

/*
 * Fusion output routine for urgent data.  This routine is called by
 * tcp_fuse_output() for handling non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer.  For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode.  This is similar to the
	 * urgent data handling in the regular tcp.  We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app.  There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp.  If new urgent data
	 * arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost.  This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcp_fusion_urg);
	BUMP_MIB(&tcp_mib, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove the existing T_EXDATA_IND, keep the data which
		 * follows it and relink our list.  Note that we don't
		 * modify tcp_rcv_last_tail since it never points to
		 * T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	queue_t *peer_rq;
	uint_t max_unread;
	boolean_t flow_stopped;
	boolean_t urgent = (DB_TYPE(mp) != M_DATA);

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);

	peer_rq = peer_tcp->tcp_rq;
	max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;

	/* If this connection requires IP, unfuse and use the regular path */
	if (TCP_LOOPBACK_IP(tcp) || TCP_LOOPBACK_IP(peer_tcp) ||
	    IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN)) {
		TCP_STAT(tcp_fusion_aborted);
		tcp_unfuse(tcp);
		return (B_FALSE);
	}

	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for the M_PCSIG mblk.
	 */
	if (urgent) {
		/*
		 * We stop synchronous streams when we have urgent data
		 * queued to prevent tcp_fuse_rrw() from pulling it.  If
		 * for some reason the urgent data can't be delivered
		 * below, synchronous streams will remain stopped until
		 * someone drains the tcp_rcv_list.
		 */
		TCP_FUSE_SYNCSTR_STOP(peer_tcp);
		tcp_fuse_output_urg(tcp, mp);
	}

	mutex_enter(&peer_tcp->tcp_fuse_lock);
	/*
	 * Wake up and signal the peer; it is okay to do this before
	 * enqueueing because we are holding the lock.  One of the
	 * advantages of synchronous streams is the ability for us to
	 * find out when the application performs a read on the socket,
	 * by way of the tcp_fuse_rrw() entry point being called.  All
	 * data that gets enqueued onto the receiver is treated as if
	 * it has arrived at the receiving endpoint, thus generating
	 * SIGPOLL/SIGIO for asynchronous sockets just as in the strrput()
	 * case.  However, we only wake up the application when necessary,
	 * i.e. during the first enqueue.  When tcp_fuse_rrw() is called
	 * it will send everything upstream.
	 */
	if (peer_tcp->tcp_direct_sockfs && !urgent &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		if (peer_tcp->tcp_rcv_list == NULL)
			STR_WAKEUP_SET(STREAM(peer_tcp->tcp_rq));
		/* Update poll events and send SIGPOLL/SIGIO if necessary */
		STR_SENDSIG(STREAM(peer_tcp->tcp_rq));
	}

	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 */
	tcp_rcv_enqueue(peer_tcp, mp, send_size);

	/* In case it wrapped around and also to keep it constant */
	peer_tcp->tcp_rwnd += send_size;

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in either tcp_accept_finish(), tcp_unfuse(), or tcp_fuse_rrw().
	 * If tcp_direct_sockfs is on or if the peer endpoint is detached,
	 * we emulate streams flow control by checking the peer's queue
	 * size and high water mark; otherwise we simply use canputnext()
	 * to decide if we need to stop our flow.
	 *
	 * The outstanding unread data block check does not apply for a
	 * detached receiver; this is to avoid unnecessary blocking of the
	 * sender while the accept is currently in progress and is quite
	 * similar to the regular tcp.
	 */
	if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
		max_unread = UINT_MAX;

	flow_stopped = tcp->tcp_flow_stopped;
	if (!flow_stopped &&
	    (((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater ||
	    ++peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread)) ||
	    (!peer_tcp->tcp_direct_sockfs &&
	    !TCP_IS_DETACHED(peer_tcp) && !canputnext(peer_tcp->tcp_rq)))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcp_fusion_flowctl);
		DTRACE_PROBE4(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt,
		    uint_t, peer_tcp->tcp_fuse_rcv_unread_cnt);
	} else if (flow_stopped &&
	    TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) {
		tcp_clrqfull(tcp);
	}

	loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += send_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	BUMP_MIB(&tcp_mib, tcpOutDataSegs);
	UPDATE_MIB(&tcp_mib, tcpOutDataBytes, send_size);

	BUMP_MIB(&tcp_mib, tcpInSegs);
	BUMP_MIB(&tcp_mib, tcpInDataInorderSegs);
	UPDATE_MIB(&tcp_mib, tcpInDataInorderBytes, send_size);

	BUMP_LOCAL(tcp->tcp_obsegs);
	BUMP_LOCAL(peer_tcp->tcp_ibsegs);

	mutex_exit(&peer_tcp->tcp_fuse_lock);

	DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t, send_size);

	if (!TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or
		 * if we're not flow-controlled.  There is no need for
		 * draining normal data when tcp_direct_sockfs is on,
		 * because the peer will pull the data via tcp_fuse_rrw().
		 */
		if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			(void) tcp_fuse_rcv_drain(peer_rq, peer_tcp, NULL);
			/*
			 * If synchronous streams was stopped above due
			 * to the presence of urgent data, re-enable it.
			 */
			if (urgent)
				TCP_FUSE_SYNCSTR_RESUME(peer_tcp);
		}
	}
	return (B_TRUE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that couldn't
 * be sent upstream in the past.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
#ifdef DEBUG
	uint_t cnt = 0;
#endif

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in the receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp, tcp_push_timer,
			    MSEC_TO_TICK(tcp_push_timer_interval));
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		tcp->tcp_fused_sigurg = B_FALSE;
		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);
		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/*
	 * In the synchronous streams case, we generate SIGPOLL/SIGIO for
	 * each M_DATA that gets enqueued onto the receiver.  At this point
	 * we are about to drain any queued data via putnext().  In order
	 * to avoid extraneous signal generation from strrput(), we set
	 * the STRGETINPROG flag at the stream head prior to the draining
	 * and restore it afterwards.  This masks out signal generation
	 * only for M_DATA messages and does not affect urgent data.
	 */
	if (tcp->tcp_direct_sockfs)
		strrput_sig(q, B_FALSE);

	/* Drain the data */
	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		putnext(q, mp);
		TCP_STAT(tcp_fusion_putnext);
	}

	if (tcp->tcp_direct_sockfs)
		strrput_sig(q, B_TRUE);

	ASSERT(cnt == tcp->tcp_rcv_cnt);
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;
	tcp->tcp_fuse_rcv_unread_cnt = 0;
	tcp->tcp_rwnd = q->q_hiwat;

	return (B_TRUE);
}
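/*
 * Sketch of the caller-side contract for the synchronous streams entry
 * point below (a hypothetical helper, not part of the original file and
 * compiled out): strget() in sockfs tries tcp_fuse_rrw() first and, on
 * EBUSY, falls back to dequeueing from the stream head via getq_noenab(),
 * picking up whatever was putnext()'ed while syncstr was stopped.
 */
#ifdef TCP_FUSION_EXAMPLE
static int
tcp_fuse_example_read(queue_t *q, struiod_t *dp)
{
	int error = tcp_fuse_rrw(q, dp);

	if (error == EBUSY) {
		/*
		 * Synchronous streams is off or temporarily stopped;
		 * the data, if any, sits on the stream head queue.
		 */
		error = 0;
	}
	return (error);
}
#endif	/* TCP_FUSION_EXAMPLE */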
/*
 * Synchronous streams entry point for sockfs to retrieve
 * data directly from tcp_rcv_list.
 */
int
tcp_fuse_rrw(queue_t *q, struiod_t *dp)
{
	tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t *mp;

	mutex_enter(&tcp->tcp_fuse_lock);
	/*
	 * If someone had turned off tcp_direct_sockfs or if synchronous
	 * streams is temporarily disabled, we return EBUSY.  This causes
	 * strget() to dequeue data from the stream head instead.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
		mutex_exit(&tcp->tcp_fuse_lock);
		TCP_STAT(tcp_fusion_rrw_busy);
		return (EBUSY);
	}

	if ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp_t *peer_tcp = tcp->tcp_loopback_peer;

		DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
		    uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);

		tcp->tcp_rcv_list = NULL;
		TCP_STAT(tcp_fusion_rrw_msgcnt);

		/*
		 * At this point nothing should be left in tcp_rcv_list.
		 * The only possible case where we would have a chain of
		 * b_next-linked messages is urgent data, but we wouldn't
		 * be here if that's true since urgent data is delivered
		 * via putnext() and synchronous streams is stopped until
		 * tcp_fuse_rcv_drain() is finished.
		 */
		ASSERT(DB_TYPE(mp) == M_DATA && mp->b_next == NULL);

		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
		tcp->tcp_fuse_rcv_unread_cnt = 0;

		if (peer_tcp->tcp_flow_stopped) {
			tcp_clrqfull(peer_tcp);
			TCP_STAT(tcp_fusion_backenabled);
		}
	}

	/*
	 * Either we just dequeued everything or we get here from sockfs
	 * and have nothing to return; in this case clear RSLEEP.
	 */
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);
	ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
	STR_WAKEUP_CLEAR(STREAM(q));

	mutex_exit(&tcp->tcp_fuse_lock);
	dp->d_mp = mp;
	return (0);
}

/*
 * Synchronous streams entry point used by certain ioctls to retrieve
 * information about or peek into the tcp_rcv_list.
 */
int
tcp_fuse_rinfop(queue_t *q, infod_t *dp)
{
	tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t *mp;
	uint_t cmd = dp->d_cmd;
	int res = 0;
	int error = 0;
	struct stdata *stp = STREAM(q);

	mutex_enter(&tcp->tcp_fuse_lock);
	/* If shutdown on read has happened, return nothing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag & STREOF) {
		mutex_exit(&stp->sd_lock);
		goto done;
	}
	mutex_exit(&stp->sd_lock);

	/*
	 * It is OK not to return an answer if tcp_rcv_list is
	 * currently not accessible.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped ||
	    (mp = tcp->tcp_rcv_list) == NULL)
		goto done;

	if (cmd & INFOD_COUNT) {
		/*
		 * We have at least one message and
		 * can return only one at a time.
		 */
		dp->d_count++;
		res |= INFOD_COUNT;
	}
	if (cmd & INFOD_BYTES) {
		/*
		 * Return the size of all data messages.
		 */
		dp->d_bytes += tcp->tcp_rcv_cnt;
		res |= INFOD_BYTES;
	}
	if (cmd & INFOD_FIRSTBYTES) {
		/*
		 * Return the size of the first data message.
		 */
		dp->d_bytes = msgdsize(mp);
		res |= INFOD_FIRSTBYTES;
		dp->d_cmd &= ~INFOD_FIRSTBYTES;
	}
	if (cmd & INFOD_COPYOUT) {
		mblk_t *mp1;
		int n;

		if (DB_TYPE(mp) == M_DATA) {
			mp1 = mp;
		} else {
			mp1 = mp->b_cont;
			ASSERT(mp1 != NULL);
		}

		/*
		 * Return the data contents of the first message.
		 */
		ASSERT(DB_TYPE(mp1) == M_DATA);
		while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
			n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
			if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
			    UIO_READ, dp->d_uiop)) != 0) {
				goto done;
			}
			mp1 = mp1->b_cont;
		}
		res |= INFOD_COPYOUT;
		dp->d_cmd &= ~INFOD_COPYOUT;
	}
done:
	mutex_exit(&tcp->tcp_fuse_lock);

	dp->d_res |= res;

	return (error);
}

/*
 * Enable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_enable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	/* We can only enable synchronous streams for sockfs mode */
	tcp->tcp_direct_sockfs = tcp->tcp_issocket && do_tcp_direct_sockfs;

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * We replace our q_qinfo with one that has the qi_rwp entry point.
	 * Clear SR_SIGALLDATA because we generate the equivalent signal(s)
	 * for every data enqueue in tcp_fuse_output().
	 */
	rq->q_qinfo = &tcp_loopback_rinit;
	rq->q_struiot = tcp_loopback_rinit.qi_struiot;
	stp->sd_struiordq = rq;
	stp->sd_rput_opt &= ~SR_SIGALLDATA;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_disable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * Reset q_qinfo to point to the default tcp entry points.
	 * Also restore SR_SIGALLDATA so that strrput() can generate
	 * the signals again for future M_DATA messages.
	 */
	rq->q_qinfo = &tcp_rinit;
	rq->q_struiot = tcp_rinit.qi_struiot;
	stp->sd_struiordq = NULL;
	stp->sd_rput_opt |= SR_SIGALLDATA;
	tcp->tcp_direct_sockfs = B_FALSE;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Enable synchronous streams on a pair of fused tcp endpoints.
 */
void
tcp_fuse_syncstr_enable_pair(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	tcp_fuse_syncstr_enable(tcp);
	tcp_fuse_syncstr_enable(peer_tcp);
}

/*
 * Allow or disallow signals to be generated by strrput().
 */
static void
strrput_sig(queue_t *q, boolean_t on)
{
	struct stdata *stp = STREAM(q);

	mutex_enter(&stp->sd_lock);
	if (on)
		stp->sd_flag &= ~STRGETINPROG;
	else
		stp->sd_flag |= STRGETINPROG;
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a pair of fused tcp endpoints and drain
 * any queued data; called either during unfuse or upon transitioning from
 * a socket to a stream endpoint due to _SIOCSOCKFALLBACK.
 */
void
tcp_fuse_disable_pair(tcp_t *tcp, boolean_t unfusing)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	/*
	 * We need to prevent tcp_fuse_rrw() from entering before
	 * we can disable synchronous streams.
	 */
	TCP_FUSE_SYNCSTR_STOP(tcp);
	TCP_FUSE_SYNCSTR_STOP(peer_tcp);

	/*
	 * Drain any pending data; the detached check is needed because
	 * we may be called as a result of a tcp_unfuse() triggered by
	 * tcp_fuse_output().  Note that in the case of a detached tcp, the
	 * draining will happen later after the tcp is unfused.  For non-
	 * urgent data, this can be handled by the regular tcp_rcv_drain().
	 * If we have urgent data sitting in the receive list, we will
	 * need to send up a SIGURG signal first before draining the data.
	 * All of these will be handled by the code in tcp_fuse_rcv_drain()
	 * when called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
		    (unfusing ? &tcp->tcp_fused_sigurg_mp : NULL));
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
		    (unfusing ? &peer_tcp->tcp_fused_sigurg_mp : NULL));
	}

	/* Lift any flow-control conditions */
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcp_fusion_backenabled);
	}
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcp_fusion_backenabled);
	}

	/* Disable synchronous streams */
	tcp_fuse_syncstr_disable(tcp);
	tcp_fuse_syncstr_disable(peer_tcp);
}

/*
 * Calculate the size of the receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	ASSERT(tcp->tcp_fused);

	/* Ensure that the value is within the maximum upper bound */
	if (rwnd > tcp_max_buf)
		rwnd = tcp_max_buf;

	/* Obey the absolute minimum tcp receive high water mark */
	if (rwnd < tcp_sth_rcv_hiwat)
		rwnd = tcp_sth_rcv_hiwat;

	/*
	 * Round up to the system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	tcp->tcp_fuse_rcv_hiwater = rwnd;
	return (rwnd);
}

/*
 * Calculate the maxpsz and the limit on outstanding unread data blocks
 * for a fused tcp endpoint.
 */
int
tcp_fuse_maxpsz_set(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t sndbuf = tcp->tcp_xmit_hiwater;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for a more accurate flow-
	 * control accounting.  Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to the system page size due to the lack
	 * of a TCP MSS concept in fusion.
	 */
	if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
		maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

	/*
	 * Calculate the peer's limit for the number of outstanding unread
	 * data blocks.  This is the number of data blocks that are allowed
	 * to reside in the receiver's queue before the sender gets flow
	 * controlled.  It is used only in the synchronous streams mode as
	 * a way to throttle the sender when it performs consecutive writes
	 * faster than can be read.
	 * The value is derived from SO_SNDBUF in order to give the sender
	 * some control; we divide it by a large value (16KB) to produce a
	 * fairly low initial limit.
	 */
	if (tcp_fusion_rcv_unread_min == 0) {
		/* A value of 0 means that we disable the check */
		peer_tcp->tcp_fuse_rcv_unread_hiwater = 0;
	} else {
		peer_tcp->tcp_fuse_rcv_unread_hiwater =
		    MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min);
	}
	return (maxpsz);
}
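/*
 * Worked example of the sizing logic above (hypothetical numbers, not
 * part of the original file and compiled out): with a 48K send buffer
 * and a 32K peer receive buffer, maxpsz is min(48K, 32K) rounded up to
 * PAGESIZE and halved, i.e. 16K with 4K or 8K pages; the peer's unread
 * block limit is MAX(48K >> 14, tcp_fusion_rcv_unread_min) = MAX(3, 8),
 * i.e. 8 outstanding data blocks.
 */
#ifdef TCP_FUSION_EXAMPLE
static uint_t
tcp_fuse_example_maxpsz(uint_t sndbuf, uint_t peer_rcv_hiwater)
{
	uint_t maxpsz = MIN(sndbuf, peer_rcv_hiwater);

	/* Mirror the computation in tcp_fuse_maxpsz_set() above */
	return (P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1);
}
#endif	/* TCP_FUSION_EXAMPLE */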