xref: /illumos-gate/usr/src/uts/common/inet/tcp/tcp_fusion.c (revision 54034eb2d6e7d811adf4a1fe5105eac6fea6b0b5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/stream.h>
28 #include <sys/strsun.h>
29 #include <sys/strsubr.h>
30 #include <sys/debug.h>
31 #include <sys/sdt.h>
32 #include <sys/cmn_err.h>
33 #include <sys/tihdr.h>
34 
35 #include <inet/common.h>
36 #include <inet/optcom.h>
37 #include <inet/ip.h>
38 #include <inet/ip_if.h>
39 #include <inet/ip_impl.h>
40 #include <inet/tcp.h>
41 #include <inet/tcp_impl.h>
42 #include <inet/ipsec_impl.h>
43 #include <inet/ipclassifier.h>
44 #include <inet/ipp_common.h>
45 #include <inet/ip_if.h>
46 
47 /*
48  * This file implements TCP fusion - a protocol-less data path for TCP
49  * loopback connections.  The fusion of two local TCP endpoints occurs
50  * at connection establishment time.  Various conditions (see details
51  * in tcp_fuse()) need to be met for fusion to be successful.  If it
52  * fails, we fall back to the regular TCP data path; if it succeeds,
53  * both endpoints proceed to use tcp_fuse_output() as the transmit path.
54  * tcp_fuse_output() enqueues application data directly onto the peer's
55  * receive queue; no protocol processing is involved.  After enqueueing
56  * the data, the sender can either push (putnext) data up the receiver's
57  * read queue; or the sender can simply return and let the receiver
58  * retrieve the enqueued data via the synchronous streams entry point
59  * tcp_fuse_rrw().  The latter path is taken if synchronous streams is
60  * enabled (the default).  It is disabled if sockfs no longer resides
61  * directly on top of the tcp module due to a module insertion or removal.
62  * It also needs to be temporarily disabled when sending urgent data
63  * because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
64  * by the strsock_proto() hook.
65  *
66  * Synchronization is handled by squeue and the mutex tcp_non_sq_lock.
67  * One of the requirements for fusion to succeed is that both endpoints
68  * need to be using the same squeue.  This ensures that neither side
69  * can disappear while the other side is still sending data.  By itself,
70  * squeue is not sufficient for guaranteeing safety when synchronous
71  * streams is enabled.  The reason is that tcp_fuse_rrw() doesn't enter
72  * the squeue and its access to tcp_rcv_list and other fusion-related
73  * fields needs to be synchronized with the sender.  tcp_non_sq_lock is
74  * used for this purpose.  When there is urgent data, the sender needs
75  * to push the data up the receiver's streams read queue.  In order to
76  * avoid holding the tcp_non_sq_lock across putnext(), the sender sets
77  * the peer tcp's tcp_fuse_syncstr_plugged bit and releases tcp_non_sq_lock
78  * (see macro TCP_FUSE_SYNCSTR_PLUG_DRAIN()).  If tcp_fuse_rrw() enters
79  * after this point, it will see that synchronous streams is plugged and
80  * will wait on tcp_fuse_plugcv.  After the sender has finished pushing up
81  * all urgent data, it will clear the tcp_fuse_syncstr_plugged bit using
82  * TCP_FUSE_SYNCSTR_UNPLUG_DRAIN().  This will cause any threads waiting
83  * on tcp_fuse_plugcv to return EBUSY, and in turn cause strget() to call
84  * getq_noenab() to dequeue data from the stream head instead.  Once the
85  * data on the stream head has been consumed, tcp_fuse_rrw() may again
86  * be used to process tcp_rcv_list.  However, if TCP_FUSE_SYNCSTR_STOP()
87  * has been called, all future calls to tcp_fuse_rrw() will return EBUSY,
88  * effectively disabling synchronous streams.
89  *
90  * The following note applies only to the synchronous streams mode.
91  *
92  * Flow control is done by checking the size of receive buffer and
93  * the number of data blocks, both set to different limits.  This is
94  * different than regular streams flow control where cumulative size
95  * check dominates block count check -- streams queue high water mark
96  * typically represents bytes.  Each enqueue triggers notifications
97  * to the receiving process; a build up of data blocks indicates a
98  * slow receiver and the sender should be blocked or informed at the
99  * earliest moment instead of further wasting system resources.  In
100  * effect, this is equivalent to limiting the number of outstanding
101  * segments in flight.
102  */
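/*
 * A hypothetical worked example of the two limits described above (values
 * chosen only for illustration, not actual defaults): suppose the peer's
 * tcp_fuse_rcv_hiwater is 64K and its derived tcp_fuse_rcv_unread_hiwater
 * is 8.  A sender issuing many tiny writes is throttled by the block-count
 * check first: once the 8th mblk is enqueued, tcp_fuse_output() sees
 * tcp_fuse_rcv_unread_cnt >= 8 and calls tcp_setqfull() even though only a
 * handful of bytes are queued.  A sender issuing a few large writes is
 * instead throttled by the byte check once tcp_rcv_cnt reaches 64K.
 * Regular streams flow control would apply only the byte-based high water
 * mark.
 */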
103 
104 /*
105  * Setting this to B_FALSE disables fusion altogether; loopback
106  * connections will then go through the regular protocol paths.
107  */
108 boolean_t do_tcp_fusion = B_TRUE;
109 
110 /*
111  * Enabling this flag allows sockfs to retrieve data directly
112  * from a fused tcp endpoint using synchronous streams interface.
113  */
114 boolean_t do_tcp_direct_sockfs = B_FALSE;
115 
116 /*
117  * This is the minimum number of outstanding writes allowed on
118  * a synchronous streams-enabled receiving endpoint before the
119  * sender gets flow-controlled.  Setting this value to 0 means
120  * that the data block limit is equivalent to the byte count
121  * limit, which essentially disables the check.
122  */
123 #define	TCP_FUSION_RCV_UNREAD_MIN	8
124 uint_t tcp_fusion_rcv_unread_min = TCP_FUSION_RCV_UNREAD_MIN;
125 
126 static void		tcp_fuse_syncstr_enable(tcp_t *);
127 static void		tcp_fuse_syncstr_disable(tcp_t *);
128 static boolean_t	strrput_sig(queue_t *, boolean_t);
129 
130 /*
131  * Return true if this connection needs some IP functionality
132  */
133 static boolean_t
134 tcp_loopback_needs_ip(tcp_t *tcp, netstack_t *ns)
135 {
136 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
137 
138 	/*
139 	 * If ire is not cached, do not use fusion
140 	 */
141 	if (tcp->tcp_connp->conn_ire_cache == NULL) {
142 		/*
143 		 * There is no need to hold conn_lock here because when called
144 		 * from tcp_fuse() there can be no window where conn_ire_cache
145 		 * can change. This is not true when called from
146 		 * tcp_fuse_output() as conn_ire_cache can become null just
147 		 * after the check. It will be necessary to recheck for a NULL
148 		 * conn_ire_cache in tcp_fuse_output() to avoid passing a
149 		 * stale ill pointer to FW_HOOKS.
150 		 */
151 		return (B_TRUE);
152 	}
153 	if (tcp->tcp_ipversion == IPV4_VERSION) {
154 		if (tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH)
155 			return (B_TRUE);
156 		if (CONN_OUTBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
157 			return (B_TRUE);
158 		if (CONN_INBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
159 			return (B_TRUE);
160 	} else {
161 		if (tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)
162 			return (B_TRUE);
163 		if (CONN_OUTBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
164 			return (B_TRUE);
165 		if (CONN_INBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
166 			return (B_TRUE);
167 	}
168 	if (!CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp))
169 		return (B_TRUE);
170 	return (B_FALSE);
171 }
172 
173 
174 /*
175  * This routine gets called by the eager tcp upon changing state from
176  * SYN_RCVD to ESTABLISHED.  It fuses a direct path between itself
177  * and the active connect tcp such that the regular tcp processing
178  * may be bypassed under allowable circumstances.  Because the fusion
179  * requires both endpoints to be in the same squeue, it does not work
180  * for simultaneous active connects because there is no easy way to
181  * switch from one squeue to another once the connection is created.
182  * This is different from the eager tcp case where we assign it the
183  * same squeue as the one given to the active connect tcp during open.
184  */
185 void
186 tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
187 {
188 	conn_t *peer_connp, *connp = tcp->tcp_connp;
189 	tcp_t *peer_tcp;
190 	tcp_stack_t	*tcps = tcp->tcp_tcps;
191 	netstack_t	*ns;
192 	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;
193 
194 	ASSERT(!tcp->tcp_fused);
195 	ASSERT(tcp->tcp_loopback);
196 	ASSERT(tcp->tcp_loopback_peer == NULL);
197 	/*
198 	 * We need to inherit q_hiwat of the listener tcp, but we can't
199 	 * really use tcp_listener since we get here after sending up
200 	 * T_CONN_IND and tcp_wput_accept() may be called independently,
201 	 * at which point tcp_listener is cleared; this is why we use
202 	 * tcp_saved_listener.  The listener itself is guaranteed to be
203 	 * around until tcp_accept_finish() is called on this eager --
204 	 * this won't happen until we're done since we're inside the
205 	 * eager's perimeter now.
206 	 *
207 	 * We can also get called in the case where a connection needs
208 	 * to be re-fused. In this case tcp_saved_listener will be
209 	 * NULL but tcp_refuse will be true.
210 	 */
211 	ASSERT(tcp->tcp_saved_listener != NULL || tcp->tcp_refuse);
212 	/*
213 	 * Lookup peer endpoint; search for the remote endpoint having
214 	 * the reversed address-port quadruplet in ESTABLISHED state,
215 	 * which is guaranteed to be unique in the system.  Zone check
216 	 * is applied accordingly for loopback address, but not for
217 	 * local address since we want fusion to happen across Zones.
218 	 */
219 	if (tcp->tcp_ipversion == IPV4_VERSION) {
220 		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
221 		    (ipha_t *)iphdr, tcph, ipst);
222 	} else {
223 		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
224 		    (ip6_t *)iphdr, tcph, ipst);
225 	}
226 
227 	/*
228 	 * We can only proceed if peer exists, resides in the same squeue
229 	 * as our conn and is not a raw socket.  The squeue assignment of
230 	 * this eager tcp was done earlier at the time of SYN processing
231 	 * in ip_fanout_tcp{_v6}.  Note that a shared squeue by itself
232 	 * doesn't guarantee a safe condition to fuse, hence we perform
233 	 * additional tests below.
234 	 */
235 	ASSERT(peer_connp == NULL || peer_connp != connp);
236 	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
237 	    !IPCL_IS_TCP(peer_connp)) {
238 		if (peer_connp != NULL) {
239 			TCP_STAT(tcps, tcp_fusion_unqualified);
240 			CONN_DEC_REF(peer_connp);
241 		}
242 		return;
243 	}
244 	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */
245 
246 	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
247 	ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
248 	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);
249 
250 	/*
251 	 * Fuse the endpoints; we perform further checks against both
252 	 * tcp endpoints to ensure that a fusion is allowed to happen.
253 	 * In particular we bail out for non-simple TCP/IP or if IPsec/
254 	 * IPQoS policy/kernel SSL exists. We also need to check if
255 	 * the connection is quiescent to cover the case when we are
256 	 * trying to re-enable fusion after IP observability is turned off.
257 	 */
258 	ns = tcps->tcps_netstack;
259 	ipst = ns->netstack_ip;
260 
261 	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
262 	    !tcp_loopback_needs_ip(tcp, ns) &&
263 	    !tcp_loopback_needs_ip(peer_tcp, ns) &&
264 	    tcp->tcp_kssl_ent == NULL &&
265 	    tcp->tcp_xmit_head == NULL && peer_tcp->tcp_xmit_head == NULL &&
266 	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
267 		mblk_t *mp;
268 		queue_t *peer_rq = peer_tcp->tcp_rq;
269 
270 		ASSERT(!TCP_IS_DETACHED(peer_tcp));
271 		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
272 		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
273 		ASSERT(tcp->tcp_kssl_ctx == NULL);
274 
275 		/*
276 		 * We need to drain data on both endpoints during unfuse.
277 		 * If we need to send up SIGURG at the time of draining,
278 		 * we want to be sure that an mblk is readily available.
279 		 * This is why we pre-allocate the M_PCSIG mblks for both
280 		 * endpoints which will only be used during/after unfuse.
281 		 */
282 		if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
283 			if ((mp = allocb(1, BPRI_HI)) == NULL)
284 				goto failed;
285 
286 			tcp->tcp_fused_sigurg_mp = mp;
287 		}
288 
289 		if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp)) {
290 			if ((mp = allocb(1, BPRI_HI)) == NULL)
291 				goto failed;
292 
293 			peer_tcp->tcp_fused_sigurg_mp = mp;
294 		}
295 
296 		if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
297 		    (mp = allocb(sizeof (struct stroptions),
298 		    BPRI_HI)) == NULL) {
299 			goto failed;
300 		}
301 
302 		/* If either tcp or peer_tcp has sodirect enabled then disable it */
303 		if (tcp->tcp_sodirect != NULL) {
304 			mutex_enter(tcp->tcp_sodirect->sod_lockp);
305 			SOD_DISABLE(tcp->tcp_sodirect);
306 			mutex_exit(tcp->tcp_sodirect->sod_lockp);
307 			tcp->tcp_sodirect = NULL;
308 		}
309 		if (peer_tcp->tcp_sodirect != NULL) {
310 			mutex_enter(peer_tcp->tcp_sodirect->sod_lockp);
311 			SOD_DISABLE(peer_tcp->tcp_sodirect);
312 			mutex_exit(peer_tcp->tcp_sodirect->sod_lockp);
313 			peer_tcp->tcp_sodirect = NULL;
314 		}
315 
316 		/* Fuse both endpoints */
317 		peer_tcp->tcp_loopback_peer = tcp;
318 		tcp->tcp_loopback_peer = peer_tcp;
319 		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;
320 
321 		/*
322 		 * We never use regular tcp paths in fusion and should
323 		 * therefore clear tcp_unsent on both endpoints.  Having
324 		 * them set to non-zero values means asking for trouble
325 		 * especially after unfuse, where we may end up sending
326 		 * through regular tcp paths which expect xmit_list and
327 		 * friends to be correctly setup.
328 		 */
329 		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;
330 
331 		tcp_timers_stop(tcp);
332 		tcp_timers_stop(peer_tcp);
333 
334 		/*
335 		 * At this point we are a detached eager tcp and therefore
336 		 * don't have a queue assigned to us until accept happens.
337 		 * In the meantime the peer endpoint may immediately send
338 		 * us data as soon as fusion is finished, and we need to be
339 		 * able to flow control it in case it sends down a huge amount
340 		 * of data while we're still detached.  To prevent that we
341 		 * inherit the listener's recv_hiwater value; this is temporary
342 		 * since we'll repeat the process in tcp_accept_finish().
343 		 */
344 		if (!tcp->tcp_refuse) {
345 			(void) tcp_fuse_set_rcv_hiwat(tcp,
346 			    tcp->tcp_saved_listener->tcp_recv_hiwater);
347 
348 			/*
349 			 * Set the stream head's write offset value to zero
350 			 * since we won't be needing any room for TCP/IP
351 			 * headers; tell it to not break up the writes (this
352 			 * would reduce the amount of work done by kmem); and
353 			 * configure our receive buffer. Note that we can only
354 			 * do this for the active connect tcp since our eager is
355 			 * still detached; it will be dealt with later in
356 			 * tcp_accept_finish().
357 			 */
358 			if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp)) {
359 				struct stroptions *stropt;
360 
361 				DB_TYPE(mp) = M_SETOPTS;
362 				mp->b_wptr += sizeof (*stropt);
363 
364 				stropt = (struct stroptions *)mp->b_rptr;
365 				stropt->so_flags = SO_MAXBLK|SO_WROFF|SO_HIWAT;
366 				stropt->so_maxblk = tcp_maxpsz_set(peer_tcp,
367 				    B_FALSE);
368 				stropt->so_wroff = 0;
369 
370 				/*
371 				 * Record the stream head's high water mark for
372 				 * peer endpoint; this is used for flow-control
373 				 * purposes in tcp_fuse_output().
374 				 */
375 				stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(
376 				    peer_tcp, peer_rq->q_hiwat);
377 
378 				tcp->tcp_refuse = B_FALSE;
379 				peer_tcp->tcp_refuse = B_FALSE;
380 				/* Send the options up */
381 				putnext(peer_rq, mp);
382 			} else {
383 				struct sock_proto_props sopp;
384 
385 				/* The peer is a non-STREAMS end point */
386 				ASSERT(IPCL_IS_TCP(peer_connp));
387 
388 				(void) tcp_fuse_set_rcv_hiwat(tcp,
389 				    tcp->tcp_saved_listener->tcp_recv_hiwater);
390 
391 				sopp.sopp_flags = SOCKOPT_MAXBLK |
392 				    SOCKOPT_WROFF | SOCKOPT_RCVHIWAT;
393 				sopp.sopp_maxblk = tcp_maxpsz_set(peer_tcp,
394 				    B_FALSE);
395 				sopp.sopp_wroff = 0;
396 				sopp.sopp_rxhiwat = tcp_fuse_set_rcv_hiwat(
397 				    peer_tcp, peer_tcp->tcp_recv_hiwater);
398 				(*peer_connp->conn_upcalls->su_set_proto_props)
399 				    (peer_connp->conn_upper_handle, &sopp);
400 			}
401 		}
402 		tcp->tcp_refuse = B_FALSE;
403 		peer_tcp->tcp_refuse = B_FALSE;
404 	} else {
405 		TCP_STAT(tcps, tcp_fusion_unqualified);
406 	}
407 	CONN_DEC_REF(peer_connp);
408 	return;
409 
410 failed:
411 	if (tcp->tcp_fused_sigurg_mp != NULL) {
412 		freeb(tcp->tcp_fused_sigurg_mp);
413 		tcp->tcp_fused_sigurg_mp = NULL;
414 	}
415 	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
416 		freeb(peer_tcp->tcp_fused_sigurg_mp);
417 		peer_tcp->tcp_fused_sigurg_mp = NULL;
418 	}
419 	CONN_DEC_REF(peer_connp);
420 }
421 
422 /*
423  * Unfuse a previously-fused pair of tcp loopback endpoints.
424  */
425 void
426 tcp_unfuse(tcp_t *tcp)
427 {
428 	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
429 
430 	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
431 	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
432 	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
433 	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
434 
435 	/*
436 	 * We disable synchronous streams, drain any queued data and
437 	 * clear tcp_direct_sockfs.  The synchronous streams entry
438 	 * points will become no-ops after this point.
439 	 */
440 	tcp_fuse_disable_pair(tcp, B_TRUE);
441 
442 	/*
443 	 * Update th_seq and th_ack in the header template
444 	 */
445 	U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
446 	U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
447 	U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
448 	U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);
449 
450 	/* Unfuse the endpoints */
451 	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
452 	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
453 	if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp)) {
454 		ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);
455 		freeb(peer_tcp->tcp_fused_sigurg_mp);
456 		peer_tcp->tcp_fused_sigurg_mp = NULL;
457 	}
458 	if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
459 		ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
460 		freeb(tcp->tcp_fused_sigurg_mp);
461 		tcp->tcp_fused_sigurg_mp = NULL;
462 	}
463 }
464 
465 /*
466  * Fusion output routine for urgent data.  This routine is called by
467  * tcp_fuse_output() for handling non-M_DATA mblks.
468  */
469 void
470 tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
471 {
472 	mblk_t *mp1;
473 	struct T_exdata_ind *tei;
474 	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
475 	mblk_t *head, *prev_head = NULL;
476 	tcp_stack_t	*tcps = tcp->tcp_tcps;
477 
478 	ASSERT(tcp->tcp_fused);
479 	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
480 	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
481 	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
482 	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);
483 
484 	/*
485 	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
486 	 * Each occurrence denotes a new urgent pointer.  For each new
487 	 * urgent pointer we signal (SIGURG) the receiving app to indicate
488 	 * that it needs to go into urgent mode.  This is similar to the
489 	 * urgent data handling in the regular tcp.  We don't need to keep
490 	 * track of where the urgent pointer is, because each T_EXDATA_REQ
491 	 * "advances" the urgent pointer for us.
492 	 *
493 	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
494 	 * by a T_EXDATA_IND before being enqueued behind any existing data
495 	 * destined for the receiving app.  There is only a single urgent
496 	 * pointer (out-of-band mark) for a given tcp.  If the new urgent
497 	 * data arrives before the receiving app reads some existing urgent
498 	 * data, the previous marker is lost.  This behavior is emulated
499 	 * accordingly below, by removing any existing T_EXDATA_IND messages
500 	 * and essentially converting old urgent data into non-urgent.
501 	 */
502 	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
503 	/* Let sender get out of urgent mode */
504 	tcp->tcp_valid_bits &= ~TCP_URG_VALID;
505 
506 	/*
507 	 * This flag indicates that a signal needs to be sent up.
508 	 * This flag will only get cleared once SIGURG is delivered and
509 	 * is not affected by the tcp_fused flag -- delivery will still
510 	 * happen even after an endpoint is unfused, to handle the case
511 	 * where the sending endpoint immediately closes/unfuses after
512 	 * sending urgent data and the accept is not yet finished.
513 	 */
514 	peer_tcp->tcp_fused_sigurg = B_TRUE;
515 
516 	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
517 	DB_TYPE(mp) = M_PROTO;
518 	tei = (struct T_exdata_ind *)mp->b_rptr;
519 	tei->PRIM_type = T_EXDATA_IND;
520 	tei->MORE_flag = 0;
521 	mp->b_wptr = (uchar_t *)&tei[1];
522 
523 	TCP_STAT(tcps, tcp_fusion_urg);
524 	BUMP_MIB(&tcps->tcps_mib, tcpOutUrg);
525 
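	/*
	 * Illustrative sketch of the list surgery below (hypothetical list
	 * contents): if the peer's receive list currently looks like
	 *
	 *	[T_EXDATA_IND]--b_cont-->[old urg data]  --b_next-->  [data]
	 *
	 * then after the loop it becomes
	 *
	 *	[old urg data]  --b_next-->  [data]
	 *
	 * i.e. each old T_EXDATA_IND wrapper is freed and its M_DATA payload
	 * is spliced into the b_next chain, demoting previously-urgent data
	 * to ordinary data before the new T_EXDATA_IND gets enqueued.
	 */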
526 	head = peer_tcp->tcp_rcv_list;
527 	while (head != NULL) {
528 		/*
529 		 * Remove existing T_EXDATA_IND, keep the data which follows
530 		 * it and relink our list.  Note that we don't modify the
531 		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
532 		 */
533 		if (DB_TYPE(head) != M_DATA) {
534 			mp1 = head;
535 
536 			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
537 			head = mp1->b_cont;
538 			mp1->b_cont = NULL;
539 			head->b_next = mp1->b_next;
540 			mp1->b_next = NULL;
541 			if (prev_head != NULL)
542 				prev_head->b_next = head;
543 			if (peer_tcp->tcp_rcv_list == mp1)
544 				peer_tcp->tcp_rcv_list = head;
545 			if (peer_tcp->tcp_rcv_last_head == mp1)
546 				peer_tcp->tcp_rcv_last_head = head;
547 			freeb(mp1);
548 		}
549 		prev_head = head;
550 		head = head->b_next;
551 	}
552 }
553 
554 /*
555  * Fusion output routine, called by tcp_output() and tcp_wput_proto().
556  * If we are modifying any member that can be changed outside the squeue,
557  * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
558  */
559 boolean_t
560 tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
561 {
562 	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
563 	uint_t max_unread;
564 	boolean_t flow_stopped, peer_data_queued = B_FALSE;
565 	boolean_t urgent = (DB_TYPE(mp) != M_DATA);
566 	boolean_t push = B_TRUE;
567 	mblk_t *mp1 = mp;
568 	ill_t *ilp, *olp;
569 	ipif_t *iifp, *oifp;
570 	ipha_t *ipha;
571 	ip6_t *ip6h;
572 	tcph_t *tcph;
573 	uint_t ip_hdr_len;
574 	uint32_t seq;
575 	uint32_t recv_size = send_size;
576 	tcp_stack_t	*tcps = tcp->tcp_tcps;
577 	netstack_t	*ns = tcps->tcps_netstack;
578 	ip_stack_t	*ipst = ns->netstack_ip;
579 
580 	ASSERT(tcp->tcp_fused);
581 	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
582 	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
583 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
584 	    DB_TYPE(mp) == M_PCPROTO);
585 
586 	/* If this connection requires IP, unfuse and use regular path */
587 	if (tcp_loopback_needs_ip(tcp, ns) ||
588 	    tcp_loopback_needs_ip(peer_tcp, ns) ||
589 	    IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst) ||
590 	    list_head(&ipst->ips_ipobs_cb_list) != NULL) {
591 		TCP_STAT(tcps, tcp_fusion_aborted);
592 		tcp->tcp_refuse = B_TRUE;
593 		peer_tcp->tcp_refuse = B_TRUE;
594 
595 		bcopy(peer_tcp->tcp_tcph, &tcp->tcp_saved_tcph,
596 		    sizeof (tcph_t));
597 		bcopy(tcp->tcp_tcph, &peer_tcp->tcp_saved_tcph,
598 		    sizeof (tcph_t));
599 		if (tcp->tcp_ipversion == IPV4_VERSION) {
600 			bcopy(peer_tcp->tcp_ipha, &tcp->tcp_saved_ipha,
601 			    sizeof (ipha_t));
602 			bcopy(tcp->tcp_ipha, &peer_tcp->tcp_saved_ipha,
603 			    sizeof (ipha_t));
604 		} else {
605 			bcopy(peer_tcp->tcp_ip6h, &tcp->tcp_saved_ip6h,
606 			    sizeof (ip6_t));
607 			bcopy(tcp->tcp_ip6h, &peer_tcp->tcp_saved_ip6h,
608 			    sizeof (ip6_t));
609 		}
610 		goto unfuse;
611 	}
612 
613 	if (send_size == 0) {
614 		freemsg(mp);
615 		return (B_TRUE);
616 	}
617 	max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;
618 
619 	/*
620 	 * Handle urgent data; we either send up SIGURG to the peer now
621 	 * or do it later when we drain, in case the peer is detached
622 	 * or if we're short of memory for an M_PCSIG mblk.
623 	 */
624 	if (urgent) {
625 		/*
626 		 * We stop synchronous streams when we have urgent data
627 		 * queued to prevent tcp_fuse_rrw() from pulling it.  If
628 		 * for some reason the urgent data can't be delivered
629 		 * below, synchronous streams will remain stopped until
630 		 * someone drains the tcp_rcv_list.
631 		 */
632 		TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);
633 		tcp_fuse_output_urg(tcp, mp);
634 
635 		mp1 = mp->b_cont;
636 	}
637 
638 	if (tcp->tcp_ipversion == IPV4_VERSION &&
639 	    (HOOKS4_INTERESTED_LOOPBACK_IN(ipst) ||
640 	    HOOKS4_INTERESTED_LOOPBACK_OUT(ipst)) ||
641 	    tcp->tcp_ipversion == IPV6_VERSION &&
642 	    (HOOKS6_INTERESTED_LOOPBACK_IN(ipst) ||
643 	    HOOKS6_INTERESTED_LOOPBACK_OUT(ipst))) {
644 		/*
645 		 * Build ip and tcp header to satisfy FW_HOOKS.
646 		 * We only build it when any hook is present.
647 		 */
648 		if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
649 		    tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
650 			/* If tcp_xmit_mp fails, use regular path */
651 			goto unfuse;
652 
653 		/*
654 		 * The ipif and ill can be safely referenced under the
655 		 * protection of conn_lock - see head of function comment for
656 		 * conn_get_held_ipif(). It is necessary to check that both
657 		 * the ipif and ill can be looked up (i.e. not condemned). If
658 		 * not, bail out and unfuse this connection.
659 		 */
660 		mutex_enter(&peer_tcp->tcp_connp->conn_lock);
661 		if ((peer_tcp->tcp_connp->conn_ire_cache == NULL) ||
662 		    (peer_tcp->tcp_connp->conn_ire_cache->ire_marks &
663 		    IRE_MARK_CONDEMNED) ||
664 		    ((oifp = peer_tcp->tcp_connp->conn_ire_cache->ire_ipif)
665 		    == NULL) ||
666 		    (!IPIF_CAN_LOOKUP(oifp)) ||
667 		    ((olp = oifp->ipif_ill) == NULL) ||
668 		    (ill_check_and_refhold(olp) != 0)) {
669 			mutex_exit(&peer_tcp->tcp_connp->conn_lock);
670 			goto unfuse;
671 		}
672 		mutex_exit(&peer_tcp->tcp_connp->conn_lock);
673 
674 		/* PFHooks: LOOPBACK_OUT */
675 		if (tcp->tcp_ipversion == IPV4_VERSION) {
676 			ipha = (ipha_t *)mp1->b_rptr;
677 
678 			DTRACE_PROBE4(ip4__loopback__out__start,
679 			    ill_t *, NULL, ill_t *, olp,
680 			    ipha_t *, ipha, mblk_t *, mp1);
681 			FW_HOOKS(ipst->ips_ip4_loopback_out_event,
682 			    ipst->ips_ipv4firewall_loopback_out,
683 			    NULL, olp, ipha, mp1, mp1, 0, ipst);
684 			DTRACE_PROBE1(ip4__loopback__out__end, mblk_t *, mp1);
685 		} else {
686 			ip6h = (ip6_t *)mp1->b_rptr;
687 
688 			DTRACE_PROBE4(ip6__loopback__out__start,
689 			    ill_t *, NULL, ill_t *, olp,
690 			    ip6_t *, ip6h, mblk_t *, mp1);
691 			FW_HOOKS6(ipst->ips_ip6_loopback_out_event,
692 			    ipst->ips_ipv6firewall_loopback_out,
693 			    NULL, olp, ip6h, mp1, mp1, 0, ipst);
694 			DTRACE_PROBE1(ip6__loopback__out__end, mblk_t *, mp1);
695 		}
696 		ill_refrele(olp);
697 
698 		if (mp1 == NULL)
699 			goto unfuse;
700 
701 		/*
702 		 * The ipif and ill can be safely referenced under the
703 		 * protection of conn_lock - see head of function comment for
704 		 * conn_get_held_ipif(). It is necessary to check that both
705 		 * the ipif and ill can be looked up (i.e. not condemned). If
706 		 * not, bail out and unfuse this connection.
707 		 */
708 		mutex_enter(&tcp->tcp_connp->conn_lock);
709 		if ((tcp->tcp_connp->conn_ire_cache == NULL) ||
710 		    (tcp->tcp_connp->conn_ire_cache->ire_marks &
711 		    IRE_MARK_CONDEMNED) ||
712 		    ((iifp = tcp->tcp_connp->conn_ire_cache->ire_ipif)
713 		    == NULL) ||
714 		    (!IPIF_CAN_LOOKUP(iifp)) ||
715 		    ((ilp = iifp->ipif_ill) == NULL) ||
716 		    (ill_check_and_refhold(ilp) != 0)) {
717 			mutex_exit(&tcp->tcp_connp->conn_lock);
718 			goto unfuse;
719 		}
720 		mutex_exit(&tcp->tcp_connp->conn_lock);
721 
722 		/* PFHooks: LOOPBACK_IN */
723 		if (tcp->tcp_ipversion == IPV4_VERSION) {
724 			DTRACE_PROBE4(ip4__loopback__in__start,
725 			    ill_t *, ilp, ill_t *, NULL,
726 			    ipha_t *, ipha, mblk_t *, mp1);
727 			FW_HOOKS(ipst->ips_ip4_loopback_in_event,
728 			    ipst->ips_ipv4firewall_loopback_in,
729 			    ilp, NULL, ipha, mp1, mp1, 0, ipst);
730 			DTRACE_PROBE1(ip4__loopback__in__end, mblk_t *, mp1);
731 			ill_refrele(ilp);
732 			if (mp1 == NULL)
733 				goto unfuse;
734 
735 			ip_hdr_len = IPH_HDR_LENGTH(ipha);
736 		} else {
737 			DTRACE_PROBE4(ip6__loopback__in__start,
738 			    ill_t *, ilp, ill_t *, NULL,
739 			    ip6_t *, ip6h, mblk_t *, mp1);
740 			FW_HOOKS6(ipst->ips_ip6_loopback_in_event,
741 			    ipst->ips_ipv6firewall_loopback_in,
742 			    ilp, NULL, ip6h, mp1, mp1, 0, ipst);
743 			DTRACE_PROBE1(ip6__loopback__in__end, mblk_t *, mp1);
744 			ill_refrele(ilp);
745 			if (mp1 == NULL)
746 				goto unfuse;
747 
748 			ip_hdr_len = ip_hdr_length_v6(mp1, ip6h);
749 		}
750 
751 		/* Data length might be changed by FW_HOOKS */
752 		tcph = (tcph_t *)&mp1->b_rptr[ip_hdr_len];
753 		seq = ABE32_TO_U32(tcph->th_seq);
754 		recv_size += seq - tcp->tcp_snxt;
755 
756 		/*
757 		 * The message duplicated by tcp_xmit_mp is freed.
758 		 * Note: the original message passed in remains unchanged.
759 		 */
760 		freemsg(mp1);
761 	}
762 
763 	mutex_enter(&peer_tcp->tcp_non_sq_lock);
764 	/*
765 	 * Wake up and signal the peer; it is okay to do this before
766 	 * enqueueing because we are holding the lock.  One of the
767 	 * advantages of synchronous streams is the ability for us to
768 	 * find out when the application performs a read on the socket,
769 	 * by way of the tcp_fuse_rrw() entry point being called.  All
770 	 * data that gets enqueued onto the receiver is treated as if
771 	 * it has arrived at the receiving endpoint, thus generating
772 	 * SIGPOLL/SIGIO for asynchronous socket just as in the strrput()
773 	 * case.  However, we only wake up the application when necessary,
774 	 * i.e. during the first enqueue.  When tcp_fuse_rrw() is called
775 	 * it will send everything upstream.
776 	 */
777 	if (peer_tcp->tcp_direct_sockfs && !urgent &&
778 	    !TCP_IS_DETACHED(peer_tcp)) {
779 		/* Update poll events and send SIGPOLL/SIGIO if necessary */
780 		STR_WAKEUP_SENDSIG(STREAM(peer_tcp->tcp_rq),
781 		    peer_tcp->tcp_rcv_list);
782 	}
783 
784 	/*
785 	 * Enqueue data into the peer's receive list; we may or may not
786 	 * drain the contents depending on the conditions below.
787 	 *
788 	 * tcp_hard_binding indicates that accept has not yet completed,
789 	 * in which case we use tcp_rcv_enqueue() instead of calling
790 	 * su_recv directly. Queued data will be drained when the accept
791 	 * completes (in tcp_accept_finish()).
792 	 */
793 	if (IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
794 	    !peer_tcp->tcp_hard_binding) {
795 		int error;
796 		int flags = 0;
797 
798 		if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
799 		    (tcp->tcp_urg == tcp->tcp_snxt)) {
800 			flags = MSG_OOB;
801 			(*peer_tcp->tcp_connp->conn_upcalls->su_signal_oob)
802 			    (peer_tcp->tcp_connp->conn_upper_handle, 0);
803 			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
804 		}
805 		(*peer_tcp->tcp_connp->conn_upcalls->su_recv)(
806 		    peer_tcp->tcp_connp->conn_upper_handle, mp, recv_size,
807 		    flags, &error, &push);
808 		ASSERT(error != EOPNOTSUPP);
809 	} else {
810 		if (IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
811 		    (tcp->tcp_valid_bits & TCP_URG_VALID) &&
812 		    (tcp->tcp_urg == tcp->tcp_snxt)) {
813 			/*
814 			 * Can not deal with urgent pointers
815 			 * that arrive before the connection has been
816 			 * accept()ed.
817 			 */
818 			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
819 			freemsg(mp);
820 			mutex_exit(&peer_tcp->tcp_non_sq_lock);
821 			return (B_TRUE);
822 		}
823 
824 		tcp_rcv_enqueue(peer_tcp, mp, recv_size);
825 	}
826 
827 	/* In case it wrapped around and also to keep it constant */
828 	peer_tcp->tcp_rwnd += recv_size;
829 	/*
830 	 * We increase the peer's unread message count here whilst still
831 	 * holding its tcp_non_sq_lock. This ensures that the increment
832 	 * occurs in the same lock acquisition perimeter as the enqueue.
833 	 * Depending on lock hierarchy, we can release these locks which
834 	 * creates a window in which we can race with tcp_fuse_rrw().
835 	 */
836 	peer_tcp->tcp_fuse_rcv_unread_cnt++;
837 
838 	/*
839 	 * Exercise flow-control when needed; we will get back-enabled
840 	 * in either tcp_accept_finish(), tcp_unfuse(), or tcp_fuse_rrw().
841 	 * If tcp_direct_sockfs is on or if the peer endpoint is detached,
842 	 * we emulate streams flow control by checking the peer's queue
843 	 * size and high water mark; otherwise we simply use canputnext()
844 	 * to decide if we need to stop our flow.
845 	 *
846 	 * The outstanding unread data block check does not apply for a
847 	 * detached receiver; this is to avoid unnecessary blocking of the
848 	 * sender while the accept is currently in progress and is quite
849 	 * similar to the regular tcp.
850 	 */
851 	if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
852 		max_unread = UINT_MAX;
853 
854 	/*
855 	 * Since we are accessing our tcp_flow_stopped and might modify it,
856 	 * we need to take tcp->tcp_non_sq_lock. The lock for the highest
857 	 * address is held first. Dropping peer_tcp->tcp_non_sq_lock should
858 	 * not be an issue here since we are within the squeue and the peer
859 	 * won't disappear.
860 	 */
861 	if (tcp > peer_tcp) {
862 		mutex_exit(&peer_tcp->tcp_non_sq_lock);
863 		mutex_enter(&tcp->tcp_non_sq_lock);
864 		mutex_enter(&peer_tcp->tcp_non_sq_lock);
865 	} else {
866 		mutex_enter(&tcp->tcp_non_sq_lock);
867 	}
868 	flow_stopped = tcp->tcp_flow_stopped;
869 	if (((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
870 	    (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater ||
871 	    peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread)) ||
872 	    (!peer_tcp->tcp_direct_sockfs && !TCP_IS_DETACHED(peer_tcp) &&
873 	    !IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
874 	    !canputnext(peer_tcp->tcp_rq))) {
875 		peer_data_queued = B_TRUE;
876 	}
877 
878 	if (!flow_stopped && (peer_data_queued ||
879 	    (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater))) {
880 		tcp_setqfull(tcp);
881 		flow_stopped = B_TRUE;
882 		TCP_STAT(tcps, tcp_fusion_flowctl);
883 		DTRACE_PROBE4(tcp__fuse__output__flowctl, tcp_t *, tcp,
884 		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt,
885 		    uint_t, peer_tcp->tcp_fuse_rcv_unread_cnt);
886 	} else if (flow_stopped && !peer_data_queued &&
887 	    (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater)) {
888 		tcp_clrqfull(tcp);
889 		TCP_STAT(tcps, tcp_fusion_backenabled);
890 		flow_stopped = B_FALSE;
891 	}
892 	mutex_exit(&tcp->tcp_non_sq_lock);
893 
894 	/*
895 	 * If we are in synchronous streams mode and the peer read queue is
896 	 * not full then schedule a push timer if one is not scheduled
897 	 * already. This is needed for applications which use MSG_PEEK to
898 	 * determine the number of bytes available before issuing a 'real'
899 	 * read. It also makes flow control more deterministic, particularly
900 	 * for smaller message sizes.
901 	 */
902 	if (!urgent && peer_tcp->tcp_direct_sockfs &&
903 	    peer_tcp->tcp_push_tid == 0 && !TCP_IS_DETACHED(peer_tcp) &&
904 	    canputnext(peer_tcp->tcp_rq)) {
905 		peer_tcp->tcp_push_tid = TCP_TIMER(peer_tcp, tcp_push_timer,
906 		    MSEC_TO_TICK(tcps->tcps_push_timer_interval));
907 	}
908 	mutex_exit(&peer_tcp->tcp_non_sq_lock);
909 	ipst->ips_loopback_packets++;
910 	tcp->tcp_last_sent_len = send_size;
911 
912 	/* Need to adjust the following SNMP MIB-related variables */
913 	tcp->tcp_snxt += send_size;
914 	tcp->tcp_suna = tcp->tcp_snxt;
915 	peer_tcp->tcp_rnxt += recv_size;
916 	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;
917 
918 	BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
919 	UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, send_size);
920 
921 	BUMP_MIB(&tcps->tcps_mib, tcpInSegs);
922 	BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
923 	UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, send_size);
924 
925 	BUMP_LOCAL(tcp->tcp_obsegs);
926 	BUMP_LOCAL(peer_tcp->tcp_ibsegs);
927 
928 	DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t, send_size);
929 
930 	if (!TCP_IS_DETACHED(peer_tcp)) {
931 		/*
932 		 * Drain the peer's receive queue if it has urgent data or if
933 		 * we're not flow-controlled.  There is no need for draining
934 		 * normal data when tcp_direct_sockfs is on because the peer
935 		 * will pull the data via tcp_fuse_rrw().
936 		 */
937 		if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
938 			ASSERT(IPCL_IS_NONSTR(peer_tcp->tcp_connp) ||
939 			    peer_tcp->tcp_rcv_list != NULL);
940 			/*
941 			 * For TLI-based streams, a thread in tcp_accept_swap()
942 			 * can race with us.  That thread will ensure that the
943 			 * correct peer_tcp->tcp_rq is globally visible before
944 			 * peer_tcp->tcp_detached is visible as clear, but we
945 			 * must also ensure that the load of tcp_rq cannot be
946 			 * reordered to be before the tcp_detached check.
947 			 */
948 			membar_consumer();
949 			(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
950 			    NULL);
951 			/*
952 			 * If synchronous streams was stopped above due
953 			 * to the presence of urgent data, re-enable it.
954 			 */
955 			if (urgent)
956 				TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);
957 		}
958 	}
959 	return (B_TRUE);
960 unfuse:
961 	tcp_unfuse(tcp);
962 	return (B_FALSE);
963 }
964 
965 /*
966  * This routine gets called to deliver data upstream on a fused or
967  * previously fused tcp loopback endpoint; the latter happens only
968  * when there is a pending SIGURG signal plus urgent data that couldn't
969  * be sent upstream earlier.
970  */
971 boolean_t
972 tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
973 {
974 	mblk_t *mp;
975 	conn_t	*connp = tcp->tcp_connp;
976 
977 #ifdef DEBUG
978 	uint_t cnt = 0;
979 #endif
980 	tcp_stack_t	*tcps = tcp->tcp_tcps;
981 	tcp_t		*peer_tcp = tcp->tcp_loopback_peer;
982 	boolean_t	sd_rd_eof = B_FALSE;
983 
984 	ASSERT(tcp->tcp_loopback);
985 	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
986 	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
987 	ASSERT(IPCL_IS_NONSTR(connp) || sigurg_mpp != NULL || tcp->tcp_fused);
988 
989 	/* No need for the push timer now, in case it was scheduled */
990 	if (tcp->tcp_push_tid != 0) {
991 		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
992 		tcp->tcp_push_tid = 0;
993 	}
994 	/*
995 	 * If there's urgent data sitting in receive list and we didn't
996 	 * get a chance to send up a SIGURG signal, make sure we send
997 	 * it first before draining in order to ensure that SIOCATMARK
998 	 * works properly.
999 	 */
1000 	if (tcp->tcp_fused_sigurg) {
1001 		tcp->tcp_fused_sigurg = B_FALSE;
1002 		if (IPCL_IS_NONSTR(connp)) {
1003 			(*connp->conn_upcalls->su_signal_oob)
1004 			    (connp->conn_upper_handle, 0);
1005 		} else {
1006 			/*
1007 			 * sigurg_mpp is normally NULL, i.e. when we're still
1008 			 * fused and didn't get here because of tcp_unfuse().
1009 			 * In this case try hard to allocate the M_PCSIG mblk.
1010 			 */
1011 			if (sigurg_mpp == NULL &&
1012 			    (mp = allocb(1, BPRI_HI)) == NULL &&
1013 			    (mp = allocb_tryhard(1)) == NULL) {
1014 				/* Alloc failed; try again next time */
1015 				tcp->tcp_push_tid = TCP_TIMER(tcp,
1016 				    tcp_push_timer,
1017 				    MSEC_TO_TICK(
1018 				    tcps->tcps_push_timer_interval));
1019 				return (B_TRUE);
1020 			} else if (sigurg_mpp != NULL) {
1021 				/*
1022 				 * Use the supplied M_PCSIG mblk; it means we're
1023 				 * either unfused or in the process of unfusing,
1024 				 * and the drain must happen now.
1025 				 */
1026 				mp = *sigurg_mpp;
1027 				*sigurg_mpp = NULL;
1028 			}
1029 			ASSERT(mp != NULL);
1030 
1031 			/* Send up the signal */
1032 			DB_TYPE(mp) = M_PCSIG;
1033 			*mp->b_wptr++ = (uchar_t)SIGURG;
1034 			putnext(q, mp);
1035 		}
1036 		/*
1037 		 * Let the regular tcp_rcv_drain() path handle
1038 		 * draining the data if we're no longer fused.
1039 		 */
1040 		if (!tcp->tcp_fused)
1041 			return (B_FALSE);
1042 	}
1043 
1044 	/*
1045 	 * In the synchronous streams case, we generate SIGPOLL/SIGIO for
1046 	 * each M_DATA that gets enqueued onto the receiver.  At this point
1047 	 * we are about to drain any queued data via putnext().  In order
1048 	 * to avoid extraneous signal generation from strrput(), we set
1049 	 * STRGETINPROG flag at the stream head prior to the draining and
1050 	 * restore it afterwards.  This masks out signal generation only
1051 	 * for M_DATA messages and does not affect urgent data. We only do
1052 	 * this if the STREOF flag is not set, which can happen if the
1053 	 * application shuts down the read side of a stream. In this case
1054 	 * we simply free these messages to approximate the flushq behavior
1055 	 * which normally occurs when STREOF is on the stream head read queue.
1056 	 */
1057 	if (tcp->tcp_direct_sockfs)
1058 		sd_rd_eof = strrput_sig(q, B_FALSE);
1059 
1060 	/* Drain the data */
1061 	while ((mp = tcp->tcp_rcv_list) != NULL) {
1062 		tcp->tcp_rcv_list = mp->b_next;
1063 		mp->b_next = NULL;
1064 #ifdef DEBUG
1065 		cnt += msgdsize(mp);
1066 #endif
1067 		ASSERT(!IPCL_IS_NONSTR(connp));
1068 		if (sd_rd_eof) {
1069 			freemsg(mp);
1070 		} else {
1071 			putnext(q, mp);
1072 			TCP_STAT(tcps, tcp_fusion_putnext);
1073 		}
1074 	}
1075 
1076 	if (tcp->tcp_direct_sockfs && !sd_rd_eof)
1077 		(void) strrput_sig(q, B_TRUE);
1078 
1079 #ifdef DEBUG
1080 	ASSERT(cnt == tcp->tcp_rcv_cnt);
1081 #endif
1082 	tcp->tcp_rcv_last_head = NULL;
1083 	tcp->tcp_rcv_last_tail = NULL;
1084 	tcp->tcp_rcv_cnt = 0;
1085 	tcp->tcp_fuse_rcv_unread_cnt = 0;
1086 	tcp->tcp_rwnd = tcp->tcp_recv_hiwater;
1087 
1088 	if (peer_tcp->tcp_flow_stopped && (TCP_UNSENT_BYTES(peer_tcp) <=
1089 	    peer_tcp->tcp_xmit_lowater)) {
1090 		tcp_clrqfull(peer_tcp);
1091 		TCP_STAT(tcps, tcp_fusion_backenabled);
1092 	}
1093 
1094 	return (B_TRUE);
1095 }
1096 
1097 /*
1098  * Synchronous stream entry point for sockfs to retrieve
1099  * data directly from tcp_rcv_list.
1100  * tcp_fuse_rrw() might end up modifying the peer's tcp_flow_stopped,
1101  * for which it must take the tcp_non_sq_lock of the peer as well
1102  * before making any change. The order of taking the locks is based on
1103  * the TCP pointer itself. Before we get the peer we need to take
1104  * our tcp_non_sq_lock so that the peer doesn't disappear. However,
1105  * we cannot drop the lock if we have to grab the peer's lock (because
1106  * of ordering), since the peer might disappear in the interim. So,
1107  * we take our tcp_non_sq_lock, get the peer, increment the ref on the
1108  * peer's conn, drop all the locks and then take the tcp_non_sq_lock in the
1109  * desired order. Incrementing the conn ref on the peer means that the
1110  * peer won't disappear when we drop our tcp_non_sq_lock.
1111  */
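/*
 * A sketch of the resulting lock/refhold sequence for the case where
 * peer_tcp has the higher address (this simply mirrors the code below and
 * is shown only to make the ordering explicit):
 *
 *	mutex_enter(&tcp->tcp_non_sq_lock);
 *	CONN_INC_REF(peer_tcp->tcp_connp);		peer can't go away
 *	mutex_exit(&tcp->tcp_non_sq_lock);
 *	mutex_enter(&peer_tcp->tcp_non_sq_lock);	higher address first
 *	mutex_enter(&tcp->tcp_non_sq_lock);
 *	(recheck plugged/stopped conditions)
 *	CONN_DEC_REF(peer_tcp->tcp_connp);
 */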
1112 int
1113 tcp_fuse_rrw(queue_t *q, struiod_t *dp)
1114 {
1115 	tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
1116 	mblk_t *mp;
1117 	tcp_t *peer_tcp;
1118 	tcp_stack_t	*tcps = tcp->tcp_tcps;
1119 
1120 	mutex_enter(&tcp->tcp_non_sq_lock);
1121 
1122 	/*
1123 	 * If tcp_fuse_syncstr_plugged is set, then another thread is moving
1124 	 * the underlying data to the stream head.  We need to wait until it's
1125 	 * done, then return EBUSY so that strget() will dequeue data from the
1126 	 * stream head to ensure data is drained in-order.
1127 	 */
1128 plugged:
1129 	if (tcp->tcp_fuse_syncstr_plugged) {
1130 		do {
1131 			cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_non_sq_lock);
1132 		} while (tcp->tcp_fuse_syncstr_plugged);
1133 
1134 		mutex_exit(&tcp->tcp_non_sq_lock);
1135 		TCP_STAT(tcps, tcp_fusion_rrw_plugged);
1136 		TCP_STAT(tcps, tcp_fusion_rrw_busy);
1137 		return (EBUSY);
1138 	}
1139 
1140 	peer_tcp = tcp->tcp_loopback_peer;
1141 
1142 	/*
1143 	 * If someone had turned off tcp_direct_sockfs or if synchronous
1144 	 * streams is stopped, we return EBUSY.  This causes strget() to
1145 	 * dequeue data from the stream head instead.
1146 	 */
1147 	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
1148 		mutex_exit(&tcp->tcp_non_sq_lock);
1149 		TCP_STAT(tcps, tcp_fusion_rrw_busy);
1150 		return (EBUSY);
1151 	}
1152 
1153 	/*
1154 	 * Grab lock in order. The highest addressed tcp is locked first.
1155 	 * We don't do this within the tcp_rcv_list check since if we
1156 	 * have to drop the lock, for ordering, then the tcp_rcv_list
1157 	 * could change.
1158 	 */
1159 	if (peer_tcp > tcp) {
1160 		CONN_INC_REF(peer_tcp->tcp_connp);
1161 		mutex_exit(&tcp->tcp_non_sq_lock);
1162 		mutex_enter(&peer_tcp->tcp_non_sq_lock);
1163 		mutex_enter(&tcp->tcp_non_sq_lock);
1164 		/*
1165 		 * This might have changed in the interim.
1166 		 * Once read-side tcp_non_sq_lock is dropped above,
1167 		 * anything can happen; we need to check all
1168 		 * known conditions again once we reacquire
1169 		 * read-side tcp_non_sq_lock.
1170 		 */
1171 		if (tcp->tcp_fuse_syncstr_plugged) {
1172 			mutex_exit(&peer_tcp->tcp_non_sq_lock);
1173 			CONN_DEC_REF(peer_tcp->tcp_connp);
1174 			goto plugged;
1175 		}
1176 		if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
1177 			mutex_exit(&tcp->tcp_non_sq_lock);
1178 			mutex_exit(&peer_tcp->tcp_non_sq_lock);
1179 			CONN_DEC_REF(peer_tcp->tcp_connp);
1180 			TCP_STAT(tcps, tcp_fusion_rrw_busy);
1181 			return (EBUSY);
1182 		}
1183 		CONN_DEC_REF(peer_tcp->tcp_connp);
1184 	} else {
1185 		mutex_enter(&peer_tcp->tcp_non_sq_lock);
1186 	}
1187 
1188 	if ((mp = tcp->tcp_rcv_list) != NULL) {
1189 
1190 		DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
1191 		    uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);
1192 
1193 		tcp->tcp_rcv_list = NULL;
1194 		TCP_STAT(tcps, tcp_fusion_rrw_msgcnt);
1195 
1196 		/*
1197 		 * At this point nothing should be left in tcp_rcv_list.
1198 		 * The only possible case where we would have a chain of
1199 		 * b_next-linked messages is urgent data, but we wouldn't
1200 		 * be here if that's true since urgent data is delivered
1201 		 * via putnext() and synchronous streams is stopped until
1202 		 * tcp_fuse_rcv_drain() is finished.
1203 		 */
1204 		ASSERT(DB_TYPE(mp) == M_DATA && mp->b_next == NULL);
1205 
1206 		tcp->tcp_rcv_last_head = NULL;
1207 		tcp->tcp_rcv_last_tail = NULL;
1208 		tcp->tcp_rcv_cnt = 0;
1209 		tcp->tcp_fuse_rcv_unread_cnt = 0;
1210 
1211 		if (peer_tcp->tcp_flow_stopped &&
1212 		    (TCP_UNSENT_BYTES(peer_tcp) <=
1213 		    peer_tcp->tcp_xmit_lowater)) {
1214 			tcp_clrqfull(peer_tcp);
1215 			TCP_STAT(tcps, tcp_fusion_backenabled);
1216 		}
1217 	}
1218 	mutex_exit(&peer_tcp->tcp_non_sq_lock);
1219 	/*
1220 	 * Either we just dequeued everything or we get here from sockfs
1221 	 * and have nothing to return; in this case clear RSLEEP.
1222 	 */
1223 	ASSERT(tcp->tcp_rcv_last_head == NULL);
1224 	ASSERT(tcp->tcp_rcv_last_tail == NULL);
1225 	ASSERT(tcp->tcp_rcv_cnt == 0);
1226 	ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
1227 	STR_WAKEUP_CLEAR(STREAM(q));
1228 
1229 	mutex_exit(&tcp->tcp_non_sq_lock);
1230 	dp->d_mp = mp;
1231 	return (0);
1232 }
1233 
1234 /*
1235  * Synchronous stream entry point used by certain ioctls to retrieve
1236  * information about or peek into the tcp_rcv_list.
1237  */
1238 int
1239 tcp_fuse_rinfop(queue_t *q, infod_t *dp)
1240 {
1241 	tcp_t	*tcp = Q_TO_CONN(q)->conn_tcp;
1242 	mblk_t	*mp;
1243 	uint_t	cmd = dp->d_cmd;
1244 	int	res = 0;
1245 	int	error = 0;
1246 	struct stdata *stp = STREAM(q);
1247 
1248 	mutex_enter(&tcp->tcp_non_sq_lock);
1249 	/* If shutdown on read has happened, return nothing */
1250 	mutex_enter(&stp->sd_lock);
1251 	if (stp->sd_flag & STREOF) {
1252 		mutex_exit(&stp->sd_lock);
1253 		goto done;
1254 	}
1255 	mutex_exit(&stp->sd_lock);
1256 
1257 	/*
1258 	 * It is OK not to return an answer if tcp_rcv_list is
1259 	 * currently not accessible.
1260 	 */
1261 	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped ||
1262 	    tcp->tcp_fuse_syncstr_plugged || (mp = tcp->tcp_rcv_list) == NULL)
1263 		goto done;
1264 
1265 	if (cmd & INFOD_COUNT) {
1266 		/*
1267 		 * We have at least one message and
1268 		 * could return only one at a time.
1269 		 */
1270 		dp->d_count++;
1271 		res |= INFOD_COUNT;
1272 	}
1273 	if (cmd & INFOD_BYTES) {
1274 		/*
1275 		 * Return size of all data messages.
1276 		 */
1277 		dp->d_bytes += tcp->tcp_rcv_cnt;
1278 		res |= INFOD_BYTES;
1279 	}
1280 	if (cmd & INFOD_FIRSTBYTES) {
1281 		/*
1282 		 * Return size of first data message.
1283 		 */
1284 		dp->d_bytes = msgdsize(mp);
1285 		res |= INFOD_FIRSTBYTES;
1286 		dp->d_cmd &= ~INFOD_FIRSTBYTES;
1287 	}
1288 	if (cmd & INFOD_COPYOUT) {
1289 		mblk_t *mp1;
1290 		int n;
1291 
1292 		if (DB_TYPE(mp) == M_DATA) {
1293 			mp1 = mp;
1294 		} else {
1295 			mp1 = mp->b_cont;
1296 			ASSERT(mp1 != NULL);
1297 		}
1298 
1299 		/*
1300 		 * Return data contents of first message.
1301 		 */
1302 		ASSERT(DB_TYPE(mp1) == M_DATA);
1303 		while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
1304 			n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
1305 			if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
1306 			    UIO_READ, dp->d_uiop)) != 0) {
1307 				goto done;
1308 			}
1309 			mp1 = mp1->b_cont;
1310 		}
1311 		res |= INFOD_COPYOUT;
1312 		dp->d_cmd &= ~INFOD_COPYOUT;
1313 	}
1314 done:
1315 	mutex_exit(&tcp->tcp_non_sq_lock);
1316 
1317 	dp->d_res |= res;
1318 
1319 	return (error);
1320 }
1321 
1322 /*
1323  * Enable synchronous streams on a fused tcp loopback endpoint.
1324  */
1325 static void
1326 tcp_fuse_syncstr_enable(tcp_t *tcp)
1327 {
1328 	queue_t *rq = tcp->tcp_rq;
1329 	struct stdata *stp = STREAM(rq);
1330 
1331 	/* We can only enable synchronous streams for sockfs mode */
1332 	tcp->tcp_direct_sockfs = tcp->tcp_issocket && do_tcp_direct_sockfs;
1333 
1334 	if (!tcp->tcp_direct_sockfs)
1335 		return;
1336 
1337 	mutex_enter(&stp->sd_lock);
1338 	mutex_enter(QLOCK(rq));
1339 
1340 	/*
1341 	 * We replace our q_qinfo with one that has the qi_rwp entry point.
1342 	 * Clear SR_SIGALLDATA because we generate the equivalent signal(s)
1343 	 * for every enqueued data in tcp_fuse_output().
1344 	 */
1345 	rq->q_qinfo = &tcp_loopback_rinit;
1346 	rq->q_struiot = tcp_loopback_rinit.qi_struiot;
1347 	stp->sd_struiordq = rq;
1348 	stp->sd_rput_opt &= ~SR_SIGALLDATA;
1349 
1350 	mutex_exit(QLOCK(rq));
1351 	mutex_exit(&stp->sd_lock);
1352 }
1353 
1354 /*
1355  * Disable synchronous streams on a fused tcp loopback endpoint.
1356  */
1357 static void
1358 tcp_fuse_syncstr_disable(tcp_t *tcp)
1359 {
1360 	queue_t *rq = tcp->tcp_rq;
1361 	struct stdata *stp = STREAM(rq);
1362 
1363 	if (!tcp->tcp_direct_sockfs)
1364 		return;
1365 
1366 	mutex_enter(&stp->sd_lock);
1367 	mutex_enter(QLOCK(rq));
1368 
1369 	/*
1370 	 * Reset q_qinfo to point to the default tcp entry points.
1371 	 * Also restore SR_SIGALLDATA so that strrput() can generate
1372 	 * the signals again for future M_DATA messages.
1373 	 */
1374 	rq->q_qinfo = &tcp_rinitv4;	/* No open - same as rinitv6 */
1375 	rq->q_struiot = tcp_rinitv4.qi_struiot;
1376 	stp->sd_struiordq = NULL;
1377 	stp->sd_rput_opt |= SR_SIGALLDATA;
1378 	tcp->tcp_direct_sockfs = B_FALSE;
1379 
1380 	mutex_exit(QLOCK(rq));
1381 	mutex_exit(&stp->sd_lock);
1382 }
1383 
1384 /*
1385  * Enable synchronous streams on a pair of fused tcp endpoints.
1386  */
1387 void
1388 tcp_fuse_syncstr_enable_pair(tcp_t *tcp)
1389 {
1390 	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
1391 
1392 	ASSERT(tcp->tcp_fused);
1393 	ASSERT(peer_tcp != NULL);
1394 
1395 	tcp_fuse_syncstr_enable(tcp);
1396 	tcp_fuse_syncstr_enable(peer_tcp);
1397 }
1398 
1399 /*
1400  * Used to enable/disable signal generation at the stream head. We already
1401  * generated the signal(s) for these messages when they were enqueued on the
1402  * receiver. We also check if STREOF is set here. If it is, we return true
1403  * and let the caller decide what to do.
1404  */
1405 static boolean_t
1406 strrput_sig(queue_t *q, boolean_t on)
1407 {
1408 	struct stdata *stp = STREAM(q);
1409 
1410 	mutex_enter(&stp->sd_lock);
1411 	if (stp->sd_flag == STREOF) {
1412 		mutex_exit(&stp->sd_lock);
1413 		return (B_TRUE);
1414 	}
1415 	if (on)
1416 		stp->sd_flag &= ~STRGETINPROG;
1417 	else
1418 		stp->sd_flag |= STRGETINPROG;
1419 	mutex_exit(&stp->sd_lock);
1420 
1421 	return (B_FALSE);
1422 }
1423 
1424 /*
1425  * Disable synchronous streams on a pair of fused tcp endpoints and drain
1426  * any queued data; called either during unfuse or upon transitioning from
1427  * a socket to a stream endpoint due to _SIOCSOCKFALLBACK.
1428  */
1429 void
1430 tcp_fuse_disable_pair(tcp_t *tcp, boolean_t unfusing)
1431 {
1432 	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
1433 	tcp_stack_t	*tcps = tcp->tcp_tcps;
1434 
1435 	ASSERT(tcp->tcp_fused);
1436 	ASSERT(peer_tcp != NULL);
1437 
1438 	/*
1439 	 * Force any tcp_fuse_rrw() calls to block until we've moved the data
1440 	 * onto the stream head.
1441 	 */
1442 	TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp);
1443 	TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);
1444 
1445 	/*
1446 	 * Cancel any pending push timers.
1447 	 */
1448 	if (tcp->tcp_push_tid != 0) {
1449 		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
1450 		tcp->tcp_push_tid = 0;
1451 	}
1452 	if (peer_tcp->tcp_push_tid != 0) {
1453 		(void) TCP_TIMER_CANCEL(peer_tcp, peer_tcp->tcp_push_tid);
1454 		peer_tcp->tcp_push_tid = 0;
1455 	}
1456 
1457 	/*
1458 	 * Drain any pending data; the detached check is needed because
1459 	 * we may be called as a result of a tcp_unfuse() triggered by
1460 	 * tcp_fuse_output().  Note that in case of a detached tcp, the
1461 	 * draining will happen later after the tcp is unfused.  For non-
1462 	 * urgent data, this can be handled by the regular tcp_rcv_drain().
1463 	 * If we have urgent data sitting in the receive list, we will
1464 	 * need to send up a SIGURG signal first before draining the data.
1465 	 * All of these will be handled by the code in tcp_fuse_rcv_drain()
1466 	 * when called from tcp_rcv_drain().
1467 	 */
1468 	if (!TCP_IS_DETACHED(tcp)) {
1469 		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
1470 		    (unfusing ? &tcp->tcp_fused_sigurg_mp : NULL));
1471 	}
1472 	if (!TCP_IS_DETACHED(peer_tcp)) {
1473 		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
1474 		    (unfusing ? &peer_tcp->tcp_fused_sigurg_mp : NULL));
1475 	}
1476 
1477 	/*
1478 	 * Make all current and future tcp_fuse_rrw() calls fail with EBUSY.
1479 	 * To ensure threads don't sneak past the checks in tcp_fuse_rrw(),
1480 	 * a given stream must be stopped prior to being unplugged (but the
1481 	 * ordering of operations between the streams is unimportant).
1482 	 */
1483 	TCP_FUSE_SYNCSTR_STOP(tcp);
1484 	TCP_FUSE_SYNCSTR_STOP(peer_tcp);
1485 	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp);
1486 	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);
1487 
1488 	/* Lift up any flow-control conditions */
1489 	if (tcp->tcp_flow_stopped) {
1490 		tcp_clrqfull(tcp);
1491 		TCP_STAT(tcps, tcp_fusion_backenabled);
1492 	}
1493 	if (peer_tcp->tcp_flow_stopped) {
1494 		tcp_clrqfull(peer_tcp);
1495 		TCP_STAT(tcps, tcp_fusion_backenabled);
1496 	}
1497 
1498 	/* Disable synchronous streams */
1499 	if (!IPCL_IS_NONSTR(tcp->tcp_connp))
1500 		tcp_fuse_syncstr_disable(tcp);
1501 	if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp))
1502 		tcp_fuse_syncstr_disable(peer_tcp);
1503 }
1504 
1505 /*
1506  * Calculate the size of receive buffer for a fused tcp endpoint.
1507  */
1508 size_t
1509 tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
1510 {
1511 	tcp_stack_t	*tcps = tcp->tcp_tcps;
1512 
1513 	ASSERT(tcp->tcp_fused);
1514 
1515 	/* Ensure that value is within the maximum upper bound */
1516 	if (rwnd > tcps->tcps_max_buf)
1517 		rwnd = tcps->tcps_max_buf;
1518 
1519 	/* Obey the absolute minimum tcp receive high water mark */
1520 	if (rwnd < tcps->tcps_sth_rcv_hiwat)
1521 		rwnd = tcps->tcps_sth_rcv_hiwat;
1522 
1523 	/*
1524 	 * Round up to system page size in case SO_RCVBUF is modified
1525 	 * after SO_SNDBUF; the latter is also similarly rounded up.
1526 	 */
1527 	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
1528 	tcp->tcp_fuse_rcv_hiwater = rwnd;
1529 	return (rwnd);
1530 }
1531 
1532 /*
1533  * Calculate the maximum number of outstanding unread data blocks for a fused tcp endpoint.
1534  */
1535 int
1536 tcp_fuse_maxpsz_set(tcp_t *tcp)
1537 {
1538 	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
1539 	uint_t sndbuf = tcp->tcp_xmit_hiwater;
1540 	uint_t maxpsz = sndbuf;
1541 
1542 	ASSERT(tcp->tcp_fused);
1543 	ASSERT(peer_tcp != NULL);
1544 	ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
1545 	/*
1546 	 * In the fused loopback case, we want the stream head to split
1547 	 * up larger writes into smaller chunks for a more accurate flow-
1548 	 * control accounting.  Our maxpsz is half of the sender's send
1549 	 * buffer or the receiver's receive buffer, whichever is smaller.
1550 	 * We round up the buffer to system page size due to the lack of
1551 	 * TCP MSS concept in Fusion.
1552 	 */
1553 	if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
1554 		maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
1555 	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;
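	/*
	 * Worked example with hypothetical values: sndbuf = 48K and a peer
	 * tcp_fuse_rcv_hiwater of 32K give maxpsz = MIN(48K, 32K) = 32K,
	 * which is already page aligned, so the stream head will split
	 * writes into chunks of at most 32K >> 1 = 16K.
	 */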
1556 
1557 	/*
1558 	 * Calculate the peer's limit for the number of outstanding unread
1559 	 * data blocks.  This is the number of data blocks that are allowed
1560 	 * to reside in the receiver's queue before the sender gets flow
1561 	 * controlled.  It is used only in the synchronous streams mode as
1562 	 * a way to throttle the sender when it performs consecutive writes
1563 	 * faster than can be read.  The value is derived from SO_SNDBUF in
1564 	 * order to give the sender some control; we divide it by a large
1565 	 * value (16KB) to produce a fairly low initial limit.
1566 	 */
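	/*
	 * A hypothetical example of the derivation below: sndbuf = 1M gives
	 * 1M >> 14 = 64 unread blocks, while sndbuf = 64K gives
	 * 64K >> 14 = 4, which MAX() then raises to tcp_fusion_rcv_unread_min
	 * (8 by default).
	 */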
1567 	if (tcp_fusion_rcv_unread_min == 0) {
1568 		/* A value of 0 means that we disable the check */
1569 		peer_tcp->tcp_fuse_rcv_unread_hiwater = 0;
1570 	} else {
1571 		peer_tcp->tcp_fuse_rcv_unread_hiwater =
1572 		    MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min);
1573 	}
1574 	return (maxpsz);
1575 }
1576