/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections.  The fusion of two local TCP endpoints occurs
 * at connection establishment time.  Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful.  If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.  After enqueueing
 * the data, the sender can either push (putnext) data up the receiver's
 * read queue; or the sender can simply return and let the receiver
 * retrieve the enqueued data via the synchronous streams entry point
 * tcp_fuse_rrw().  The latter path is taken if synchronous streams is
 * enabled (the default).  It is disabled if sockfs no longer resides
 * directly on top of the tcp module due to a module insertion or removal.
 * It also needs to be temporarily disabled when sending urgent data
 * because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
 * by the strsock_proto() hook.
 *
 * Synchronization is handled by squeue and the mutex tcp_fuse_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue.  This ensures that neither side
 * can disappear while the other side is still sending data.  By itself,
 * squeue is not sufficient for guaranteeing safety when synchronous
 * streams is enabled.  The reason is that tcp_fuse_rrw() doesn't enter
 * the squeue and its access to tcp_rcv_list and other fusion-related
 * fields needs to be synchronized with the sender.  tcp_fuse_lock is
 * used for this purpose.  When there is urgent data, the sender needs
 * to push the data up the receiver's streams read queue.  In order to
 * avoid holding the tcp_fuse_lock across putnext(), the sender sets
 * the peer tcp's tcp_fuse_syncstr_plugged bit and releases tcp_fuse_lock
 * (see macro TCP_FUSE_SYNCSTR_PLUG_DRAIN()).  If tcp_fuse_rrw() enters
 * after this point, it will see that synchronous streams is plugged and
 * will wait on tcp_fuse_plugcv.  After the sender has finished pushing up
 * all urgent data, it will clear the tcp_fuse_syncstr_plugged bit using
 * TCP_FUSE_SYNCSTR_UNPLUG_DRAIN().  This will cause any threads waiting
 * on tcp_fuse_plugcv to return EBUSY, and in turn cause strget() to call
 * getq_noenab() to dequeue data from the stream head instead.  Once the
 * data on the stream head has been consumed, tcp_fuse_rrw() may again
 * be used to process tcp_rcv_list.  However, if TCP_FUSE_SYNCSTR_STOP()
 * has been called, all future calls to tcp_fuse_rrw() will return EBUSY,
 * effectively disabling synchronous streams.
 *
 * The following note applies only to the synchronous streams mode.
 *
 * Flow control is done by checking the size of receive buffer and
 * the number of data blocks, both set to different limits.  This is
 * different from regular streams flow control where the cumulative size
 * check dominates the block count check -- streams queue high water mark
 * typically represents bytes.  Each enqueue triggers notifications
 * to the receiving process; a build-up of data blocks indicates a
 * slow receiver and the sender should be blocked or informed at the
 * earliest moment instead of further wasting system resources.  In
 * effect, this is equivalent to limiting the number of outstanding
 * segments in flight.
 */
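
/*
 * For illustration only (not part of this file's build): fusion is
 * completely transparent to applications.  The hypothetical userland
 * sketch below -- plain sockets code with nothing fusion-specific --
 * creates the kind of loopback connection that tcp_fuse() would
 * consider for fusion, assuming default tunables and no IPsec/IPQoS
 * policy.  Error handling is omitted for brevity.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <string.h>

static int
loopback_pair(int *csock, int *asock)
{
	struct sockaddr_in sin;
	socklen_t len = sizeof (sin);
	int lsock;

	/* Listener bound to 127.0.0.1 on an ephemeral port */
	lsock = socket(AF_INET, SOCK_STREAM, 0);
	(void) memset(&sin, 0, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	(void) bind(lsock, (struct sockaddr *)&sin, sizeof (sin));
	(void) listen(lsock, 1);
	(void) getsockname(lsock, (struct sockaddr *)&sin, &len);

	/* Active connect; the eager it creates is a fusion candidate */
	*csock = socket(AF_INET, SOCK_STREAM, 0);
	(void) connect(*csock, (struct sockaddr *)&sin, sizeof (sin));
	*asock = accept(lsock, NULL, NULL);
	(void) close(lsock);
	return (0);
}
#endif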

/*
 * Macros that determine whether or not IP processing is needed for TCP.
 */
#define	TCP_IPOPT_POLICY_V4(tcp)					\
	((tcp)->tcp_ipversion == IPV4_VERSION &&			\
	((tcp)->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH ||		\
	CONN_OUTBOUND_POLICY_PRESENT((tcp)->tcp_connp) ||		\
	CONN_INBOUND_POLICY_PRESENT((tcp)->tcp_connp)))

#define	TCP_IPOPT_POLICY_V6(tcp)					\
	((tcp)->tcp_ipversion == IPV6_VERSION &&			\
	((tcp)->tcp_ip_hdr_len != IPV6_HDR_LEN ||			\
	CONN_OUTBOUND_POLICY_PRESENT_V6((tcp)->tcp_connp) ||		\
	CONN_INBOUND_POLICY_PRESENT_V6((tcp)->tcp_connp)))

#define	TCP_LOOPBACK_IP(tcp)						\
	(TCP_IPOPT_POLICY_V4(tcp) || TCP_IPOPT_POLICY_V6(tcp) ||	\
	!CONN_IS_MD_FASTPATH((tcp)->tcp_connp))

/*
 * Setting this to B_FALSE disables fusion altogether; loopback
 * connections then go through the regular protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;
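
/*
 * For example, fusion can be disabled on a live system by clearing the
 * variable with mdb(1) (a sketch; only connections established after
 * the write are affected):
 *
 *	# echo 'do_tcp_fusion/W 0' | mdb -kw
 */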

/*
 * Enabling this flag allows sockfs to retrieve data directly
 * from a fused tcp endpoint using the synchronous streams interface.
 */
boolean_t do_tcp_direct_sockfs = B_TRUE;

/*
 * This is the minimum number of outstanding writes allowed on
 * a synchronous streams-enabled receiving endpoint before the
 * sender gets flow-controlled.  Setting this value to 0 means
 * that the data block limit is equivalent to the byte count
 * limit, which essentially disables the check.
 */
#define	TCP_FUSION_RCV_UNREAD_MIN	8
uint_t tcp_fusion_rcv_unread_min = TCP_FUSION_RCV_UNREAD_MIN;
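
/*
 * The effective per-connection limit is computed in tcp_fuse_maxpsz_set()
 * as MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min).  Worked examples with
 * the default minimum of 8 (hypothetical buffer sizes):
 *
 *	sndbuf = 48K:	49152 >> 14 = 3		limit = MAX(3, 8) = 8
 *	sndbuf = 1M:	1048576 >> 14 = 64	limit = MAX(64, 8) = 64
 */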

static void	tcp_fuse_syncstr_enable(tcp_t *);
static void	tcp_fuse_syncstr_disable(tcp_t *);
static void	strrput_sig(queue_t *, boolean_t);

/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED.  It fuses a direct path between itself
 * and the active connect tcp such that the regular tcp processing
 * may be bypassed under allowable circumstances.  Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit q_hiwat of the listener tcp, but we can't
	 * really use tcp_listener since we get here after sending up
	 * T_CONN_IND and tcp_wput_accept() may be called independently,
	 * at which point tcp_listener is cleared; this is why we use
	 * tcp_saved_listener.  The listener itself is guaranteed to be
	 * around until tcp_accept_finish() is called on this eager --
	 * this won't happen until we're done since we're inside the
	 * eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);

	/*
	 * Lookup peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system.  Zone check
	 * is applied accordingly for loopback address, but not for
	 * local address since we want fusion to happen across Zones.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcph);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcph);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same
	 * squeue as our conn and is not a raw socket.  The squeue
	 * assignment of this eager tcp was done earlier at the time of
	 * SYN processing in ip_fanout_tcp{_v6}.  Note that being on the
	 * same squeue by itself doesn't guarantee a safe condition to
	 * fuse, hence we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 * In particular we bail out for non-simple TCP/IP or if IPsec/
	 * IPQoS policy/kernel SSL exists.
	 */
	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    !TCP_LOOPBACK_IP(tcp) && !TCP_LOOPBACK_IP(peer_tcp) &&
	    tcp->tcp_kssl_ent == NULL &&
	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN)) {
		mblk_t *mp;
		struct stroptions *stropt;
		queue_t *peer_rq = peer_tcp->tcp_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp) && peer_rq != NULL);
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(tcp->tcp_kssl_ctx == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 */
		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		tcp->tcp_fused_sigurg_mp = mp;

		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		peer_tcp->tcp_fused_sigurg_mp = mp;

		/* Allocate M_SETOPTS mblk */
		if ((mp = allocb(sizeof (*stropt), BPRI_HI)) == NULL)
			goto failed;

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints.  Having
		 * them set to non-zero values means asking for trouble
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly setup.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * At this point we are a detached eager tcp and therefore
		 * don't have a queue assigned to us until accept happens.
		 * In the mean time the peer endpoint may immediately send
		 * us data as soon as fusion is finished, and we need to be
		 * able to flow control it in case it sends down a huge
		 * amount of data while we're still detached.  To prevent
		 * that we inherit the listener's q_hiwat value; this is
		 * temporary since we'll repeat the process in
		 * tcp_accept_finish().
		 */
		(void) tcp_fuse_set_rcv_hiwat(tcp,
		    tcp->tcp_saved_listener->tcp_rq->q_hiwat);

		/*
		 * Set the stream head's write offset value to zero since we
		 * won't be needing any room for TCP/IP headers; tell it not
		 * to break up the writes (this would reduce the amount of
		 * work done by kmem); and configure our receive buffer.
		 * Note that we can only do this for the active connect tcp
		 * since our eager is still detached; it will be dealt with
		 * later in tcp_accept_finish().
		 */
		DB_TYPE(mp) = M_SETOPTS;
		mp->b_wptr += sizeof (*stropt);

		stropt = (struct stroptions *)mp->b_rptr;
		stropt->so_flags = SO_MAXBLK | SO_WROFF | SO_HIWAT;
		stropt->so_maxblk = tcp_maxpsz_set(peer_tcp, B_FALSE);
		stropt->so_wroff = 0;

		/*
		 * Record the stream head's high water mark for the
		 * peer endpoint; this is used for flow-control
		 * purposes in tcp_fuse_output().
		 */
		stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(peer_tcp,
		    peer_rq->q_hiwat);

		/* Send the options up */
		putnext(peer_rq, mp);
	} else {
		TCP_STAT(tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
	ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);

	/*
	 * We disable synchronous streams, drain any queued data and
	 * clear tcp_direct_sockfs.  The synchronous streams entry
	 * points will become no-ops after this point.
	 */
	tcp_fuse_disable_pair(tcp, B_TRUE);

	/*
	 * Update th_seq and th_ack in the header template
	 */
	U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
	U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
}

/*
 * Fusion output routine for urgent data.  This routine is called by
 * tcp_fuse_output() for handling non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer.  For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode.  This is similar to the
	 * urgent data handling in the regular tcp.  We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app.  There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp.  If the new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost.  This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcp_fusion_urg);
	BUMP_MIB(&tcp_mib, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove existing T_EXDATA_IND, keep the data which follows
		 * it and relink our list.  Note that we don't modify the
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}
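
/*
 * For reference, the T_EXDATA_REQ handled above is what a send with
 * MSG_OOB on a fused socket has become by the time it reaches us.  A
 * hypothetical userland sketch (sfd/rfd are an assumed connected
 * loopback pair; standard sockets API only, requires <sys/socket.h>
 * and <sys/sockio.h>):
 */
#if 0
static void
oob_example(int sfd, int rfd)
{
	int atmark = 0;
	char c;

	/* Sender side: one urgent byte; reaches tcp as T_EXDATA_REQ */
	(void) send(sfd, "!", 1, MSG_OOB);

	/* Receiver side: after SIGURG, test the mark, pull the byte */
	(void) ioctl(rfd, SIOCATMARK, &atmark);
	if (atmark)
		(void) recv(rfd, &c, 1, MSG_OOB);
}
#endif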

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t max_unread;
	boolean_t flow_stopped;
	boolean_t urgent = (DB_TYPE(mp) != M_DATA);

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);

	max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;

	/* If this connection requires IP, unfuse and use regular path */
	if (TCP_LOOPBACK_IP(tcp) || TCP_LOOPBACK_IP(peer_tcp) ||
	    IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN)) {
		TCP_STAT(tcp_fusion_aborted);
		tcp_unfuse(tcp);
		return (B_FALSE);
	}

	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for M_PCSIG mblk.
	 */
	if (urgent) {
		/*
		 * We stop synchronous streams when we have urgent data
		 * queued to prevent tcp_fuse_rrw() from pulling it.  If
		 * for some reason the urgent data can't be delivered
		 * below, synchronous streams will remain stopped until
		 * someone drains the tcp_rcv_list.
		 */
		TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);
		tcp_fuse_output_urg(tcp, mp);
	}

	mutex_enter(&peer_tcp->tcp_fuse_lock);
	/*
	 * Wake up and signal the peer; it is okay to do this before
	 * enqueueing because we are holding the lock.  One of the
	 * advantages of synchronous streams is the ability for us to
	 * find out when the application performs a read on the socket,
	 * by way of the tcp_fuse_rrw() entry point being called.  Any
	 * data that gets enqueued onto the receiver is treated as if
	 * it had arrived at the receiving endpoint, thus generating
	 * SIGPOLL/SIGIO for asynchronous sockets just as in the strrput()
	 * case.  However, we only wake up the application when necessary,
	 * i.e. during the first enqueue.  When tcp_fuse_rrw() is called
	 * it will send everything upstream.
	 */
	if (peer_tcp->tcp_direct_sockfs && !urgent &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		if (peer_tcp->tcp_rcv_list == NULL)
			STR_WAKEUP_SET(STREAM(peer_tcp->tcp_rq));
		/* Update poll events and send SIGPOLL/SIGIO if necessary */
		STR_SENDSIG(STREAM(peer_tcp->tcp_rq));
	}

	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 */
	tcp_rcv_enqueue(peer_tcp, mp, send_size);

	/* In case it wrapped around and also to keep it constant */
	peer_tcp->tcp_rwnd += send_size;

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in either tcp_accept_finish(), tcp_unfuse(), or tcp_fuse_rrw().
	 * If tcp_direct_sockfs is on or if the peer endpoint is detached,
	 * we emulate streams flow control by checking the peer's queue
	 * size and high water mark; otherwise we simply use canputnext()
	 * to decide if we need to stop our flow.
	 *
	 * The outstanding unread data block check does not apply for a
	 * detached receiver; this is to avoid unnecessary blocking of the
	 * sender while the accept is currently in progress and is quite
	 * similar to the regular tcp.
	 */
	if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
		max_unread = UINT_MAX;

	flow_stopped = tcp->tcp_flow_stopped;
	if (!flow_stopped &&
	    (((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater ||
	    ++peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread)) ||
	    (!peer_tcp->tcp_direct_sockfs &&
	    !TCP_IS_DETACHED(peer_tcp) && !canputnext(peer_tcp->tcp_rq)))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcp_fusion_flowctl);
		DTRACE_PROBE4(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt,
		    uint_t, peer_tcp->tcp_fuse_rcv_unread_cnt);
	} else if (flow_stopped &&
	    TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) {
		tcp_clrqfull(tcp);
		flow_stopped = B_FALSE;
	}

	loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += send_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	BUMP_MIB(&tcp_mib, tcpOutDataSegs);
	UPDATE_MIB(&tcp_mib, tcpOutDataBytes, send_size);

	BUMP_MIB(&tcp_mib, tcpInSegs);
	BUMP_MIB(&tcp_mib, tcpInDataInorderSegs);
	UPDATE_MIB(&tcp_mib, tcpInDataInorderBytes, send_size);

	BUMP_LOCAL(tcp->tcp_obsegs);
	BUMP_LOCAL(peer_tcp->tcp_ibsegs);

	mutex_exit(&peer_tcp->tcp_fuse_lock);

	DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t, send_size);

	if (!TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or
		 * if we're not flow-controlled.  There is no need for
		 * draining normal data when tcp_direct_sockfs is on because
		 * the peer will pull the data via tcp_fuse_rrw().
		 */
		if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			/*
			 * For TLI-based streams, a thread in tcp_accept_swap()
			 * can race with us.  That thread will ensure that the
			 * correct peer_tcp->tcp_rq is globally visible before
			 * peer_tcp->tcp_detached is visible as clear, but we
			 * must also ensure that the load of tcp_rq cannot be
			 * reordered to be before the tcp_detached check.
			 */
			membar_consumer();
			(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
			    NULL);
			/*
			 * If synchronous streams was stopped above due
			 * to the presence of urgent data, re-enable it.
			 */
			if (urgent)
				TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);
		}
	}
	return (B_TRUE);
}
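
/*
 * A distilled restatement of the flow-control predicate used above,
 * for clarity (sketch only; the real code also increments
 * tcp_fuse_rcv_unread_cnt as a side effect of the test):
 */
#if 0
static boolean_t
tcp_fuse_flow_blocked(tcp_t *peer_tcp, uint_t max_unread)
{
	if (peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) {
		/* Emulated streams flow control: byte OR block count */
		return (peer_tcp->tcp_rcv_cnt >=
		    peer_tcp->tcp_fuse_rcv_hiwater ||
		    peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread);
	}
	/* Regular streams flow control */
	return (!canputnext(peer_tcp->tcp_rq));
}
#endif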

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that couldn't
 * be sent upstream in the past.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
#ifdef DEBUG
	uint_t cnt = 0;
#endif

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in the receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp, tcp_push_timer,
			    MSEC_TO_TICK(tcp_push_timer_interval));
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		tcp->tcp_fused_sigurg = B_FALSE;
		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);
		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/*
	 * In the synchronous streams case, we generate SIGPOLL/SIGIO for
	 * each M_DATA that gets enqueued onto the receiver.  At this point
	 * we are about to drain any queued data via putnext().  In order
	 * to avoid extraneous signal generation from strrput(), we set
	 * the STRGETINPROG flag at the stream head prior to the draining
	 * and restore it afterwards.  This masks out signal generation
	 * only for M_DATA messages and does not affect urgent data.
	 */
	if (tcp->tcp_direct_sockfs)
		strrput_sig(q, B_FALSE);

	/* Drain the data */
	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		putnext(q, mp);
		TCP_STAT(tcp_fusion_putnext);
	}

	if (tcp->tcp_direct_sockfs)
		strrput_sig(q, B_TRUE);

	ASSERT(cnt == tcp->tcp_rcv_cnt);
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;
	tcp->tcp_fuse_rcv_unread_cnt = 0;
	tcp->tcp_rwnd = q->q_hiwat;

	return (B_TRUE);
}

/*
 * Synchronous stream entry point for sockfs to retrieve
 * data directly from tcp_rcv_list.
 */
int
tcp_fuse_rrw(queue_t *q, struiod_t *dp)
{
	tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t *mp;

	mutex_enter(&tcp->tcp_fuse_lock);

	/*
	 * If tcp_fuse_syncstr_plugged is set, then another thread is moving
	 * the underlying data to the stream head.  We need to wait until it's
	 * done, then return EBUSY so that strget() will dequeue data from the
	 * stream head to ensure data is drained in-order.
	 */
	if (tcp->tcp_fuse_syncstr_plugged) {
		do {
			cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_fuse_lock);
		} while (tcp->tcp_fuse_syncstr_plugged);

		mutex_exit(&tcp->tcp_fuse_lock);
		TCP_STAT(tcp_fusion_rrw_plugged);
		TCP_STAT(tcp_fusion_rrw_busy);
		return (EBUSY);
	}

	/*
	 * If someone had turned off tcp_direct_sockfs or if synchronous
	 * streams is stopped, we return EBUSY.  This causes strget() to
	 * dequeue data from the stream head instead.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
		mutex_exit(&tcp->tcp_fuse_lock);
		TCP_STAT(tcp_fusion_rrw_busy);
		return (EBUSY);
	}

	if ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp_t *peer_tcp = tcp->tcp_loopback_peer;

		DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
		    uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);

		tcp->tcp_rcv_list = NULL;
		TCP_STAT(tcp_fusion_rrw_msgcnt);

		/*
		 * At this point nothing should be left in tcp_rcv_list.
		 * The only possible case where we would have a chain of
		 * b_next-linked messages is urgent data, but we wouldn't
		 * be here if that's true since urgent data is delivered
		 * via putnext() and synchronous streams is stopped until
		 * tcp_fuse_rcv_drain() is finished.
		 */
		ASSERT(DB_TYPE(mp) == M_DATA && mp->b_next == NULL);

		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
		tcp->tcp_fuse_rcv_unread_cnt = 0;

		if (peer_tcp->tcp_flow_stopped) {
			tcp_clrqfull(peer_tcp);
			TCP_STAT(tcp_fusion_backenabled);
		}
	}

	/*
	 * Either we just dequeued everything or we get here from sockfs
	 * and have nothing to return; in this case clear RSLEEP.
	 */
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);
	ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
	STR_WAKEUP_CLEAR(STREAM(q));

	mutex_exit(&tcp->tcp_fuse_lock);
	dp->d_mp = mp;
	return (0);
}

/*
 * Synchronous stream entry point used by certain ioctls to retrieve
 * information about or peek into the tcp_rcv_list.
 */
int
tcp_fuse_rinfop(queue_t *q, infod_t *dp)
{
	tcp_t	*tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t	*mp;
	uint_t	cmd = dp->d_cmd;
	int	res = 0;
	int	error = 0;
	struct stdata *stp = STREAM(q);

	mutex_enter(&tcp->tcp_fuse_lock);
	/* If shutdown on read has happened, return nothing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag & STREOF) {
		mutex_exit(&stp->sd_lock);
		goto done;
	}
	mutex_exit(&stp->sd_lock);

	/*
	 * It is OK not to return an answer if tcp_rcv_list is
	 * currently not accessible.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped ||
	    tcp->tcp_fuse_syncstr_plugged || (mp = tcp->tcp_rcv_list) == NULL)
		goto done;

	if (cmd & INFOD_COUNT) {
		/*
		 * We have at least one message, but can
		 * return only one at a time.
		 */
		dp->d_count++;
		res |= INFOD_COUNT;
	}
	if (cmd & INFOD_BYTES) {
		/*
		 * Return the size of all data messages.
		 */
		dp->d_bytes += tcp->tcp_rcv_cnt;
		res |= INFOD_BYTES;
	}
	if (cmd & INFOD_FIRSTBYTES) {
		/*
		 * Return the size of the first data message.
		 */
		dp->d_bytes = msgdsize(mp);
		res |= INFOD_FIRSTBYTES;
		dp->d_cmd &= ~INFOD_FIRSTBYTES;
	}
	if (cmd & INFOD_COPYOUT) {
		mblk_t *mp1;
		int n;

		if (DB_TYPE(mp) == M_DATA) {
			mp1 = mp;
		} else {
			mp1 = mp->b_cont;
			ASSERT(mp1 != NULL);
		}

		/*
		 * Return the data contents of the first message.
		 */
		ASSERT(DB_TYPE(mp1) == M_DATA);
		while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
			n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
			if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
			    UIO_READ, dp->d_uiop)) != 0) {
				goto done;
			}
			mp1 = mp1->b_cont;
		}
		res |= INFOD_COPYOUT;
		dp->d_cmd &= ~INFOD_COPYOUT;
	}
done:
	mutex_exit(&tcp->tcp_fuse_lock);

	dp->d_res |= res;

	return (error);
}

/*
 * Enable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_enable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	/* We can only enable synchronous streams for sockfs mode */
	tcp->tcp_direct_sockfs = tcp->tcp_issocket && do_tcp_direct_sockfs;

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * We replace our q_qinfo with one that has the qi_rwp entry point.
	 * Clear SR_SIGALLDATA because we generate the equivalent signal(s)
	 * for each data enqueue in tcp_fuse_output().
	 */
	rq->q_qinfo = &tcp_loopback_rinit;
	rq->q_struiot = tcp_loopback_rinit.qi_struiot;
	stp->sd_struiordq = rq;
	stp->sd_rput_opt &= ~SR_SIGALLDATA;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_disable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * Reset q_qinfo to point to the default tcp entry points.
	 * Also restore SR_SIGALLDATA so that strrput() can generate
	 * the signals again for future M_DATA messages.
	 */
	rq->q_qinfo = &tcp_rinit;
	rq->q_struiot = tcp_rinit.qi_struiot;
	stp->sd_struiordq = NULL;
	stp->sd_rput_opt |= SR_SIGALLDATA;
	tcp->tcp_direct_sockfs = B_FALSE;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Enable synchronous streams on a pair of fused tcp endpoints.
 */
void
tcp_fuse_syncstr_enable_pair(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	tcp_fuse_syncstr_enable(tcp);
	tcp_fuse_syncstr_enable(peer_tcp);
}

/*
 * Allow or disallow signals to be generated by strrput().
 */
static void
strrput_sig(queue_t *q, boolean_t on)
{
	struct stdata *stp = STREAM(q);

	mutex_enter(&stp->sd_lock);
	if (on)
		stp->sd_flag &= ~STRGETINPROG;
	else
		stp->sd_flag |= STRGETINPROG;
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a pair of fused tcp endpoints and drain
 * any queued data; called either during unfuse or upon transitioning from
 * a socket to a stream endpoint due to _SIOCSOCKFALLBACK.
 */
void
tcp_fuse_disable_pair(tcp_t *tcp, boolean_t unfusing)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	/*
	 * Force any tcp_fuse_rrw() calls to block until we've moved the data
	 * onto the stream head.
	 */
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);

	/*
	 * Drain any pending data; the detached check is needed because
	 * we may be called as a result of a tcp_unfuse() triggered by
	 * tcp_fuse_output().  Note that in case of a detached tcp, the
	 * draining will happen later after the tcp is unfused.  For non-
	 * urgent data, this can be handled by the regular tcp_rcv_drain().
	 * If we have urgent data sitting in the receive list, we will
	 * need to send up a SIGURG signal first before draining the data.
	 * All of these will be handled by the code in tcp_fuse_rcv_drain()
	 * when called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
		    (unfusing ? &tcp->tcp_fused_sigurg_mp : NULL));
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
		    (unfusing ? &peer_tcp->tcp_fused_sigurg_mp : NULL));
	}

	/*
	 * Make all current and future tcp_fuse_rrw() calls fail with EBUSY.
	 * To ensure threads don't sneak past the checks in tcp_fuse_rrw(),
	 * a given stream must be stopped prior to being unplugged (but the
	 * ordering of operations between the streams is unimportant).
	 */
	TCP_FUSE_SYNCSTR_STOP(tcp);
	TCP_FUSE_SYNCSTR_STOP(peer_tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);

	/* Lift up any flow-control conditions */
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcp_fusion_backenabled);
	}
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcp_fusion_backenabled);
	}

	/* Disable synchronous streams */
	tcp_fuse_syncstr_disable(tcp);
	tcp_fuse_syncstr_disable(peer_tcp);
}

/*
 * Calculate the size of the receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	ASSERT(tcp->tcp_fused);

	/* Ensure that the value is within the maximum upper bound */
	if (rwnd > tcp_max_buf)
		rwnd = tcp_max_buf;

	/* Obey the absolute minimum tcp receive high water mark */
	if (rwnd < tcp_sth_rcv_hiwat)
		rwnd = tcp_sth_rcv_hiwat;

	/*
	 * Round up to system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	tcp->tcp_fuse_rcv_hiwater = rwnd;
	return (rwnd);
}
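
/*
 * Worked example for the above (assuming PAGESIZE = 4K and typical
 * defaults of tcp_max_buf = 1M and tcp_sth_rcv_hiwat = 8K):
 *
 *	rwnd = 57344 (56K)	within bounds, page-aligned	-> 57344
 *	rwnd = 5000		below the 8K minimum		-> 8192
 *	rwnd = 70000		P2ROUNDUP(70000, 4096)		-> 73728
 */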

/*
 * Calculate the maxpsz for a fused tcp endpoint, and derive the peer's
 * limit on outstanding unread data blocks.
 */
int
tcp_fuse_maxpsz_set(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t sndbuf = tcp->tcp_xmit_hiwater;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for a more accurate flow-
	 * control accounting.  Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to system page size due to the lack of
	 * TCP MSS concept in Fusion.
	 */
	if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
		maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

	/*
	 * Calculate the peer's limit for the number of outstanding unread
	 * data blocks.  This is the number of data blocks that are allowed
	 * to reside in the receiver's queue before the sender gets flow
	 * controlled.  It is used only in the synchronous streams mode as
	 * a way to throttle the sender when it performs consecutive writes
	 * faster than can be read.  The value is derived from SO_SNDBUF in
	 * order to give the sender some control; we divide it by a large
	 * value (16KB) to produce a fairly low initial limit.
	 */
	if (tcp_fusion_rcv_unread_min == 0) {
		/* A value of 0 means that we disable the check */
		peer_tcp->tcp_fuse_rcv_unread_hiwater = 0;
	} else {
		peer_tcp->tcp_fuse_rcv_unread_hiwater =
		    MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min);
	}
	return (maxpsz);
}
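
/*
 * Worked example for the above (hypothetical values, PAGESIZE = 4K):
 * sndbuf = 64K with a peer tcp_fuse_rcv_hiwater of 48K gives
 *
 *	maxpsz = P2ROUNDUP(MIN(65536, 49152), 4096) >> 1 = 24576 (24K)
 *	unread limit = MAX(65536 >> 14, 8) = MAX(4, 8) = 8
 */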
1129