/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/optcom.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>
#include <inet/ip_if.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections.  The fusion of two local TCP endpoints occurs
 * at connection establishment time.  Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful.  If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.  After enqueueing
 * the data, the sender can either push (putnext) data up the receiver's
 * read queue; or the sender can simply return and let the receiver
 * retrieve the enqueued data via the synchronous streams entry point
 * tcp_fuse_rrw().  The latter path is taken if synchronous streams is
 * enabled (the default).  It is disabled if sockfs no longer resides
 * directly on top of the tcp module due to a module insertion or removal.
 * It also needs to be temporarily disabled when sending urgent data
 * because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
 * by the strsock_proto() hook.
 *
 * Synchronization is handled by squeue and the mutex tcp_non_sq_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue.  This ensures that neither side
 * can disappear while the other side is still sending data.  By itself,
 * squeue is not sufficient for guaranteeing safety when synchronous
 * streams is enabled.  The reason is that tcp_fuse_rrw() doesn't enter
 * the squeue and its access to tcp_rcv_list and other fusion-related
 * fields needs to be synchronized with the sender.  tcp_non_sq_lock is
 * used for this purpose.  When there is urgent data, the sender needs
 * to push the data up the receiver's streams read queue.  In order to
 * avoid holding the tcp_non_sq_lock across putnext(), the sender sets
 * the peer tcp's tcp_fuse_syncstr_plugged bit and releases tcp_non_sq_lock
 * (see macro TCP_FUSE_SYNCSTR_PLUG_DRAIN()).  If tcp_fuse_rrw() enters
 * after this point, it will see that synchronous streams is plugged and
 * will wait on tcp_fuse_plugcv.  After the sender has finished pushing up
 * all urgent data, it will clear the tcp_fuse_syncstr_plugged bit using
 * TCP_FUSE_SYNCSTR_UNPLUG_DRAIN().  This will cause any threads waiting
 * on tcp_fuse_plugcv to return EBUSY, and in turn cause strget() to call
 * getq_noenab() to dequeue data from the stream head instead.  Once the
 * data on the stream head has been consumed, tcp_fuse_rrw() may again
 * be used to process tcp_rcv_list.  However, if TCP_FUSE_SYNCSTR_STOP()
 * has been called, all future calls to tcp_fuse_rrw() will return EBUSY,
 * effectively disabling synchronous streams.
 *
 * The following note applies only to the synchronous streams mode.
 *
 * Flow control is done by checking the size of receive buffer and
 * the number of data blocks, both set to different limits.  This is
 * different from regular streams flow control where the cumulative size
 * check dominates the block count check -- the streams queue high water
 * mark typically represents bytes.  Each enqueue triggers notifications
 * to the receiving process; a build-up of data blocks indicates a
 * slow receiver and the sender should be blocked or informed at the
 * earliest moment instead of further wasting system resources.  In
 * effect, this is equivalent to limiting the number of outstanding
 * segments in flight.
 */
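
/*
 * Illustrative sketch (a recap, not additional logic): the dual-limit
 * flow control described above amounts to flow-controlling the sender
 * when either the byte count or the data block count limit is hit,
 * roughly:
 *
 *	if (peer->tcp_rcv_cnt >= peer->tcp_fuse_rcv_hiwater ||
 *	    peer->tcp_fuse_rcv_unread_cnt >= max_unread)
 *		flow-control the sender;
 *
 * The authoritative version, including the detached and
 * non-synchronous-streams cases, is in tcp_fuse_output() below.
 */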

/*
 * Setting this to B_FALSE disables fusion altogether; loopback
 * connections then go through the regular protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;

/*
 * Enabling this flag allows sockfs to retrieve data directly
 * from a fused tcp endpoint using the synchronous streams interface.
 */
boolean_t do_tcp_direct_sockfs = B_TRUE;

/*
 * This is the minimum number of outstanding writes allowed on
 * a synchronous streams-enabled receiving endpoint before the
 * sender gets flow-controlled.  Setting this value to 0 means
 * that the data block limit is equivalent to the byte count
 * limit, which essentially disables the check.
 */
#define	TCP_FUSION_RCV_UNREAD_MIN	8
uint_t tcp_fusion_rcv_unread_min = TCP_FUSION_RCV_UNREAD_MIN;
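
/*
 * Worked example (illustrative): tcp_fuse_maxpsz_set() below derives the
 * peer's unread data block limit as MAX(sndbuf >> 14,
 * tcp_fusion_rcv_unread_min).  With a hypothetical 256KB send buffer that
 * yields MAX(262144 >> 14, 8) = MAX(16, 8) = 16 blocks, while a 48KB send
 * buffer yields MAX(3, 8) = 8; i.e. this tunable acts as the floor for
 * small send buffers.
 */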

static void		tcp_fuse_syncstr_enable(tcp_t *);
static void		tcp_fuse_syncstr_disable(tcp_t *);
static boolean_t	strrput_sig(queue_t *, boolean_t);

/*
 * Return true if this connection needs some IP functionality
 */
static boolean_t
tcp_loopback_needs_ip(tcp_t *tcp, netstack_t *ns)
{
	ipsec_stack_t	*ipss = ns->netstack_ipsec;

	/*
	 * If ire is not cached, do not use fusion
	 */
	if (tcp->tcp_connp->conn_ire_cache == NULL) {
		/*
		 * There is no need to hold conn_lock here because when called
		 * from tcp_fuse() there can be no window where conn_ire_cache
		 * can change. This is not true when called from
		 * tcp_fuse_output() as conn_ire_cache can become null just
		 * after the check. It will be necessary to recheck for a NULL
		 * conn_ire_cache in tcp_fuse_output() to avoid passing a
		 * stale ill pointer to FW_HOOKS.
		 */
		return (B_TRUE);
	}
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		if (tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT(tcp->tcp_connp, ipss))
			return (B_TRUE);
	} else {
		if (tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)
			return (B_TRUE);
		if (CONN_OUTBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
		if (CONN_INBOUND_POLICY_PRESENT_V6(tcp->tcp_connp, ipss))
			return (B_TRUE);
	}
	if (!CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED.  It fuses a direct path between itself
 * and the active connect tcp such that regular tcp processing
 * may be bypassed under allowable circumstances.  Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns;
	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit q_hiwat of the listener tcp, but we can't
	 * really use tcp_listener since we get here after sending up
	 * T_CONN_IND and tcp_wput_accept() may be called independently,
	 * at which point tcp_listener is cleared; this is why we use
	 * tcp_saved_listener.  The listener itself is guaranteed to be
	 * around until tcp_accept_finish() is called on this eager --
	 * this won't happen until we're done since we're inside the
	 * eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);

	/*
	 * Lookup peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system.  Zone check
	 * is applied accordingly for loopback address, but not for
	 * local address since we want fusion to happen across Zones.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcph, ipst);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcph, ipst);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same
	 * squeue as our conn and is not a raw socket.  The squeue
	 * assignment of this eager tcp was done earlier at the time of SYN
	 * processing in ip_fanout_tcp{_v6}.  Note that sharing the same
	 * squeue by itself doesn't guarantee a safe condition to fuse,
	 * hence we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcps, tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 * In particular we bail out for non-simple TCP/IP or if IPsec/
	 * IPQoS policy/kernel SSL exists.
	 */
	ns = tcps->tcps_netstack;
	ipst = ns->netstack_ip;

	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    !tcp_loopback_needs_ip(tcp, ns) &&
	    !tcp_loopback_needs_ip(peer_tcp, ns) &&
	    tcp->tcp_kssl_ent == NULL &&
	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
		mblk_t *mp;
		struct stroptions *stropt;
		queue_t *peer_rq = peer_tcp->tcp_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp) && peer_rq != NULL);
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(tcp->tcp_kssl_ctx == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 */
		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		tcp->tcp_fused_sigurg_mp = mp;

		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		peer_tcp->tcp_fused_sigurg_mp = mp;

		/* Allocate M_SETOPTS mblk */
		if ((mp = allocb(sizeof (*stropt), BPRI_HI)) == NULL)
			goto failed;

		/* If sodirect is enabled on either endpoint, disable it */
		if (tcp->tcp_sodirect != NULL) {
			mutex_enter(tcp->tcp_sodirect->sod_lockp);
			SOD_DISABLE(tcp->tcp_sodirect);
			mutex_exit(tcp->tcp_sodirect->sod_lockp);
			tcp->tcp_sodirect = NULL;
		}
		if (peer_tcp->tcp_sodirect != NULL) {
			mutex_enter(peer_tcp->tcp_sodirect->sod_lockp);
			SOD_DISABLE(peer_tcp->tcp_sodirect);
			mutex_exit(peer_tcp->tcp_sodirect->sod_lockp);
			peer_tcp->tcp_sodirect = NULL;
		}

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints.  Having
		 * them set to non-zero values means asking for trouble,
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly set up.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * At this point we are a detached eager tcp and therefore
		 * don't have a queue assigned to us until accept happens.
		 * In the mean time the peer endpoint may immediately send
		 * us data as soon as fusion is finished, and we need to be
		 * able to flow control it in case it sends down huge amounts
		 * of data while we're still detached.  To prevent that we
		 * inherit the listener's q_hiwat value; this is temporary
		 * since we'll repeat the process in tcp_accept_finish().
		 */
		(void) tcp_fuse_set_rcv_hiwat(tcp,
		    tcp->tcp_saved_listener->tcp_rq->q_hiwat);

		/*
		 * Set the stream head's write offset value to zero since we
		 * won't be needing any room for TCP/IP headers; tell it to
		 * not break up the writes (this would reduce the amount of
		 * work done by kmem); and configure our receive buffer.
		 * Note that we can only do this for the active connect tcp
		 * since our eager is still detached; it will be dealt with
		 * later in tcp_accept_finish().
		 */
		DB_TYPE(mp) = M_SETOPTS;
		mp->b_wptr += sizeof (*stropt);

		stropt = (struct stroptions *)mp->b_rptr;
		stropt->so_flags = SO_MAXBLK | SO_WROFF | SO_HIWAT;
		stropt->so_maxblk = tcp_maxpsz_set(peer_tcp, B_FALSE);
		stropt->so_wroff = 0;

		/*
		 * Record the stream head's high water mark for the
		 * peer endpoint; this is used for flow-control
		 * purposes in tcp_fuse_output().
		 */
		stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(peer_tcp,
		    peer_rq->q_hiwat);

		/* Send the options up */
		putnext(peer_rq, mp);
	} else {
		TCP_STAT(tcps, tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
	ASSERT(peer_tcp->tcp_fused_sigurg_mp != NULL);

	/*
	 * We disable synchronous streams, drain any queued data and
	 * clear tcp_direct_sockfs.  The synchronous streams entry
	 * points will become no-ops after this point.
	 */
	tcp_fuse_disable_pair(tcp, B_TRUE);

	/*
	 * Update th_seq and th_ack in the header template
	 */
	U32_TO_ABE32(tcp->tcp_snxt, tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
	U32_TO_ABE32(peer_tcp->tcp_snxt, peer_tcp->tcp_tcph->th_seq);
	U32_TO_ABE32(peer_tcp->tcp_rnxt, peer_tcp->tcp_tcph->th_ack);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
}

/*
 * Fusion output routine for urgent data.  This routine is called by
 * tcp_fuse_output() for handling non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer.  For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode.  This is similar to the
	 * urgent data handling in the regular tcp.  We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app.  There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp.  If the new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost.  This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcps, tcp_fusion_urg);
	BUMP_MIB(&tcps->tcps_mib, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove existing T_EXDATA_IND, keep the data which follows
		 * it and relink our list.  Note that we don't modify the
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}
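
/*
 * Illustrative example of the relinking loop above, on a hypothetical
 * receive list (b_next links shown as "->", a T_EXDATA_IND attached to
 * its data via b_cont):
 *
 *	before:  data1 -> [T_EXDATA_IND|data2] -> data3
 *	after:   data1 -> data2 -> data3
 *
 * The T_EXDATA_IND mblk is freed and the old urgent data (data2) is thus
 * converted into ordinary in-band data, emulating the loss of the
 * previous out-of-band mark.
 */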

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 * If we are modifying any member that can be changed outside the squeue,
 * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t max_unread;
	boolean_t flow_stopped, peer_data_queued = B_FALSE;
	boolean_t urgent = (DB_TYPE(mp) != M_DATA);
	mblk_t *mp1 = mp;
	ill_t *ilp, *olp;
	ipif_t *iifp, *oifp;
	ipha_t *ipha;
	ip6_t *ip6h;
	tcph_t *tcph;
	uint_t ip_hdr_len;
	uint32_t seq;
	uint32_t recv_size = send_size;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	ip_stack_t	*ipst = ns->netstack_ip;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);

	/* If this connection requires IP, unfuse and use the regular path */
	if (tcp_loopback_needs_ip(tcp, ns) ||
	    tcp_loopback_needs_ip(peer_tcp, ns) ||
	    IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN, ipst)) {
		TCP_STAT(tcps, tcp_fusion_aborted);
		goto unfuse;
	}

	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}
	max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for M_PCSIG mblk.
	 */
	if (urgent) {
		/*
		 * We stop synchronous streams when we have urgent data
		 * queued to prevent tcp_fuse_rrw() from pulling it.  If
		 * for some reason the urgent data can't be delivered
		 * below, synchronous streams will remain stopped until
		 * someone drains the tcp_rcv_list.
		 */
		TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);
		tcp_fuse_output_urg(tcp, mp);

		mp1 = mp->b_cont;
	}

	if (tcp->tcp_ipversion == IPV4_VERSION &&
	    (HOOKS4_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS4_INTERESTED_LOOPBACK_OUT(ipst)) ||
	    tcp->tcp_ipversion == IPV6_VERSION &&
	    (HOOKS6_INTERESTED_LOOPBACK_IN(ipst) ||
	    HOOKS6_INTERESTED_LOOPBACK_OUT(ipst))) {
		/*
		 * Build ip and tcp header to satisfy FW_HOOKS.
		 * We only build it when any hook is present.
		 */
		if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
		    tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
			/* If tcp_xmit_mp fails, use regular path */
			goto unfuse;

		/*
		 * The ipif and ill can be safely referenced under the
		 * protection of conn_lock - see head of function comment for
		 * conn_get_held_ipif(). It is necessary to check that both
		 * the ipif and ill can be looked up (i.e. not condemned). If
		 * not, bail out and unfuse this connection.
		 */
		mutex_enter(&peer_tcp->tcp_connp->conn_lock);
		if ((peer_tcp->tcp_connp->conn_ire_cache == NULL) ||
		    (peer_tcp->tcp_connp->conn_ire_cache->ire_marks &
		    IRE_MARK_CONDEMNED) ||
		    ((oifp = peer_tcp->tcp_connp->conn_ire_cache->ire_ipif)
		    == NULL) ||
		    (!IPIF_CAN_LOOKUP(oifp)) ||
		    ((olp = oifp->ipif_ill) == NULL) ||
		    (ill_check_and_refhold(olp) != 0)) {
			mutex_exit(&peer_tcp->tcp_connp->conn_lock);
			goto unfuse;
		}
		mutex_exit(&peer_tcp->tcp_connp->conn_lock);

		/* PFHooks: LOOPBACK_OUT */
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			ipha = (ipha_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip4__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_out_event,
			    ipst->ips_ipv4firewall_loopback_out,
			    NULL, olp, ipha, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip4__loopback__out__end, mblk_t *, mp1);
		} else {
			ip6h = (ip6_t *)mp1->b_rptr;

			DTRACE_PROBE4(ip6__loopback__out__start,
			    ill_t *, NULL, ill_t *, olp,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_out_event,
			    ipst->ips_ipv6firewall_loopback_out,
			    NULL, olp, ip6h, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip6__loopback__out__end, mblk_t *, mp1);
		}
		ill_refrele(olp);

		if (mp1 == NULL)
			goto unfuse;

		/*
		 * The ipif and ill can be safely referenced under the
		 * protection of conn_lock - see head of function comment for
		 * conn_get_held_ipif(). It is necessary to check that both
		 * the ipif and ill can be looked up (i.e. not condemned). If
		 * not, bail out and unfuse this connection.
		 */
		mutex_enter(&tcp->tcp_connp->conn_lock);
		if ((tcp->tcp_connp->conn_ire_cache == NULL) ||
		    (tcp->tcp_connp->conn_ire_cache->ire_marks &
		    IRE_MARK_CONDEMNED) ||
		    ((iifp = tcp->tcp_connp->conn_ire_cache->ire_ipif)
		    == NULL) ||
		    (!IPIF_CAN_LOOKUP(iifp)) ||
		    ((ilp = iifp->ipif_ill) == NULL) ||
		    (ill_check_and_refhold(ilp) != 0)) {
			mutex_exit(&tcp->tcp_connp->conn_lock);
			goto unfuse;
		}
		mutex_exit(&tcp->tcp_connp->conn_lock);

		/* PFHooks: LOOPBACK_IN */
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			DTRACE_PROBE4(ip4__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ipha_t *, ipha, mblk_t *, mp1);
			FW_HOOKS(ipst->ips_ip4_loopback_in_event,
			    ipst->ips_ipv4firewall_loopback_in,
			    ilp, NULL, ipha, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip4__loopback__in__end, mblk_t *, mp1);
			ill_refrele(ilp);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = IPH_HDR_LENGTH(ipha);
		} else {
			DTRACE_PROBE4(ip6__loopback__in__start,
			    ill_t *, ilp, ill_t *, NULL,
			    ip6_t *, ip6h, mblk_t *, mp1);
			FW_HOOKS6(ipst->ips_ip6_loopback_in_event,
			    ipst->ips_ipv6firewall_loopback_in,
			    ilp, NULL, ip6h, mp1, mp1, 0, ipst);
			DTRACE_PROBE1(ip6__loopback__in__end, mblk_t *, mp1);
			ill_refrele(ilp);
			if (mp1 == NULL)
				goto unfuse;

			ip_hdr_len = ip_hdr_length_v6(mp1, ip6h);
		}

		/* Data length might be changed by FW_HOOKS */
		tcph = (tcph_t *)&mp1->b_rptr[ip_hdr_len];
		seq = ABE32_TO_U32(tcph->th_seq);
		recv_size += seq - tcp->tcp_snxt;

		/*
		 * The message duplicated by tcp_xmit_mp is freed.
		 * Note: the original message passed in remains unchanged.
		 */
		freemsg(mp1);
	}

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	/*
	 * Wake up and signal the peer; it is okay to do this before
	 * enqueueing because we are holding the lock.  One of the
	 * advantages of synchronous streams is the ability for us to
	 * find out when the application performs a read on the socket,
	 * by way of the tcp_fuse_rrw() entry point being called.  All
	 * data that gets enqueued onto the receiver is treated as if
	 * it has arrived at the receiving endpoint, thus generating
	 * SIGPOLL/SIGIO for asynchronous sockets just as in the strrput()
	 * case.  However, we only wake up the application when necessary,
	 * i.e. during the first enqueue.  When tcp_fuse_rrw() is called
	 * it will send everything upstream.
	 */
	if (peer_tcp->tcp_direct_sockfs && !urgent &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		/* Update poll events and send SIGPOLL/SIGIO if necessary */
		STR_WAKEUP_SENDSIG(STREAM(peer_tcp->tcp_rq),
		    peer_tcp->tcp_rcv_list);
	}

	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 */
	tcp_rcv_enqueue(peer_tcp, mp, recv_size);

	/* In case it wrapped around and also to keep it constant */
	peer_tcp->tcp_rwnd += recv_size;
	/*
	 * We increase the peer's unread message count here while still
	 * holding its tcp_non_sq_lock.  This ensures that the increment
	 * occurs in the same lock acquisition perimeter as the enqueue.
	 * Because of the lock ordering below, we may have to briefly
	 * release these locks, which creates a window in which we can
	 * race with tcp_fuse_rrw().
	 */
	peer_tcp->tcp_fuse_rcv_unread_cnt++;

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in either tcp_accept_finish(), tcp_unfuse(), or tcp_fuse_rrw().
	 * If tcp_direct_sockfs is on or if the peer endpoint is detached,
	 * we emulate streams flow control by checking the peer's queue
	 * size and high water mark; otherwise we simply use canputnext()
	 * to decide if we need to stop our flow.
	 *
	 * The outstanding unread data block check does not apply for a
	 * detached receiver; this is to avoid unnecessary blocking of the
	 * sender while the accept is still in progress, and is quite
	 * similar to the regular tcp.
	 */
	if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
		max_unread = UINT_MAX;

	/*
	 * Since we are accessing our tcp_flow_stopped and might modify it,
	 * we need to take tcp->tcp_non_sq_lock.  The lock for the highest
	 * address is held first.  Dropping peer_tcp->tcp_non_sq_lock should
	 * not be an issue here since we are within the squeue and the peer
	 * won't disappear.
	 */
	if (tcp > peer_tcp) {
		mutex_exit(&peer_tcp->tcp_non_sq_lock);
		mutex_enter(&tcp->tcp_non_sq_lock);
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
	} else {
		mutex_enter(&tcp->tcp_non_sq_lock);
	}
	flow_stopped = tcp->tcp_flow_stopped;
	if (((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_tcp->tcp_fuse_rcv_hiwater ||
	    peer_tcp->tcp_fuse_rcv_unread_cnt >= max_unread)) ||
	    (!peer_tcp->tcp_direct_sockfs && !TCP_IS_DETACHED(peer_tcp) &&
	    !canputnext(peer_tcp->tcp_rq))) {
		peer_data_queued = B_TRUE;
	}

	if (!flow_stopped && (peer_data_queued ||
	    (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcps, tcp_fusion_flowctl);
		DTRACE_PROBE4(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt,
		    uint_t, peer_tcp->tcp_fuse_rcv_unread_cnt);
	} else if (flow_stopped && !peer_data_queued &&
	    (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater)) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
		flow_stopped = B_FALSE;
	}
	mutex_exit(&tcp->tcp_non_sq_lock);

	/*
	 * If we are in synchronous streams mode and the peer read queue is
	 * not full then schedule a push timer if one is not scheduled
	 * already. This is needed for applications which use MSG_PEEK to
	 * determine the number of bytes available before issuing a 'real'
	 * read. It also makes flow control more deterministic, particularly
	 * for smaller message sizes.
	 */
	if (!urgent && peer_tcp->tcp_direct_sockfs &&
	    peer_tcp->tcp_push_tid == 0 && !TCP_IS_DETACHED(peer_tcp) &&
	    canputnext(peer_tcp->tcp_rq)) {
		peer_tcp->tcp_push_tid = TCP_TIMER(peer_tcp, tcp_push_timer,
		    MSEC_TO_TICK(tcps->tcps_push_timer_interval));
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);
	ipst->ips_loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += recv_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, send_size);

	BUMP_MIB(&tcps->tcps_mib, tcpInSegs);
	BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
	UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, send_size);

	BUMP_LOCAL(tcp->tcp_obsegs);
	BUMP_LOCAL(peer_tcp->tcp_ibsegs);

	DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t, send_size);

	if (!TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or if
		 * we're not flow-controlled.  There is no need for draining
		 * normal data when tcp_direct_sockfs is on because the peer
		 * will pull the data via tcp_fuse_rrw().
		 */
		if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			/*
			 * For TLI-based streams, a thread in tcp_accept_swap()
			 * can race with us.  That thread will ensure that the
			 * correct peer_tcp->tcp_rq is globally visible before
			 * peer_tcp->tcp_detached is visible as clear, but we
			 * must also ensure that the load of tcp_rq cannot be
			 * reordered to be before the tcp_detached check.
			 */
			membar_consumer();
			(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
			    NULL);
			/*
			 * If synchronous streams was stopped above due
			 * to the presence of urgent data, re-enable it.
			 */
			if (urgent)
				TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);
		}
	}
	return (B_TRUE);
unfuse:
	tcp_unfuse(tcp);
	return (B_FALSE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that couldn't
 * be sent upstream in the past.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
#ifdef DEBUG
	uint_t cnt = 0;
#endif
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	tcp_t		*peer_tcp = tcp->tcp_loopback_peer;
	boolean_t	sd_rd_eof = B_FALSE;

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in the receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp, tcp_push_timer,
			    MSEC_TO_TICK(tcps->tcps_push_timer_interval));
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		tcp->tcp_fused_sigurg = B_FALSE;
		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);
		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/*
	 * In the synchronous streams case, we generate SIGPOLL/SIGIO for
	 * each M_DATA that gets enqueued onto the receiver.  At this point
	 * we are about to drain any queued data via putnext().  In order
	 * to avoid extraneous signal generation from strrput(), we set
	 * the STRGETINPROG flag at the stream head prior to the draining
	 * and restore it afterwards.  This masks out signal generation only
	 * for M_DATA messages and does not affect urgent data.  We only do
	 * this if the STREOF flag is not set, which can happen if the
	 * application shuts down the read side of a stream.  In this case
	 * we simply free these messages to approximate the flushq behavior
	 * which normally occurs when STREOF is on the stream head read queue.
	 */
	if (tcp->tcp_direct_sockfs)
		sd_rd_eof = strrput_sig(q, B_FALSE);

	/* Drain the data */
	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		if (sd_rd_eof) {
			freemsg(mp);
		} else {
			putnext(q, mp);
			TCP_STAT(tcps, tcp_fusion_putnext);
		}
	}

	if (tcp->tcp_direct_sockfs && !sd_rd_eof)
		(void) strrput_sig(q, B_TRUE);

	ASSERT(cnt == tcp->tcp_rcv_cnt);
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;
	tcp->tcp_fuse_rcv_unread_cnt = 0;
	tcp->tcp_rwnd = q->q_hiwat;

	if (peer_tcp->tcp_flow_stopped && (TCP_UNSENT_BYTES(peer_tcp) <=
	    peer_tcp->tcp_xmit_lowater)) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}

	return (B_TRUE);
}

/*
 * Synchronous stream entry point for sockfs to retrieve
 * data directly from tcp_rcv_list.
 * tcp_fuse_rrw() might end up modifying the peer's tcp_flow_stopped,
 * for which it must take the peer's tcp_non_sq_lock as well before
 * making any change.  The order of taking the locks is based on
 * the TCP pointer itself.  Before we get the peer we need to take
 * our tcp_non_sq_lock so that the peer doesn't disappear.  However,
 * we cannot drop the lock if we have to grab the peer's lock (because
 * of ordering), since the peer might disappear in the interim.  So,
 * we take our tcp_non_sq_lock, get the peer, increment the ref on the
 * peer's conn, drop all the locks and then take the tcp_non_sq_lock in the
 * desired order.  Incrementing the conn ref on the peer means that the
 * peer won't disappear when we drop our tcp_non_sq_lock.
 */
int
tcp_fuse_rrw(queue_t *q, struiod_t *dp)
{
	tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t *mp;
	tcp_t *peer_tcp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	mutex_enter(&tcp->tcp_non_sq_lock);

	/*
	 * If tcp_fuse_syncstr_plugged is set, then another thread is moving
	 * the underlying data to the stream head.  We need to wait until it's
	 * done, then return EBUSY so that strget() will dequeue data from the
	 * stream head to ensure data is drained in-order.
	 */
plugged:
	if (tcp->tcp_fuse_syncstr_plugged) {
		do {
			cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_non_sq_lock);
		} while (tcp->tcp_fuse_syncstr_plugged);

		mutex_exit(&tcp->tcp_non_sq_lock);
		TCP_STAT(tcps, tcp_fusion_rrw_plugged);
		TCP_STAT(tcps, tcp_fusion_rrw_busy);
		return (EBUSY);
	}

	peer_tcp = tcp->tcp_loopback_peer;

	/*
	 * If someone had turned off tcp_direct_sockfs or if synchronous
	 * streams is stopped, we return EBUSY.  This causes strget() to
	 * dequeue data from the stream head instead.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
		mutex_exit(&tcp->tcp_non_sq_lock);
		TCP_STAT(tcps, tcp_fusion_rrw_busy);
		return (EBUSY);
	}

	/*
	 * Grab the locks in order.  The highest addressed tcp is locked
	 * first.  We don't do this within the tcp_rcv_list check since if
	 * we have to drop the lock, for ordering, then the tcp_rcv_list
	 * could change.
	 */
	if (peer_tcp > tcp) {
		CONN_INC_REF(peer_tcp->tcp_connp);
		mutex_exit(&tcp->tcp_non_sq_lock);
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
		mutex_enter(&tcp->tcp_non_sq_lock);
		/*
		 * This might have changed in the interim.  Once the
		 * read-side tcp_non_sq_lock is dropped above, anything
		 * can happen; we need to check all known conditions
		 * again once we reacquire the read-side tcp_non_sq_lock.
		 */
		if (tcp->tcp_fuse_syncstr_plugged) {
			mutex_exit(&peer_tcp->tcp_non_sq_lock);
			CONN_DEC_REF(peer_tcp->tcp_connp);
			goto plugged;
		}
		if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
			mutex_exit(&tcp->tcp_non_sq_lock);
			mutex_exit(&peer_tcp->tcp_non_sq_lock);
			CONN_DEC_REF(peer_tcp->tcp_connp);
			TCP_STAT(tcps, tcp_fusion_rrw_busy);
			return (EBUSY);
		}
		CONN_DEC_REF(peer_tcp->tcp_connp);
	} else {
		mutex_enter(&peer_tcp->tcp_non_sq_lock);
	}

	if ((mp = tcp->tcp_rcv_list) != NULL) {

		DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
		    uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);

		tcp->tcp_rcv_list = NULL;
		TCP_STAT(tcps, tcp_fusion_rrw_msgcnt);

		/*
		 * At this point nothing should be left in tcp_rcv_list.
		 * The only possible case where we would have a chain of
		 * b_next-linked messages is urgent data, but we wouldn't
		 * be here if that's true since urgent data is delivered
		 * via putnext() and synchronous streams is stopped until
		 * tcp_fuse_rcv_drain() is finished.
		 */
		ASSERT(DB_TYPE(mp) == M_DATA && mp->b_next == NULL);

		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
		tcp->tcp_fuse_rcv_unread_cnt = 0;

		if (peer_tcp->tcp_flow_stopped &&
		    (TCP_UNSENT_BYTES(peer_tcp) <=
		    peer_tcp->tcp_xmit_lowater)) {
			tcp_clrqfull(peer_tcp);
			TCP_STAT(tcps, tcp_fusion_backenabled);
		}
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);
	/*
	 * Either we just dequeued everything or we get here from sockfs
	 * and have nothing to return; in this case clear RSLEEP.
	 */
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);
	ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
	STR_WAKEUP_CLEAR(STREAM(q));

	mutex_exit(&tcp->tcp_non_sq_lock);
	dp->d_mp = mp;
	return (0);
}
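
/*
 * A minimal sketch of the address-ordered locking used above (illustrative
 * only; the real sequence, including the conn ref count manipulation and
 * the rechecks after dropping the lock, is in tcp_fuse_rrw()):
 *
 *	if (peer_tcp > tcp) {
 *		mutex_exit(&tcp->tcp_non_sq_lock);
 *		mutex_enter(&peer_tcp->tcp_non_sq_lock);
 *		mutex_enter(&tcp->tcp_non_sq_lock);
 *	} else {
 *		mutex_enter(&peer_tcp->tcp_non_sq_lock);
 *	}
 *
 * Always acquiring the higher-addressed tcp's lock first gives every
 * thread the same global order, which is what prevents deadlock between
 * two fused endpoints locking each other.
 */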

/*
 * Synchronous stream entry point used by certain ioctls to retrieve
 * information about or peek into the tcp_rcv_list.
 */
int
tcp_fuse_rinfop(queue_t *q, infod_t *dp)
{
	tcp_t	*tcp = Q_TO_CONN(q)->conn_tcp;
	mblk_t	*mp;
	uint_t	cmd = dp->d_cmd;
	int	res = 0;
	int	error = 0;
	struct stdata *stp = STREAM(q);

	mutex_enter(&tcp->tcp_non_sq_lock);
	/* If shutdown on read has happened, return nothing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag & STREOF) {
		mutex_exit(&stp->sd_lock);
		goto done;
	}
	mutex_exit(&stp->sd_lock);

	/*
	 * It is OK not to return an answer if tcp_rcv_list is
	 * currently not accessible.
	 */
	if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped ||
	    tcp->tcp_fuse_syncstr_plugged || (mp = tcp->tcp_rcv_list) == NULL)
		goto done;

	if (cmd & INFOD_COUNT) {
		/*
		 * We have at least one message and
		 * can return only one at a time.
		 */
		dp->d_count++;
		res |= INFOD_COUNT;
	}
	if (cmd & INFOD_BYTES) {
		/*
		 * Return size of all data messages.
		 */
		dp->d_bytes += tcp->tcp_rcv_cnt;
		res |= INFOD_BYTES;
	}
	if (cmd & INFOD_FIRSTBYTES) {
		/*
		 * Return size of first data message.
		 */
		dp->d_bytes = msgdsize(mp);
		res |= INFOD_FIRSTBYTES;
		dp->d_cmd &= ~INFOD_FIRSTBYTES;
	}
	if (cmd & INFOD_COPYOUT) {
		mblk_t *mp1;
		int n;

		if (DB_TYPE(mp) == M_DATA) {
			mp1 = mp;
		} else {
			mp1 = mp->b_cont;
			ASSERT(mp1 != NULL);
		}

		/*
		 * Return data contents of first message.
		 */
		ASSERT(DB_TYPE(mp1) == M_DATA);
		while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
			n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
			if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
			    UIO_READ, dp->d_uiop)) != 0) {
				goto done;
			}
			mp1 = mp1->b_cont;
		}
		res |= INFOD_COPYOUT;
		dp->d_cmd &= ~INFOD_COPYOUT;
	}
done:
	mutex_exit(&tcp->tcp_non_sq_lock);

	dp->d_res |= res;

	return (error);
}

/*
 * Enable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_enable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	/* We can only enable synchronous streams for sockfs mode */
	tcp->tcp_direct_sockfs = tcp->tcp_issocket && do_tcp_direct_sockfs;

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * We replace our q_qinfo with one that has the qi_rwp entry point.
	 * Clear SR_SIGALLDATA because we generate the equivalent signal(s)
	 * for all enqueued data in tcp_fuse_output().
	 */
	rq->q_qinfo = &tcp_loopback_rinit;
	rq->q_struiot = tcp_loopback_rinit.qi_struiot;
	stp->sd_struiordq = rq;
	stp->sd_rput_opt &= ~SR_SIGALLDATA;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Disable synchronous streams on a fused tcp loopback endpoint.
 */
static void
tcp_fuse_syncstr_disable(tcp_t *tcp)
{
	queue_t *rq = tcp->tcp_rq;
	struct stdata *stp = STREAM(rq);

	if (!tcp->tcp_direct_sockfs)
		return;

	mutex_enter(&stp->sd_lock);
	mutex_enter(QLOCK(rq));

	/*
	 * Reset q_qinfo to point to the default tcp entry points.
	 * Also restore SR_SIGALLDATA so that strrput() can generate
	 * the signals again for future M_DATA messages.
	 */
	rq->q_qinfo = &tcp_rinitv4;	/* No open - same as rinitv6 */
	rq->q_struiot = tcp_rinitv4.qi_struiot;
	stp->sd_struiordq = NULL;
	stp->sd_rput_opt |= SR_SIGALLDATA;
	tcp->tcp_direct_sockfs = B_FALSE;

	mutex_exit(QLOCK(rq));
	mutex_exit(&stp->sd_lock);
}

/*
 * Enable synchronous streams on a pair of fused tcp endpoints.
 */
void
tcp_fuse_syncstr_enable_pair(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	tcp_fuse_syncstr_enable(tcp);
	tcp_fuse_syncstr_enable(peer_tcp);
}

/*
 * Used to enable/disable signal generation at the stream head.  We already
 * generated the signal(s) for these messages when they were enqueued on the
 * receiver.  We also check if STREOF is set here.  If it is, we return
 * B_TRUE and let the caller decide what to do.
 */
static boolean_t
strrput_sig(queue_t *q, boolean_t on)
{
	struct stdata *stp = STREAM(q);

	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag == STREOF) {
		mutex_exit(&stp->sd_lock);
		return (B_TRUE);
	}
	if (on)
		stp->sd_flag &= ~STRGETINPROG;
	else
		stp->sd_flag |= STRGETINPROG;
	mutex_exit(&stp->sd_lock);

	return (B_FALSE);
}

/*
 * Disable synchronous streams on a pair of fused tcp endpoints and drain
 * any queued data; called either during unfuse or upon transitioning from
 * a socket to a stream endpoint due to _SIOCSOCKFALLBACK.
 */
void
tcp_fuse_disable_pair(tcp_t *tcp, boolean_t unfusing)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);

	/*
	 * Force any tcp_fuse_rrw() calls to block until we've moved the data
	 * onto the stream head.
	 */
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_PLUG_DRAIN(peer_tcp);

	/*
	 * Cancel any pending push timers.
	 */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	if (peer_tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(peer_tcp, peer_tcp->tcp_push_tid);
		peer_tcp->tcp_push_tid = 0;
	}

	/*
	 * Drain any pending data; the detached check is needed because
	 * we may be called as a result of a tcp_unfuse() triggered by
	 * tcp_fuse_output().  Note that in case of a detached tcp, the
	 * draining will happen later after the tcp is unfused.  For non-
	 * urgent data, this can be handled by the regular tcp_rcv_drain().
	 * If we have urgent data sitting in the receive list, we will
	 * need to send up a SIGURG signal first before draining the data.
	 * All of these will be handled by the code in tcp_fuse_rcv_drain()
	 * when called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_rq, tcp,
		    (unfusing ? &tcp->tcp_fused_sigurg_mp : NULL));
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
		    (unfusing ? &peer_tcp->tcp_fused_sigurg_mp : NULL));
	}

	/*
	 * Make all current and future tcp_fuse_rrw() calls fail with EBUSY.
	 * To ensure threads don't sneak past the checks in tcp_fuse_rrw(),
	 * a given stream must be stopped prior to being unplugged (but the
	 * ordering of operations between the streams is unimportant).
	 */
	TCP_FUSE_SYNCSTR_STOP(tcp);
	TCP_FUSE_SYNCSTR_STOP(peer_tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp);
	TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(peer_tcp);

	/* Lift up any flow-control conditions */
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}

	/* Disable synchronous streams */
	tcp_fuse_syncstr_disable(tcp);
	tcp_fuse_syncstr_disable(peer_tcp);
}
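
/*
 * Illustrative per-endpoint ordering of the operations above (a sketch of
 * the reasoning, not additional code):
 *
 *	PLUG_DRAIN -> drain tcp_rcv_list -> STOP -> UNPLUG_DRAIN
 *
 * Stopping before unplugging matters: in a hypothetical reversed order, a
 * thread blocked in tcp_fuse_rrw() could wake up on the unplug and pull
 * from tcp_rcv_list before seeing the stopped state, defeating the drain.
 */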

/*
 * Calculate the size of receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);

	/* Ensure that the value is within the maximum upper bound */
	if (rwnd > tcps->tcps_max_buf)
		rwnd = tcps->tcps_max_buf;

	/* Obey the absolute minimum tcp receive high water mark */
	if (rwnd < tcps->tcps_sth_rcv_hiwat)
		rwnd = tcps->tcps_sth_rcv_hiwat;

	/*
	 * Round up to system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	tcp->tcp_fuse_rcv_hiwater = rwnd;
	return (rwnd);
}
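
/*
 * Worked example (illustrative, assuming a hypothetical 4KB PAGESIZE and
 * bounds that leave the value untouched): a requested rwnd of 12345 bytes
 * passes the tcps_max_buf and tcps_sth_rcv_hiwat checks unchanged and is
 * then rounded up by P2ROUNDUP_TYPED(12345, 4096, size_t) to 16384, which
 * becomes tcp_fuse_rcv_hiwater.
 */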

/*
 * Calculate the maximum outstanding unread data blocks for a fused tcp
 * endpoint.
 */
int
tcp_fuse_maxpsz_set(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	uint_t sndbuf = tcp->tcp_xmit_hiwater;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fuse_rcv_hiwater != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for a more accurate flow-
	 * control accounting.  Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to system page size due to the lack of
	 * a TCP MSS concept in fusion.
	 */
	if (maxpsz > peer_tcp->tcp_fuse_rcv_hiwater)
		maxpsz = peer_tcp->tcp_fuse_rcv_hiwater;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

	/*
	 * Calculate the peer's limit for the number of outstanding unread
	 * data blocks.  This is the number of data blocks that are allowed
	 * to reside in the receiver's queue before the sender gets flow
	 * controlled.  It is used only in the synchronous streams mode as
	 * a way to throttle the sender when it performs consecutive writes
	 * faster than can be read.  The value is derived from SO_SNDBUF in
	 * order to give the sender some control; we divide it by a large
	 * value (16KB) to produce a fairly low initial limit.
	 */
	if (tcp_fusion_rcv_unread_min == 0) {
		/* A value of 0 means that we disable the check */
		peer_tcp->tcp_fuse_rcv_unread_hiwater = 0;
	} else {
		peer_tcp->tcp_fuse_rcv_unread_hiwater =
		    MAX(sndbuf >> 14, tcp_fusion_rcv_unread_min);
	}
	return (maxpsz);
}
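
/*
 * Worked example (illustrative, assuming a 4KB PAGESIZE): with a
 * hypothetical 32KB send buffer and a 16KB peer receive buffer,
 * maxpsz = MIN(32768, 16384) = 16384, rounded up to a page multiple
 * (still 16384) and halved to 8192.  The peer's unread block limit is
 * MAX(32768 >> 14, 8) = MAX(2, 8) = 8 data blocks.
 */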