xref: /illumos-gate/usr/src/uts/common/fs/sockfs/sockcommon_subr.c (revision d48be21240dfd051b689384ce2b23479d757f2d8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 /*
26  * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved.
27  * Copyright 2019 Joyent, Inc.
28  */
29 
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/signal.h>
33 #include <sys/cmn_err.h>
34 
35 #include <sys/stropts.h>
36 #include <sys/socket.h>
37 #include <sys/socketvar.h>
38 #include <sys/sockio.h>
39 #include <sys/strsubr.h>
40 #include <sys/strsun.h>
41 #include <sys/atomic.h>
42 #include <sys/tihdr.h>
43 
44 #include <fs/sockfs/sockcommon.h>
45 #include <fs/sockfs/sockfilter_impl.h>
46 #include <fs/sockfs/socktpi.h>
47 #include <fs/sockfs/sodirect.h>
48 #include <sys/ddi.h>
49 #include <inet/ip.h>
50 #include <sys/time.h>
51 #include <sys/cmn_err.h>
52 
53 #ifdef SOCK_TEST
54 extern int do_useracc;
55 extern clock_t sock_test_timelimit;
56 #endif /* SOCK_TEST */
57 
58 #define	MBLK_PULL_LEN 64
59 uint32_t so_mblk_pull_len = MBLK_PULL_LEN;
60 
61 #ifdef DEBUG
62 boolean_t so_debug_length = B_FALSE;
63 static boolean_t so_check_length(sonode_t *so);
64 #endif
65 
66 static int
67 so_acceptq_dequeue_locked(struct sonode *so, boolean_t dontblock,
68     struct sonode **nsop)
69 {
70 	struct sonode *nso = NULL;
71 
72 	*nsop = NULL;
73 	ASSERT(MUTEX_HELD(&so->so_acceptq_lock));
74 	while ((nso = list_remove_head(&so->so_acceptq_list)) == NULL) {
75 		/*
76 		 * No need to check so_error here, because it is not
77 		 * possible for a listening socket to be reset or otherwise
78 		 * disconnected.
79 		 *
80 		 * So now we just need check if it's ok to wait.
81 		 */
82 		if (dontblock)
83 			return (EWOULDBLOCK);
84 		if (so->so_state & (SS_CLOSING | SS_FALLBACK_PENDING))
85 			return (EINTR);
86 
87 		if (cv_wait_sig_swap(&so->so_acceptq_cv,
88 		    &so->so_acceptq_lock) == 0)
89 			return (EINTR);
90 	}
91 
92 	ASSERT(nso != NULL);
93 	ASSERT(so->so_acceptq_len > 0);
94 	so->so_acceptq_len--;
95 	nso->so_listener = NULL;
96 
97 	*nsop = nso;
98 
99 	return (0);
100 }
101 
102 /*
103  * int so_acceptq_dequeue(struct sonode *, boolean_t, struct sonode **)
104  *
105  * Pulls a connection off of the accept queue.
106  *
107  * Arguments:
108  *   so	       - listening socket
109  *   dontblock - indicate whether it's ok to sleep if there are no
110  *		 connections on the queue
111  *   nsop      - Value-return argument
112  *
113  * Return values:
114  *   0 when a connection is successfully dequeued, in which case nsop
115  *   is set to point to the new connection. Upon failure a non-zero
116  *   value is returned, and the value of nsop is set to NULL.
117  *
118  * Note:
119  *   so_acceptq_dequeue() may return prematurly if the socket is falling
120  *   back to TPI.
121  */
122 int
123 so_acceptq_dequeue(struct sonode *so, boolean_t dontblock,
124     struct sonode **nsop)
125 {
126 	int error;
127 
128 	mutex_enter(&so->so_acceptq_lock);
129 	error = so_acceptq_dequeue_locked(so, dontblock, nsop);
130 	mutex_exit(&so->so_acceptq_lock);
131 
132 	return (error);
133 }
134 
135 static void
136 so_acceptq_flush_impl(struct sonode *so, list_t *list, boolean_t doclose)
137 {
138 	struct sonode *nso;
139 
140 	while ((nso = list_remove_head(list)) != NULL) {
141 		nso->so_listener = NULL;
142 		if (doclose) {
143 			(void) socket_close(nso, 0, CRED());
144 		} else {
145 			/*
146 			 * Only used for fallback - not possible when filters
147 			 * are present.
148 			 */
149 			ASSERT(so->so_filter_active == 0);
150 			/*
151 			 * Since the socket is on the accept queue, there can
152 			 * only be one reference. We drop the reference and
153 			 * just blow off the socket.
154 			 */
155 			ASSERT(nso->so_count == 1);
156 			nso->so_count--;
157 			/* drop the proto ref */
158 			VN_RELE(SOTOV(nso));
159 		}
160 		socket_destroy(nso);
161 	}
162 }
163 /*
164  * void so_acceptq_flush(struct sonode *so)
165  *
166  * Removes all pending connections from a listening socket, and
167  * frees the associated resources.
168  *
169  * Arguments
170  *   so	     - listening socket
171  *   doclose - make a close downcall for each socket on the accept queue
172  *
173  * Return values:
174  *   None.
175  *
176  * Note:
177  *   The caller has to ensure that no calls to so_acceptq_enqueue() or
178  *   so_acceptq_dequeue() occur while the accept queue is being flushed.
179  *   So either the socket needs to be in a state where no operations
180  *   would come in, or so_lock needs to be obtained.
181  */
182 void
183 so_acceptq_flush(struct sonode *so, boolean_t doclose)
184 {
185 	so_acceptq_flush_impl(so, &so->so_acceptq_list, doclose);
186 	so_acceptq_flush_impl(so, &so->so_acceptq_defer, doclose);
187 
188 	so->so_acceptq_len = 0;
189 }
190 
191 int
192 so_wait_connected_locked(struct sonode *so, boolean_t nonblock,
193     sock_connid_t id)
194 {
195 	ASSERT(MUTEX_HELD(&so->so_lock));
196 
197 	/*
198 	 * The protocol has notified us that a connection attempt is being
199 	 * made, so before we wait for a notification to arrive we must
200 	 * clear out any errors associated with earlier connection attempts.
201 	 */
202 	if (so->so_error != 0 && SOCK_CONNID_LT(so->so_proto_connid, id))
203 		so->so_error = 0;
204 
205 	while (SOCK_CONNID_LT(so->so_proto_connid, id)) {
206 		if (nonblock)
207 			return (EINPROGRESS);
208 
209 		if (so->so_state & (SS_CLOSING | SS_FALLBACK_PENDING))
210 			return (EINTR);
211 
212 		if (cv_wait_sig_swap(&so->so_state_cv, &so->so_lock) == 0)
213 			return (EINTR);
214 	}
215 
216 	if (so->so_error != 0)
217 		return (sogeterr(so, B_TRUE));
218 	/*
219 	 * Under normal circumstances, so_error should contain an error
220 	 * in case the connect failed. However, it is possible for another
221 	 * thread to come in a consume the error, so generate a sensible
222 	 * error in that case.
223 	 */
224 	if ((so->so_state & SS_ISCONNECTED) == 0)
225 		return (ECONNREFUSED);
226 
227 	return (0);
228 }
229 
230 /*
231  * int so_wait_connected(struct sonode *so, boolean_t nonblock,
232  *    sock_connid_t id)
233  *
234  * Wait until the socket is connected or an error has occured.
235  *
236  * Arguments:
237  *   so	      - socket
238  *   nonblock - indicate whether it's ok to sleep if the connection has
239  *		not yet been established
240  *   gen      - generation number that was returned by the protocol
241  *		when the operation was started
242  *
243  * Returns:
244  *   0 if the connection attempt was successful, or an error indicating why
245  *   the connection attempt failed.
246  */
247 int
248 so_wait_connected(struct sonode *so, boolean_t nonblock, sock_connid_t id)
249 {
250 	int error;
251 
252 	mutex_enter(&so->so_lock);
253 	error = so_wait_connected_locked(so, nonblock, id);
254 	mutex_exit(&so->so_lock);
255 
256 	return (error);
257 }
258 
259 int
260 so_snd_wait_qnotfull_locked(struct sonode *so, boolean_t dontblock)
261 {
262 	int error;
263 
264 	ASSERT(MUTEX_HELD(&so->so_lock));
265 	while (SO_SND_FLOWCTRLD(so)) {
266 		if (so->so_state & SS_CANTSENDMORE)
267 			return (EPIPE);
268 		if (dontblock)
269 			return (EWOULDBLOCK);
270 
271 		if (so->so_state & (SS_CLOSING | SS_FALLBACK_PENDING))
272 			return (EINTR);
273 
274 		if (so->so_sndtimeo == 0) {
275 			/*
276 			 * Zero means disable timeout.
277 			 */
278 			error = cv_wait_sig(&so->so_snd_cv, &so->so_lock);
279 		} else {
280 			error = cv_reltimedwait_sig(&so->so_snd_cv,
281 			    &so->so_lock, so->so_sndtimeo, TR_CLOCK_TICK);
282 		}
283 		if (error == 0)
284 			return (EINTR);
285 		else if (error == -1)
286 			return (EAGAIN);
287 	}
288 	return (0);
289 }
290 
291 /*
292  * int so_wait_sendbuf(struct sonode *so, boolean_t dontblock)
293  *
294  * Wait for the transport to notify us about send buffers becoming
295  * available.
296  */
297 int
298 so_snd_wait_qnotfull(struct sonode *so, boolean_t dontblock)
299 {
300 	int error = 0;
301 
302 	mutex_enter(&so->so_lock);
303 	so->so_snd_wakeup = B_TRUE;
304 	error = so_snd_wait_qnotfull_locked(so, dontblock);
305 	so->so_snd_wakeup = B_FALSE;
306 	mutex_exit(&so->so_lock);
307 
308 	return (error);
309 }
310 
311 void
312 so_snd_qfull(struct sonode *so)
313 {
314 	mutex_enter(&so->so_lock);
315 	so->so_snd_qfull = B_TRUE;
316 	mutex_exit(&so->so_lock);
317 }
318 
319 void
320 so_snd_qnotfull(struct sonode *so)
321 {
322 	mutex_enter(&so->so_lock);
323 	so->so_snd_qfull = B_FALSE;
324 	/* wake up everyone waiting for buffers */
325 	cv_broadcast(&so->so_snd_cv);
326 	mutex_exit(&so->so_lock);
327 }
328 
329 /*
330  * Change the process/process group to which SIGIO is sent.
331  */
332 int
333 socket_chgpgrp(struct sonode *so, pid_t pid)
334 {
335 	int error;
336 
337 	ASSERT(MUTEX_HELD(&so->so_lock));
338 	if (pid != 0) {
339 		/*
340 		 * Permissions check by sending signal 0.
341 		 * Note that when kill fails it does a
342 		 * set_errno causing the system call to fail.
343 		 */
344 		error = kill(pid, 0);
345 		if (error != 0) {
346 			return (error);
347 		}
348 	}
349 	so->so_pgrp = pid;
350 	return (0);
351 }
352 
353 
354 /*
355  * Generate a SIGIO, for 'writable' events include siginfo structure,
356  * for read events just send the signal.
357  */
358 /*ARGSUSED*/
359 static void
360 socket_sigproc(proc_t *proc, int event)
361 {
362 	k_siginfo_t info;
363 
364 	ASSERT(event & (SOCKETSIG_WRITE | SOCKETSIG_READ | SOCKETSIG_URG));
365 
366 	if (event & SOCKETSIG_WRITE) {
367 		info.si_signo = SIGPOLL;
368 		info.si_code = POLL_OUT;
369 		info.si_errno = 0;
370 		info.si_fd = 0;
371 		info.si_band = 0;
372 		sigaddq(proc, NULL, &info, KM_NOSLEEP);
373 	}
374 	if (event & SOCKETSIG_READ) {
375 		sigtoproc(proc, NULL, SIGPOLL);
376 	}
377 	if (event & SOCKETSIG_URG) {
378 		sigtoproc(proc, NULL, SIGURG);
379 	}
380 }
381 
382 void
383 socket_sendsig(struct sonode *so, int event)
384 {
385 	proc_t *proc;
386 
387 	ASSERT(MUTEX_HELD(&so->so_lock));
388 
389 	if (so->so_pgrp == 0 || (!(so->so_state & SS_ASYNC) &&
390 	    event != SOCKETSIG_URG)) {
391 		return;
392 	}
393 
394 	dprint(3, ("sending sig %d to %d\n", event, so->so_pgrp));
395 
396 	if (so->so_pgrp > 0) {
397 		/*
398 		 * XXX This unfortunately still generates
399 		 * a signal when a fd is closed but
400 		 * the proc is active.
401 		 */
402 		mutex_enter(&pidlock);
403 		/*
404 		 * Even if the thread started in another zone, we're receiving
405 		 * on behalf of this socket's zone, so find the proc using the
406 		 * socket's zone ID.
407 		 */
408 		proc = prfind_zone(so->so_pgrp, so->so_zoneid);
409 		if (proc == NULL) {
410 			mutex_exit(&pidlock);
411 			return;
412 		}
413 		mutex_enter(&proc->p_lock);
414 		mutex_exit(&pidlock);
415 		socket_sigproc(proc, event);
416 		mutex_exit(&proc->p_lock);
417 	} else {
418 		/*
419 		 * Send to process group. Hold pidlock across
420 		 * calls to socket_sigproc().
421 		 */
422 		pid_t pgrp = -so->so_pgrp;
423 
424 		mutex_enter(&pidlock);
425 		/*
426 		 * Even if the thread started in another zone, we're receiving
427 		 * on behalf of this socket's zone, so find the pgrp using the
428 		 * socket's zone ID.
429 		 */
430 		proc = pgfind_zone(pgrp, so->so_zoneid);
431 		while (proc != NULL) {
432 			mutex_enter(&proc->p_lock);
433 			socket_sigproc(proc, event);
434 			mutex_exit(&proc->p_lock);
435 			proc = proc->p_pglink;
436 		}
437 		mutex_exit(&pidlock);
438 	}
439 }
440 
441 #define	MIN(a, b) ((a) < (b) ? (a) : (b))
442 /* Copy userdata into a new mblk_t */
443 mblk_t *
444 socopyinuio(uio_t *uiop, ssize_t iosize, size_t wroff, ssize_t maxblk,
445     size_t tail_len, int *errorp)
446 {
447 	mblk_t	*head = NULL, **tail = &head;
448 
449 	ASSERT(iosize == INFPSZ || iosize > 0);
450 
451 	if (iosize == INFPSZ || iosize > uiop->uio_resid)
452 		iosize = uiop->uio_resid;
453 
454 	if (maxblk == INFPSZ)
455 		maxblk = iosize;
456 
457 	/* Nothing to do in these cases, so we're done */
458 	if (iosize < 0 || maxblk < 0 || (maxblk == 0 && iosize > 0))
459 		goto done;
460 
461 	/*
462 	 * We will enter the loop below if iosize is 0; it will allocate an
463 	 * empty message block and call uiomove(9F) which will just return.
464 	 * We could avoid that with an extra check but would only slow
465 	 * down the much more likely case where iosize is larger than 0.
466 	 */
467 	do {
468 		ssize_t blocksize;
469 		mblk_t	*mp;
470 
471 		blocksize = MIN(iosize, maxblk);
472 		ASSERT(blocksize >= 0);
473 		mp = allocb(wroff + blocksize + tail_len, BPRI_MED);
474 		if (mp == NULL) {
475 			*errorp = ENOMEM;
476 			return (head);
477 		}
478 		mp->b_rptr += wroff;
479 		mp->b_wptr = mp->b_rptr + blocksize;
480 
481 		*tail = mp;
482 		tail = &mp->b_cont;
483 
484 		/* uiomove(9F) either returns 0 or EFAULT */
485 		if ((*errorp = uiomove(mp->b_rptr, (size_t)blocksize,
486 		    UIO_WRITE, uiop)) != 0) {
487 			ASSERT(*errorp != ENOMEM);
488 			freemsg(head);
489 			return (NULL);
490 		}
491 
492 		iosize -= blocksize;
493 	} while (iosize > 0);
494 
495 done:
496 	*errorp = 0;
497 	return (head);
498 }
499 
500 mblk_t *
501 socopyoutuio(mblk_t *mp, struct uio *uiop, ssize_t max_read, int *errorp)
502 {
503 	int error;
504 	ptrdiff_t n;
505 	mblk_t *nmp;
506 
507 	ASSERT(mp->b_wptr >= mp->b_rptr);
508 
509 	/*
510 	 * max_read is the offset of the oobmark and read can not go pass
511 	 * the oobmark.
512 	 */
513 	if (max_read == INFPSZ || max_read > uiop->uio_resid)
514 		max_read = uiop->uio_resid;
515 
516 	do {
517 		if ((n = MIN(max_read, MBLKL(mp))) != 0) {
518 			ASSERT(n > 0);
519 
520 			error = uiomove(mp->b_rptr, n, UIO_READ, uiop);
521 			if (error != 0) {
522 				freemsg(mp);
523 				*errorp = error;
524 				return (NULL);
525 			}
526 		}
527 
528 		mp->b_rptr += n;
529 		max_read -= n;
530 		while (mp != NULL && (mp->b_rptr >= mp->b_wptr)) {
531 			/*
532 			 * get rid of zero length mblks
533 			 */
534 			nmp = mp;
535 			mp = mp->b_cont;
536 			freeb(nmp);
537 		}
538 	} while (mp != NULL && max_read > 0);
539 
540 	*errorp = 0;
541 	return (mp);
542 }
543 
544 static void
545 so_prepend_msg(struct sonode *so, mblk_t *mp, mblk_t *last_tail)
546 {
547 	ASSERT(last_tail != NULL);
548 	mp->b_next = so->so_rcv_q_head;
549 	mp->b_prev = last_tail;
550 	ASSERT(!(DB_FLAGS(mp) & DBLK_UIOA));
551 
552 	if (so->so_rcv_q_head == NULL) {
553 		ASSERT(so->so_rcv_q_last_head == NULL);
554 		so->so_rcv_q_last_head = mp;
555 #ifdef DEBUG
556 	} else {
557 		ASSERT(!(DB_FLAGS(so->so_rcv_q_head) & DBLK_UIOA));
558 #endif
559 	}
560 	so->so_rcv_q_head = mp;
561 
562 #ifdef DEBUG
563 	if (so_debug_length) {
564 		mutex_enter(&so->so_lock);
565 		ASSERT(so_check_length(so));
566 		mutex_exit(&so->so_lock);
567 	}
568 #endif
569 }
570 
571 /*
572  * Move a mblk chain (mp_head, mp_last_head) to the sonode's rcv queue so it
573  * can be processed by so_dequeue_msg().
574  */
575 void
576 so_process_new_message(struct sonode *so, mblk_t *mp_head, mblk_t *mp_last_head)
577 {
578 	if (so->so_filter_active > 0 &&
579 	    (mp_head = sof_filter_data_in_proc(so, mp_head,
580 	    &mp_last_head)) == NULL)
581 		return;
582 
583 	ASSERT(mp_head->b_prev != NULL);
584 	if (so->so_rcv_q_head == NULL) {
585 		so->so_rcv_q_head = mp_head;
586 		so->so_rcv_q_last_head = mp_last_head;
587 		ASSERT(so->so_rcv_q_last_head->b_prev != NULL);
588 	} else {
589 		boolean_t flag_equal = ((DB_FLAGS(mp_head) & DBLK_UIOA) ==
590 		    (DB_FLAGS(so->so_rcv_q_last_head) & DBLK_UIOA));
591 
592 		if (mp_head->b_next == NULL &&
593 		    DB_TYPE(mp_head) == M_DATA &&
594 		    DB_TYPE(so->so_rcv_q_last_head) == M_DATA && flag_equal) {
595 			so->so_rcv_q_last_head->b_prev->b_cont = mp_head;
596 			so->so_rcv_q_last_head->b_prev = mp_head->b_prev;
597 			mp_head->b_prev = NULL;
598 		} else if (flag_equal && (DB_FLAGS(mp_head) & DBLK_UIOA)) {
599 			/*
600 			 * Append to last_head if more than one mblks, and both
601 			 * mp_head and last_head are I/OAT mblks.
602 			 */
603 			ASSERT(mp_head->b_next != NULL);
604 			so->so_rcv_q_last_head->b_prev->b_cont = mp_head;
605 			so->so_rcv_q_last_head->b_prev = mp_head->b_prev;
606 			mp_head->b_prev = NULL;
607 
608 			so->so_rcv_q_last_head->b_next = mp_head->b_next;
609 			mp_head->b_next = NULL;
610 			so->so_rcv_q_last_head = mp_last_head;
611 		} else {
612 #ifdef DEBUG
613 			{
614 				mblk_t *tmp_mblk;
615 				tmp_mblk = mp_head;
616 				while (tmp_mblk != NULL) {
617 					ASSERT(tmp_mblk->b_prev != NULL);
618 					tmp_mblk = tmp_mblk->b_next;
619 				}
620 			}
621 #endif
622 			so->so_rcv_q_last_head->b_next = mp_head;
623 			so->so_rcv_q_last_head = mp_last_head;
624 		}
625 	}
626 }
627 
628 /*
629  * Check flow control on a given sonode.  Must have so_lock held, and
630  * this function will release the hold.  Return true if flow control
631  * is cleared.
632  */
633 boolean_t
634 so_check_flow_control(struct sonode *so)
635 {
636 	ASSERT(MUTEX_HELD(&so->so_lock));
637 
638 	if (so->so_flowctrld && (so->so_rcv_queued < so->so_rcvlowat &&
639 	    !(so->so_state & SS_FIL_RCV_FLOWCTRL))) {
640 		so->so_flowctrld = B_FALSE;
641 		mutex_exit(&so->so_lock);
642 		/*
643 		 * Open up flow control. SCTP does not have any downcalls, and
644 		 * it will clr flow ctrl in sosctp_recvmsg().
645 		 */
646 		if (so->so_downcalls != NULL &&
647 		    so->so_downcalls->sd_clr_flowctrl != NULL) {
648 			(*so->so_downcalls->sd_clr_flowctrl)
649 			    (so->so_proto_handle);
650 		}
651 		/* filters can start injecting data */
652 		sof_sonode_notify_filters(so, SOF_EV_INJECT_DATA_IN_OK, 0);
653 		return (B_TRUE);
654 	} else {
655 		mutex_exit(&so->so_lock);
656 		return (B_FALSE);
657 	}
658 }
659 
660 int
661 so_dequeue_msg(struct sonode *so, mblk_t **mctlp, struct uio *uiop,
662     rval_t *rvalp, int flags)
663 {
664 	mblk_t	*mp, *nmp;
665 	mblk_t	*savemp, *savemptail;
666 	mblk_t	*new_msg_head;
667 	mblk_t	*new_msg_last_head;
668 	mblk_t	*last_tail;
669 	boolean_t partial_read;
670 	boolean_t reset_atmark = B_FALSE;
671 	int more = 0;
672 	int error;
673 	ssize_t oobmark;
674 	sodirect_t *sodp = so->so_direct;
675 
676 	partial_read = B_FALSE;
677 	*mctlp = NULL;
678 again:
679 	mutex_enter(&so->so_lock);
680 again1:
681 #ifdef DEBUG
682 	if (so_debug_length) {
683 		ASSERT(so_check_length(so));
684 	}
685 #endif
686 	if (so->so_state & SS_RCVATMARK) {
687 		/* Check whether the caller is OK to read past the mark */
688 		if (flags & MSG_NOMARK) {
689 			mutex_exit(&so->so_lock);
690 			return (EWOULDBLOCK);
691 		}
692 		reset_atmark = B_TRUE;
693 	}
694 	/*
695 	 * First move messages from the dump area to processing area
696 	 */
697 	if (sodp != NULL) {
698 		if (sodp->sod_enabled) {
699 			if (sodp->sod_uioa.uioa_state & UIOA_ALLOC) {
700 				/* nothing to uioamove */
701 				sodp = NULL;
702 			} else if (sodp->sod_uioa.uioa_state & UIOA_INIT) {
703 				sodp->sod_uioa.uioa_state &= UIOA_CLR;
704 				sodp->sod_uioa.uioa_state |= UIOA_ENABLED;
705 				/*
706 				 * try to uioamove() the data that
707 				 * has already queued.
708 				 */
709 				sod_uioa_so_init(so, sodp, uiop);
710 			}
711 		} else {
712 			sodp = NULL;
713 		}
714 	}
715 	new_msg_head = so->so_rcv_head;
716 	new_msg_last_head = so->so_rcv_last_head;
717 	so->so_rcv_head = NULL;
718 	so->so_rcv_last_head = NULL;
719 	oobmark = so->so_oobmark;
720 	/*
721 	 * We can release the lock as there can only be one reader
722 	 */
723 	mutex_exit(&so->so_lock);
724 
725 	if (new_msg_head != NULL) {
726 		so_process_new_message(so, new_msg_head, new_msg_last_head);
727 	}
728 	savemp = savemptail = NULL;
729 	rvalp->r_vals = 0;
730 	error = 0;
731 	mp = so->so_rcv_q_head;
732 
733 	if (mp != NULL &&
734 	    (so->so_rcv_timer_tid == 0 ||
735 	    so->so_rcv_queued >= so->so_rcv_thresh)) {
736 		partial_read = B_FALSE;
737 
738 		if (flags & MSG_PEEK) {
739 			if ((nmp = dupmsg(mp)) == NULL &&
740 			    (nmp = copymsg(mp)) == NULL) {
741 				size_t size = msgsize(mp);
742 
743 				error = strwaitbuf(size, BPRI_HI);
744 				if (error) {
745 					return (error);
746 				}
747 				goto again;
748 			}
749 			mp = nmp;
750 		} else {
751 			ASSERT(mp->b_prev != NULL);
752 			last_tail = mp->b_prev;
753 			mp->b_prev = NULL;
754 			so->so_rcv_q_head = mp->b_next;
755 			if (so->so_rcv_q_head == NULL) {
756 				so->so_rcv_q_last_head = NULL;
757 			}
758 			mp->b_next = NULL;
759 		}
760 
761 		ASSERT(mctlp != NULL);
762 		/*
763 		 * First process PROTO or PCPROTO blocks, if any.
764 		 */
765 		if (DB_TYPE(mp) != M_DATA) {
766 			*mctlp = mp;
767 			savemp = mp;
768 			savemptail = mp;
769 			ASSERT(DB_TYPE(mp) == M_PROTO ||
770 			    DB_TYPE(mp) == M_PCPROTO);
771 			while (mp->b_cont != NULL &&
772 			    DB_TYPE(mp->b_cont) != M_DATA) {
773 				ASSERT(DB_TYPE(mp->b_cont) == M_PROTO ||
774 				    DB_TYPE(mp->b_cont) == M_PCPROTO);
775 				mp = mp->b_cont;
776 				savemptail = mp;
777 			}
778 			mp = savemptail->b_cont;
779 			savemptail->b_cont = NULL;
780 		}
781 
782 		ASSERT(DB_TYPE(mp) == M_DATA);
783 		/*
784 		 * Now process DATA blocks, if any. Note that for sodirect
785 		 * enabled socket, uio_resid can be 0.
786 		 */
787 		if (uiop->uio_resid >= 0) {
788 			ssize_t copied = 0;
789 
790 			if (sodp != NULL && (DB_FLAGS(mp) & DBLK_UIOA)) {
791 				mutex_enter(&so->so_lock);
792 				ASSERT(uiop == (uio_t *)&sodp->sod_uioa);
793 				copied = sod_uioa_mblk(so, mp);
794 				if (copied > 0)
795 					partial_read = B_TRUE;
796 				mutex_exit(&so->so_lock);
797 				/* mark this mblk as processed */
798 				mp = NULL;
799 			} else {
800 				ssize_t oldresid = uiop->uio_resid;
801 
802 				if (MBLKL(mp) < so_mblk_pull_len) {
803 					if (pullupmsg(mp, -1) == 1) {
804 						last_tail = mp;
805 					}
806 				}
807 				/*
808 				 * Can not read beyond the oobmark
809 				 */
810 				mp = socopyoutuio(mp, uiop,
811 				    oobmark == 0 ? INFPSZ : oobmark, &error);
812 				if (error != 0) {
813 					freemsg(*mctlp);
814 					*mctlp = NULL;
815 					more = 0;
816 					goto done;
817 				}
818 				ASSERT(oldresid >= uiop->uio_resid);
819 				copied = oldresid - uiop->uio_resid;
820 				if (oldresid > uiop->uio_resid)
821 					partial_read = B_TRUE;
822 			}
823 			ASSERT(copied >= 0);
824 			if (copied > 0 && !(flags & MSG_PEEK)) {
825 				mutex_enter(&so->so_lock);
826 				so->so_rcv_queued -= copied;
827 				ASSERT(so->so_oobmark >= 0);
828 				if (so->so_oobmark > 0) {
829 					so->so_oobmark -= copied;
830 					ASSERT(so->so_oobmark >= 0);
831 					if (so->so_oobmark == 0) {
832 						ASSERT(so->so_state &
833 						    SS_OOBPEND);
834 						so->so_oobmark = 0;
835 						so->so_state |= SS_RCVATMARK;
836 					}
837 				}
838 				/*
839 				 * so_check_flow_control() will drop
840 				 * so->so_lock.
841 				 */
842 				rvalp->r_val2 = so_check_flow_control(so);
843 			}
844 		}
845 		if (mp != NULL) { /* more data blocks in msg */
846 			more |= MOREDATA;
847 			if ((flags & (MSG_PEEK|MSG_TRUNC))) {
848 				if (flags & MSG_PEEK) {
849 					freemsg(mp);
850 				} else {
851 					unsigned int msize = msgdsize(mp);
852 
853 					freemsg(mp);
854 					mutex_enter(&so->so_lock);
855 					so->so_rcv_queued -= msize;
856 					/*
857 					 * so_check_flow_control() will drop
858 					 * so->so_lock.
859 					 */
860 					rvalp->r_val2 =
861 					    so_check_flow_control(so);
862 				}
863 			} else if (partial_read && !somsghasdata(mp)) {
864 				/*
865 				 * Avoid queuing a zero-length tail part of
866 				 * a message. partial_read == 1 indicates that
867 				 * we read some of the message.
868 				 */
869 				freemsg(mp);
870 				more &= ~MOREDATA;
871 			} else {
872 				if (savemp != NULL &&
873 				    (flags & MSG_DUPCTRL)) {
874 					mblk_t *nmp;
875 					/*
876 					 * There should only be non data mblks
877 					 */
878 					ASSERT(DB_TYPE(savemp) != M_DATA &&
879 					    DB_TYPE(savemptail) != M_DATA);
880 try_again:
881 					if ((nmp = dupmsg(savemp)) == NULL &&
882 					    (nmp = copymsg(savemp)) == NULL) {
883 
884 						size_t size = msgsize(savemp);
885 
886 						error = strwaitbuf(size,
887 						    BPRI_HI);
888 						if (error != 0) {
889 							/*
890 							 * In case we
891 							 * cannot copy
892 							 * control data
893 							 * free the remaining
894 							 * data.
895 							 */
896 							freemsg(mp);
897 							goto done;
898 						}
899 						goto try_again;
900 					}
901 
902 					ASSERT(nmp != NULL);
903 					ASSERT(DB_TYPE(nmp) != M_DATA);
904 					savemptail->b_cont = mp;
905 					*mctlp = nmp;
906 					mp = savemp;
907 				}
908 				/*
909 				 * putback mp
910 				 */
911 				so_prepend_msg(so, mp, last_tail);
912 			}
913 		}
914 
915 		/* fast check so_rcv_head if there is more data */
916 		if (partial_read && !(so->so_state & SS_RCVATMARK) &&
917 		    *mctlp == NULL && uiop->uio_resid > 0 &&
918 		    !(flags & MSG_PEEK) && so->so_rcv_head != NULL) {
919 			goto again;
920 		}
921 	} else if (!partial_read) {
922 		mutex_enter(&so->so_lock);
923 		if (so->so_error != 0) {
924 			error = sogeterr(so, !(flags & MSG_PEEK));
925 			mutex_exit(&so->so_lock);
926 			return (error);
927 		}
928 		/*
929 		 * No pending data. Return right away for nonblocking
930 		 * socket, otherwise sleep waiting for data.
931 		 */
932 		if (!(so->so_state & SS_CANTRCVMORE) && uiop->uio_resid > 0) {
933 			if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
934 			    (flags & MSG_DONTWAIT)) {
935 				error = EWOULDBLOCK;
936 			} else {
937 				if (so->so_state & (SS_CLOSING |
938 				    SS_FALLBACK_PENDING)) {
939 					mutex_exit(&so->so_lock);
940 					error = EINTR;
941 					goto done;
942 				}
943 
944 				if (so->so_rcv_head != NULL) {
945 					goto again1;
946 				}
947 				so->so_rcv_wakeup = B_TRUE;
948 				so->so_rcv_wanted = uiop->uio_resid;
949 				if (so->so_rcvtimeo == 0) {
950 					/*
951 					 * Zero means disable timeout.
952 					 */
953 					error = cv_wait_sig(&so->so_rcv_cv,
954 					    &so->so_lock);
955 				} else {
956 					error = cv_reltimedwait_sig(
957 					    &so->so_rcv_cv, &so->so_lock,
958 					    so->so_rcvtimeo, TR_CLOCK_TICK);
959 				}
960 				so->so_rcv_wakeup = B_FALSE;
961 				so->so_rcv_wanted = 0;
962 
963 				if (error == 0) {
964 					error = EINTR;
965 				} else if (error == -1) {
966 					error = EAGAIN;
967 				} else {
968 					goto again1;
969 				}
970 			}
971 		}
972 		mutex_exit(&so->so_lock);
973 	}
974 	if (reset_atmark && partial_read && !(flags & MSG_PEEK)) {
975 		/*
976 		 * We are passed the mark, update state
977 		 * 4.3BSD and 4.4BSD clears the mark when peeking across it.
978 		 * The draft Posix socket spec states that the mark should
979 		 * not be cleared when peeking. We follow the latter.
980 		 */
981 		mutex_enter(&so->so_lock);
982 		ASSERT(so_verify_oobstate(so));
983 		so->so_state &= ~(SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK);
984 		freemsg(so->so_oobmsg);
985 		so->so_oobmsg = NULL;
986 		ASSERT(so_verify_oobstate(so));
987 		mutex_exit(&so->so_lock);
988 	}
989 	ASSERT(so->so_rcv_wakeup == B_FALSE);
990 done:
991 	if (sodp != NULL) {
992 		mutex_enter(&so->so_lock);
993 		if (sodp->sod_enabled &&
994 		    (sodp->sod_uioa.uioa_state & UIOA_ENABLED)) {
995 			SOD_UIOAFINI(sodp);
996 			if (sodp->sod_uioa.uioa_mbytes > 0) {
997 				ASSERT(so->so_rcv_q_head != NULL ||
998 				    so->so_rcv_head != NULL);
999 				so->so_rcv_queued -= sod_uioa_mblk(so, NULL);
1000 				if (error == EWOULDBLOCK)
1001 					error = 0;
1002 			}
1003 		}
1004 		mutex_exit(&so->so_lock);
1005 	}
1006 #ifdef DEBUG
1007 	if (so_debug_length) {
1008 		mutex_enter(&so->so_lock);
1009 		ASSERT(so_check_length(so));
1010 		mutex_exit(&so->so_lock);
1011 	}
1012 #endif
1013 	rvalp->r_val1 = more;
1014 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
1015 	return (error);
1016 }
1017 
1018 /*
1019  * Enqueue data from the protocol on the socket's rcv queue.
1020  *
1021  * We try to hook new M_DATA mblks onto an existing chain, however,
1022  * that cannot be done if the existing chain has already been
1023  * processed by I/OAT. Non-M_DATA mblks are just linked together via
1024  * b_next. In all cases the b_prev of the enqueued mblk is set to
1025  * point to the last mblk in its b_cont chain.
1026  */
1027 void
1028 so_enqueue_msg(struct sonode *so, mblk_t *mp, size_t msg_size)
1029 {
1030 	ASSERT(MUTEX_HELD(&so->so_lock));
1031 
1032 #ifdef DEBUG
1033 	if (so_debug_length) {
1034 		ASSERT(so_check_length(so));
1035 	}
1036 #endif
1037 	so->so_rcv_queued += msg_size;
1038 
1039 	if (so->so_rcv_head == NULL) {
1040 		ASSERT(so->so_rcv_last_head == NULL);
1041 		so->so_rcv_head = mp;
1042 		so->so_rcv_last_head = mp;
1043 	} else if ((DB_TYPE(mp) == M_DATA &&
1044 	    DB_TYPE(so->so_rcv_last_head) == M_DATA) &&
1045 	    ((DB_FLAGS(mp) & DBLK_UIOA) ==
1046 	    (DB_FLAGS(so->so_rcv_last_head) & DBLK_UIOA))) {
1047 		/* Added to the end */
1048 		ASSERT(so->so_rcv_last_head != NULL);
1049 		ASSERT(so->so_rcv_last_head->b_prev != NULL);
1050 		so->so_rcv_last_head->b_prev->b_cont = mp;
1051 	} else {
1052 		/* Start a new end */
1053 		so->so_rcv_last_head->b_next = mp;
1054 		so->so_rcv_last_head = mp;
1055 	}
1056 	while (mp->b_cont != NULL)
1057 		mp = mp->b_cont;
1058 
1059 	so->so_rcv_last_head->b_prev = mp;
1060 #ifdef DEBUG
1061 	if (so_debug_length) {
1062 		ASSERT(so_check_length(so));
1063 	}
1064 #endif
1065 }
1066 
1067 /*
1068  * Return B_TRUE if there is data in the message, B_FALSE otherwise.
1069  */
1070 boolean_t
1071 somsghasdata(mblk_t *mp)
1072 {
1073 	for (; mp; mp = mp->b_cont)
1074 		if (mp->b_datap->db_type == M_DATA) {
1075 			ASSERT(mp->b_wptr >= mp->b_rptr);
1076 			if (mp->b_wptr > mp->b_rptr)
1077 				return (B_TRUE);
1078 		}
1079 	return (B_FALSE);
1080 }
1081 
1082 /*
1083  * Flush the read side of sockfs.
1084  *
1085  * The caller must be sure that a reader is not already active when the
1086  * buffer is being flushed.
1087  */
1088 void
1089 so_rcv_flush(struct sonode *so)
1090 {
1091 	mblk_t  *mp;
1092 
1093 	ASSERT(MUTEX_HELD(&so->so_lock));
1094 
1095 	if (so->so_oobmsg != NULL) {
1096 		freemsg(so->so_oobmsg);
1097 		so->so_oobmsg = NULL;
1098 		so->so_oobmark = 0;
1099 		so->so_state &=
1100 		    ~(SS_OOBPEND|SS_HAVEOOBDATA|SS_HADOOBDATA|SS_RCVATMARK);
1101 	}
1102 
1103 	/*
1104 	 * Free messages sitting in the recv queues
1105 	 */
1106 	while (so->so_rcv_q_head != NULL) {
1107 		mp = so->so_rcv_q_head;
1108 		so->so_rcv_q_head = mp->b_next;
1109 		mp->b_next = mp->b_prev = NULL;
1110 		freemsg(mp);
1111 	}
1112 	while (so->so_rcv_head != NULL) {
1113 		mp = so->so_rcv_head;
1114 		so->so_rcv_head = mp->b_next;
1115 		mp->b_next = mp->b_prev = NULL;
1116 		freemsg(mp);
1117 	}
1118 	so->so_rcv_queued = 0;
1119 	so->so_rcv_q_head = NULL;
1120 	so->so_rcv_q_last_head = NULL;
1121 	so->so_rcv_head = NULL;
1122 	so->so_rcv_last_head = NULL;
1123 }
1124 
1125 /*
1126  * Handle recv* calls that set MSG_OOB or MSG_OOB together with MSG_PEEK.
1127  */
1128 int
1129 sorecvoob(struct sonode *so, struct nmsghdr *msg, struct uio *uiop, int flags,
1130     boolean_t oob_inline)
1131 {
1132 	mblk_t		*mp, *nmp;
1133 	int		error;
1134 
1135 	dprintso(so, 1, ("sorecvoob(%p, %p, 0x%x)\n", (void *)so, (void *)msg,
1136 	    flags));
1137 
1138 	if (msg != NULL) {
1139 		/*
1140 		 * There is never any oob data with addresses or control since
1141 		 * the T_EXDATA_IND does not carry any options.
1142 		 */
1143 		msg->msg_controllen = 0;
1144 		msg->msg_namelen = 0;
1145 		msg->msg_flags = 0;
1146 	}
1147 
1148 	mutex_enter(&so->so_lock);
1149 	ASSERT(so_verify_oobstate(so));
1150 	if (oob_inline ||
1151 	    (so->so_state & (SS_OOBPEND|SS_HADOOBDATA)) != SS_OOBPEND) {
1152 		dprintso(so, 1, ("sorecvoob: inline or data consumed\n"));
1153 		mutex_exit(&so->so_lock);
1154 		return (EINVAL);
1155 	}
1156 	if (!(so->so_state & SS_HAVEOOBDATA)) {
1157 		dprintso(so, 1, ("sorecvoob: no data yet\n"));
1158 		mutex_exit(&so->so_lock);
1159 		return (EWOULDBLOCK);
1160 	}
1161 	ASSERT(so->so_oobmsg != NULL);
1162 	mp = so->so_oobmsg;
1163 	if (flags & MSG_PEEK) {
1164 		/*
1165 		 * Since recv* can not return ENOBUFS we can not use dupmsg.
1166 		 * Instead we revert to the consolidation private
1167 		 * allocb_wait plus bcopy.
1168 		 */
1169 		mblk_t *mp1;
1170 
1171 		mp1 = allocb_wait(msgdsize(mp), BPRI_MED, STR_NOSIG, NULL);
1172 		ASSERT(mp1);
1173 
1174 		while (mp != NULL) {
1175 			ssize_t size;
1176 
1177 			size = MBLKL(mp);
1178 			bcopy(mp->b_rptr, mp1->b_wptr, size);
1179 			mp1->b_wptr += size;
1180 			ASSERT(mp1->b_wptr <= mp1->b_datap->db_lim);
1181 			mp = mp->b_cont;
1182 		}
1183 		mp = mp1;
1184 	} else {
1185 		/*
1186 		 * Update the state indicating that the data has been consumed.
1187 		 * Keep SS_OOBPEND set until data is consumed past the mark.
1188 		 */
1189 		so->so_oobmsg = NULL;
1190 		so->so_state ^= SS_HAVEOOBDATA|SS_HADOOBDATA;
1191 	}
1192 	ASSERT(so_verify_oobstate(so));
1193 	mutex_exit(&so->so_lock);
1194 
1195 	error = 0;
1196 	nmp = mp;
1197 	while (nmp != NULL && uiop->uio_resid > 0) {
1198 		ssize_t n = MBLKL(nmp);
1199 
1200 		n = MIN(n, uiop->uio_resid);
1201 		if (n > 0)
1202 			error = uiomove(nmp->b_rptr, n,
1203 			    UIO_READ, uiop);
1204 		if (error)
1205 			break;
1206 		nmp = nmp->b_cont;
1207 	}
1208 	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);
1209 	freemsg(mp);
1210 	return (error);
1211 }
1212 
1213 /*
1214  * Allocate and initializ sonode
1215  */
1216 /* ARGSUSED */
1217 struct sonode *
1218 socket_sonode_create(struct sockparams *sp, int family, int type,
1219     int protocol, int version, int sflags, int *errorp, struct cred *cr)
1220 {
1221 	sonode_t *so;
1222 	int	kmflags;
1223 
1224 	/*
1225 	 * Choose the right set of sonodeops based on the upcall and
1226 	 * down call version that the protocol has provided
1227 	 */
1228 	if (SOCK_UC_VERSION != sp->sp_smod_info->smod_uc_version ||
1229 	    SOCK_DC_VERSION != sp->sp_smod_info->smod_dc_version) {
1230 		/*
1231 		 * mismatch
1232 		 */
1233 #ifdef DEBUG
1234 		cmn_err(CE_CONT, "protocol and socket module version mismatch");
1235 #endif
1236 		*errorp = EINVAL;
1237 		return (NULL);
1238 	}
1239 
1240 	kmflags = (sflags & SOCKET_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
1241 
1242 	so = kmem_cache_alloc(socket_cache, kmflags);
1243 	if (so == NULL) {
1244 		*errorp = ENOMEM;
1245 		return (NULL);
1246 	}
1247 
1248 	sonode_init(so, sp, family, type, protocol, &so_sonodeops);
1249 
1250 	if (version == SOV_DEFAULT)
1251 		version = so_default_version;
1252 
1253 	so->so_version = (short)version;
1254 
1255 	/*
1256 	 * set the default values to be INFPSZ
1257 	 * if a protocol desires it can change the value later
1258 	 */
1259 	so->so_proto_props.sopp_rxhiwat = SOCKET_RECVHIWATER;
1260 	so->so_proto_props.sopp_rxlowat = SOCKET_RECVLOWATER;
1261 	so->so_proto_props.sopp_maxpsz = INFPSZ;
1262 	so->so_proto_props.sopp_maxblk = INFPSZ;
1263 
1264 	return (so);
1265 }
1266 
1267 int
1268 socket_init_common(struct sonode *so, struct sonode *pso, int flags, cred_t *cr)
1269 {
1270 	int error = 0;
1271 
1272 	if (pso != NULL) {
1273 		/*
1274 		 * We have a passive open, so inherit basic state from
1275 		 * the parent (listener).
1276 		 *
1277 		 * No need to grab the new sonode's lock, since there is no
1278 		 * one that can have a reference to it.
1279 		 */
1280 		mutex_enter(&pso->so_lock);
1281 
1282 		so->so_state |= SS_ISCONNECTED | (pso->so_state & SS_ASYNC);
1283 		so->so_pgrp = pso->so_pgrp;
1284 		so->so_rcvtimeo = pso->so_rcvtimeo;
1285 		so->so_sndtimeo = pso->so_sndtimeo;
1286 		so->so_xpg_rcvbuf = pso->so_xpg_rcvbuf;
1287 		/*
1288 		 * Make note of the socket level options. TCP and IP level
1289 		 * options are already inherited. We could do all this after
1290 		 * accept is successful but doing it here simplifies code and
1291 		 * no harm done for error case.
1292 		 */
1293 		so->so_options = pso->so_options & (SO_DEBUG|SO_REUSEADDR|
1294 		    SO_KEEPALIVE|SO_DONTROUTE|SO_BROADCAST|SO_USELOOPBACK|
1295 		    SO_OOBINLINE|SO_DGRAM_ERRIND|SO_LINGER);
1296 		so->so_proto_props = pso->so_proto_props;
1297 		so->so_mode = pso->so_mode;
1298 		so->so_pollev = pso->so_pollev & SO_POLLEV_ALWAYS;
1299 
1300 		mutex_exit(&pso->so_lock);
1301 
1302 		/*
1303 		 * If the parent has any filters, try to inherit them.
1304 		 */
1305 		if (pso->so_filter_active > 0 &&
1306 		    (error = sof_sonode_inherit_filters(so, pso)) != 0)
1307 			return (error);
1308 
1309 	} else {
1310 		struct sockparams *sp = so->so_sockparams;
1311 		sock_upcalls_t *upcalls_to_use;
1312 
1313 		/*
1314 		 * Attach automatic filters, if there are any.
1315 		 */
1316 		if (!list_is_empty(&sp->sp_auto_filters) &&
1317 		    (error = sof_sonode_autoattach_filters(so, cr)) != 0)
1318 			return (error);
1319 
1320 		/* OK to attach filters */
1321 		so->so_state |= SS_FILOP_OK;
1322 
1323 		/*
1324 		 * Based on the version number select the right upcalls to
1325 		 * pass down. Currently we only have one version so choose
1326 		 * default
1327 		 */
1328 		upcalls_to_use = &so_upcalls;
1329 
1330 		/* active open, so create a lower handle */
1331 		so->so_proto_handle =
1332 		    sp->sp_smod_info->smod_proto_create_func(so->so_family,
1333 		    so->so_type, so->so_protocol, &so->so_downcalls,
1334 		    &so->so_mode, &error, flags, cr);
1335 
1336 		if (so->so_proto_handle == NULL) {
1337 			ASSERT(error != 0);
1338 			/*
1339 			 * To be safe; if a lower handle cannot be created, and
1340 			 * the proto does not give a reason why, assume there
1341 			 * was a lack of memory.
1342 			 */
1343 			return ((error == 0) ? ENOMEM : error);
1344 		}
1345 		ASSERT(so->so_downcalls != NULL);
1346 		ASSERT(so->so_downcalls->sd_send != NULL ||
1347 		    so->so_downcalls->sd_send_uio != NULL);
1348 		if (so->so_downcalls->sd_recv_uio != NULL) {
1349 			ASSERT(so->so_downcalls->sd_poll != NULL);
1350 			so->so_pollev |= SO_POLLEV_ALWAYS;
1351 		}
1352 
1353 		(*so->so_downcalls->sd_activate)(so->so_proto_handle,
1354 		    (sock_upper_handle_t)so, upcalls_to_use, 0, cr);
1355 
1356 		/* Wildcard */
1357 
1358 		/*
1359 		 * FIXME No need for this, the protocol can deal with it in
1360 		 * sd_create(). Should update ICMP.
1361 		 */
1362 		if (so->so_protocol != so->so_sockparams->sp_protocol) {
1363 			int protocol = so->so_protocol;
1364 			int error;
1365 			/*
1366 			 * Issue SO_PROTOTYPE setsockopt.
1367 			 */
1368 			error = socket_setsockopt(so, SOL_SOCKET, SO_PROTOTYPE,
1369 			    &protocol, (t_uscalar_t)sizeof (protocol), cr);
1370 			if (error) {
1371 				(void) (*so->so_downcalls->sd_close)
1372 				    (so->so_proto_handle, 0, cr);
1373 
1374 				mutex_enter(&so->so_lock);
1375 				so_rcv_flush(so);
1376 				mutex_exit(&so->so_lock);
1377 				/*
1378 				 * Setsockopt often fails with ENOPROTOOPT but
1379 				 * socket() should fail with
1380 				 * EPROTONOSUPPORT/EPROTOTYPE.
1381 				 */
1382 				return (EPROTONOSUPPORT);
1383 			}
1384 		}
1385 	}
1386 
1387 	if (uioasync.enabled)
1388 		sod_sock_init(so);
1389 
1390 	/* put an extra reference on the socket for the protocol */
1391 	VN_HOLD(SOTOV(so));
1392 
1393 	return (0);
1394 }
1395 
1396 /*
1397  * int socket_ioctl_common(struct sonode *so, int cmd, intptr_t arg, int mode,
1398  *         struct cred *cr, int32_t *rvalp)
1399  *
1400  * Handle ioctls that manipulate basic socket state; non-blocking,
1401  * async, etc.
1402  *
1403  * Returns:
1404  *   < 0  - ioctl was not handle
1405  *  >= 0  - ioctl was handled, if > 0, then it is an errno
1406  *
1407  * Notes:
1408  *   Assumes the standard receive buffer is used to obtain info for
1409  *   NREAD.
1410  */
1411 /* ARGSUSED */
1412 int
1413 socket_ioctl_common(struct sonode *so, int cmd, intptr_t arg, int mode,
1414     struct cred *cr, int32_t *rvalp)
1415 {
1416 	switch (cmd) {
1417 	case SIOCSQPTR:
1418 		/*
1419 		 * SIOCSQPTR is valid only when helper stream is created
1420 		 * by the protocol.
1421 		 */
1422 
1423 		return (EOPNOTSUPP);
1424 	case FIONBIO: {
1425 		int32_t value;
1426 
1427 		if (so_copyin((void *)arg, &value, sizeof (int32_t),
1428 		    (mode & (int)FKIOCTL)))
1429 			return (EFAULT);
1430 
1431 		mutex_enter(&so->so_lock);
1432 		if (value) {
1433 			so->so_state |= SS_NDELAY;
1434 		} else {
1435 			so->so_state &= ~SS_NDELAY;
1436 		}
1437 		mutex_exit(&so->so_lock);
1438 		return (0);
1439 	}
1440 	case FIOASYNC: {
1441 		int32_t value;
1442 
1443 		if (so_copyin((void *)arg, &value, sizeof (int32_t),
1444 		    (mode & (int)FKIOCTL)))
1445 			return (EFAULT);
1446 
1447 		mutex_enter(&so->so_lock);
1448 
1449 		if (value) {
1450 			/* Turn on SIGIO */
1451 			so->so_state |= SS_ASYNC;
1452 		} else {
1453 			/* Turn off SIGIO */
1454 			so->so_state &= ~SS_ASYNC;
1455 		}
1456 		mutex_exit(&so->so_lock);
1457 
1458 		return (0);
1459 	}
1460 
1461 	case SIOCSPGRP:
1462 	case FIOSETOWN: {
1463 		int error;
1464 		pid_t pid;
1465 
1466 		if (so_copyin((void *)arg, &pid, sizeof (pid_t),
1467 		    (mode & (int)FKIOCTL)))
1468 			return (EFAULT);
1469 
1470 		mutex_enter(&so->so_lock);
1471 		error = (pid != so->so_pgrp) ? socket_chgpgrp(so, pid) : 0;
1472 		mutex_exit(&so->so_lock);
1473 		return (error);
1474 	}
1475 	case SIOCGPGRP:
1476 	case FIOGETOWN:
1477 		if (so_copyout(&so->so_pgrp, (void *)arg,
1478 		    sizeof (pid_t), (mode & (int)FKIOCTL)))
1479 			return (EFAULT);
1480 
1481 		return (0);
1482 	case SIOCATMARK: {
1483 		int retval;
1484 
1485 		/*
1486 		 * Only protocols that support urgent data can handle ATMARK.
1487 		 */
1488 		if ((so->so_mode & SM_EXDATA) == 0)
1489 			return (EINVAL);
1490 
1491 		/*
1492 		 * If the protocol is maintaining its own buffer, then the
1493 		 * request must be passed down.
1494 		 */
1495 		if (so->so_downcalls->sd_recv_uio != NULL)
1496 			return (-1);
1497 
1498 		retval = (so->so_state & SS_RCVATMARK) != 0;
1499 
1500 		if (so_copyout(&retval, (void *)arg, sizeof (int),
1501 		    (mode & (int)FKIOCTL))) {
1502 			return (EFAULT);
1503 		}
1504 		return (0);
1505 	}
1506 
1507 	case FIONREAD: {
1508 		int retval;
1509 
1510 		/*
1511 		 * If the protocol is maintaining its own buffer, then the
1512 		 * request must be passed down.
1513 		 */
1514 		if (so->so_downcalls->sd_recv_uio != NULL)
1515 			return (-1);
1516 
1517 		retval = MIN(so->so_rcv_queued, INT_MAX);
1518 
1519 		if (so_copyout(&retval, (void *)arg,
1520 		    sizeof (retval), (mode & (int)FKIOCTL))) {
1521 			return (EFAULT);
1522 		}
1523 		return (0);
1524 	}
1525 
1526 	case _I_GETPEERCRED: {
1527 		int error = 0;
1528 
1529 		if ((mode & FKIOCTL) == 0)
1530 			return (EINVAL);
1531 
1532 		mutex_enter(&so->so_lock);
1533 		if ((so->so_mode & SM_CONNREQUIRED) == 0) {
1534 			error = ENOTSUP;
1535 		} else if ((so->so_state & SS_ISCONNECTED) == 0) {
1536 			error = ENOTCONN;
1537 		} else if (so->so_peercred != NULL) {
1538 			k_peercred_t *kp = (k_peercred_t *)arg;
1539 			kp->pc_cr = so->so_peercred;
1540 			kp->pc_cpid = so->so_cpid;
1541 			crhold(so->so_peercred);
1542 		} else {
1543 			error = EINVAL;
1544 		}
1545 		mutex_exit(&so->so_lock);
1546 		return (error);
1547 	}
1548 	default:
1549 		return (-1);
1550 	}
1551 }
1552 
1553 /*
1554  * Handle the I_NREAD STREAM ioctl.
1555  */
1556 static int
1557 so_strioc_nread(struct sonode *so, intptr_t arg, int mode, int32_t *rvalp)
1558 {
1559 	size_t size = 0;
1560 	int retval;
1561 	int count = 0;
1562 	mblk_t *mp;
1563 	clock_t wakeup = drv_usectohz(10);
1564 
1565 	if (so->so_downcalls == NULL ||
1566 	    so->so_downcalls->sd_recv_uio != NULL)
1567 		return (EINVAL);
1568 
1569 	mutex_enter(&so->so_lock);
1570 	/* Wait for reader to get out of the way. */
1571 	while (so->so_flag & SOREADLOCKED) {
1572 		/*
1573 		 * If reader is waiting for data, then there should be nothing
1574 		 * on the rcv queue.
1575 		 */
1576 		if (so->so_rcv_wakeup)
1577 			goto out;
1578 
1579 		/* Do a timed sleep, in case the reader goes to sleep. */
1580 		(void) cv_reltimedwait(&so->so_read_cv, &so->so_lock, wakeup,
1581 		    TR_CLOCK_TICK);
1582 	}
1583 
1584 	/*
1585 	 * Since we are holding so_lock no new reader will come in, and the
1586 	 * protocol will not be able to enqueue data. So it's safe to walk
1587 	 * both rcv queues.
1588 	 */
1589 	mp = so->so_rcv_q_head;
1590 	if (mp != NULL) {
1591 		size = msgdsize(so->so_rcv_q_head);
1592 		for (; mp != NULL; mp = mp->b_next)
1593 			count++;
1594 	} else {
1595 		/*
1596 		 * In case the processing list was empty, get the size of the
1597 		 * next msg in line.
1598 		 */
1599 		size = msgdsize(so->so_rcv_head);
1600 	}
1601 
1602 	for (mp = so->so_rcv_head; mp != NULL; mp = mp->b_next)
1603 		count++;
1604 out:
1605 	mutex_exit(&so->so_lock);
1606 
1607 	/*
1608 	 * Drop down from size_t to the "int" required by the
1609 	 * interface.  Cap at INT_MAX.
1610 	 */
1611 	retval = MIN(size, INT_MAX);
1612 	if (so_copyout(&retval, (void *)arg, sizeof (retval),
1613 	    (mode & (int)FKIOCTL))) {
1614 		return (EFAULT);
1615 	} else {
1616 		*rvalp = count;
1617 		return (0);
1618 	}
1619 }
1620 
1621 /*
1622  * Process STREAM ioctls.
1623  *
1624  * Returns:
1625  *   < 0  - ioctl was not handle
1626  *  >= 0  - ioctl was handled, if > 0, then it is an errno
1627  */
1628 int
1629 socket_strioc_common(struct sonode *so, int cmd, intptr_t arg, int mode,
1630     struct cred *cr, int32_t *rvalp)
1631 {
1632 	int retval;
1633 
1634 	/* Only STREAM iotcls are handled here */
1635 	if ((cmd & 0xffffff00U) != STR)
1636 		return (-1);
1637 
1638 	switch (cmd) {
1639 	case I_CANPUT:
1640 		/*
1641 		 * We return an error for I_CANPUT so that isastream(3C) will
1642 		 * not report the socket as being a STREAM.
1643 		 */
1644 		return (EOPNOTSUPP);
1645 	case I_NREAD:
1646 		/* Avoid doing a fallback for I_NREAD. */
1647 		return (so_strioc_nread(so, arg, mode, rvalp));
1648 	case I_LOOK:
1649 		/* Avoid doing a fallback for I_LOOK. */
1650 		if (so_copyout("sockmod", (void *)arg, strlen("sockmod") + 1,
1651 		    (mode & (int)FKIOCTL))) {
1652 			return (EFAULT);
1653 		}
1654 		return (0);
1655 	default:
1656 		break;
1657 	}
1658 
1659 	/*
1660 	 * Try to fall back to TPI, and if successful, reissue the ioctl.
1661 	 */
1662 	if ((retval = so_tpi_fallback(so, cr)) == 0) {
1663 		/* Reissue the ioctl */
1664 		ASSERT(so->so_rcv_q_head == NULL);
1665 		return (SOP_IOCTL(so, cmd, arg, mode, cr, rvalp));
1666 	} else {
1667 		return (retval);
1668 	}
1669 }
1670 
1671 /*
1672  * This is called for all socket types to verify that the buffer size is large
1673  * enough for the option, and if we can, handle the request as well. Most
1674  * options will be forwarded to the protocol.
1675  */
1676 int
1677 socket_getopt_common(struct sonode *so, int level, int option_name,
1678     void *optval, socklen_t *optlenp, int flags)
1679 {
1680 	if (level != SOL_SOCKET)
1681 		return (-1);
1682 
1683 	switch (option_name) {
1684 	case SO_ERROR:
1685 	case SO_DOMAIN:
1686 	case SO_TYPE:
1687 	case SO_ACCEPTCONN: {
1688 		int32_t value;
1689 		socklen_t optlen = *optlenp;
1690 
1691 		if (optlen < (t_uscalar_t)sizeof (int32_t)) {
1692 			return (EINVAL);
1693 		}
1694 
1695 		switch (option_name) {
1696 		case SO_ERROR:
1697 			mutex_enter(&so->so_lock);
1698 			value = sogeterr(so, B_TRUE);
1699 			mutex_exit(&so->so_lock);
1700 			break;
1701 		case SO_DOMAIN:
1702 			value = so->so_family;
1703 			break;
1704 		case SO_TYPE:
1705 			value = so->so_type;
1706 			break;
1707 		case SO_ACCEPTCONN:
1708 			if (so->so_state & SS_ACCEPTCONN)
1709 				value = SO_ACCEPTCONN;
1710 			else
1711 				value = 0;
1712 			break;
1713 		}
1714 
1715 		bcopy(&value, optval, sizeof (value));
1716 		*optlenp = sizeof (value);
1717 
1718 		return (0);
1719 	}
1720 	case SO_SNDTIMEO:
1721 	case SO_RCVTIMEO: {
1722 		clock_t value;
1723 		socklen_t optlen = *optlenp;
1724 
1725 		if (get_udatamodel() == DATAMODEL_NONE ||
1726 		    get_udatamodel() == DATAMODEL_NATIVE) {
1727 			if (optlen < sizeof (struct timeval))
1728 				return (EINVAL);
1729 		} else {
1730 			if (optlen < sizeof (struct timeval32))
1731 				return (EINVAL);
1732 		}
1733 		if (option_name == SO_RCVTIMEO)
1734 			value = drv_hztousec(so->so_rcvtimeo);
1735 		else
1736 			value = drv_hztousec(so->so_sndtimeo);
1737 
1738 		if (get_udatamodel() == DATAMODEL_NONE ||
1739 		    get_udatamodel() == DATAMODEL_NATIVE) {
1740 			((struct timeval *)(optval))->tv_sec =
1741 			    value / (1000 * 1000);
1742 			((struct timeval *)(optval))->tv_usec =
1743 			    value % (1000 * 1000);
1744 			*optlenp = sizeof (struct timeval);
1745 		} else {
1746 			((struct timeval32 *)(optval))->tv_sec =
1747 			    value / (1000 * 1000);
1748 			((struct timeval32 *)(optval))->tv_usec =
1749 			    value % (1000 * 1000);
1750 			*optlenp = sizeof (struct timeval32);
1751 		}
1752 		return (0);
1753 	}
1754 	case SO_DEBUG:
1755 	case SO_REUSEADDR:
1756 	case SO_KEEPALIVE:
1757 	case SO_DONTROUTE:
1758 	case SO_BROADCAST:
1759 	case SO_USELOOPBACK:
1760 	case SO_OOBINLINE:
1761 	case SO_SNDBUF:
1762 #ifdef notyet
1763 	case SO_SNDLOWAT:
1764 	case SO_RCVLOWAT:
1765 #endif /* notyet */
1766 	case SO_DGRAM_ERRIND: {
1767 		socklen_t optlen = *optlenp;
1768 
1769 		if (optlen < (t_uscalar_t)sizeof (int32_t))
1770 			return (EINVAL);
1771 		break;
1772 	}
1773 	case SO_RCVBUF: {
1774 		socklen_t optlen = *optlenp;
1775 
1776 		if (optlen < (t_uscalar_t)sizeof (int32_t))
1777 			return (EINVAL);
1778 
1779 		if ((flags & _SOGETSOCKOPT_XPG4_2) && so->so_xpg_rcvbuf != 0) {
1780 			/*
1781 			 * XXX If SO_RCVBUF has been set and this is an
1782 			 * XPG 4.2 application then do not ask the transport
1783 			 * since the transport might adjust the value and not
1784 			 * return exactly what was set by the application.
1785 			 * For non-XPG 4.2 application we return the value
1786 			 * that the transport is actually using.
1787 			 */
1788 			*(int32_t *)optval = so->so_xpg_rcvbuf;
1789 			*optlenp = sizeof (so->so_xpg_rcvbuf);
1790 			return (0);
1791 		}
1792 		/*
1793 		 * If the option has not been set then get a default
1794 		 * value from the transport.
1795 		 */
1796 		break;
1797 	}
1798 	case SO_LINGER: {
1799 		socklen_t optlen = *optlenp;
1800 
1801 		if (optlen < (t_uscalar_t)sizeof (struct linger))
1802 			return (EINVAL);
1803 		break;
1804 	}
1805 	case SO_SND_BUFINFO: {
1806 		socklen_t optlen = *optlenp;
1807 
1808 		if (optlen < (t_uscalar_t)sizeof (struct so_snd_bufinfo))
1809 			return (EINVAL);
1810 		((struct so_snd_bufinfo *)(optval))->sbi_wroff =
1811 		    (so->so_proto_props).sopp_wroff;
1812 		((struct so_snd_bufinfo *)(optval))->sbi_maxblk =
1813 		    (so->so_proto_props).sopp_maxblk;
1814 		((struct so_snd_bufinfo *)(optval))->sbi_maxpsz =
1815 		    (so->so_proto_props).sopp_maxpsz;
1816 		((struct so_snd_bufinfo *)(optval))->sbi_tail =
1817 		    (so->so_proto_props).sopp_tail;
1818 		*optlenp = sizeof (struct so_snd_bufinfo);
1819 		return (0);
1820 	}
1821 	case SO_SND_COPYAVOID: {
1822 		sof_instance_t *inst;
1823 
1824 		/*
1825 		 * Avoid zero-copy if there is a filter with a data_out
1826 		 * callback. We could let the operation succeed, but then
1827 		 * the filter would have to copy the data anyway.
1828 		 */
1829 		for (inst = so->so_filter_top; inst != NULL;
1830 		    inst = inst->sofi_next) {
1831 			if (SOF_INTERESTED(inst, data_out))
1832 				return (EOPNOTSUPP);
1833 		}
1834 		break;
1835 	}
1836 
1837 	default:
1838 		break;
1839 	}
1840 
1841 	/* Unknown Option */
1842 	return (-1);
1843 }
1844 
1845 void
1846 socket_sonode_destroy(struct sonode *so)
1847 {
1848 	sonode_fini(so);
1849 	kmem_cache_free(socket_cache, so);
1850 }
1851 
1852 int
1853 so_zcopy_wait(struct sonode *so)
1854 {
1855 	int error = 0;
1856 
1857 	mutex_enter(&so->so_lock);
1858 	while (!(so->so_copyflag & STZCNOTIFY)) {
1859 		if (so->so_state & SS_CLOSING) {
1860 			mutex_exit(&so->so_lock);
1861 			return (EINTR);
1862 		}
1863 		if (cv_wait_sig(&so->so_copy_cv, &so->so_lock) == 0) {
1864 			error = EINTR;
1865 			break;
1866 		}
1867 	}
1868 	so->so_copyflag &= ~STZCNOTIFY;
1869 	mutex_exit(&so->so_lock);
1870 	return (error);
1871 }
1872 
1873 void
1874 so_timer_callback(void *arg)
1875 {
1876 	struct sonode *so = (struct sonode *)arg;
1877 
1878 	mutex_enter(&so->so_lock);
1879 
1880 	so->so_rcv_timer_tid = 0;
1881 	if (so->so_rcv_queued > 0) {
1882 		so_notify_data(so, so->so_rcv_queued);
1883 	} else {
1884 		mutex_exit(&so->so_lock);
1885 	}
1886 }
1887 
1888 #ifdef DEBUG
1889 /*
1890  * Verify that the length stored in so_rcv_queued and the length of data blocks
1891  * queued is same.
1892  */
1893 static boolean_t
1894 so_check_length(sonode_t *so)
1895 {
1896 	mblk_t *mp = so->so_rcv_q_head;
1897 	int len = 0;
1898 
1899 	ASSERT(MUTEX_HELD(&so->so_lock));
1900 
1901 	if (mp != NULL) {
1902 		len = msgdsize(mp);
1903 		while ((mp = mp->b_next) != NULL)
1904 			len += msgdsize(mp);
1905 	}
1906 	mp = so->so_rcv_head;
1907 	if (mp != NULL) {
1908 		len += msgdsize(mp);
1909 		while ((mp = mp->b_next) != NULL)
1910 			len += msgdsize(mp);
1911 	}
1912 	return ((len == so->so_rcv_queued) ? B_TRUE : B_FALSE);
1913 }
1914 #endif
1915 
1916 int
1917 so_get_mod_version(struct sockparams *sp)
1918 {
1919 	ASSERT(sp != NULL && sp->sp_smod_info != NULL);
1920 	return (sp->sp_smod_info->smod_version);
1921 }
1922 
1923 /*
1924  * so_start_fallback()
1925  *
1926  * Block new socket operations from coming in, and wait for active operations
1927  * to complete. Threads that are sleeping will be woken up so they can get
1928  * out of the way.
1929  *
1930  * The caller must be a reader on so_fallback_rwlock.
1931  */
1932 static boolean_t
1933 so_start_fallback(struct sonode *so)
1934 {
1935 	ASSERT(RW_READ_HELD(&so->so_fallback_rwlock));
1936 
1937 	mutex_enter(&so->so_lock);
1938 	if (so->so_state & SS_FALLBACK_PENDING) {
1939 		mutex_exit(&so->so_lock);
1940 		return (B_FALSE);
1941 	}
1942 	so->so_state |= SS_FALLBACK_PENDING;
1943 	/*
1944 	 * Poke all threads that might be sleeping. Any operation that comes
1945 	 * in after the cv_broadcast will observe the fallback pending flag
1946 	 * which cause the call to return where it would normally sleep.
1947 	 */
1948 	cv_broadcast(&so->so_state_cv);		/* threads in connect() */
1949 	cv_broadcast(&so->so_rcv_cv);		/* threads in recvmsg() */
1950 	cv_broadcast(&so->so_snd_cv);		/* threads in sendmsg() */
1951 	mutex_enter(&so->so_acceptq_lock);
1952 	cv_broadcast(&so->so_acceptq_cv);	/* threads in accept() */
1953 	mutex_exit(&so->so_acceptq_lock);
1954 	mutex_exit(&so->so_lock);
1955 
1956 	/*
1957 	 * The main reason for the rw_tryupgrade call is to provide
1958 	 * observability during the fallback process. We want to
1959 	 * be able to see if there are pending operations.
1960 	 */
1961 	if (rw_tryupgrade(&so->so_fallback_rwlock) == 0) {
1962 		/*
1963 		 * It is safe to drop and reaquire the fallback lock, because
1964 		 * we are guaranteed that another fallback cannot take place.
1965 		 */
1966 		rw_exit(&so->so_fallback_rwlock);
1967 		DTRACE_PROBE1(pending__ops__wait, (struct sonode *), so);
1968 		rw_enter(&so->so_fallback_rwlock, RW_WRITER);
1969 		DTRACE_PROBE1(pending__ops__complete, (struct sonode *), so);
1970 	}
1971 
1972 	return (B_TRUE);
1973 }
1974 
1975 /*
1976  * so_end_fallback()
1977  *
1978  * Allow socket opertions back in.
1979  *
1980  * The caller must be a writer on so_fallback_rwlock.
1981  */
1982 static void
1983 so_end_fallback(struct sonode *so)
1984 {
1985 	ASSERT(RW_ISWRITER(&so->so_fallback_rwlock));
1986 
1987 	mutex_enter(&so->so_lock);
1988 	so->so_state &= ~(SS_FALLBACK_PENDING|SS_FALLBACK_DRAIN);
1989 	mutex_exit(&so->so_lock);
1990 
1991 	rw_downgrade(&so->so_fallback_rwlock);
1992 }
1993 
1994 /*
1995  * so_quiesced_cb()
1996  *
1997  * Callback passed to the protocol during fallback. It is called once
1998  * the endpoint is quiescent.
1999  *
2000  * No requests from the user, no notifications from the protocol, so it
2001  * is safe to synchronize the state. Data can also be moved without
2002  * risk for reordering.
2003  *
2004  * We do not need to hold so_lock, since there can be only one thread
2005  * operating on the sonode.
2006  */
2007 static mblk_t *
2008 so_quiesced_cb(sock_upper_handle_t sock_handle, sock_quiesce_arg_t *arg,
2009     struct T_capability_ack *tcap,
2010     struct sockaddr *laddr, socklen_t laddrlen,
2011     struct sockaddr *faddr, socklen_t faddrlen, short opts)
2012 {
2013 	struct sonode *so = (struct sonode *)sock_handle;
2014 	boolean_t atmark;
2015 	mblk_t *retmp = NULL, **tailmpp = &retmp;
2016 
2017 	if (tcap != NULL)
2018 		sotpi_update_state(so, tcap, laddr, laddrlen, faddr, faddrlen,
2019 		    opts);
2020 
2021 	/*
2022 	 * Some protocols do not quiece the data path during fallback. Once
2023 	 * we set the SS_FALLBACK_DRAIN flag any attempt to queue data will
2024 	 * fail and the protocol is responsible for saving the data for later
2025 	 * delivery (i.e., once the fallback has completed).
2026 	 */
2027 	mutex_enter(&so->so_lock);
2028 	so->so_state |= SS_FALLBACK_DRAIN;
2029 	SOCKET_TIMER_CANCEL(so);
2030 	mutex_exit(&so->so_lock);
2031 
2032 	if (so->so_rcv_head != NULL) {
2033 		if (so->so_rcv_q_last_head == NULL)
2034 			so->so_rcv_q_head = so->so_rcv_head;
2035 		else
2036 			so->so_rcv_q_last_head->b_next = so->so_rcv_head;
2037 		so->so_rcv_q_last_head = so->so_rcv_last_head;
2038 	}
2039 
2040 	atmark = (so->so_state & SS_RCVATMARK) != 0;
2041 	/*
2042 	 * Clear any OOB state having to do with pending data. The TPI
2043 	 * code path will set the appropriate oob state when we move the
2044 	 * oob data to the STREAM head. We leave SS_HADOOBDATA since the oob
2045 	 * data has already been consumed.
2046 	 */
2047 	so->so_state &= ~(SS_RCVATMARK|SS_OOBPEND|SS_HAVEOOBDATA);
2048 
2049 	ASSERT(so->so_oobmsg != NULL || so->so_oobmark <= so->so_rcv_queued);
2050 
2051 	/*
2052 	 * Move data to the STREAM head.
2053 	 */
2054 	while (so->so_rcv_q_head != NULL) {
2055 		mblk_t *mp = so->so_rcv_q_head;
2056 		size_t mlen = msgdsize(mp);
2057 
2058 		so->so_rcv_q_head = mp->b_next;
2059 		mp->b_next = NULL;
2060 		mp->b_prev = NULL;
2061 
2062 		/*
2063 		 * Send T_EXDATA_IND if we are at the oob mark.
2064 		 */
2065 		if (atmark) {
2066 			struct T_exdata_ind *tei;
2067 			mblk_t *mp1 = arg->soqa_exdata_mp;
2068 
2069 			arg->soqa_exdata_mp = NULL;
2070 			ASSERT(mp1 != NULL);
2071 			mp1->b_datap->db_type = M_PROTO;
2072 			tei = (struct T_exdata_ind *)mp1->b_rptr;
2073 			tei->PRIM_type = T_EXDATA_IND;
2074 			tei->MORE_flag = 0;
2075 			mp1->b_wptr = (uchar_t *)&tei[1];
2076 
2077 			if (IS_SO_OOB_INLINE(so)) {
2078 				mp1->b_cont = mp;
2079 			} else {
2080 				ASSERT(so->so_oobmsg != NULL);
2081 				mp1->b_cont = so->so_oobmsg;
2082 				so->so_oobmsg = NULL;
2083 
2084 				/* process current mp next time around */
2085 				mp->b_next = so->so_rcv_q_head;
2086 				so->so_rcv_q_head = mp;
2087 				mlen = 0;
2088 			}
2089 			mp = mp1;
2090 
2091 			/* we have consumed the oob mark */
2092 			atmark = B_FALSE;
2093 		} else if (so->so_oobmark > 0) {
2094 			/*
2095 			 * Check if the OOB mark is within the current
2096 			 * mblk chain. In that case we have to split it up.
2097 			 */
2098 			if (so->so_oobmark < mlen) {
2099 				mblk_t *urg_mp = mp;
2100 
2101 				atmark = B_TRUE;
2102 				mp = NULL;
2103 				mlen = so->so_oobmark;
2104 
2105 				/*
2106 				 * It is assumed that the OOB mark does
2107 				 * not land within a mblk.
2108 				 */
2109 				do {
2110 					so->so_oobmark -= MBLKL(urg_mp);
2111 					mp = urg_mp;
2112 					urg_mp = urg_mp->b_cont;
2113 				} while (so->so_oobmark > 0);
2114 				mp->b_cont = NULL;
2115 				if (urg_mp != NULL) {
2116 					urg_mp->b_next = so->so_rcv_q_head;
2117 					so->so_rcv_q_head = urg_mp;
2118 				}
2119 			} else {
2120 				so->so_oobmark -= mlen;
2121 				if (so->so_oobmark == 0)
2122 					atmark = B_TRUE;
2123 			}
2124 		}
2125 
2126 		/*
2127 		 * Queue data on the STREAM head.
2128 		 */
2129 		so->so_rcv_queued -= mlen;
2130 		*tailmpp = mp;
2131 		tailmpp = &mp->b_next;
2132 	}
2133 	so->so_rcv_head = NULL;
2134 	so->so_rcv_last_head = NULL;
2135 	so->so_rcv_q_head = NULL;
2136 	so->so_rcv_q_last_head = NULL;
2137 
2138 	/*
2139 	 * Check if the oob byte is at the end of the data stream, or if the
2140 	 * oob byte has not yet arrived. In the latter case we have to send a
2141 	 * SIGURG and a mark indicator to the STREAM head. The mark indicator
2142 	 * is needed to guarantee correct behavior for SIOCATMARK. See block
2143 	 * comment in socktpi.h for more details.
2144 	 */
2145 	if (atmark || so->so_oobmark > 0) {
2146 		mblk_t *mp;
2147 
2148 		if (atmark && so->so_oobmsg != NULL) {
2149 			struct T_exdata_ind *tei;
2150 
2151 			mp = arg->soqa_exdata_mp;
2152 			arg->soqa_exdata_mp = NULL;
2153 			ASSERT(mp != NULL);
2154 			mp->b_datap->db_type = M_PROTO;
2155 			tei = (struct T_exdata_ind *)mp->b_rptr;
2156 			tei->PRIM_type = T_EXDATA_IND;
2157 			tei->MORE_flag = 0;
2158 			mp->b_wptr = (uchar_t *)&tei[1];
2159 
2160 			mp->b_cont = so->so_oobmsg;
2161 			so->so_oobmsg = NULL;
2162 
2163 			*tailmpp = mp;
2164 			tailmpp = &mp->b_next;
2165 		} else {
2166 			/* Send up the signal */
2167 			mp = arg->soqa_exdata_mp;
2168 			arg->soqa_exdata_mp = NULL;
2169 			ASSERT(mp != NULL);
2170 			DB_TYPE(mp) = M_PCSIG;
2171 			*mp->b_wptr++ = (uchar_t)SIGURG;
2172 			*tailmpp = mp;
2173 			tailmpp = &mp->b_next;
2174 
2175 			/* Send up the mark indicator */
2176 			mp = arg->soqa_urgmark_mp;
2177 			arg->soqa_urgmark_mp = NULL;
2178 			mp->b_flag = atmark ? MSGMARKNEXT : MSGNOTMARKNEXT;
2179 			*tailmpp = mp;
2180 			tailmpp = &mp->b_next;
2181 
2182 			so->so_oobmark = 0;
2183 		}
2184 	}
2185 	ASSERT(so->so_oobmark == 0);
2186 	ASSERT(so->so_rcv_queued == 0);
2187 
2188 	return (retmp);
2189 }
2190 
2191 #ifdef DEBUG
2192 /*
2193  * Do an integrity check of the sonode. This should be done if a
2194  * fallback fails after sonode has initially been converted to use
2195  * TPI and subsequently have to be reverted.
2196  *
2197  * Failure to pass the integrity check will panic the system.
2198  */
2199 void
2200 so_integrity_check(struct sonode *cur, struct sonode *orig)
2201 {
2202 	VERIFY(cur->so_vnode == orig->so_vnode);
2203 	VERIFY(cur->so_ops == orig->so_ops);
2204 	/*
2205 	 * For so_state we can only VERIFY the state flags in CHECK_STATE.
2206 	 * The other state flags might be affected by a notification from the
2207 	 * protocol.
2208 	 */
2209 #define	CHECK_STATE	(SS_CANTRCVMORE|SS_CANTSENDMORE|SS_NDELAY|SS_NONBLOCK| \
2210 	SS_ASYNC|SS_ACCEPTCONN|SS_SAVEDEOR|SS_RCVATMARK|SS_OOBPEND| \
2211 	SS_HAVEOOBDATA|SS_HADOOBDATA|SS_SENTLASTREADSIG|SS_SENTLASTWRITESIG)
2212 	VERIFY((cur->so_state & (orig->so_state & CHECK_STATE)) ==
2213 	    (orig->so_state & CHECK_STATE));
2214 	VERIFY(cur->so_mode == orig->so_mode);
2215 	VERIFY(cur->so_flag == orig->so_flag);
2216 	VERIFY(cur->so_count == orig->so_count);
2217 	/* Cannot VERIFY so_proto_connid; proto can update it */
2218 	VERIFY(cur->so_sockparams == orig->so_sockparams);
2219 	/* an error might have been recorded, but it can not be lost */
2220 	VERIFY(cur->so_error != 0 || orig->so_error == 0);
2221 	VERIFY(cur->so_family == orig->so_family);
2222 	VERIFY(cur->so_type == orig->so_type);
2223 	VERIFY(cur->so_protocol == orig->so_protocol);
2224 	VERIFY(cur->so_version == orig->so_version);
2225 	/* New conns might have arrived, but none should have been lost */
2226 	VERIFY(cur->so_acceptq_len >= orig->so_acceptq_len);
2227 	VERIFY(list_head(&cur->so_acceptq_list) ==
2228 	    list_head(&orig->so_acceptq_list));
2229 	VERIFY(cur->so_backlog == orig->so_backlog);
2230 	/* New OOB migth have arrived, but mark should not have been lost */
2231 	VERIFY(cur->so_oobmark >= orig->so_oobmark);
2232 	/* Cannot VERIFY so_oobmsg; the proto might have sent up a new one */
2233 	VERIFY(cur->so_pgrp == orig->so_pgrp);
2234 	VERIFY(cur->so_peercred == orig->so_peercred);
2235 	VERIFY(cur->so_cpid == orig->so_cpid);
2236 	VERIFY(cur->so_zoneid == orig->so_zoneid);
2237 	/* New data migth have arrived, but none should have been lost */
2238 	VERIFY(cur->so_rcv_queued >= orig->so_rcv_queued);
2239 	VERIFY(cur->so_rcv_q_head == orig->so_rcv_q_head);
2240 	VERIFY(cur->so_rcv_head == orig->so_rcv_head);
2241 	VERIFY(cur->so_proto_handle == orig->so_proto_handle);
2242 	VERIFY(cur->so_downcalls == orig->so_downcalls);
2243 	/* Cannot VERIFY so_proto_props; they can be updated by proto */
2244 }
2245 #endif
2246 
2247 /*
2248  * so_tpi_fallback()
2249  *
2250  * This is the fallback initation routine; things start here.
2251  *
2252  * Basic strategy:
2253  *   o Block new socket operations from coming in
2254  *   o Allocate/initate info needed by TPI
2255  *   o Quiesce the connection, at which point we sync
2256  *     state and move data
2257  *   o Change operations (sonodeops) associated with the socket
2258  *   o Unblock threads waiting for the fallback to finish
2259  */
2260 int
2261 so_tpi_fallback(struct sonode *so, struct cred *cr)
2262 {
2263 	int error;
2264 	queue_t *q;
2265 	struct sockparams *sp;
2266 	struct sockparams *newsp = NULL;
2267 	so_proto_fallback_func_t fbfunc;
2268 	const char *devpath;
2269 	boolean_t direct;
2270 	struct sonode *nso;
2271 	sock_quiesce_arg_t arg = { NULL, NULL };
2272 #ifdef DEBUG
2273 	struct sonode origso;
2274 #endif
2275 	error = 0;
2276 	sp = so->so_sockparams;
2277 	fbfunc = sp->sp_smod_info->smod_proto_fallback_func;
2278 
2279 	/*
2280 	 * Cannot fallback if the socket has active filters or a krecv callback.
2281 	 */
2282 	if (so->so_filter_active > 0 || so->so_krecv_cb != NULL)
2283 		return (EINVAL);
2284 
2285 	switch (so->so_family) {
2286 	case AF_INET:
2287 		devpath = sp->sp_smod_info->smod_fallback_devpath_v4;
2288 		break;
2289 	case AF_INET6:
2290 		devpath = sp->sp_smod_info->smod_fallback_devpath_v6;
2291 		break;
2292 	default:
2293 		return (EINVAL);
2294 	}
2295 
2296 	/*
2297 	 * Fallback can only happen if the socket module has a TPI device
2298 	 * and fallback function.
2299 	 */
2300 	if (devpath == NULL || fbfunc == NULL)
2301 		return (EINVAL);
2302 
2303 	/*
2304 	 * Initiate fallback; upon success we know that no new requests
2305 	 * will come in from the user.
2306 	 */
2307 	if (!so_start_fallback(so))
2308 		return (EAGAIN);
2309 #ifdef DEBUG
2310 	/*
2311 	 * Make a copy of the sonode in case we need to make an integrity
2312 	 * check later on.
2313 	 */
2314 	bcopy(so, &origso, sizeof (*so));
2315 #endif
2316 
2317 	sp->sp_stats.sps_nfallback.value.ui64++;
2318 
2319 	newsp = sockparams_hold_ephemeral_bydev(so->so_family, so->so_type,
2320 	    so->so_protocol, devpath, KM_SLEEP, &error);
2321 	if (error != 0)
2322 		goto out;
2323 
2324 	if (so->so_direct != NULL) {
2325 		sodirect_t *sodp = so->so_direct;
2326 		mutex_enter(&so->so_lock);
2327 
2328 		so->so_direct->sod_enabled = B_FALSE;
2329 		so->so_state &= ~SS_SODIRECT;
2330 		ASSERT(sodp->sod_uioafh == NULL);
2331 		mutex_exit(&so->so_lock);
2332 	}
2333 
2334 	/* Turn sonode into a TPI socket */
2335 	error = sotpi_convert_sonode(so, newsp, &direct, &q, cr);
2336 	if (error != 0)
2337 		goto out;
2338 	/*
2339 	 * When it comes to urgent data we have two cases to deal with;
2340 	 * (1) The oob byte has already arrived, or (2) the protocol has
2341 	 * notified that oob data is pending, but it has not yet arrived.
2342 	 *
2343 	 * For (1) all we need to do is send a T_EXDATA_IND to indicate were
2344 	 * in the byte stream the oob byte is. For (2) we have to send a
2345 	 * SIGURG (M_PCSIG), followed by a zero-length mblk indicating whether
2346 	 * the oob byte will be the next byte from the protocol.
2347 	 *
2348 	 * So in the worst case we need two mblks, one for the signal, another
2349 	 * for mark indication. In that case we use the exdata_mp for the sig.
2350 	 */
2351 	arg.soqa_exdata_mp = allocb_wait(sizeof (struct T_exdata_ind),
2352 	    BPRI_MED, STR_NOSIG, NULL);
2353 	arg.soqa_urgmark_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
2354 
2355 	/*
2356 	 * Now tell the protocol to start using TPI. so_quiesced_cb be
2357 	 * called once it's safe to synchronize state.
2358 	 */
2359 	DTRACE_PROBE1(proto__fallback__begin, struct sonode *, so);
2360 	error = (*fbfunc)(so->so_proto_handle, q, direct, so_quiesced_cb,
2361 	    &arg);
2362 	DTRACE_PROBE1(proto__fallback__end, struct sonode *, so);
2363 
2364 	if (error != 0) {
2365 		/* protocol was unable to do a fallback, revert the sonode */
2366 		sotpi_revert_sonode(so, cr);
2367 		goto out;
2368 	}
2369 
2370 	/*
2371 	 * Walk the accept queue and notify the proto that they should
2372 	 * fall back to TPI. The protocol will send up the T_CONN_IND.
2373 	 */
2374 	nso = list_head(&so->so_acceptq_list);
2375 	while (nso != NULL) {
2376 		int rval;
2377 		struct sonode *next;
2378 
2379 		if (arg.soqa_exdata_mp == NULL) {
2380 			arg.soqa_exdata_mp =
2381 			    allocb_wait(sizeof (struct T_exdata_ind),
2382 			    BPRI_MED, STR_NOSIG, NULL);
2383 		}
2384 		if (arg.soqa_urgmark_mp == NULL) {
2385 			arg.soqa_urgmark_mp = allocb_wait(0, BPRI_MED,
2386 			    STR_NOSIG, NULL);
2387 		}
2388 
2389 		DTRACE_PROBE1(proto__fallback__begin, struct sonode *, nso);
2390 		rval = (*fbfunc)(nso->so_proto_handle, NULL, direct,
2391 		    so_quiesced_cb, &arg);
2392 		DTRACE_PROBE1(proto__fallback__end, struct sonode *, nso);
2393 		if (rval != 0) {
2394 			/* Abort the connection */
2395 			zcmn_err(getzoneid(), CE_WARN,
2396 			    "Failed to convert socket in accept queue to TPI. "
2397 			    "Pid = %d\n", curproc->p_pid);
2398 			next = list_next(&so->so_acceptq_list, nso);
2399 			list_remove(&so->so_acceptq_list, nso);
2400 			so->so_acceptq_len--;
2401 
2402 			(void) socket_close(nso, 0, CRED());
2403 			socket_destroy(nso);
2404 			nso = next;
2405 		} else {
2406 			nso = list_next(&so->so_acceptq_list, nso);
2407 		}
2408 	}
2409 
2410 	/*
2411 	 * Now flush the acceptq, this will destroy all sockets. They will
2412 	 * be recreated in sotpi_accept().
2413 	 */
2414 	so_acceptq_flush(so, B_FALSE);
2415 
2416 	mutex_enter(&so->so_lock);
2417 	so->so_state |= SS_FALLBACK_COMP;
2418 	mutex_exit(&so->so_lock);
2419 
2420 	/*
2421 	 * Swap the sonode ops. Socket opertations that come in once this
2422 	 * is done will proceed without blocking.
2423 	 */
2424 	so->so_ops = &sotpi_sonodeops;
2425 
2426 	/*
2427 	 * Wake up any threads stuck in poll. This is needed since the poll
2428 	 * head changes when the fallback happens (moves from the sonode to
2429 	 * the STREAMS head).
2430 	 */
2431 	pollwakeup(&so->so_poll_list, POLLERR);
2432 
2433 	/*
2434 	 * When this non-STREAM socket was created we placed an extra ref on
2435 	 * the associated vnode to support asynchronous close. Drop that ref
2436 	 * here.
2437 	 */
2438 	ASSERT(SOTOV(so)->v_count >= 2);
2439 	VN_RELE(SOTOV(so));
2440 out:
2441 	so_end_fallback(so);
2442 
2443 	if (error != 0) {
2444 #ifdef DEBUG
2445 		so_integrity_check(so, &origso);
2446 #endif
2447 		zcmn_err(getzoneid(), CE_WARN,
2448 		    "Failed to convert socket to TPI (err=%d). Pid = %d\n",
2449 		    error, curproc->p_pid);
2450 		if (newsp != NULL)
2451 			SOCKPARAMS_DEC_REF(newsp);
2452 	}
2453 	if (arg.soqa_exdata_mp != NULL)
2454 		freemsg(arg.soqa_exdata_mp);
2455 	if (arg.soqa_urgmark_mp != NULL)
2456 		freemsg(arg.soqa_urgmark_mp);
2457 
2458 	return (error);
2459 }
2460 
2461 int
2462 so_krecv_set(sonode_t *so, so_krecv_f cb, void *arg)
2463 {
2464 	int ret;
2465 
2466 	if (cb == NULL && arg != NULL)
2467 		return (EINVAL);
2468 
2469 	SO_BLOCK_FALLBACK(so, so_krecv_set(so, cb, arg));
2470 
2471 	mutex_enter(&so->so_lock);
2472 	if (so->so_state & SS_FALLBACK_COMP) {
2473 		mutex_exit(&so->so_lock);
2474 		SO_UNBLOCK_FALLBACK(so);
2475 		return (ENOTSUP);
2476 	}
2477 
2478 	ret = so_lock_read(so, 0);
2479 	VERIFY(ret == 0);
2480 	/*
2481 	 * Other consumers may actually care about getting extant data delivered
2482 	 * to them, when they come along, they should figure out the best API
2483 	 * for that.
2484 	 */
2485 	so_rcv_flush(so);
2486 
2487 	so->so_krecv_cb = cb;
2488 	so->so_krecv_arg = arg;
2489 
2490 	so_unlock_read(so);
2491 	mutex_exit(&so->so_lock);
2492 	SO_UNBLOCK_FALLBACK(so);
2493 
2494 	return (0);
2495 }
2496 
2497 void
2498 so_krecv_unblock(sonode_t *so)
2499 {
2500 	mutex_enter(&so->so_lock);
2501 	VERIFY(so->so_krecv_cb != NULL);
2502 
2503 	so->so_rcv_queued = 0;
2504 	(void) so_check_flow_control(so);
2505 	/*
2506 	 * so_check_flow_control() always drops so->so_lock, so we won't
2507 	 * need to drop it ourselves.
2508 	 */
2509 }
2510