xref: /titanic_52/usr/src/uts/common/fs/sockfs/sockcommon_subr.c (revision dd572c32b944990cbfa56af3f36d6bb8969fd018)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 /*
26  * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved.
27  */
28 
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/signal.h>
32 #include <sys/cmn_err.h>
33 
34 #include <sys/stropts.h>
35 #include <sys/socket.h>
36 #include <sys/socketvar.h>
37 #include <sys/sockio.h>
38 #include <sys/strsubr.h>
39 #include <sys/strsun.h>
40 #include <sys/atomic.h>
41 #include <sys/tihdr.h>
42 
43 #include <fs/sockfs/sockcommon.h>
44 #include <fs/sockfs/sockfilter_impl.h>
45 #include <fs/sockfs/socktpi.h>
46 #include <fs/sockfs/sodirect.h>
47 #include <sys/ddi.h>
48 #include <inet/ip.h>
49 #include <sys/time.h>
50 #include <sys/cmn_err.h>
51 
52 #ifdef SOCK_TEST
53 extern int do_useracc;
54 extern clock_t sock_test_timelimit;
55 #endif /* SOCK_TEST */
56 
57 #define	MBLK_PULL_LEN 64
58 uint32_t so_mblk_pull_len = MBLK_PULL_LEN;
59 
60 #ifdef DEBUG
61 boolean_t so_debug_length = B_FALSE;
62 static boolean_t so_check_length(sonode_t *so);
63 #endif
64 
65 static int
66 so_acceptq_dequeue_locked(struct sonode *so, boolean_t dontblock,
67     struct sonode **nsop)
68 {
69 	struct sonode *nso = NULL;
70 
71 	*nsop = NULL;
72 	ASSERT(MUTEX_HELD(&so->so_acceptq_lock));
73 	while ((nso = list_remove_head(&so->so_acceptq_list)) == NULL) {
74 		/*
75 		 * No need to check so_error here, because it is not
76 		 * possible for a listening socket to be reset or otherwise
77 		 * disconnected.
78 		 *
79 		 * So now we just need to check if it's ok to wait.
80 		 */
81 		if (dontblock)
82 			return (EWOULDBLOCK);
83 		if (so->so_state & (SS_CLOSING | SS_FALLBACK_PENDING))
84 			return (EINTR);
85 
86 		if (cv_wait_sig_swap(&so->so_acceptq_cv,
87 		    &so->so_acceptq_lock) == 0)
88 			return (EINTR);
89 	}
90 
91 	ASSERT(nso != NULL);
92 	ASSERT(so->so_acceptq_len > 0);
93 	so->so_acceptq_len--;
94 	nso->so_listener = NULL;
95 
96 	*nsop = nso;
97 
98 	return (0);
99 }
100 
101 /*
102  * int so_acceptq_dequeue(struct sonode *, boolean_t, struct sonode **)
103  *
104  * Pulls a connection off of the accept queue.
105  *
106  * Arguments:
107  *   so	       - listening socket
108  *   dontblock - indicates whether it's ok to sleep if there are no
109  *		 connections on the queue
110  *   nsop      - Value-return argument
111  *
112  * Return values:
113  *   0 when a connection is successfully dequeued, in which case nsop
114  *   is set to point to the new connection. Upon failure a non-zero
115  *   value is returned, and the value of nsop is set to NULL.
116  *
117  * Note:
118  *   so_acceptq_dequeue() may return prematurely if the socket is falling
119  *   back to TPI.
120  */
121 int
122 so_acceptq_dequeue(struct sonode *so, boolean_t dontblock,
123     struct sonode **nsop)
124 {
125 	int error;
126 
127 	mutex_enter(&so->so_acceptq_lock);
128 	error = so_acceptq_dequeue_locked(so, dontblock, nsop);
129 	mutex_exit(&so->so_acceptq_lock);
130 
131 	return (error);
132 }
133 
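/*
 * Illustrative sketch (not part of the original source): an accept path
 * would typically call so_acceptq_dequeue() roughly as below, where
 * 'so', 'fflag' and 'nso' are hypothetical caller variables.
 *
 *	struct sonode *nso;
 *	int error;
 *
 *	error = so_acceptq_dequeue(so,
 *	    (fflag & (FNDELAY|FNONBLOCK)) != 0, &nso);
 *	if (error == 0) {
 *		// nso is the newly accepted connection
 *	} else {
 *		// EWOULDBLOCK: nonblocking and the queue is empty
 *		// EINTR: signal, close, or fallback in progress
 *	}
 */
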
134 static void
135 so_acceptq_flush_impl(struct sonode *so, list_t *list, boolean_t doclose)
136 {
137 	struct sonode *nso;
138 
139 	while ((nso = list_remove_head(list)) != NULL) {
140 		nso->so_listener = NULL;
141 		if (doclose) {
142 			(void) socket_close(nso, 0, CRED());
143 		} else {
144 			/*
145 			 * Only used for fallback - not possible when filters
146 			 * are present.
147 			 */
148 			ASSERT(so->so_filter_active == 0);
149 			/*
150 			 * Since the socket is on the accept queue, there can
151 			 * only be one reference. We drop the reference and
152 			 * just blow off the socket.
153 			 */
154 			ASSERT(nso->so_count == 1);
155 			nso->so_count--;
156 			/* drop the proto ref */
157 			VN_RELE(SOTOV(nso));
158 		}
159 		socket_destroy(nso);
160 	}
161 }
162 /*
163  * void so_acceptq_flush(struct sonode *so)
164  *
165  * Removes all pending connections from a listening socket, and
166  * frees the associated resources.
167  *
168  * Arguments
169  *   so	     - listening socket
170  *   doclose - make a close downcall for each socket on the accept queue
171  *
172  * Return values:
173  *   None.
174  *
175  * Note:
176  *   The caller has to ensure that no calls to so_acceptq_enqueue() or
177  *   so_acceptq_dequeue() occur while the accept queue is being flushed.
178  *   So either the socket needs to be in a state where no operations
179  *   would come in, or so_lock needs to be obtained.
180  */
181 void
182 so_acceptq_flush(struct sonode *so, boolean_t doclose)
183 {
184 	so_acceptq_flush_impl(so, &so->so_acceptq_list, doclose);
185 	so_acceptq_flush_impl(so, &so->so_acceptq_defer, doclose);
186 
187 	so->so_acceptq_len = 0;
188 }
189 
190 int
191 so_wait_connected_locked(struct sonode *so, boolean_t nonblock,
192     sock_connid_t id)
193 {
194 	ASSERT(MUTEX_HELD(&so->so_lock));
195 
196 	/*
197 	 * The protocol has notified us that a connection attempt is being
198 	 * made, so before we wait for a notification to arrive we must
199 	 * clear out any errors associated with earlier connection attempts.
200 	 */
201 	if (so->so_error != 0 && SOCK_CONNID_LT(so->so_proto_connid, id))
202 		so->so_error = 0;
203 
204 	while (SOCK_CONNID_LT(so->so_proto_connid, id)) {
205 		if (nonblock)
206 			return (EINPROGRESS);
207 
208 		if (so->so_state & (SS_CLOSING | SS_FALLBACK_PENDING))
209 			return (EINTR);
210 
211 		if (cv_wait_sig_swap(&so->so_state_cv, &so->so_lock) == 0)
212 			return (EINTR);
213 	}
214 
215 	if (so->so_error != 0)
216 		return (sogeterr(so, B_TRUE));
217 	/*
218 	 * Under normal circumstances, so_error should contain an error
219 	 * in case the connect failed. However, it is possible for another
220 	 * thread to come in and consume the error, so generate a sensible
221 	 * error in that case.
222 	 */
223 	if ((so->so_state & SS_ISCONNECTED) == 0)
224 		return (ECONNREFUSED);
225 
226 	return (0);
227 }
228 
229 /*
230  * int so_wait_connected(struct sonode *so, boolean_t nonblock,
231  *    sock_connid_t id)
232  *
233  * Wait until the socket is connected or an error has occurred.
234  *
235  * Arguments:
236  *   so	      - socket
237  *   nonblock - indicates whether it's ok to sleep if the connection has
238  *		not yet been established
239  *   id       - generation number that was returned by the protocol
240  *		when the operation was started
241  *
242  * Returns:
243  *   0 if the connection attempt was successful, or an error indicating why
244  *   the connection attempt failed.
245  */
246 int
247 so_wait_connected(struct sonode *so, boolean_t nonblock, sock_connid_t id)
248 {
249 	int error;
250 
251 	mutex_enter(&so->so_lock);
252 	error = so_wait_connected_locked(so, nonblock, id);
253 	mutex_exit(&so->so_lock);
254 
255 	return (error);
256 }
257 
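/*
 * Illustrative sketch (hypothetical caller, assuming the sd_connect
 * downcall signature used elsewhere in sockfs): a connect path obtains
 * a connection id from the protocol and then waits on that id.
 *
 *	sock_connid_t id;
 *
 *	error = (*so->so_downcalls->sd_connect)(so->so_proto_handle,
 *	    name, namelen, &id, cr);
 *	if (error == EINPROGRESS)
 *		error = so_wait_connected(so,
 *		    (fflag & (FNDELAY|FNONBLOCK)) != 0, id);
 */
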
258 int
259 so_snd_wait_qnotfull_locked(struct sonode *so, boolean_t dontblock)
260 {
261 	int error;
262 
263 	ASSERT(MUTEX_HELD(&so->so_lock));
264 	while (SO_SND_FLOWCTRLD(so)) {
265 		if (so->so_state & SS_CANTSENDMORE)
266 			return (EPIPE);
267 		if (dontblock)
268 			return (EWOULDBLOCK);
269 
270 		if (so->so_state & (SS_CLOSING | SS_FALLBACK_PENDING))
271 			return (EINTR);
272 
273 		if (so->so_sndtimeo == 0) {
274 			/*
275 			 * Zero means disable timeout.
276 			 */
277 			error = cv_wait_sig(&so->so_snd_cv, &so->so_lock);
278 		} else {
279 			error = cv_reltimedwait_sig(&so->so_snd_cv,
280 			    &so->so_lock, so->so_sndtimeo, TR_CLOCK_TICK);
281 		}
282 		if (error == 0)
283 			return (EINTR);
284 		else if (error == -1)
285 			return (EAGAIN);
286 	}
287 	return (0);
288 }
289 
290 /*
291  * int so_snd_wait_qnotfull(struct sonode *so, boolean_t dontblock)
292  *
293  * Wait for the transport to notify us about send buffers becoming
294  * available.
295  */
296 int
297 so_snd_wait_qnotfull(struct sonode *so, boolean_t dontblock)
298 {
299 	int error = 0;
300 
301 	mutex_enter(&so->so_lock);
302 	so->so_snd_wakeup = B_TRUE;
303 	error = so_snd_wait_qnotfull_locked(so, dontblock);
304 	so->so_snd_wakeup = B_FALSE;
305 	mutex_exit(&so->so_lock);
306 
307 	return (error);
308 }
309 
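/*
 * Illustrative sketch (hypothetical caller): a send path would normally
 * block here when the protocol has asserted flow control, and resume
 * once the protocol's upcall leads to so_snd_qnotfull() being called.
 *
 *	if (so->so_snd_qfull) {
 *		error = so_snd_wait_qnotfull(so, dontblock);
 *		if (error != 0)
 *			return (error);	// EPIPE, EWOULDBLOCK, EINTR or EAGAIN
 *	}
 */
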
310 void
311 so_snd_qfull(struct sonode *so)
312 {
313 	mutex_enter(&so->so_lock);
314 	so->so_snd_qfull = B_TRUE;
315 	mutex_exit(&so->so_lock);
316 }
317 
318 void
319 so_snd_qnotfull(struct sonode *so)
320 {
321 	mutex_enter(&so->so_lock);
322 	so->so_snd_qfull = B_FALSE;
323 	/* wake up everyone waiting for buffers */
324 	cv_broadcast(&so->so_snd_cv);
325 	mutex_exit(&so->so_lock);
326 }
327 
328 /*
329  * Change the process/process group to which SIGIO is sent.
330  */
331 int
332 socket_chgpgrp(struct sonode *so, pid_t pid)
333 {
334 	int error;
335 
336 	ASSERT(MUTEX_HELD(&so->so_lock));
337 	if (pid != 0) {
338 		/*
339 		 * Permissions check by sending signal 0.
340 		 * Note that when kill fails it does a
341 		 * set_errno causing the system call to fail.
342 		 */
343 		error = kill(pid, 0);
344 		if (error != 0) {
345 			return (error);
346 		}
347 	}
348 	so->so_pgrp = pid;
349 	return (0);
350 }
351 
352 
353 /*
354  * Generate a SIGIO; for 'writable' events include a siginfo structure,
355  * for read events just send the signal.
356  */
357 /*ARGSUSED*/
358 static void
359 socket_sigproc(proc_t *proc, int event)
360 {
361 	k_siginfo_t info;
362 
363 	ASSERT(event & (SOCKETSIG_WRITE | SOCKETSIG_READ | SOCKETSIG_URG));
364 
365 	if (event & SOCKETSIG_WRITE) {
366 		info.si_signo = SIGPOLL;
367 		info.si_code = POLL_OUT;
368 		info.si_errno = 0;
369 		info.si_fd = 0;
370 		info.si_band = 0;
371 		sigaddq(proc, NULL, &info, KM_NOSLEEP);
372 	}
373 	if (event & SOCKETSIG_READ) {
374 		sigtoproc(proc, NULL, SIGPOLL);
375 	}
376 	if (event & SOCKETSIG_URG) {
377 		sigtoproc(proc, NULL, SIGURG);
378 	}
379 }
380 
381 void
382 socket_sendsig(struct sonode *so, int event)
383 {
384 	proc_t *proc;
385 
386 	ASSERT(MUTEX_HELD(&so->so_lock));
387 
388 	if (so->so_pgrp == 0 || (!(so->so_state & SS_ASYNC) &&
389 	    event != SOCKETSIG_URG)) {
390 		return;
391 	}
392 
393 	dprint(3, ("sending sig %d to %d\n", event, so->so_pgrp));
394 
395 	if (so->so_pgrp > 0) {
396 		/*
397 		 * XXX This unfortunately still generates
398 		 * a signal when an fd is closed but
399 		 * the proc is active.
400 		 */
401 		mutex_enter(&pidlock);
402 		/*
403 		 * Even if the thread started in another zone, we're receiving
404 		 * on behalf of this socket's zone, so find the proc using the
405 		 * socket's zone ID.
406 		 */
407 		proc = prfind_zone(so->so_pgrp, so->so_zoneid);
408 		if (proc == NULL) {
409 			mutex_exit(&pidlock);
410 			return;
411 		}
412 		mutex_enter(&proc->p_lock);
413 		mutex_exit(&pidlock);
414 		socket_sigproc(proc, event);
415 		mutex_exit(&proc->p_lock);
416 	} else {
417 		/*
418 		 * Send to process group. Hold pidlock across
419 		 * calls to socket_sigproc().
420 		 */
421 		pid_t pgrp = -so->so_pgrp;
422 
423 		mutex_enter(&pidlock);
424 		/*
425 		 * Even if the thread started in another zone, we're receiving
426 		 * on behalf of this socket's zone, so find the pgrp using the
427 		 * socket's zone ID.
428 		 */
429 		proc = pgfind_zone(pgrp, so->so_zoneid);
430 		while (proc != NULL) {
431 			mutex_enter(&proc->p_lock);
432 			socket_sigproc(proc, event);
433 			mutex_exit(&proc->p_lock);
434 			proc = proc->p_pglink;
435 		}
436 		mutex_exit(&pidlock);
437 	}
438 }
439 
440 #define	MIN(a, b) ((a) < (b) ? (a) : (b))
441 /* Copy userdata into a new mblk_t */
442 mblk_t *
443 socopyinuio(uio_t *uiop, ssize_t iosize, size_t wroff, ssize_t maxblk,
444     size_t tail_len, int *errorp)
445 {
446 	mblk_t	*head = NULL, **tail = &head;
447 
448 	ASSERT(iosize == INFPSZ || iosize > 0);
449 
450 	if (iosize == INFPSZ || iosize > uiop->uio_resid)
451 		iosize = uiop->uio_resid;
452 
453 	if (maxblk == INFPSZ)
454 		maxblk = iosize;
455 
456 	/* Nothing to do in these cases, so we're done */
457 	if (iosize < 0 || maxblk < 0 || (maxblk == 0 && iosize > 0))
458 		goto done;
459 
460 	/*
461 	 * We will enter the loop below if iosize is 0; it will allocate an
462 	 * empty message block and call uiomove(9F) which will just return.
463 	 * We could avoid that with an extra check but would only slow
464 	 * down the much more likely case where iosize is larger than 0.
465 	 */
466 	do {
467 		ssize_t blocksize;
468 		mblk_t	*mp;
469 
470 		blocksize = MIN(iosize, maxblk);
471 		ASSERT(blocksize >= 0);
472 		mp = allocb(wroff + blocksize + tail_len, BPRI_MED);
473 		if (mp == NULL) {
474 			*errorp = ENOMEM;
475 			return (head);
476 		}
477 		mp->b_rptr += wroff;
478 		mp->b_wptr = mp->b_rptr + blocksize;
479 
480 		*tail = mp;
481 		tail = &mp->b_cont;
482 
483 		/* uiomove(9F) either returns 0 or EFAULT */
484 		if ((*errorp = uiomove(mp->b_rptr, (size_t)blocksize,
485 		    UIO_WRITE, uiop)) != 0) {
486 			ASSERT(*errorp != ENOMEM);
487 			freemsg(head);
488 			return (NULL);
489 		}
490 
491 		iosize -= blocksize;
492 	} while (iosize > 0);
493 
494 done:
495 	*errorp = 0;
496 	return (head);
497 }
498 
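/*
 * Illustrative sketch (hypothetical caller): a sender would typically
 * shape the mblk chain using the protocol's buffer properties. Note that
 * on ENOMEM socopyinuio() may return a partial chain, so the error is
 * checked before the message is used; freemsg() tolerates NULL.
 *
 *	mblk_t *mp;
 *	int error;
 *
 *	mp = socopyinuio(uiop, count, so->so_proto_props.sopp_wroff,
 *	    so->so_proto_props.sopp_maxblk, so->so_proto_props.sopp_tail,
 *	    &error);
 *	if (error != 0) {
 *		freemsg(mp);
 *		return (error);		// ENOMEM or EFAULT
 *	}
 */
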
499 mblk_t *
500 socopyoutuio(mblk_t *mp, struct uio *uiop, ssize_t max_read, int *errorp)
501 {
502 	int error;
503 	ptrdiff_t n;
504 	mblk_t *nmp;
505 
506 	ASSERT(mp->b_wptr >= mp->b_rptr);
507 
508 	/*
509 	 * max_read is the offset of the oobmark and a read cannot go past
510 	 * the oobmark.
511 	 */
512 	if (max_read == INFPSZ || max_read > uiop->uio_resid)
513 		max_read = uiop->uio_resid;
514 
515 	do {
516 		if ((n = MIN(max_read, MBLKL(mp))) != 0) {
517 			ASSERT(n > 0);
518 
519 			error = uiomove(mp->b_rptr, n, UIO_READ, uiop);
520 			if (error != 0) {
521 				freemsg(mp);
522 				*errorp = error;
523 				return (NULL);
524 			}
525 		}
526 
527 		mp->b_rptr += n;
528 		max_read -= n;
529 		while (mp != NULL && (mp->b_rptr >= mp->b_wptr)) {
530 			/*
531 			 * get rid of zero length mblks
532 			 */
533 			nmp = mp;
534 			mp = mp->b_cont;
535 			freeb(nmp);
536 		}
537 	} while (mp != NULL && max_read > 0);
538 
539 	*errorp = 0;
540 	return (mp);
541 }
542 
543 static void
544 so_prepend_msg(struct sonode *so, mblk_t *mp, mblk_t *last_tail)
545 {
546 	ASSERT(last_tail != NULL);
547 	mp->b_next = so->so_rcv_q_head;
548 	mp->b_prev = last_tail;
549 	ASSERT(!(DB_FLAGS(mp) & DBLK_UIOA));
550 
551 	if (so->so_rcv_q_head == NULL) {
552 		ASSERT(so->so_rcv_q_last_head == NULL);
553 		so->so_rcv_q_last_head = mp;
554 #ifdef DEBUG
555 	} else {
556 		ASSERT(!(DB_FLAGS(so->so_rcv_q_head) & DBLK_UIOA));
557 #endif
558 	}
559 	so->so_rcv_q_head = mp;
560 
561 #ifdef DEBUG
562 	if (so_debug_length) {
563 		mutex_enter(&so->so_lock);
564 		ASSERT(so_check_length(so));
565 		mutex_exit(&so->so_lock);
566 	}
567 #endif
568 }
569 
570 /*
571  * Move a mblk chain (mp_head, mp_last_head) to the sonode's rcv queue so it
572  * can be processed by so_dequeue_msg().
573  */
574 void
575 so_process_new_message(struct sonode *so, mblk_t *mp_head, mblk_t *mp_last_head)
576 {
577 	if (so->so_filter_active > 0 &&
578 	    (mp_head = sof_filter_data_in_proc(so, mp_head,
579 	    &mp_last_head)) == NULL)
580 		return;
581 
582 	ASSERT(mp_head->b_prev != NULL);
583 	if (so->so_rcv_q_head == NULL) {
584 		so->so_rcv_q_head = mp_head;
585 		so->so_rcv_q_last_head = mp_last_head;
586 		ASSERT(so->so_rcv_q_last_head->b_prev != NULL);
587 	} else {
588 		boolean_t flag_equal = ((DB_FLAGS(mp_head) & DBLK_UIOA) ==
589 		    (DB_FLAGS(so->so_rcv_q_last_head) & DBLK_UIOA));
590 
591 		if (mp_head->b_next == NULL &&
592 		    DB_TYPE(mp_head) == M_DATA &&
593 		    DB_TYPE(so->so_rcv_q_last_head) == M_DATA && flag_equal) {
594 			so->so_rcv_q_last_head->b_prev->b_cont = mp_head;
595 			so->so_rcv_q_last_head->b_prev = mp_head->b_prev;
596 			mp_head->b_prev = NULL;
597 		} else if (flag_equal && (DB_FLAGS(mp_head) & DBLK_UIOA)) {
598 			/*
599 			 * Append to last_head if there is more than one mblk,
600 			 * and both mp_head and last_head are I/OAT mblks.
601 			 */
602 			ASSERT(mp_head->b_next != NULL);
603 			so->so_rcv_q_last_head->b_prev->b_cont = mp_head;
604 			so->so_rcv_q_last_head->b_prev = mp_head->b_prev;
605 			mp_head->b_prev = NULL;
606 
607 			so->so_rcv_q_last_head->b_next = mp_head->b_next;
608 			mp_head->b_next = NULL;
609 			so->so_rcv_q_last_head = mp_last_head;
610 		} else {
611 #ifdef DEBUG
612 			{
613 				mblk_t *tmp_mblk;
614 				tmp_mblk = mp_head;
615 				while (tmp_mblk != NULL) {
616 					ASSERT(tmp_mblk->b_prev != NULL);
617 					tmp_mblk = tmp_mblk->b_next;
618 				}
619 			}
620 #endif
621 			so->so_rcv_q_last_head->b_next = mp_head;
622 			so->so_rcv_q_last_head = mp_last_head;
623 		}
624 	}
625 }
626 
627 /*
628  * Check flow control on a given sonode.  Must have so_lock held, and
629  * this function will release the lock.  Return true if flow control
630  * is cleared.
631  */
632 boolean_t
633 so_check_flow_control(struct sonode *so)
634 {
635 	ASSERT(MUTEX_HELD(&so->so_lock));
636 
637 	if (so->so_flowctrld && (so->so_rcv_queued < so->so_rcvlowat &&
638 	    !(so->so_state & SS_FIL_RCV_FLOWCTRL))) {
639 		so->so_flowctrld = B_FALSE;
640 		mutex_exit(&so->so_lock);
641 		/*
642 		 * Open up flow control. SCTP does not have any downcalls, and
643 		 * it will clear flow control in sosctp_recvmsg().
644 		 */
645 		if (so->so_downcalls != NULL &&
646 		    so->so_downcalls->sd_clr_flowctrl != NULL) {
647 			(*so->so_downcalls->sd_clr_flowctrl)
648 			    (so->so_proto_handle);
649 		}
650 		/* filters can start injecting data */
651 		sof_sonode_notify_filters(so, SOF_EV_INJECT_DATA_IN_OK, 0);
652 		return (B_TRUE);
653 	} else {
654 		mutex_exit(&so->so_lock);
655 		return (B_FALSE);
656 	}
657 }
658 
659 int
660 so_dequeue_msg(struct sonode *so, mblk_t **mctlp, struct uio *uiop,
661     rval_t *rvalp, int flags)
662 {
663 	mblk_t	*mp, *nmp;
664 	mblk_t	*savemp, *savemptail;
665 	mblk_t	*new_msg_head;
666 	mblk_t	*new_msg_last_head;
667 	mblk_t	*last_tail;
668 	boolean_t partial_read;
669 	boolean_t reset_atmark = B_FALSE;
670 	int more = 0;
671 	int error;
672 	ssize_t oobmark;
673 	sodirect_t *sodp = so->so_direct;
674 
675 	partial_read = B_FALSE;
676 	*mctlp = NULL;
677 again:
678 	mutex_enter(&so->so_lock);
679 again1:
680 #ifdef DEBUG
681 	if (so_debug_length) {
682 		ASSERT(so_check_length(so));
683 	}
684 #endif
685 	if (so->so_state & SS_RCVATMARK) {
686 		/* Check whether the caller is OK to read past the mark */
687 		if (flags & MSG_NOMARK) {
688 			mutex_exit(&so->so_lock);
689 			return (EWOULDBLOCK);
690 		}
691 		reset_atmark = B_TRUE;
692 	}
693 	/*
694 	 * First move messages from the dump area to the processing area.
695 	 */
696 	if (sodp != NULL) {
697 		if (sodp->sod_enabled) {
698 			if (sodp->sod_uioa.uioa_state & UIOA_ALLOC) {
699 				/* nothing to uioamove */
700 				sodp = NULL;
701 			} else if (sodp->sod_uioa.uioa_state & UIOA_INIT) {
702 				sodp->sod_uioa.uioa_state &= UIOA_CLR;
703 				sodp->sod_uioa.uioa_state |= UIOA_ENABLED;
704 				/*
705 				 * try to uioamove() the data that
706 				 * has already been queued.
707 				 */
708 				sod_uioa_so_init(so, sodp, uiop);
709 			}
710 		} else {
711 			sodp = NULL;
712 		}
713 	}
714 	new_msg_head = so->so_rcv_head;
715 	new_msg_last_head = so->so_rcv_last_head;
716 	so->so_rcv_head = NULL;
717 	so->so_rcv_last_head = NULL;
718 	oobmark = so->so_oobmark;
719 	/*
720 	 * We can release the lock as there can only be one reader
721 	 */
722 	mutex_exit(&so->so_lock);
723 
724 	if (new_msg_head != NULL) {
725 		so_process_new_message(so, new_msg_head, new_msg_last_head);
726 	}
727 	savemp = savemptail = NULL;
728 	rvalp->r_vals = 0;
729 	error = 0;
730 	mp = so->so_rcv_q_head;
731 
732 	if (mp != NULL &&
733 	    (so->so_rcv_timer_tid == 0 ||
734 	    so->so_rcv_queued >= so->so_rcv_thresh)) {
735 		partial_read = B_FALSE;
736 
737 		if (flags & MSG_PEEK) {
738 			if ((nmp = dupmsg(mp)) == NULL &&
739 			    (nmp = copymsg(mp)) == NULL) {
740 				size_t size = msgsize(mp);
741 
742 				error = strwaitbuf(size, BPRI_HI);
743 				if (error) {
744 					return (error);
745 				}
746 				goto again;
747 			}
748 			mp = nmp;
749 		} else {
750 			ASSERT(mp->b_prev != NULL);
751 			last_tail = mp->b_prev;
752 			mp->b_prev = NULL;
753 			so->so_rcv_q_head = mp->b_next;
754 			if (so->so_rcv_q_head == NULL) {
755 				so->so_rcv_q_last_head = NULL;
756 			}
757 			mp->b_next = NULL;
758 		}
759 
760 		ASSERT(mctlp != NULL);
761 		/*
762 		 * First process PROTO or PCPROTO blocks, if any.
763 		 */
764 		if (DB_TYPE(mp) != M_DATA) {
765 			*mctlp = mp;
766 			savemp = mp;
767 			savemptail = mp;
768 			ASSERT(DB_TYPE(mp) == M_PROTO ||
769 			    DB_TYPE(mp) == M_PCPROTO);
770 			while (mp->b_cont != NULL &&
771 			    DB_TYPE(mp->b_cont) != M_DATA) {
772 				ASSERT(DB_TYPE(mp->b_cont) == M_PROTO ||
773 				    DB_TYPE(mp->b_cont) == M_PCPROTO);
774 				mp = mp->b_cont;
775 				savemptail = mp;
776 			}
777 			mp = savemptail->b_cont;
778 			savemptail->b_cont = NULL;
779 		}
780 
781 		ASSERT(DB_TYPE(mp) == M_DATA);
782 		/*
783 		 * Now process DATA blocks, if any. Note that for a sodirect-
784 		 * enabled socket, uio_resid can be 0.
785 		 */
786 		if (uiop->uio_resid >= 0) {
787 			ssize_t copied = 0;
788 
789 			if (sodp != NULL && (DB_FLAGS(mp) & DBLK_UIOA)) {
790 				mutex_enter(&so->so_lock);
791 				ASSERT(uiop == (uio_t *)&sodp->sod_uioa);
792 				copied = sod_uioa_mblk(so, mp);
793 				if (copied > 0)
794 					partial_read = B_TRUE;
795 				mutex_exit(&so->so_lock);
796 				/* mark this mblk as processed */
797 				mp = NULL;
798 			} else {
799 				ssize_t oldresid = uiop->uio_resid;
800 
801 				if (MBLKL(mp) < so_mblk_pull_len) {
802 					if (pullupmsg(mp, -1) == 1) {
803 						last_tail = mp;
804 					}
805 				}
806 				/*
807 				 * Cannot read beyond the oobmark
808 				 */
809 				mp = socopyoutuio(mp, uiop,
810 				    oobmark == 0 ? INFPSZ : oobmark, &error);
811 				if (error != 0) {
812 					freemsg(*mctlp);
813 					*mctlp = NULL;
814 					more = 0;
815 					goto done;
816 				}
817 				ASSERT(oldresid >= uiop->uio_resid);
818 				copied = oldresid - uiop->uio_resid;
819 				if (oldresid > uiop->uio_resid)
820 					partial_read = B_TRUE;
821 			}
822 			ASSERT(copied >= 0);
823 			if (copied > 0 && !(flags & MSG_PEEK)) {
824 				mutex_enter(&so->so_lock);
825 				so->so_rcv_queued -= copied;
826 				ASSERT(so->so_oobmark >= 0);
827 				if (so->so_oobmark > 0) {
828 					so->so_oobmark -= copied;
829 					ASSERT(so->so_oobmark >= 0);
830 					if (so->so_oobmark == 0) {
831 						ASSERT(so->so_state &
832 						    SS_OOBPEND);
833 						so->so_oobmark = 0;
834 						so->so_state |= SS_RCVATMARK;
835 					}
836 				}
837 				/*
838 				 * so_check_flow_control() will drop
839 				 * so->so_lock.
840 				 */
841 				rvalp->r_val2 = so_check_flow_control(so);
842 			}
843 		}
844 		if (mp != NULL) { /* more data blocks in msg */
845 			more |= MOREDATA;
846 			if ((flags & (MSG_PEEK|MSG_TRUNC))) {
847 				if (flags & MSG_PEEK) {
848 					freemsg(mp);
849 				} else {
850 					unsigned int msize = msgdsize(mp);
851 
852 					freemsg(mp);
853 					mutex_enter(&so->so_lock);
854 					so->so_rcv_queued -= msize;
855 					/*
856 					 * so_check_flow_control() will drop
857 					 * so->so_lock.
858 					 */
859 					rvalp->r_val2 =
860 					    so_check_flow_control(so);
861 				}
862 			} else if (partial_read && !somsghasdata(mp)) {
863 				/*
864 				 * Avoid queuing a zero-length tail part of
865 				 * a message. partial_read == 1 indicates that
866 				 * we read some of the message.
867 				 */
868 				freemsg(mp);
869 				more &= ~MOREDATA;
870 			} else {
871 				if (savemp != NULL &&
872 				    (flags & MSG_DUPCTRL)) {
873 					mblk_t *nmp;
874 					/*
875 					 * There should only be non-data mblks
876 					 */
877 					ASSERT(DB_TYPE(savemp) != M_DATA &&
878 					    DB_TYPE(savemptail) != M_DATA);
879 try_again:
880 					if ((nmp = dupmsg(savemp)) == NULL &&
881 					    (nmp = copymsg(savemp)) == NULL) {
882 
883 						size_t size = msgsize(savemp);
884 
885 						error = strwaitbuf(size,
886 						    BPRI_HI);
887 						if (error != 0) {
888 							/*
889 							 * In case we
890 							 * cannot copy
891 							 * control data
892 							 * free the remaining
893 							 * data.
894 							 */
895 							freemsg(mp);
896 							goto done;
897 						}
898 						goto try_again;
899 					}
900 
901 					ASSERT(nmp != NULL);
902 					ASSERT(DB_TYPE(nmp) != M_DATA);
903 					savemptail->b_cont = mp;
904 					*mctlp = nmp;
905 					mp = savemp;
906 				}
907 				/*
908 				 * putback mp
909 				 */
910 				so_prepend_msg(so, mp, last_tail);
911 			}
912 		}
913 
914 		/* fast check so_rcv_head if there is more data */
915 		if (partial_read && !(so->so_state & SS_RCVATMARK) &&
916 		    *mctlp == NULL && uiop->uio_resid > 0 &&
917 		    !(flags & MSG_PEEK) && so->so_rcv_head != NULL) {
918 			goto again;
919 		}
920 	} else if (!partial_read) {
921 		mutex_enter(&so->so_lock);
922 		if (so->so_error != 0) {
923 			error = sogeterr(so, !(flags & MSG_PEEK));
924 			mutex_exit(&so->so_lock);
925 			return (error);
926 		}
927 
928 		/* See if new data has arrived in the meantime */
929 		if (so->so_rcv_head != NULL)
930 			goto again1;
931 
932 		/*
933 		 * No pending data. Return right away for nonblocking
934 		 * socket, otherwise sleep waiting for data.
935 		 */
936 		if (!(so->so_state & SS_CANTRCVMORE) && uiop->uio_resid > 0) {
937 			if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
938 			    (flags & MSG_DONTWAIT)) {
939 				error = EWOULDBLOCK;
940 			} else {
941 				if (so->so_state & (SS_CLOSING |
942 				    SS_FALLBACK_PENDING)) {
943 					mutex_exit(&so->so_lock);
944 					error = EINTR;
945 					goto done;
946 				}
947 
948 				so->so_rcv_wakeup = B_TRUE;
949 				so->so_rcv_wanted = uiop->uio_resid;
950 				if (so->so_rcvtimeo == 0) {
951 					/*
952 					 * Zero means disable timeout.
953 					 */
954 					error = cv_wait_sig(&so->so_rcv_cv,
955 					    &so->so_lock);
956 				} else {
957 					error = cv_reltimedwait_sig(
958 					    &so->so_rcv_cv, &so->so_lock,
959 					    so->so_rcvtimeo, TR_CLOCK_TICK);
960 				}
961 				so->so_rcv_wakeup = B_FALSE;
962 				so->so_rcv_wanted = 0;
963 
964 				if (error == 0) {
965 					error = EINTR;
966 				} else if (error == -1) {
967 					error = EAGAIN;
968 				} else {
969 					goto again1;
970 				}
971 			}
972 		}
973 		mutex_exit(&so->so_lock);
974 	}
975 	if (reset_atmark && partial_read && !(flags & MSG_PEEK)) {
976 		/*
977 		 * We are past the mark; update state.
978 		 * 4.3BSD and 4.4BSD clear the mark when peeking across it.
979 		 * The draft Posix socket spec states that the mark should
980 		 * not be cleared when peeking. We follow the latter.
981 		 */
982 		mutex_enter(&so->so_lock);
983 		ASSERT(so_verify_oobstate(so));
984 		so->so_state &= ~(SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK);
985 		freemsg(so->so_oobmsg);
986 		so->so_oobmsg = NULL;
987 		ASSERT(so_verify_oobstate(so));
988 		mutex_exit(&so->so_lock);
989 	}
990 	ASSERT(so->so_rcv_wakeup == B_FALSE);
991 done:
992 	if (sodp != NULL) {
993 		mutex_enter(&so->so_lock);
994 		if (sodp->sod_enabled &&
995 		    (sodp->sod_uioa.uioa_state & UIOA_ENABLED)) {
996 			SOD_UIOAFINI(sodp);
997 			if (sodp->sod_uioa.uioa_mbytes > 0) {
998 				ASSERT(so->so_rcv_q_head != NULL ||
999 				    so->so_rcv_head != NULL);
1000 				so->so_rcv_queued -= sod_uioa_mblk(so, NULL);
1001 				if (error == EWOULDBLOCK)
1002 					error = 0;
1003 			}
1004 		}
1005 		mutex_exit(&so->so_lock);
1006 	}
1007 #ifdef DEBUG
1008 	if (so_debug_length) {
1009 		mutex_enter(&so->so_lock);
1010 		ASSERT(so_check_length(so));
1011 		mutex_exit(&so->so_lock);
1012 	}
1013 #endif
1014 	rvalp->r_val1 = more;
1015 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
1016 	return (error);
1017 }
1018 
1019 /*
1020  * Enqueue data from the protocol on the socket's rcv queue.
1021  *
1022  * We try to hook new M_DATA mblks onto an existing chain, however,
1023  * that cannot be done if the existing chain has already been
1024  * processed by I/OAT. Non-M_DATA mblks are just linked together via
1025  * b_next. In all cases the b_prev of the enqueued mblk is set to
1026  * point to the last mblk in its b_cont chain.
1027  */
1028 void
1029 so_enqueue_msg(struct sonode *so, mblk_t *mp, size_t msg_size)
1030 {
1031 	ASSERT(MUTEX_HELD(&so->so_lock));
1032 
1033 #ifdef DEBUG
1034 	if (so_debug_length) {
1035 		ASSERT(so_check_length(so));
1036 	}
1037 #endif
1038 	so->so_rcv_queued += msg_size;
1039 
1040 	if (so->so_rcv_head == NULL) {
1041 		ASSERT(so->so_rcv_last_head == NULL);
1042 		so->so_rcv_head = mp;
1043 		so->so_rcv_last_head = mp;
1044 	} else if ((DB_TYPE(mp) == M_DATA &&
1045 	    DB_TYPE(so->so_rcv_last_head) == M_DATA) &&
1046 	    ((DB_FLAGS(mp) & DBLK_UIOA) ==
1047 	    (DB_FLAGS(so->so_rcv_last_head) & DBLK_UIOA))) {
1048 		/* Added to the end */
1049 		ASSERT(so->so_rcv_last_head != NULL);
1050 		ASSERT(so->so_rcv_last_head->b_prev != NULL);
1051 		so->so_rcv_last_head->b_prev->b_cont = mp;
1052 	} else {
1053 		/* Start a new end */
1054 		so->so_rcv_last_head->b_next = mp;
1055 		so->so_rcv_last_head = mp;
1056 	}
1057 	while (mp->b_cont != NULL)
1058 		mp = mp->b_cont;
1059 
1060 	so->so_rcv_last_head->b_prev = mp;
1061 #ifdef DEBUG
1062 	if (so_debug_length) {
1063 		ASSERT(so_check_length(so));
1064 	}
1065 #endif
1066 }
1067 
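/*
 * Illustrative sketch (simplified, not the exact upcall code): the
 * protocol's receive upcall enqueues data with so_lock held and then
 * notifies any waiting reader; so_notify_data() drops so_lock.
 *
 *	mutex_enter(&so->so_lock);
 *	so_enqueue_msg(so, mp, msg_size);
 *	so_notify_data(so, msg_size);
 */
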
1068 /*
1069  * Return B_TRUE if there is data in the message, B_FALSE otherwise.
1070  */
1071 boolean_t
1072 somsghasdata(mblk_t *mp)
1073 {
1074 	for (; mp; mp = mp->b_cont)
1075 		if (mp->b_datap->db_type == M_DATA) {
1076 			ASSERT(mp->b_wptr >= mp->b_rptr);
1077 			if (mp->b_wptr > mp->b_rptr)
1078 				return (B_TRUE);
1079 		}
1080 	return (B_FALSE);
1081 }
1082 
1083 /*
1084  * Flush the read side of sockfs.
1085  *
1086  * The caller must be sure that a reader is not already active when the
1087  * buffer is being flushed.
1088  */
1089 void
1090 so_rcv_flush(struct sonode *so)
1091 {
1092 	mblk_t  *mp;
1093 
1094 	ASSERT(MUTEX_HELD(&so->so_lock));
1095 
1096 	if (so->so_oobmsg != NULL) {
1097 		freemsg(so->so_oobmsg);
1098 		so->so_oobmsg = NULL;
1099 		so->so_oobmark = 0;
1100 		so->so_state &=
1101 		    ~(SS_OOBPEND|SS_HAVEOOBDATA|SS_HADOOBDATA|SS_RCVATMARK);
1102 	}
1103 
1104 	/*
1105 	 * Free messages sitting in the recv queues
1106 	 */
1107 	while (so->so_rcv_q_head != NULL) {
1108 		mp = so->so_rcv_q_head;
1109 		so->so_rcv_q_head = mp->b_next;
1110 		mp->b_next = mp->b_prev = NULL;
1111 		freemsg(mp);
1112 	}
1113 	while (so->so_rcv_head != NULL) {
1114 		mp = so->so_rcv_head;
1115 		so->so_rcv_head = mp->b_next;
1116 		mp->b_next = mp->b_prev = NULL;
1117 		freemsg(mp);
1118 	}
1119 	so->so_rcv_queued = 0;
1120 	so->so_rcv_q_head = NULL;
1121 	so->so_rcv_q_last_head = NULL;
1122 	so->so_rcv_head = NULL;
1123 	so->so_rcv_last_head = NULL;
1124 }
1125 
1126 /*
1127  * Handle recv* calls that set MSG_OOB or MSG_OOB together with MSG_PEEK.
1128  */
1129 int
1130 sorecvoob(struct sonode *so, struct nmsghdr *msg, struct uio *uiop, int flags,
1131     boolean_t oob_inline)
1132 {
1133 	mblk_t		*mp, *nmp;
1134 	int		error;
1135 
1136 	dprintso(so, 1, ("sorecvoob(%p, %p, 0x%x)\n", (void *)so, (void *)msg,
1137 	    flags));
1138 
1139 	if (msg != NULL) {
1140 		/*
1141 		 * There is never any oob data with addresses or control since
1142 		 * the T_EXDATA_IND does not carry any options.
1143 		 */
1144 		msg->msg_controllen = 0;
1145 		msg->msg_namelen = 0;
1146 		msg->msg_flags = 0;
1147 	}
1148 
1149 	mutex_enter(&so->so_lock);
1150 	ASSERT(so_verify_oobstate(so));
1151 	if (oob_inline ||
1152 	    (so->so_state & (SS_OOBPEND|SS_HADOOBDATA)) != SS_OOBPEND) {
1153 		dprintso(so, 1, ("sorecvoob: inline or data consumed\n"));
1154 		mutex_exit(&so->so_lock);
1155 		return (EINVAL);
1156 	}
1157 	if (!(so->so_state & SS_HAVEOOBDATA)) {
1158 		dprintso(so, 1, ("sorecvoob: no data yet\n"));
1159 		mutex_exit(&so->so_lock);
1160 		return (EWOULDBLOCK);
1161 	}
1162 	ASSERT(so->so_oobmsg != NULL);
1163 	mp = so->so_oobmsg;
1164 	if (flags & MSG_PEEK) {
1165 		/*
1166 		 * Since recv* cannot return ENOBUFS we cannot use dupmsg.
1167 		 * Instead we revert to the consolidation private
1168 		 * allocb_wait plus bcopy.
1169 		 */
1170 		mblk_t *mp1;
1171 
1172 		mp1 = allocb_wait(msgdsize(mp), BPRI_MED, STR_NOSIG, NULL);
1173 		ASSERT(mp1);
1174 
1175 		while (mp != NULL) {
1176 			ssize_t size;
1177 
1178 			size = MBLKL(mp);
1179 			bcopy(mp->b_rptr, mp1->b_wptr, size);
1180 			mp1->b_wptr += size;
1181 			ASSERT(mp1->b_wptr <= mp1->b_datap->db_lim);
1182 			mp = mp->b_cont;
1183 		}
1184 		mp = mp1;
1185 	} else {
1186 		/*
1187 		 * Update the state indicating that the data has been consumed.
1188 		 * Keep SS_OOBPEND set until data is consumed past the mark.
1189 		 */
1190 		so->so_oobmsg = NULL;
1191 		so->so_state ^= SS_HAVEOOBDATA|SS_HADOOBDATA;
1192 	}
1193 	ASSERT(so_verify_oobstate(so));
1194 	mutex_exit(&so->so_lock);
1195 
1196 	error = 0;
1197 	nmp = mp;
1198 	while (nmp != NULL && uiop->uio_resid > 0) {
1199 		ssize_t n = MBLKL(nmp);
1200 
1201 		n = MIN(n, uiop->uio_resid);
1202 		if (n > 0)
1203 			error = uiomove(nmp->b_rptr, n,
1204 			    UIO_READ, uiop);
1205 		if (error)
1206 			break;
1207 		nmp = nmp->b_cont;
1208 	}
1209 	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);
1210 	freemsg(mp);
1211 	return (error);
1212 }
1213 
1214 /*
1215  * Allocate and initialize a sonode.
1216  */
1217 /* ARGSUSED */
1218 struct sonode *
1219 socket_sonode_create(struct sockparams *sp, int family, int type,
1220     int protocol, int version, int sflags, int *errorp, struct cred *cr)
1221 {
1222 	sonode_t *so;
1223 	int	kmflags;
1224 
1225 	/*
1226 	 * Choose the right set of sonodeops based on the upcall and
1227 	 * downcall version that the protocol has provided.
1228 	 */
1229 	if (SOCK_UC_VERSION != sp->sp_smod_info->smod_uc_version ||
1230 	    SOCK_DC_VERSION != sp->sp_smod_info->smod_dc_version) {
1231 		/*
1232 		 * mismatch
1233 		 */
1234 #ifdef DEBUG
1235 		cmn_err(CE_CONT, "protocol and socket module version mismatch");
1236 #endif
1237 		*errorp = EINVAL;
1238 		return (NULL);
1239 	}
1240 
1241 	kmflags = (sflags & SOCKET_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
1242 
1243 	so = kmem_cache_alloc(socket_cache, kmflags);
1244 	if (so == NULL) {
1245 		*errorp = ENOMEM;
1246 		return (NULL);
1247 	}
1248 
1249 	sonode_init(so, sp, family, type, protocol, &so_sonodeops);
1250 
1251 	if (version == SOV_DEFAULT)
1252 		version = so_default_version;
1253 
1254 	so->so_version = (short)version;
1255 
1256 	/*
1257 	 * set the default values to be INFPSZ;
1258 	 * if a protocol desires, it can change the values later
1259 	 */
1260 	so->so_proto_props.sopp_rxhiwat = SOCKET_RECVHIWATER;
1261 	so->so_proto_props.sopp_rxlowat = SOCKET_RECVLOWATER;
1262 	so->so_proto_props.sopp_maxpsz = INFPSZ;
1263 	so->so_proto_props.sopp_maxblk = INFPSZ;
1264 
1265 	return (so);
1266 }
1267 
1268 int
1269 socket_init_common(struct sonode *so, struct sonode *pso, int flags, cred_t *cr)
1270 {
1271 	int error = 0;
1272 
1273 	if (pso != NULL) {
1274 		/*
1275 		 * We have a passive open, so inherit basic state from
1276 		 * the parent (listener).
1277 		 *
1278 		 * No need to grab the new sonode's lock, since there is no
1279 		 * one that can have a reference to it.
1280 		 */
1281 		mutex_enter(&pso->so_lock);
1282 
1283 		so->so_state |= SS_ISCONNECTED | (pso->so_state & SS_ASYNC);
1284 		so->so_pgrp = pso->so_pgrp;
1285 		so->so_rcvtimeo = pso->so_rcvtimeo;
1286 		so->so_sndtimeo = pso->so_sndtimeo;
1287 		so->so_xpg_rcvbuf = pso->so_xpg_rcvbuf;
1288 		/*
1289 		 * Make note of the socket level options. TCP and IP level
1290 		 * options are already inherited. We could do all this after
1291 		 * accept is successful, but doing it here simplifies the code
1292 		 * and no harm is done in the error case.
1293 		 */
1294 		so->so_options = pso->so_options & (SO_DEBUG|SO_REUSEADDR|
1295 		    SO_KEEPALIVE|SO_DONTROUTE|SO_BROADCAST|SO_USELOOPBACK|
1296 		    SO_OOBINLINE|SO_DGRAM_ERRIND|SO_LINGER);
1297 		so->so_proto_props = pso->so_proto_props;
1298 		so->so_mode = pso->so_mode;
1299 		so->so_pollev = pso->so_pollev & SO_POLLEV_ALWAYS;
1300 
1301 		mutex_exit(&pso->so_lock);
1302 
1303 		/*
1304 		 * If the parent has any filters, try to inherit them.
1305 		 */
1306 		if (pso->so_filter_active > 0 &&
1307 		    (error = sof_sonode_inherit_filters(so, pso)) != 0)
1308 			return (error);
1309 
1310 	} else {
1311 		struct sockparams *sp = so->so_sockparams;
1312 		sock_upcalls_t *upcalls_to_use;
1313 
1314 		/*
1315 		 * Attach automatic filters, if there are any.
1316 		 */
1317 		if (!list_is_empty(&sp->sp_auto_filters) &&
1318 		    (error = sof_sonode_autoattach_filters(so, cr)) != 0)
1319 			return (error);
1320 
1321 		/* OK to attach filters */
1322 		so->so_state |= SS_FILOP_OK;
1323 
1324 		/*
1325 		 * Based on the version number select the right upcalls to
1326 		 * pass down. Currently we only have one version, so choose
1327 		 * the default.
1328 		 */
1329 		upcalls_to_use = &so_upcalls;
1330 
1331 		/* active open, so create a lower handle */
1332 		so->so_proto_handle =
1333 		    sp->sp_smod_info->smod_proto_create_func(so->so_family,
1334 		    so->so_type, so->so_protocol, &so->so_downcalls,
1335 		    &so->so_mode, &error, flags, cr);
1336 
1337 		if (so->so_proto_handle == NULL) {
1338 			ASSERT(error != 0);
1339 			/*
1340 			 * To be safe; if a lower handle cannot be created, and
1341 			 * the proto does not give a reason why, assume there
1342 			 * was a lack of memory.
1343 			 */
1344 			return ((error == 0) ? ENOMEM : error);
1345 		}
1346 		ASSERT(so->so_downcalls != NULL);
1347 		ASSERT(so->so_downcalls->sd_send != NULL ||
1348 		    so->so_downcalls->sd_send_uio != NULL);
1349 		if (so->so_downcalls->sd_recv_uio != NULL) {
1350 			ASSERT(so->so_downcalls->sd_poll != NULL);
1351 			so->so_pollev |= SO_POLLEV_ALWAYS;
1352 		}
1353 
1354 		(*so->so_downcalls->sd_activate)(so->so_proto_handle,
1355 		    (sock_upper_handle_t)so, upcalls_to_use, 0, cr);
1356 
1357 		/* Wildcard */
1358 
1359 		/*
1360 		 * FIXME No need for this, the protocol can deal with it in
1361 		 * sd_create(). Should update ICMP.
1362 		 */
1363 		if (so->so_protocol != so->so_sockparams->sp_protocol) {
1364 			int protocol = so->so_protocol;
1365 			int error;
1366 			/*
1367 			 * Issue SO_PROTOTYPE setsockopt.
1368 			 */
1369 			error = socket_setsockopt(so, SOL_SOCKET, SO_PROTOTYPE,
1370 			    &protocol, (t_uscalar_t)sizeof (protocol), cr);
1371 			if (error) {
1372 				(void) (*so->so_downcalls->sd_close)
1373 				    (so->so_proto_handle, 0, cr);
1374 
1375 				mutex_enter(&so->so_lock);
1376 				so_rcv_flush(so);
1377 				mutex_exit(&so->so_lock);
1378 				/*
1379 				 * Setsockopt often fails with ENOPROTOOPT but
1380 				 * socket() should fail with
1381 				 * EPROTONOSUPPORT/EPROTOTYPE.
1382 				 */
1383 				return (EPROTONOSUPPORT);
1384 			}
1385 		}
1386 	}
1387 
1388 	if (uioasync.enabled)
1389 		sod_sock_init(so);
1390 
1391 	/* put an extra reference on the socket for the protocol */
1392 	VN_HOLD(SOTOV(so));
1393 
1394 	return (0);
1395 }
1396 
1397 /*
1398  * int socket_ioctl_common(struct sonode *so, int cmd, intptr_t arg, int mode,
1399  *         struct cred *cr, int32_t *rvalp)
1400  *
1401  * Handle ioctls that manipulate basic socket state; non-blocking,
1402  * async, etc.
1403  *
1404  * Returns:
1405  *   < 0  - ioctl was not handled
1406  *  >= 0  - ioctl was handled, if > 0, then it is an errno
1407  *
1408  * Notes:
1409  *   Assumes the standard receive buffer is used to obtain info for
1410  *   NREAD.
1411  */
1412 /* ARGSUSED */
1413 int
1414 socket_ioctl_common(struct sonode *so, int cmd, intptr_t arg, int mode,
1415     struct cred *cr, int32_t *rvalp)
1416 {
1417 	switch (cmd) {
1418 	case SIOCSQPTR:
1419 		/*
1420 		 * SIOCSQPTR is valid only when helper stream is created
1421 		 * by the protocol.
1422 		 */
1423 
1424 		return (EOPNOTSUPP);
1425 	case FIONBIO: {
1426 		int32_t value;
1427 
1428 		if (so_copyin((void *)arg, &value, sizeof (int32_t),
1429 		    (mode & (int)FKIOCTL)))
1430 			return (EFAULT);
1431 
1432 		mutex_enter(&so->so_lock);
1433 		if (value) {
1434 			so->so_state |= SS_NDELAY;
1435 		} else {
1436 			so->so_state &= ~SS_NDELAY;
1437 		}
1438 		mutex_exit(&so->so_lock);
1439 		return (0);
1440 	}
1441 	case FIOASYNC: {
1442 		int32_t value;
1443 
1444 		if (so_copyin((void *)arg, &value, sizeof (int32_t),
1445 		    (mode & (int)FKIOCTL)))
1446 			return (EFAULT);
1447 
1448 		mutex_enter(&so->so_lock);
1449 
1450 		if (value) {
1451 			/* Turn on SIGIO */
1452 			so->so_state |= SS_ASYNC;
1453 		} else {
1454 			/* Turn off SIGIO */
1455 			so->so_state &= ~SS_ASYNC;
1456 		}
1457 		mutex_exit(&so->so_lock);
1458 
1459 		return (0);
1460 	}
1461 
1462 	case SIOCSPGRP:
1463 	case FIOSETOWN: {
1464 		int error;
1465 		pid_t pid;
1466 
1467 		if (so_copyin((void *)arg, &pid, sizeof (pid_t),
1468 		    (mode & (int)FKIOCTL)))
1469 			return (EFAULT);
1470 
1471 		mutex_enter(&so->so_lock);
1472 		error = (pid != so->so_pgrp) ? socket_chgpgrp(so, pid) : 0;
1473 		mutex_exit(&so->so_lock);
1474 		return (error);
1475 	}
1476 	case SIOCGPGRP:
1477 	case FIOGETOWN:
1478 		if (so_copyout(&so->so_pgrp, (void *)arg,
1479 		    sizeof (pid_t), (mode & (int)FKIOCTL)))
1480 			return (EFAULT);
1481 
1482 		return (0);
1483 	case SIOCATMARK: {
1484 		int retval;
1485 
1486 		/*
1487 		 * Only protocols that support urgent data can handle ATMARK.
1488 		 */
1489 		if ((so->so_mode & SM_EXDATA) == 0)
1490 			return (EINVAL);
1491 
1492 		/*
1493 		 * If the protocol is maintaining its own buffer, then the
1494 		 * request must be passed down.
1495 		 */
1496 		if (so->so_downcalls->sd_recv_uio != NULL)
1497 			return (-1);
1498 
1499 		retval = (so->so_state & SS_RCVATMARK) != 0;
1500 
1501 		if (so_copyout(&retval, (void *)arg, sizeof (int),
1502 		    (mode & (int)FKIOCTL))) {
1503 			return (EFAULT);
1504 		}
1505 		return (0);
1506 	}
1507 
1508 	case FIONREAD: {
1509 		int retval;
1510 
1511 		/*
1512 		 * If the protocol is maintaining its own buffer, then the
1513 		 * request must be passed down.
1514 		 */
1515 		if (so->so_downcalls->sd_recv_uio != NULL)
1516 			return (-1);
1517 
1518 		retval = MIN(so->so_rcv_queued, INT_MAX);
1519 
1520 		if (so_copyout(&retval, (void *)arg,
1521 		    sizeof (retval), (mode & (int)FKIOCTL))) {
1522 			return (EFAULT);
1523 		}
1524 		return (0);
1525 	}
1526 
1527 	case _I_GETPEERCRED: {
1528 		int error = 0;
1529 
1530 		if ((mode & FKIOCTL) == 0)
1531 			return (EINVAL);
1532 
1533 		mutex_enter(&so->so_lock);
1534 		if ((so->so_mode & SM_CONNREQUIRED) == 0) {
1535 			error = ENOTSUP;
1536 		} else if ((so->so_state & SS_ISCONNECTED) == 0) {
1537 			error = ENOTCONN;
1538 		} else if (so->so_peercred != NULL) {
1539 			k_peercred_t *kp = (k_peercred_t *)arg;
1540 			kp->pc_cr = so->so_peercred;
1541 			kp->pc_cpid = so->so_cpid;
1542 			crhold(so->so_peercred);
1543 		} else {
1544 			error = EINVAL;
1545 		}
1546 		mutex_exit(&so->so_lock);
1547 		return (error);
1548 	}
1549 	default:
1550 		return (-1);
1551 	}
1552 }
1553 
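/*
 * Illustrative (hypothetical application code): the FIONBIO and FIOASYNC
 * cases above correspond to userland usage such as:
 *
 *	int on = 1;
 *
 *	if (ioctl(sock_fd, FIONBIO, &on) == -1)		// nonblocking I/O
 *		perror("FIONBIO");
 *	if (ioctl(sock_fd, FIOASYNC, &on) == -1)	// SIGPOLL delivery
 *		perror("FIOASYNC");
 */
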
1554 /*
1555  * Handle the I_NREAD STREAM ioctl.
1556  */
1557 static int
1558 so_strioc_nread(struct sonode *so, intptr_t arg, int mode, int32_t *rvalp)
1559 {
1560 	size_t size = 0;
1561 	int retval;
1562 	int count = 0;
1563 	mblk_t *mp;
1564 	clock_t wakeup = drv_usectohz(10);
1565 
1566 	if (so->so_downcalls == NULL ||
1567 	    so->so_downcalls->sd_recv_uio != NULL)
1568 		return (EINVAL);
1569 
1570 	mutex_enter(&so->so_lock);
1571 	/* Wait for reader to get out of the way. */
1572 	while (so->so_flag & SOREADLOCKED) {
1573 		/*
1574 		 * If reader is waiting for data, then there should be nothing
1575 		 * on the rcv queue.
1576 		 */
1577 		if (so->so_rcv_wakeup)
1578 			goto out;
1579 
1580 		/* Do a timed sleep, in case the reader goes to sleep. */
1581 		(void) cv_reltimedwait(&so->so_read_cv, &so->so_lock, wakeup,
1582 		    TR_CLOCK_TICK);
1583 	}
1584 
1585 	/*
1586 	 * Since we are holding so_lock no new reader will come in, and the
1587 	 * protocol will not be able to enqueue data. So it's safe to walk
1588 	 * both rcv queues.
1589 	 */
1590 	mp = so->so_rcv_q_head;
1591 	if (mp != NULL) {
1592 		size = msgdsize(so->so_rcv_q_head);
1593 		for (; mp != NULL; mp = mp->b_next)
1594 			count++;
1595 	} else {
1596 		/*
1597 		 * In case the processing list was empty, get the size of the
1598 		 * next msg in line.
1599 		 */
1600 		size = msgdsize(so->so_rcv_head);
1601 	}
1602 
1603 	for (mp = so->so_rcv_head; mp != NULL; mp = mp->b_next)
1604 		count++;
1605 out:
1606 	mutex_exit(&so->so_lock);
1607 
1608 	/*
1609 	 * Drop down from size_t to the "int" required by the
1610 	 * interface.  Cap at INT_MAX.
1611 	 */
1612 	retval = MIN(size, INT_MAX);
1613 	if (so_copyout(&retval, (void *)arg, sizeof (retval),
1614 	    (mode & (int)FKIOCTL))) {
1615 		return (EFAULT);
1616 	} else {
1617 		*rvalp = count;
1618 		return (0);
1619 	}
1620 }
1621 
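/*
 * Illustrative (hypothetical application code): I_NREAD reports the data
 * bytes of the first queued message through its argument and the number
 * of queued messages through the ioctl return value.
 *
 *	int nbytes;
 *	int nmsgs;
 *
 *	nmsgs = ioctl(sock_fd, I_NREAD, &nbytes);
 */
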
1622 /*
1623  * Process STREAM ioctls.
1624  *
1625  * Returns:
1626  *   < 0  - ioctl was not handled
1627  *  >= 0  - ioctl was handled, if > 0, then it is an errno
1628  */
1629 int
1630 socket_strioc_common(struct sonode *so, int cmd, intptr_t arg, int mode,
1631     struct cred *cr, int32_t *rvalp)
1632 {
1633 	int retval;
1634 
1635 	/* Only STREAM ioctls are handled here */
1636 	if ((cmd & 0xffffff00U) != STR)
1637 		return (-1);
1638 
1639 	switch (cmd) {
1640 	case I_CANPUT:
1641 		/*
1642 		 * We return an error for I_CANPUT so that isastream(3C) will
1643 		 * not report the socket as being a STREAM.
1644 		 */
1645 		return (EOPNOTSUPP);
1646 	case I_NREAD:
1647 		/* Avoid doing a fallback for I_NREAD. */
1648 		return (so_strioc_nread(so, arg, mode, rvalp));
1649 	case I_LOOK:
1650 		/* Avoid doing a fallback for I_LOOK. */
1651 		if (so_copyout("sockmod", (void *)arg, strlen("sockmod") + 1,
1652 		    (mode & (int)FKIOCTL))) {
1653 			return (EFAULT);
1654 		}
1655 		return (0);
1656 	default:
1657 		break;
1658 	}
1659 
1660 	/*
1661 	 * Try to fall back to TPI, and if successful, reissue the ioctl.
1662 	 */
1663 	if ((retval = so_tpi_fallback(so, cr)) == 0) {
1664 		/* Reissue the ioctl */
1665 		ASSERT(so->so_rcv_q_head == NULL);
1666 		return (SOP_IOCTL(so, cmd, arg, mode, cr, rvalp));
1667 	} else {
1668 		return (retval);
1669 	}
1670 }
1671 
1672 /*
1673  * This is called for all socket types to verify that the buffer size is large
1674  * enough for the option, and if we can, handle the request as well. Most
1675  * options will be forwarded to the protocol.
1676  */
1677 int
1678 socket_getopt_common(struct sonode *so, int level, int option_name,
1679     void *optval, socklen_t *optlenp, int flags)
1680 {
1681 	if (level != SOL_SOCKET)
1682 		return (-1);
1683 
1684 	switch (option_name) {
1685 	case SO_ERROR:
1686 	case SO_DOMAIN:
1687 	case SO_TYPE:
1688 	case SO_ACCEPTCONN: {
1689 		int32_t value;
1690 		socklen_t optlen = *optlenp;
1691 
1692 		if (optlen < (t_uscalar_t)sizeof (int32_t)) {
1693 			return (EINVAL);
1694 		}
1695 
1696 		switch (option_name) {
1697 		case SO_ERROR:
1698 			mutex_enter(&so->so_lock);
1699 			value = sogeterr(so, B_TRUE);
1700 			mutex_exit(&so->so_lock);
1701 			break;
1702 		case SO_DOMAIN:
1703 			value = so->so_family;
1704 			break;
1705 		case SO_TYPE:
1706 			value = so->so_type;
1707 			break;
1708 		case SO_ACCEPTCONN:
1709 			if (so->so_state & SS_ACCEPTCONN)
1710 				value = SO_ACCEPTCONN;
1711 			else
1712 				value = 0;
1713 			break;
1714 		}
1715 
1716 		bcopy(&value, optval, sizeof (value));
1717 		*optlenp = sizeof (value);
1718 
1719 		return (0);
1720 	}
1721 	case SO_SNDTIMEO:
1722 	case SO_RCVTIMEO: {
1723 		clock_t value;
1724 		socklen_t optlen = *optlenp;
1725 
1726 		if (get_udatamodel() == DATAMODEL_NONE ||
1727 		    get_udatamodel() == DATAMODEL_NATIVE) {
1728 			if (optlen < sizeof (struct timeval))
1729 				return (EINVAL);
1730 		} else {
1731 			if (optlen < sizeof (struct timeval32))
1732 				return (EINVAL);
1733 		}
1734 		if (option_name == SO_RCVTIMEO)
1735 			value = drv_hztousec(so->so_rcvtimeo);
1736 		else
1737 			value = drv_hztousec(so->so_sndtimeo);
1738 
1739 		if (get_udatamodel() == DATAMODEL_NONE ||
1740 		    get_udatamodel() == DATAMODEL_NATIVE) {
1741 			((struct timeval *)(optval))->tv_sec =
1742 			    value / (1000 * 1000);
1743 			((struct timeval *)(optval))->tv_usec =
1744 			    value % (1000 * 1000);
1745 			*optlenp = sizeof (struct timeval);
1746 		} else {
1747 			((struct timeval32 *)(optval))->tv_sec =
1748 			    value / (1000 * 1000);
1749 			((struct timeval32 *)(optval))->tv_usec =
1750 			    value % (1000 * 1000);
1751 			*optlenp = sizeof (struct timeval32);
1752 		}
1753 		return (0);
1754 	}
1755 	case SO_DEBUG:
1756 	case SO_REUSEADDR:
1757 	case SO_REUSEPORT:
1758 	case SO_KEEPALIVE:
1759 	case SO_DONTROUTE:
1760 	case SO_BROADCAST:
1761 	case SO_USELOOPBACK:
1762 	case SO_OOBINLINE:
1763 	case SO_SNDBUF:
1764 #ifdef notyet
1765 	case SO_SNDLOWAT:
1766 	case SO_RCVLOWAT:
1767 #endif /* notyet */
1768 	case SO_DGRAM_ERRIND: {
1769 		socklen_t optlen = *optlenp;
1770 
1771 		if (optlen < (t_uscalar_t)sizeof (int32_t))
1772 			return (EINVAL);
1773 		break;
1774 	}
1775 	case SO_RCVBUF: {
1776 		socklen_t optlen = *optlenp;
1777 
1778 		if (optlen < (t_uscalar_t)sizeof (int32_t))
1779 			return (EINVAL);
1780 
1781 		if ((flags & _SOGETSOCKOPT_XPG4_2) && so->so_xpg_rcvbuf != 0) {
1782 			/*
1783 			 * XXX If SO_RCVBUF has been set and this is an
1784 			 * XPG 4.2 application then do not ask the transport
1785 			 * since the transport might adjust the value and not
1786 			 * return exactly what was set by the application.
1787 			 * For non-XPG 4.2 application we return the value
1788 			 * that the transport is actually using.
1789 			 */
1790 			*(int32_t *)optval = so->so_xpg_rcvbuf;
1791 			*optlenp = sizeof (so->so_xpg_rcvbuf);
1792 			return (0);
1793 		}
1794 		/*
1795 		 * If the option has not been set then get a default
1796 		 * value from the transport.
1797 		 */
1798 		break;
1799 	}
1800 	case SO_LINGER: {
1801 		socklen_t optlen = *optlenp;
1802 
1803 		if (optlen < (t_uscalar_t)sizeof (struct linger))
1804 			return (EINVAL);
1805 		break;
1806 	}
1807 	case SO_SND_BUFINFO: {
1808 		socklen_t optlen = *optlenp;
1809 
1810 		if (optlen < (t_uscalar_t)sizeof (struct so_snd_bufinfo))
1811 			return (EINVAL);
1812 		((struct so_snd_bufinfo *)(optval))->sbi_wroff =
1813 		    (so->so_proto_props).sopp_wroff;
1814 		((struct so_snd_bufinfo *)(optval))->sbi_maxblk =
1815 		    (so->so_proto_props).sopp_maxblk;
1816 		((struct so_snd_bufinfo *)(optval))->sbi_maxpsz =
1817 		    (so->so_proto_props).sopp_maxpsz;
1818 		((struct so_snd_bufinfo *)(optval))->sbi_tail =
1819 		    (so->so_proto_props).sopp_tail;
1820 		*optlenp = sizeof (struct so_snd_bufinfo);
1821 		return (0);
1822 	}
1823 	case SO_SND_COPYAVOID: {
1824 		sof_instance_t *inst;
1825 
1826 		/*
1827 		 * Avoid zero-copy if there is a filter with a data_out
1828 		 * callback. We could let the operation succeed, but then
1829 		 * the filter would have to copy the data anyway.
1830 		 */
1831 		for (inst = so->so_filter_top; inst != NULL;
1832 		    inst = inst->sofi_next) {
1833 			if (SOF_INTERESTED(inst, data_out))
1834 				return (EOPNOTSUPP);
1835 		}
1836 		break;
1837 	}
1838 
1839 	default:
1840 		break;
1841 	}
1842 
1843 	/* Unknown Option */
1844 	return (-1);
1845 }
1846 
1847 void
1848 socket_sonode_destroy(struct sonode *so)
1849 {
1850 	sonode_fini(so);
1851 	kmem_cache_free(socket_cache, so);
1852 }
1853 
1854 int
1855 so_zcopy_wait(struct sonode *so)
1856 {
1857 	int error = 0;
1858 
1859 	mutex_enter(&so->so_lock);
1860 	while (!(so->so_copyflag & STZCNOTIFY)) {
1861 		if (so->so_state & SS_CLOSING) {
1862 			mutex_exit(&so->so_lock);
1863 			return (EINTR);
1864 		}
1865 		if (cv_wait_sig(&so->so_copy_cv, &so->so_lock) == 0) {
1866 			error = EINTR;
1867 			break;
1868 		}
1869 	}
1870 	so->so_copyflag &= ~STZCNOTIFY;
1871 	mutex_exit(&so->so_lock);
1872 	return (error);
1873 }
1874 
1875 void
1876 so_timer_callback(void *arg)
1877 {
1878 	struct sonode *so = (struct sonode *)arg;
1879 
1880 	mutex_enter(&so->so_lock);
1881 
1882 	so->so_rcv_timer_tid = 0;
1883 	if (so->so_rcv_queued > 0) {
1884 		so_notify_data(so, so->so_rcv_queued);
1885 	} else {
1886 		mutex_exit(&so->so_lock);
1887 	}
1888 }
1889 
1890 #ifdef DEBUG
1891 /*
1892  * Verify that the length stored in so_rcv_queued and the length of data blocks
1893  * queued are the same.
1894  */
1895 static boolean_t
1896 so_check_length(sonode_t *so)
1897 {
1898 	mblk_t *mp = so->so_rcv_q_head;
1899 	int len = 0;
1900 
1901 	ASSERT(MUTEX_HELD(&so->so_lock));
1902 
1903 	if (mp != NULL) {
1904 		len = msgdsize(mp);
1905 		while ((mp = mp->b_next) != NULL)
1906 			len += msgdsize(mp);
1907 	}
1908 	mp = so->so_rcv_head;
1909 	if (mp != NULL) {
1910 		len += msgdsize(mp);
1911 		while ((mp = mp->b_next) != NULL)
1912 			len += msgdsize(mp);
1913 	}
1914 	return ((len == so->so_rcv_queued) ? B_TRUE : B_FALSE);
1915 }
1916 #endif
1917 
1918 int
1919 so_get_mod_version(struct sockparams *sp)
1920 {
1921 	ASSERT(sp != NULL && sp->sp_smod_info != NULL);
1922 	return (sp->sp_smod_info->smod_version);
1923 }
1924 
1925 /*
1926  * so_start_fallback()
1927  *
1928  * Block new socket operations from coming in, and wait for active operations
1929  * to complete. Threads that are sleeping will be woken up so they can get
1930  * out of the way.
1931  *
1932  * The caller must be a reader on so_fallback_rwlock.
1933  */
1934 static boolean_t
1935 so_start_fallback(struct sonode *so)
1936 {
1937 	ASSERT(RW_READ_HELD(&so->so_fallback_rwlock));
1938 
1939 	mutex_enter(&so->so_lock);
1940 	if (so->so_state & SS_FALLBACK_PENDING) {
1941 		mutex_exit(&so->so_lock);
1942 		return (B_FALSE);
1943 	}
1944 	so->so_state |= SS_FALLBACK_PENDING;
1945 	/*
1946 	 * Poke all threads that might be sleeping. Any operation that comes
1947 	 * in after the cv_broadcast will observe the fallback pending flag
1948 	 * which causes the call to return where it would normally sleep.
1949 	 */
1950 	cv_broadcast(&so->so_state_cv);		/* threads in connect() */
1951 	cv_broadcast(&so->so_rcv_cv);		/* threads in recvmsg() */
1952 	cv_broadcast(&so->so_snd_cv);		/* threads in sendmsg() */
1953 	mutex_enter(&so->so_acceptq_lock);
1954 	cv_broadcast(&so->so_acceptq_cv);	/* threads in accept() */
1955 	mutex_exit(&so->so_acceptq_lock);
1956 	mutex_exit(&so->so_lock);
1957 
1958 	/*
1959 	 * The main reason for the rw_tryupgrade call is to provide
1960 	 * observability during the fallback process. We want to
1961 	 * be able to see if there are pending operations.
1962 	 */
1963 	if (rw_tryupgrade(&so->so_fallback_rwlock) == 0) {
1964 		/*
1965 		 * It is safe to drop and reacquire the fallback lock, because
1966 		 * we are guaranteed that another fallback cannot take place.
1967 		 */
1968 		rw_exit(&so->so_fallback_rwlock);
1969 		DTRACE_PROBE1(pending__ops__wait, (struct sonode *), so);
1970 		rw_enter(&so->so_fallback_rwlock, RW_WRITER);
1971 		DTRACE_PROBE1(pending__ops__complete, (struct sonode *), so);
1972 	}
1973 
1974 	return (B_TRUE);
1975 }
1976 
1977 /*
1978  * so_end_fallback()
1979  *
1980  * Allow socket operations back in.
1981  *
1982  * The caller must be a writer on so_fallback_rwlock.
1983  */
1984 static void
1985 so_end_fallback(struct sonode *so)
1986 {
1987 	ASSERT(RW_ISWRITER(&so->so_fallback_rwlock));
1988 
1989 	mutex_enter(&so->so_lock);
1990 	so->so_state &= ~(SS_FALLBACK_PENDING|SS_FALLBACK_DRAIN);
1991 	mutex_exit(&so->so_lock);
1992 
1993 	rw_downgrade(&so->so_fallback_rwlock);
1994 }
1995 
1996 /*
1997  * so_quiesced_cb()
1998  *
1999  * Callback passed to the protocol during fallback. It is called once
2000  * the endpoint is quiescent.
2001  *
2002  * No requests from the user, no notifications from the protocol, so it
2003  * is safe to synchronize the state. Data can also be moved without
2004  * risk of reordering.
2005  *
2006  * We do not need to hold so_lock, since there can be only one thread
2007  * operating on the sonode.
2008  */
2009 static mblk_t *
2010 so_quiesced_cb(sock_upper_handle_t sock_handle, sock_quiesce_arg_t *arg,
2011     struct T_capability_ack *tcap,
2012     struct sockaddr *laddr, socklen_t laddrlen,
2013     struct sockaddr *faddr, socklen_t faddrlen, short opts)
2014 {
2015 	struct sonode *so = (struct sonode *)sock_handle;
2016 	boolean_t atmark;
2017 	mblk_t *retmp = NULL, **tailmpp = &retmp;
2018 
2019 	if (tcap != NULL)
2020 		sotpi_update_state(so, tcap, laddr, laddrlen, faddr, faddrlen,
2021 		    opts);
2022 
2023 	/*
2024 	 * Some protocols do not quiesce the data path during fallback. Once
2025 	 * we set the SS_FALLBACK_DRAIN flag, any attempt to queue data will
2026 	 * fail and the protocol is responsible for saving the data for later
2027 	 * delivery (i.e., once the fallback has completed).
2028 	 */
2029 	mutex_enter(&so->so_lock);
2030 	so->so_state |= SS_FALLBACK_DRAIN;
2031 	SOCKET_TIMER_CANCEL(so);
2032 	mutex_exit(&so->so_lock);
2033 
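	/*
	 * Append any unprocessed messages (so_rcv_head) to the processed
	 * list (so_rcv_q_head) so that everything is drained in one pass
	 * below.
	 */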
2034 	if (so->so_rcv_head != NULL) {
2035 		if (so->so_rcv_q_last_head == NULL)
2036 			so->so_rcv_q_head = so->so_rcv_head;
2037 		else
2038 			so->so_rcv_q_last_head->b_next = so->so_rcv_head;
2039 		so->so_rcv_q_last_head = so->so_rcv_last_head;
2040 	}
2041 
2042 	atmark = (so->so_state & SS_RCVATMARK) != 0;
2043 	/*
2044 	 * Clear any OOB state having to do with pending data. The TPI
2045 	 * code path will set the appropriate oob state when we move the
2046 	 * oob data to the STREAM head. We leave SS_HADOOBDATA since the oob
2047 	 * data has already been consumed.
2048 	 */
2049 	so->so_state &= ~(SS_RCVATMARK|SS_OOBPEND|SS_HAVEOOBDATA);
2050 
2051 	ASSERT(so->so_oobmsg != NULL || so->so_oobmark <= so->so_rcv_queued);
2052 
2053 	/*
2054 	 * Move data to the STREAM head (via the chain returned to the caller).
2055 	 */
2056 	while (so->so_rcv_q_head != NULL) {
2057 		mblk_t *mp = so->so_rcv_q_head;
2058 		size_t mlen = msgdsize(mp);
2059 
2060 		so->so_rcv_q_head = mp->b_next;
2061 		mp->b_next = NULL;
2062 		mp->b_prev = NULL;
2063 
2064 		/*
2065 		 * Send T_EXDATA_IND if we are at the oob mark.
2066 		 */
2067 		if (atmark) {
2068 			struct T_exdata_ind *tei;
2069 			mblk_t *mp1 = arg->soqa_exdata_mp;
2070 
2071 			arg->soqa_exdata_mp = NULL;
2072 			ASSERT(mp1 != NULL);
2073 			mp1->b_datap->db_type = M_PROTO;
2074 			tei = (struct T_exdata_ind *)mp1->b_rptr;
2075 			tei->PRIM_type = T_EXDATA_IND;
2076 			tei->MORE_flag = 0;
2077 			mp1->b_wptr = (uchar_t *)&tei[1];
2078 
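			/*
			 * With SO_OOBINLINE the urgent byte remains in the
			 * normal data stream, so the current mblk is attached
			 * to the T_EXDATA_IND. Otherwise the saved so_oobmsg
			 * carries the urgent byte, and the current mblk is
			 * requeued so it is processed on the next iteration.
			 */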
2079 			if (IS_SO_OOB_INLINE(so)) {
2080 				mp1->b_cont = mp;
2081 			} else {
2082 				ASSERT(so->so_oobmsg != NULL);
2083 				mp1->b_cont = so->so_oobmsg;
2084 				so->so_oobmsg = NULL;
2085 
2086 				/* process current mp next time around */
2087 				mp->b_next = so->so_rcv_q_head;
2088 				so->so_rcv_q_head = mp;
2089 				mlen = 0;
2090 			}
2091 			mp = mp1;
2092 
2093 			/* we have consumed the oob mark */
2094 			atmark = B_FALSE;
2095 		} else if (so->so_oobmark > 0) {
2096 			/*
2097 			 * Check if the OOB mark is within the current
2098 			 * mblk chain. In that case we have to split it up.
2099 			 */
2100 			if (so->so_oobmark < mlen) {
2101 				mblk_t *urg_mp = mp;
2102 
2103 				atmark = B_TRUE;
2104 				mp = NULL;
2105 				mlen = so->so_oobmark;
2106 
2107 				/*
2108 				 * It is assumed that the OOB mark falls on
2109 				 * an mblk boundary, not within an mblk.
2110 				 */
2111 				do {
2112 					so->so_oobmark -= MBLKL(urg_mp);
2113 					mp = urg_mp;
2114 					urg_mp = urg_mp->b_cont;
2115 				} while (so->so_oobmark > 0);
2116 				mp->b_cont = NULL;
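				/*
				 * urg_mp now points at the first mblk past
				 * the mark; requeue it so it is picked up on
				 * the next iteration.
				 */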
2117 				if (urg_mp != NULL) {
2118 					urg_mp->b_next = so->so_rcv_q_head;
2119 					so->so_rcv_q_head = urg_mp;
2120 				}
2121 			} else {
2122 				so->so_oobmark -= mlen;
2123 				if (so->so_oobmark == 0)
2124 					atmark = B_TRUE;
2125 			}
2126 		}
2127 
2128 		/*
2129 		 * Link the data onto the chain to be queued on the STREAM head.
2130 		 */
2131 		so->so_rcv_queued -= mlen;
2132 		*tailmpp = mp;
2133 		tailmpp = &mp->b_next;
2134 	}
2135 	so->so_rcv_head = NULL;
2136 	so->so_rcv_last_head = NULL;
2137 	so->so_rcv_q_head = NULL;
2138 	so->so_rcv_q_last_head = NULL;
2139 
2140 	/*
2141 	 * Check if the oob byte is at the end of the data stream, or if the
2142 	 * oob byte has not yet arrived. In the latter case we have to send a
2143 	 * SIGURG and a mark indicator to the STREAM head. The mark indicator
2144 	 * is needed to guarantee correct behavior for SIOCATMARK. See block
2145 	 * comment in socktpi.h for more details.
2146 	 */
2147 	if (atmark || so->so_oobmark > 0) {
2148 		mblk_t *mp;
2149 
2150 		if (atmark && so->so_oobmsg != NULL) {
2151 			struct T_exdata_ind *tei;
2152 
2153 			mp = arg->soqa_exdata_mp;
2154 			arg->soqa_exdata_mp = NULL;
2155 			ASSERT(mp != NULL);
2156 			mp->b_datap->db_type = M_PROTO;
2157 			tei = (struct T_exdata_ind *)mp->b_rptr;
2158 			tei->PRIM_type = T_EXDATA_IND;
2159 			tei->MORE_flag = 0;
2160 			mp->b_wptr = (uchar_t *)&tei[1];
2161 
2162 			mp->b_cont = so->so_oobmsg;
2163 			so->so_oobmsg = NULL;
2164 
2165 			*tailmpp = mp;
2166 			tailmpp = &mp->b_next;
2167 		} else {
2168 			/* Send up the signal */
2169 			mp = arg->soqa_exdata_mp;
2170 			arg->soqa_exdata_mp = NULL;
2171 			ASSERT(mp != NULL);
2172 			DB_TYPE(mp) = M_PCSIG;
2173 			*mp->b_wptr++ = (uchar_t)SIGURG;
2174 			*tailmpp = mp;
2175 			tailmpp = &mp->b_next;
2176 
2177 			/* Send up the mark indicator */
2178 			mp = arg->soqa_urgmark_mp;
2179 			arg->soqa_urgmark_mp = NULL;
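			/*
			 * MSGMARKNEXT tells the STREAM head that the mark is
			 * at this point in the stream; MSGNOTMARKNEXT records
			 * that the mark has not yet arrived. SIOCATMARK
			 * depends on this distinction.
			 */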
2180 			mp->b_flag = atmark ? MSGMARKNEXT : MSGNOTMARKNEXT;
2181 			*tailmpp = mp;
2182 			tailmpp = &mp->b_next;
2183 
2184 			so->so_oobmark = 0;
2185 		}
2186 	}
2187 	ASSERT(so->so_oobmark == 0);
2188 	ASSERT(so->so_rcv_queued == 0);
2189 
2190 	return (retmp);
2191 }
2192 
2193 #ifdef DEBUG
2194 /*
2195  * Do an integrity check of the sonode. This should be done if a
2196  * fallback fails after the sonode has initially been converted to use
2197  * TPI and subsequently has to be reverted.
2198  *
2199  * Failure to pass the integrity check will panic the system.
2200  */
2201 void
2202 so_integrity_check(struct sonode *cur, struct sonode *orig)
2203 {
2204 	VERIFY(cur->so_vnode == orig->so_vnode);
2205 	VERIFY(cur->so_ops == orig->so_ops);
2206 	/*
2207 	 * For so_state we can only VERIFY the state flags in CHECK_STATE.
2208 	 * The other state flags might be affected by a notification from the
2209 	 * protocol.
2210 	 */
2211 #define	CHECK_STATE	(SS_CANTRCVMORE|SS_CANTSENDMORE|SS_NDELAY|SS_NONBLOCK| \
2212 	SS_ASYNC|SS_ACCEPTCONN|SS_SAVEDEOR|SS_RCVATMARK|SS_OOBPEND| \
2213 	SS_HAVEOOBDATA|SS_HADOOBDATA|SS_SENTLASTREADSIG|SS_SENTLASTWRITESIG)
2214 	VERIFY((cur->so_state & (orig->so_state & CHECK_STATE)) ==
2215 	    (orig->so_state & CHECK_STATE));
2216 	VERIFY(cur->so_mode == orig->so_mode);
2217 	VERIFY(cur->so_flag == orig->so_flag);
2218 	VERIFY(cur->so_count == orig->so_count);
2219 	/* Cannot VERIFY so_proto_connid; proto can update it */
2220 	VERIFY(cur->so_sockparams == orig->so_sockparams);
2221 	/* An error might have been recorded, but it cannot be lost */
2222 	VERIFY(cur->so_error != 0 || orig->so_error == 0);
2223 	VERIFY(cur->so_family == orig->so_family);
2224 	VERIFY(cur->so_type == orig->so_type);
2225 	VERIFY(cur->so_protocol == orig->so_protocol);
2226 	VERIFY(cur->so_version == orig->so_version);
2227 	/* New conns might have arrived, but none should have been lost */
2228 	VERIFY(cur->so_acceptq_len >= orig->so_acceptq_len);
2229 	VERIFY(list_head(&cur->so_acceptq_list) ==
2230 	    list_head(&orig->so_acceptq_list));
2231 	VERIFY(cur->so_backlog == orig->so_backlog);
2232 	/* New OOB might have arrived, but the mark should not have been lost */
2233 	VERIFY(cur->so_oobmark >= orig->so_oobmark);
2234 	/* Cannot VERIFY so_oobmsg; the proto might have sent up a new one */
2235 	VERIFY(cur->so_pgrp == orig->so_pgrp);
2236 	VERIFY(cur->so_peercred == orig->so_peercred);
2237 	VERIFY(cur->so_cpid == orig->so_cpid);
2238 	VERIFY(cur->so_zoneid == orig->so_zoneid);
2239 	/* New data might have arrived, but none should have been lost */
2240 	VERIFY(cur->so_rcv_queued >= orig->so_rcv_queued);
2241 	VERIFY(cur->so_rcv_q_head == orig->so_rcv_q_head);
2242 	VERIFY(cur->so_rcv_head == orig->so_rcv_head);
2243 	VERIFY(cur->so_proto_handle == orig->so_proto_handle);
2244 	VERIFY(cur->so_downcalls == orig->so_downcalls);
2245 	/* Cannot VERIFY so_proto_props; they can be updated by proto */
2246 }
2247 #endif
2248 
2249 /*
2250  * so_tpi_fallback()
2251  *
2252  * This is the fallback initiation routine; things start here.
2253  *
2254  * Basic strategy:
2255  *   o Block new socket operations from coming in
2256  *   o Allocate/initiate info needed by TPI
2257  *   o Quiesce the connection, at which point we sync
2258  *     state and move data
2259  *   o Change operations (sonodeops) associated with the socket
2260  *   o Unblock threads waiting for the fallback to finish
2261  */
2262 int
2263 so_tpi_fallback(struct sonode *so, struct cred *cr)
2264 {
2265 	int error;
2266 	queue_t *q;
2267 	struct sockparams *sp;
2268 	struct sockparams *newsp = NULL;
2269 	so_proto_fallback_func_t fbfunc;
2270 	const char *devpath;
2271 	boolean_t direct;
2272 	struct sonode *nso;
2273 	sock_quiesce_arg_t arg = { NULL, NULL };
2274 #ifdef DEBUG
2275 	struct sonode origso;
2276 #endif
2277 	error = 0;
2278 	sp = so->so_sockparams;
2279 	fbfunc = sp->sp_smod_info->smod_proto_fallback_func;
2280 
2281 	/*
2282 	 * Cannot fall back if the socket has active filters
2283 	 */
2284 	if (so->so_filter_active > 0)
2285 		return (EINVAL);
2286 
2287 	switch (so->so_family) {
2288 	case AF_INET:
2289 		devpath = sp->sp_smod_info->smod_fallback_devpath_v4;
2290 		break;
2291 	case AF_INET6:
2292 		devpath = sp->sp_smod_info->smod_fallback_devpath_v6;
2293 		break;
2294 	default:
2295 		return (EINVAL);
2296 	}
2297 
2298 	/*
2299 	 * Fallback can only happen if the socket module has a TPI device
2300 	 * and fallback function.
2301 	 */
2302 	if (devpath == NULL || fbfunc == NULL)
2303 		return (EINVAL);
2304 
2305 	/*
2306 	 * Initiate fallback; upon success we know that no new requests
2307 	 * will come in from the user.
2308 	 */
2309 	if (!so_start_fallback(so))
2310 		return (EAGAIN);
2311 #ifdef DEBUG
2312 	/*
2313 	 * Make a copy of the sonode in case we need to make an integrity
2314 	 * check later on.
2315 	 */
2316 	bcopy(so, &origso, sizeof (*so));
2317 #endif
2318 
2319 	sp->sp_stats.sps_nfallback.value.ui64++;
2320 
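	/*
	 * Get a hold on an ephemeral sockparams entry that refers to the
	 * TPI device we are falling back to.
	 */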
2321 	newsp = sockparams_hold_ephemeral_bydev(so->so_family, so->so_type,
2322 	    so->so_protocol, devpath, KM_SLEEP, &error);
2323 	if (error != 0)
2324 		goto out;
2325 
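	/*
	 * Disable the sodirect (direct receive) optimization before the
	 * sonode is converted to TPI.
	 */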
2326 	if (so->so_direct != NULL) {
2327 		sodirect_t *sodp = so->so_direct;
2328 		mutex_enter(&so->so_lock);
2329 
2330 		so->so_direct->sod_enabled = B_FALSE;
2331 		so->so_state &= ~SS_SODIRECT;
2332 		ASSERT(sodp->sod_uioafh == NULL);
2333 		mutex_exit(&so->so_lock);
2334 	}
2335 
2336 	/* Turn sonode into a TPI socket */
2337 	error = sotpi_convert_sonode(so, newsp, &direct, &q, cr);
2338 	if (error != 0)
2339 		goto out;
2340 	/*
2341 	 * When it comes to urgent data we have two cases to deal with;
2342 	 * (1) The oob byte has already arrived, or (2) the protocol has
2343 	 * notified that oob data is pending, but it has not yet arrived.
2344 	 *
2345 	 * For (1) all we need to do is send a T_EXDATA_IND to indicate where
2346 	 * in the byte stream the oob byte is. For (2) we have to send a
2347 	 * SIGURG (M_PCSIG), followed by a zero-length mblk indicating whether
2348 	 * the oob byte will be the next byte from the protocol.
2349 	 *
2350 	 * So in the worst case we need two mblks, one for the signal, another
2351 	 * for mark indication. In that case we use the exdata_mp for the sig.
2352 	 */
2353 	arg.soqa_exdata_mp = allocb_wait(sizeof (struct T_exdata_ind),
2354 	    BPRI_MED, STR_NOSIG, NULL);
2355 	arg.soqa_urgmark_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
2356 
2357 	/*
2358 	 * Now tell the protocol to start using TPI. so_quiesced_cb will be
2359 	 * called once it is safe to synchronize state.
2360 	 */
2361 	DTRACE_PROBE1(proto__fallback__begin, struct sonode *, so);
2362 	error = (*fbfunc)(so->so_proto_handle, q, direct, so_quiesced_cb,
2363 	    &arg);
2364 	DTRACE_PROBE1(proto__fallback__end, struct sonode *, so);
2365 
2366 	if (error != 0) {
2367 		/* protocol was unable to do a fallback, revert the sonode */
2368 		sotpi_revert_sonode(so, cr);
2369 		goto out;
2370 	}
2371 
2372 	/*
2373 	 * Walk the accept queue and notify the protocol that each socket should
2374 	 * fall back to TPI. The protocol will send up the T_CONN_IND.
2375 	 */
2376 	nso = list_head(&so->so_acceptq_list);
2377 	while (nso != NULL) {
2378 		int rval;
2379 		struct sonode *next;
2380 
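		/*
		 * so_quiesced_cb() may have consumed the preallocated mblks
		 * on a previous iteration; replenish them before falling
		 * back the next socket.
		 */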
2381 		if (arg.soqa_exdata_mp == NULL) {
2382 			arg.soqa_exdata_mp =
2383 			    allocb_wait(sizeof (struct T_exdata_ind),
2384 			    BPRI_MED, STR_NOSIG, NULL);
2385 		}
2386 		if (arg.soqa_urgmark_mp == NULL) {
2387 			arg.soqa_urgmark_mp = allocb_wait(0, BPRI_MED,
2388 			    STR_NOSIG, NULL);
2389 		}
2390 
2391 		DTRACE_PROBE1(proto__fallback__begin, struct sonode *, nso);
2392 		rval = (*fbfunc)(nso->so_proto_handle, NULL, direct,
2393 		    so_quiesced_cb, &arg);
2394 		DTRACE_PROBE1(proto__fallback__end, struct sonode *, nso);
2395 		if (rval != 0) {
2396 			/* Abort the connection */
2397 			zcmn_err(getzoneid(), CE_WARN,
2398 			    "Failed to convert socket in accept queue to TPI. "
2399 			    "Pid = %d\n", curproc->p_pid);
2400 			next = list_next(&so->so_acceptq_list, nso);
2401 			list_remove(&so->so_acceptq_list, nso);
2402 			so->so_acceptq_len--;
2403 
2404 			(void) socket_close(nso, 0, CRED());
2405 			socket_destroy(nso);
2406 			nso = next;
2407 		} else {
2408 			nso = list_next(&so->so_acceptq_list, nso);
2409 		}
2410 	}
2411 
2412 	/*
2413 	 * Now flush the acceptq; this will destroy all sockets. They will
2414 	 * be recreated in sotpi_accept().
2415 	 */
2416 	so_acceptq_flush(so, B_FALSE);
2417 
2418 	mutex_enter(&so->so_lock);
2419 	so->so_state |= SS_FALLBACK_COMP;
2420 	mutex_exit(&so->so_lock);
2421 
2422 	/*
2423 	 * Swap the sonode ops. Socket operations that come in once this
2424 	 * is done will proceed without blocking.
2425 	 */
2426 	so->so_ops = &sotpi_sonodeops;
2427 
2428 	/*
2429 	 * Wake up any threads stuck in poll. This is needed since the poll
2430 	 * head changes when the fallback happens (moves from the sonode to
2431 	 * the STREAMS head).
2432 	 */
2433 	pollwakeup(&so->so_poll_list, POLLERR);
2434 
2435 	/*
2436 	 * When this non-STREAM socket was created we placed an extra ref on
2437 	 * the associated vnode to support asynchronous close. Drop that ref
2438 	 * here.
2439 	 */
2440 	ASSERT(SOTOV(so)->v_count >= 2);
2441 	VN_RELE(SOTOV(so));
2442 out:
2443 	so_end_fallback(so);
2444 
2445 	if (error != 0) {
2446 #ifdef DEBUG
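		/*
		 * The fallback did not complete; verify that the sonode was
		 * left (or reverted) in its original state.
		 */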
2447 		so_integrity_check(so, &origso);
2448 #endif
2449 		zcmn_err(getzoneid(), CE_WARN,
2450 		    "Failed to convert socket to TPI (err=%d). Pid = %d\n",
2451 		    error, curproc->p_pid);
2452 		if (newsp != NULL)
2453 			SOCKPARAMS_DEC_REF(newsp);
2454 	}
2455 	if (arg.soqa_exdata_mp != NULL)
2456 		freemsg(arg.soqa_exdata_mp);
2457 	if (arg.soqa_urgmark_mp != NULL)
2458 		freemsg(arg.soqa_urgmark_mp);
2459 
2460 	return (error);
2461 }
2462