xref: /illumos-gate/usr/src/uts/common/rpc/rpcmod.c (revision 8654d0253136055bd4cc2423d87378e8a37f2eb5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /* Copyright (c) 1990 Mentat Inc. */
26 
27 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
28 /*	  All Rights Reserved  	*/
29 
30 
31 #pragma ident	"%Z%%M%	%I%	%E% SMI"
32 
33 /*
34  * Kernel RPC filtering module
35  */
36 
37 #include <sys/param.h>
38 #include <sys/types.h>
39 #include <sys/stream.h>
40 #include <sys/stropts.h>
41 #include <sys/tihdr.h>
42 #include <sys/timod.h>
43 #include <sys/tiuser.h>
44 #include <sys/debug.h>
45 #include <sys/signal.h>
46 #include <sys/pcb.h>
47 #include <sys/user.h>
48 #include <sys/errno.h>
49 #include <sys/cred.h>
50 #include <sys/policy.h>
51 #include <sys/inline.h>
52 #include <sys/cmn_err.h>
53 #include <sys/kmem.h>
54 #include <sys/file.h>
55 #include <sys/sysmacros.h>
56 #include <sys/systm.h>
57 #include <sys/t_lock.h>
58 #include <sys/ddi.h>
59 #include <sys/vtrace.h>
60 #include <sys/callb.h>
61 
62 #include <sys/strlog.h>
63 #include <rpc/rpc_com.h>
64 #include <inet/common.h>
65 #include <rpc/types.h>
66 #include <sys/time.h>
67 #include <rpc/xdr.h>
68 #include <rpc/auth.h>
69 #include <rpc/clnt.h>
70 #include <rpc/rpc_msg.h>
71 #include <rpc/clnt.h>
72 #include <rpc/svc.h>
73 #include <rpc/rpcsys.h>
74 #include <rpc/rpc_rdma.h>
75 
76 /*
77  * This is the loadable module wrapper.
78  */
79 #include <sys/conf.h>
80 #include <sys/modctl.h>
81 #include <sys/syscall.h>
82 
83 extern struct streamtab rpcinfo;
84 
85 static struct fmodsw fsw = {
86 	"rpcmod",
87 	&rpcinfo,
88 	D_NEW|D_MP,
89 };
90 
91 /*
92  * Module linkage information for the kernel.
93  */
94 
95 static struct modlstrmod modlstrmod = {
96 	&mod_strmodops, "rpc interface str mod", &fsw
97 };
98 
99 /*
100  * For the RPC system call.
101  */
102 static struct sysent rpcsysent = {
103 	2,
104 	SE_32RVAL1 | SE_ARGC | SE_NOUNLOAD,
105 	rpcsys
106 };
107 
108 static struct modlsys modlsys = {
109 	&mod_syscallops,
110 	"RPC syscall",
111 	&rpcsysent
112 };
113 
114 #ifdef _SYSCALL32_IMPL
115 static struct modlsys modlsys32 = {
116 	&mod_syscallops32,
117 	"32-bit RPC syscall",
118 	&rpcsysent
119 };
120 #endif /* _SYSCALL32_IMPL */
121 
122 static struct modlinkage modlinkage = {
123 	MODREV_1,
124 	{
125 		&modlsys,
126 #ifdef _SYSCALL32_IMPL
127 		&modlsys32,
128 #endif
129 		&modlstrmod,
130 		NULL
131 	}
132 };
133 
134 int
135 _init(void)
136 {
137 	int error = 0;
138 	callb_id_t cid;
139 	int status;
140 
141 	svc_init();
142 	clnt_init();
143 	cid = callb_add(connmgr_cpr_reset, 0, CB_CL_CPR_RPC, "rpc");
144 
145 	if (error = mod_install(&modlinkage)) {
146 		/*
147 		 * Could not install module, cleanup previous
148 		 * initialization work.
149 		 */
150 		clnt_fini();
151 		if (cid != NULL)
152 			(void) callb_delete(cid);
153 
154 		return (error);
155 	}
156 
157 	/*
158 	 * Load up the RDMA plugins and initialize the stats. Even if the
159 	 * plugins loadup fails, but rpcmod was successfully installed the
160 	 * counters still get initialized.
161 	 */
162 	rw_init(&rdma_lock, NULL, RW_DEFAULT, NULL);
163 	mutex_init(&rdma_modload_lock, NULL, MUTEX_DEFAULT, NULL);
164 	mt_kstat_init();
165 
166 	/*
167 	 * Get our identification into ldi.  This is used for loading
168 	 * other modules, e.g. rpcib.
169 	 */
170 	status = ldi_ident_from_mod(&modlinkage, &rpcmod_li);
171 	if (status != 0) {
172 		cmn_err(CE_WARN, "ldi_ident_from_mod fails with %d", status);
173 		rpcmod_li = NULL;
174 	}
175 
176 	return (error);
177 }
178 
179 /*
180  * The unload entry point fails, because we advertise entry points into
181  * rpcmod from the rest of kRPC: rpcmod_release().
182  */
183 int
184 _fini(void)
185 {
186 	return (EBUSY);
187 }
188 
189 int
190 _info(struct modinfo *modinfop)
191 {
192 	return (mod_info(&modlinkage, modinfop));
193 }
194 
195 extern int nulldev();
196 
197 #define	RPCMOD_ID	2049
198 
199 int rmm_open(), rmm_close();
200 
201 /*
202  * To save instructions, since STREAMS ignores the return value
203  * from these functions, they are defined as void here. Kind of icky, but...
204  */
205 void rmm_rput(queue_t *, mblk_t *);
206 void rmm_wput(queue_t *, mblk_t *);
207 void rmm_rsrv(queue_t *);
208 void rmm_wsrv(queue_t *);
209 
210 int rpcmodopen(), rpcmodclose();
211 void rpcmodrput(), rpcmodwput();
212 void rpcmodrsrv(), rpcmodwsrv();
213 
214 static	void	rpcmodwput_other(queue_t *, mblk_t *);
215 static	int	mir_close(queue_t *q);
216 static	int	mir_open(queue_t *q, dev_t *devp, int flag, int sflag,
217 		    cred_t *credp);
218 static	void	mir_rput(queue_t *q, mblk_t *mp);
219 static	void	mir_rsrv(queue_t *q);
220 static	void	mir_wput(queue_t *q, mblk_t *mp);
221 static	void	mir_wsrv(queue_t *q);
222 
223 static struct module_info rpcmod_info =
224 	{RPCMOD_ID, "rpcmod", 0, INFPSZ, 256*1024, 1024};
225 
226 /*
227  * Read side has no service procedure.
228  */
229 static struct qinit rpcmodrinit = {
230 	(int (*)())rmm_rput,
231 	(int (*)())rmm_rsrv,
232 	rmm_open,
233 	rmm_close,
234 	nulldev,
235 	&rpcmod_info,
236 	NULL
237 };
238 
239 /*
240  * The write put procedure is simply putnext to conserve stack space.
241  * The write service procedure is not used to queue data, but instead to
242  * synchronize with flow control.
243  */
244 static struct qinit rpcmodwinit = {
245 	(int (*)())rmm_wput,
246 	(int (*)())rmm_wsrv,
247 	rmm_open,
248 	rmm_close,
249 	nulldev,
250 	&rpcmod_info,
251 	NULL
252 };
253 struct streamtab rpcinfo = { &rpcmodrinit, &rpcmodwinit, NULL, NULL };
254 
255 struct xprt_style_ops {
256 	int (*xo_open)();
257 	int (*xo_close)();
258 	void (*xo_wput)();
259 	void (*xo_wsrv)();
260 	void (*xo_rput)();
261 	void (*xo_rsrv)();
262 };
263 
264 static struct xprt_style_ops xprt_clts_ops = {
265 	rpcmodopen,
266 	rpcmodclose,
267 	rpcmodwput,
268 	rpcmodwsrv,
269 	rpcmodrput,
270 	NULL
271 };
272 
273 static struct xprt_style_ops xprt_cots_ops = {
274 	mir_open,
275 	mir_close,
276 	mir_wput,
277 	mir_wsrv,
278 	mir_rput,
279 	mir_rsrv
280 };
281 
282 /*
283  * Per rpcmod "slot" data structure. q->q_ptr points to one of these.
284  */
285 struct rpcm {
286 	void		*rm_krpc_cell;	/* Reserved for use by KRPC */
287 	struct		xprt_style_ops	*rm_ops;
288 	int		rm_type;	/* Client or server side stream */
289 #define	RM_CLOSING	0x1		/* somebody is trying to close slot */
290 	uint_t		rm_state;	/* state of the slot. see above */
291 	uint_t		rm_ref;		/* cnt of external references to slot */
292 	kmutex_t	rm_lock;	/* mutex protecting above fields */
293 	kcondvar_t	rm_cwait;	/* condition for closing */
294 	zoneid_t	rm_zoneid;	/* zone which pushed rpcmod */
295 };
296 
297 struct temp_slot {
298 	void *cell;
299 	struct xprt_style_ops *ops;
300 	int type;
301 	mblk_t *info_ack;
302 	kmutex_t lock;
303 	kcondvar_t wait;
304 };
305 
306 typedef struct mir_s {
307 	void	*mir_krpc_cell;	/* Reserved for KRPC use. This field */
308 					/* must be first in the structure. */
309 	struct xprt_style_ops	*rm_ops;
310 	int	mir_type;		/* Client or server side stream */
311 
312 	mblk_t	*mir_head_mp;		/* RPC msg in progress */
313 		/*
314 		 * mir_head_mp points the first mblk being collected in
315 		 * the current RPC message.  Record headers are removed
316 		 * before data is linked into mir_head_mp.
317 		 */
318 	mblk_t	*mir_tail_mp;		/* Last mblk in mir_head_mp */
319 		/*
320 		 * mir_tail_mp points to the last mblk in the message
321 		 * chain starting at mir_head_mp.  It is only valid
322 		 * if mir_head_mp is non-NULL and is used to add new
323 		 * data blocks to the end of chain quickly.
324 		 */
325 
326 	int32_t	mir_frag_len;		/* Bytes seen in the current frag */
327 		/*
328 		 * mir_frag_len starts at -4 for beginning of each fragment.
329 		 * When this length is negative, it indicates the number of
330 		 * bytes that rpcmod needs to complete the record marker
331 		 * header.  When it is positive or zero, it holds the number
332 		 * of bytes that have arrived for the current fragment and
333 		 * are held in mir_header_mp.
334 		 */
335 
336 	int32_t	mir_frag_header;
337 		/*
338 		 * Fragment header as collected for the current fragment.
339 		 * It holds the last-fragment indicator and the number
340 		 * of bytes in the fragment.
341 		 */
342 
343 	unsigned int
344 		mir_ordrel_pending : 1,	/* Sent T_ORDREL_REQ */
345 		mir_hold_inbound : 1,	/* Hold inbound messages on server */
346 					/* side until outbound flow control */
347 					/* is relieved. */
348 		mir_closing : 1,	/* The stream is being closed */
349 		mir_inrservice : 1,	/* data queued or rd srv proc running */
350 		mir_inwservice : 1,	/* data queued or wr srv proc running */
351 		mir_inwflushdata : 1,	/* flush M_DATAs when srv runs */
352 		/*
353 		 * On client streams, mir_clntreq is 0 or 1; it is set
354 		 * to 1 whenever a new request is sent out (mir_wput)
355 		 * and cleared when the timer fires (mir_timer).  If
356 		 * the timer fires with this value equal to 0, then the
357 		 * stream is considered idle and KRPC is notified.
358 		 */
359 		mir_clntreq : 1,
360 		/*
361 		 * On server streams, stop accepting messages
362 		 */
363 		mir_svc_no_more_msgs : 1,
364 		mir_listen_stream : 1,	/* listen end point */
365 		mir_unused : 1,	/* no longer used */
366 		mir_timer_call : 1,
367 		mir_junk_fill_thru_bit_31 : 21;
368 
369 	int	mir_setup_complete;	/* server has initialized everything */
370 	timeout_id_t mir_timer_id;	/* Timer for idle checks */
371 	clock_t	mir_idle_timeout;	/* Allowed idle time before shutdown */
372 		/*
373 		 * This value is copied from clnt_idle_timeout or
374 		 * svc_idle_timeout during the appropriate ioctl.
375 		 * Kept in milliseconds
376 		 */
377 	clock_t	mir_use_timestamp;	/* updated on client with each use */
378 		/*
379 		 * This value is set to lbolt
380 		 * every time a client stream sends or receives data.
381 		 * Even if the timer message arrives, we don't shutdown
382 		 * client unless:
383 		 *    lbolt >= MSEC_TO_TICK(mir_idle_timeout)+mir_use_timestamp.
384 		 * This value is kept in HZ.
385 		 */
386 
387 	uint_t	*mir_max_msg_sizep;	/* Reference to sanity check size */
388 		/*
389 		 * This pointer is set to &clnt_max_msg_size or
390 		 * &svc_max_msg_size during the appropriate ioctl.
391 		 */
392 	zoneid_t mir_zoneid;	/* zone which pushed rpcmod */
393 	/* Server-side fields. */
394 	int	mir_ref_cnt;		/* Reference count: server side only */
395 					/* counts the number of references */
396 					/* that a kernel RPC server thread */
397 					/* (see svc_run()) has on this rpcmod */
398 					/* slot. Effectively, it is the */
399 					/* number * of unprocessed messages */
400 					/* that have been passed up to the */
401 					/* KRPC layer */
402 
403 	mblk_t	*mir_svc_pend_mp;	/* Pending T_ORDREL_IND or */
404 					/* T_DISCON_IND */
405 
406 	/*
407 	 * these fields are for both client and server, but for debugging,
408 	 * it is easier to have these last in the structure.
409 	 */
410 	kmutex_t	mir_mutex;	/* Mutex and condvar for close */
411 	kcondvar_t	mir_condvar;	/* synchronization. */
412 	kcondvar_t	mir_timer_cv;	/* Timer routine sync. */
413 } mir_t;
414 
415 void tmp_rput(queue_t *q, mblk_t *mp);
416 
417 struct xprt_style_ops tmpops = {
418 	NULL,
419 	NULL,
420 	putnext,
421 	NULL,
422 	tmp_rput,
423 	NULL
424 };
425 
426 void
427 tmp_rput(queue_t *q, mblk_t *mp)
428 {
429 	struct temp_slot *t = (struct temp_slot *)(q->q_ptr);
430 	struct T_info_ack *pptr;
431 
432 	switch (mp->b_datap->db_type) {
433 	case M_PCPROTO:
434 		pptr = (struct T_info_ack *)mp->b_rptr;
435 		switch (pptr->PRIM_type) {
436 		case T_INFO_ACK:
437 			mutex_enter(&t->lock);
438 			t->info_ack = mp;
439 			cv_signal(&t->wait);
440 			mutex_exit(&t->lock);
441 			return;
442 		default:
443 			break;
444 		}
445 	default:
446 		break;
447 	}
448 
449 	/*
450 	 * Not an info-ack, so free it. This is ok because we should
451 	 * not be receiving data until the open finishes: rpcmod
452 	 * is pushed well before the end-point is bound to an address.
453 	 */
454 	freemsg(mp);
455 }
456 
457 int
458 rmm_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *crp)
459 {
460 	mblk_t *bp;
461 	struct temp_slot ts, *t;
462 	struct T_info_ack *pptr;
463 	int error = 0;
464 
465 	ASSERT(q != NULL);
466 	/*
467 	 * Check for re-opens.
468 	 */
469 	if (q->q_ptr) {
470 		TRACE_1(TR_FAC_KRPC, TR_RPCMODOPEN_END,
471 		    "rpcmodopen_end:(%s)", "q->qptr");
472 		return (0);
473 	}
474 
475 	t = &ts;
476 	bzero(t, sizeof (*t));
477 	q->q_ptr = (void *)t;
478 	WR(q)->q_ptr = (void *)t;
479 
480 	/*
481 	 * Allocate the required messages upfront.
482 	 */
483 	if ((bp = allocb(sizeof (struct T_info_req) +
484 	    sizeof (struct T_info_ack), BPRI_LO)) == (mblk_t *)NULL) {
485 		return (ENOBUFS);
486 	}
487 
488 	mutex_init(&t->lock, NULL, MUTEX_DEFAULT, NULL);
489 	cv_init(&t->wait, NULL, CV_DEFAULT, NULL);
490 
491 	t->ops = &tmpops;
492 
493 	qprocson(q);
494 	bp->b_datap->db_type = M_PCPROTO;
495 	*(int32_t *)bp->b_wptr = (int32_t)T_INFO_REQ;
496 	bp->b_wptr += sizeof (struct T_info_req);
497 	putnext(WR(q), bp);
498 
499 	mutex_enter(&t->lock);
500 	while (t->info_ack == NULL) {
501 		if (cv_wait_sig(&t->wait, &t->lock) == 0) {
502 			error = EINTR;
503 			break;
504 		}
505 	}
506 	mutex_exit(&t->lock);
507 
508 	if (error)
509 		goto out;
510 
511 	pptr = (struct T_info_ack *)t->info_ack->b_rptr;
512 
513 	if (pptr->SERV_type == T_CLTS) {
514 		if ((error = rpcmodopen(q, devp, flag, sflag, crp)) == 0)
515 			((struct rpcm *)q->q_ptr)->rm_ops = &xprt_clts_ops;
516 	} else {
517 		if ((error = mir_open(q, devp, flag, sflag, crp)) == 0)
518 			((mir_t *)q->q_ptr)->rm_ops = &xprt_cots_ops;
519 	}
520 
521 out:
522 	if (error)
523 		qprocsoff(q);
524 
525 	freemsg(t->info_ack);
526 	mutex_destroy(&t->lock);
527 	cv_destroy(&t->wait);
528 
529 	return (error);
530 }
531 
532 void
533 rmm_rput(queue_t *q, mblk_t  *mp)
534 {
535 	(*((struct temp_slot *)q->q_ptr)->ops->xo_rput)(q, mp);
536 }
537 
538 void
539 rmm_rsrv(queue_t *q)
540 {
541 	(*((struct temp_slot *)q->q_ptr)->ops->xo_rsrv)(q);
542 }
543 
544 void
545 rmm_wput(queue_t *q, mblk_t *mp)
546 {
547 	(*((struct temp_slot *)q->q_ptr)->ops->xo_wput)(q, mp);
548 }
549 
550 void
551 rmm_wsrv(queue_t *q)
552 {
553 	(*((struct temp_slot *)q->q_ptr)->ops->xo_wsrv)(q);
554 }
555 
556 int
557 rmm_close(queue_t *q, int flag, cred_t *crp)
558 {
559 	return ((*((struct temp_slot *)q->q_ptr)->ops->xo_close)(q, flag, crp));
560 }
561 
562 /*
563  * rpcmodopen -	open routine gets called when the module gets pushed
564  *		onto the stream.
565  */
566 /*ARGSUSED*/
567 int
568 rpcmodopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *crp)
569 {
570 	struct rpcm *rmp;
571 
572 	extern void (*rpc_rele)(queue_t *, mblk_t *);
573 	static void rpcmod_release(queue_t *, mblk_t *);
574 
575 	TRACE_0(TR_FAC_KRPC, TR_RPCMODOPEN_START, "rpcmodopen_start:");
576 
577 	/*
578 	 * Initialize entry points to release a rpcmod slot (and an input
579 	 * message if supplied) and to send an output message to the module
580 	 * below rpcmod.
581 	 */
582 	if (rpc_rele == NULL)
583 		rpc_rele = rpcmod_release;
584 
585 	/*
586 	 * Only sufficiently privileged users can use this module, and it
587 	 * is assumed that they will use this module properly, and NOT send
588 	 * bulk data from downstream.
589 	 */
590 	if (secpolicy_rpcmod_open(crp) != 0)
591 		return (EPERM);
592 
593 	/*
594 	 * Allocate slot data structure.
595 	 */
596 	rmp = kmem_zalloc(sizeof (*rmp), KM_SLEEP);
597 
598 	mutex_init(&rmp->rm_lock, NULL, MUTEX_DEFAULT, NULL);
599 	cv_init(&rmp->rm_cwait, NULL, CV_DEFAULT, NULL);
600 	rmp->rm_zoneid = rpc_zoneid();
601 	/*
602 	 * slot type will be set by kRPC client and server ioctl's
603 	 */
604 	rmp->rm_type = 0;
605 
606 	q->q_ptr = (void *)rmp;
607 	WR(q)->q_ptr = (void *)rmp;
608 
609 	TRACE_1(TR_FAC_KRPC, TR_RPCMODOPEN_END, "rpcmodopen_end:(%s)", "end");
610 	return (0);
611 }
612 
613 /*
614  * rpcmodclose - This routine gets called when the module gets popped
615  * off of the stream.
616  */
617 /*ARGSUSED*/
618 int
619 rpcmodclose(queue_t *q, int flag, cred_t *crp)
620 {
621 	struct rpcm *rmp;
622 
623 	ASSERT(q != NULL);
624 	rmp = (struct rpcm *)q->q_ptr;
625 
626 	/*
627 	 * Mark our state as closing.
628 	 */
629 	mutex_enter(&rmp->rm_lock);
630 	rmp->rm_state |= RM_CLOSING;
631 
632 	/*
633 	 * Check and see if there are any messages on the queue.  If so, send
634 	 * the messages, regardless whether the downstream module is ready to
635 	 * accept data.
636 	 */
637 	if (rmp->rm_type == RPC_SERVER) {
638 		flushq(q, FLUSHDATA);
639 
640 		qenable(WR(q));
641 
642 		if (rmp->rm_ref) {
643 			mutex_exit(&rmp->rm_lock);
644 			/*
645 			 * call into SVC to clean the queue
646 			 */
647 			svc_queueclean(q);
648 			mutex_enter(&rmp->rm_lock);
649 
650 			/*
651 			 * Block while there are kRPC threads with a reference
652 			 * to this message.
653 			 */
654 			while (rmp->rm_ref)
655 				cv_wait(&rmp->rm_cwait, &rmp->rm_lock);
656 		}
657 
658 		mutex_exit(&rmp->rm_lock);
659 
660 		/*
661 		 * It is now safe to remove this queue from the stream. No kRPC
662 		 * threads have a reference to the stream, and none ever will,
663 		 * because RM_CLOSING is set.
664 		 */
665 		qprocsoff(q);
666 
667 		/* Notify kRPC that this stream is going away. */
668 		svc_queueclose(q);
669 	} else {
670 		mutex_exit(&rmp->rm_lock);
671 		qprocsoff(q);
672 	}
673 
674 	q->q_ptr = NULL;
675 	WR(q)->q_ptr = NULL;
676 	mutex_destroy(&rmp->rm_lock);
677 	cv_destroy(&rmp->rm_cwait);
678 	kmem_free(rmp, sizeof (*rmp));
679 	return (0);
680 }
681 
682 #ifdef	DEBUG
683 int	rpcmod_send_msg_up = 0;
684 int	rpcmod_send_uderr = 0;
685 int	rpcmod_send_dup = 0;
686 int	rpcmod_send_dup_cnt = 0;
687 #endif
688 
689 /*
690  * rpcmodrput -	Module read put procedure.  This is called from
691  *		the module, driver, or stream head downstream.
692  */
693 void
694 rpcmodrput(queue_t *q, mblk_t *mp)
695 {
696 	struct rpcm *rmp;
697 	union T_primitives *pptr;
698 	int hdrsz;
699 
700 	TRACE_0(TR_FAC_KRPC, TR_RPCMODRPUT_START, "rpcmodrput_start:");
701 
702 	ASSERT(q != NULL);
703 	rmp = (struct rpcm *)q->q_ptr;
704 
705 	if (rmp->rm_type == 0) {
706 		freemsg(mp);
707 		return;
708 	}
709 
710 #ifdef DEBUG
711 	if (rpcmod_send_msg_up > 0) {
712 		mblk_t *nmp = copymsg(mp);
713 		if (nmp) {
714 			putnext(q, nmp);
715 			rpcmod_send_msg_up--;
716 		}
717 	}
718 	if ((rpcmod_send_uderr > 0) && mp->b_datap->db_type == M_PROTO) {
719 		mblk_t *nmp;
720 		struct T_unitdata_ind *data;
721 		struct T_uderror_ind *ud;
722 		int d;
723 		data = (struct T_unitdata_ind *)mp->b_rptr;
724 		if (data->PRIM_type == T_UNITDATA_IND) {
725 			d = sizeof (*ud) - sizeof (*data);
726 			nmp = allocb(mp->b_wptr - mp->b_rptr + d, BPRI_HI);
727 			if (nmp) {
728 				ud = (struct T_uderror_ind *)nmp->b_rptr;
729 				ud->PRIM_type = T_UDERROR_IND;
730 				ud->DEST_length = data->SRC_length;
731 				ud->DEST_offset = data->SRC_offset + d;
732 				ud->OPT_length = data->OPT_length;
733 				ud->OPT_offset = data->OPT_offset + d;
734 				ud->ERROR_type = ENETDOWN;
735 				if (data->SRC_length) {
736 					bcopy(mp->b_rptr +
737 					    data->SRC_offset,
738 					    nmp->b_rptr +
739 					    ud->DEST_offset,
740 					    data->SRC_length);
741 				}
742 				if (data->OPT_length) {
743 					bcopy(mp->b_rptr +
744 					    data->OPT_offset,
745 					    nmp->b_rptr +
746 					    ud->OPT_offset,
747 					    data->OPT_length);
748 				}
749 				nmp->b_wptr += d;
750 				nmp->b_wptr += (mp->b_wptr - mp->b_rptr);
751 				nmp->b_datap->db_type = M_PROTO;
752 				putnext(q, nmp);
753 				rpcmod_send_uderr--;
754 			}
755 		}
756 	}
757 #endif
758 	switch (mp->b_datap->db_type) {
759 	default:
760 		putnext(q, mp);
761 		break;
762 
763 	case M_PROTO:
764 	case M_PCPROTO:
765 		ASSERT((mp->b_wptr - mp->b_rptr) >= sizeof (int32_t));
766 		pptr = (union T_primitives *)mp->b_rptr;
767 
768 		/*
769 		 * Forward this message to krpc if it is data.
770 		 */
771 		if (pptr->type == T_UNITDATA_IND) {
772 		    mblk_t *nmp;
773 
774 		/*
775 		 * Check if the module is being popped.
776 		 */
777 		    mutex_enter(&rmp->rm_lock);
778 		    if (rmp->rm_state & RM_CLOSING) {
779 			mutex_exit(&rmp->rm_lock);
780 			putnext(q, mp);
781 			break;
782 		    }
783 
784 		    switch (rmp->rm_type) {
785 		    case RPC_CLIENT:
786 			mutex_exit(&rmp->rm_lock);
787 			hdrsz = mp->b_wptr - mp->b_rptr;
788 
789 			/*
790 			 * Make sure the header is sane.
791 			 */
792 			if (hdrsz < TUNITDATAINDSZ ||
793 				hdrsz < (pptr->unitdata_ind.OPT_length +
794 					pptr->unitdata_ind.OPT_offset) ||
795 				hdrsz < (pptr->unitdata_ind.SRC_length +
796 					pptr->unitdata_ind.SRC_offset)) {
797 					freemsg(mp);
798 					return;
799 			}
800 
801 			/*
802 			 * Call clnt_clts_dispatch_notify, so that it can
803 			 * pass the message to the proper caller.  Don't
804 			 * discard the header just yet since the client may
805 			 * need the sender's address.
806 			 */
807 			clnt_clts_dispatch_notify(mp, hdrsz, rmp->rm_zoneid);
808 			return;
809 		    case RPC_SERVER:
810 			/*
811 			 * rm_krpc_cell is exclusively used by the kRPC
812 			 * CLTS server
813 			 */
814 			if (rmp->rm_krpc_cell) {
815 #ifdef DEBUG
816 				/*
817 				 * Test duplicate request cache and
818 				 * rm_ref count handling by sending a
819 				 * duplicate every so often, if
820 				 * desired.
821 				 */
822 				if (rpcmod_send_dup &&
823 				    rpcmod_send_dup_cnt++ %
824 				    rpcmod_send_dup)
825 					nmp = copymsg(mp);
826 				else
827 					nmp = NULL;
828 #endif
829 				/*
830 				 * Raise the reference count on this
831 				 * module to prevent it from being
832 				 * popped before krpc generates the
833 				 * reply.
834 				 */
835 				rmp->rm_ref++;
836 				mutex_exit(&rmp->rm_lock);
837 
838 				/*
839 				 * Submit the message to krpc.
840 				 */
841 				svc_queuereq(q, mp);
842 #ifdef DEBUG
843 				/*
844 				 * Send duplicate if we created one.
845 				 */
846 				if (nmp) {
847 					mutex_enter(&rmp->rm_lock);
848 					rmp->rm_ref++;
849 					mutex_exit(&rmp->rm_lock);
850 					svc_queuereq(q, nmp);
851 				}
852 #endif
853 			} else {
854 				mutex_exit(&rmp->rm_lock);
855 				freemsg(mp);
856 			}
857 			return;
858 		    default:
859 			mutex_exit(&rmp->rm_lock);
860 			freemsg(mp);
861 			return;
862 		    } /* end switch(rmp->rm_type) */
863 		} else if (pptr->type == T_UDERROR_IND) {
864 		    mutex_enter(&rmp->rm_lock);
865 		    hdrsz = mp->b_wptr - mp->b_rptr;
866 
867 		/*
868 		 * Make sure the header is sane
869 		 */
870 		    if (hdrsz < TUDERRORINDSZ ||
871 			hdrsz < (pptr->uderror_ind.OPT_length +
872 				pptr->uderror_ind.OPT_offset) ||
873 			hdrsz < (pptr->uderror_ind.DEST_length +
874 				pptr->uderror_ind.DEST_offset)) {
875 			    mutex_exit(&rmp->rm_lock);
876 			    freemsg(mp);
877 			    return;
878 		    }
879 
880 		/*
881 		 * In the case where a unit data error has been
882 		 * received, all we need to do is clear the message from
883 		 * the queue.
884 		 */
885 		    mutex_exit(&rmp->rm_lock);
886 		    freemsg(mp);
887 		    RPCLOG(32, "rpcmodrput: unitdata error received at "
888 				"%ld\n", gethrestime_sec());
889 		    return;
890 		} /* end else if (pptr->type == T_UDERROR_IND) */
891 
892 		putnext(q, mp);
893 		break;
894 	} /* end switch (mp->b_datap->db_type) */
895 
896 	TRACE_0(TR_FAC_KRPC, TR_RPCMODRPUT_END,
897 		"rpcmodrput_end:");
898 	/*
899 	 * Return codes are not looked at by the STREAMS framework.
900 	 */
901 }
902 
903 /*
904  * write put procedure
905  */
906 void
907 rpcmodwput(queue_t *q, mblk_t *mp)
908 {
909 	struct rpcm	*rmp;
910 
911 	ASSERT(q != NULL);
912 
913 	switch (mp->b_datap->db_type) {
914 	    case M_PROTO:
915 	    case M_PCPROTO:
916 		    break;
917 	    default:
918 		    rpcmodwput_other(q, mp);
919 		    return;
920 	}
921 
922 	/*
923 	 * Check to see if we can send the message downstream.
924 	 */
925 	if (canputnext(q)) {
926 		putnext(q, mp);
927 		return;
928 	}
929 
930 	rmp = (struct rpcm *)q->q_ptr;
931 	ASSERT(rmp != NULL);
932 
933 	/*
934 	 * The first canputnext failed.  Try again except this time with the
935 	 * lock held, so that we can check the state of the stream to see if
936 	 * it is closing.  If either of these conditions evaluate to true
937 	 * then send the meesage.
938 	 */
939 	mutex_enter(&rmp->rm_lock);
940 	if (canputnext(q) || (rmp->rm_state & RM_CLOSING)) {
941 		mutex_exit(&rmp->rm_lock);
942 		putnext(q, mp);
943 	} else {
944 		/*
945 		 * canputnext failed again and the stream is not closing.
946 		 * Place the message on the queue and let the service
947 		 * procedure handle the message.
948 		 */
949 		mutex_exit(&rmp->rm_lock);
950 		(void) putq(q, mp);
951 	}
952 }
953 
954 static void
955 rpcmodwput_other(queue_t *q, mblk_t *mp)
956 {
957 	struct rpcm	*rmp;
958 	struct iocblk	*iocp;
959 
960 	rmp = (struct rpcm *)q->q_ptr;
961 	ASSERT(rmp != NULL);
962 
963 	switch (mp->b_datap->db_type) {
964 		case M_IOCTL:
965 			iocp = (struct iocblk *)mp->b_rptr;
966 			ASSERT(iocp != NULL);
967 			switch (iocp->ioc_cmd) {
968 			    case RPC_CLIENT:
969 			    case RPC_SERVER:
970 				    mutex_enter(&rmp->rm_lock);
971 				    rmp->rm_type = iocp->ioc_cmd;
972 				    mutex_exit(&rmp->rm_lock);
973 				    mp->b_datap->db_type = M_IOCACK;
974 				    qreply(q, mp);
975 				    return;
976 			    default:
977 				/*
978 				 * pass the ioctl downstream and hope someone
979 				 * down there knows how to handle it.
980 				 */
981 				    putnext(q, mp);
982 				    return;
983 			}
984 		default:
985 			break;
986 	}
987 	/*
988 	 * This is something we definitely do not know how to handle, just
989 	 * pass the message downstream
990 	 */
991 	putnext(q, mp);
992 }
993 
994 /*
995  * Module write service procedure. This is called by downstream modules
996  * for back enabling during flow control.
997  */
998 void
999 rpcmodwsrv(queue_t *q)
1000 {
1001 	struct rpcm	*rmp;
1002 	mblk_t		*mp = NULL;
1003 
1004 	rmp = (struct rpcm *)q->q_ptr;
1005 	ASSERT(rmp != NULL);
1006 
1007 	/*
1008 	 * Get messages that may be queued and send them down stream
1009 	 */
1010 	while ((mp = getq(q)) != NULL) {
1011 		/*
1012 		 * Optimize the service procedure for the server-side, by
1013 		 * avoiding a call to canputnext().
1014 		 */
1015 		if (rmp->rm_type == RPC_SERVER || canputnext(q)) {
1016 			putnext(q, mp);
1017 			continue;
1018 		}
1019 		(void) putbq(q, mp);
1020 		return;
1021 	}
1022 }
1023 
1024 static void
1025 rpcmod_release(queue_t *q, mblk_t *bp)
1026 {
1027 	struct rpcm *rmp;
1028 
1029 	/*
1030 	 * For now, just free the message.
1031 	 */
1032 	if (bp)
1033 		freemsg(bp);
1034 	rmp = (struct rpcm *)q->q_ptr;
1035 
1036 	mutex_enter(&rmp->rm_lock);
1037 	rmp->rm_ref--;
1038 
1039 	if (rmp->rm_ref == 0 && (rmp->rm_state & RM_CLOSING)) {
1040 		cv_broadcast(&rmp->rm_cwait);
1041 	}
1042 
1043 	mutex_exit(&rmp->rm_lock);
1044 }
1045 
1046 /*
1047  * This part of rpcmod is pushed on a connection-oriented transport for use
1048  * by RPC.  It serves to bypass the Stream head, implements
1049  * the record marking protocol, and dispatches incoming RPC messages.
1050  */
1051 
1052 /* Default idle timer values */
1053 #define	MIR_CLNT_IDLE_TIMEOUT	(5 * (60 * 1000L))	/* 5 minutes */
1054 #define	MIR_SVC_IDLE_TIMEOUT	(6 * (60 * 1000L))	/* 6 minutes */
1055 #define	MIR_SVC_ORDREL_TIMEOUT	(10 * (60 * 1000L))	/* 10 minutes */
1056 #define	MIR_LASTFRAG	0x80000000	/* Record marker */
1057 
1058 #define	DLEN(mp) (mp->b_cont ? msgdsize(mp) : (mp->b_wptr - mp->b_rptr))
1059 
1060 #define	MIR_SVC_QUIESCED(mir)	\
1061 	(mir->mir_ref_cnt == 0 && mir->mir_inrservice == 0)
1062 
1063 #define	MIR_CLEAR_INRSRV(mir_ptr)	{	\
1064 	(mir_ptr)->mir_inrservice = 0;	\
1065 	if ((mir_ptr)->mir_type == RPC_SERVER &&	\
1066 		(mir_ptr)->mir_closing)	\
1067 		cv_signal(&(mir_ptr)->mir_condvar);	\
1068 }
1069 
1070 /*
1071  * Don't block service procedure (and mir_close) if
1072  * we are in the process of closing.
1073  */
1074 #define	MIR_WCANPUTNEXT(mir_ptr, write_q)	\
1075 	(canputnext(write_q) || ((mir_ptr)->mir_svc_no_more_msgs == 1))
1076 
1077 static int	mir_clnt_dup_request(queue_t *q, mblk_t *mp);
1078 static void	mir_rput_proto(queue_t *q, mblk_t *mp);
1079 static int	mir_svc_policy_notify(queue_t *q, int event);
1080 static void	mir_svc_release(queue_t *wq, mblk_t *mp);
1081 static void	mir_svc_start(queue_t *wq);
1082 static void	mir_svc_idle_start(queue_t *, mir_t *);
1083 static void	mir_svc_idle_stop(queue_t *, mir_t *);
1084 static void	mir_svc_start_close(queue_t *, mir_t *);
1085 static void	mir_clnt_idle_do_stop(queue_t *);
1086 static void	mir_clnt_idle_stop(queue_t *, mir_t *);
1087 static void	mir_clnt_idle_start(queue_t *, mir_t *);
1088 static void	mir_wput(queue_t *q, mblk_t *mp);
1089 static void	mir_wput_other(queue_t *q, mblk_t *mp);
1090 static void	mir_wsrv(queue_t *q);
1091 static	void	mir_disconnect(queue_t *, mir_t *ir);
1092 static	int	mir_check_len(queue_t *, int32_t, mblk_t *);
1093 static	void	mir_timer(void *);
1094 
1095 extern void	(*mir_rele)(queue_t *, mblk_t *);
1096 extern void	(*mir_start)(queue_t *);
1097 extern void	(*clnt_stop_idle)(queue_t *);
1098 
1099 clock_t	clnt_idle_timeout = MIR_CLNT_IDLE_TIMEOUT;
1100 clock_t	svc_idle_timeout = MIR_SVC_IDLE_TIMEOUT;
1101 
1102 /*
1103  * Timeout for subsequent notifications of idle connection.  This is
1104  * typically used to clean up after a wedged orderly release.
1105  */
1106 clock_t	svc_ordrel_timeout = MIR_SVC_ORDREL_TIMEOUT; /* milliseconds */
1107 
1108 extern	uint_t	*clnt_max_msg_sizep;
1109 extern	uint_t	*svc_max_msg_sizep;
1110 uint_t	clnt_max_msg_size = RPC_MAXDATASIZE;
1111 uint_t	svc_max_msg_size = RPC_MAXDATASIZE;
1112 uint_t	mir_krpc_cell_null;
1113 
1114 static void
1115 mir_timer_stop(mir_t *mir)
1116 {
1117 	timeout_id_t tid;
1118 
1119 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
1120 
1121 	/*
1122 	 * Since the mir_mutex lock needs to be released to call
1123 	 * untimeout(), we need to make sure that no other thread
1124 	 * can start/stop the timer (changing mir_timer_id) during
1125 	 * that time.  The mir_timer_call bit and the mir_timer_cv
1126 	 * condition variable are used to synchronize this.  Setting
1127 	 * mir_timer_call also tells mir_timer() (refer to the comments
1128 	 * in mir_timer()) that it does not need to do anything.
1129 	 */
1130 	while (mir->mir_timer_call)
1131 		cv_wait(&mir->mir_timer_cv, &mir->mir_mutex);
1132 	mir->mir_timer_call = B_TRUE;
1133 
1134 	if ((tid = mir->mir_timer_id) != 0) {
1135 		mir->mir_timer_id = 0;
1136 		mutex_exit(&mir->mir_mutex);
1137 		(void) untimeout(tid);
1138 		mutex_enter(&mir->mir_mutex);
1139 	}
1140 	mir->mir_timer_call = B_FALSE;
1141 	cv_broadcast(&mir->mir_timer_cv);
1142 }
1143 
1144 static void
1145 mir_timer_start(queue_t *q, mir_t *mir, clock_t intrvl)
1146 {
1147 	timeout_id_t tid;
1148 
1149 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
1150 
1151 	while (mir->mir_timer_call)
1152 		cv_wait(&mir->mir_timer_cv, &mir->mir_mutex);
1153 	mir->mir_timer_call = B_TRUE;
1154 
1155 	if ((tid = mir->mir_timer_id) != 0) {
1156 		mutex_exit(&mir->mir_mutex);
1157 		(void) untimeout(tid);
1158 		mutex_enter(&mir->mir_mutex);
1159 	}
1160 	/* Only start the timer when it is not closing. */
1161 	if (!mir->mir_closing) {
1162 		mir->mir_timer_id = timeout(mir_timer, q,
1163 		    MSEC_TO_TICK(intrvl));
1164 	}
1165 	mir->mir_timer_call = B_FALSE;
1166 	cv_broadcast(&mir->mir_timer_cv);
1167 }
1168 
1169 static int
1170 mir_clnt_dup_request(queue_t *q, mblk_t *mp)
1171 {
1172 	mblk_t  *mp1;
1173 	uint32_t  new_xid;
1174 	uint32_t  old_xid;
1175 
1176 	ASSERT(MUTEX_HELD(&((mir_t *)q->q_ptr)->mir_mutex));
1177 	new_xid = BE32_TO_U32(&mp->b_rptr[4]);
1178 	/*
1179 	 * This loop is a bit tacky -- it walks the STREAMS list of
1180 	 * flow-controlled messages.
1181 	 */
1182 	if ((mp1 = q->q_first) != NULL) {
1183 		do {
1184 			old_xid = BE32_TO_U32(&mp1->b_rptr[4]);
1185 			if (new_xid == old_xid)
1186 				return (1);
1187 		} while ((mp1 = mp1->b_next) != NULL);
1188 	}
1189 	return (0);
1190 }
1191 
1192 static int
1193 mir_close(queue_t *q)
1194 {
1195 	mir_t	*mir;
1196 	mblk_t	*mp;
1197 	bool_t queue_cleaned = FALSE;
1198 
1199 	RPCLOG(32, "rpcmod: mir_close of q 0x%p\n", (void *)q);
1200 	mir = (mir_t *)q->q_ptr;
1201 	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
1202 	mutex_enter(&mir->mir_mutex);
1203 	if ((mp = mir->mir_head_mp) != NULL) {
1204 		mir->mir_head_mp = (mblk_t *)0;
1205 		freemsg(mp);
1206 	}
1207 	/*
1208 	 * Set mir_closing so we get notified when MIR_SVC_QUIESCED()
1209 	 * is TRUE.  And mir_timer_start() won't start the timer again.
1210 	 */
1211 	mir->mir_closing = B_TRUE;
1212 	mir_timer_stop(mir);
1213 
1214 	if (mir->mir_type == RPC_SERVER) {
1215 		flushq(q, FLUSHDATA);	/* Ditch anything waiting on read q */
1216 
1217 		/*
1218 		 * This will prevent more requests from arriving and
1219 		 * will force rpcmod to ignore flow control.
1220 		 */
1221 		mir_svc_start_close(WR(q), mir);
1222 
1223 		while ((!MIR_SVC_QUIESCED(mir)) || mir->mir_inwservice == 1) {
1224 
1225 			if (mir->mir_ref_cnt && !mir->mir_inrservice &&
1226 					(queue_cleaned == FALSE)) {
1227 				/*
1228 				 * call into SVC to clean the queue
1229 				 */
1230 				mutex_exit(&mir->mir_mutex);
1231 				svc_queueclean(q);
1232 				queue_cleaned = TRUE;
1233 				mutex_enter(&mir->mir_mutex);
1234 				continue;
1235 			}
1236 
1237 			/*
1238 			 * Bugid 1253810 - Force the write service
1239 			 * procedure to send its messages, regardless
1240 			 * whether the downstream  module is ready
1241 			 * to accept data.
1242 			 */
1243 			if (mir->mir_inwservice == 1)
1244 				qenable(WR(q));
1245 
1246 			cv_wait(&mir->mir_condvar, &mir->mir_mutex);
1247 		}
1248 
1249 		mutex_exit(&mir->mir_mutex);
1250 		qprocsoff(q);
1251 
1252 		/* Notify KRPC that this stream is going away. */
1253 		svc_queueclose(q);
1254 	} else {
1255 		mutex_exit(&mir->mir_mutex);
1256 		qprocsoff(q);
1257 	}
1258 
1259 	mutex_destroy(&mir->mir_mutex);
1260 	cv_destroy(&mir->mir_condvar);
1261 	cv_destroy(&mir->mir_timer_cv);
1262 	kmem_free(mir, sizeof (mir_t));
1263 	return (0);
1264 }
1265 
1266 /*
1267  * This is server side only (RPC_SERVER).
1268  *
1269  * Exit idle mode.
1270  */
1271 static void
1272 mir_svc_idle_stop(queue_t *q, mir_t *mir)
1273 {
1274 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
1275 	ASSERT((q->q_flag & QREADR) == 0);
1276 	ASSERT(mir->mir_type == RPC_SERVER);
1277 	RPCLOG(16, "rpcmod: mir_svc_idle_stop of q 0x%p\n", (void *)q);
1278 
1279 	mir_timer_stop(mir);
1280 }
1281 
1282 /*
1283  * This is server side only (RPC_SERVER).
1284  *
1285  * Start idle processing, which will include setting idle timer if the
1286  * stream is not being closed.
1287  */
1288 static void
1289 mir_svc_idle_start(queue_t *q, mir_t *mir)
1290 {
1291 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
1292 	ASSERT((q->q_flag & QREADR) == 0);
1293 	ASSERT(mir->mir_type == RPC_SERVER);
1294 	RPCLOG(16, "rpcmod: mir_svc_idle_start q 0x%p\n", (void *)q);
1295 
1296 	/*
1297 	 * Don't re-start idle timer if we are closing queues.
1298 	 */
1299 	if (mir->mir_closing) {
1300 		RPCLOG(16, "mir_svc_idle_start - closing: 0x%p\n",
1301 			(void *)q);
1302 
1303 		/*
1304 		 * We will call mir_svc_idle_start() whenever MIR_SVC_QUIESCED()
1305 		 * is true.  When it is true, and we are in the process of
1306 		 * closing the stream, signal any thread waiting in
1307 		 * mir_close().
1308 		 */
1309 		if (mir->mir_inwservice == 0)
1310 			cv_signal(&mir->mir_condvar);
1311 
1312 	} else {
1313 		RPCLOG(16, "mir_svc_idle_start - reset %s timer\n",
1314 			mir->mir_ordrel_pending ? "ordrel" : "normal");
1315 		/*
1316 		 * Normal condition, start the idle timer.  If an orderly
1317 		 * release has been sent, set the timeout to wait for the
1318 		 * client to close its side of the connection.  Otherwise,
1319 		 * use the normal idle timeout.
1320 		 */
1321 		mir_timer_start(q, mir, mir->mir_ordrel_pending ?
1322 		    svc_ordrel_timeout : mir->mir_idle_timeout);
1323 	}
1324 }
1325 
1326 /* ARGSUSED */
1327 static int
1328 mir_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
1329 {
1330 	mir_t	*mir;
1331 
1332 	RPCLOG(32, "rpcmod: mir_open of q 0x%p\n", (void *)q);
1333 	/* Set variables used directly by KRPC. */
1334 	if (!mir_rele)
1335 		mir_rele = mir_svc_release;
1336 	if (!mir_start)
1337 		mir_start = mir_svc_start;
1338 	if (!clnt_stop_idle)
1339 		clnt_stop_idle = mir_clnt_idle_do_stop;
1340 	if (!clnt_max_msg_sizep)
1341 		clnt_max_msg_sizep = &clnt_max_msg_size;
1342 	if (!svc_max_msg_sizep)
1343 		svc_max_msg_sizep = &svc_max_msg_size;
1344 
1345 	/* Allocate a zero'ed out mir structure for this stream. */
1346 	mir = kmem_zalloc(sizeof (mir_t), KM_SLEEP);
1347 
1348 	/*
1349 	 * We set hold inbound here so that incoming messages will
1350 	 * be held on the read-side queue until the stream is completely
1351 	 * initialized with a RPC_CLIENT or RPC_SERVER ioctl.  During
1352 	 * the ioctl processing, the flag is cleared and any messages that
1353 	 * arrived between the open and the ioctl are delivered to KRPC.
1354 	 *
1355 	 * Early data should never arrive on a client stream since
1356 	 * servers only respond to our requests and we do not send any.
1357 	 * until after the stream is initialized.  Early data is
1358 	 * very common on a server stream where the client will start
1359 	 * sending data as soon as the connection is made (and this
1360 	 * is especially true with TCP where the protocol accepts the
1361 	 * connection before nfsd or KRPC is notified about it).
1362 	 */
1363 
1364 	mir->mir_hold_inbound = 1;
1365 
1366 	/*
1367 	 * Start the record marker looking for a 4-byte header.  When
1368 	 * this length is negative, it indicates that rpcmod is looking
1369 	 * for bytes to consume for the record marker header.  When it
1370 	 * is positive, it holds the number of bytes that have arrived
1371 	 * for the current fragment and are being held in mir_header_mp.
1372 	 */
1373 
1374 	mir->mir_frag_len = -(int32_t)sizeof (uint32_t);
1375 
1376 	mir->mir_zoneid = rpc_zoneid();
1377 	mutex_init(&mir->mir_mutex, NULL, MUTEX_DEFAULT, NULL);
1378 	cv_init(&mir->mir_condvar, NULL, CV_DRIVER, NULL);
1379 	cv_init(&mir->mir_timer_cv, NULL, CV_DRIVER, NULL);
1380 
1381 	q->q_ptr = (char *)mir;
1382 	WR(q)->q_ptr = (char *)mir;
1383 
1384 	/*
1385 	 * We noenable the read-side queue because we don't want it
1386 	 * automatically enabled by putq.  We enable it explicitly
1387 	 * in mir_wsrv when appropriate. (See additional comments on
1388 	 * flow control at the beginning of mir_rsrv.)
1389 	 */
1390 	noenable(q);
1391 
1392 	qprocson(q);
1393 	return (0);
1394 }
1395 
1396 /*
1397  * Read-side put routine for both the client and server side.  Does the
1398  * record marking for incoming RPC messages, and when complete, dispatches
1399  * the message to either the client or server.
1400  */
1401 static void
1402 mir_do_rput(queue_t *q, mblk_t *mp, int srv)
1403 {
1404 	mblk_t	*cont_mp;
1405 	int	excess;
1406 	int32_t	frag_len;
1407 	int32_t	frag_header;
1408 	mblk_t	*head_mp;
1409 	int	len;
1410 	mir_t	*mir;
1411 	mblk_t	*mp1;
1412 	unsigned char	*rptr;
1413 	mblk_t	*tail_mp;
1414 	unsigned char	*wptr;
1415 	boolean_t	stop_timer = B_FALSE;
1416 
1417 	mir = (mir_t *)q->q_ptr;
1418 	ASSERT(mir != NULL);
1419 
1420 	/*
1421 	 * If the stream has not been set up as a RPC_CLIENT or RPC_SERVER
1422 	 * with the corresponding ioctl, then don't accept
1423 	 * any inbound data.  This should never happen for streams
1424 	 * created by nfsd or client-side KRPC because they are careful
1425 	 * to set the mode of the stream before doing anything else.
1426 	 */
1427 	if (mir->mir_type == 0) {
1428 		freemsg(mp);
1429 		return;
1430 	}
1431 
1432 	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
1433 
1434 	switch (mp->b_datap->db_type) {
1435 	case M_DATA:
1436 		break;
1437 	case M_PROTO:
1438 	case M_PCPROTO:
1439 		rptr = mp->b_rptr;
1440 		if (mp->b_wptr - rptr < sizeof (uint32_t)) {
1441 			RPCLOG(1, "mir_rput: runt TPI message (%d bytes)\n",
1442 			    (int)(mp->b_wptr - rptr));
1443 			freemsg(mp);
1444 			return;
1445 		}
1446 		if (((union T_primitives *)rptr)->type != T_DATA_IND) {
1447 			mir_rput_proto(q, mp);
1448 			return;
1449 		}
1450 
1451 		/* Throw away the T_DATA_IND block and continue with data. */
1452 		mp1 = mp;
1453 		mp = mp->b_cont;
1454 		freeb(mp1);
1455 		break;
1456 	case M_SETOPTS:
1457 		/*
1458 		 * If a module on the stream is trying set the Stream head's
1459 		 * high water mark, then set our hiwater to the requested
1460 		 * value.  We are the "stream head" for all inbound
1461 		 * data messages since messages are passed directly to KRPC.
1462 		 */
1463 		if ((mp->b_wptr - mp->b_rptr) >= sizeof (struct stroptions)) {
1464 			struct stroptions	*stropts;
1465 
1466 			stropts = (struct stroptions *)mp->b_rptr;
1467 			if ((stropts->so_flags & SO_HIWAT) &&
1468 				!(stropts->so_flags & SO_BAND)) {
1469 				(void) strqset(q, QHIWAT, 0, stropts->so_hiwat);
1470 			}
1471 		}
1472 		putnext(q, mp);
1473 		return;
1474 	case M_FLUSH:
1475 		RPCLOG(32, "mir_do_rput: ignoring M_FLUSH on q 0x%p. ",
1476 		    (void *)q);
1477 		RPCLOG(32, "M_FLUSH is %x\n", (uint_t)*mp->b_rptr);
1478 
1479 		putnext(q, mp);
1480 		return;
1481 	default:
1482 		putnext(q, mp);
1483 		return;
1484 	}
1485 
1486 	mutex_enter(&mir->mir_mutex);
1487 
1488 	/*
1489 	 * If this connection is closing, don't accept any new messages.
1490 	 */
1491 	if (mir->mir_svc_no_more_msgs) {
1492 		ASSERT(mir->mir_type == RPC_SERVER);
1493 		mutex_exit(&mir->mir_mutex);
1494 		freemsg(mp);
1495 		return;
1496 	}
1497 
1498 	/* Get local copies for quicker access. */
1499 	frag_len = mir->mir_frag_len;
1500 	frag_header = mir->mir_frag_header;
1501 	head_mp = mir->mir_head_mp;
1502 	tail_mp = mir->mir_tail_mp;
1503 
1504 	/* Loop, processing each message block in the mp chain separately. */
1505 	do {
1506 		/*
1507 		 * cont_mp is used in the do/while condition below to
1508 		 * walk to the next block in the STREAMS message.
1509 		 * mp->b_cont may be nil'ed during processing so we
1510 		 * can't rely on it to find the next block.
1511 		 */
1512 		cont_mp = mp->b_cont;
1513 
1514 		/*
1515 		 * Get local copies of rptr and wptr for our processing.
1516 		 * These always point into "mp" (the current block being
1517 		 * processed), but rptr is updated as we consume any
1518 		 * record header in this message, and wptr is updated to
1519 		 * point to the end of the data for the current fragment,
1520 		 * if it ends in this block.  The main point is that
1521 		 * they are not always the same as b_rptr and b_wptr.
1522 		 * b_rptr and b_wptr will be updated when appropriate.
1523 		 */
1524 		rptr = mp->b_rptr;
1525 		wptr = mp->b_wptr;
1526 same_mblk:;
1527 		len = (int)(wptr - rptr);
1528 		if (len <= 0) {
1529 			/*
1530 			 * If we have processed all of the data in the message
1531 			 * or the block is empty to begin with, then we're
1532 			 * done with this block and can go on to cont_mp,
1533 			 * if there is one.
1534 			 *
1535 			 * First, we check to see if the current block is
1536 			 * now zero-length and, if so, we free it.
1537 			 * This happens when either the block was empty
1538 			 * to begin with or we consumed all of the data
1539 			 * for the record marking header.
1540 			 */
1541 			if (rptr <= mp->b_rptr) {
1542 				/*
1543 				 * If head_mp is non-NULL, add cont_mp to the
1544 				 * mblk list. XXX But there is a possibility
1545 				 * that tail_mp = mp or even head_mp = mp XXX
1546 				 */
1547 				if (head_mp) {
1548 					if (head_mp == mp)
1549 						head_mp = NULL;
1550 					else if (tail_mp != mp) {
1551 		ASSERT((tail_mp->b_cont == NULL) || (tail_mp->b_cont == mp));
1552 						tail_mp->b_cont = cont_mp;
1553 						/*
1554 						 * It's possible that, because
1555 						 * of a very short mblk (0-3
1556 						 * bytes), we've ended up here
1557 						 * and that cont_mp could be
1558 						 * NULL (if we're at the end
1559 						 * of an mblk chain). If so,
1560 						 * don't set tail_mp to
1561 						 * cont_mp, because the next
1562 						 * time we access it, we'll
1563 						 * dereference a NULL pointer
1564 						 * and crash. Just leave
1565 						 * tail_mp pointing at the
1566 						 * current end of chain.
1567 						 */
1568 						if (cont_mp)
1569 							tail_mp = cont_mp;
1570 					} else {
1571 						mblk_t *smp = head_mp;
1572 
1573 						while ((smp->b_cont != NULL) &&
1574 							(smp->b_cont != mp))
1575 							smp = smp->b_cont;
1576 						smp->b_cont = cont_mp;
1577 						/*
1578 						 * Don't set tail_mp to cont_mp
1579 						 * if it's NULL. Instead, set
1580 						 * tail_mp to smp, which is the
1581 						 * end of the chain starting
1582 						 * at head_mp.
1583 						 */
1584 						if (cont_mp)
1585 							tail_mp = cont_mp;
1586 						else
1587 							tail_mp = smp;
1588 					}
1589 				}
1590 				freeb(mp);
1591 			}
1592 			continue;
1593 		}
1594 
1595 		/*
1596 		 * frag_len starts at -4 and is incremented past the record
1597 		 * marking header to 0, and then becomes positive as real data
1598 		 * bytes are received for the message.  While frag_len is less
1599 		 * than zero, we need more bytes for the record marking
1600 		 * header.
1601 		 */
1602 		if (frag_len < 0) {
1603 			uchar_t	*up = rptr;
1604 			/*
1605 			 * Collect as many bytes as we need for the record
1606 			 * marking header and that are available in this block.
1607 			 */
1608 			do {
1609 				--len;
1610 				frag_len++;
1611 				frag_header <<= 8;
1612 				frag_header += (*up++ & 0xFF);
1613 			} while (len > 0 && frag_len < 0);
1614 
1615 			if (rptr == mp->b_rptr) {
1616 				/*
1617 				 * The record header is located at the
1618 				 * beginning of the block, so just walk
1619 				 * b_rptr past it.
1620 				 */
1621 				mp->b_rptr = rptr = up;
1622 			} else {
1623 				/*
1624 				 * The record header is located in the middle
1625 				 * of a block, so copy any remaining data up.
1626 				 * This happens when an RPC message is
1627 				 * fragmented into multiple pieces and
1628 				 * a middle (or end) fragment immediately
1629 				 * follows a previous fragment in the same
1630 				 * message block.
1631 				 */
1632 				wptr = &rptr[len];
1633 				mp->b_wptr = wptr;
1634 				if (len) {
1635 					RPCLOG(32, "mir_do_rput: copying %d "
1636 					    "bytes of data up", len);
1637 					RPCLOG(32, " db_ref %d\n",
1638 					    (uint_t)mp->b_datap->db_ref);
1639 					bcopy(up, rptr, len);
1640 				}
1641 			}
1642 
1643 			/*
1644 			 * If we haven't received the complete record header
1645 			 * yet, then loop around to get the next block in the
1646 			 * STREAMS message. The logic at same_mblk label will
1647 			 * free the current block if it has become empty.
1648 			 */
1649 			if (frag_len < 0) {
1650 				RPCLOG(32, "mir_do_rput: frag_len is still < 0 "
1651 				"(%d)", len);
1652 				goto same_mblk;
1653 			}
1654 
1655 #ifdef	RPCDEBUG
1656 			if ((frag_header & MIR_LASTFRAG) == 0) {
1657 				RPCLOG0(32, "mir_do_rput: multi-fragment "
1658 				    "record\n");
1659 			}
1660 			{
1661 				uint_t l = frag_header & ~MIR_LASTFRAG;
1662 
1663 				if (l != 0 && mir->mir_max_msg_sizep &&
1664 				    l >= *mir->mir_max_msg_sizep) {
1665 					RPCLOG(32, "mir_do_rput: fragment size"
1666 					    " (%d) > maximum", l);
1667 					RPCLOG(32, " (%u)\n",
1668 					    *mir->mir_max_msg_sizep);
1669 				}
1670 			}
1671 #endif
1672 			/*
1673 			 * At this point we have retrieved the complete record
1674 			 * header for this fragment.  If the current block is
1675 			 * empty, then we need to free it and walk to the next
1676 			 * block.
1677 			 */
1678 			if (mp->b_rptr >= wptr) {
1679 				/*
1680 				 * If this is not the last fragment or if we
1681 				 * have not received all the data for this
1682 				 * RPC message, then loop around to the next
1683 				 * block.
1684 				 */
1685 				if (!(frag_header & MIR_LASTFRAG) ||
1686 					(frag_len -
1687 					(frag_header & ~MIR_LASTFRAG)) ||
1688 					!head_mp)
1689 					goto same_mblk;
1690 
1691 				/*
1692 				 * Quick walk to next block in the
1693 				 * STREAMS message.
1694 				 */
1695 				freeb(mp);
1696 				continue;
1697 			}
1698 		}
1699 
1700 		/*
1701 		 * We've collected the complete record header.  The data
1702 		 * in the current block is added to the end of the RPC
1703 		 * message.  Note that tail_mp is the same as mp after
1704 		 * this linkage.
1705 		 */
1706 		if (!head_mp)
1707 			head_mp = mp;
1708 		else if (tail_mp != mp) {
1709 			ASSERT((tail_mp->b_cont == NULL) ||
1710 			    (tail_mp->b_cont == mp));
1711 			tail_mp->b_cont = mp;
1712 		}
1713 		tail_mp = mp;
1714 
1715 		/*
1716 		 * Add the length of this block to the accumulated
1717 		 * fragment length.
1718 		 */
1719 		frag_len += len;
1720 		excess = frag_len - (frag_header & ~MIR_LASTFRAG);
1721 		/*
1722 		 * If we have not received all the data for this fragment,
1723 		 * then walk to the next block.
1724 		 */
1725 		if (excess < 0)
1726 			continue;
1727 
1728 		/*
1729 		 * We've received a complete fragment, so reset frag_len
1730 		 * for the next one.
1731 		 */
1732 		frag_len = -(int32_t)sizeof (uint32_t);
1733 
1734 		/*
1735 		 * Update rptr to point to the beginning of the next
1736 		 * fragment in this block.  If there are no more bytes
1737 		 * in the block (excess is 0), then rptr will be equal
1738 		 * to wptr.
1739 		 */
1740 		rptr = wptr - excess;
1741 
1742 		/*
1743 		 * Now we check to see if this fragment is the last one in
1744 		 * the RPC message.
1745 		 */
1746 		if (!(frag_header & MIR_LASTFRAG)) {
1747 			/*
1748 			 * This isn't the last one, so start processing the
1749 			 * next fragment.
1750 			 */
1751 			frag_header = 0;
1752 
1753 			/*
1754 			 * If excess is 0, the next fragment
1755 			 * starts at the beginning of the next block --
1756 			 * we "continue" to the end of the while loop and
1757 			 * walk to cont_mp.
1758 			 */
1759 			if (excess == 0)
1760 				continue;
1761 			RPCLOG0(32, "mir_do_rput: multi-fragment message with "
1762 			    "two or more fragments in one mblk\n");
1763 
1764 			/*
1765 			 * If excess is non-0, then the next fragment starts
1766 			 * in this block.  rptr points to the beginning
1767 			 * of the next fragment and we "goto same_mblk"
1768 			 * to continue processing.
1769 			 */
1770 			goto same_mblk;
1771 		}
1772 
1773 		/*
1774 		 * We've got a complete RPC message.  Before passing it
1775 		 * upstream, check to see if there is extra data in this
1776 		 * message block. If so, then we separate the excess
1777 		 * from the complete message. The excess data is processed
1778 		 * after the current message goes upstream.
1779 		 */
1780 		if (excess > 0) {
1781 			RPCLOG(32, "mir_do_rput: end of record, but excess "
1782 			    "data (%d bytes) in this mblk. dupb/copyb "
1783 			    "needed\n", excess);
1784 
1785 			/* Duplicate only the overlapping block. */
1786 			mp1 = dupb(tail_mp);
1787 
1788 			/*
1789 			 * dupb() might have failed due to ref count wrap around
1790 			 * so try a copyb().
1791 			 */
1792 			if (mp1 == NULL)
1793 				mp1 = copyb(tail_mp);
1794 
1795 			/*
1796 			 * Do not use bufcall() to schedule a "buffer
1797 			 * availability event."  The reason is that
1798 			 * bufcall() has problems.  For example, if memory
1799 			 * runs out, bufcall() itself will fail since it
1800 			 * needs to allocate memory.  The most appropriate
1801 			 * action right now is to disconnect this connection
1802 			 * as the system is under stress.  We should try to
1803 			 * free up resources.
1804 			 */
1805 			if (mp1 == NULL) {
1806 				freemsg(head_mp);
1807 				RPCLOG0(1, "mir_do_rput: dupb/copyb failed\n");
1808 				mir->mir_frag_header = 0;
1809 				mir->mir_frag_len = -(int)sizeof (uint32_t);
1810 				mir->mir_head_mp = NULL;
1811 				mir->mir_tail_mp = NULL;
1812 
1813 				mir_disconnect(q, mir);
1814 				return;
1815 			}
1816 
1817 			/*
1818 			 * The new message block is linked with the
1819 			 * continuation block in cont_mp.  We then point
1820 			 * cont_mp to the new block so that we will
1821 			 * process it next.
1822 			 */
1823 			mp1->b_cont = cont_mp;
1824 			cont_mp = mp1;
1825 			/*
1826 			 * Data in the new block begins at the
1827 			 * next fragment (rptr).
1828 			 */
1829 			cont_mp->b_rptr += (rptr - tail_mp->b_rptr);
1830 			ASSERT(cont_mp->b_rptr >= cont_mp->b_datap->db_base);
1831 			ASSERT(cont_mp->b_rptr <= cont_mp->b_wptr);
1832 
1833 			/* Data in the current fragment ends at rptr. */
1834 			tail_mp->b_wptr = rptr;
1835 			ASSERT(tail_mp->b_wptr <= tail_mp->b_datap->db_lim);
1836 			ASSERT(tail_mp->b_wptr >= tail_mp->b_rptr);
1837 
1838 		}
1839 
1840 		/* tail_mp is the last block with data for this RPC message. */
1841 		tail_mp->b_cont = NULL;
1842 
1843 		/* Pass the RPC message to the current consumer. */
1844 		switch (mir->mir_type) {
1845 		case RPC_CLIENT:
1846 			if (clnt_dispatch_notify(head_mp, mir->mir_zoneid)) {
1847 				/*
1848 				 * Mark this stream as active.  This marker
1849 				 * is used in mir_timer().
1850 				 */
1851 
1852 				mir->mir_clntreq = 1;
1853 				mir->mir_use_timestamp = lbolt;
1854 			} else
1855 				freemsg(head_mp);
1856 			break;
1857 
1858 		case RPC_SERVER:
1859 			/*
1860 			 * Check for flow control before passing the
1861 			 * message to KRPC.
1862 			 */
1863 
1864 			if (!mir->mir_hold_inbound) {
1865 			    if (mir->mir_krpc_cell) {
1866 				/*
1867 				 * If the reference count is 0
1868 				 * (not including this request),
1869 				 * then the stream is transitioning
1870 				 * from idle to non-idle.  In this case,
1871 				 * we cancel the idle timer.
1872 				 */
1873 				if (mir->mir_ref_cnt++ == 0)
1874 					stop_timer = B_TRUE;
1875 				if (mir_check_len(q,
1876 					(int32_t)msgdsize(mp), mp))
1877 						return;
1878 				svc_queuereq(q, head_mp); /* to KRPC */
1879 			    } else {
1880 				/*
1881 				 * Count # of times this happens. Should be
1882 				 * never, but experience shows otherwise.
1883 				 */
1884 				mir_krpc_cell_null++;
1885 				freemsg(head_mp);
1886 			    }
1887 
1888 			} else {
1889 				/*
1890 				 * If the outbound side of the stream is
1891 				 * flow controlled, then hold this message
1892 				 * until client catches up. mir_hold_inbound
1893 				 * is set in mir_wput and cleared in mir_wsrv.
1894 				 */
1895 				if (srv)
1896 					(void) putbq(q, head_mp);
1897 				else
1898 					(void) putq(q, head_mp);
1899 				mir->mir_inrservice = B_TRUE;
1900 			}
1901 			break;
1902 		default:
1903 			RPCLOG(1, "mir_rput: unknown mir_type %d\n",
1904 				mir->mir_type);
1905 			freemsg(head_mp);
1906 			break;
1907 		}
1908 
1909 		/*
1910 		 * Reset head_mp and frag_header since we're starting on a
1911 		 * new RPC fragment and message.
1912 		 */
1913 		head_mp = NULL;
1914 		tail_mp = NULL;
1915 		frag_header = 0;
1916 	} while ((mp = cont_mp) != NULL);
1917 
1918 	/*
1919 	 * Do a sanity check on the message length.  If this message is
1920 	 * getting excessively large, shut down the connection.
1921 	 */
1922 	if (head_mp != NULL && mir->mir_setup_complete &&
1923 		mir_check_len(q, frag_len, head_mp))
1924 		return;
1925 
1926 	/* Save our local copies back in the mir structure. */
1927 	mir->mir_frag_header = frag_header;
1928 	mir->mir_frag_len = frag_len;
1929 	mir->mir_head_mp = head_mp;
1930 	mir->mir_tail_mp = tail_mp;
1931 
1932 	/*
1933 	 * The timer is stopped after the whole message chain is processed.
1934 	 * The reason is that stopping the timer releases the mir_mutex
1935 	 * lock temporarily.  This means that the request can be serviced
1936 	 * while we are still processing the message chain.  This is not
1937 	 * good.  So we stop the timer here instead.
1938 	 *
1939 	 * Note that if the timer fires before we stop it, it will not
1940 	 * do any harm as MIR_SVC_QUIESCED() is false and mir_timer()
1941 	 * will just return;
1942 	 */
1943 	if (stop_timer) {
1944 		RPCLOG(16, "mir_do_rput stopping idle timer on 0x%p because "
1945 		    "ref cnt going to non zero\n", (void *) WR(q));
1946 		mir_svc_idle_stop(WR(q), mir);
1947 	}
1948 	mutex_exit(&mir->mir_mutex);
1949 }
1950 
1951 static void
1952 mir_rput(queue_t *q, mblk_t *mp)
1953 {
1954 	mir_do_rput(q, mp, 0);
1955 }
1956 
1957 static void
1958 mir_rput_proto(queue_t *q, mblk_t *mp)
1959 {
1960 	mir_t	*mir = (mir_t *)q->q_ptr;
1961 	uint32_t	type;
1962 	uint32_t reason = 0;
1963 
1964 	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
1965 
1966 	type = ((union T_primitives *)mp->b_rptr)->type;
1967 	switch (mir->mir_type) {
1968 	case RPC_CLIENT:
1969 		switch (type) {
1970 		case T_DISCON_IND:
1971 		    reason =
1972 			((struct T_discon_ind *)(mp->b_rptr))->DISCON_reason;
1973 		    /*FALLTHROUGH*/
1974 		case T_ORDREL_IND:
1975 			mutex_enter(&mir->mir_mutex);
1976 			if (mir->mir_head_mp) {
1977 				freemsg(mir->mir_head_mp);
1978 				mir->mir_head_mp = (mblk_t *)0;
1979 				mir->mir_tail_mp = (mblk_t *)0;
1980 			}
1981 			/*
1982 			 * We are disconnecting, but not necessarily
1983 			 * closing. By not closing, we will fail to
1984 			 * pick up a possibly changed global timeout value,
1985 			 * unless we store it now.
1986 			 */
1987 			mir->mir_idle_timeout = clnt_idle_timeout;
1988 			mir_clnt_idle_stop(WR(q), mir);
1989 
1990 			/*
1991 			 * Even though we are unconnected, we still
1992 			 * leave the idle timer going on the client. The
1993 			 * reason for is that if we've disconnected due
1994 			 * to a server-side disconnect, reset, or connection
1995 			 * timeout, there is a possibility the client may
1996 			 * retry the RPC request. This retry needs to done on
1997 			 * the same bound address for the server to interpret
1998 			 * it as such. However, we don't want
1999 			 * to wait forever for that possibility. If the
2000 			 * end-point stays unconnected for mir_idle_timeout
2001 			 * units of time, then that is a signal to the
2002 			 * connection manager to give up waiting for the
2003 			 * application (eg. NFS) to send a retry.
2004 			 */
2005 			mir_clnt_idle_start(WR(q), mir);
2006 			mutex_exit(&mir->mir_mutex);
2007 			clnt_dispatch_notifyall(WR(q), type, reason);
2008 			freemsg(mp);
2009 			return;
2010 		case T_ERROR_ACK:
2011 		{
2012 			struct T_error_ack	*terror;
2013 
2014 			terror = (struct T_error_ack *)mp->b_rptr;
2015 			RPCLOG(1, "mir_rput_proto T_ERROR_ACK for queue 0x%p",
2016 				(void *)q);
2017 			RPCLOG(1, " ERROR_prim: %s,",
2018 				rpc_tpiprim2name(terror->ERROR_prim));
2019 			RPCLOG(1, " TLI_error: %s,",
2020 				rpc_tpierr2name(terror->TLI_error));
2021 			RPCLOG(1, " UNIX_error: %d\n", terror->UNIX_error);
2022 			if (terror->ERROR_prim == T_DISCON_REQ)  {
2023 				clnt_dispatch_notifyall(WR(q), type, reason);
2024 				freemsg(mp);
2025 				return;
2026 			} else {
2027 				if (clnt_dispatch_notifyconn(WR(q), mp))
2028 					return;
2029 			}
2030 			break;
2031 		}
2032 		case T_OK_ACK:
2033 		{
2034 			struct T_ok_ack	*tok = (struct T_ok_ack *)mp->b_rptr;
2035 
2036 			if (tok->CORRECT_prim == T_DISCON_REQ) {
2037 				clnt_dispatch_notifyall(WR(q), type, reason);
2038 				freemsg(mp);
2039 				return;
2040 			} else {
2041 				if (clnt_dispatch_notifyconn(WR(q), mp))
2042 					return;
2043 			}
2044 			break;
2045 		}
2046 		case T_CONN_CON:
2047 		case T_INFO_ACK:
2048 		case T_OPTMGMT_ACK:
2049 			if (clnt_dispatch_notifyconn(WR(q), mp))
2050 				return;
2051 			break;
2052 		case T_BIND_ACK:
2053 			break;
2054 		default:
2055 			RPCLOG(1, "mir_rput: unexpected message %d "
2056 			    "for KRPC client\n",
2057 			    ((union T_primitives *)mp->b_rptr)->type);
2058 			break;
2059 		}
2060 		break;
2061 
2062 	case RPC_SERVER:
2063 		switch (type) {
2064 		case T_BIND_ACK:
2065 		{
2066 			struct T_bind_ack	*tbind;
2067 
2068 			/*
2069 			 * If this is a listening stream, then shut
2070 			 * off the idle timer.
2071 			 */
2072 			tbind = (struct T_bind_ack *)mp->b_rptr;
2073 			if (tbind->CONIND_number > 0) {
2074 				mutex_enter(&mir->mir_mutex);
2075 				mir_svc_idle_stop(WR(q), mir);
2076 
2077 				/*
2078 				 * mark this as a listen endpoint
2079 				 * for special handling.
2080 				 */
2081 
2082 				mir->mir_listen_stream = 1;
2083 				mutex_exit(&mir->mir_mutex);
2084 			}
2085 			break;
2086 		}
2087 		case T_DISCON_IND:
2088 		case T_ORDREL_IND:
2089 			RPCLOG(16, "mir_rput_proto: got %s indication\n",
2090 				type == T_DISCON_IND ? "disconnect"
2091 				: "orderly release");
2092 
2093 			/*
2094 			 * For listen endpoint just pass
2095 			 * on the message.
2096 			 */
2097 
2098 			if (mir->mir_listen_stream)
2099 				break;
2100 
2101 			mutex_enter(&mir->mir_mutex);
2102 
2103 			/*
2104 			 * If client wants to break off connection, record
2105 			 * that fact.
2106 			 */
2107 			mir_svc_start_close(WR(q), mir);
2108 
2109 			/*
2110 			 * If we are idle, then send the orderly release
2111 			 * or disconnect indication to nfsd.
2112 			 */
2113 			if (MIR_SVC_QUIESCED(mir)) {
2114 				mutex_exit(&mir->mir_mutex);
2115 				break;
2116 			}
2117 
2118 			RPCLOG(16, "mir_rput_proto: not idle, so "
2119 				"disconnect/ord rel indication not passed "
2120 				"upstream on 0x%p\n", (void *)q);
2121 
2122 			/*
2123 			 * Hold the indication until we get idle
2124 			 * If there already is an indication stored,
2125 			 * replace it if the new one is a disconnect. The
2126 			 * reasoning is that disconnection takes less time
2127 			 * to process, and once a client decides to
2128 			 * disconnect, we should do that.
2129 			 */
2130 			if (mir->mir_svc_pend_mp) {
2131 				if (type == T_DISCON_IND) {
2132 					RPCLOG(16, "mir_rput_proto: replacing"
2133 					    " held disconnect/ord rel"
2134 					    " indication with disconnect on"
2135 					    " 0x%p\n", (void *)q);
2136 
2137 					freemsg(mir->mir_svc_pend_mp);
2138 					mir->mir_svc_pend_mp = mp;
2139 				} else {
2140 					RPCLOG(16, "mir_rput_proto: already "
2141 					    "held a disconnect/ord rel "
2142 					    "indication. freeing ord rel "
2143 					    "ind on 0x%p\n", (void *)q);
2144 					freemsg(mp);
2145 				}
2146 			} else
2147 				mir->mir_svc_pend_mp = mp;
2148 
2149 			mutex_exit(&mir->mir_mutex);
2150 			return;
2151 
2152 		default:
2153 			/* nfsd handles server-side non-data messages. */
2154 			break;
2155 		}
2156 		break;
2157 
2158 	default:
2159 		break;
2160 	}
2161 
2162 	putnext(q, mp);
2163 }
2164 
2165 /*
2166  * The server-side read queues are used to hold inbound messages while
2167  * outbound flow control is exerted.  When outbound flow control is
2168  * relieved, mir_wsrv qenables the read-side queue.  Read-side queues
2169  * are not enabled by STREAMS and are explicitly noenable'ed in mir_open.
2170  *
2171  * For the server side,  we have two types of messages queued. The first type
2172  * are messages that are ready to be XDR decoded and and then sent to the
2173  * RPC program's dispatch routine. The second type are "raw" messages that
2174  * haven't been processed, i.e. assembled from rpc record fragements into
2175  * full requests. The only time we will see the second type of message
2176  * queued is if we have a memory allocation failure while processing a
2177  * a raw message. The field mir_first_non_processed_mblk will mark the
2178  * first such raw message. So the flow for server side is:
2179  *
2180  *	- send processed queued messages to kRPC until we run out or find
2181  *	  one that needs additional processing because we were short on memory
2182  *	  earlier
2183  *	- process a message that was deferred because of lack of
2184  *	  memory
2185  *	- continue processing messages until the queue empties or we
2186  *	  have to stop because of lack of memory
2187  *	- during each of the above phase, if the queue is empty and
2188  *	  there are no pending messages that were passed to the RPC
2189  *	  layer, send upstream the pending disconnect/ordrel indication if
2190  *	  there is one
2191  *
2192  * The read-side queue is also enabled by a bufcall callback if dupmsg
2193  * fails in mir_rput.
2194  */
2195 static void
2196 mir_rsrv(queue_t *q)
2197 {
2198 	mir_t	*mir;
2199 	mblk_t	*mp;
2200 	mblk_t	*cmp = NULL;
2201 	boolean_t stop_timer = B_FALSE;
2202 
2203 	mir = (mir_t *)q->q_ptr;
2204 	mutex_enter(&mir->mir_mutex);
2205 
2206 	mp = NULL;
2207 	switch (mir->mir_type) {
2208 	case RPC_SERVER:
2209 		if (mir->mir_ref_cnt == 0)
2210 			mir->mir_hold_inbound = 0;
2211 		if (mir->mir_hold_inbound) {
2212 
2213 			ASSERT(cmp == NULL);
2214 			if (q->q_first == NULL) {
2215 
2216 				MIR_CLEAR_INRSRV(mir);
2217 
2218 				if (MIR_SVC_QUIESCED(mir)) {
2219 					cmp = mir->mir_svc_pend_mp;
2220 					mir->mir_svc_pend_mp = NULL;
2221 				}
2222 			}
2223 
2224 			mutex_exit(&mir->mir_mutex);
2225 
2226 			if (cmp != NULL) {
2227 				RPCLOG(16, "mir_rsrv: line %d: sending a held "
2228 				    "disconnect/ord rel indication upstream\n",
2229 				    __LINE__);
2230 				putnext(q, cmp);
2231 			}
2232 
2233 			return;
2234 		}
2235 		while (mp = getq(q)) {
2236 			if (mir->mir_krpc_cell) {
2237 				/*
2238 				 * If we were idle, turn off idle timer since
2239 				 * we aren't idle any more.
2240 				 */
2241 				if (mir->mir_ref_cnt++ == 0)
2242 					stop_timer = B_TRUE;
2243 				if (mir_check_len(q,
2244 					(int32_t)msgdsize(mp), mp))
2245 						return;
2246 				svc_queuereq(q, mp);
2247 			} else {
2248 				/*
2249 				 * Count # of times this happens. Should be
2250 				 * never, but experience shows otherwise.
2251 				 */
2252 				mir_krpc_cell_null++;
2253 				freemsg(mp);
2254 			}
2255 		}
2256 		break;
2257 	case RPC_CLIENT:
2258 		break;
2259 	default:
2260 		RPCLOG(1, "mir_rsrv: unexpected mir_type %d\n", mir->mir_type);
2261 
2262 		if (q->q_first == NULL)
2263 			MIR_CLEAR_INRSRV(mir);
2264 
2265 		mutex_exit(&mir->mir_mutex);
2266 
2267 		return;
2268 	}
2269 
2270 	/*
2271 	 * The timer is stopped after all the messages are processed.
2272 	 * The reason is that stopping the timer releases the mir_mutex
2273 	 * lock temporarily.  This means that the request can be serviced
2274 	 * while we are still processing the message queue.  This is not
2275 	 * good.  So we stop the timer here instead.
2276 	 */
2277 	if (stop_timer)  {
2278 		RPCLOG(16, "mir_rsrv stopping idle timer on 0x%p because ref "
2279 		    "cnt going to non zero\n", (void *)WR(q));
2280 		mir_svc_idle_stop(WR(q), mir);
2281 	}
2282 
2283 	if (q->q_first == NULL) {
2284 
2285 		MIR_CLEAR_INRSRV(mir);
2286 
2287 		ASSERT(cmp == NULL);
2288 		if (mir->mir_type == RPC_SERVER && MIR_SVC_QUIESCED(mir)) {
2289 			cmp = mir->mir_svc_pend_mp;
2290 			mir->mir_svc_pend_mp = NULL;
2291 		}
2292 
2293 		mutex_exit(&mir->mir_mutex);
2294 
2295 		if (cmp != NULL) {
2296 			RPCLOG(16, "mir_rsrv: line %d: sending a held "
2297 				"disconnect/ord rel indication upstream\n",
2298 				__LINE__);
2299 			putnext(q, cmp);
2300 		}
2301 
2302 		return;
2303 	}
2304 	mutex_exit(&mir->mir_mutex);
2305 }
2306 
2307 static int mir_svc_policy_fails;
2308 
2309 /*
2310  * Called to send an event code to nfsd/lockd so that it initiates
2311  * connection close.
2312  */
2313 static int
2314 mir_svc_policy_notify(queue_t *q, int event)
2315 {
2316 	mblk_t	*mp;
2317 #ifdef DEBUG
2318 	mir_t *mir = (mir_t *)q->q_ptr;
2319 	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
2320 #endif
2321 	ASSERT(q->q_flag & QREADR);
2322 
2323 	/*
2324 	 * Create an M_DATA message with the event code and pass it to the
2325 	 * Stream head (nfsd or whoever created the stream will consume it).
2326 	 */
2327 	mp = allocb(sizeof (int), BPRI_HI);
2328 
2329 	if (!mp) {
2330 
2331 		mir_svc_policy_fails++;
2332 		RPCLOG(16, "mir_svc_policy_notify: could not allocate event "
2333 			"%d\n", event);
2334 		return (ENOMEM);
2335 	}
2336 
2337 	U32_TO_BE32(event, mp->b_rptr);
2338 	mp->b_wptr = mp->b_rptr + sizeof (int);
2339 	putnext(q, mp);
2340 	return (0);
2341 }
2342 
2343 /*
2344  * Server side: start the close phase. We want to get this rpcmod slot in an
2345  * idle state before mir_close() is called.
2346  */
2347 static void
2348 mir_svc_start_close(queue_t *wq, mir_t *mir)
2349 {
2350 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
2351 	ASSERT((wq->q_flag & QREADR) == 0);
2352 	ASSERT(mir->mir_type == RPC_SERVER);
2353 
2354 
2355 	/*
2356 	 * Do not accept any more messages.
2357 	 */
2358 	mir->mir_svc_no_more_msgs = 1;
2359 
2360 	/*
2361 	 * Next two statements will make the read service procedure invoke
2362 	 * svc_queuereq() on everything stuck in the streams read queue.
2363 	 * It's not necessary because enabling the write queue will
2364 	 * have the same effect, but why not speed the process along?
2365 	 */
2366 	mir->mir_hold_inbound = 0;
2367 	qenable(RD(wq));
2368 
2369 	/*
2370 	 * Meanwhile force the write service procedure to send the
2371 	 * responses downstream, regardless of flow control.
2372 	 */
2373 	qenable(wq);
2374 }
2375 
2376 /*
2377  * This routine is called directly by KRPC after a request is completed,
2378  * whether a reply was sent or the request was dropped.
2379  */
2380 static void
2381 mir_svc_release(queue_t *wq, mblk_t *mp)
2382 {
2383 	mir_t   *mir = (mir_t *)wq->q_ptr;
2384 	mblk_t	*cmp = NULL;
2385 
2386 	ASSERT((wq->q_flag & QREADR) == 0);
2387 	if (mp)
2388 		freemsg(mp);
2389 
2390 	mutex_enter(&mir->mir_mutex);
2391 
2392 	/*
2393 	 * Start idle processing if this is the last reference.
2394 	 */
2395 	if ((mir->mir_ref_cnt == 1) && (mir->mir_inrservice == 0)) {
2396 
2397 		RPCLOG(16, "mir_svc_release starting idle timer on 0x%p "
2398 		    "because ref cnt is zero\n", (void *) wq);
2399 
2400 		cmp = mir->mir_svc_pend_mp;
2401 		mir->mir_svc_pend_mp = NULL;
2402 		mir_svc_idle_start(wq, mir);
2403 	}
2404 
2405 	mir->mir_ref_cnt--;
2406 	ASSERT(mir->mir_ref_cnt >= 0);
2407 
2408 	/*
2409 	 * Wake up the thread waiting to close.
2410 	 */
2411 
2412 	if ((mir->mir_ref_cnt == 0) && mir->mir_closing)
2413 		cv_signal(&mir->mir_condvar);
2414 
2415 	mutex_exit(&mir->mir_mutex);
2416 
2417 	if (cmp) {
2418 		RPCLOG(16, "mir_svc_release: sending a held "
2419 		    "disconnect/ord rel indication upstream on queue 0x%p\n",
2420 		    (void *)RD(wq));
2421 
2422 		putnext(RD(wq), cmp);
2423 	}
2424 }
2425 
2426 /*
2427  * This routine is called by server-side KRPC when it is ready to
2428  * handle inbound messages on the stream.
2429  */
2430 static void
2431 mir_svc_start(queue_t *wq)
2432 {
2433 	mir_t   *mir = (mir_t *)wq->q_ptr;
2434 
2435 	/*
2436 	 * no longer need to take the mir_mutex because the
2437 	 * mir_setup_complete field has been moved out of
2438 	 * the binary field protected by the mir_mutex.
2439 	 */
2440 
2441 	mir->mir_setup_complete = 1;
2442 	qenable(RD(wq));
2443 }
2444 
2445 /*
2446  * client side wrapper for stopping timer with normal idle timeout.
2447  */
2448 static void
2449 mir_clnt_idle_stop(queue_t *wq, mir_t *mir)
2450 {
2451 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
2452 	ASSERT((wq->q_flag & QREADR) == 0);
2453 	ASSERT(mir->mir_type == RPC_CLIENT);
2454 
2455 	mir_timer_stop(mir);
2456 }
2457 
2458 /*
2459  * client side wrapper for stopping timer with normal idle timeout.
2460  */
2461 static void
2462 mir_clnt_idle_start(queue_t *wq, mir_t *mir)
2463 {
2464 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
2465 	ASSERT((wq->q_flag & QREADR) == 0);
2466 	ASSERT(mir->mir_type == RPC_CLIENT);
2467 
2468 	mir_timer_start(wq, mir, mir->mir_idle_timeout);
2469 }
2470 
2471 /*
2472  * client side only. Forces rpcmod to stop sending T_ORDREL_REQs on
2473  * end-points that aren't connected.
2474  */
2475 static void
2476 mir_clnt_idle_do_stop(queue_t *wq)
2477 {
2478 	mir_t   *mir = (mir_t *)wq->q_ptr;
2479 
2480 	RPCLOG(1, "mir_clnt_idle_do_stop: wq 0x%p\n", (void *)wq);
2481 	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
2482 	mutex_enter(&mir->mir_mutex);
2483 	mir_clnt_idle_stop(wq, mir);
2484 	mutex_exit(&mir->mir_mutex);
2485 }
2486 
2487 /*
2488  * Timer handler.  It handles idle timeout and memory shortage problem.
2489  */
2490 static void
2491 mir_timer(void *arg)
2492 {
2493 	queue_t *wq = (queue_t *)arg;
2494 	mir_t *mir = (mir_t *)wq->q_ptr;
2495 	boolean_t notify;
2496 
2497 	mutex_enter(&mir->mir_mutex);
2498 
2499 	/*
2500 	 * mir_timer_call is set only when either mir_timer_[start|stop]
2501 	 * is progressing.  And mir_timer() can only be run while they
2502 	 * are progressing if the timer is being stopped.  So just
2503 	 * return.
2504 	 */
2505 	if (mir->mir_timer_call) {
2506 		mutex_exit(&mir->mir_mutex);
2507 		return;
2508 	}
2509 	mir->mir_timer_id = 0;
2510 
2511 	switch (mir->mir_type) {
2512 	case RPC_CLIENT:
2513 
2514 		/*
2515 		 * For clients, the timer fires at clnt_idle_timeout
2516 		 * intervals.  If the activity marker (mir_clntreq) is
2517 		 * zero, then the stream has been idle since the last
2518 		 * timer event and we notify KRPC.  If mir_clntreq is
2519 		 * non-zero, then the stream is active and we just
2520 		 * restart the timer for another interval.  mir_clntreq
2521 		 * is set to 1 in mir_wput for every request passed
2522 		 * downstream.
2523 		 *
2524 		 * If this was a memory shortage timer reset the idle
2525 		 * timeout regardless; the mir_clntreq will not be a
2526 		 * valid indicator.
2527 		 *
2528 		 * The timer is initially started in mir_wput during
2529 		 * RPC_CLIENT ioctl processing.
2530 		 *
2531 		 * The timer interval can be changed for individual
2532 		 * streams with the ND variable "mir_idle_timeout".
2533 		 */
2534 		if (mir->mir_clntreq > 0 && mir->mir_use_timestamp +
2535 		    MSEC_TO_TICK(mir->mir_idle_timeout) - lbolt >= 0) {
2536 			clock_t tout;
2537 
2538 			tout = mir->mir_idle_timeout -
2539 				TICK_TO_MSEC(lbolt - mir->mir_use_timestamp);
2540 			if (tout < 0)
2541 				tout = 1000;
2542 #if 0
2543 printf("mir_timer[%d < %d + %d]: reset client timer to %d (ms)\n",
2544 TICK_TO_MSEC(lbolt), TICK_TO_MSEC(mir->mir_use_timestamp),
2545 mir->mir_idle_timeout, tout);
2546 #endif
2547 			mir->mir_clntreq = 0;
2548 			mir_timer_start(wq, mir, tout);
2549 			mutex_exit(&mir->mir_mutex);
2550 			return;
2551 		}
2552 #if 0
2553 printf("mir_timer[%d]: doing client timeout\n", lbolt / hz);
2554 #endif
2555 		/*
2556 		 * We are disconnecting, but not necessarily
2557 		 * closing. By not closing, we will fail to
2558 		 * pick up a possibly changed global timeout value,
2559 		 * unless we store it now.
2560 		 */
2561 		mir->mir_idle_timeout = clnt_idle_timeout;
2562 		mir_clnt_idle_start(wq, mir);
2563 
2564 		mutex_exit(&mir->mir_mutex);
2565 		/*
2566 		 * We pass T_ORDREL_REQ as an integer value
2567 		 * to KRPC as the indication that the stream
2568 		 * is idle.  This is not a T_ORDREL_REQ message,
2569 		 * it is just a convenient value since we call
2570 		 * the same KRPC routine for T_ORDREL_INDs and
2571 		 * T_DISCON_INDs.
2572 		 */
2573 		clnt_dispatch_notifyall(wq, T_ORDREL_REQ, 0);
2574 		return;
2575 
2576 	case RPC_SERVER:
2577 
2578 		/*
2579 		 * For servers, the timer is only running when the stream
2580 		 * is really idle or memory is short.  The timer is started
2581 		 * by mir_wput when mir_type is set to RPC_SERVER and
2582 		 * by mir_svc_idle_start whenever the stream goes idle
2583 		 * (mir_ref_cnt == 0).  The timer is cancelled in
2584 		 * mir_rput whenever a new inbound request is passed to KRPC
2585 		 * and the stream was previously idle.
2586 		 *
2587 		 * The timer interval can be changed for individual
2588 		 * streams with the ND variable "mir_idle_timeout".
2589 		 *
2590 		 * If the stream is not idle do nothing.
2591 		 */
2592 		if (!MIR_SVC_QUIESCED(mir)) {
2593 			mutex_exit(&mir->mir_mutex);
2594 			return;
2595 		}
2596 
2597 		notify = !mir->mir_inrservice;
2598 		mutex_exit(&mir->mir_mutex);
2599 
2600 		/*
2601 		 * If there is no packet queued up in read queue, the stream
2602 		 * is really idle so notify nfsd to close it.
2603 		 */
2604 		if (notify) {
2605 			RPCLOG(16, "mir_timer: telling stream head listener "
2606 			    "to close stream (0x%p)\n", (void *) RD(wq));
2607 			(void) mir_svc_policy_notify(RD(wq), 1);
2608 		}
2609 		return;
2610 	default:
2611 		RPCLOG(1, "mir_timer: unexpected mir_type %d\n",
2612 			mir->mir_type);
2613 		mutex_exit(&mir->mir_mutex);
2614 		return;
2615 	}
2616 }
2617 
2618 /*
2619  * Called by the RPC package to send either a call or a return, or a
2620  * transport connection request.  Adds the record marking header.
2621  */
2622 static void
2623 mir_wput(queue_t *q, mblk_t *mp)
2624 {
2625 	uint_t	frag_header;
2626 	mir_t	*mir = (mir_t *)q->q_ptr;
2627 	uchar_t	*rptr = mp->b_rptr;
2628 
2629 	if (!mir) {
2630 		freemsg(mp);
2631 		return;
2632 	}
2633 
2634 	if (mp->b_datap->db_type != M_DATA) {
2635 		mir_wput_other(q, mp);
2636 		return;
2637 	}
2638 
2639 	if (mir->mir_ordrel_pending == 1) {
2640 		freemsg(mp);
2641 		RPCLOG(16, "mir_wput wq 0x%p: got data after T_ORDREL_REQ\n",
2642 			(void *)q);
2643 		return;
2644 	}
2645 
2646 	frag_header = (uint_t)DLEN(mp);
2647 	frag_header |= MIR_LASTFRAG;
2648 
2649 	/* Stick in the 4 byte record marking header. */
2650 	if ((rptr - mp->b_datap->db_base) < sizeof (uint32_t) ||
2651 	    !IS_P2ALIGNED(mp->b_rptr, sizeof (uint32_t))) {
2652 		/*
2653 		 * Since we know that M_DATA messages are created exclusively
2654 		 * by KRPC, we expect that KRPC will leave room for our header
2655 		 * and 4 byte align which is normal for XDR.
2656 		 * If KRPC (or someone else) does not cooperate, then we
2657 		 * just throw away the message.
2658 		 */
2659 		RPCLOG(1, "mir_wput: KRPC did not leave space for record "
2660 		    "fragment header (%d bytes left)\n",
2661 		    (int)(rptr - mp->b_datap->db_base));
2662 		freemsg(mp);
2663 		return;
2664 	}
2665 	rptr -= sizeof (uint32_t);
2666 	*(uint32_t *)rptr = htonl(frag_header);
2667 	mp->b_rptr = rptr;
2668 
2669 	mutex_enter(&mir->mir_mutex);
2670 	if (mir->mir_type == RPC_CLIENT) {
2671 		/*
2672 		 * For the client, set mir_clntreq to indicate that the
2673 		 * connection is active.
2674 		 */
2675 		mir->mir_clntreq = 1;
2676 		mir->mir_use_timestamp = lbolt;
2677 	}
2678 
2679 	/*
2680 	 * If we haven't already queued some data and the downstream module
2681 	 * can accept more data, send it on, otherwise we queue the message
2682 	 * and take other actions depending on mir_type.
2683 	 */
2684 	if (!mir->mir_inwservice && MIR_WCANPUTNEXT(mir, q)) {
2685 		mutex_exit(&mir->mir_mutex);
2686 
2687 		/*
2688 		 * Now we pass the RPC message downstream.
2689 		 */
2690 		putnext(q, mp);
2691 		return;
2692 	}
2693 
2694 	switch (mir->mir_type) {
2695 	case RPC_CLIENT:
2696 		/*
2697 		 * Check for a previous duplicate request on the
2698 		 * queue.  If there is one, then we throw away
2699 		 * the current message and let the previous one
2700 		 * go through.  If we can't find a duplicate, then
2701 		 * send this one.  This tap dance is an effort
2702 		 * to reduce traffic and processing requirements
2703 		 * under load conditions.
2704 		 */
2705 		if (mir_clnt_dup_request(q, mp)) {
2706 			mutex_exit(&mir->mir_mutex);
2707 			freemsg(mp);
2708 			return;
2709 		}
2710 		break;
2711 	case RPC_SERVER:
2712 		/*
2713 		 * Set mir_hold_inbound so that new inbound RPC
2714 		 * messages will be held until the client catches
2715 		 * up on the earlier replies.  This flag is cleared
2716 		 * in mir_wsrv after flow control is relieved;
2717 		 * the read-side queue is also enabled at that time.
2718 		 */
2719 		mir->mir_hold_inbound = 1;
2720 		break;
2721 	default:
2722 		RPCLOG(1, "mir_wput: unexpected mir_type %d\n", mir->mir_type);
2723 		break;
2724 	}
2725 	mir->mir_inwservice = 1;
2726 	(void) putq(q, mp);
2727 	mutex_exit(&mir->mir_mutex);
2728 }
2729 
2730 static void
2731 mir_wput_other(queue_t *q, mblk_t *mp)
2732 {
2733 	mir_t	*mir = (mir_t *)q->q_ptr;
2734 	struct iocblk	*iocp;
2735 	uchar_t	*rptr = mp->b_rptr;
2736 	bool_t	flush_in_svc = FALSE;
2737 
2738 	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
2739 	switch (mp->b_datap->db_type) {
2740 	case M_IOCTL:
2741 		iocp = (struct iocblk *)rptr;
2742 		switch (iocp->ioc_cmd) {
2743 		case RPC_CLIENT:
2744 			mutex_enter(&mir->mir_mutex);
2745 			if (mir->mir_type != 0 &&
2746 			    mir->mir_type != iocp->ioc_cmd) {
2747 ioc_eperm:
2748 				mutex_exit(&mir->mir_mutex);
2749 				iocp->ioc_error = EPERM;
2750 				iocp->ioc_count = 0;
2751 				mp->b_datap->db_type = M_IOCACK;
2752 				qreply(q, mp);
2753 				return;
2754 			}
2755 
2756 			mir->mir_type = iocp->ioc_cmd;
2757 
2758 			/*
2759 			 * Clear mir_hold_inbound which was set to 1 by
2760 			 * mir_open.  This flag is not used on client
2761 			 * streams.
2762 			 */
2763 			mir->mir_hold_inbound = 0;
2764 			mir->mir_max_msg_sizep = &clnt_max_msg_size;
2765 
2766 			/*
2767 			 * Start the idle timer.  See mir_timer() for more
2768 			 * information on how client timers work.
2769 			 */
2770 			mir->mir_idle_timeout = clnt_idle_timeout;
2771 			mir_clnt_idle_start(q, mir);
2772 			mutex_exit(&mir->mir_mutex);
2773 
2774 			mp->b_datap->db_type = M_IOCACK;
2775 			qreply(q, mp);
2776 			return;
2777 		case RPC_SERVER:
2778 			mutex_enter(&mir->mir_mutex);
2779 			if (mir->mir_type != 0 &&
2780 			    mir->mir_type != iocp->ioc_cmd)
2781 				goto ioc_eperm;
2782 
2783 			/*
2784 			 * We don't clear mir_hold_inbound here because
2785 			 * mir_hold_inbound is used in the flow control
2786 			 * model. If we cleared it here, then we'd commit
2787 			 * a small violation to the model where the transport
2788 			 * might immediately block downstream flow.
2789 			 */
2790 
2791 			mir->mir_type = iocp->ioc_cmd;
2792 			mir->mir_max_msg_sizep = &svc_max_msg_size;
2793 
2794 			/*
2795 			 * Start the idle timer.  See mir_timer() for more
2796 			 * information on how server timers work.
2797 			 *
2798 			 * Note that it is important to start the idle timer
2799 			 * here so that connections time out even if we
2800 			 * never receive any data on them.
2801 			 */
2802 			mir->mir_idle_timeout = svc_idle_timeout;
2803 			RPCLOG(16, "mir_wput_other starting idle timer on 0x%p "
2804 			    "because we got RPC_SERVER ioctl\n", (void *)q);
2805 			mir_svc_idle_start(q, mir);
2806 			mutex_exit(&mir->mir_mutex);
2807 
2808 			mp->b_datap->db_type = M_IOCACK;
2809 			qreply(q, mp);
2810 			return;
2811 		default:
2812 			break;
2813 		}
2814 		break;
2815 
2816 	case M_PROTO:
2817 		if (mir->mir_type == RPC_CLIENT) {
2818 			/*
2819 			 * We are likely being called from the context of a
2820 			 * service procedure. So we need to enqueue. However
2821 			 * enqueing may put our message behind data messages.
2822 			 * So flush the data first.
2823 			 */
2824 			flush_in_svc = TRUE;
2825 		}
2826 		if ((mp->b_wptr - rptr) < sizeof (uint32_t) ||
2827 				!IS_P2ALIGNED(rptr, sizeof (uint32_t)))
2828 			break;
2829 
2830 		switch (((union T_primitives *)rptr)->type) {
2831 		case T_DATA_REQ:
2832 			/* Don't pass T_DATA_REQ messages downstream. */
2833 			freemsg(mp);
2834 			return;
2835 		case T_ORDREL_REQ:
2836 			RPCLOG(8, "mir_wput_other wq 0x%p: got T_ORDREL_REQ\n",
2837 			    (void *)q);
2838 			mutex_enter(&mir->mir_mutex);
2839 			if (mir->mir_type != RPC_SERVER) {
2840 				/*
2841 				 * We are likely being called from
2842 				 * clnt_dispatch_notifyall(). Sending
2843 				 * a T_ORDREL_REQ will result in
2844 				 * a some kind of _IND message being sent,
2845 				 * will be another call to
2846 				 * clnt_dispatch_notifyall(). To keep the stack
2847 				 * lean, queue this message.
2848 				 */
2849 				mir->mir_inwservice = 1;
2850 				(void) putq(q, mp);
2851 				mutex_exit(&mir->mir_mutex);
2852 				return;
2853 			}
2854 
2855 			/*
2856 			 * Mark the structure such that we don't accept any
2857 			 * more requests from client. We could defer this
2858 			 * until we actually send the orderly release
2859 			 * request downstream, but all that does is delay
2860 			 * the closing of this stream.
2861 			 */
2862 			RPCLOG(16, "mir_wput_other wq 0x%p: got T_ORDREL_REQ "
2863 			    " so calling mir_svc_start_close\n", (void *)q);
2864 
2865 			mir_svc_start_close(q, mir);
2866 
2867 			/*
2868 			 * If we have sent down a T_ORDREL_REQ, don't send
2869 			 * any more.
2870 			 */
2871 			if (mir->mir_ordrel_pending) {
2872 				freemsg(mp);
2873 				mutex_exit(&mir->mir_mutex);
2874 				return;
2875 			}
2876 
2877 			/*
2878 			 * If the stream is not idle, then we hold the
2879 			 * orderly release until it becomes idle.  This
2880 			 * ensures that KRPC will be able to reply to
2881 			 * all requests that we have passed to it.
2882 			 *
2883 			 * We also queue the request if there is data already
2884 			 * queued, because we cannot allow the T_ORDREL_REQ
2885 			 * to go before data. When we had a separate reply
2886 			 * count, this was not a problem, because the
2887 			 * reply count was reconciled when mir_wsrv()
2888 			 * completed.
2889 			 */
2890 			if (!MIR_SVC_QUIESCED(mir) ||
2891 			    mir->mir_inwservice == 1) {
2892 				mir->mir_inwservice = 1;
2893 				(void) putq(q, mp);
2894 
2895 				RPCLOG(16, "mir_wput_other: queuing "
2896 				    "T_ORDREL_REQ on 0x%p\n", (void *)q);
2897 
2898 				mutex_exit(&mir->mir_mutex);
2899 				return;
2900 			}
2901 
2902 			/*
2903 			 * Mark the structure so that we know we sent
2904 			 * an orderly release request, and reset the idle timer.
2905 			 */
2906 			mir->mir_ordrel_pending = 1;
2907 
2908 			RPCLOG(16, "mir_wput_other: calling mir_svc_idle_start"
2909 			    " on 0x%p because we got T_ORDREL_REQ\n",
2910 			    (void *)q);
2911 
2912 			mir_svc_idle_start(q, mir);
2913 			mutex_exit(&mir->mir_mutex);
2914 
2915 			/*
2916 			 * When we break, we will putnext the T_ORDREL_REQ.
2917 			 */
2918 			break;
2919 
2920 		case T_CONN_REQ:
2921 			mutex_enter(&mir->mir_mutex);
2922 			if (mir->mir_head_mp != NULL) {
2923 				freemsg(mir->mir_head_mp);
2924 				mir->mir_head_mp = NULL;
2925 				mir->mir_tail_mp = NULL;
2926 			}
2927 			mir->mir_frag_len = -(int32_t)sizeof (uint32_t);
2928 			/*
2929 			 * Restart timer in case mir_clnt_idle_do_stop() was
2930 			 * called.
2931 			 */
2932 			mir->mir_idle_timeout = clnt_idle_timeout;
2933 			mir_clnt_idle_stop(q, mir);
2934 			mir_clnt_idle_start(q, mir);
2935 			mutex_exit(&mir->mir_mutex);
2936 			break;
2937 
2938 		default:
2939 			/*
2940 			 * T_DISCON_REQ is one of the interesting default
2941 			 * cases here. Ideally, an M_FLUSH is done before
2942 			 * T_DISCON_REQ is done. However, that is somewhat
2943 			 * cumbersome for clnt_cots.c to do. So we queue
2944 			 * T_DISCON_REQ, and let the service procedure
2945 			 * flush all M_DATA.
2946 			 */
2947 			break;
2948 		}
2949 		/* fallthru */;
2950 	default:
2951 		if (mp->b_datap->db_type >= QPCTL) {
2952 			if (mp->b_datap->db_type == M_FLUSH) {
2953 				if (mir->mir_type == RPC_CLIENT &&
2954 				    *mp->b_rptr & FLUSHW) {
2955 					RPCLOG(32, "mir_wput_other: flushing "
2956 					    "wq 0x%p\n", (void *)q);
2957 					if (*mp->b_rptr & FLUSHBAND) {
2958 						flushband(q, *(mp->b_rptr + 1),
2959 							FLUSHDATA);
2960 					} else {
2961 						flushq(q, FLUSHDATA);
2962 					}
2963 				} else {
2964 					RPCLOG(32, "mir_wput_other: ignoring "
2965 					    "M_FLUSH on wq 0x%p\n", (void *)q);
2966 				}
2967 			}
2968 			break;
2969 		}
2970 
2971 		mutex_enter(&mir->mir_mutex);
2972 		if (mir->mir_inwservice == 0 && MIR_WCANPUTNEXT(mir, q)) {
2973 			mutex_exit(&mir->mir_mutex);
2974 			break;
2975 		}
2976 		mir->mir_inwservice = 1;
2977 		mir->mir_inwflushdata = flush_in_svc;
2978 		(void) putq(q, mp);
2979 		mutex_exit(&mir->mir_mutex);
2980 		qenable(q);
2981 
2982 		return;
2983 	}
2984 	putnext(q, mp);
2985 }
2986 
2987 static void
2988 mir_wsrv(queue_t *q)
2989 {
2990 	mblk_t	*mp;
2991 	mir_t	*mir;
2992 	bool_t flushdata;
2993 
2994 	mir = (mir_t *)q->q_ptr;
2995 	mutex_enter(&mir->mir_mutex);
2996 
2997 	flushdata = mir->mir_inwflushdata;
2998 	mir->mir_inwflushdata = 0;
2999 
3000 	while (mp = getq(q)) {
3001 		if (mp->b_datap->db_type == M_DATA) {
3002 			/*
3003 			 * Do not send any more data if we have sent
3004 			 * a T_ORDREL_REQ.
3005 			 */
3006 			if (flushdata || mir->mir_ordrel_pending == 1) {
3007 				freemsg(mp);
3008 				continue;
3009 			}
3010 
3011 			/*
3012 			 * Make sure that the stream can really handle more
3013 			 * data.
3014 			 */
3015 			if (!MIR_WCANPUTNEXT(mir, q)) {
3016 				(void) putbq(q, mp);
3017 				mutex_exit(&mir->mir_mutex);
3018 				return;
3019 			}
3020 
3021 			/*
3022 			 * Now we pass the RPC message downstream.
3023 			 */
3024 			mutex_exit(&mir->mir_mutex);
3025 			putnext(q, mp);
3026 			mutex_enter(&mir->mir_mutex);
3027 			continue;
3028 		}
3029 
3030 		/*
3031 		 * This is not an RPC message, pass it downstream
3032 		 * (ignoring flow control) if the server side is not sending a
3033 		 * T_ORDREL_REQ downstream.
3034 		 */
3035 		if (mir->mir_type != RPC_SERVER ||
3036 			    ((union T_primitives *)mp->b_rptr)->type !=
3037 			    T_ORDREL_REQ) {
3038 			mutex_exit(&mir->mir_mutex);
3039 			putnext(q, mp);
3040 			mutex_enter(&mir->mir_mutex);
3041 			continue;
3042 		}
3043 
3044 		if (mir->mir_ordrel_pending == 1) {
3045 			/*
3046 			 * Don't send two T_ORDRELs
3047 			 */
3048 			freemsg(mp);
3049 			continue;
3050 		}
3051 
3052 		/*
3053 		 * Mark the structure so that we know we sent an orderly
3054 		 * release request.  We will check to see slot is idle at the
3055 		 * end of this routine, and if so, reset the idle timer to
3056 		 * handle orderly release timeouts.
3057 		 */
3058 		mir->mir_ordrel_pending = 1;
3059 		RPCLOG(16, "mir_wsrv: sending ordrel req on q 0x%p\n",
3060 								(void *)q);
3061 		/*
3062 		 * Send the orderly release downstream. If there are other
3063 		 * pending replies we won't be able to send them.  However,
3064 		 * the only reason we should send the orderly release is if
3065 		 * we were idle, or if an unusual event occurred.
3066 		 */
3067 		mutex_exit(&mir->mir_mutex);
3068 		putnext(q, mp);
3069 		mutex_enter(&mir->mir_mutex);
3070 	}
3071 
3072 	if (q->q_first == NULL)
3073 		/*
3074 		 * If we call mir_svc_idle_start() below, then
3075 		 * clearing mir_inwservice here will also result in
3076 		 * any thread waiting in mir_close() to be signaled.
3077 		 */
3078 		mir->mir_inwservice = 0;
3079 
3080 	if (mir->mir_type != RPC_SERVER) {
3081 		mutex_exit(&mir->mir_mutex);
3082 		return;
3083 	}
3084 
3085 	/*
3086 	 * If idle we call mir_svc_idle_start to start the timer (or wakeup
3087 	 * a close). Also make sure not to start the idle timer on the
3088 	 * listener stream. This can cause nfsd to send an orderly release
3089 	 * command on the listener stream.
3090 	 */
3091 	if (MIR_SVC_QUIESCED(mir) && !(mir->mir_listen_stream)) {
3092 		RPCLOG(16, "mir_wsrv: calling mir_svc_idle_start on 0x%p "
3093 		    "because mir slot is idle\n", (void *)q);
3094 		mir_svc_idle_start(q, mir);
3095 	}
3096 
3097 	/*
3098 	 * If outbound flow control has been relieved, then allow new
3099 	 * inbound requests to be processed.
3100 	 */
3101 	if (mir->mir_hold_inbound) {
3102 		mir->mir_hold_inbound = 0;
3103 		qenable(RD(q));
3104 	}
3105 	mutex_exit(&mir->mir_mutex);
3106 }
3107 
3108 static void
3109 mir_disconnect(queue_t *q, mir_t *mir)
3110 {
3111 	ASSERT(MUTEX_HELD(&mir->mir_mutex));
3112 
3113 	switch (mir->mir_type) {
3114 	case RPC_CLIENT:
3115 		/*
3116 		 * We are disconnecting, but not necessarily
3117 		 * closing. By not closing, we will fail to
3118 		 * pick up a possibly changed global timeout value,
3119 		 * unless we store it now.
3120 		 */
3121 		mir->mir_idle_timeout = clnt_idle_timeout;
3122 		mir_clnt_idle_start(WR(q), mir);
3123 		mutex_exit(&mir->mir_mutex);
3124 
3125 		/*
3126 		 * T_DISCON_REQ is passed to KRPC as an integer value
3127 		 * (this is not a TPI message).  It is used as a
3128 		 * convenient value to indicate a sanity check
3129 		 * failure -- the same KRPC routine is also called
3130 		 * for T_DISCON_INDs and T_ORDREL_INDs.
3131 		 */
3132 		clnt_dispatch_notifyall(WR(q), T_DISCON_REQ, 0);
3133 		break;
3134 
3135 	case RPC_SERVER:
3136 		mir->mir_svc_no_more_msgs = 1;
3137 		mir_svc_idle_stop(WR(q), mir);
3138 		mutex_exit(&mir->mir_mutex);
3139 		RPCLOG(16, "mir_disconnect: telling "
3140 			"stream head listener to disconnect stream "
3141 			"(0x%p)\n", (void *) q);
3142 		(void) mir_svc_policy_notify(q, 2);
3143 		break;
3144 
3145 	default:
3146 		mutex_exit(&mir->mir_mutex);
3147 		break;
3148 	}
3149 }
3150 
3151 /*
3152  * do a sanity check on the length of the fragment.
3153  * returns 1 if bad else 0.
3154  */
3155 static int
3156 mir_check_len(queue_t *q, int32_t frag_len,
3157     mblk_t *head_mp)
3158 {
3159 	mir_t   *mir;
3160 
3161 	mir = (mir_t *)q->q_ptr;
3162 
3163 	/*
3164 	 * Do a sanity check on the message length.  If this message is
3165 	 * getting excessively large, shut down the connection.
3166 	 */
3167 
3168 	if ((frag_len <= 0) || (mir->mir_max_msg_sizep == NULL) ||
3169 		(frag_len <= *mir->mir_max_msg_sizep)) {
3170 		return (0);
3171 	}
3172 
3173 	freemsg(head_mp);
3174 	mir->mir_head_mp = (mblk_t *)0;
3175 	mir->mir_frag_len = -(int)sizeof (uint32_t);
3176 	if (mir->mir_type != RPC_SERVER || mir->mir_setup_complete) {
3177 		cmn_err(CE_NOTE,
3178 		"KRPC: record fragment from %s of size(%d) exceeds "
3179 		"maximum (%u). Disconnecting",
3180 		(mir->mir_type == RPC_CLIENT) ? "server" :
3181 		(mir->mir_type == RPC_SERVER) ? "client" :
3182 		"test tool",
3183 		frag_len, *mir->mir_max_msg_sizep);
3184 	}
3185 
3186 	mir_disconnect(q, mir);
3187 	return (1);
3188 }
3189