xref: /titanic_53/usr/src/uts/common/os/ddi.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
23*7c478bd9Sstevel@tonic-gate /*	  All Rights Reserved  	*/
24*7c478bd9Sstevel@tonic-gate 
25*7c478bd9Sstevel@tonic-gate 
26*7c478bd9Sstevel@tonic-gate /*
27*7c478bd9Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
28*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
29*7c478bd9Sstevel@tonic-gate  */
30*7c478bd9Sstevel@tonic-gate 
31*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
32*7c478bd9Sstevel@tonic-gate 
33*7c478bd9Sstevel@tonic-gate /*
34*7c478bd9Sstevel@tonic-gate  * UNIX Device Driver Interface functions
35*7c478bd9Sstevel@tonic-gate  *
36*7c478bd9Sstevel@tonic-gate  * This file contains functions that are to be added to the kernel
37*7c478bd9Sstevel@tonic-gate  * to put the interface presented to drivers in conformance with
38*7c478bd9Sstevel@tonic-gate  * the DDI standard. Of the functions added to the kernel, 17 are
39*7c478bd9Sstevel@tonic-gate  * function equivalents of existing macros in sysmacros.h,
40*7c478bd9Sstevel@tonic-gate  * stream.h, and param.h.
41*7c478bd9Sstevel@tonic-gate  *
42*7c478bd9Sstevel@tonic-gate  * Thirteen additional functions -- drv_getparm(), drv_setparm(),
43*7c478bd9Sstevel@tonic-gate  * getrbuf(), freerbuf(),
44*7c478bd9Sstevel@tonic-gate  * getemajor(), geteminor(), etoimajor(), itoemajor(), drv_usectohz(),
45*7c478bd9Sstevel@tonic-gate  * drv_hztousec(), drv_usecwait(), drv_priv(), and kvtoppid() --
46*7c478bd9Sstevel@tonic-gate  * are specified by DDI to exist in the kernel and are implemented here.
47*7c478bd9Sstevel@tonic-gate  *
48*7c478bd9Sstevel@tonic-gate  * Note that putnext() and put() are not in this file. The C version of
49*7c478bd9Sstevel@tonic-gate  * these routines are in uts/common/os/putnext.c and assembly versions
50*7c478bd9Sstevel@tonic-gate  * might exist for some architectures.
51*7c478bd9Sstevel@tonic-gate  */
52*7c478bd9Sstevel@tonic-gate 
53*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
54*7c478bd9Sstevel@tonic-gate #include <sys/param.h>
55*7c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
56*7c478bd9Sstevel@tonic-gate #include <sys/time.h>
57*7c478bd9Sstevel@tonic-gate #include <sys/systm.h>
58*7c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
59*7c478bd9Sstevel@tonic-gate #include <sys/signal.h>
60*7c478bd9Sstevel@tonic-gate #include <sys/pcb.h>
61*7c478bd9Sstevel@tonic-gate #include <sys/user.h>
62*7c478bd9Sstevel@tonic-gate #include <sys/errno.h>
63*7c478bd9Sstevel@tonic-gate #include <sys/buf.h>
64*7c478bd9Sstevel@tonic-gate #include <sys/proc.h>
65*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
66*7c478bd9Sstevel@tonic-gate #include <sys/stream.h>
67*7c478bd9Sstevel@tonic-gate #include <sys/strsubr.h>
68*7c478bd9Sstevel@tonic-gate #include <sys/uio.h>
69*7c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
70*7c478bd9Sstevel@tonic-gate #include <sys/conf.h>
71*7c478bd9Sstevel@tonic-gate #include <sys/cred.h>
72*7c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
73*7c478bd9Sstevel@tonic-gate #include <sys/file.h>
74*7c478bd9Sstevel@tonic-gate #include <sys/poll.h>
75*7c478bd9Sstevel@tonic-gate #include <sys/session.h>
76*7c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
77*7c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
78*7c478bd9Sstevel@tonic-gate #include <sys/esunddi.h>
79*7c478bd9Sstevel@tonic-gate #include <sys/mkdev.h>
80*7c478bd9Sstevel@tonic-gate #include <sys/debug.h>
81*7c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
82*7c478bd9Sstevel@tonic-gate 
83*7c478bd9Sstevel@tonic-gate /*
84*7c478bd9Sstevel@tonic-gate  * return internal major number corresponding to device
85*7c478bd9Sstevel@tonic-gate  * number (new format) argument
86*7c478bd9Sstevel@tonic-gate  */
87*7c478bd9Sstevel@tonic-gate major_t
88*7c478bd9Sstevel@tonic-gate getmajor(dev_t dev)
89*7c478bd9Sstevel@tonic-gate {
90*7c478bd9Sstevel@tonic-gate #ifdef _LP64
91*7c478bd9Sstevel@tonic-gate 	return ((major_t)((dev >> NBITSMINOR64) & MAXMAJ64));
92*7c478bd9Sstevel@tonic-gate #else
93*7c478bd9Sstevel@tonic-gate 	return ((major_t)((dev >> NBITSMINOR) & MAXMAJ));
94*7c478bd9Sstevel@tonic-gate #endif
95*7c478bd9Sstevel@tonic-gate }
96*7c478bd9Sstevel@tonic-gate 
97*7c478bd9Sstevel@tonic-gate /*
98*7c478bd9Sstevel@tonic-gate  * return external major number corresponding to device
99*7c478bd9Sstevel@tonic-gate  * number (new format) argument
100*7c478bd9Sstevel@tonic-gate  */
101*7c478bd9Sstevel@tonic-gate major_t
102*7c478bd9Sstevel@tonic-gate getemajor(dev_t dev)
103*7c478bd9Sstevel@tonic-gate {
104*7c478bd9Sstevel@tonic-gate #ifdef _LP64
105*7c478bd9Sstevel@tonic-gate 	return ((major_t)((dev >> NBITSMINOR64) & MAXMAJ64));
106*7c478bd9Sstevel@tonic-gate #else
107*7c478bd9Sstevel@tonic-gate 	return ((major_t)((dev >> NBITSMINOR) & MAXMAJ));
108*7c478bd9Sstevel@tonic-gate #endif
109*7c478bd9Sstevel@tonic-gate }
110*7c478bd9Sstevel@tonic-gate 
111*7c478bd9Sstevel@tonic-gate /*
112*7c478bd9Sstevel@tonic-gate  * return internal minor number corresponding to device
113*7c478bd9Sstevel@tonic-gate  * number (new format) argument
114*7c478bd9Sstevel@tonic-gate  */
115*7c478bd9Sstevel@tonic-gate minor_t
116*7c478bd9Sstevel@tonic-gate getminor(dev_t dev)
117*7c478bd9Sstevel@tonic-gate {
118*7c478bd9Sstevel@tonic-gate #ifdef _LP64
119*7c478bd9Sstevel@tonic-gate 	return ((minor_t)(dev & MAXMIN64));
120*7c478bd9Sstevel@tonic-gate #else
121*7c478bd9Sstevel@tonic-gate 	return ((minor_t)(dev & MAXMIN));
122*7c478bd9Sstevel@tonic-gate #endif
123*7c478bd9Sstevel@tonic-gate }
124*7c478bd9Sstevel@tonic-gate 
125*7c478bd9Sstevel@tonic-gate /*
126*7c478bd9Sstevel@tonic-gate  * return external minor number corresponding to device
127*7c478bd9Sstevel@tonic-gate  * number (new format) argument
128*7c478bd9Sstevel@tonic-gate  */
129*7c478bd9Sstevel@tonic-gate minor_t
130*7c478bd9Sstevel@tonic-gate geteminor(dev_t dev)
131*7c478bd9Sstevel@tonic-gate {
132*7c478bd9Sstevel@tonic-gate #ifdef _LP64
133*7c478bd9Sstevel@tonic-gate 	return ((minor_t)(dev & MAXMIN64));
134*7c478bd9Sstevel@tonic-gate #else
135*7c478bd9Sstevel@tonic-gate 	return ((minor_t)(dev & MAXMIN));
136*7c478bd9Sstevel@tonic-gate #endif
137*7c478bd9Sstevel@tonic-gate }
138*7c478bd9Sstevel@tonic-gate 
139*7c478bd9Sstevel@tonic-gate /*
140*7c478bd9Sstevel@tonic-gate  * return internal major number corresponding to external
141*7c478bd9Sstevel@tonic-gate  * major number.
142*7c478bd9Sstevel@tonic-gate  */
143*7c478bd9Sstevel@tonic-gate int
144*7c478bd9Sstevel@tonic-gate etoimajor(major_t emajnum)
145*7c478bd9Sstevel@tonic-gate {
146*7c478bd9Sstevel@tonic-gate #ifdef _LP64
147*7c478bd9Sstevel@tonic-gate 	if (emajnum >= devcnt)
148*7c478bd9Sstevel@tonic-gate 		return (-1); /* invalid external major */
149*7c478bd9Sstevel@tonic-gate #else
150*7c478bd9Sstevel@tonic-gate 	if (emajnum > MAXMAJ || emajnum >= devcnt)
151*7c478bd9Sstevel@tonic-gate 		return (-1); /* invalid external major */
152*7c478bd9Sstevel@tonic-gate #endif
153*7c478bd9Sstevel@tonic-gate 	return ((int)emajnum);
154*7c478bd9Sstevel@tonic-gate }
155*7c478bd9Sstevel@tonic-gate 
156*7c478bd9Sstevel@tonic-gate /*
157*7c478bd9Sstevel@tonic-gate  * return external major number corresponding to internal
158*7c478bd9Sstevel@tonic-gate  * major number argument or -1 if no external major number
159*7c478bd9Sstevel@tonic-gate  * can be found after lastemaj that maps to the internal
160*7c478bd9Sstevel@tonic-gate  * major number. Pass a lastemaj val of -1 to start
161*7c478bd9Sstevel@tonic-gate  * the search initially. Typical use of this function is
162*7c478bd9Sstevel@tonic-gate  * of the form:
163*7c478bd9Sstevel@tonic-gate  *
164*7c478bd9Sstevel@tonic-gate  *	lastemaj = -1;
165*7c478bd9Sstevel@tonic-gate  *	while ((lastemaj = itoemajor(imajnum, lastemaj)) != -1)
166*7c478bd9Sstevel@tonic-gate  *		{ process major number }
167*7c478bd9Sstevel@tonic-gate  */
168*7c478bd9Sstevel@tonic-gate int
169*7c478bd9Sstevel@tonic-gate itoemajor(major_t imajnum, int lastemaj)
170*7c478bd9Sstevel@tonic-gate {
171*7c478bd9Sstevel@tonic-gate 	if (imajnum >= devcnt)
172*7c478bd9Sstevel@tonic-gate 		return (-1);
173*7c478bd9Sstevel@tonic-gate 
174*7c478bd9Sstevel@tonic-gate 	/*
175*7c478bd9Sstevel@tonic-gate 	 * if lastemaj == -1 then start from beginning of
176*7c478bd9Sstevel@tonic-gate 	 * the (imaginary) MAJOR table
177*7c478bd9Sstevel@tonic-gate 	 */
178*7c478bd9Sstevel@tonic-gate 	if (lastemaj < -1)
179*7c478bd9Sstevel@tonic-gate 		return (-1);
180*7c478bd9Sstevel@tonic-gate 
181*7c478bd9Sstevel@tonic-gate 	/*
182*7c478bd9Sstevel@tonic-gate 	 * given that there's a 1-1 mapping of internal to external
183*7c478bd9Sstevel@tonic-gate 	 * major numbers, searching is somewhat pointless ... let's
184*7c478bd9Sstevel@tonic-gate 	 * just go there directly.
185*7c478bd9Sstevel@tonic-gate 	 */
186*7c478bd9Sstevel@tonic-gate 	if (++lastemaj < devcnt && imajnum < devcnt)
187*7c478bd9Sstevel@tonic-gate 		return (imajnum);
188*7c478bd9Sstevel@tonic-gate 	return (-1);
189*7c478bd9Sstevel@tonic-gate }
190*7c478bd9Sstevel@tonic-gate 
191*7c478bd9Sstevel@tonic-gate /*
192*7c478bd9Sstevel@tonic-gate  * encode external major and minor number arguments into a
193*7c478bd9Sstevel@tonic-gate  * new format device number
194*7c478bd9Sstevel@tonic-gate  */
195*7c478bd9Sstevel@tonic-gate dev_t
196*7c478bd9Sstevel@tonic-gate makedevice(major_t maj, minor_t minor)
197*7c478bd9Sstevel@tonic-gate {
198*7c478bd9Sstevel@tonic-gate #ifdef _LP64
199*7c478bd9Sstevel@tonic-gate 	return (((dev_t)maj << NBITSMINOR64) | (minor & MAXMIN64));
200*7c478bd9Sstevel@tonic-gate #else
201*7c478bd9Sstevel@tonic-gate 	return (((dev_t)maj << NBITSMINOR) | (minor & MAXMIN));
202*7c478bd9Sstevel@tonic-gate #endif
203*7c478bd9Sstevel@tonic-gate }
204*7c478bd9Sstevel@tonic-gate 
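/*
 * Example usage (a minimal sketch; "olddev" and "instance" are hypothetical
 * driver values):
 *
 *	dev_t dev;
 *
 *	dev = makedevice(getmajor(olddev), (minor_t)instance);
 *	ASSERT(getmajor(dev) == getmajor(olddev));
 *	ASSERT(getminor(dev) == (minor_t)instance);
 */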
205*7c478bd9Sstevel@tonic-gate /*
206*7c478bd9Sstevel@tonic-gate  * cmpdev - compress new device format to old device format
207*7c478bd9Sstevel@tonic-gate  */
208*7c478bd9Sstevel@tonic-gate o_dev_t
209*7c478bd9Sstevel@tonic-gate cmpdev(dev_t dev)
210*7c478bd9Sstevel@tonic-gate {
211*7c478bd9Sstevel@tonic-gate 	major_t major_d;
212*7c478bd9Sstevel@tonic-gate 	minor_t minor_d;
213*7c478bd9Sstevel@tonic-gate 
214*7c478bd9Sstevel@tonic-gate #ifdef _LP64
215*7c478bd9Sstevel@tonic-gate 	major_d = dev >> NBITSMINOR64;
216*7c478bd9Sstevel@tonic-gate 	minor_d = dev & MAXMIN64;
217*7c478bd9Sstevel@tonic-gate #else
218*7c478bd9Sstevel@tonic-gate 	major_d = dev >> NBITSMINOR;
219*7c478bd9Sstevel@tonic-gate 	minor_d = dev & MAXMIN;
220*7c478bd9Sstevel@tonic-gate #endif
221*7c478bd9Sstevel@tonic-gate 	if (major_d > OMAXMAJ || minor_d > OMAXMIN)
222*7c478bd9Sstevel@tonic-gate 		return ((o_dev_t)NODEV);
223*7c478bd9Sstevel@tonic-gate 	return ((o_dev_t)((major_d << ONBITSMINOR) | minor_d));
224*7c478bd9Sstevel@tonic-gate }
225*7c478bd9Sstevel@tonic-gate 
226*7c478bd9Sstevel@tonic-gate dev_t
227*7c478bd9Sstevel@tonic-gate expdev(dev_t dev)
228*7c478bd9Sstevel@tonic-gate {
229*7c478bd9Sstevel@tonic-gate 	major_t major_d;
230*7c478bd9Sstevel@tonic-gate 	minor_t minor_d;
231*7c478bd9Sstevel@tonic-gate 
232*7c478bd9Sstevel@tonic-gate 	major_d = ((dev >> ONBITSMINOR) & OMAXMAJ);
233*7c478bd9Sstevel@tonic-gate 	minor_d = (dev & OMAXMIN);
234*7c478bd9Sstevel@tonic-gate #ifdef _LP64
235*7c478bd9Sstevel@tonic-gate 	return ((((dev_t)major_d << NBITSMINOR64) | minor_d));
236*7c478bd9Sstevel@tonic-gate #else
237*7c478bd9Sstevel@tonic-gate 	return ((((dev_t)major_d << NBITSMINOR) | minor_d));
238*7c478bd9Sstevel@tonic-gate #endif
239*7c478bd9Sstevel@tonic-gate }
240*7c478bd9Sstevel@tonic-gate 
241*7c478bd9Sstevel@tonic-gate /*
242*7c478bd9Sstevel@tonic-gate  * return true (1) if the message type input is a data
243*7c478bd9Sstevel@tonic-gate  * message type, 0 otherwise
244*7c478bd9Sstevel@tonic-gate  */
245*7c478bd9Sstevel@tonic-gate #undef datamsg
246*7c478bd9Sstevel@tonic-gate int
247*7c478bd9Sstevel@tonic-gate datamsg(unsigned char db_type)
248*7c478bd9Sstevel@tonic-gate {
249*7c478bd9Sstevel@tonic-gate 	return (db_type == M_DATA || db_type == M_PROTO ||
250*7c478bd9Sstevel@tonic-gate 		db_type == M_PCPROTO || db_type == M_DELAY);
251*7c478bd9Sstevel@tonic-gate }
252*7c478bd9Sstevel@tonic-gate 
253*7c478bd9Sstevel@tonic-gate /*
254*7c478bd9Sstevel@tonic-gate  * return a pointer to the other queue in the queue pair of qp
255*7c478bd9Sstevel@tonic-gate  */
256*7c478bd9Sstevel@tonic-gate queue_t *
257*7c478bd9Sstevel@tonic-gate OTHERQ(queue_t *q)
258*7c478bd9Sstevel@tonic-gate {
259*7c478bd9Sstevel@tonic-gate 	return (_OTHERQ(q));
260*7c478bd9Sstevel@tonic-gate }
261*7c478bd9Sstevel@tonic-gate 
262*7c478bd9Sstevel@tonic-gate /*
263*7c478bd9Sstevel@tonic-gate  * return a pointer to the read queue in the queue pair of qp.
264*7c478bd9Sstevel@tonic-gate  */
265*7c478bd9Sstevel@tonic-gate queue_t *
266*7c478bd9Sstevel@tonic-gate RD(queue_t *q)
267*7c478bd9Sstevel@tonic-gate {
268*7c478bd9Sstevel@tonic-gate 	return (_RD(q));
270*7c478bd9Sstevel@tonic-gate }
271*7c478bd9Sstevel@tonic-gate 
272*7c478bd9Sstevel@tonic-gate /*
273*7c478bd9Sstevel@tonic-gate  * return non-zero if the next queue is in the same stream as q, 0 otherwise.
274*7c478bd9Sstevel@tonic-gate  */
275*7c478bd9Sstevel@tonic-gate int
276*7c478bd9Sstevel@tonic-gate SAMESTR(queue_t *q)
277*7c478bd9Sstevel@tonic-gate {
278*7c478bd9Sstevel@tonic-gate 	return (_SAMESTR(q));
279*7c478bd9Sstevel@tonic-gate }
280*7c478bd9Sstevel@tonic-gate 
281*7c478bd9Sstevel@tonic-gate /*
282*7c478bd9Sstevel@tonic-gate  * return a pointer to the write queue in the queue pair of qp.
283*7c478bd9Sstevel@tonic-gate  */
284*7c478bd9Sstevel@tonic-gate queue_t *
285*7c478bd9Sstevel@tonic-gate WR(queue_t *q)
286*7c478bd9Sstevel@tonic-gate {
287*7c478bd9Sstevel@tonic-gate 	return (_WR(q));
288*7c478bd9Sstevel@tonic-gate }
289*7c478bd9Sstevel@tonic-gate 
290*7c478bd9Sstevel@tonic-gate /*
291*7c478bd9Sstevel@tonic-gate  * store value of kernel parameter associated with parm
292*7c478bd9Sstevel@tonic-gate  */
293*7c478bd9Sstevel@tonic-gate int
294*7c478bd9Sstevel@tonic-gate drv_getparm(unsigned int parm, void *valuep)
295*7c478bd9Sstevel@tonic-gate {
296*7c478bd9Sstevel@tonic-gate 	time_t now;
297*7c478bd9Sstevel@tonic-gate 
298*7c478bd9Sstevel@tonic-gate 	switch (parm) {
299*7c478bd9Sstevel@tonic-gate 	case UPROCP:
300*7c478bd9Sstevel@tonic-gate 		*(proc_t **)valuep = ttoproc(curthread);
301*7c478bd9Sstevel@tonic-gate 		break;
302*7c478bd9Sstevel@tonic-gate 	case PPGRP:
303*7c478bd9Sstevel@tonic-gate 		*(pid_t *)valuep = ttoproc(curthread)->p_pgrp;
304*7c478bd9Sstevel@tonic-gate 		break;
305*7c478bd9Sstevel@tonic-gate 	case LBOLT:
306*7c478bd9Sstevel@tonic-gate 		*(clock_t *)valuep = lbolt;
307*7c478bd9Sstevel@tonic-gate 		break;
308*7c478bd9Sstevel@tonic-gate 	case TIME:
309*7c478bd9Sstevel@tonic-gate 		if ((now = gethrestime_sec()) == 0) {
310*7c478bd9Sstevel@tonic-gate 			timestruc_t ts;
311*7c478bd9Sstevel@tonic-gate 			mutex_enter(&tod_lock);
312*7c478bd9Sstevel@tonic-gate 			ts = tod_get();
313*7c478bd9Sstevel@tonic-gate 			mutex_exit(&tod_lock);
314*7c478bd9Sstevel@tonic-gate 			*(time_t *)valuep = ts.tv_sec;
315*7c478bd9Sstevel@tonic-gate 		} else {
316*7c478bd9Sstevel@tonic-gate 			*(time_t *)valuep = now;
317*7c478bd9Sstevel@tonic-gate 		}
318*7c478bd9Sstevel@tonic-gate 		break;
319*7c478bd9Sstevel@tonic-gate 	case PPID:
320*7c478bd9Sstevel@tonic-gate 		*(pid_t *)valuep = ttoproc(curthread)->p_pid;
321*7c478bd9Sstevel@tonic-gate 		break;
322*7c478bd9Sstevel@tonic-gate 	case PSID:
323*7c478bd9Sstevel@tonic-gate 		*(pid_t *)valuep = ttoproc(curthread)->p_sessp->s_sid;
324*7c478bd9Sstevel@tonic-gate 		break;
325*7c478bd9Sstevel@tonic-gate 	case UCRED:
326*7c478bd9Sstevel@tonic-gate 		*(cred_t **)valuep = CRED();
327*7c478bd9Sstevel@tonic-gate 		break;
328*7c478bd9Sstevel@tonic-gate 	default:
329*7c478bd9Sstevel@tonic-gate 		return (-1);
330*7c478bd9Sstevel@tonic-gate 	}
331*7c478bd9Sstevel@tonic-gate 
332*7c478bd9Sstevel@tonic-gate 	return (0);
333*7c478bd9Sstevel@tonic-gate }
334*7c478bd9Sstevel@tonic-gate 
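/*
 * Example usage (a minimal sketch; the local variables are hypothetical):
 *
 *	clock_t ticks;
 *	time_t now;
 *
 *	if (drv_getparm(LBOLT, &ticks) == -1 || drv_getparm(TIME, &now) == -1)
 *		return (EIO);
 */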
335*7c478bd9Sstevel@tonic-gate /*
336*7c478bd9Sstevel@tonic-gate  * set value of kernel parameter associated with parm
337*7c478bd9Sstevel@tonic-gate  */
338*7c478bd9Sstevel@tonic-gate int
339*7c478bd9Sstevel@tonic-gate drv_setparm(unsigned int parm, unsigned long value)
340*7c478bd9Sstevel@tonic-gate {
341*7c478bd9Sstevel@tonic-gate 	switch (parm) {
342*7c478bd9Sstevel@tonic-gate 	case SYSRINT:
343*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(CPU, sys, rcvint, value);
344*7c478bd9Sstevel@tonic-gate 		break;
345*7c478bd9Sstevel@tonic-gate 	case SYSXINT:
346*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(CPU, sys, xmtint, value);
347*7c478bd9Sstevel@tonic-gate 		break;
348*7c478bd9Sstevel@tonic-gate 	case SYSMINT:
349*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(CPU, sys, mdmint, value);
350*7c478bd9Sstevel@tonic-gate 		break;
351*7c478bd9Sstevel@tonic-gate 	case SYSRAWC:
352*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(CPU, sys, rawch, value);
353*7c478bd9Sstevel@tonic-gate 		break;
354*7c478bd9Sstevel@tonic-gate 	case SYSCANC:
355*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(CPU, sys, canch, value);
356*7c478bd9Sstevel@tonic-gate 		break;
357*7c478bd9Sstevel@tonic-gate 	case SYSOUTC:
358*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(CPU, sys, outch, value);
359*7c478bd9Sstevel@tonic-gate 		break;
360*7c478bd9Sstevel@tonic-gate 	default:
361*7c478bd9Sstevel@tonic-gate 		return (-1);
362*7c478bd9Sstevel@tonic-gate 	}
363*7c478bd9Sstevel@tonic-gate 
364*7c478bd9Sstevel@tonic-gate 	return (0);
365*7c478bd9Sstevel@tonic-gate }
366*7c478bd9Sstevel@tonic-gate 
367*7c478bd9Sstevel@tonic-gate /*
368*7c478bd9Sstevel@tonic-gate  * allocate space for buffer header and return pointer to it.
369*7c478bd9Sstevel@tonic-gate  * preferred means of obtaining space for a local buf header.
370*7c478bd9Sstevel@tonic-gate  * returns pointer to buf upon success, NULL for failure
371*7c478bd9Sstevel@tonic-gate  */
372*7c478bd9Sstevel@tonic-gate struct buf *
373*7c478bd9Sstevel@tonic-gate getrbuf(int sleep)
374*7c478bd9Sstevel@tonic-gate {
375*7c478bd9Sstevel@tonic-gate 	struct buf *bp;
376*7c478bd9Sstevel@tonic-gate 
377*7c478bd9Sstevel@tonic-gate 	bp = kmem_alloc(sizeof (struct buf), sleep);
378*7c478bd9Sstevel@tonic-gate 	if (bp == NULL)
379*7c478bd9Sstevel@tonic-gate 		return (NULL);
380*7c478bd9Sstevel@tonic-gate 	bioinit(bp);
381*7c478bd9Sstevel@tonic-gate 
382*7c478bd9Sstevel@tonic-gate 	return (bp);
383*7c478bd9Sstevel@tonic-gate }
384*7c478bd9Sstevel@tonic-gate 
385*7c478bd9Sstevel@tonic-gate /*
386*7c478bd9Sstevel@tonic-gate  * free up space allocated by getrbuf()
387*7c478bd9Sstevel@tonic-gate  */
388*7c478bd9Sstevel@tonic-gate void
389*7c478bd9Sstevel@tonic-gate freerbuf(struct buf *bp)
390*7c478bd9Sstevel@tonic-gate {
391*7c478bd9Sstevel@tonic-gate 	biofini(bp);
392*7c478bd9Sstevel@tonic-gate 	kmem_free(bp, sizeof (struct buf));
393*7c478bd9Sstevel@tonic-gate }
394*7c478bd9Sstevel@tonic-gate 
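/*
 * Typical use of getrbuf()/freerbuf() (a minimal sketch; how the buf is
 * initialized and the I/O issued depends on the caller):
 *
 *	struct buf *bp;
 *
 *	if ((bp = getrbuf(KM_NOSLEEP)) == NULL)
 *		return (ENOMEM);
 *	...initialize bp, issue the I/O, wait for it to complete...
 *	freerbuf(bp);
 */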
395*7c478bd9Sstevel@tonic-gate /*
396*7c478bd9Sstevel@tonic-gate  * convert byte count input to logical page units
397*7c478bd9Sstevel@tonic-gate  * (byte counts that are not a page-size multiple
398*7c478bd9Sstevel@tonic-gate  * are rounded down)
399*7c478bd9Sstevel@tonic-gate  */
400*7c478bd9Sstevel@tonic-gate pgcnt_t
401*7c478bd9Sstevel@tonic-gate btop(size_t numbytes)
402*7c478bd9Sstevel@tonic-gate {
403*7c478bd9Sstevel@tonic-gate 	return (numbytes >> PAGESHIFT);
404*7c478bd9Sstevel@tonic-gate }
405*7c478bd9Sstevel@tonic-gate 
406*7c478bd9Sstevel@tonic-gate /*
407*7c478bd9Sstevel@tonic-gate  * convert byte count input to logical page units
408*7c478bd9Sstevel@tonic-gate  * (byte counts that are not a page-size multiple
409*7c478bd9Sstevel@tonic-gate  * are rounded up)
410*7c478bd9Sstevel@tonic-gate  */
411*7c478bd9Sstevel@tonic-gate pgcnt_t
412*7c478bd9Sstevel@tonic-gate btopr(size_t numbytes)
413*7c478bd9Sstevel@tonic-gate {
414*7c478bd9Sstevel@tonic-gate 	return ((numbytes + PAGEOFFSET) >> PAGESHIFT);
415*7c478bd9Sstevel@tonic-gate }
416*7c478bd9Sstevel@tonic-gate 
417*7c478bd9Sstevel@tonic-gate /*
418*7c478bd9Sstevel@tonic-gate  * convert size in pages to bytes.
419*7c478bd9Sstevel@tonic-gate  */
420*7c478bd9Sstevel@tonic-gate size_t
421*7c478bd9Sstevel@tonic-gate ptob(pgcnt_t numpages)
422*7c478bd9Sstevel@tonic-gate {
423*7c478bd9Sstevel@tonic-gate 	return (numpages << PAGESHIFT);
424*7c478bd9Sstevel@tonic-gate }
425*7c478bd9Sstevel@tonic-gate 
426*7c478bd9Sstevel@tonic-gate #define	MAXCLOCK_T LONG_MAX
427*7c478bd9Sstevel@tonic-gate 
428*7c478bd9Sstevel@tonic-gate /*
429*7c478bd9Sstevel@tonic-gate  * Convert from system time units (hz) to microseconds.
430*7c478bd9Sstevel@tonic-gate  *
431*7c478bd9Sstevel@tonic-gate  * If ticks <= 0, return 0.
432*7c478bd9Sstevel@tonic-gate  * If converting ticks to usecs would overflow, return MAXCLOCK_T.
433*7c478bd9Sstevel@tonic-gate  * Otherwise, convert ticks to microseconds.
434*7c478bd9Sstevel@tonic-gate  */
435*7c478bd9Sstevel@tonic-gate clock_t
436*7c478bd9Sstevel@tonic-gate drv_hztousec(clock_t ticks)
437*7c478bd9Sstevel@tonic-gate {
438*7c478bd9Sstevel@tonic-gate 	if (ticks <= 0)
439*7c478bd9Sstevel@tonic-gate 		return (0);
440*7c478bd9Sstevel@tonic-gate 
441*7c478bd9Sstevel@tonic-gate 	if (ticks > MAXCLOCK_T / usec_per_tick)
442*7c478bd9Sstevel@tonic-gate 		return (MAXCLOCK_T);
443*7c478bd9Sstevel@tonic-gate 
444*7c478bd9Sstevel@tonic-gate 	return (TICK_TO_USEC(ticks));
445*7c478bd9Sstevel@tonic-gate }
446*7c478bd9Sstevel@tonic-gate 
447*7c478bd9Sstevel@tonic-gate 
448*7c478bd9Sstevel@tonic-gate /*
449*7c478bd9Sstevel@tonic-gate  * Convert from microseconds to system time units (hz), rounded up.
450*7c478bd9Sstevel@tonic-gate  *
451*7c478bd9Sstevel@tonic-gate  * If ticks <= 0, return 0.
452*7c478bd9Sstevel@tonic-gate  * Otherwise, convert microseconds to ticks, rounding up.
453*7c478bd9Sstevel@tonic-gate  */
454*7c478bd9Sstevel@tonic-gate clock_t
455*7c478bd9Sstevel@tonic-gate drv_usectohz(clock_t microsecs)
456*7c478bd9Sstevel@tonic-gate {
457*7c478bd9Sstevel@tonic-gate 	if (microsecs <= 0)
458*7c478bd9Sstevel@tonic-gate 		return (0);
459*7c478bd9Sstevel@tonic-gate 
460*7c478bd9Sstevel@tonic-gate 	return (USEC_TO_TICK_ROUNDUP(microsecs));
461*7c478bd9Sstevel@tonic-gate }
462*7c478bd9Sstevel@tonic-gate 
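/*
 * Example usage (a minimal sketch; my_callback and softstate are
 * hypothetical): a 10 millisecond delay expressed in microseconds is
 * converted to ticks before being handed to timeout(9F).
 *
 *	timeout_id_t tid;
 *
 *	tid = timeout(my_callback, softstate, drv_usectohz(10000));
 */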
463*7c478bd9Sstevel@tonic-gate #ifdef	sun
464*7c478bd9Sstevel@tonic-gate /*
465*7c478bd9Sstevel@tonic-gate  * drv_usecwait implemented in each architecture's machine
466*7c478bd9Sstevel@tonic-gate  * specific code somewhere. For sparc, it is the alternate entry
467*7c478bd9Sstevel@tonic-gate  * to usec_delay (eventually usec_delay goes away). See
468*7c478bd9Sstevel@tonic-gate  * sparc/os/ml/sparc_subr.s
469*7c478bd9Sstevel@tonic-gate  */
470*7c478bd9Sstevel@tonic-gate #endif
471*7c478bd9Sstevel@tonic-gate 
472*7c478bd9Sstevel@tonic-gate /*
473*7c478bd9Sstevel@tonic-gate  * bcanputnext() and canputnext() assume they are called from timeout,
474*7c478bd9Sstevel@tonic-gate  * bufcall, or esballoc free routines.  Since these are driven by
475*7c478bd9Sstevel@tonic-gate  * clock interrupts rather than system calls, the appropriate plumbing
476*7c478bd9Sstevel@tonic-gate  * locks have not been acquired.
477*7c478bd9Sstevel@tonic-gate  */
478*7c478bd9Sstevel@tonic-gate int
479*7c478bd9Sstevel@tonic-gate bcanputnext(queue_t *q, unsigned char band)
480*7c478bd9Sstevel@tonic-gate {
481*7c478bd9Sstevel@tonic-gate 	int	ret;
482*7c478bd9Sstevel@tonic-gate 
483*7c478bd9Sstevel@tonic-gate 	claimstr(q);
484*7c478bd9Sstevel@tonic-gate 	ret = bcanput(q->q_next, band);
485*7c478bd9Sstevel@tonic-gate 	releasestr(q);
486*7c478bd9Sstevel@tonic-gate 	return (ret);
487*7c478bd9Sstevel@tonic-gate }
488*7c478bd9Sstevel@tonic-gate 
489*7c478bd9Sstevel@tonic-gate int
490*7c478bd9Sstevel@tonic-gate canputnext(queue_t *q)
491*7c478bd9Sstevel@tonic-gate {
492*7c478bd9Sstevel@tonic-gate 	queue_t	*qofsq = q;
493*7c478bd9Sstevel@tonic-gate 	struct stdata *stp = STREAM(q);
494*7c478bd9Sstevel@tonic-gate 	kmutex_t *sdlock;
495*7c478bd9Sstevel@tonic-gate 
496*7c478bd9Sstevel@tonic-gate 	TRACE_1(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_IN,
497*7c478bd9Sstevel@tonic-gate 	    "canputnext?:%p\n", q);
498*7c478bd9Sstevel@tonic-gate 
499*7c478bd9Sstevel@tonic-gate 	if (stp->sd_ciputctrl != NULL) {
500*7c478bd9Sstevel@tonic-gate 		int ix = CPU->cpu_seqid & stp->sd_nciputctrl;
501*7c478bd9Sstevel@tonic-gate 		sdlock = &stp->sd_ciputctrl[ix].ciputctrl_lock;
502*7c478bd9Sstevel@tonic-gate 		mutex_enter(sdlock);
503*7c478bd9Sstevel@tonic-gate 	} else
504*7c478bd9Sstevel@tonic-gate 		mutex_enter(sdlock = &stp->sd_reflock);
505*7c478bd9Sstevel@tonic-gate 
506*7c478bd9Sstevel@tonic-gate 	/* get next module forward with a service queue */
507*7c478bd9Sstevel@tonic-gate 	q = q->q_next->q_nfsrv;
508*7c478bd9Sstevel@tonic-gate 	ASSERT(q != NULL);
509*7c478bd9Sstevel@tonic-gate 
510*7c478bd9Sstevel@tonic-gate 	/* this is for loopback transports, they should not do a canputnext */
511*7c478bd9Sstevel@tonic-gate 	ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(qofsq));
512*7c478bd9Sstevel@tonic-gate 
513*7c478bd9Sstevel@tonic-gate 	if (!(q->q_flag & QFULL)) {
514*7c478bd9Sstevel@tonic-gate 		mutex_exit(sdlock);
515*7c478bd9Sstevel@tonic-gate 		TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT,
516*7c478bd9Sstevel@tonic-gate 		    "canputnext:%p %d", q, 1);
517*7c478bd9Sstevel@tonic-gate 		return (1);
518*7c478bd9Sstevel@tonic-gate 	}
519*7c478bd9Sstevel@tonic-gate 
520*7c478bd9Sstevel@tonic-gate 	if (sdlock != &stp->sd_reflock) {
521*7c478bd9Sstevel@tonic-gate 		mutex_exit(sdlock);
522*7c478bd9Sstevel@tonic-gate 		mutex_enter(&stp->sd_reflock);
523*7c478bd9Sstevel@tonic-gate 	}
524*7c478bd9Sstevel@tonic-gate 
525*7c478bd9Sstevel@tonic-gate 	/* the above is the most frequently used path */
526*7c478bd9Sstevel@tonic-gate 	stp->sd_refcnt++;
527*7c478bd9Sstevel@tonic-gate 	ASSERT(stp->sd_refcnt != 0);	/* Wraparound */
528*7c478bd9Sstevel@tonic-gate 	mutex_exit(&stp->sd_reflock);
529*7c478bd9Sstevel@tonic-gate 
530*7c478bd9Sstevel@tonic-gate 	mutex_enter(QLOCK(q));
531*7c478bd9Sstevel@tonic-gate 	if (q->q_flag & QFULL) {
532*7c478bd9Sstevel@tonic-gate 		q->q_flag |= QWANTW;
533*7c478bd9Sstevel@tonic-gate 		mutex_exit(QLOCK(q));
534*7c478bd9Sstevel@tonic-gate 		TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT,
535*7c478bd9Sstevel@tonic-gate 		    "canputnext:%p %d", q, 0);
536*7c478bd9Sstevel@tonic-gate 		releasestr(qofsq);
537*7c478bd9Sstevel@tonic-gate 
538*7c478bd9Sstevel@tonic-gate 		return (0);
539*7c478bd9Sstevel@tonic-gate 	}
540*7c478bd9Sstevel@tonic-gate 	mutex_exit(QLOCK(q));
541*7c478bd9Sstevel@tonic-gate 	TRACE_2(TR_FAC_STREAMS_FR, TR_CANPUTNEXT_OUT, "canputnext:%p %d", q, 1);
542*7c478bd9Sstevel@tonic-gate 	releasestr(qofsq);
543*7c478bd9Sstevel@tonic-gate 
544*7c478bd9Sstevel@tonic-gate 	return (1);
545*7c478bd9Sstevel@tonic-gate }
546*7c478bd9Sstevel@tonic-gate 
547*7c478bd9Sstevel@tonic-gate 
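/*
 * Typical flow-control pattern in a service routine (a minimal sketch):
 *
 *	mblk_t *mp;
 *
 *	while ((mp = getq(q)) != NULL) {
 *		if (canputnext(q)) {
 *			putnext(q, mp);
 *		} else {
 *			putbq(q, mp);
 *			break;
 *		}
 *	}
 */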
548*7c478bd9Sstevel@tonic-gate /*
549*7c478bd9Sstevel@tonic-gate  * Open has progressed to the point where it is safe to send/receive messages.
550*7c478bd9Sstevel@tonic-gate  *
551*7c478bd9Sstevel@tonic-gate  * "qprocson enables the put and service routines of the driver
552*7c478bd9Sstevel@tonic-gate  * or module... Prior to the call to qprocson, the put and service
553*7c478bd9Sstevel@tonic-gate  * routines of a newly pushed module or newly opened driver are
554*7c478bd9Sstevel@tonic-gate  * disabled.  For the module, messages flow around it as if it
555*7c478bd9Sstevel@tonic-gate  * were not present in the stream... qprocson must be called by
556*7c478bd9Sstevel@tonic-gate  * the first open of a module or driver after allocation and
557*7c478bd9Sstevel@tonic-gate  * initialization of any resource on which the put and service
558*7c478bd9Sstevel@tonic-gate  * routines depend."
559*7c478bd9Sstevel@tonic-gate  *
560*7c478bd9Sstevel@tonic-gate  * Note that before calling qprocson a module/driver could itself cause its
561*7c478bd9Sstevel@tonic-gate  * put or service procedures to be run by using put() or qenable().
562*7c478bd9Sstevel@tonic-gate  */
563*7c478bd9Sstevel@tonic-gate void
564*7c478bd9Sstevel@tonic-gate qprocson(queue_t *q)
565*7c478bd9Sstevel@tonic-gate {
566*7c478bd9Sstevel@tonic-gate 	ASSERT(q->q_flag & QREADR);
567*7c478bd9Sstevel@tonic-gate 	/*
568*7c478bd9Sstevel@tonic-gate 	 * Do not call insertq() if it is a re-open.  But if _QINSERTING
569*7c478bd9Sstevel@tonic-gate 	 * is set, q_next will not be NULL and we need to call insertq().
570*7c478bd9Sstevel@tonic-gate 	 */
571*7c478bd9Sstevel@tonic-gate 	if ((q->q_next == NULL && WR(q)->q_next == NULL) ||
572*7c478bd9Sstevel@tonic-gate 	    (q->q_flag & _QINSERTING))
573*7c478bd9Sstevel@tonic-gate 		insertq(STREAM(q), q);
574*7c478bd9Sstevel@tonic-gate }
575*7c478bd9Sstevel@tonic-gate 
576*7c478bd9Sstevel@tonic-gate /*
577*7c478bd9Sstevel@tonic-gate  * Close has reached a point where it can no longer allow put/service
578*7c478bd9Sstevel@tonic-gate  * into the queue.
579*7c478bd9Sstevel@tonic-gate  *
580*7c478bd9Sstevel@tonic-gate  * "qprocsoff disables the put and service routines of the driver
581*7c478bd9Sstevel@tonic-gate  * or module... When the routines are disabled in a module, messages
582*7c478bd9Sstevel@tonic-gate  * flow around the module as if it were not present in the stream.
583*7c478bd9Sstevel@tonic-gate  * qprocsoff must be called by the close routine of a driver or module
584*7c478bd9Sstevel@tonic-gate  * before deallocating any resources on which the driver/module's
585*7c478bd9Sstevel@tonic-gate  * put and service routines depend.  qprocsoff will remove the
586*7c478bd9Sstevel@tonic-gate  * queue's service routines from the list of service routines to be
587*7c478bd9Sstevel@tonic-gate  * run and waits until any concurrent put or service routines are
588*7c478bd9Sstevel@tonic-gate  * finished."
589*7c478bd9Sstevel@tonic-gate  *
590*7c478bd9Sstevel@tonic-gate  * Note that after calling qprocsoff a module/driver could itself cause its
591*7c478bd9Sstevel@tonic-gate  * put procedures to be run by using put().
592*7c478bd9Sstevel@tonic-gate  */
593*7c478bd9Sstevel@tonic-gate void
594*7c478bd9Sstevel@tonic-gate qprocsoff(queue_t *q)
595*7c478bd9Sstevel@tonic-gate {
596*7c478bd9Sstevel@tonic-gate 	ASSERT(q->q_flag & QREADR);
597*7c478bd9Sstevel@tonic-gate 	if (q->q_flag & QWCLOSE) {
598*7c478bd9Sstevel@tonic-gate 		/* Called more than once */
599*7c478bd9Sstevel@tonic-gate 		return;
600*7c478bd9Sstevel@tonic-gate 	}
601*7c478bd9Sstevel@tonic-gate 	disable_svc(q);
602*7c478bd9Sstevel@tonic-gate 	removeq(q);
603*7c478bd9Sstevel@tonic-gate }
604*7c478bd9Sstevel@tonic-gate 
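/*
 * Sketch of the expected pairing in a module's open and close routines
 * (xxopen/xxclose are hypothetical):
 *
 *	xxopen():	...allocate and initialize per-instance state...
 *			qprocson(q);
 *
 *	xxclose():	qprocsoff(q);
 *			...free per-instance state...
 */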
605*7c478bd9Sstevel@tonic-gate /*
606*7c478bd9Sstevel@tonic-gate  * "freezestr() freezes the state of the entire STREAM  containing
607*7c478bd9Sstevel@tonic-gate  *  the  queue  pair  q.  A frozen STREAM blocks any thread
608*7c478bd9Sstevel@tonic-gate  *  attempting to enter any open, close, put or service  routine
609*7c478bd9Sstevel@tonic-gate  *  belonging  to  any  queue instance in the STREAM, and blocks
610*7c478bd9Sstevel@tonic-gate  *  any thread currently within the STREAM if it attempts to put
611*7c478bd9Sstevel@tonic-gate  *  messages  onto  or take messages off of any queue within the
612*7c478bd9Sstevel@tonic-gate  *  STREAM (with the sole exception  of  the  caller).   Threads
613*7c478bd9Sstevel@tonic-gate  *  blocked  by  this  mechanism  remain  so until the STREAM is
614*7c478bd9Sstevel@tonic-gate  *  thawed by a call to unfreezestr().
615*7c478bd9Sstevel@tonic-gate  *
616*7c478bd9Sstevel@tonic-gate  * Use strblock to set SQ_FROZEN in all syncqs in the stream (prevents
617*7c478bd9Sstevel@tonic-gate  * further entry into put, service, open, and close procedures) and
618*7c478bd9Sstevel@tonic-gate  * grab (and hold) all the QLOCKs in the stream (to block putq, getq etc.)
619*7c478bd9Sstevel@tonic-gate  *
620*7c478bd9Sstevel@tonic-gate  * Note: this has to be the only code that acquires one QLOCK while holding
621*7c478bd9Sstevel@tonic-gate  * another QLOCK (otherwise we would have locking hierarchy/ordering violations).
622*7c478bd9Sstevel@tonic-gate  */
623*7c478bd9Sstevel@tonic-gate void
624*7c478bd9Sstevel@tonic-gate freezestr(queue_t *q)
625*7c478bd9Sstevel@tonic-gate {
626*7c478bd9Sstevel@tonic-gate 	struct stdata *stp = STREAM(q);
627*7c478bd9Sstevel@tonic-gate 
628*7c478bd9Sstevel@tonic-gate 	/*
629*7c478bd9Sstevel@tonic-gate 	 * Increment refcnt to prevent q_next from changing during the strblock
630*7c478bd9Sstevel@tonic-gate 	 * as well as while the stream is frozen.
631*7c478bd9Sstevel@tonic-gate 	 */
632*7c478bd9Sstevel@tonic-gate 	claimstr(RD(q));
633*7c478bd9Sstevel@tonic-gate 
634*7c478bd9Sstevel@tonic-gate 	strblock(q);
635*7c478bd9Sstevel@tonic-gate 	ASSERT(stp->sd_freezer == NULL);
636*7c478bd9Sstevel@tonic-gate 	stp->sd_freezer = curthread;
637*7c478bd9Sstevel@tonic-gate 	for (q = stp->sd_wrq; q != NULL; q = SAMESTR(q) ? q->q_next : NULL) {
638*7c478bd9Sstevel@tonic-gate 		mutex_enter(QLOCK(q));
639*7c478bd9Sstevel@tonic-gate 		mutex_enter(QLOCK(RD(q)));
640*7c478bd9Sstevel@tonic-gate 	}
641*7c478bd9Sstevel@tonic-gate }
642*7c478bd9Sstevel@tonic-gate 
643*7c478bd9Sstevel@tonic-gate /*
644*7c478bd9Sstevel@tonic-gate  * Undo what freezestr did.
645*7c478bd9Sstevel@tonic-gate  * Have to drop the QLOCKs before the strunblock since strunblock will
646*7c478bd9Sstevel@tonic-gate  * potentially call other put procedures.
647*7c478bd9Sstevel@tonic-gate  */
648*7c478bd9Sstevel@tonic-gate void
649*7c478bd9Sstevel@tonic-gate unfreezestr(queue_t *q)
650*7c478bd9Sstevel@tonic-gate {
651*7c478bd9Sstevel@tonic-gate 	struct stdata *stp = STREAM(q);
652*7c478bd9Sstevel@tonic-gate 	queue_t	*q1;
653*7c478bd9Sstevel@tonic-gate 
654*7c478bd9Sstevel@tonic-gate 	for (q1 = stp->sd_wrq; q1 != NULL;
655*7c478bd9Sstevel@tonic-gate 	    q1 = SAMESTR(q1) ? q1->q_next : NULL) {
656*7c478bd9Sstevel@tonic-gate 		mutex_exit(QLOCK(q1));
657*7c478bd9Sstevel@tonic-gate 		mutex_exit(QLOCK(RD(q1)));
658*7c478bd9Sstevel@tonic-gate 	}
659*7c478bd9Sstevel@tonic-gate 	ASSERT(stp->sd_freezer == curthread);
660*7c478bd9Sstevel@tonic-gate 	stp->sd_freezer = NULL;
661*7c478bd9Sstevel@tonic-gate 	strunblock(q);
662*7c478bd9Sstevel@tonic-gate 	releasestr(RD(q));
663*7c478bd9Sstevel@tonic-gate }
664*7c478bd9Sstevel@tonic-gate 
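/*
 * Example usage (a minimal sketch): a driver freezes the stream before
 * manipulating a queue's message list directly, e.g. with rmvq(9F).
 *
 *	freezestr(q);
 *	rmvq(q, mp);
 *	unfreezestr(q);
 */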
665*7c478bd9Sstevel@tonic-gate /*
666*7c478bd9Sstevel@tonic-gate  * Used by open and close procedures to "sleep" waiting for messages to
667*7c478bd9Sstevel@tonic-gate  * arrive. Note: can only be used in open and close procedures.
668*7c478bd9Sstevel@tonic-gate  *
669*7c478bd9Sstevel@tonic-gate  * Lower the gate and let in either messages on the syncq (if there are
670*7c478bd9Sstevel@tonic-gate  * any) or put/service procedures.
671*7c478bd9Sstevel@tonic-gate  *
672*7c478bd9Sstevel@tonic-gate  * If the queue has an outer perimeter this will not prevent entry into this
673*7c478bd9Sstevel@tonic-gate  * syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
674*7c478bd9Sstevel@tonic-gate  * exclusive access to the outer perimeter.)
675*7c478bd9Sstevel@tonic-gate  *
676*7c478bd9Sstevel@tonic-gate  * Return 0 if the cv_wait_sig was interrupted; otherwise 1.
677*7c478bd9Sstevel@tonic-gate  *
678*7c478bd9Sstevel@tonic-gate  * It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues, because
679*7c478bd9Sstevel@tonic-gate  * otherwise the put entry points were not blocked in the first place. If this
680*7c478bd9Sstevel@tonic-gate  * is SQ_CIOC, then qwait is used to wait for the service procedure to run,
681*7c478bd9Sstevel@tonic-gate  * since a syncq is always SQ_CIPUT if it is SQ_CIOC.
682*7c478bd9Sstevel@tonic-gate  *
683*7c478bd9Sstevel@tonic-gate  * Note that SQ_EXCL is dropped and SQ_WANTEXWAKEUP set in sq_flags
684*7c478bd9Sstevel@tonic-gate  * atomically under sq_putlocks to make sure putnext will not miss a pending
685*7c478bd9Sstevel@tonic-gate  * wakeup.
686*7c478bd9Sstevel@tonic-gate  */
687*7c478bd9Sstevel@tonic-gate int
688*7c478bd9Sstevel@tonic-gate qwait_sig(queue_t *q)
689*7c478bd9Sstevel@tonic-gate {
690*7c478bd9Sstevel@tonic-gate 	syncq_t		*sq, *outer;
691*7c478bd9Sstevel@tonic-gate 	uint_t		flags;
692*7c478bd9Sstevel@tonic-gate 	int		ret = 1;
693*7c478bd9Sstevel@tonic-gate 	int		is_sq_cioc;
694*7c478bd9Sstevel@tonic-gate 
695*7c478bd9Sstevel@tonic-gate 	/*
696*7c478bd9Sstevel@tonic-gate 	 * Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
697*7c478bd9Sstevel@tonic-gate 	 * while detecting all cases where the perimeter is entered
698*7c478bd9Sstevel@tonic-gate 	 * so that qwait_sig can return to the caller.
699*7c478bd9Sstevel@tonic-gate 	 *
700*7c478bd9Sstevel@tonic-gate 	 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
701*7c478bd9Sstevel@tonic-gate 	 * wait for a thread to leave the syncq.
702*7c478bd9Sstevel@tonic-gate 	 */
703*7c478bd9Sstevel@tonic-gate 	sq = q->q_syncq;
704*7c478bd9Sstevel@tonic-gate 	ASSERT(sq);
705*7c478bd9Sstevel@tonic-gate 	is_sq_cioc = (sq->sq_type & SQ_CIOC) ? 1 : 0;
706*7c478bd9Sstevel@tonic-gate 	ASSERT(sq->sq_outer == NULL || sq->sq_outer->sq_flags & SQ_WRITER);
707*7c478bd9Sstevel@tonic-gate 	outer = sq->sq_outer;
708*7c478bd9Sstevel@tonic-gate 	/*
709*7c478bd9Sstevel@tonic-gate 	 * XXX this does not work if there is only an outer perimeter.
710*7c478bd9Sstevel@tonic-gate 	 * The semantics of qwait/qwait_sig are undefined in this case.
711*7c478bd9Sstevel@tonic-gate 	 */
712*7c478bd9Sstevel@tonic-gate 	if (outer)
713*7c478bd9Sstevel@tonic-gate 		outer_exit(outer);
714*7c478bd9Sstevel@tonic-gate 
715*7c478bd9Sstevel@tonic-gate 	mutex_enter(SQLOCK(sq));
716*7c478bd9Sstevel@tonic-gate 	if (is_sq_cioc == 0) {
717*7c478bd9Sstevel@tonic-gate 		SQ_PUTLOCKS_ENTER(sq);
718*7c478bd9Sstevel@tonic-gate 	}
719*7c478bd9Sstevel@tonic-gate 	flags = sq->sq_flags;
720*7c478bd9Sstevel@tonic-gate 	/*
721*7c478bd9Sstevel@tonic-gate 	 * Drop SQ_EXCL and sq_count but hold the SQLOCK
722*7c478bd9Sstevel@tonic-gate 	 * to prevent any undetected entry and exit into the perimeter.
723*7c478bd9Sstevel@tonic-gate 	 */
724*7c478bd9Sstevel@tonic-gate 	ASSERT(sq->sq_count > 0);
725*7c478bd9Sstevel@tonic-gate 	sq->sq_count--;
726*7c478bd9Sstevel@tonic-gate 
727*7c478bd9Sstevel@tonic-gate 	if (is_sq_cioc == 0) {
728*7c478bd9Sstevel@tonic-gate 		ASSERT(flags & SQ_EXCL);
729*7c478bd9Sstevel@tonic-gate 		flags &= ~SQ_EXCL;
730*7c478bd9Sstevel@tonic-gate 	}
731*7c478bd9Sstevel@tonic-gate 	/*
732*7c478bd9Sstevel@tonic-gate 	 * Unblock any thread blocked in an entersq or outer_enter.
733*7c478bd9Sstevel@tonic-gate 	 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
734*7c478bd9Sstevel@tonic-gate 	 * since that could lead to livelock with two threads in
735*7c478bd9Sstevel@tonic-gate 	 * qwait for the same (per module) inner perimeter.
736*7c478bd9Sstevel@tonic-gate 	 */
737*7c478bd9Sstevel@tonic-gate 	if (flags & SQ_WANTWAKEUP) {
738*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&sq->sq_wait);
739*7c478bd9Sstevel@tonic-gate 		flags &= ~SQ_WANTWAKEUP;
740*7c478bd9Sstevel@tonic-gate 	}
741*7c478bd9Sstevel@tonic-gate 	sq->sq_flags = flags;
742*7c478bd9Sstevel@tonic-gate 	if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
743*7c478bd9Sstevel@tonic-gate 		if (is_sq_cioc == 0) {
744*7c478bd9Sstevel@tonic-gate 			SQ_PUTLOCKS_EXIT(sq);
745*7c478bd9Sstevel@tonic-gate 		}
746*7c478bd9Sstevel@tonic-gate 		/* drain_syncq() drops SQLOCK */
747*7c478bd9Sstevel@tonic-gate 		drain_syncq(sq);
748*7c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
749*7c478bd9Sstevel@tonic-gate 		entersq(sq, SQ_OPENCLOSE);
750*7c478bd9Sstevel@tonic-gate 		return (1);
751*7c478bd9Sstevel@tonic-gate 	}
752*7c478bd9Sstevel@tonic-gate 	/*
753*7c478bd9Sstevel@tonic-gate 	 * Sleep on sq_exitwait to only be woken up when threads leave the
754*7c478bd9Sstevel@tonic-gate 	 * put or service procedures. We can not sleep on sq_wait since an
755*7c478bd9Sstevel@tonic-gate 	 * outer_exit in a qwait running in the same outer perimeter would
756*7c478bd9Sstevel@tonic-gate 	 * cause a livelock "ping-pong" between two or more qwait'ers.
757*7c478bd9Sstevel@tonic-gate 	 */
758*7c478bd9Sstevel@tonic-gate 	do {
759*7c478bd9Sstevel@tonic-gate 		sq->sq_flags |= SQ_WANTEXWAKEUP;
760*7c478bd9Sstevel@tonic-gate 		if (is_sq_cioc == 0) {
761*7c478bd9Sstevel@tonic-gate 			SQ_PUTLOCKS_EXIT(sq);
762*7c478bd9Sstevel@tonic-gate 		}
763*7c478bd9Sstevel@tonic-gate 		ret = cv_wait_sig(&sq->sq_exitwait, SQLOCK(sq));
764*7c478bd9Sstevel@tonic-gate 		if (is_sq_cioc == 0) {
765*7c478bd9Sstevel@tonic-gate 			SQ_PUTLOCKS_ENTER(sq);
766*7c478bd9Sstevel@tonic-gate 		}
767*7c478bd9Sstevel@tonic-gate 	} while (ret && (sq->sq_flags & SQ_WANTEXWAKEUP));
768*7c478bd9Sstevel@tonic-gate 	if (is_sq_cioc == 0) {
769*7c478bd9Sstevel@tonic-gate 		SQ_PUTLOCKS_EXIT(sq);
770*7c478bd9Sstevel@tonic-gate 	}
771*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
772*7c478bd9Sstevel@tonic-gate 
773*7c478bd9Sstevel@tonic-gate 	/*
774*7c478bd9Sstevel@tonic-gate 	 * Re-enter the perimeters again
775*7c478bd9Sstevel@tonic-gate 	 */
776*7c478bd9Sstevel@tonic-gate 	entersq(sq, SQ_OPENCLOSE);
777*7c478bd9Sstevel@tonic-gate 	return (ret);
778*7c478bd9Sstevel@tonic-gate }
779*7c478bd9Sstevel@tonic-gate 
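/*
 * Typical use in an open routine (a minimal sketch; xsp and XX_READY are
 * hypothetical per-instance state): sleep until another thread sets the
 * condition, giving up if a signal arrives.
 *
 *	while (!(xsp->xx_flags & XX_READY)) {
 *		if (!qwait_sig(q))
 *			return (EINTR);
 *	}
 */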
780*7c478bd9Sstevel@tonic-gate /*
781*7c478bd9Sstevel@tonic-gate  * Used by open and close procedures to "sleep" waiting for messages to
782*7c478bd9Sstevel@tonic-gate  * arrive. Note: can only be used in open and close procedures.
783*7c478bd9Sstevel@tonic-gate  *
784*7c478bd9Sstevel@tonic-gate  * Lower the gate and let in either messages on the syncq (if there are
785*7c478bd9Sstevel@tonic-gate  * any) or put/service procedures.
786*7c478bd9Sstevel@tonic-gate  *
787*7c478bd9Sstevel@tonic-gate  * If the queue has an outer perimeter this will not prevent entry into this
788*7c478bd9Sstevel@tonic-gate  * syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
789*7c478bd9Sstevel@tonic-gate  * exclusive access to the outer perimeter.)
790*7c478bd9Sstevel@tonic-gate  *
791*7c478bd9Sstevel@tonic-gate  * It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues, because
792*7c478bd9Sstevel@tonic-gate  * otherwise the put entry points were not blocked in the first place. If this
793*7c478bd9Sstevel@tonic-gate  * is SQ_CIOC, then qwait is used to wait for the service procedure to run,
794*7c478bd9Sstevel@tonic-gate  * since a syncq is always SQ_CIPUT if it is SQ_CIOC.
795*7c478bd9Sstevel@tonic-gate  *
796*7c478bd9Sstevel@tonic-gate  * Note that SQ_EXCL is dropped and SQ_WANTEXWAKEUP set in sq_flags
797*7c478bd9Sstevel@tonic-gate  * atomically under sq_putlocks to make sure putnext will not miss a pending
798*7c478bd9Sstevel@tonic-gate  * wakeup.
799*7c478bd9Sstevel@tonic-gate  */
800*7c478bd9Sstevel@tonic-gate void
801*7c478bd9Sstevel@tonic-gate qwait(queue_t *q)
802*7c478bd9Sstevel@tonic-gate {
803*7c478bd9Sstevel@tonic-gate 	syncq_t		*sq, *outer;
804*7c478bd9Sstevel@tonic-gate 	uint_t		flags;
805*7c478bd9Sstevel@tonic-gate 	int		is_sq_cioc;
806*7c478bd9Sstevel@tonic-gate 
807*7c478bd9Sstevel@tonic-gate 	/*
808*7c478bd9Sstevel@tonic-gate 	 * Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
809*7c478bd9Sstevel@tonic-gate 	 * while detecting all cases where the perimeter is entered
810*7c478bd9Sstevel@tonic-gate 	 * so that qwait can return to the caller.
811*7c478bd9Sstevel@tonic-gate 	 *
812*7c478bd9Sstevel@tonic-gate 	 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
813*7c478bd9Sstevel@tonic-gate 	 * wait for a thread to leave the syncq.
814*7c478bd9Sstevel@tonic-gate 	 */
815*7c478bd9Sstevel@tonic-gate 	sq = q->q_syncq;
816*7c478bd9Sstevel@tonic-gate 	ASSERT(sq);
817*7c478bd9Sstevel@tonic-gate 	is_sq_cioc = (sq->sq_type & SQ_CIOC) ? 1 : 0;
818*7c478bd9Sstevel@tonic-gate 	ASSERT(sq->sq_outer == NULL || sq->sq_outer->sq_flags & SQ_WRITER);
819*7c478bd9Sstevel@tonic-gate 	outer = sq->sq_outer;
820*7c478bd9Sstevel@tonic-gate 	/*
821*7c478bd9Sstevel@tonic-gate 	 * XXX this does not work if there is only an outer perimeter.
822*7c478bd9Sstevel@tonic-gate 	 * The semantics of qwait/qwait_sig are undefined in this case.
823*7c478bd9Sstevel@tonic-gate 	 */
824*7c478bd9Sstevel@tonic-gate 	if (outer)
825*7c478bd9Sstevel@tonic-gate 		outer_exit(outer);
826*7c478bd9Sstevel@tonic-gate 
827*7c478bd9Sstevel@tonic-gate 	mutex_enter(SQLOCK(sq));
828*7c478bd9Sstevel@tonic-gate 	if (is_sq_cioc == 0) {
829*7c478bd9Sstevel@tonic-gate 		SQ_PUTLOCKS_ENTER(sq);
830*7c478bd9Sstevel@tonic-gate 	}
831*7c478bd9Sstevel@tonic-gate 	flags = sq->sq_flags;
832*7c478bd9Sstevel@tonic-gate 	/*
833*7c478bd9Sstevel@tonic-gate 	 * Drop SQ_EXCL and sq_count but hold the SQLOCK
834*7c478bd9Sstevel@tonic-gate 	 * to prevent any undetected entry and exit into the perimeter.
835*7c478bd9Sstevel@tonic-gate 	 */
836*7c478bd9Sstevel@tonic-gate 	ASSERT(sq->sq_count > 0);
837*7c478bd9Sstevel@tonic-gate 	sq->sq_count--;
838*7c478bd9Sstevel@tonic-gate 
839*7c478bd9Sstevel@tonic-gate 	if (is_sq_cioc == 0) {
840*7c478bd9Sstevel@tonic-gate 		ASSERT(flags & SQ_EXCL);
841*7c478bd9Sstevel@tonic-gate 		flags &= ~SQ_EXCL;
842*7c478bd9Sstevel@tonic-gate 	}
843*7c478bd9Sstevel@tonic-gate 	/*
844*7c478bd9Sstevel@tonic-gate 	 * Unblock any thread blocked in an entersq or outer_enter.
845*7c478bd9Sstevel@tonic-gate 	 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
846*7c478bd9Sstevel@tonic-gate 	 * since that could lead to livelock with two threads in
847*7c478bd9Sstevel@tonic-gate 	 * qwait for the same (per module) inner perimeter.
848*7c478bd9Sstevel@tonic-gate 	 */
849*7c478bd9Sstevel@tonic-gate 	if (flags & SQ_WANTWAKEUP) {
850*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&sq->sq_wait);
851*7c478bd9Sstevel@tonic-gate 		flags &= ~SQ_WANTWAKEUP;
852*7c478bd9Sstevel@tonic-gate 	}
853*7c478bd9Sstevel@tonic-gate 	sq->sq_flags = flags;
854*7c478bd9Sstevel@tonic-gate 	if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
855*7c478bd9Sstevel@tonic-gate 		if (is_sq_cioc == 0) {
856*7c478bd9Sstevel@tonic-gate 			SQ_PUTLOCKS_EXIT(sq);
857*7c478bd9Sstevel@tonic-gate 		}
858*7c478bd9Sstevel@tonic-gate 		/* drain_syncq() drops SQLOCK */
859*7c478bd9Sstevel@tonic-gate 		drain_syncq(sq);
860*7c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
861*7c478bd9Sstevel@tonic-gate 		entersq(sq, SQ_OPENCLOSE);
862*7c478bd9Sstevel@tonic-gate 		return;
863*7c478bd9Sstevel@tonic-gate 	}
864*7c478bd9Sstevel@tonic-gate 	/*
865*7c478bd9Sstevel@tonic-gate 	 * Sleep on sq_exitwait to only be woken up when threads leave the
866*7c478bd9Sstevel@tonic-gate 	 * put or service procedures. We can not sleep on sq_wait since an
867*7c478bd9Sstevel@tonic-gate 	 * outer_exit in a qwait running in the same outer perimeter would
868*7c478bd9Sstevel@tonic-gate 	 * cause a livelock "ping-pong" between two or more qwait'ers.
869*7c478bd9Sstevel@tonic-gate 	 */
870*7c478bd9Sstevel@tonic-gate 	do {
871*7c478bd9Sstevel@tonic-gate 		sq->sq_flags |= SQ_WANTEXWAKEUP;
872*7c478bd9Sstevel@tonic-gate 		if (is_sq_cioc == 0) {
873*7c478bd9Sstevel@tonic-gate 			SQ_PUTLOCKS_EXIT(sq);
874*7c478bd9Sstevel@tonic-gate 		}
875*7c478bd9Sstevel@tonic-gate 		cv_wait(&sq->sq_exitwait, SQLOCK(sq));
876*7c478bd9Sstevel@tonic-gate 		if (is_sq_cioc == 0) {
877*7c478bd9Sstevel@tonic-gate 			SQ_PUTLOCKS_ENTER(sq);
878*7c478bd9Sstevel@tonic-gate 		}
879*7c478bd9Sstevel@tonic-gate 	} while (sq->sq_flags & SQ_WANTEXWAKEUP);
880*7c478bd9Sstevel@tonic-gate 	if (is_sq_cioc == 0) {
881*7c478bd9Sstevel@tonic-gate 		SQ_PUTLOCKS_EXIT(sq);
882*7c478bd9Sstevel@tonic-gate 	}
883*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
884*7c478bd9Sstevel@tonic-gate 
885*7c478bd9Sstevel@tonic-gate 	/*
886*7c478bd9Sstevel@tonic-gate 	 * Re-enter the perimeters again
887*7c478bd9Sstevel@tonic-gate 	 */
888*7c478bd9Sstevel@tonic-gate 	entersq(sq, SQ_OPENCLOSE);
889*7c478bd9Sstevel@tonic-gate }
890*7c478bd9Sstevel@tonic-gate 
891*7c478bd9Sstevel@tonic-gate /*
892*7c478bd9Sstevel@tonic-gate  * Used for the synchronous streams entrypoints when sleeping outside
893*7c478bd9Sstevel@tonic-gate  * the perimeters. Must never be called from regular put entrypoint.
894*7c478bd9Sstevel@tonic-gate  *
895*7c478bd9Sstevel@tonic-gate  * There's no need to grab sq_putlocks here (they only exist for CIPUT sync
896*7c478bd9Sstevel@tonic-gate  * queues). If it is a CIPUT sync queue, the put entry points were not blocked
897*7c478bd9Sstevel@tonic-gate  * in the first place by rwnext/infonext, which are treated as put entry points
898*7c478bd9Sstevel@tonic-gate  * for perimeter synchronization purposes.
899*7c478bd9Sstevel@tonic-gate  *
900*7c478bd9Sstevel@tonic-gate  * Consolidation private.
901*7c478bd9Sstevel@tonic-gate  */
902*7c478bd9Sstevel@tonic-gate boolean_t
903*7c478bd9Sstevel@tonic-gate qwait_rw(queue_t *q)
904*7c478bd9Sstevel@tonic-gate {
905*7c478bd9Sstevel@tonic-gate 	syncq_t		*sq;
906*7c478bd9Sstevel@tonic-gate 	ulong_t		flags;
907*7c478bd9Sstevel@tonic-gate 	boolean_t	gotsignal = B_FALSE;
908*7c478bd9Sstevel@tonic-gate 
909*7c478bd9Sstevel@tonic-gate 	/*
910*7c478bd9Sstevel@tonic-gate 	 * Perform the same operations as a leavesq(sq, SQ_PUT)
911*7c478bd9Sstevel@tonic-gate 	 * while detecting all cases where the perimeter is entered
912*7c478bd9Sstevel@tonic-gate 	 * so that qwait_rw can return to the caller.
913*7c478bd9Sstevel@tonic-gate 	 *
914*7c478bd9Sstevel@tonic-gate 	 * Drain the syncq if possible. Otherwise reset SQ_EXCL and
915*7c478bd9Sstevel@tonic-gate 	 * wait for a thread to leave the syncq.
916*7c478bd9Sstevel@tonic-gate 	 */
917*7c478bd9Sstevel@tonic-gate 	sq = q->q_syncq;
918*7c478bd9Sstevel@tonic-gate 	ASSERT(sq);
919*7c478bd9Sstevel@tonic-gate 
920*7c478bd9Sstevel@tonic-gate 	mutex_enter(SQLOCK(sq));
921*7c478bd9Sstevel@tonic-gate 	flags = sq->sq_flags;
922*7c478bd9Sstevel@tonic-gate 	/*
923*7c478bd9Sstevel@tonic-gate 	 * Drop SQ_EXCL and sq_count but hold the SQLOCK to prevent any
924*7c478bd9Sstevel@tonic-gate 	 * undetected entry and exit into the perimeter.
925*7c478bd9Sstevel@tonic-gate 	 */
926*7c478bd9Sstevel@tonic-gate 	ASSERT(sq->sq_count > 0);
927*7c478bd9Sstevel@tonic-gate 	sq->sq_count--;
928*7c478bd9Sstevel@tonic-gate 	if (!(sq->sq_type & SQ_CIPUT)) {
929*7c478bd9Sstevel@tonic-gate 		ASSERT(flags & SQ_EXCL);
930*7c478bd9Sstevel@tonic-gate 		flags &= ~SQ_EXCL;
931*7c478bd9Sstevel@tonic-gate 	}
932*7c478bd9Sstevel@tonic-gate 	/*
933*7c478bd9Sstevel@tonic-gate 	 * Unblock any thread blocked in an entersq or outer_enter.
934*7c478bd9Sstevel@tonic-gate 	 * Note: we do not unblock a thread waiting in qwait/qwait_sig,
935*7c478bd9Sstevel@tonic-gate 	 * since that could lead to livelock with two threads in
936*7c478bd9Sstevel@tonic-gate 	 * qwait for the same (per module) inner perimeter.
937*7c478bd9Sstevel@tonic-gate 	 */
938*7c478bd9Sstevel@tonic-gate 	if (flags & SQ_WANTWAKEUP) {
939*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&sq->sq_wait);
940*7c478bd9Sstevel@tonic-gate 		flags &= ~SQ_WANTWAKEUP;
941*7c478bd9Sstevel@tonic-gate 	}
942*7c478bd9Sstevel@tonic-gate 	sq->sq_flags = flags;
943*7c478bd9Sstevel@tonic-gate 	if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
944*7c478bd9Sstevel@tonic-gate 		/* drain_syncq() drops SQLOCK */
945*7c478bd9Sstevel@tonic-gate 		drain_syncq(sq);
946*7c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
947*7c478bd9Sstevel@tonic-gate 		entersq(sq, SQ_PUT);
948*7c478bd9Sstevel@tonic-gate 		return (B_FALSE);
949*7c478bd9Sstevel@tonic-gate 	}
950*7c478bd9Sstevel@tonic-gate 	/*
951*7c478bd9Sstevel@tonic-gate 	 * Sleep on sq_exitwait to only be woken up when threads leave the
952*7c478bd9Sstevel@tonic-gate 	 * put or service procedures. We can not sleep on sq_wait since an
953*7c478bd9Sstevel@tonic-gate 	 * outer_exit in a qwait running in the same outer perimeter would
954*7c478bd9Sstevel@tonic-gate 	 * cause a livelock "ping-pong" between two or more qwait'ers.
955*7c478bd9Sstevel@tonic-gate 	 */
956*7c478bd9Sstevel@tonic-gate 	do {
957*7c478bd9Sstevel@tonic-gate 		sq->sq_flags |= SQ_WANTEXWAKEUP;
958*7c478bd9Sstevel@tonic-gate 		if (cv_wait_sig(&sq->sq_exitwait, SQLOCK(sq)) <= 0) {
959*7c478bd9Sstevel@tonic-gate 			sq->sq_flags &= ~SQ_WANTEXWAKEUP;
960*7c478bd9Sstevel@tonic-gate 			gotsignal = B_TRUE;
961*7c478bd9Sstevel@tonic-gate 			break;
962*7c478bd9Sstevel@tonic-gate 		}
963*7c478bd9Sstevel@tonic-gate 	} while (sq->sq_flags & SQ_WANTEXWAKEUP);
964*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
965*7c478bd9Sstevel@tonic-gate 
966*7c478bd9Sstevel@tonic-gate 	/*
967*7c478bd9Sstevel@tonic-gate 	 * Re-enter the perimeters again
968*7c478bd9Sstevel@tonic-gate 	 */
969*7c478bd9Sstevel@tonic-gate 	entersq(sq, SQ_PUT);
970*7c478bd9Sstevel@tonic-gate 	return (gotsignal);
971*7c478bd9Sstevel@tonic-gate }
972*7c478bd9Sstevel@tonic-gate 
973*7c478bd9Sstevel@tonic-gate /*
974*7c478bd9Sstevel@tonic-gate  * Asynchronously upgrade to exclusive access at either the inner or
975*7c478bd9Sstevel@tonic-gate  * outer perimeter.
976*7c478bd9Sstevel@tonic-gate  */
977*7c478bd9Sstevel@tonic-gate void
978*7c478bd9Sstevel@tonic-gate qwriter(queue_t *q, mblk_t *mp, void (*func)(), int perim)
979*7c478bd9Sstevel@tonic-gate {
980*7c478bd9Sstevel@tonic-gate 	if (perim == PERIM_INNER)
981*7c478bd9Sstevel@tonic-gate 		qwriter_inner(q, mp, func);
982*7c478bd9Sstevel@tonic-gate 	else if (perim == PERIM_OUTER)
983*7c478bd9Sstevel@tonic-gate 		qwriter_outer(q, mp, func);
984*7c478bd9Sstevel@tonic-gate 	else
985*7c478bd9Sstevel@tonic-gate 		panic("qwriter: wrong \"perimeter\" parameter");
986*7c478bd9Sstevel@tonic-gate }
987*7c478bd9Sstevel@tonic-gate 
988*7c478bd9Sstevel@tonic-gate /*
989*7c478bd9Sstevel@tonic-gate  * Schedule a synchronous streams timeout
990*7c478bd9Sstevel@tonic-gate  */
991*7c478bd9Sstevel@tonic-gate timeout_id_t
992*7c478bd9Sstevel@tonic-gate qtimeout(queue_t *q, void (*func)(void *), void *arg, clock_t tim)
993*7c478bd9Sstevel@tonic-gate {
994*7c478bd9Sstevel@tonic-gate 	syncq_t		*sq;
995*7c478bd9Sstevel@tonic-gate 	callbparams_t	*cbp;
996*7c478bd9Sstevel@tonic-gate 	timeout_id_t	tid;
997*7c478bd9Sstevel@tonic-gate 
998*7c478bd9Sstevel@tonic-gate 	sq = q->q_syncq;
999*7c478bd9Sstevel@tonic-gate 	/*
1000*7c478bd9Sstevel@tonic-gate 	 * You don't want the timeout firing before its params are set up;
1001*7c478bd9Sstevel@tonic-gate 	 * callbparams_alloc() acquires SQLOCK(sq).
1002*7c478bd9Sstevel@tonic-gate 	 * qtimeout() can't fail and can't sleep, so panic if memory is not
1003*7c478bd9Sstevel@tonic-gate 	 * available.
1004*7c478bd9Sstevel@tonic-gate 	 */
1005*7c478bd9Sstevel@tonic-gate 	cbp = callbparams_alloc(sq, func, arg, KM_NOSLEEP | KM_PANIC);
1006*7c478bd9Sstevel@tonic-gate 	/*
1007*7c478bd9Sstevel@tonic-gate 	 * the callbflags in the sq use the same flags. They get anded
1008*7c478bd9Sstevel@tonic-gate 	 * in the callbwrapper to determine if a qun* of this callback type
1009*7c478bd9Sstevel@tonic-gate 	 * is required. This is not a request to cancel.
1010*7c478bd9Sstevel@tonic-gate 	 */
1011*7c478bd9Sstevel@tonic-gate 	cbp->cbp_flags = SQ_CANCEL_TOUT;
1012*7c478bd9Sstevel@tonic-gate 	/* check new timeout version return codes */
1013*7c478bd9Sstevel@tonic-gate 	tid = timeout(qcallbwrapper, cbp, tim);
1014*7c478bd9Sstevel@tonic-gate 	cbp->cbp_id = (callbparams_id_t)tid;
1015*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
1016*7c478bd9Sstevel@tonic-gate 	/* use local id because the cbp memory could be free by now */
1017*7c478bd9Sstevel@tonic-gate 	return (tid);
1018*7c478bd9Sstevel@tonic-gate }
1019*7c478bd9Sstevel@tonic-gate 
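/*
 * Illustrative sketch, not part of this file: a driver that wants its
 * timeout handler to run inside the queue's perimeter uses qtimeout()
 * instead of timeout().  The xx_* names are hypothetical; the returned
 * id is kept so the callback can later be cancelled with quntimeout().
 */
#if 0
static timeout_id_t xx_tid;		/* hypothetical; normally per-stream state */

static void
xx_retry(void *arg)
{
	queue_t *q = arg;

	xx_tid = 0;			/* the timeout has now fired */
	qenable(q);			/* re-run the service procedure */
}

static void
xx_schedule_retry(queue_t *q)
{
	/* handler runs inside q's perimeter roughly 100ms from now */
	xx_tid = qtimeout(q, xx_retry, q, drv_usectohz(100000));
}
#endif
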
1020*7c478bd9Sstevel@tonic-gate bufcall_id_t
1021*7c478bd9Sstevel@tonic-gate qbufcall(queue_t *q, size_t size, uint_t pri, void (*func)(void *), void *arg)
1022*7c478bd9Sstevel@tonic-gate {
1023*7c478bd9Sstevel@tonic-gate 	syncq_t		*sq;
1024*7c478bd9Sstevel@tonic-gate 	callbparams_t	*cbp;
1025*7c478bd9Sstevel@tonic-gate 	bufcall_id_t	bid;
1026*7c478bd9Sstevel@tonic-gate 
1027*7c478bd9Sstevel@tonic-gate 	sq = q->q_syncq;
1028*7c478bd9Sstevel@tonic-gate 	/*
1029*7c478bd9Sstevel@tonic-gate 	 * We don't want the bufcall firing before its params are set up;
1030*7c478bd9Sstevel@tonic-gate 	 * callbparams_alloc() acquires SQLOCK(sq) if successful.
1031*7c478bd9Sstevel@tonic-gate 	 */
1032*7c478bd9Sstevel@tonic-gate 	cbp = callbparams_alloc(sq, func, arg, KM_NOSLEEP);
1033*7c478bd9Sstevel@tonic-gate 	if (cbp == NULL)
1034*7c478bd9Sstevel@tonic-gate 		return ((bufcall_id_t)0);
1035*7c478bd9Sstevel@tonic-gate 
1036*7c478bd9Sstevel@tonic-gate 	/*
1037*7c478bd9Sstevel@tonic-gate 	 * The callbflags in the sq use the same flag values.  They are
1038*7c478bd9Sstevel@tonic-gate 	 * ANDed in the callback wrapper to determine whether a qun* of
1039*7c478bd9Sstevel@tonic-gate 	 * this callback type is required.  This is not a request to cancel.
1040*7c478bd9Sstevel@tonic-gate 	 */
1041*7c478bd9Sstevel@tonic-gate 	cbp->cbp_flags = SQ_CANCEL_BUFCALL;
1042*7c478bd9Sstevel@tonic-gate 	/* check new timeout version return codes */
1043*7c478bd9Sstevel@tonic-gate 	bid = bufcall(size, pri, qcallbwrapper, cbp);
1044*7c478bd9Sstevel@tonic-gate 	cbp->cbp_id = (callbparams_id_t)bid;
1045*7c478bd9Sstevel@tonic-gate 	if (bid == 0) {
1046*7c478bd9Sstevel@tonic-gate 		callbparams_free(sq, cbp);
1047*7c478bd9Sstevel@tonic-gate 	}
1048*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
1049*7c478bd9Sstevel@tonic-gate 	/* use the local id because the params memory may already be freed */
1050*7c478bd9Sstevel@tonic-gate 	return (bid);
1051*7c478bd9Sstevel@tonic-gate }
1052*7c478bd9Sstevel@tonic-gate 
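/*
 * Illustrative sketch, not part of this file: the classic use of
 * qbufcall() is to recover from an allocb() failure in a service
 * procedure by arranging for the queue to be re-enabled once a buffer
 * of the needed size is likely to be available.  The xx_* names and
 * the 64-byte header are hypothetical.
 */
#if 0
static bufcall_id_t xx_bid;		/* hypothetical; normally per-stream state */

static void
xx_reenable(void *arg)
{
	queue_t *q = arg;

	xx_bid = 0;
	qenable(q);			/* buffers may be available now */
}

static int
xx_rsrv(queue_t *q)
{
	mblk_t *mp, *bp;

	while ((mp = getq(q)) != NULL) {
		if ((bp = allocb(64, BPRI_MED)) == NULL) {
			/* out of buffers: park mp and wait for memory */
			xx_bid = qbufcall(q, 64, BPRI_MED, xx_reenable, q);
			(void) putbq(q, mp);
			return (0);
		}
		/* ... build the 64-byte header in bp (not shown) ... */
		linkb(bp, mp);
		putnext(q, bp);
	}
	return (0);
}
#endif
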
1053*7c478bd9Sstevel@tonic-gate /*
1054*7c478bd9Sstevel@tonic-gate  * Cancel a timeout callback which enters the inner perimeter.
1055*7c478bd9Sstevel@tonic-gate  * Cancelling of all callback types on a given syncq is serialized.
1056*7c478bd9Sstevel@tonic-gate  * The SQ_CALLB_BYPASSED flag indicates that the callback fn did
1057*7c478bd9Sstevel@tonic-gate  * not execute; the quntimeout return value needs to reflect this.
1058*7c478bd9Sstevel@tonic-gate  * As with our existing callback programming model, callbacks must
1059*7c478bd9Sstevel@tonic-gate  * be cancelled before a close completes, ensuring that the sq
1060*7c478bd9Sstevel@tonic-gate  * is valid when the callback wrapper is executed.
1061*7c478bd9Sstevel@tonic-gate  */
1062*7c478bd9Sstevel@tonic-gate clock_t
1063*7c478bd9Sstevel@tonic-gate quntimeout(queue_t *q, timeout_id_t id)
1064*7c478bd9Sstevel@tonic-gate {
1065*7c478bd9Sstevel@tonic-gate 	syncq_t *sq = q->q_syncq;
1066*7c478bd9Sstevel@tonic-gate 	clock_t ret;
1067*7c478bd9Sstevel@tonic-gate 
1068*7c478bd9Sstevel@tonic-gate 	mutex_enter(SQLOCK(sq));
1069*7c478bd9Sstevel@tonic-gate 	/* callbacks are processed serially on each syncq */
1070*7c478bd9Sstevel@tonic-gate 	while (sq->sq_callbflags & SQ_CALLB_CANCEL_MASK) {
1071*7c478bd9Sstevel@tonic-gate 		sq->sq_flags |= SQ_WANTWAKEUP;
1072*7c478bd9Sstevel@tonic-gate 		cv_wait(&sq->sq_wait, SQLOCK(sq));
1073*7c478bd9Sstevel@tonic-gate 	}
1074*7c478bd9Sstevel@tonic-gate 	sq->sq_cancelid = (callbparams_id_t)id;
1075*7c478bd9Sstevel@tonic-gate 	sq->sq_callbflags = SQ_CANCEL_TOUT;
1076*7c478bd9Sstevel@tonic-gate 	if (sq->sq_flags & SQ_WANTWAKEUP) {
1077*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&sq->sq_wait);
1078*7c478bd9Sstevel@tonic-gate 		sq->sq_flags &= ~SQ_WANTWAKEUP;
1079*7c478bd9Sstevel@tonic-gate 	}
1080*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
1081*7c478bd9Sstevel@tonic-gate 	ret = untimeout(id);
1082*7c478bd9Sstevel@tonic-gate 	mutex_enter(SQLOCK(sq));
1083*7c478bd9Sstevel@tonic-gate 	if (ret != -1) {
1084*7c478bd9Sstevel@tonic-gate 		/* The wrapper was never called - need to free based on id */
1085*7c478bd9Sstevel@tonic-gate 		callbparams_free_id(sq, (callbparams_id_t)id, SQ_CANCEL_TOUT);
1086*7c478bd9Sstevel@tonic-gate 	}
1087*7c478bd9Sstevel@tonic-gate 	if (sq->sq_callbflags & SQ_CALLB_BYPASSED) {
1088*7c478bd9Sstevel@tonic-gate 		ret = 0;	/* callback fn was bypassed; report 0 time left */
1089*7c478bd9Sstevel@tonic-gate 	}
1090*7c478bd9Sstevel@tonic-gate 	sq->sq_callbflags = 0;
1091*7c478bd9Sstevel@tonic-gate 	if (sq->sq_flags & SQ_WANTWAKEUP) {
1092*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&sq->sq_wait);
1093*7c478bd9Sstevel@tonic-gate 		sq->sq_flags &= ~SQ_WANTWAKEUP;
1094*7c478bd9Sstevel@tonic-gate 	}
1095*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
1096*7c478bd9Sstevel@tonic-gate 	return (ret);
1097*7c478bd9Sstevel@tonic-gate }
1098*7c478bd9Sstevel@tonic-gate 
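/*
 * Illustrative sketch, not part of this file: per the rule above, an
 * outstanding qtimeout() must be cancelled before close completes.
 * The xx_tid variable is hypothetical (set by an earlier qtimeout()).
 */
#if 0
static timeout_id_t xx_tid;		/* hypothetical pending-timeout id */

static int
xx_close(queue_t *q, int flag, cred_t *crp)
{
	if (xx_tid != 0) {
		/* after this returns, the handler has run or never will */
		(void) quntimeout(q, xx_tid);
		xx_tid = 0;
	}
	qprocsoff(q);
	return (0);
}
#endif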
1099*7c478bd9Sstevel@tonic-gate 
1100*7c478bd9Sstevel@tonic-gate void
1101*7c478bd9Sstevel@tonic-gate qunbufcall(queue_t *q, bufcall_id_t id)
1102*7c478bd9Sstevel@tonic-gate {
1103*7c478bd9Sstevel@tonic-gate 	syncq_t *sq = q->q_syncq;
1104*7c478bd9Sstevel@tonic-gate 
1105*7c478bd9Sstevel@tonic-gate 	mutex_enter(SQLOCK(sq));
1106*7c478bd9Sstevel@tonic-gate 	/* callbacks are processed serially on each syncq */
1107*7c478bd9Sstevel@tonic-gate 	while (sq->sq_callbflags & SQ_CALLB_CANCEL_MASK) {
1108*7c478bd9Sstevel@tonic-gate 		sq->sq_flags |= SQ_WANTWAKEUP;
1109*7c478bd9Sstevel@tonic-gate 		cv_wait(&sq->sq_wait, SQLOCK(sq));
1110*7c478bd9Sstevel@tonic-gate 	}
1111*7c478bd9Sstevel@tonic-gate 	sq->sq_cancelid = (callbparams_id_t)id;
1112*7c478bd9Sstevel@tonic-gate 	sq->sq_callbflags = SQ_CANCEL_BUFCALL;
1113*7c478bd9Sstevel@tonic-gate 	if (sq->sq_flags & SQ_WANTWAKEUP) {
1114*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&sq->sq_wait);
1115*7c478bd9Sstevel@tonic-gate 		sq->sq_flags &= ~SQ_WANTWAKEUP;
1116*7c478bd9Sstevel@tonic-gate 	}
1117*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
1118*7c478bd9Sstevel@tonic-gate 	unbufcall(id);
1119*7c478bd9Sstevel@tonic-gate 	mutex_enter(SQLOCK(sq));
1120*7c478bd9Sstevel@tonic-gate 	/*
1121*7c478bd9Sstevel@tonic-gate 	 * No indication from unbufcall if the callback has already run.
1122*7c478bd9Sstevel@tonic-gate 	 * Always attempt to free it.
1123*7c478bd9Sstevel@tonic-gate 	 */
1124*7c478bd9Sstevel@tonic-gate 	callbparams_free_id(sq, (callbparams_id_t)id, SQ_CANCEL_BUFCALL);
1125*7c478bd9Sstevel@tonic-gate 	sq->sq_callbflags = 0;
1126*7c478bd9Sstevel@tonic-gate 	if (sq->sq_flags & SQ_WANTWAKEUP) {
1127*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&sq->sq_wait);
1128*7c478bd9Sstevel@tonic-gate 		sq->sq_flags &= ~SQ_WANTWAKEUP;
1129*7c478bd9Sstevel@tonic-gate 	}
1130*7c478bd9Sstevel@tonic-gate 	mutex_exit(SQLOCK(sq));
1131*7c478bd9Sstevel@tonic-gate }
1132*7c478bd9Sstevel@tonic-gate 
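/*
 * Illustrative sketch, not part of this file: as with quntimeout(), an
 * outstanding qbufcall() is cancelled before close completes.  The
 * xx_bid variable is hypothetical (set by an earlier qbufcall()).
 */
#if 0
static bufcall_id_t xx_bid;		/* hypothetical pending-bufcall id */

static void
xx_cancel_bufcall(queue_t *q)
{
	if (xx_bid != 0) {
		qunbufcall(q, xx_bid);	/* serialized with other cancels */
		xx_bid = 0;
	}
}
#endif
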
1133*7c478bd9Sstevel@tonic-gate /*
1134*7c478bd9Sstevel@tonic-gate  * Associate a stream with an instance of the bottom driver.
1135*7c478bd9Sstevel@tonic-gate  * This interface may be called from a STREAMS driver's put
1136*7c478bd9Sstevel@tonic-gate  * procedure, so it cannot block.
1137*7c478bd9Sstevel@tonic-gate  */
1138*7c478bd9Sstevel@tonic-gate int
1139*7c478bd9Sstevel@tonic-gate qassociate(queue_t *q, int instance)
1140*7c478bd9Sstevel@tonic-gate {
1141*7c478bd9Sstevel@tonic-gate 	vnode_t *vp;
1142*7c478bd9Sstevel@tonic-gate 	major_t major;
1143*7c478bd9Sstevel@tonic-gate 	dev_info_t *dip;
1144*7c478bd9Sstevel@tonic-gate 
1145*7c478bd9Sstevel@tonic-gate 	if (instance == -1) {
1146*7c478bd9Sstevel@tonic-gate 		ddi_assoc_queue_with_devi(q, NULL);
1147*7c478bd9Sstevel@tonic-gate 		return (0);
1148*7c478bd9Sstevel@tonic-gate 	}
1149*7c478bd9Sstevel@tonic-gate 
1150*7c478bd9Sstevel@tonic-gate 	vp = STREAM(q)->sd_vnode;
1151*7c478bd9Sstevel@tonic-gate 	major = getmajor(vp->v_rdev);
1152*7c478bd9Sstevel@tonic-gate 	dip = ddi_hold_devi_by_instance(major, instance,
1153*7c478bd9Sstevel@tonic-gate 	    E_DDI_HOLD_DEVI_NOATTACH);
1154*7c478bd9Sstevel@tonic-gate 	if (dip == NULL)
1155*7c478bd9Sstevel@tonic-gate 		return (-1);
1156*7c478bd9Sstevel@tonic-gate 
1157*7c478bd9Sstevel@tonic-gate 	ddi_assoc_queue_with_devi(q, dip);
1158*7c478bd9Sstevel@tonic-gate 	ddi_release_devi(dip);
1159*7c478bd9Sstevel@tonic-gate 	return (0);
1160*7c478bd9Sstevel@tonic-gate }
1161*7c478bd9Sstevel@tonic-gate 
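/*
 * Illustrative sketch, not part of this file: a self-cloning driver
 * typically calls qassociate() from its open routine once an instance
 * has been chosen, and dissociates with -1 on close or error paths.
 * The xx_* routines and the instance selection are hypothetical.
 */
#if 0
static int
xx_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *crp)
{
	int instance = getminor(*devp);	/* hypothetical instance selection */

	if (qassociate(q, instance) != 0)
		return (ENXIO);		/* no such instance, or not attached */

	qprocson(q);
	return (0);
}

static int
xx_close(queue_t *q, int flag, cred_t *crp)
{
	qprocsoff(q);
	(void) qassociate(q, -1);	/* dissociate before close completes */
	return (0);
}
#endif
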
1162*7c478bd9Sstevel@tonic-gate /*
1163*7c478bd9Sstevel@tonic-gate  * This routine is the SVR4MP 'replacement' for
1164*7c478bd9Sstevel@tonic-gate  * hat_getkpfnum.  The only major difference is
1165*7c478bd9Sstevel@tonic-gate  * the return value for illegal addresses - since
1166*7c478bd9Sstevel@tonic-gate  * sunm_getkpfnum() and srmmu_getkpfnum() both
1167*7c478bd9Sstevel@tonic-gate  * return '-1' for bogus mappings, we can (more or
1168*7c478bd9Sstevel@tonic-gate  * less) return the value directly.
1169*7c478bd9Sstevel@tonic-gate  */
1170*7c478bd9Sstevel@tonic-gate ppid_t
1171*7c478bd9Sstevel@tonic-gate kvtoppid(caddr_t addr)
1172*7c478bd9Sstevel@tonic-gate {
1173*7c478bd9Sstevel@tonic-gate 	return ((ppid_t)hat_getpfnum(kas.a_hat, addr));
1174*7c478bd9Sstevel@tonic-gate }
1175*7c478bd9Sstevel@tonic-gate 
1176*7c478bd9Sstevel@tonic-gate /*
1177*7c478bd9Sstevel@tonic-gate  * This is used to set the timeout value for cv_timed_wait() or
1178*7c478bd9Sstevel@tonic-gate  * This is used to set the timeout value for cv_timedwait() or
1179*7c478bd9Sstevel@tonic-gate  */
1180*7c478bd9Sstevel@tonic-gate void
1181*7c478bd9Sstevel@tonic-gate time_to_wait(clock_t *now, clock_t time)
1182*7c478bd9Sstevel@tonic-gate {
1183*7c478bd9Sstevel@tonic-gate 	*now = lbolt + time;
1184*7c478bd9Sstevel@tonic-gate }
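
/*
 * Illustrative sketch, not part of this file: a caller uses
 * time_to_wait() to turn a relative delay into the absolute lbolt-based
 * value that cv_timedwait()/cv_timedwait_sig() expect.  The xx_* names
 * are hypothetical.
 */
#if 0
static kmutex_t xx_lock;		/* hypothetical driver state */
static kcondvar_t xx_cv;
static boolean_t xx_ready;

static int
xx_wait_ready(void)
{
	clock_t wake;

	/* wake up at most ~500 ms from now */
	time_to_wait(&wake, drv_usectohz(500000));

	mutex_enter(&xx_lock);
	while (!xx_ready) {
		if (cv_timedwait(&xx_cv, &xx_lock, wake) == -1) {
			mutex_exit(&xx_lock);
			return (ETIME);		/* timed out */
		}
	}
	mutex_exit(&xx_lock);
	return (0);
}
#endif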
1185