xref: /titanic_51/usr/src/uts/common/io/winlockio.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*7c478bd9Sstevel@tonic-gate 
29*7c478bd9Sstevel@tonic-gate /*
30*7c478bd9Sstevel@tonic-gate  * This is the lock device driver.
31*7c478bd9Sstevel@tonic-gate  *
32*7c478bd9Sstevel@tonic-gate  * The lock driver provides a variation of inter-process mutexes with the
33*7c478bd9Sstevel@tonic-gate  * following twist in semantics:
34*7c478bd9Sstevel@tonic-gate  *	A waiter for a lock after a set timeout can "break" the lock and
35*7c478bd9Sstevel@tonic-gate  *	grab it from the current owner (without informing the owner).
36*7c478bd9Sstevel@tonic-gate  *
37*7c478bd9Sstevel@tonic-gate  * These semantics result in temporarily multiple processes thinking they
38*7c478bd9Sstevel@tonic-gate  * own the lock. This usually does not make sense for cases where locks are
39*7c478bd9Sstevel@tonic-gate  * used to protect a critical region and it is important to serialize access
40*7c478bd9Sstevel@tonic-gate  * to data structures. As breaking the lock will also lose the serialization
41*7c478bd9Sstevel@tonic-gate  * and result in corrupt data structures.
42*7c478bd9Sstevel@tonic-gate  *
43*7c478bd9Sstevel@tonic-gate  * The usage for winlock driver is primarily driven by the graphics system
44*7c478bd9Sstevel@tonic-gate  * when doing DGA (direct graphics access) graphics. The locks are used to
45*7c478bd9Sstevel@tonic-gate  * protect access to the frame buffer (presumably reflects back to the screen)
46*7c478bd9Sstevel@tonic-gate  * between competing processes that directly write to the screen as opposed
47*7c478bd9Sstevel@tonic-gate  * to going through the window server etc.
48*7c478bd9Sstevel@tonic-gate  * In this case, the result of breaking the lock at worst causes the screen
49*7c478bd9Sstevel@tonic-gate  * image to be distorted and is easily fixed by doing a "refresh"
50*7c478bd9Sstevel@tonic-gate  *
51*7c478bd9Sstevel@tonic-gate  * In well-behaved applications, the lock is held for a very short time and
52*7c478bd9Sstevel@tonic-gate  * the breaking semantics do not come into play. Not having this feature and
53*7c478bd9Sstevel@tonic-gate  * using normal inter-process mutexes will result in a misbehaved application
54*7c478bd9Sstevel@tonic-gate  * from grabbing the screen writing capability from the window manager and
55*7c478bd9Sstevel@tonic-gate  * effectively make the system look like it is hung (mouse pointer does not
56*7c478bd9Sstevel@tonic-gate  * move).
57*7c478bd9Sstevel@tonic-gate  *
58*7c478bd9Sstevel@tonic-gate  * A secondary aspect of the winlock driver is that it allows for extremely
59*7c478bd9Sstevel@tonic-gate  * fast lock acquire/release in cases where there is low contention. A memory
60*7c478bd9Sstevel@tonic-gate  * write is all that is needed (not even a function call). And the window
61*7c478bd9Sstevel@tonic-gate  * manager is the only DGA writer usually and this is optimized for. Occasionally
62*7c478bd9Sstevel@tonic-gate  * some processes might do DGA graphics and cause kernel faults to handle
63*7c478bd9Sstevel@tonic-gate  * the contention/locking (and that has got to be slow!).
64*7c478bd9Sstevel@tonic-gate  *
65*7c478bd9Sstevel@tonic-gate  * The following IOCTLs are supported:
66*7c478bd9Sstevel@tonic-gate  *
67*7c478bd9Sstevel@tonic-gate  *   GRABPAGEALLOC:
68*7c478bd9Sstevel@tonic-gate  *	Compatibility with old cgsix device driver lockpage ioctls.
69*7c478bd9Sstevel@tonic-gate  *	Lockpages created this way must be an entire page for compatibility with
70*7c478bd9Sstevel@tonic-gate  *	older software.	 This ioctl allocates a lock context with its own
71*7c478bd9Sstevel@tonic-gate  *	private lock page.  The unique "ident" that identifies this lock is
72*7c478bd9Sstevel@tonic-gate  *	returned.
73*7c478bd9Sstevel@tonic-gate  *
74*7c478bd9Sstevel@tonic-gate  *   GRABPAGEFREE:
75*7c478bd9Sstevel@tonic-gate  *	Compatibility with cgsix device driver lockpage ioctls.	 This
76*7c478bd9Sstevel@tonic-gate  *	ioctl releases the lock context allocated by GRABPAGEALLOC.
77*7c478bd9Sstevel@tonic-gate  *
78*7c478bd9Sstevel@tonic-gate  *   GRABLOCKINFO:
79*7c478bd9Sstevel@tonic-gate  *	Returns a one-word flag.  '1' means that multiple clients may
80*7c478bd9Sstevel@tonic-gate  *	access this lock page.	Older device drivers returned '0',
81*7c478bd9Sstevel@tonic-gate  *	meaning that only two clients could access a lock page.
82*7c478bd9Sstevel@tonic-gate  *
83*7c478bd9Sstevel@tonic-gate  *   GRABATTACH:
84*7c478bd9Sstevel@tonic-gate  *	Not supported.	This ioctl would have grabbed all lock pages
85*7c478bd9Sstevel@tonic-gate  *	on behalf of the calling program.
86*7c478bd9Sstevel@tonic-gate  *
87*7c478bd9Sstevel@tonic-gate  *   WINLOCKALLOC:
88*7c478bd9Sstevel@tonic-gate  *	Allocate a lock context.  This ioctl accepts a key value.  as
89*7c478bd9Sstevel@tonic-gate  *	its argument.  If the key is zero, a new lock context is
90*7c478bd9Sstevel@tonic-gate  *	created, and its "ident" is returned.	If the key is nonzero,
91*7c478bd9Sstevel@tonic-gate  *	all existing contexts are checked to see if they match the
92*7c478bd9Sstevel@tonic-gate  *	key.  If a match is found, its reference count is incremented
93*7c478bd9Sstevel@tonic-gate  *	and its ident is returned, otherwise a new context is created
94*7c478bd9Sstevel@tonic-gate  *	and its ident is returned.
95*7c478bd9Sstevel@tonic-gate  *
96*7c478bd9Sstevel@tonic-gate  *   WINLOCKFREE:
97*7c478bd9Sstevel@tonic-gate  *	Free a lock context.  This ioctl accepts the ident of a lock
98*7c478bd9Sstevel@tonic-gate  *	context and decrements its reference count.  Once the reference
99*7c478bd9Sstevel@tonic-gate  *	count reaches zero *and* all mappings are released, the lock
100*7c478bd9Sstevel@tonic-gate  *	context is freed.  When all the lock context in the lock page are
101*7c478bd9Sstevel@tonic-gate  *	freed, the lock page is freed as well.
102*7c478bd9Sstevel@tonic-gate  *
103*7c478bd9Sstevel@tonic-gate  *   WINLOCKSETTIMEOUT:
104*7c478bd9Sstevel@tonic-gate  *	Set lock timeout for a context.	 This ioctl accepts the ident
105*7c478bd9Sstevel@tonic-gate  *	of a lock context and a timeout value in milliseconds.
106*7c478bd9Sstevel@tonic-gate  *	Whenever lock contention occurs, the timer is started and the lock is
107*7c478bd9Sstevel@tonic-gate  *	broken after the timeout expires. If timeout value is zero, lock does
108*7c478bd9Sstevel@tonic-gate  *	not timeout.  This value will be rounded to the nearest clock
109*7c478bd9Sstevel@tonic-gate  *	tick, so don't try to use it for real-time control or something.
110*7c478bd9Sstevel@tonic-gate  *
111*7c478bd9Sstevel@tonic-gate  *   WINLOCKGETTIMEOUT:
112*7c478bd9Sstevel@tonic-gate  *	Get lock timeout from a context.
113*7c478bd9Sstevel@tonic-gate  *
114*7c478bd9Sstevel@tonic-gate  *   WINLOCKDUMP:
115*7c478bd9Sstevel@tonic-gate  *	Dump state of this device.
116*7c478bd9Sstevel@tonic-gate  *
117*7c478bd9Sstevel@tonic-gate  *
118*7c478bd9Sstevel@tonic-gate  * How /dev/winlock works:
119*7c478bd9Sstevel@tonic-gate  *
120*7c478bd9Sstevel@tonic-gate  *   Every lock context consists of two mappings for the client to the lock
121*7c478bd9Sstevel@tonic-gate  *   page.  These mappings are known as the "lock page" and "unlock page"
122*7c478bd9Sstevel@tonic-gate  *   to the client. The first mmap to the lock context (identified by the
123*7c478bd9Sstevel@tonic-gate  *   sy_ident field returns during alloc) allocates mapping to the lock page,
124*7c478bd9Sstevel@tonic-gate  *   the second mmap allocates a mapping to the unlock page.
125*7c478bd9Sstevel@tonic-gate  *	The mappings don't have to be ordered in virtual address space, but do
126*7c478bd9Sstevel@tonic-gate  *   need to be ordered in time. Mapping and unmapping of these lock and unlock
127*7c478bd9Sstevel@tonic-gate  *   pages should happen in pairs. Doing them one at a time or unmapping one
128*7c478bd9Sstevel@tonic-gate  *   and leaving one mapped etc cause undefined behaviors.
129*7c478bd9Sstevel@tonic-gate  *	The mappings are always of length PAGESIZE, and type MAP_SHARED.
130*7c478bd9Sstevel@tonic-gate  *
131*7c478bd9Sstevel@tonic-gate  *   The first ioctl is to ALLOC a lock, either based on a key (if trying to
132*7c478bd9Sstevel@tonic-gate  *	grab a preexisting lock) or 0 (gets a default new one)
133*7c478bd9Sstevel@tonic-gate  *	This ioctl returns a value in sy_ident which is needed to do the
134*7c478bd9Sstevel@tonic-gate  *	later mmaps and FREE/other ioctls.
135*7c478bd9Sstevel@tonic-gate  *
136*7c478bd9Sstevel@tonic-gate  *   The "page number" portion of the sy_ident needs to be passed as the
137*7c478bd9Sstevel@tonic-gate  *	file offset when doing an mmap for both the lock page and unlock page
138*7c478bd9Sstevel@tonic-gate  *
139*7c478bd9Sstevel@tonic-gate  *   The value returned by mmap ( a user virtual address) needs to be
140*7c478bd9Sstevel@tonic-gate  *	incremented by the "page offset" portion of sy_ident to obtain the
141*7c478bd9Sstevel@tonic-gate  *	pointer to the actual lock. (Skipping this step, does not cause any
142*7c478bd9Sstevel@tonic-gate  *	visible error, but the process will be using the wrong lock!)
143*7c478bd9Sstevel@tonic-gate  *
144*7c478bd9Sstevel@tonic-gate  *	On a fork(), the child process will inherit the mappings for free, but
145*7c478bd9Sstevel@tonic-gate  *   will not inherit the parent's lock ownership if any. The child should NOT
146*7c478bd9Sstevel@tonic-gate  *   do an explicit FREE on the lock context unless it did an explicit ALLOC.
147*7c478bd9Sstevel@tonic-gate  *	Only one process at a time is allowed to have a valid hat
148*7c478bd9Sstevel@tonic-gate  *   mapping to a lock page. This is enforced by this driver.
149*7c478bd9Sstevel@tonic-gate  *   A client acquires a lock by writing a '1' to the lock page.
150*7c478bd9Sstevel@tonic-gate  *   Note, that it is not necessary to read and verify that the lock is '0'
151*7c478bd9Sstevel@tonic-gate  *	prior to writing a '1' in it.
152*7c478bd9Sstevel@tonic-gate  *   If it does not already have a valid mapping to that page, the driver
153*7c478bd9Sstevel@tonic-gate  *   takes a fault (devmap_access), loads the client mapping
154*7c478bd9Sstevel@tonic-gate  *   and allows the client to continue.	 The client releases the lock by
155*7c478bd9Sstevel@tonic-gate  *   writing a '0' to the unlock page.	Again, if it does not have a valid
156*7c478bd9Sstevel@tonic-gate  *   mapping to the unlock page, the segment driver takes a fault,
157*7c478bd9Sstevel@tonic-gate  *   loads the mapping, and lets the client continue.  From this point
158*7c478bd9Sstevel@tonic-gate  *   forward, the client can make as many locks and unlocks as it
159*7c478bd9Sstevel@tonic-gate  *   wants, without any more faults into the kernel.
160*7c478bd9Sstevel@tonic-gate  *
161*7c478bd9Sstevel@tonic-gate  *   If a different process wants to acquire a lock, it takes a page fault
162*7c478bd9Sstevel@tonic-gate  *   when it writes the '1' to the lock page.  If the segment driver sees
163*7c478bd9Sstevel@tonic-gate  *   that the lock page contained a zero, then it invalidates the owner's
164*7c478bd9Sstevel@tonic-gate  *   mappings and gives the mappings to this process.
165*7c478bd9Sstevel@tonic-gate  *
166*7c478bd9Sstevel@tonic-gate  *   If there is already a '1' in the lock page when the second client
167*7c478bd9Sstevel@tonic-gate  *   tries to access the lock page, then a lock exists.	 The segment
168*7c478bd9Sstevel@tonic-gate  *   driver sleeps the second client and, if applicable, starts the
169*7c478bd9Sstevel@tonic-gate  *   timeout on the lock.  The owner's mapping to the unlock page
170*7c478bd9Sstevel@tonic-gate  *   is invalidated so that the driver will be woken again when the owner
171*7c478bd9Sstevel@tonic-gate  *   releases the lock.
172*7c478bd9Sstevel@tonic-gate  *
173*7c478bd9Sstevel@tonic-gate  *   When the locking client finally writes a '0' to the unlock page, the
174*7c478bd9Sstevel@tonic-gate  *   segment driver takes another fault.  The client is given a valid
175*7c478bd9Sstevel@tonic-gate  *   mapping, not to the unlock page, but to the "trash page", and allowed
176*7c478bd9Sstevel@tonic-gate  *   to continue.  Meanwhile, the sleeping client is given a valid mapping
177*7c478bd9Sstevel@tonic-gate  *   to the lock/unlock pages and allowed to continue as well.
178*7c478bd9Sstevel@tonic-gate  *
179*7c478bd9Sstevel@tonic-gate  * RFE: There is a leak if process exits before freeing allocated locks
180*7c478bd9Sstevel@tonic-gate  * But currently not tracking which locks were allocated by which
181*7c478bd9Sstevel@tonic-gate  * process and we do not have a clean entry point into the driver
182*7c478bd9Sstevel@tonic-gate  * to do garbage collection. If the interface used a file descriptor for each
183*7c478bd9Sstevel@tonic-gate  * lock it allocs, then the driver can free up stuff in the _close routine
184*7c478bd9Sstevel@tonic-gate  */
185*7c478bd9Sstevel@tonic-gate 
186*7c478bd9Sstevel@tonic-gate #include <sys/types.h>		/* various type defn's */
187*7c478bd9Sstevel@tonic-gate #include <sys/debug.h>
188*7c478bd9Sstevel@tonic-gate #include <sys/param.h>		/* various kernel limits */
189*7c478bd9Sstevel@tonic-gate #include <sys/time.h>
190*7c478bd9Sstevel@tonic-gate #include <sys/errno.h>
191*7c478bd9Sstevel@tonic-gate #include <sys/kmem.h>		/* defines kmem_alloc() */
192*7c478bd9Sstevel@tonic-gate #include <sys/conf.h>		/* defines cdevsw */
193*7c478bd9Sstevel@tonic-gate #include <sys/file.h>		/* various file modes, etc. */
194*7c478bd9Sstevel@tonic-gate #include <sys/uio.h>		/* UIO stuff */
195*7c478bd9Sstevel@tonic-gate #include <sys/ioctl.h>
196*7c478bd9Sstevel@tonic-gate #include <sys/cred.h>		/* defines cred struct */
197*7c478bd9Sstevel@tonic-gate #include <sys/mman.h>		/* defines mmap(2) parameters */
198*7c478bd9Sstevel@tonic-gate #include <sys/stat.h>		/* defines S_IFCHR */
199*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>	/* use cmn_err */
200*7c478bd9Sstevel@tonic-gate #include <sys/ddi.h>		/* ddi stuff */
201*7c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>		/* ddi stuff */
202*7c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h>	/* ddi stuff */
203*7c478bd9Sstevel@tonic-gate #include <sys/winlockio.h>	/* defines ioctls, flags, data structs */
204*7c478bd9Sstevel@tonic-gate 
205*7c478bd9Sstevel@tonic-gate static int	winlock_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
206*7c478bd9Sstevel@tonic-gate static int	winlock_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
207*7c478bd9Sstevel@tonic-gate 			size_t *, uint_t);
208*7c478bd9Sstevel@tonic-gate static int	winlocksegmap(dev_t, off_t, struct as *, caddr_t *, off_t,
209*7c478bd9Sstevel@tonic-gate 			uint_t, uint_t, uint_t, cred_t *);
210*7c478bd9Sstevel@tonic-gate 
211*7c478bd9Sstevel@tonic-gate static struct cb_ops	winlock_cb_ops = {
212*7c478bd9Sstevel@tonic-gate 	nulldev,		/* open */
213*7c478bd9Sstevel@tonic-gate 	nulldev,		/* close */
214*7c478bd9Sstevel@tonic-gate 	nodev,			/* strategy */
215*7c478bd9Sstevel@tonic-gate 	nodev,			/* print */
216*7c478bd9Sstevel@tonic-gate 	nodev,			/* dump */
217*7c478bd9Sstevel@tonic-gate 	nodev,			/* read */
218*7c478bd9Sstevel@tonic-gate 	nodev,			/* write */
219*7c478bd9Sstevel@tonic-gate 	winlock_ioctl,		/* ioctl */
220*7c478bd9Sstevel@tonic-gate 	winlock_devmap,		/* devmap */
221*7c478bd9Sstevel@tonic-gate 	nodev,			/* mmap */
222*7c478bd9Sstevel@tonic-gate 	winlocksegmap,		/* segmap */
223*7c478bd9Sstevel@tonic-gate 	nochpoll,		/* poll */
224*7c478bd9Sstevel@tonic-gate 	ddi_prop_op,		/* prop_op */
225*7c478bd9Sstevel@tonic-gate 	NULL,			/* streamtab */
226*7c478bd9Sstevel@tonic-gate 	D_NEW|D_MP|D_DEVMAP,	/* Driver compatibility flag */
227*7c478bd9Sstevel@tonic-gate 	0,			/* rev */
228*7c478bd9Sstevel@tonic-gate 	nodev,			/* aread */
229*7c478bd9Sstevel@tonic-gate 	nodev			/* awrite */
230*7c478bd9Sstevel@tonic-gate };
231*7c478bd9Sstevel@tonic-gate 
232*7c478bd9Sstevel@tonic-gate static int winlock_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
233*7c478bd9Sstevel@tonic-gate static int winlock_attach(dev_info_t *, ddi_attach_cmd_t);
234*7c478bd9Sstevel@tonic-gate static int winlock_detach(dev_info_t *, ddi_detach_cmd_t);
235*7c478bd9Sstevel@tonic-gate 
236*7c478bd9Sstevel@tonic-gate static struct dev_ops	winlock_ops = {
237*7c478bd9Sstevel@tonic-gate 	DEVO_REV,
238*7c478bd9Sstevel@tonic-gate 	0,			/* refcount */
239*7c478bd9Sstevel@tonic-gate 	winlock_info,		/* info */
240*7c478bd9Sstevel@tonic-gate 	nulldev,		/* identify */
241*7c478bd9Sstevel@tonic-gate 	nulldev,		/* probe */
242*7c478bd9Sstevel@tonic-gate 	winlock_attach,		/* attach */
243*7c478bd9Sstevel@tonic-gate 	winlock_detach,		/* detach */
244*7c478bd9Sstevel@tonic-gate 	nodev,			/* reset */
245*7c478bd9Sstevel@tonic-gate 	&winlock_cb_ops,	/* driver ops */
246*7c478bd9Sstevel@tonic-gate 	NULL,			/* bus ops */
247*7c478bd9Sstevel@tonic-gate 	NULL			/* power */
248*7c478bd9Sstevel@tonic-gate };
249*7c478bd9Sstevel@tonic-gate 
250*7c478bd9Sstevel@tonic-gate static int winlockmap_map(devmap_cookie_t, dev_t, uint_t, offset_t, size_t,
251*7c478bd9Sstevel@tonic-gate 		void **);
252*7c478bd9Sstevel@tonic-gate static void winlockmap_unmap(devmap_cookie_t, void *, offset_t, size_t,
253*7c478bd9Sstevel@tonic-gate 		devmap_cookie_t, void **, devmap_cookie_t, void **);
254*7c478bd9Sstevel@tonic-gate static int winlockmap_dup(devmap_cookie_t, void *,
255*7c478bd9Sstevel@tonic-gate 		devmap_cookie_t, void **);
256*7c478bd9Sstevel@tonic-gate static int winlockmap_access(devmap_cookie_t, void *, offset_t, size_t,
257*7c478bd9Sstevel@tonic-gate 		uint_t, uint_t);
258*7c478bd9Sstevel@tonic-gate 
259*7c478bd9Sstevel@tonic-gate static
260*7c478bd9Sstevel@tonic-gate struct devmap_callback_ctl winlockmap_ops = {
261*7c478bd9Sstevel@tonic-gate 	DEVMAP_OPS_REV,
262*7c478bd9Sstevel@tonic-gate 	winlockmap_map,
263*7c478bd9Sstevel@tonic-gate 	winlockmap_access,
264*7c478bd9Sstevel@tonic-gate 	winlockmap_dup,
265*7c478bd9Sstevel@tonic-gate 	winlockmap_unmap,
266*7c478bd9Sstevel@tonic-gate };
267*7c478bd9Sstevel@tonic-gate 
#if DEBUG
static	int	lock_debug = 0;
/*
 * DEBUGF(level, args): emit a cmn_err message when lock_debug >= level.
 * 'args' must be a fully parenthesized cmn_err argument list.
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement: the original brace-block form would not compile in an
 * unbraced if/else, e.g.  "if (c) DEBUGF(1, ...); else ...".
 */
#define	DEBUGF(level, args)	\
	do { if (lock_debug >= (level)) cmn_err args; } while (0)
#else
/* Non-DEBUG build: expands to an empty statement, still else-safe */
#define	DEBUGF(level, args)	do { } while (0)
#endif
274*7c478bd9Sstevel@tonic-gate 
275*7c478bd9Sstevel@tonic-gate /* Two lock styles: OLDSTYLE (cgsix GRABPAGE* compat, each lock in its own private page) and NEWSTYLE (WINLOCK* ioctls, locks packed into a shared lockpage) */
276*7c478bd9Sstevel@tonic-gate enum winlock_style { NEWSTYLE_LOCK, OLDSTYLE_LOCK };
277*7c478bd9Sstevel@tonic-gate 
278*7c478bd9Sstevel@tonic-gate /*
279*7c478bd9Sstevel@tonic-gate  * These structures describe a lock context.  We permit multiple
280*7c478bd9Sstevel@tonic-gate  * clients (not just two) to access a lock page
281*7c478bd9Sstevel@tonic-gate  *
282*7c478bd9Sstevel@tonic-gate  * The "cookie" identifies the lock context. It is the page number portion
283*7c478bd9Sstevel@tonic-gate  * sy_ident returned on lock allocation. Cookie is used in later ioctls.
284*7c478bd9Sstevel@tonic-gate  * "cookie" is lockid * PAGESIZE
285*7c478bd9Sstevel@tonic-gate  * "lockptr" is the kernel virtual address to the lock itself
286*7c478bd9Sstevel@tonic-gate  * The page offset portion of lockptr is the page offset portion of sy_ident
287*7c478bd9Sstevel@tonic-gate  */
288*7c478bd9Sstevel@tonic-gate 
289*7c478bd9Sstevel@tonic-gate /*
290*7c478bd9Sstevel@tonic-gate  * per-process information about locks.  This is the private field of
291*7c478bd9Sstevel@tonic-gate  * a devmap mapping.  Note that usually *two* mappings point to this.
292*7c478bd9Sstevel@tonic-gate  */
293*7c478bd9Sstevel@tonic-gate 
294*7c478bd9Sstevel@tonic-gate /*
295*7c478bd9Sstevel@tonic-gate  * Each process using winlock is associated with a segproc structure
296*7c478bd9Sstevel@tonic-gate  * In various driver entry points, we need to search to find the right
297*7c478bd9Sstevel@tonic-gate  * segproc structure (If we were using file handles for each lock this
298*7c478bd9Sstevel@tonic-gate  * would not have been necessary).
299*7c478bd9Sstevel@tonic-gate  * It would have been simple to use the process pid (and ddi_get_pid)
300*7c478bd9Sstevel@tonic-gate  * However, during fork devmap_dup is called in the parent process context
301*7c478bd9Sstevel@tonic-gate  * and using the pid complicates the code by introducing orphans.
302*7c478bd9Sstevel@tonic-gate  * Instead we use the as pointer for the process as a cookie
303*7c478bd9Sstevel@tonic-gate  * which requires delving into various non-DDI kosher structs
304*7c478bd9Sstevel@tonic-gate  */
305*7c478bd9Sstevel@tonic-gate typedef struct segproc {
306*7c478bd9Sstevel@tonic-gate 	struct segproc	*next;		/* next client of this lock */
307*7c478bd9Sstevel@tonic-gate 	struct seglock	*lp;		/* associated lock context */
308*7c478bd9Sstevel@tonic-gate 	devmap_cookie_t	lockseg;	/* lock mapping, if any */
309*7c478bd9Sstevel@tonic-gate 	devmap_cookie_t unlockseg;	/* unlock mapping, if any */
310*7c478bd9Sstevel@tonic-gate 	void		*tag;		/* owning process's 'as' pointer, used as identity (see comment above) */
311*7c478bd9Sstevel@tonic-gate 	uint_t		flag;		/* see "flag bits" in winlockio.h */
312*7c478bd9Sstevel@tonic-gate } SegProc;
313*7c478bd9Sstevel@tonic-gate 
/*
 * A SegProc is identified by its process's address-space pointer rather
 * than its pid: devmap_dup runs in the parent's context during fork,
 * and a pid-based tag would create orphans (see block comment above).
 */
314*7c478bd9Sstevel@tonic-gate #define	ID(sdp)		((sdp)->tag)
315*7c478bd9Sstevel@tonic-gate #define	CURPROC_ID	(void *)(curproc->p_as)
316*7c478bd9Sstevel@tonic-gate 
317*7c478bd9Sstevel@tonic-gate /* per lock context information */
318*7c478bd9Sstevel@tonic-gate 
319*7c478bd9Sstevel@tonic-gate typedef struct seglock {
320*7c478bd9Sstevel@tonic-gate 	struct seglock	*next;		/* next lock (protected by winlock_mutex) */
321*7c478bd9Sstevel@tonic-gate 	uint_t		sleepers;	/* nthreads sleeping on this lock */
322*7c478bd9Sstevel@tonic-gate 	uint_t		alloccount;	/* how many times created? */
323*7c478bd9Sstevel@tonic-gate 	uint_t		cookie;		/* mmap() offset (page #) into device; lockid * PAGESIZE */
324*7c478bd9Sstevel@tonic-gate 	uint_t		key;		/* key, if any */
325*7c478bd9Sstevel@tonic-gate 	enum winlock_style	style;	/* style of lock - OLDSTYLE, NEWSTYLE */
326*7c478bd9Sstevel@tonic-gate 	clock_t		timeout;	/* sleep time in ticks; 0 means the lock never times out */
327*7c478bd9Sstevel@tonic-gate 	ddi_umem_cookie_t umem_cookie;	/* cookie for umem allocated memory */
328*7c478bd9Sstevel@tonic-gate 	int		*lockptr;	/* kernel virtual addr of lock word */
329*7c478bd9Sstevel@tonic-gate 	struct segproc	*clients;	/* list of clients of this lock */
330*7c478bd9Sstevel@tonic-gate 	struct segproc	*owner;		/* current owner of lock */
331*7c478bd9Sstevel@tonic-gate 	kmutex_t	mutex;		/* mutex for lock */
332*7c478bd9Sstevel@tonic-gate 	kcondvar_t	locksleep;	/* for sleeping on lock */
333*7c478bd9Sstevel@tonic-gate } SegLock;
334*7c478bd9Sstevel@tonic-gate 
335*7c478bd9Sstevel@tonic-gate #define	LOCK(lp)	(*((lp)->lockptr))	/* current value of lp's lock word */
336*7c478bd9Sstevel@tonic-gate 
337*7c478bd9Sstevel@tonic-gate /*
338*7c478bd9Sstevel@tonic-gate  * Number of locks that can fit in a page. Driver can support only that many.
339*7c478bd9Sstevel@tonic-gate  * For oldstyle locks, it is relatively easy to increase the limit as each
340*7c478bd9Sstevel@tonic-gate  * is in a separate page (MAX_LOCKS mostly serves to prevent runaway allocation).
341*7c478bd9Sstevel@tonic-gate  * For newstyle locks, this is trickier as the code needs to allow for mapping
342*7c478bd9Sstevel@tonic-gate  * into the second or third page of the cookie for some locks.
343*7c478bd9Sstevel@tonic-gate  */
344*7c478bd9Sstevel@tonic-gate #define	MAX_LOCKS	(PAGESIZE/sizeof (int))
345*7c478bd9Sstevel@tonic-gate 
346*7c478bd9Sstevel@tonic-gate #define	LOCKTIME	3	/* Default lock timeout in seconds */
347*7c478bd9Sstevel@tonic-gate 
348*7c478bd9Sstevel@tonic-gate 
349*7c478bd9Sstevel@tonic-gate /* Protections setting for winlock user mappings */
350*7c478bd9Sstevel@tonic-gate #define	WINLOCK_PROT	(PROT_READ|PROT_WRITE|PROT_USER)
351*7c478bd9Sstevel@tonic-gate 
352*7c478bd9Sstevel@tonic-gate /*
353*7c478bd9Sstevel@tonic-gate  * The trash page is where unwanted writes go
354*7c478bd9Sstevel@tonic-gate  * when a process is releasing a lock.
355*7c478bd9Sstevel@tonic-gate  */
356*7c478bd9Sstevel@tonic-gate static	ddi_umem_cookie_t trashpage_cookie = NULL;
357*7c478bd9Sstevel@tonic-gate 
358*7c478bd9Sstevel@tonic-gate /* For newstyle allocations a common page of locks is used */
359*7c478bd9Sstevel@tonic-gate static	caddr_t	lockpage = NULL;
360*7c478bd9Sstevel@tonic-gate static	ddi_umem_cookie_t lockpage_cookie = NULL;
361*7c478bd9Sstevel@tonic-gate 
362*7c478bd9Sstevel@tonic-gate static	dev_info_t	*winlock_dip = NULL;	/* devinfo node, set in attach, cleared in detach */
363*7c478bd9Sstevel@tonic-gate static	kmutex_t	winlock_mutex;		/* global driver lock; see protection notes below */
364*7c478bd9Sstevel@tonic-gate 
365*7c478bd9Sstevel@tonic-gate /*
366*7c478bd9Sstevel@tonic-gate  * winlock_mutex protects
367*7c478bd9Sstevel@tonic-gate  *	lock_list
368*7c478bd9Sstevel@tonic-gate  *	lock_free_list
369*7c478bd9Sstevel@tonic-gate  *	"next" field in SegLock
370*7c478bd9Sstevel@tonic-gate  *	next_lock
371*7c478bd9Sstevel@tonic-gate  *	trashpage_cookie
372*7c478bd9Sstevel@tonic-gate  *	lockpage & lockpage_cookie
373*7c478bd9Sstevel@tonic-gate  *
374*7c478bd9Sstevel@tonic-gate  * SegLock_mutex protects
375*7c478bd9Sstevel@tonic-gate  *	rest of fields in SegLock
376*7c478bd9Sstevel@tonic-gate  *	All fields in list of SegProc (lp->clients)
377*7c478bd9Sstevel@tonic-gate  *
378*7c478bd9Sstevel@tonic-gate  * Lock ordering is winlock_mutex->SegLock_mutex
379*7c478bd9Sstevel@tonic-gate  * During devmap/seg operations SegLock_mutex acquired without winlock_mutex
380*7c478bd9Sstevel@tonic-gate  *
381*7c478bd9Sstevel@tonic-gate  * During devmap callbacks, the pointer to SegProc is stored as the private
382*7c478bd9Sstevel@tonic-gate  * data in the devmap handle. This pointer will not go stale (i.e., the
383*7c478bd9Sstevel@tonic-gate  * SegProc getting deleted) as the SegProc is not deleted until both the
384*7c478bd9Sstevel@tonic-gate  * lockseg and unlockseg have been unmapped and the pointers stored in
385*7c478bd9Sstevel@tonic-gate  * the devmap handles have been NULL'ed.
386*7c478bd9Sstevel@tonic-gate  * But before this pointer is used to access any fields (other than the 'lp')
387*7c478bd9Sstevel@tonic-gate  * lp->mutex must be held.
388*7c478bd9Sstevel@tonic-gate  */
389*7c478bd9Sstevel@tonic-gate 
390*7c478bd9Sstevel@tonic-gate /*
391*7c478bd9Sstevel@tonic-gate  * The allocation code tries to allocate from lock_free_list
392*7c478bd9Sstevel@tonic-gate  * first, otherwise it uses kmem_zalloc.  When lock list is idle, all
393*7c478bd9Sstevel@tonic-gate  * locks in lock_free_list are kmem_freed
394*7c478bd9Sstevel@tonic-gate  */
395*7c478bd9Sstevel@tonic-gate static	SegLock	*lock_list = NULL;		/* in-use locks (protected by winlock_mutex) */
396*7c478bd9Sstevel@tonic-gate static	SegLock	*lock_free_list = NULL;		/* free locks (protected by winlock_mutex) */
397*7c478bd9Sstevel@tonic-gate static	int	next_lock = 0;			/* next lock cookie (protected by winlock_mutex) */
398*7c478bd9Sstevel@tonic-gate 
399*7c478bd9Sstevel@tonic-gate /* Routines to find a lock in lock_list based on offset or key */
400*7c478bd9Sstevel@tonic-gate static SegLock *seglock_findlock(uint_t);
401*7c478bd9Sstevel@tonic-gate static SegLock *seglock_findkey(uint_t);
402*7c478bd9Sstevel@tonic-gate 
403*7c478bd9Sstevel@tonic-gate /* Routines to find and allocate SegProc structures */
404*7c478bd9Sstevel@tonic-gate static SegProc *seglock_find_specific(SegLock *, void *);
405*7c478bd9Sstevel@tonic-gate static SegProc *seglock_alloc_specific(SegLock *, void *);
406*7c478bd9Sstevel@tonic-gate #define	seglock_findclient(lp)	seglock_find_specific((lp), CURPROC_ID)
407*7c478bd9Sstevel@tonic-gate #define	seglock_allocclient(lp)	seglock_alloc_specific((lp), CURPROC_ID)
408*7c478bd9Sstevel@tonic-gate 
409*7c478bd9Sstevel@tonic-gate /* Delete client from lock's client list */
410*7c478bd9Sstevel@tonic-gate static void seglock_deleteclient(SegLock *, SegProc *);
411*7c478bd9Sstevel@tonic-gate static void garbage_collect_lock(SegLock *, SegProc *);
412*7c478bd9Sstevel@tonic-gate 
413*7c478bd9Sstevel@tonic-gate /* Create a new lock */
414*7c478bd9Sstevel@tonic-gate static SegLock *seglock_createlock(enum winlock_style);
415*7c478bd9Sstevel@tonic-gate /* Destroy lock */
416*7c478bd9Sstevel@tonic-gate static void seglock_destroylock(SegLock *);
417*7c478bd9Sstevel@tonic-gate static void lock_destroyall(void);
418*7c478bd9Sstevel@tonic-gate 
419*7c478bd9Sstevel@tonic-gate /* Helper functions in winlockmap_access */
420*7c478bd9Sstevel@tonic-gate static int give_mapping(SegLock *, SegProc *, uint_t);
421*7c478bd9Sstevel@tonic-gate static int lock_giveup(SegLock *, int);
422*7c478bd9Sstevel@tonic-gate static int seglock_lockfault(devmap_cookie_t, SegProc *, SegLock *, uint_t);
423*7c478bd9Sstevel@tonic-gate 
424*7c478bd9Sstevel@tonic-gate /* routines called from ioctl */
425*7c478bd9Sstevel@tonic-gate static int seglock_graballoc(intptr_t, enum winlock_style, int);
426*7c478bd9Sstevel@tonic-gate static int seglock_grabinfo(intptr_t, int);
427*7c478bd9Sstevel@tonic-gate static int seglock_grabfree(intptr_t, int);
428*7c478bd9Sstevel@tonic-gate static int seglock_gettimeout(intptr_t, int);
429*7c478bd9Sstevel@tonic-gate static int seglock_settimeout(intptr_t, int);
430*7c478bd9Sstevel@tonic-gate static void seglock_dump_all(void);
431*7c478bd9Sstevel@tonic-gate 
432*7c478bd9Sstevel@tonic-gate static	int
433*7c478bd9Sstevel@tonic-gate winlock_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
434*7c478bd9Sstevel@tonic-gate {
435*7c478bd9Sstevel@tonic-gate 	DEBUGF(1, (CE_CONT, "winlock_attach, devi=%p, cmd=%d\n",
436*7c478bd9Sstevel@tonic-gate 		(void *)devi, (int)cmd));
437*7c478bd9Sstevel@tonic-gate 	if (cmd != DDI_ATTACH)
438*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
439*7c478bd9Sstevel@tonic-gate 	if (ddi_create_minor_node(devi, "winlock", S_IFCHR, 0, DDI_PSEUDO, 0)
440*7c478bd9Sstevel@tonic-gate 	    == DDI_FAILURE) {
441*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
442*7c478bd9Sstevel@tonic-gate 	}
443*7c478bd9Sstevel@tonic-gate 	winlock_dip = devi;
444*7c478bd9Sstevel@tonic-gate 	ddi_report_dev(devi);
445*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
446*7c478bd9Sstevel@tonic-gate }
447*7c478bd9Sstevel@tonic-gate 
448*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
449*7c478bd9Sstevel@tonic-gate static	int
450*7c478bd9Sstevel@tonic-gate winlock_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
451*7c478bd9Sstevel@tonic-gate {
452*7c478bd9Sstevel@tonic-gate 	DEBUGF(1, (CE_CONT, "winlock_detach, devi=%p, cmd=%d\n",
453*7c478bd9Sstevel@tonic-gate 		(void *)devi, (int)cmd));
454*7c478bd9Sstevel@tonic-gate 	if (cmd != DDI_DETACH)
455*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
456*7c478bd9Sstevel@tonic-gate 
457*7c478bd9Sstevel@tonic-gate 	mutex_enter(&winlock_mutex);
458*7c478bd9Sstevel@tonic-gate 	if (lock_list != NULL) {
459*7c478bd9Sstevel@tonic-gate 		mutex_exit(&winlock_mutex);
460*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
461*7c478bd9Sstevel@tonic-gate 	}
462*7c478bd9Sstevel@tonic-gate 	ASSERT(lock_free_list == NULL);
463*7c478bd9Sstevel@tonic-gate 
464*7c478bd9Sstevel@tonic-gate 	DEBUGF(1, (CE_CONT, "detach freeing trashpage and lockpage\n"));
465*7c478bd9Sstevel@tonic-gate 	/* destroy any common stuff created */
466*7c478bd9Sstevel@tonic-gate 	if (trashpage_cookie != NULL) {
467*7c478bd9Sstevel@tonic-gate 		ddi_umem_free(trashpage_cookie);
468*7c478bd9Sstevel@tonic-gate 		trashpage_cookie = NULL;
469*7c478bd9Sstevel@tonic-gate 	}
470*7c478bd9Sstevel@tonic-gate 	if (lockpage != NULL) {
471*7c478bd9Sstevel@tonic-gate 		ddi_umem_free(lockpage_cookie);
472*7c478bd9Sstevel@tonic-gate 		lockpage = NULL;
473*7c478bd9Sstevel@tonic-gate 		lockpage_cookie = NULL;
474*7c478bd9Sstevel@tonic-gate 	}
475*7c478bd9Sstevel@tonic-gate 	winlock_dip = NULL;
476*7c478bd9Sstevel@tonic-gate 	mutex_exit(&winlock_mutex);
477*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
478*7c478bd9Sstevel@tonic-gate }
479*7c478bd9Sstevel@tonic-gate 
480*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
481*7c478bd9Sstevel@tonic-gate static	int
482*7c478bd9Sstevel@tonic-gate winlock_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
483*7c478bd9Sstevel@tonic-gate {
484*7c478bd9Sstevel@tonic-gate 	register int error;
485*7c478bd9Sstevel@tonic-gate 
486*7c478bd9Sstevel@tonic-gate 	/* initialize result */
487*7c478bd9Sstevel@tonic-gate 	*result = NULL;
488*7c478bd9Sstevel@tonic-gate 
489*7c478bd9Sstevel@tonic-gate 	/* only valid instance (i.e., getminor) is 0 */
490*7c478bd9Sstevel@tonic-gate 	if (getminor((dev_t)arg) >= 1)
491*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
492*7c478bd9Sstevel@tonic-gate 
493*7c478bd9Sstevel@tonic-gate 	switch (infocmd) {
494*7c478bd9Sstevel@tonic-gate 	case DDI_INFO_DEVT2DEVINFO:
495*7c478bd9Sstevel@tonic-gate 		if (winlock_dip == NULL)
496*7c478bd9Sstevel@tonic-gate 			error = DDI_FAILURE;
497*7c478bd9Sstevel@tonic-gate 		else {
498*7c478bd9Sstevel@tonic-gate 			*result = (void *)winlock_dip;
499*7c478bd9Sstevel@tonic-gate 			error = DDI_SUCCESS;
500*7c478bd9Sstevel@tonic-gate 		}
501*7c478bd9Sstevel@tonic-gate 		break;
502*7c478bd9Sstevel@tonic-gate 	case DDI_INFO_DEVT2INSTANCE:
503*7c478bd9Sstevel@tonic-gate 		*result = (void *)0;
504*7c478bd9Sstevel@tonic-gate 		error = DDI_SUCCESS;
505*7c478bd9Sstevel@tonic-gate 		break;
506*7c478bd9Sstevel@tonic-gate 	default:
507*7c478bd9Sstevel@tonic-gate 		error = DDI_FAILURE;
508*7c478bd9Sstevel@tonic-gate 	}
509*7c478bd9Sstevel@tonic-gate 	return (error);
510*7c478bd9Sstevel@tonic-gate }
511*7c478bd9Sstevel@tonic-gate 
512*7c478bd9Sstevel@tonic-gate 
513*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
514*7c478bd9Sstevel@tonic-gate int
515*7c478bd9Sstevel@tonic-gate winlock_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
516*7c478bd9Sstevel@tonic-gate 	cred_t *cred, int *rval)
517*7c478bd9Sstevel@tonic-gate {
518*7c478bd9Sstevel@tonic-gate 	DEBUGF(1, (CE_CONT, "winlockioctl: cmd=%d, arg=0x%p\n",
519*7c478bd9Sstevel@tonic-gate 		cmd, (void *)arg));
520*7c478bd9Sstevel@tonic-gate 
521*7c478bd9Sstevel@tonic-gate 	switch (cmd) {
522*7c478bd9Sstevel@tonic-gate 	/*
523*7c478bd9Sstevel@tonic-gate 	 * ioctls that used to be handled by framebuffers (defined in fbio.h)
524*7c478bd9Sstevel@tonic-gate 	 * RFE: No code really calls the GRAB* ioctls now. Should EOL.
525*7c478bd9Sstevel@tonic-gate 	 */
526*7c478bd9Sstevel@tonic-gate 
527*7c478bd9Sstevel@tonic-gate 	case GRABPAGEALLOC:
528*7c478bd9Sstevel@tonic-gate 		return (seglock_graballoc(arg, OLDSTYLE_LOCK, mode));
529*7c478bd9Sstevel@tonic-gate 	case GRABPAGEFREE:
530*7c478bd9Sstevel@tonic-gate 		return (seglock_grabfree(arg, mode));
531*7c478bd9Sstevel@tonic-gate 	case GRABLOCKINFO:
532*7c478bd9Sstevel@tonic-gate 		return (seglock_grabinfo(arg, mode));
533*7c478bd9Sstevel@tonic-gate 	case GRABATTACH:
534*7c478bd9Sstevel@tonic-gate 		return (EINVAL); /* GRABATTACH is not supported (never was) */
535*7c478bd9Sstevel@tonic-gate 
536*7c478bd9Sstevel@tonic-gate 	case WINLOCKALLOC:
537*7c478bd9Sstevel@tonic-gate 		return (seglock_graballoc(arg, NEWSTYLE_LOCK, mode));
538*7c478bd9Sstevel@tonic-gate 	case WINLOCKFREE:
539*7c478bd9Sstevel@tonic-gate 		return (seglock_grabfree(arg, mode));
540*7c478bd9Sstevel@tonic-gate 	case WINLOCKSETTIMEOUT:
541*7c478bd9Sstevel@tonic-gate 		return (seglock_settimeout(arg, mode));
542*7c478bd9Sstevel@tonic-gate 	case WINLOCKGETTIMEOUT:
543*7c478bd9Sstevel@tonic-gate 		return (seglock_gettimeout(arg, mode));
544*7c478bd9Sstevel@tonic-gate 	case WINLOCKDUMP:
545*7c478bd9Sstevel@tonic-gate 		seglock_dump_all();
546*7c478bd9Sstevel@tonic-gate 		return (0);
547*7c478bd9Sstevel@tonic-gate 
548*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
549*7c478bd9Sstevel@tonic-gate 	case (WIOC|255):
550*7c478bd9Sstevel@tonic-gate 		lock_debug = arg;
551*7c478bd9Sstevel@tonic-gate 		return (0);
552*7c478bd9Sstevel@tonic-gate #endif
553*7c478bd9Sstevel@tonic-gate 
554*7c478bd9Sstevel@tonic-gate 	default:
555*7c478bd9Sstevel@tonic-gate 		return (ENOTTY);		/* Why is this not EINVAL */
556*7c478bd9Sstevel@tonic-gate 	}
557*7c478bd9Sstevel@tonic-gate }
558*7c478bd9Sstevel@tonic-gate 
559*7c478bd9Sstevel@tonic-gate int
560*7c478bd9Sstevel@tonic-gate winlocksegmap(
561*7c478bd9Sstevel@tonic-gate 	dev_t	dev,		/* major:minor */
562*7c478bd9Sstevel@tonic-gate 	off_t	off,		/* device offset from mmap(2) */
563*7c478bd9Sstevel@tonic-gate 	struct as *as,		/* user's address space. */
564*7c478bd9Sstevel@tonic-gate 	caddr_t	*addr,		/* address from mmap(2) */
565*7c478bd9Sstevel@tonic-gate 	off_t	len,		/* length from mmap(2) */
566*7c478bd9Sstevel@tonic-gate 	uint_t	prot,		/* user wants this access */
567*7c478bd9Sstevel@tonic-gate 	uint_t	maxprot,	/* this is the maximum the user can have */
568*7c478bd9Sstevel@tonic-gate 	uint_t	flags,		/* flags from mmap(2) */
569*7c478bd9Sstevel@tonic-gate 	cred_t	*cred)
570*7c478bd9Sstevel@tonic-gate {
571*7c478bd9Sstevel@tonic-gate 	DEBUGF(1, (CE_CONT, "winlock_segmap off=%lx, len=0x%lx\n", off, len));
572*7c478bd9Sstevel@tonic-gate 
573*7c478bd9Sstevel@tonic-gate 	/* Only MAP_SHARED mappings are supported */
574*7c478bd9Sstevel@tonic-gate 	if ((flags & MAP_TYPE) == MAP_PRIVATE) {
575*7c478bd9Sstevel@tonic-gate 		return (EINVAL);
576*7c478bd9Sstevel@tonic-gate 	}
577*7c478bd9Sstevel@tonic-gate 
578*7c478bd9Sstevel@tonic-gate 	/* Use devmap_setup to setup the mapping */
579*7c478bd9Sstevel@tonic-gate 	return (devmap_setup(dev, (offset_t)off, as, addr, (size_t)len, prot,
580*7c478bd9Sstevel@tonic-gate 		maxprot, flags, cred));
581*7c478bd9Sstevel@tonic-gate }
582*7c478bd9Sstevel@tonic-gate 
583*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
584*7c478bd9Sstevel@tonic-gate int
585*7c478bd9Sstevel@tonic-gate winlock_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
586*7c478bd9Sstevel@tonic-gate     size_t *maplen, uint_t model)
587*7c478bd9Sstevel@tonic-gate {
588*7c478bd9Sstevel@tonic-gate 	SegLock *lp;
589*7c478bd9Sstevel@tonic-gate 	int err;
590*7c478bd9Sstevel@tonic-gate 
591*7c478bd9Sstevel@tonic-gate 	DEBUGF(1, (CE_CONT, "winlock devmap: off=%llx, len=%lx, dhp=%p\n",
592*7c478bd9Sstevel@tonic-gate 		off, len, (void *)dhp));
593*7c478bd9Sstevel@tonic-gate 
594*7c478bd9Sstevel@tonic-gate 	*maplen = 0;
595*7c478bd9Sstevel@tonic-gate 
596*7c478bd9Sstevel@tonic-gate 	/* Check if the lock exists, i.e., has been created by alloc */
597*7c478bd9Sstevel@tonic-gate 	/* off is the sy_ident returned in the alloc ioctl */
598*7c478bd9Sstevel@tonic-gate 	if ((lp = seglock_findlock((uint_t)off)) == NULL) {
599*7c478bd9Sstevel@tonic-gate 		return (ENXIO);
600*7c478bd9Sstevel@tonic-gate 	}
601*7c478bd9Sstevel@tonic-gate 
602*7c478bd9Sstevel@tonic-gate 	/*
603*7c478bd9Sstevel@tonic-gate 	 * The offset bits in mmap(2) offset has to be same as in lockptr
604*7c478bd9Sstevel@tonic-gate 	 * OR the offset should be 0 (i.e. masked off)
605*7c478bd9Sstevel@tonic-gate 	 */
606*7c478bd9Sstevel@tonic-gate 	if (((off & PAGEOFFSET) != 0) &&
607*7c478bd9Sstevel@tonic-gate 	    ((off ^ (uintptr_t)(lp->lockptr)) & (offset_t)PAGEOFFSET) != 0) {
608*7c478bd9Sstevel@tonic-gate 		DEBUGF(2, (CE_CONT,
609*7c478bd9Sstevel@tonic-gate 			"mmap offset %llx mismatch with lockptr %p\n",
610*7c478bd9Sstevel@tonic-gate 			off, (void *)lp->lockptr));
611*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
612*7c478bd9Sstevel@tonic-gate 		return (EINVAL);
613*7c478bd9Sstevel@tonic-gate 	}
614*7c478bd9Sstevel@tonic-gate 
615*7c478bd9Sstevel@tonic-gate 	/* Only supports PAGESIZE length mappings */
616*7c478bd9Sstevel@tonic-gate 	if (len != PAGESIZE) {
617*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
618*7c478bd9Sstevel@tonic-gate 		return (EINVAL);
619*7c478bd9Sstevel@tonic-gate 	}
620*7c478bd9Sstevel@tonic-gate 
621*7c478bd9Sstevel@tonic-gate 	/*
622*7c478bd9Sstevel@tonic-gate 	 * Set up devmap to point at page associated with lock
623*7c478bd9Sstevel@tonic-gate 	 * RFE: At this point we dont know if this is a lockpage or unlockpage
624*7c478bd9Sstevel@tonic-gate 	 * a lockpage would not need DEVMAP_ALLOW_REMAP setting
625*7c478bd9Sstevel@tonic-gate 	 * We could have kept track of the mapping order here,
626*7c478bd9Sstevel@tonic-gate 	 * but devmap framework does not support storing any state in this
627*7c478bd9Sstevel@tonic-gate 	 * devmap callback as it does not callback for error cleanup if some
628*7c478bd9Sstevel@tonic-gate 	 * other error happens in the framework.
629*7c478bd9Sstevel@tonic-gate 	 * RFE: We should modify the winlock mmap interface so that the
630*7c478bd9Sstevel@tonic-gate 	 * user process marks in the offset passed in whether this is for a
631*7c478bd9Sstevel@tonic-gate 	 * lock or unlock mapping instead of guessing based on order of maps
632*7c478bd9Sstevel@tonic-gate 	 * This would cleanup other things (such as in fork)
633*7c478bd9Sstevel@tonic-gate 	 */
634*7c478bd9Sstevel@tonic-gate 	if ((err = devmap_umem_setup(dhp, winlock_dip, &winlockmap_ops,
635*7c478bd9Sstevel@tonic-gate 	    lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT,
636*7c478bd9Sstevel@tonic-gate 	    DEVMAP_ALLOW_REMAP, 0)) < 0) {
637*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);	/* held by seglock_findlock */
638*7c478bd9Sstevel@tonic-gate 		return (err);
639*7c478bd9Sstevel@tonic-gate 	}
640*7c478bd9Sstevel@tonic-gate 	/*
641*7c478bd9Sstevel@tonic-gate 	 * No mappings are loaded to those segments yet. The correctness
642*7c478bd9Sstevel@tonic-gate 	 * of the winlock semantics depends on the devmap framework/seg_dev NOT
643*7c478bd9Sstevel@tonic-gate 	 * loading the translations without calling _access callback.
644*7c478bd9Sstevel@tonic-gate 	 */
645*7c478bd9Sstevel@tonic-gate 
646*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
647*7c478bd9Sstevel@tonic-gate 	*maplen = PAGESIZE;
648*7c478bd9Sstevel@tonic-gate 	return (0);
649*7c478bd9Sstevel@tonic-gate }
650*7c478bd9Sstevel@tonic-gate 
651*7c478bd9Sstevel@tonic-gate /*
652*7c478bd9Sstevel@tonic-gate  * This routine is called by the devmap framework after the devmap entry point
653*7c478bd9Sstevel@tonic-gate  * above and the mapping is setup in seg_dev.
654*7c478bd9Sstevel@tonic-gate  * We store the pointer to the per-process context in the devmap private data.
655*7c478bd9Sstevel@tonic-gate  */
656*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
657*7c478bd9Sstevel@tonic-gate static int
658*7c478bd9Sstevel@tonic-gate winlockmap_map(devmap_cookie_t dhp, dev_t dev, uint_t flags, offset_t off,
659*7c478bd9Sstevel@tonic-gate 	size_t len, void **pvtp)
660*7c478bd9Sstevel@tonic-gate {
661*7c478bd9Sstevel@tonic-gate 	SegLock *lp = seglock_findlock((uint_t)off); /* returns w/ mutex held */
662*7c478bd9Sstevel@tonic-gate 	SegProc *sdp;
663*7c478bd9Sstevel@tonic-gate 
664*7c478bd9Sstevel@tonic-gate 	ASSERT(len == PAGESIZE);
665*7c478bd9Sstevel@tonic-gate 
666*7c478bd9Sstevel@tonic-gate 	/* Find the per-process context for this lock, alloc one if not found */
667*7c478bd9Sstevel@tonic-gate 	sdp = seglock_allocclient(lp);
668*7c478bd9Sstevel@tonic-gate 
669*7c478bd9Sstevel@tonic-gate 	/*
670*7c478bd9Sstevel@tonic-gate 	 * RFE: Determining which is a lock vs unlock seg is based on order
671*7c478bd9Sstevel@tonic-gate 	 * of mmaps, we should change that to be derivable from off
672*7c478bd9Sstevel@tonic-gate 	 */
673*7c478bd9Sstevel@tonic-gate 	if (sdp->lockseg == NULL) {
674*7c478bd9Sstevel@tonic-gate 		sdp->lockseg = dhp;
675*7c478bd9Sstevel@tonic-gate 	} else if (sdp->unlockseg == NULL) {
676*7c478bd9Sstevel@tonic-gate 		sdp->unlockseg = dhp;
677*7c478bd9Sstevel@tonic-gate 	} else {
678*7c478bd9Sstevel@tonic-gate 		/* attempting to map lock more than twice */
679*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
680*7c478bd9Sstevel@tonic-gate 		return (ENOMEM);
681*7c478bd9Sstevel@tonic-gate 	}
682*7c478bd9Sstevel@tonic-gate 
683*7c478bd9Sstevel@tonic-gate 	*pvtp = sdp;
684*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
685*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
686*7c478bd9Sstevel@tonic-gate }
687*7c478bd9Sstevel@tonic-gate 
688*7c478bd9Sstevel@tonic-gate /*
689*7c478bd9Sstevel@tonic-gate  * duplicate a segment, as in fork()
690*7c478bd9Sstevel@tonic-gate  * On fork, the child inherits the mappings to the lock
691*7c478bd9Sstevel@tonic-gate  *	lp->alloccount is NOT incremented, so child should not do a free().
692*7c478bd9Sstevel@tonic-gate  *	Semantics same as if done an alloc(), map(), map().
693*7c478bd9Sstevel@tonic-gate  *	This way it would work fine if doing an exec() variant later
694*7c478bd9Sstevel@tonic-gate  *	Child does not inherit any UFLAGS set in parent
695*7c478bd9Sstevel@tonic-gate  * The lock and unlock pages are started off unmapped, i.e., child does not
696*7c478bd9Sstevel@tonic-gate  *	own the lock.
697*7c478bd9Sstevel@tonic-gate  * The code assumes that the child process has a valid pid at this point
698*7c478bd9Sstevel@tonic-gate  * RFE: This semantics depends on fork not duplicating the hat mappings
699*7c478bd9Sstevel@tonic-gate  *	(which is the current implementation). To enforce it would need to
700*7c478bd9Sstevel@tonic-gate  *	call devmap_unload from here - not clear if that is allowed.
701*7c478bd9Sstevel@tonic-gate  */
702*7c478bd9Sstevel@tonic-gate 
703*7c478bd9Sstevel@tonic-gate static int
704*7c478bd9Sstevel@tonic-gate winlockmap_dup(devmap_cookie_t dhp, void *oldpvt, devmap_cookie_t new_dhp,
705*7c478bd9Sstevel@tonic-gate 	void **newpvt)
706*7c478bd9Sstevel@tonic-gate {
707*7c478bd9Sstevel@tonic-gate 	SegProc *sdp = (SegProc *)oldpvt;
708*7c478bd9Sstevel@tonic-gate 	SegProc *ndp;
709*7c478bd9Sstevel@tonic-gate 	SegLock *lp = sdp->lp;
710*7c478bd9Sstevel@tonic-gate 
711*7c478bd9Sstevel@tonic-gate 	mutex_enter(&lp->mutex);
712*7c478bd9Sstevel@tonic-gate 	ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
713*7c478bd9Sstevel@tonic-gate 
714*7c478bd9Sstevel@tonic-gate 	/*
715*7c478bd9Sstevel@tonic-gate 	 * Note: At this point, the child process does have a pid, but
716*7c478bd9Sstevel@tonic-gate 	 * the arguments passed to as_dup and hence to devmap_dup dont pass it
717*7c478bd9Sstevel@tonic-gate 	 * down. So we cannot use normal seglock_findclient - which finds the
718*7c478bd9Sstevel@tonic-gate 	 * parent sdp itself!
719*7c478bd9Sstevel@tonic-gate 	 * Instead we allocate the child's SegProc by using the child as pointer
720*7c478bd9Sstevel@tonic-gate 	 * RFE: we are using the as stucture which means peeking into the
721*7c478bd9Sstevel@tonic-gate 	 * devmap_cookie. This is not DDI-compliant. Need a compliant way of
722*7c478bd9Sstevel@tonic-gate 	 * getting at either the as or, better, a way to get the child's new pid
723*7c478bd9Sstevel@tonic-gate 	 */
724*7c478bd9Sstevel@tonic-gate 	ndp = seglock_alloc_specific(lp,
725*7c478bd9Sstevel@tonic-gate 		(void *)((devmap_handle_t *)new_dhp)->dh_seg->s_as);
726*7c478bd9Sstevel@tonic-gate 	ASSERT(ndp != sdp);
727*7c478bd9Sstevel@tonic-gate 
728*7c478bd9Sstevel@tonic-gate 	if (sdp->lockseg == dhp) {
729*7c478bd9Sstevel@tonic-gate 		ASSERT(ndp->lockseg == NULL);
730*7c478bd9Sstevel@tonic-gate 		ndp->lockseg = new_dhp;
731*7c478bd9Sstevel@tonic-gate 	} else {
732*7c478bd9Sstevel@tonic-gate 		ASSERT(sdp->unlockseg == dhp);
733*7c478bd9Sstevel@tonic-gate 		ASSERT(ndp->unlockseg == NULL);
734*7c478bd9Sstevel@tonic-gate 		ndp->unlockseg = new_dhp;
735*7c478bd9Sstevel@tonic-gate 		if (sdp->flag & TRASHPAGE) {
736*7c478bd9Sstevel@tonic-gate 			ndp->flag |= TRASHPAGE;
737*7c478bd9Sstevel@tonic-gate 		}
738*7c478bd9Sstevel@tonic-gate 	}
739*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);
740*7c478bd9Sstevel@tonic-gate 	*newpvt = (void *)ndp;
741*7c478bd9Sstevel@tonic-gate 	return (0);
742*7c478bd9Sstevel@tonic-gate }
743*7c478bd9Sstevel@tonic-gate 
744*7c478bd9Sstevel@tonic-gate 
745*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
746*7c478bd9Sstevel@tonic-gate static void
747*7c478bd9Sstevel@tonic-gate winlockmap_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
748*7c478bd9Sstevel@tonic-gate 	devmap_cookie_t new_dhp1, void **newpvtp1,
749*7c478bd9Sstevel@tonic-gate 	devmap_cookie_t new_dhp2, void **newpvtp2)
750*7c478bd9Sstevel@tonic-gate {
751*7c478bd9Sstevel@tonic-gate 	SegProc	*sdp = (SegProc *)pvtp;
752*7c478bd9Sstevel@tonic-gate 	SegLock	*lp = sdp->lp;
753*7c478bd9Sstevel@tonic-gate 
754*7c478bd9Sstevel@tonic-gate 	/*
755*7c478bd9Sstevel@tonic-gate 	 * We always create PAGESIZE length mappings, so there should never
756*7c478bd9Sstevel@tonic-gate 	 * be a partial unmapping case
757*7c478bd9Sstevel@tonic-gate 	 */
758*7c478bd9Sstevel@tonic-gate 	ASSERT((new_dhp1 == NULL) && (new_dhp2 == NULL));
759*7c478bd9Sstevel@tonic-gate 
760*7c478bd9Sstevel@tonic-gate 	mutex_enter(&lp->mutex);
761*7c478bd9Sstevel@tonic-gate 	ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
762*7c478bd9Sstevel@tonic-gate 	/* make sure this process doesn't own the lock */
763*7c478bd9Sstevel@tonic-gate 	if (sdp == lp->owner) {
764*7c478bd9Sstevel@tonic-gate 		/*
765*7c478bd9Sstevel@tonic-gate 		 * Not handling errors - i.e., errors in unloading mapping
766*7c478bd9Sstevel@tonic-gate 		 * As part of unmapping hat/seg structure get torn down anyway
767*7c478bd9Sstevel@tonic-gate 		 */
768*7c478bd9Sstevel@tonic-gate 		(void) lock_giveup(lp, 0);
769*7c478bd9Sstevel@tonic-gate 	}
770*7c478bd9Sstevel@tonic-gate 
771*7c478bd9Sstevel@tonic-gate 	ASSERT(sdp != lp->owner);
772*7c478bd9Sstevel@tonic-gate 	if (sdp->lockseg == dhp) {
773*7c478bd9Sstevel@tonic-gate 		sdp->lockseg = NULL;
774*7c478bd9Sstevel@tonic-gate 	} else {
775*7c478bd9Sstevel@tonic-gate 		ASSERT(sdp->unlockseg == dhp);
776*7c478bd9Sstevel@tonic-gate 		sdp->unlockseg = NULL;
777*7c478bd9Sstevel@tonic-gate 		sdp->flag &= ~TRASHPAGE;	/* clear flag if set */
778*7c478bd9Sstevel@tonic-gate 	}
779*7c478bd9Sstevel@tonic-gate 
780*7c478bd9Sstevel@tonic-gate 	garbage_collect_lock(lp, sdp);
781*7c478bd9Sstevel@tonic-gate }
782*7c478bd9Sstevel@tonic-gate 
783*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
784*7c478bd9Sstevel@tonic-gate static int
785*7c478bd9Sstevel@tonic-gate winlockmap_access(devmap_cookie_t dhp, void *pvt, offset_t off, size_t len,
786*7c478bd9Sstevel@tonic-gate 	uint_t type, uint_t rw)
787*7c478bd9Sstevel@tonic-gate {
788*7c478bd9Sstevel@tonic-gate 	SegProc *sdp = (SegProc *)pvt;
789*7c478bd9Sstevel@tonic-gate 	SegLock *lp = sdp->lp;
790*7c478bd9Sstevel@tonic-gate 	int err;
791*7c478bd9Sstevel@tonic-gate 
792*7c478bd9Sstevel@tonic-gate 	/* Driver handles only DEVMAP_ACCESS type of faults */
793*7c478bd9Sstevel@tonic-gate 	if (type != DEVMAP_ACCESS)
794*7c478bd9Sstevel@tonic-gate 		return (-1);
795*7c478bd9Sstevel@tonic-gate 
796*7c478bd9Sstevel@tonic-gate 	mutex_enter(&lp->mutex);
797*7c478bd9Sstevel@tonic-gate 	ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
798*7c478bd9Sstevel@tonic-gate 
799*7c478bd9Sstevel@tonic-gate 	/* should be using a SegProc that corresponds to current process */
800*7c478bd9Sstevel@tonic-gate 	ASSERT(ID(sdp) == CURPROC_ID);
801*7c478bd9Sstevel@tonic-gate 
802*7c478bd9Sstevel@tonic-gate 	/*
803*7c478bd9Sstevel@tonic-gate 	 * If process is faulting but does not have both segments mapped
804*7c478bd9Sstevel@tonic-gate 	 * return error (should cause a segv).
805*7c478bd9Sstevel@tonic-gate 	 * RFE: could give it a permanent trashpage
806*7c478bd9Sstevel@tonic-gate 	 */
807*7c478bd9Sstevel@tonic-gate 	if ((sdp->lockseg == NULL) || (sdp->unlockseg == NULL)) {
808*7c478bd9Sstevel@tonic-gate 		err = -1;
809*7c478bd9Sstevel@tonic-gate 	} else {
810*7c478bd9Sstevel@tonic-gate 		err = seglock_lockfault(dhp, sdp, lp, rw);
811*7c478bd9Sstevel@tonic-gate 	}
812*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);
813*7c478bd9Sstevel@tonic-gate 	return (err);
814*7c478bd9Sstevel@tonic-gate }
815*7c478bd9Sstevel@tonic-gate 
816*7c478bd9Sstevel@tonic-gate 	/* INTERNAL ROUTINES START HERE */
817*7c478bd9Sstevel@tonic-gate 
818*7c478bd9Sstevel@tonic-gate 
819*7c478bd9Sstevel@tonic-gate 
820*7c478bd9Sstevel@tonic-gate /*
821*7c478bd9Sstevel@tonic-gate  * search the lock_list list for the specified cookie
822*7c478bd9Sstevel@tonic-gate  * The cookie is the sy_ident field returns by ALLOC ioctl.
823*7c478bd9Sstevel@tonic-gate  * This has two parts:
824*7c478bd9Sstevel@tonic-gate  * the pageoffset bits contain offset into the lock page.
825*7c478bd9Sstevel@tonic-gate  * the pagenumber bits contain the lock id.
826*7c478bd9Sstevel@tonic-gate  * The user code is supposed to pass in only the pagenumber portion
827*7c478bd9Sstevel@tonic-gate  *	(i.e. mask off the pageoffset bits). However the code below
828*7c478bd9Sstevel@tonic-gate  *	does the mask in case the users are not diligent
829*7c478bd9Sstevel@tonic-gate  * if found, returns with mutex for SegLock structure held
830*7c478bd9Sstevel@tonic-gate  */
831*7c478bd9Sstevel@tonic-gate static SegLock *
832*7c478bd9Sstevel@tonic-gate seglock_findlock(uint_t cookie)
833*7c478bd9Sstevel@tonic-gate {
834*7c478bd9Sstevel@tonic-gate 	SegLock	*lp;
835*7c478bd9Sstevel@tonic-gate 
836*7c478bd9Sstevel@tonic-gate 	cookie &= (uint_t)PAGEMASK;   /* remove pageoffset bits to get cookie */
837*7c478bd9Sstevel@tonic-gate 	mutex_enter(&winlock_mutex);
838*7c478bd9Sstevel@tonic-gate 	for (lp = lock_list; lp != NULL; lp = lp->next) {
839*7c478bd9Sstevel@tonic-gate 		mutex_enter(&lp->mutex);
840*7c478bd9Sstevel@tonic-gate 		if (cookie == lp->cookie) {
841*7c478bd9Sstevel@tonic-gate 			break;	/* return with lp->mutex held */
842*7c478bd9Sstevel@tonic-gate 		}
843*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);
844*7c478bd9Sstevel@tonic-gate 	}
845*7c478bd9Sstevel@tonic-gate 	mutex_exit(&winlock_mutex);
846*7c478bd9Sstevel@tonic-gate 	return (lp);
847*7c478bd9Sstevel@tonic-gate }
848*7c478bd9Sstevel@tonic-gate 
849*7c478bd9Sstevel@tonic-gate /*
850*7c478bd9Sstevel@tonic-gate  * search the lock_list list for the specified non-zero key
851*7c478bd9Sstevel@tonic-gate  * if found, returns with lock for SegLock structure held
852*7c478bd9Sstevel@tonic-gate  */
853*7c478bd9Sstevel@tonic-gate static SegLock *
854*7c478bd9Sstevel@tonic-gate seglock_findkey(uint_t key)
855*7c478bd9Sstevel@tonic-gate {
856*7c478bd9Sstevel@tonic-gate 	SegLock	*lp;
857*7c478bd9Sstevel@tonic-gate 
858*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&winlock_mutex));
859*7c478bd9Sstevel@tonic-gate 	/* The driver allows multiple locks with key 0, dont search */
860*7c478bd9Sstevel@tonic-gate 	if (key == 0)
861*7c478bd9Sstevel@tonic-gate 		return (NULL);
862*7c478bd9Sstevel@tonic-gate 	for (lp = lock_list; lp != NULL; lp = lp->next) {
863*7c478bd9Sstevel@tonic-gate 		mutex_enter(&lp->mutex);
864*7c478bd9Sstevel@tonic-gate 		if (key == lp->key)
865*7c478bd9Sstevel@tonic-gate 			break;
866*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);
867*7c478bd9Sstevel@tonic-gate 	}
868*7c478bd9Sstevel@tonic-gate 	return (lp);
869*7c478bd9Sstevel@tonic-gate }
870*7c478bd9Sstevel@tonic-gate 
871*7c478bd9Sstevel@tonic-gate /*
872*7c478bd9Sstevel@tonic-gate  * Create a new lock context.
873*7c478bd9Sstevel@tonic-gate  * Returns with SegLock mutex held
874*7c478bd9Sstevel@tonic-gate  */
875*7c478bd9Sstevel@tonic-gate 
876*7c478bd9Sstevel@tonic-gate static SegLock *
877*7c478bd9Sstevel@tonic-gate seglock_createlock(enum winlock_style style)
878*7c478bd9Sstevel@tonic-gate {
879*7c478bd9Sstevel@tonic-gate 	SegLock	*lp;
880*7c478bd9Sstevel@tonic-gate 
881*7c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT, "seglock_createlock: free_list=%p, next_lock %d\n",
882*7c478bd9Sstevel@tonic-gate 		(void *)lock_free_list, next_lock));
883*7c478bd9Sstevel@tonic-gate 
884*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&winlock_mutex));
885*7c478bd9Sstevel@tonic-gate 	if (lock_free_list != NULL) {
886*7c478bd9Sstevel@tonic-gate 		lp = lock_free_list;
887*7c478bd9Sstevel@tonic-gate 		lock_free_list = lp->next;
888*7c478bd9Sstevel@tonic-gate 	} else if (next_lock >= MAX_LOCKS) {
889*7c478bd9Sstevel@tonic-gate 		return (NULL);
890*7c478bd9Sstevel@tonic-gate 	} else {
891*7c478bd9Sstevel@tonic-gate 		lp = kmem_zalloc(sizeof (SegLock), KM_SLEEP);
892*7c478bd9Sstevel@tonic-gate 		lp->cookie = (next_lock + 1) * (uint_t)PAGESIZE;
893*7c478bd9Sstevel@tonic-gate 		mutex_init(&lp->mutex, NULL, MUTEX_DEFAULT, NULL);
894*7c478bd9Sstevel@tonic-gate 		cv_init(&lp->locksleep, NULL, CV_DEFAULT, NULL);
895*7c478bd9Sstevel@tonic-gate 		++next_lock;
896*7c478bd9Sstevel@tonic-gate 	}
897*7c478bd9Sstevel@tonic-gate 
898*7c478bd9Sstevel@tonic-gate 	mutex_enter(&lp->mutex);
899*7c478bd9Sstevel@tonic-gate 	ASSERT((lp->cookie/PAGESIZE) <= next_lock);
900*7c478bd9Sstevel@tonic-gate 
901*7c478bd9Sstevel@tonic-gate 	if (style == OLDSTYLE_LOCK) {
902*7c478bd9Sstevel@tonic-gate 		lp->lockptr = (int *)ddi_umem_alloc(PAGESIZE,
903*7c478bd9Sstevel@tonic-gate 			DDI_UMEM_SLEEP, &(lp->umem_cookie));
904*7c478bd9Sstevel@tonic-gate 	} else {
905*7c478bd9Sstevel@tonic-gate 		lp->lockptr = ((int *)lockpage) + ((lp->cookie/PAGESIZE) - 1);
906*7c478bd9Sstevel@tonic-gate 		lp->umem_cookie = lockpage_cookie;
907*7c478bd9Sstevel@tonic-gate 	}
908*7c478bd9Sstevel@tonic-gate 
909*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->lockptr != NULL);
910*7c478bd9Sstevel@tonic-gate 	lp->style = style;
911*7c478bd9Sstevel@tonic-gate 	lp->sleepers = 0;
912*7c478bd9Sstevel@tonic-gate 	lp->alloccount = 1;
913*7c478bd9Sstevel@tonic-gate 	lp->timeout = LOCKTIME*hz;
914*7c478bd9Sstevel@tonic-gate 	lp->clients = NULL;
915*7c478bd9Sstevel@tonic-gate 	lp->owner = NULL;
916*7c478bd9Sstevel@tonic-gate 	LOCK(lp) = 0;
917*7c478bd9Sstevel@tonic-gate 	lp->next = lock_list;
918*7c478bd9Sstevel@tonic-gate 	lock_list = lp;
919*7c478bd9Sstevel@tonic-gate 	return (lp);
920*7c478bd9Sstevel@tonic-gate }
921*7c478bd9Sstevel@tonic-gate 
922*7c478bd9Sstevel@tonic-gate /*
923*7c478bd9Sstevel@tonic-gate  * Routine to destory a lock structure.
924*7c478bd9Sstevel@tonic-gate  * This routine is called while holding the lp->mutex but not the
925*7c478bd9Sstevel@tonic-gate  * winlock_mutex.
926*7c478bd9Sstevel@tonic-gate  */
927*7c478bd9Sstevel@tonic-gate 
928*7c478bd9Sstevel@tonic-gate static void
929*7c478bd9Sstevel@tonic-gate seglock_destroylock(SegLock *lp)
930*7c478bd9Sstevel@tonic-gate {
931*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
932*7c478bd9Sstevel@tonic-gate 	ASSERT(!MUTEX_HELD(&winlock_mutex));
933*7c478bd9Sstevel@tonic-gate 
934*7c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT, "destroying lock cookie %d key %d\n",
935*7c478bd9Sstevel@tonic-gate 		lp->cookie, lp->key));
936*7c478bd9Sstevel@tonic-gate 
937*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->alloccount == 0);
938*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->clients == NULL);
939*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->owner == NULL);
940*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->sleepers == 0);
941*7c478bd9Sstevel@tonic-gate 
942*7c478bd9Sstevel@tonic-gate 	/* clean up/release fields in lp */
943*7c478bd9Sstevel@tonic-gate 	if (lp->style == OLDSTYLE_LOCK) {
944*7c478bd9Sstevel@tonic-gate 		ddi_umem_free(lp->umem_cookie);
945*7c478bd9Sstevel@tonic-gate 	}
946*7c478bd9Sstevel@tonic-gate 	lp->umem_cookie = NULL;
947*7c478bd9Sstevel@tonic-gate 	lp->lockptr = NULL;
948*7c478bd9Sstevel@tonic-gate 	lp->key = 0;
949*7c478bd9Sstevel@tonic-gate 
950*7c478bd9Sstevel@tonic-gate 	/*
951*7c478bd9Sstevel@tonic-gate 	 * Reduce cookie by 1, makes it non page-aligned and invalid
952*7c478bd9Sstevel@tonic-gate 	 * This prevents any valid lookup from finding this lock
953*7c478bd9Sstevel@tonic-gate 	 * so when we drop the lock and regrab it it will still
954*7c478bd9Sstevel@tonic-gate 	 * be there and nobody else would have attached to it
955*7c478bd9Sstevel@tonic-gate 	 */
956*7c478bd9Sstevel@tonic-gate 	lp->cookie--;
957*7c478bd9Sstevel@tonic-gate 
958*7c478bd9Sstevel@tonic-gate 	/* Drop and reacquire mutexes in right order */
959*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);
960*7c478bd9Sstevel@tonic-gate 	mutex_enter(&winlock_mutex);
961*7c478bd9Sstevel@tonic-gate 	mutex_enter(&lp->mutex);
962*7c478bd9Sstevel@tonic-gate 
963*7c478bd9Sstevel@tonic-gate 	/* reincrement the cookie to get the original valid cookie */
964*7c478bd9Sstevel@tonic-gate 	lp->cookie++;
965*7c478bd9Sstevel@tonic-gate 	ASSERT((lp->cookie & PAGEOFFSET) == 0);
966*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->alloccount == 0);
967*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->clients == NULL);
968*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->owner == NULL);
969*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->sleepers == 0);
970*7c478bd9Sstevel@tonic-gate 
971*7c478bd9Sstevel@tonic-gate 	/* Remove lp from lock_list */
972*7c478bd9Sstevel@tonic-gate 	if (lock_list == lp) {
973*7c478bd9Sstevel@tonic-gate 		lock_list = lp->next;
974*7c478bd9Sstevel@tonic-gate 	} else {
975*7c478bd9Sstevel@tonic-gate 		SegLock *tmp = lock_list;
976*7c478bd9Sstevel@tonic-gate 		while (tmp->next != lp) {
977*7c478bd9Sstevel@tonic-gate 			tmp = tmp->next;
978*7c478bd9Sstevel@tonic-gate 			ASSERT(tmp != NULL);
979*7c478bd9Sstevel@tonic-gate 		}
980*7c478bd9Sstevel@tonic-gate 		tmp->next = lp->next;
981*7c478bd9Sstevel@tonic-gate 	}
982*7c478bd9Sstevel@tonic-gate 
983*7c478bd9Sstevel@tonic-gate 	/* Add to lock_free_list */
984*7c478bd9Sstevel@tonic-gate 	lp->next = lock_free_list;
985*7c478bd9Sstevel@tonic-gate 	lock_free_list = lp;
986*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);
987*7c478bd9Sstevel@tonic-gate 
988*7c478bd9Sstevel@tonic-gate 	/* Check if all locks deleted and cleanup */
989*7c478bd9Sstevel@tonic-gate 	if (lock_list == NULL) {
990*7c478bd9Sstevel@tonic-gate 		lock_destroyall();
991*7c478bd9Sstevel@tonic-gate 	}
992*7c478bd9Sstevel@tonic-gate 
993*7c478bd9Sstevel@tonic-gate 	mutex_exit(&winlock_mutex);
994*7c478bd9Sstevel@tonic-gate }
995*7c478bd9Sstevel@tonic-gate 
996*7c478bd9Sstevel@tonic-gate /* Routine to find a SegProc corresponding to the tag */
997*7c478bd9Sstevel@tonic-gate 
998*7c478bd9Sstevel@tonic-gate static SegProc *
999*7c478bd9Sstevel@tonic-gate seglock_find_specific(SegLock *lp, void *tag)
1000*7c478bd9Sstevel@tonic-gate {
1001*7c478bd9Sstevel@tonic-gate 	SegProc *sdp;
1002*7c478bd9Sstevel@tonic-gate 
1003*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
1004*7c478bd9Sstevel@tonic-gate 	ASSERT(tag != NULL);
1005*7c478bd9Sstevel@tonic-gate 	for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1006*7c478bd9Sstevel@tonic-gate 		if (ID(sdp) == tag)
1007*7c478bd9Sstevel@tonic-gate 			break;
1008*7c478bd9Sstevel@tonic-gate 	}
1009*7c478bd9Sstevel@tonic-gate 	return (sdp);
1010*7c478bd9Sstevel@tonic-gate }
1011*7c478bd9Sstevel@tonic-gate 
1012*7c478bd9Sstevel@tonic-gate /* Routine to find (and if needed allocate) a SegProc corresponding to tag */
1013*7c478bd9Sstevel@tonic-gate 
1014*7c478bd9Sstevel@tonic-gate static SegProc *
1015*7c478bd9Sstevel@tonic-gate seglock_alloc_specific(SegLock *lp, void *tag)
1016*7c478bd9Sstevel@tonic-gate {
1017*7c478bd9Sstevel@tonic-gate 	SegProc *sdp;
1018*7c478bd9Sstevel@tonic-gate 
1019*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
1020*7c478bd9Sstevel@tonic-gate 	ASSERT(tag != NULL);
1021*7c478bd9Sstevel@tonic-gate 
1022*7c478bd9Sstevel@tonic-gate 	/* Search and return if existing one found */
1023*7c478bd9Sstevel@tonic-gate 	sdp = seglock_find_specific(lp, tag);
1024*7c478bd9Sstevel@tonic-gate 	if (sdp != NULL)
1025*7c478bd9Sstevel@tonic-gate 		return (sdp);
1026*7c478bd9Sstevel@tonic-gate 
1027*7c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT, "Allocating segproc structure for tag %p lock %d\n",
1028*7c478bd9Sstevel@tonic-gate 		    tag, lp->cookie));
1029*7c478bd9Sstevel@tonic-gate 
1030*7c478bd9Sstevel@tonic-gate 	/* Allocate a new SegProc */
1031*7c478bd9Sstevel@tonic-gate 	sdp = kmem_zalloc(sizeof (SegProc), KM_SLEEP);
1032*7c478bd9Sstevel@tonic-gate 	sdp->next = lp->clients;
1033*7c478bd9Sstevel@tonic-gate 	lp->clients = sdp;
1034*7c478bd9Sstevel@tonic-gate 	sdp->lp = lp;
1035*7c478bd9Sstevel@tonic-gate 	ID(sdp) = tag;
1036*7c478bd9Sstevel@tonic-gate 	return (sdp);
1037*7c478bd9Sstevel@tonic-gate }
1038*7c478bd9Sstevel@tonic-gate 
1039*7c478bd9Sstevel@tonic-gate /*
1040*7c478bd9Sstevel@tonic-gate  * search a context's client list for the given client and delete
1041*7c478bd9Sstevel@tonic-gate  */
1042*7c478bd9Sstevel@tonic-gate 
1043*7c478bd9Sstevel@tonic-gate static void
1044*7c478bd9Sstevel@tonic-gate seglock_deleteclient(SegLock *lp, SegProc *sdp)
1045*7c478bd9Sstevel@tonic-gate {
1046*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
1047*7c478bd9Sstevel@tonic-gate 	ASSERT(lp->owner != sdp);	/* Not current owner of lock */
1048*7c478bd9Sstevel@tonic-gate 	ASSERT(sdp->lockseg == NULL);	/* Mappings torn down */
1049*7c478bd9Sstevel@tonic-gate 	ASSERT(sdp->unlockseg == NULL);
1050*7c478bd9Sstevel@tonic-gate 
1051*7c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT, "Deleting segproc structure for pid %d lock %d\n",
1052*7c478bd9Sstevel@tonic-gate 		ddi_get_pid(), lp->cookie));
1053*7c478bd9Sstevel@tonic-gate 	if (lp->clients == sdp) {
1054*7c478bd9Sstevel@tonic-gate 		lp->clients = sdp->next;
1055*7c478bd9Sstevel@tonic-gate 	} else {
1056*7c478bd9Sstevel@tonic-gate 		SegProc *tmp = lp->clients;
1057*7c478bd9Sstevel@tonic-gate 		while (tmp->next != sdp) {
1058*7c478bd9Sstevel@tonic-gate 			tmp = tmp->next;
1059*7c478bd9Sstevel@tonic-gate 			ASSERT(tmp != NULL);
1060*7c478bd9Sstevel@tonic-gate 		}
1061*7c478bd9Sstevel@tonic-gate 		tmp->next = sdp->next;
1062*7c478bd9Sstevel@tonic-gate 	}
1063*7c478bd9Sstevel@tonic-gate 	kmem_free(sdp, sizeof (SegProc));
1064*7c478bd9Sstevel@tonic-gate }
1065*7c478bd9Sstevel@tonic-gate 
1066*7c478bd9Sstevel@tonic-gate /*
1067*7c478bd9Sstevel@tonic-gate  * Routine to verify if a SegProc and SegLock
1068*7c478bd9Sstevel@tonic-gate  * structures are empty/idle.
1069*7c478bd9Sstevel@tonic-gate  * Destroys the structures if they are ready
1070*7c478bd9Sstevel@tonic-gate  * Can be called with sdp == NULL if want to verify only the lock state
1071*7c478bd9Sstevel@tonic-gate  * caller should hold the lp->mutex
1072*7c478bd9Sstevel@tonic-gate  * and this routine drops the mutex
1073*7c478bd9Sstevel@tonic-gate  */
1074*7c478bd9Sstevel@tonic-gate static void
1075*7c478bd9Sstevel@tonic-gate garbage_collect_lock(SegLock *lp, SegProc *sdp)
1076*7c478bd9Sstevel@tonic-gate {
1077*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
1078*7c478bd9Sstevel@tonic-gate 	/* see if both segments unmapped from client structure */
1079*7c478bd9Sstevel@tonic-gate 	if ((sdp != NULL) && (sdp->lockseg == NULL) && (sdp->unlockseg == NULL))
1080*7c478bd9Sstevel@tonic-gate 		seglock_deleteclient(lp, sdp);
1081*7c478bd9Sstevel@tonic-gate 
1082*7c478bd9Sstevel@tonic-gate 	/* see if this is last client in the entire lock context */
1083*7c478bd9Sstevel@tonic-gate 	if ((lp->clients == NULL) && (lp->alloccount == 0)) {
1084*7c478bd9Sstevel@tonic-gate 		seglock_destroylock(lp);
1085*7c478bd9Sstevel@tonic-gate 	} else {
1086*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);
1087*7c478bd9Sstevel@tonic-gate 	}
1088*7c478bd9Sstevel@tonic-gate }
1089*7c478bd9Sstevel@tonic-gate 
1090*7c478bd9Sstevel@tonic-gate 
1091*7c478bd9Sstevel@tonic-gate /* IOCTLS START HERE */
1092*7c478bd9Sstevel@tonic-gate 
1093*7c478bd9Sstevel@tonic-gate static int
1094*7c478bd9Sstevel@tonic-gate seglock_grabinfo(intptr_t arg, int mode)
1095*7c478bd9Sstevel@tonic-gate {
1096*7c478bd9Sstevel@tonic-gate 	int i = 1;
1097*7c478bd9Sstevel@tonic-gate 
1098*7c478bd9Sstevel@tonic-gate 	/* multiple clients per lock supported - see comments up top */
1099*7c478bd9Sstevel@tonic-gate 	if (ddi_copyout((caddr_t)&i, (caddr_t)arg, sizeof (int), mode) != 0)
1100*7c478bd9Sstevel@tonic-gate 		return (EFAULT);
1101*7c478bd9Sstevel@tonic-gate 	return (0);
1102*7c478bd9Sstevel@tonic-gate }
1103*7c478bd9Sstevel@tonic-gate 
1104*7c478bd9Sstevel@tonic-gate static int
1105*7c478bd9Sstevel@tonic-gate seglock_graballoc(intptr_t arg, enum winlock_style style, int mode) /* IOCTL */
1106*7c478bd9Sstevel@tonic-gate {
1107*7c478bd9Sstevel@tonic-gate 	struct seglock	*lp;
1108*7c478bd9Sstevel@tonic-gate 	uint_t		key;
1109*7c478bd9Sstevel@tonic-gate 	struct		winlockalloc wla;
1110*7c478bd9Sstevel@tonic-gate 	int		err;
1111*7c478bd9Sstevel@tonic-gate 
1112*7c478bd9Sstevel@tonic-gate 	if (style == OLDSTYLE_LOCK) {
1113*7c478bd9Sstevel@tonic-gate 		key = 0;
1114*7c478bd9Sstevel@tonic-gate 	} else {
1115*7c478bd9Sstevel@tonic-gate 		if (ddi_copyin((caddr_t)arg, (caddr_t)&wla, sizeof (wla),
1116*7c478bd9Sstevel@tonic-gate 		    mode)) {
1117*7c478bd9Sstevel@tonic-gate 			return (EFAULT);
1118*7c478bd9Sstevel@tonic-gate 		}
1119*7c478bd9Sstevel@tonic-gate 		key = wla.sy_key;
1120*7c478bd9Sstevel@tonic-gate 	}
1121*7c478bd9Sstevel@tonic-gate 
1122*7c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT,
1123*7c478bd9Sstevel@tonic-gate 		"seglock_graballoc: key=%u, style=%d\n", key, style));
1124*7c478bd9Sstevel@tonic-gate 
1125*7c478bd9Sstevel@tonic-gate 	mutex_enter(&winlock_mutex);
1126*7c478bd9Sstevel@tonic-gate 	/* Allocate lockpage on first new style alloc */
1127*7c478bd9Sstevel@tonic-gate 	if ((lockpage == NULL) && (style == NEWSTYLE_LOCK)) {
1128*7c478bd9Sstevel@tonic-gate 		lockpage = ddi_umem_alloc(PAGESIZE, DDI_UMEM_SLEEP,
1129*7c478bd9Sstevel@tonic-gate 				&lockpage_cookie);
1130*7c478bd9Sstevel@tonic-gate 	}
1131*7c478bd9Sstevel@tonic-gate 
1132*7c478bd9Sstevel@tonic-gate 	/* Allocate trashpage on first alloc (any style) */
1133*7c478bd9Sstevel@tonic-gate 	if (trashpage_cookie == NULL) {
1134*7c478bd9Sstevel@tonic-gate 		(void) ddi_umem_alloc(PAGESIZE, DDI_UMEM_TRASH | DDI_UMEM_SLEEP,
1135*7c478bd9Sstevel@tonic-gate 					&trashpage_cookie);
1136*7c478bd9Sstevel@tonic-gate 	}
1137*7c478bd9Sstevel@tonic-gate 
1138*7c478bd9Sstevel@tonic-gate 	if ((lp = seglock_findkey(key)) != NULL) {
1139*7c478bd9Sstevel@tonic-gate 		DEBUGF(2, (CE_CONT, "alloc: found lock key %d cookie %d\n",
1140*7c478bd9Sstevel@tonic-gate 			key, lp->cookie));
1141*7c478bd9Sstevel@tonic-gate 		++lp->alloccount;
1142*7c478bd9Sstevel@tonic-gate 	} else if ((lp = seglock_createlock(style)) != NULL) {
1143*7c478bd9Sstevel@tonic-gate 		DEBUGF(2, (CE_CONT, "alloc: created lock key %d cookie %d\n",
1144*7c478bd9Sstevel@tonic-gate 			key, lp->cookie));
1145*7c478bd9Sstevel@tonic-gate 		lp->key = key;
1146*7c478bd9Sstevel@tonic-gate 	} else {
1147*7c478bd9Sstevel@tonic-gate 		DEBUGF(2, (CE_CONT, "alloc: cannot create lock key %d\n", key));
1148*7c478bd9Sstevel@tonic-gate 		mutex_exit(&winlock_mutex);
1149*7c478bd9Sstevel@tonic-gate 		return (ENOMEM);
1150*7c478bd9Sstevel@tonic-gate 	}
1151*7c478bd9Sstevel@tonic-gate 	ASSERT((lp != NULL) && MUTEX_HELD(&lp->mutex));
1152*7c478bd9Sstevel@tonic-gate 
1153*7c478bd9Sstevel@tonic-gate 	mutex_exit(&winlock_mutex);
1154*7c478bd9Sstevel@tonic-gate 
1155*7c478bd9Sstevel@tonic-gate 	if (style == OLDSTYLE_LOCK) {
1156*7c478bd9Sstevel@tonic-gate 		err = ddi_copyout((caddr_t)&lp->cookie, (caddr_t)arg,
1157*7c478bd9Sstevel@tonic-gate 			sizeof (lp->cookie), mode);
1158*7c478bd9Sstevel@tonic-gate 	} else {
1159*7c478bd9Sstevel@tonic-gate 		wla.sy_ident = lp->cookie +
1160*7c478bd9Sstevel@tonic-gate 		    (uint_t)((uintptr_t)(lp->lockptr) & PAGEOFFSET);
1161*7c478bd9Sstevel@tonic-gate 		err = ddi_copyout((caddr_t)&wla, (caddr_t)arg,
1162*7c478bd9Sstevel@tonic-gate 		    sizeof (wla), mode);
1163*7c478bd9Sstevel@tonic-gate 	}
1164*7c478bd9Sstevel@tonic-gate 
1165*7c478bd9Sstevel@tonic-gate 	if (err) {
1166*7c478bd9Sstevel@tonic-gate 		/* On error, should undo allocation */
1167*7c478bd9Sstevel@tonic-gate 		lp->alloccount--;
1168*7c478bd9Sstevel@tonic-gate 
1169*7c478bd9Sstevel@tonic-gate 		/* Verify and delete if lock is unused now */
1170*7c478bd9Sstevel@tonic-gate 		garbage_collect_lock(lp, NULL);
1171*7c478bd9Sstevel@tonic-gate 		return (EFAULT);
1172*7c478bd9Sstevel@tonic-gate 	}
1173*7c478bd9Sstevel@tonic-gate 
1174*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);
1175*7c478bd9Sstevel@tonic-gate 	return (0);
1176*7c478bd9Sstevel@tonic-gate }
1177*7c478bd9Sstevel@tonic-gate 
1178*7c478bd9Sstevel@tonic-gate static int
1179*7c478bd9Sstevel@tonic-gate seglock_grabfree(intptr_t arg, int mode)	/* IOCTL */
1180*7c478bd9Sstevel@tonic-gate {
1181*7c478bd9Sstevel@tonic-gate 	struct seglock	*lp;
1182*7c478bd9Sstevel@tonic-gate 	uint_t	offset;
1183*7c478bd9Sstevel@tonic-gate 
1184*7c478bd9Sstevel@tonic-gate 	if (ddi_copyin((caddr_t)arg, &offset, sizeof (offset), mode)
1185*7c478bd9Sstevel@tonic-gate 	    != 0) {
1186*7c478bd9Sstevel@tonic-gate 		return (EFAULT);
1187*7c478bd9Sstevel@tonic-gate 	}
1188*7c478bd9Sstevel@tonic-gate 	DEBUGF(2, (CE_CONT, "seglock_grabfree: offset=%u", offset));
1189*7c478bd9Sstevel@tonic-gate 
1190*7c478bd9Sstevel@tonic-gate 	if ((lp = seglock_findlock(offset)) == NULL) {
1191*7c478bd9Sstevel@tonic-gate 		DEBUGF(2, (CE_CONT, "did not find lock\n"));
1192*7c478bd9Sstevel@tonic-gate 		return (EINVAL);
1193*7c478bd9Sstevel@tonic-gate 	}
1194*7c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT, " lock key %d, cookie %d, alloccount %d\n",
1195*7c478bd9Sstevel@tonic-gate 		lp->key, lp->cookie, lp->alloccount));
1196*7c478bd9Sstevel@tonic-gate 
1197*7c478bd9Sstevel@tonic-gate 	if (lp->alloccount > 0)
1198*7c478bd9Sstevel@tonic-gate 		lp->alloccount--;
1199*7c478bd9Sstevel@tonic-gate 
1200*7c478bd9Sstevel@tonic-gate 	/* Verify and delete if lock is unused now */
1201*7c478bd9Sstevel@tonic-gate 	garbage_collect_lock(lp, NULL);
1202*7c478bd9Sstevel@tonic-gate 	return (0);
1203*7c478bd9Sstevel@tonic-gate }
1204*7c478bd9Sstevel@tonic-gate 
1205*7c478bd9Sstevel@tonic-gate 
1206*7c478bd9Sstevel@tonic-gate /*
1207*7c478bd9Sstevel@tonic-gate  * Sets timeout in lock and UFLAGS in client
1208*7c478bd9Sstevel@tonic-gate  *	the UFLAGS are stored in the client structure and persistent only
1209*7c478bd9Sstevel@tonic-gate  *	till the unmap of the lock pages. If the process sets UFLAGS
1210*7c478bd9Sstevel@tonic-gate  *	does a map of the lock/unlock pages and unmaps them, the client
1211*7c478bd9Sstevel@tonic-gate  *	structure will get deleted and the UFLAGS will be lost. The process
1212*7c478bd9Sstevel@tonic-gate  *	will need to resetup the flags.
1213*7c478bd9Sstevel@tonic-gate  */
1214*7c478bd9Sstevel@tonic-gate static int
1215*7c478bd9Sstevel@tonic-gate seglock_settimeout(intptr_t arg, int mode)	/* IOCTL */
1216*7c478bd9Sstevel@tonic-gate {
1217*7c478bd9Sstevel@tonic-gate 	SegLock		*lp;
1218*7c478bd9Sstevel@tonic-gate 	SegProc		*sdp;
1219*7c478bd9Sstevel@tonic-gate 	struct winlocktimeout		wlt;
1220*7c478bd9Sstevel@tonic-gate 
1221*7c478bd9Sstevel@tonic-gate 	if (ddi_copyin((caddr_t)arg, &wlt, sizeof (wlt), mode) != 0) {
1222*7c478bd9Sstevel@tonic-gate 		return (EFAULT);
1223*7c478bd9Sstevel@tonic-gate 	}
1224*7c478bd9Sstevel@tonic-gate 
1225*7c478bd9Sstevel@tonic-gate 	if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
1226*7c478bd9Sstevel@tonic-gate 		return (EINVAL);
1227*7c478bd9Sstevel@tonic-gate 
1228*7c478bd9Sstevel@tonic-gate 	lp->timeout = MSEC_TO_TICK_ROUNDUP(wlt.sy_timeout);
1229*7c478bd9Sstevel@tonic-gate 	/* if timeout modified, wake up any sleepers */
1230*7c478bd9Sstevel@tonic-gate 	if (lp->sleepers > 0) {
1231*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&lp->locksleep);
1232*7c478bd9Sstevel@tonic-gate 	}
1233*7c478bd9Sstevel@tonic-gate 
1234*7c478bd9Sstevel@tonic-gate 	/*
1235*7c478bd9Sstevel@tonic-gate 	 * If the process is trying to set UFLAGS,
1236*7c478bd9Sstevel@tonic-gate 	 *	Find the client segproc and allocate one if needed
1237*7c478bd9Sstevel@tonic-gate 	 *	Set the flags preserving the kernel flags
1238*7c478bd9Sstevel@tonic-gate 	 * If the process is clearing UFLAGS
1239*7c478bd9Sstevel@tonic-gate 	 *	Find the client segproc but dont allocate one if does not exist
1240*7c478bd9Sstevel@tonic-gate 	 */
1241*7c478bd9Sstevel@tonic-gate 	if (wlt.sy_flags & UFLAGS) {
1242*7c478bd9Sstevel@tonic-gate 		sdp = seglock_allocclient(lp);
1243*7c478bd9Sstevel@tonic-gate 		sdp->flag = sdp->flag & KFLAGS | wlt.sy_flags & UFLAGS;
1244*7c478bd9Sstevel@tonic-gate 	} else if ((sdp = seglock_findclient(lp)) != NULL) {
1245*7c478bd9Sstevel@tonic-gate 		sdp->flag = sdp->flag & KFLAGS;
1246*7c478bd9Sstevel@tonic-gate 		/* If clearing UFLAGS leaves the segment or lock idle, delete */
1247*7c478bd9Sstevel@tonic-gate 		garbage_collect_lock(lp, sdp);
1248*7c478bd9Sstevel@tonic-gate 		return (0);
1249*7c478bd9Sstevel@tonic-gate 	}
1250*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
1251*7c478bd9Sstevel@tonic-gate 	return (0);
1252*7c478bd9Sstevel@tonic-gate }
1253*7c478bd9Sstevel@tonic-gate 
1254*7c478bd9Sstevel@tonic-gate static int
1255*7c478bd9Sstevel@tonic-gate seglock_gettimeout(intptr_t arg, int mode)
1256*7c478bd9Sstevel@tonic-gate {
1257*7c478bd9Sstevel@tonic-gate 	SegLock		*lp;
1258*7c478bd9Sstevel@tonic-gate 	SegProc		*sdp;
1259*7c478bd9Sstevel@tonic-gate 	struct winlocktimeout		wlt;
1260*7c478bd9Sstevel@tonic-gate 
1261*7c478bd9Sstevel@tonic-gate 	if (ddi_copyin((caddr_t)arg, &wlt, sizeof (wlt), mode) != 0)
1262*7c478bd9Sstevel@tonic-gate 		return (EFAULT);
1263*7c478bd9Sstevel@tonic-gate 
1264*7c478bd9Sstevel@tonic-gate 	if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
1265*7c478bd9Sstevel@tonic-gate 		return (EINVAL);
1266*7c478bd9Sstevel@tonic-gate 
1267*7c478bd9Sstevel@tonic-gate 	wlt.sy_timeout = TICK_TO_MSEC(lp->timeout);
1268*7c478bd9Sstevel@tonic-gate 	/*
1269*7c478bd9Sstevel@tonic-gate 	 * If this process has an active allocated lock return those flags
1270*7c478bd9Sstevel@tonic-gate 	 *	Dont allocate a client structure on gettimeout
1271*7c478bd9Sstevel@tonic-gate 	 * If not, return 0.
1272*7c478bd9Sstevel@tonic-gate 	 */
1273*7c478bd9Sstevel@tonic-gate 	if ((sdp = seglock_findclient(lp)) != NULL) {
1274*7c478bd9Sstevel@tonic-gate 		wlt.sy_flags = sdp->flag & UFLAGS;
1275*7c478bd9Sstevel@tonic-gate 	} else {
1276*7c478bd9Sstevel@tonic-gate 		wlt.sy_flags = 0;
1277*7c478bd9Sstevel@tonic-gate 	}
1278*7c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
1279*7c478bd9Sstevel@tonic-gate 
1280*7c478bd9Sstevel@tonic-gate 	if (ddi_copyout(&wlt, (caddr_t)arg, sizeof (wlt), mode) != 0)
1281*7c478bd9Sstevel@tonic-gate 		return (EFAULT);
1282*7c478bd9Sstevel@tonic-gate 
1283*7c478bd9Sstevel@tonic-gate 	return (0);
1284*7c478bd9Sstevel@tonic-gate }
1285*7c478bd9Sstevel@tonic-gate 
1286*7c478bd9Sstevel@tonic-gate /*
1287*7c478bd9Sstevel@tonic-gate  * Handle lock segment faults here...
1288*7c478bd9Sstevel@tonic-gate  *
1289*7c478bd9Sstevel@tonic-gate  * This is where the magic happens.
1290*7c478bd9Sstevel@tonic-gate  */
1291*7c478bd9Sstevel@tonic-gate 
1292*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
1293*7c478bd9Sstevel@tonic-gate static	int
1294*7c478bd9Sstevel@tonic-gate seglock_lockfault(devmap_cookie_t dhp, SegProc *sdp, SegLock *lp, uint_t rw)
1295*7c478bd9Sstevel@tonic-gate {
1296*7c478bd9Sstevel@tonic-gate 	SegProc *owner = lp->owner;
1297*7c478bd9Sstevel@tonic-gate 	int err;
1298*7c478bd9Sstevel@tonic-gate 
1299*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
1300*7c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT,
1301*7c478bd9Sstevel@tonic-gate 		"seglock_lockfault: hdl=%p, sdp=%p, lp=%p owner=%p\n",
1302*7c478bd9Sstevel@tonic-gate 		(void *)dhp, (void *)sdp, (void *)lp, (void *)owner));
1303*7c478bd9Sstevel@tonic-gate 
1304*7c478bd9Sstevel@tonic-gate 	/* lockfault is always called with sdp in current process context */
1305*7c478bd9Sstevel@tonic-gate 	ASSERT(ID(sdp) == CURPROC_ID);
1306*7c478bd9Sstevel@tonic-gate 
1307*7c478bd9Sstevel@tonic-gate 	/* If Lock has no current owner, give the mapping to new owner */
1308*7c478bd9Sstevel@tonic-gate 	if (owner == NULL) {
1309*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT, " lock has no current owner\n"));
1310*7c478bd9Sstevel@tonic-gate 		return (give_mapping(lp, sdp, rw));
1311*7c478bd9Sstevel@tonic-gate 	}
1312*7c478bd9Sstevel@tonic-gate 
1313*7c478bd9Sstevel@tonic-gate 	if (owner == sdp) {
1314*7c478bd9Sstevel@tonic-gate 		/*
1315*7c478bd9Sstevel@tonic-gate 		 * Current owner is faulting on owned lock segment OR
1316*7c478bd9Sstevel@tonic-gate 		 * Current owner is faulting on unlock page and has no waiters
1317*7c478bd9Sstevel@tonic-gate 		 * Then can give the mapping to current owner
1318*7c478bd9Sstevel@tonic-gate 		 */
1319*7c478bd9Sstevel@tonic-gate 	    if ((sdp->lockseg == dhp) || (lp->sleepers == 0)) {
1320*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT, "lock owner faulting\n"));
1321*7c478bd9Sstevel@tonic-gate 		return (give_mapping(lp, sdp, rw));
1322*7c478bd9Sstevel@tonic-gate 	    } else {
1323*7c478bd9Sstevel@tonic-gate 		/*
1324*7c478bd9Sstevel@tonic-gate 		 * Owner must be writing to unlock page and there are waiters.
1325*7c478bd9Sstevel@tonic-gate 		 * other cases have been checked earlier.
1326*7c478bd9Sstevel@tonic-gate 		 * Release the lock, owner, and owners mappings
1327*7c478bd9Sstevel@tonic-gate 		 * As the owner is trying to write to the unlock page, leave
1328*7c478bd9Sstevel@tonic-gate 		 * it with a trashpage mapping and wake up the sleepers
1329*7c478bd9Sstevel@tonic-gate 		 */
1330*7c478bd9Sstevel@tonic-gate 		ASSERT((dhp == sdp->unlockseg) && (lp->sleepers != 0));
1331*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT, " owner fault on unlock seg w/ sleeper\n"));
1332*7c478bd9Sstevel@tonic-gate 		return (lock_giveup(lp, 1));
1333*7c478bd9Sstevel@tonic-gate 	    }
1334*7c478bd9Sstevel@tonic-gate 	}
1335*7c478bd9Sstevel@tonic-gate 
1336*7c478bd9Sstevel@tonic-gate 	ASSERT(owner != sdp);
1337*7c478bd9Sstevel@tonic-gate 
1338*7c478bd9Sstevel@tonic-gate 	/*
1339*7c478bd9Sstevel@tonic-gate 	 * If old owner faulting on trash unlock mapping,
1340*7c478bd9Sstevel@tonic-gate 	 * load hat mappings to trash page
1341*7c478bd9Sstevel@tonic-gate 	 * RFE: non-owners should NOT be faulting on unlock mapping as they
1342*7c478bd9Sstevel@tonic-gate 	 * as first supposed to fault on the lock seg. We could give them
1343*7c478bd9Sstevel@tonic-gate 	 * a trash page or return error.
1344*7c478bd9Sstevel@tonic-gate 	 */
1345*7c478bd9Sstevel@tonic-gate 	if ((sdp->unlockseg == dhp) && (sdp->flag & TRASHPAGE)) {
1346*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT, " old owner reloads trash mapping\n"));
1347*7c478bd9Sstevel@tonic-gate 		return (devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1348*7c478bd9Sstevel@tonic-gate 			DEVMAP_ACCESS, rw));
1349*7c478bd9Sstevel@tonic-gate 	}
1350*7c478bd9Sstevel@tonic-gate 
1351*7c478bd9Sstevel@tonic-gate 	/*
1352*7c478bd9Sstevel@tonic-gate 	 * Non-owner faulting. Need to check current LOCK state.
1353*7c478bd9Sstevel@tonic-gate 	 *
1354*7c478bd9Sstevel@tonic-gate 	 * Before reading lock value in LOCK(lp), we must make sure that
1355*7c478bd9Sstevel@tonic-gate 	 * the owner cannot change its value before we change mappings
1356*7c478bd9Sstevel@tonic-gate 	 * or else we could end up either with a hung process
1357*7c478bd9Sstevel@tonic-gate 	 * or more than one process thinking they have the lock.
1358*7c478bd9Sstevel@tonic-gate 	 * We do that by unloading the owner's mappings
1359*7c478bd9Sstevel@tonic-gate 	 */
1360*7c478bd9Sstevel@tonic-gate 	DEBUGF(4, (CE_CONT, " owner loses mappings to check lock state\n"));
1361*7c478bd9Sstevel@tonic-gate 	err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE);
1362*7c478bd9Sstevel@tonic-gate 	err |= devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE);
1363*7c478bd9Sstevel@tonic-gate 	if (err != 0)
1364*7c478bd9Sstevel@tonic-gate 		return (err);	/* unable to remove owner mapping */
1365*7c478bd9Sstevel@tonic-gate 
1366*7c478bd9Sstevel@tonic-gate 	/*
1367*7c478bd9Sstevel@tonic-gate 	 * If lock is not held, then current owner mappings were
1368*7c478bd9Sstevel@tonic-gate 	 * unloaded above and we can give the lock to the new owner
1369*7c478bd9Sstevel@tonic-gate 	 */
1370*7c478bd9Sstevel@tonic-gate 	if (LOCK(lp) == 0) {
1371*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT,
1372*7c478bd9Sstevel@tonic-gate 			"Free lock (%p): Giving mapping to new owner %d\n",
1373*7c478bd9Sstevel@tonic-gate 			(void *)lp, ddi_get_pid()));
1374*7c478bd9Sstevel@tonic-gate 		return (give_mapping(lp, sdp, rw));
1375*7c478bd9Sstevel@tonic-gate 	}
1376*7c478bd9Sstevel@tonic-gate 
1377*7c478bd9Sstevel@tonic-gate 	DEBUGF(4, (CE_CONT, "  lock held, sleeping\n"));
1378*7c478bd9Sstevel@tonic-gate 
1379*7c478bd9Sstevel@tonic-gate 	/*
1380*7c478bd9Sstevel@tonic-gate 	 * A non-owning process tried to write (presumably to the lockpage,
1381*7c478bd9Sstevel@tonic-gate 	 * but it doesn't matter) but the lock is held; we need to sleep for
1382*7c478bd9Sstevel@tonic-gate 	 * the lock while there is an owner.
1383*7c478bd9Sstevel@tonic-gate 	 */
1384*7c478bd9Sstevel@tonic-gate 
1385*7c478bd9Sstevel@tonic-gate 	lp->sleepers++;
1386*7c478bd9Sstevel@tonic-gate 	while ((owner = lp->owner) != NULL) {
1387*7c478bd9Sstevel@tonic-gate 		int rval;
1388*7c478bd9Sstevel@tonic-gate 
1389*7c478bd9Sstevel@tonic-gate 		if ((lp->timeout == 0) || (owner->flag & SY_NOTIMEOUT)) {
1390*7c478bd9Sstevel@tonic-gate 			/*
1391*7c478bd9Sstevel@tonic-gate 			 * No timeout has been specified for this lock;
1392*7c478bd9Sstevel@tonic-gate 			 * we'll simply sleep on the condition variable.
1393*7c478bd9Sstevel@tonic-gate 			 */
1394*7c478bd9Sstevel@tonic-gate 			rval = cv_wait_sig(&lp->locksleep, &lp->mutex);
1395*7c478bd9Sstevel@tonic-gate 		} else {
1396*7c478bd9Sstevel@tonic-gate 			/*
1397*7c478bd9Sstevel@tonic-gate 			 * A timeout _has_ been specified for this lock. We need
1398*7c478bd9Sstevel@tonic-gate 			 * to wake up and possibly steal this lock if the owner
1399*7c478bd9Sstevel@tonic-gate 			 * does not let it go. Note that all sleepers on a lock
1400*7c478bd9Sstevel@tonic-gate 			 * with a timeout wait; the sleeper with the earliest
1401*7c478bd9Sstevel@tonic-gate 			 * timeout will wakeup, and potentially steal the lock
1402*7c478bd9Sstevel@tonic-gate 			 * Stealing the lock will cause a broadcast on the
1403*7c478bd9Sstevel@tonic-gate 			 * locksleep cv and thus kick the other timed waiters
1404*7c478bd9Sstevel@tonic-gate 			 * and cause everyone to restart in a new timedwait
1405*7c478bd9Sstevel@tonic-gate 			 */
1406*7c478bd9Sstevel@tonic-gate 			rval = cv_timedwait_sig(&lp->locksleep,
1407*7c478bd9Sstevel@tonic-gate 			    &lp->mutex, ddi_get_lbolt() + lp->timeout);
1408*7c478bd9Sstevel@tonic-gate 		}
1409*7c478bd9Sstevel@tonic-gate 
1410*7c478bd9Sstevel@tonic-gate 		/*
1411*7c478bd9Sstevel@tonic-gate 		 * Timeout and still old owner - steal lock
1412*7c478bd9Sstevel@tonic-gate 		 * Force-Release lock and give old owner a trashpage mapping
1413*7c478bd9Sstevel@tonic-gate 		 */
1414*7c478bd9Sstevel@tonic-gate 		if ((rval == -1) && (lp->owner == owner)) {
1415*7c478bd9Sstevel@tonic-gate 			/*
1416*7c478bd9Sstevel@tonic-gate 			 * if any errors in lock_giveup, go back and sleep/retry
1417*7c478bd9Sstevel@tonic-gate 			 * If successful, will break out of loop
1418*7c478bd9Sstevel@tonic-gate 			 */
1419*7c478bd9Sstevel@tonic-gate 			cmn_err(CE_NOTE, "Process %d timed out on lock %d\n",
1420*7c478bd9Sstevel@tonic-gate 				ddi_get_pid(), lp->cookie);
1421*7c478bd9Sstevel@tonic-gate 			(void) lock_giveup(lp, 1);
1422*7c478bd9Sstevel@tonic-gate 		} else if (rval == 0) { /* signal pending */
1423*7c478bd9Sstevel@tonic-gate 			cmn_err(CE_NOTE,
1424*7c478bd9Sstevel@tonic-gate 			    "Process %d signalled while waiting on lock %d\n",
1425*7c478bd9Sstevel@tonic-gate 			    ddi_get_pid(), lp->cookie);
1426*7c478bd9Sstevel@tonic-gate 			lp->sleepers--;
1427*7c478bd9Sstevel@tonic-gate 			return (FC_MAKE_ERR(EINTR));
1428*7c478bd9Sstevel@tonic-gate 		}
1429*7c478bd9Sstevel@tonic-gate 	}
1430*7c478bd9Sstevel@tonic-gate 
1431*7c478bd9Sstevel@tonic-gate 	lp->sleepers--;
1432*7c478bd9Sstevel@tonic-gate 	/*
1433*7c478bd9Sstevel@tonic-gate 	 * Give mapping to this process and save a fault later
1434*7c478bd9Sstevel@tonic-gate 	 */
1435*7c478bd9Sstevel@tonic-gate 	return (give_mapping(lp, sdp, rw));
1436*7c478bd9Sstevel@tonic-gate }
1437*7c478bd9Sstevel@tonic-gate 
1438*7c478bd9Sstevel@tonic-gate /*
1439*7c478bd9Sstevel@tonic-gate  * Utility: give a valid mapping to lock and unlock pages to current process.
1440*7c478bd9Sstevel@tonic-gate  * Caller responsible for unloading old owner's mappings
1441*7c478bd9Sstevel@tonic-gate  */
1442*7c478bd9Sstevel@tonic-gate 
1443*7c478bd9Sstevel@tonic-gate static int
1444*7c478bd9Sstevel@tonic-gate give_mapping(SegLock *lp, SegProc *sdp, uint_t rw)
1445*7c478bd9Sstevel@tonic-gate {
1446*7c478bd9Sstevel@tonic-gate 	int err = 0;
1447*7c478bd9Sstevel@tonic-gate 
1448*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
1449*7c478bd9Sstevel@tonic-gate 	ASSERT(!((lp->owner == NULL) && (LOCK(lp) != 0)));
1450*7c478bd9Sstevel@tonic-gate 	/* give_mapping is always called with sdp in current process context */
1451*7c478bd9Sstevel@tonic-gate 	ASSERT(ID(sdp) == CURPROC_ID);
1452*7c478bd9Sstevel@tonic-gate 
1453*7c478bd9Sstevel@tonic-gate 	/* remap any old trash mappings */
1454*7c478bd9Sstevel@tonic-gate 	if (sdp->flag & TRASHPAGE) {
1455*7c478bd9Sstevel@tonic-gate 		/* current owner should not have a trash mapping */
1456*7c478bd9Sstevel@tonic-gate 		ASSERT(sdp != lp->owner);
1457*7c478bd9Sstevel@tonic-gate 
1458*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT,
1459*7c478bd9Sstevel@tonic-gate 		    "new owner %d remapping old trash mapping\n",
1460*7c478bd9Sstevel@tonic-gate 		    ddi_get_pid()));
1461*7c478bd9Sstevel@tonic-gate 		if ((err = devmap_umem_remap(sdp->unlockseg, winlock_dip,
1462*7c478bd9Sstevel@tonic-gate 		    lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT, 0, 0)) != 0) {
1463*7c478bd9Sstevel@tonic-gate 			/*
1464*7c478bd9Sstevel@tonic-gate 			 * unable to remap old trash page,
1465*7c478bd9Sstevel@tonic-gate 			 * abort before changing owner
1466*7c478bd9Sstevel@tonic-gate 			 */
1467*7c478bd9Sstevel@tonic-gate 			DEBUGF(4, (CE_CONT,
1468*7c478bd9Sstevel@tonic-gate 			    "aborting: error in umem_remap %d\n", err));
1469*7c478bd9Sstevel@tonic-gate 			return (err);
1470*7c478bd9Sstevel@tonic-gate 		}
1471*7c478bd9Sstevel@tonic-gate 		sdp->flag &= ~TRASHPAGE;
1472*7c478bd9Sstevel@tonic-gate 	}
1473*7c478bd9Sstevel@tonic-gate 
1474*7c478bd9Sstevel@tonic-gate 	/* we have a new owner now */
1475*7c478bd9Sstevel@tonic-gate 	lp->owner = sdp;
1476*7c478bd9Sstevel@tonic-gate 
1477*7c478bd9Sstevel@tonic-gate 	if ((err = devmap_load(sdp->lockseg, lp->cookie, PAGESIZE,
1478*7c478bd9Sstevel@tonic-gate 	    DEVMAP_ACCESS, rw)) != 0) {
1479*7c478bd9Sstevel@tonic-gate 		return (err);
1480*7c478bd9Sstevel@tonic-gate 	}
1481*7c478bd9Sstevel@tonic-gate 	DEBUGF(4, (CE_CONT, "new owner %d gets lock mapping", ddi_get_pid()));
1482*7c478bd9Sstevel@tonic-gate 
1483*7c478bd9Sstevel@tonic-gate 	if (lp->sleepers) {
1484*7c478bd9Sstevel@tonic-gate 		/* Force unload unlock mapping if there are waiters */
1485*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT,
1486*7c478bd9Sstevel@tonic-gate 		    " lock has %d sleepers => remove unlock mapping\n",
1487*7c478bd9Sstevel@tonic-gate 		    lp->sleepers));
1488*7c478bd9Sstevel@tonic-gate 		err = devmap_unload(sdp->unlockseg, lp->cookie, PAGESIZE);
1489*7c478bd9Sstevel@tonic-gate 	} else {
1490*7c478bd9Sstevel@tonic-gate 		/*
1491*7c478bd9Sstevel@tonic-gate 		 * while here, give new owner a valid mapping to unlock
1492*7c478bd9Sstevel@tonic-gate 		 * page so we don't get called again.
1493*7c478bd9Sstevel@tonic-gate 		 */
1494*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT, " and unlock mapping\n"));
1495*7c478bd9Sstevel@tonic-gate 		err = devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1496*7c478bd9Sstevel@tonic-gate 			DEVMAP_ACCESS, PROT_WRITE);
1497*7c478bd9Sstevel@tonic-gate 	}
1498*7c478bd9Sstevel@tonic-gate 	return (err);
1499*7c478bd9Sstevel@tonic-gate }
1500*7c478bd9Sstevel@tonic-gate 
1501*7c478bd9Sstevel@tonic-gate /*
1502*7c478bd9Sstevel@tonic-gate  * Unload owner's mappings, release the lock and wakeup any sleepers
1503*7c478bd9Sstevel@tonic-gate  * If trash, then the old owner is given a trash mapping
1504*7c478bd9Sstevel@tonic-gate  *	=> old owner held lock too long and caused a timeout
1505*7c478bd9Sstevel@tonic-gate  */
1506*7c478bd9Sstevel@tonic-gate static int
1507*7c478bd9Sstevel@tonic-gate lock_giveup(SegLock *lp, int trash)
1508*7c478bd9Sstevel@tonic-gate {
1509*7c478bd9Sstevel@tonic-gate 	SegProc *owner = lp->owner;
1510*7c478bd9Sstevel@tonic-gate 
1511*7c478bd9Sstevel@tonic-gate 	DEBUGF(4, (CE_CONT, "winlock_giveup: lp=%p, owner=%p, trash %d\n",
1512*7c478bd9Sstevel@tonic-gate 	    (void *)lp, (void *)ID(lp->owner), trash));
1513*7c478bd9Sstevel@tonic-gate 
1514*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
1515*7c478bd9Sstevel@tonic-gate 	ASSERT(owner != NULL);
1516*7c478bd9Sstevel@tonic-gate 
1517*7c478bd9Sstevel@tonic-gate 	/*
1518*7c478bd9Sstevel@tonic-gate 	 * owner loses lockpage/unlockpage mappings and gains a
1519*7c478bd9Sstevel@tonic-gate 	 * trashpage mapping, if needed.
1520*7c478bd9Sstevel@tonic-gate 	 */
1521*7c478bd9Sstevel@tonic-gate 	if (!trash) {
1522*7c478bd9Sstevel@tonic-gate 		/*
1523*7c478bd9Sstevel@tonic-gate 		 * We do not handle errors in devmap_unload in the !trash case,
1524*7c478bd9Sstevel@tonic-gate 		 * as the process is attempting to unmap/exit or otherwise
1525*7c478bd9Sstevel@tonic-gate 		 * release the lock. Errors in unloading the mapping are not
1526*7c478bd9Sstevel@tonic-gate 		 * going to affect that (unmap does not take error return).
1527*7c478bd9Sstevel@tonic-gate 		 */
1528*7c478bd9Sstevel@tonic-gate 		(void) devmap_unload(owner->lockseg, lp->cookie, PAGESIZE);
1529*7c478bd9Sstevel@tonic-gate 		(void) devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE);
1530*7c478bd9Sstevel@tonic-gate 	} else {
1531*7c478bd9Sstevel@tonic-gate 		int err;
1532*7c478bd9Sstevel@tonic-gate 
1533*7c478bd9Sstevel@tonic-gate 		if (err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE)) {
1534*7c478bd9Sstevel@tonic-gate 			/* error unloading lockseg mapping. abort giveup */
1535*7c478bd9Sstevel@tonic-gate 			return (err);
1536*7c478bd9Sstevel@tonic-gate 		}
1537*7c478bd9Sstevel@tonic-gate 
1538*7c478bd9Sstevel@tonic-gate 		/*
1539*7c478bd9Sstevel@tonic-gate 		 * old owner gets mapping to trash page so it can continue
1540*7c478bd9Sstevel@tonic-gate 		 * devmap_umem_remap does a hat_unload (and does it holding
1541*7c478bd9Sstevel@tonic-gate 		 * the right locks), so no need to devmap_unload on unlockseg
1542*7c478bd9Sstevel@tonic-gate 		 */
1543*7c478bd9Sstevel@tonic-gate 		if ((err = devmap_umem_remap(owner->unlockseg, winlock_dip,
1544*7c478bd9Sstevel@tonic-gate 		    trashpage_cookie, 0, PAGESIZE, WINLOCK_PROT, 0, 0)) != 0) {
1545*7c478bd9Sstevel@tonic-gate 			/* error remapping to trash page, abort giveup */
1546*7c478bd9Sstevel@tonic-gate 			return (err);
1547*7c478bd9Sstevel@tonic-gate 		}
1548*7c478bd9Sstevel@tonic-gate 		owner->flag |= TRASHPAGE;
1549*7c478bd9Sstevel@tonic-gate 		/*
1550*7c478bd9Sstevel@tonic-gate 		 * Preload mapping to trash page by calling devmap_load
1551*7c478bd9Sstevel@tonic-gate 		 * However, devmap_load can only be called on the faulting
1552*7c478bd9Sstevel@tonic-gate 		 * process context and not on the owner's process context
1553*7c478bd9Sstevel@tonic-gate 		 * we preload only if we happen to be in owner process context
1554*7c478bd9Sstevel@tonic-gate 		 * Other processes will fault on the unlock mapping
1555*7c478bd9Sstevel@tonic-gate 		 * and be given a trash mapping at that time.
1556*7c478bd9Sstevel@tonic-gate 		 */
1557*7c478bd9Sstevel@tonic-gate 		if (ID(owner) == CURPROC_ID) {
1558*7c478bd9Sstevel@tonic-gate 		    (void) devmap_load(owner->unlockseg, lp->cookie, PAGESIZE,
1559*7c478bd9Sstevel@tonic-gate 			DEVMAP_ACCESS, PROT_WRITE);
1560*7c478bd9Sstevel@tonic-gate 		}
1561*7c478bd9Sstevel@tonic-gate 	}
1562*7c478bd9Sstevel@tonic-gate 
1563*7c478bd9Sstevel@tonic-gate 	lp->owner = NULL;
1564*7c478bd9Sstevel@tonic-gate 
1565*7c478bd9Sstevel@tonic-gate 	/* Clear the lock value in underlying page so new owner can grab it */
1566*7c478bd9Sstevel@tonic-gate 	LOCK(lp) = 0;
1567*7c478bd9Sstevel@tonic-gate 
1568*7c478bd9Sstevel@tonic-gate 	if (lp->sleepers) {
1569*7c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT, "  waking up, lp=%p\n", (void *)lp));
1570*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&lp->locksleep);
1571*7c478bd9Sstevel@tonic-gate 	}
1572*7c478bd9Sstevel@tonic-gate 	return (0);
1573*7c478bd9Sstevel@tonic-gate }
1574*7c478bd9Sstevel@tonic-gate 
1575*7c478bd9Sstevel@tonic-gate /*
1576*7c478bd9Sstevel@tonic-gate  * destroy all allocated memory.
1577*7c478bd9Sstevel@tonic-gate  */
1578*7c478bd9Sstevel@tonic-gate 
1579*7c478bd9Sstevel@tonic-gate static void
1580*7c478bd9Sstevel@tonic-gate lock_destroyall(void)
1581*7c478bd9Sstevel@tonic-gate {
1582*7c478bd9Sstevel@tonic-gate 	SegLock	*lp, *lpnext;
1583*7c478bd9Sstevel@tonic-gate 
1584*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&winlock_mutex));
1585*7c478bd9Sstevel@tonic-gate 	ASSERT(lock_list == NULL);
1586*7c478bd9Sstevel@tonic-gate 
1587*7c478bd9Sstevel@tonic-gate 	DEBUGF(1, (CE_CONT, "Lock list empty. Releasing free list\n"));
1588*7c478bd9Sstevel@tonic-gate 	for (lp = lock_free_list; lp != NULL; lp = lpnext) {
1589*7c478bd9Sstevel@tonic-gate 		mutex_enter(&lp->mutex);
1590*7c478bd9Sstevel@tonic-gate 		lpnext =  lp->next;
1591*7c478bd9Sstevel@tonic-gate 		ASSERT(lp->clients == NULL);
1592*7c478bd9Sstevel@tonic-gate 		ASSERT(lp->owner == NULL);
1593*7c478bd9Sstevel@tonic-gate 		ASSERT(lp->alloccount == 0);
1594*7c478bd9Sstevel@tonic-gate 		mutex_destroy(&lp->mutex);
1595*7c478bd9Sstevel@tonic-gate 		cv_destroy(&lp->locksleep);
1596*7c478bd9Sstevel@tonic-gate 		kmem_free(lp, sizeof (SegLock));
1597*7c478bd9Sstevel@tonic-gate 	}
1598*7c478bd9Sstevel@tonic-gate 	lock_free_list = NULL;
1599*7c478bd9Sstevel@tonic-gate 	next_lock = 0;
1600*7c478bd9Sstevel@tonic-gate }
1601*7c478bd9Sstevel@tonic-gate 
1602*7c478bd9Sstevel@tonic-gate 
1603*7c478bd9Sstevel@tonic-gate /* RFE: create mdb walkers instead of dump routines? */
1604*7c478bd9Sstevel@tonic-gate static void
1605*7c478bd9Sstevel@tonic-gate seglock_dump_all(void)
1606*7c478bd9Sstevel@tonic-gate {
1607*7c478bd9Sstevel@tonic-gate 	SegLock	*lp;
1608*7c478bd9Sstevel@tonic-gate 
1609*7c478bd9Sstevel@tonic-gate 	mutex_enter(&winlock_mutex);
1610*7c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "ID\tKEY\tNALLOC\tATTCH\tOWNED\tLOCK\tWAITER\n");
1611*7c478bd9Sstevel@tonic-gate 
1612*7c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "Lock List:\n");
1613*7c478bd9Sstevel@tonic-gate 	for (lp = lock_list; lp != NULL; lp = lp->next) {
1614*7c478bd9Sstevel@tonic-gate 		mutex_enter(&lp->mutex);
1615*7c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT, "%d\t%d\t%u\t%c\t%c\t%c\t%d\n",
1616*7c478bd9Sstevel@tonic-gate 		    lp->cookie, lp->key, lp->alloccount,
1617*7c478bd9Sstevel@tonic-gate 		    lp->clients ? 'Y' : 'N',
1618*7c478bd9Sstevel@tonic-gate 		    lp->owner ? 'Y' : 'N',
1619*7c478bd9Sstevel@tonic-gate 		    lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N',
1620*7c478bd9Sstevel@tonic-gate 		    lp->sleepers);
1621*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);
1622*7c478bd9Sstevel@tonic-gate 	}
1623*7c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "Free Lock List:\n");
1624*7c478bd9Sstevel@tonic-gate 	for (lp = lock_free_list; lp != NULL; lp = lp->next) {
1625*7c478bd9Sstevel@tonic-gate 		mutex_enter(&lp->mutex);
1626*7c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT, "%d\t%d\t%u\t%c\t%c\t%c\t%d\n",
1627*7c478bd9Sstevel@tonic-gate 		    lp->cookie, lp->key, lp->alloccount,
1628*7c478bd9Sstevel@tonic-gate 		    lp->clients ? 'Y' : 'N',
1629*7c478bd9Sstevel@tonic-gate 		    lp->owner ? 'Y' : 'N',
1630*7c478bd9Sstevel@tonic-gate 		    lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N',
1631*7c478bd9Sstevel@tonic-gate 		    lp->sleepers);
1632*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);
1633*7c478bd9Sstevel@tonic-gate 	}
1634*7c478bd9Sstevel@tonic-gate 
1635*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
1636*7c478bd9Sstevel@tonic-gate 	if (lock_debug < 3) {
1637*7c478bd9Sstevel@tonic-gate 		mutex_exit(&winlock_mutex);
1638*7c478bd9Sstevel@tonic-gate 		return;
1639*7c478bd9Sstevel@tonic-gate 	}
1640*7c478bd9Sstevel@tonic-gate 
1641*7c478bd9Sstevel@tonic-gate 	for (lp = lock_list; lp != NULL; lp = lp->next) {
1642*7c478bd9Sstevel@tonic-gate 		SegProc	*sdp;
1643*7c478bd9Sstevel@tonic-gate 
1644*7c478bd9Sstevel@tonic-gate 		mutex_enter(&lp->mutex);
1645*7c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT,
1646*7c478bd9Sstevel@tonic-gate 		    "lock %p, key=%d, cookie=%d, nalloc=%u, lock=%d, wait=%d\n",
1647*7c478bd9Sstevel@tonic-gate 		    (void *)lp, lp->key, lp->cookie, lp->alloccount,
1648*7c478bd9Sstevel@tonic-gate 		    lp->lockptr != 0 ? LOCK(lp) : -1, lp->sleepers);
1649*7c478bd9Sstevel@tonic-gate 
1650*7c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT,
1651*7c478bd9Sstevel@tonic-gate 		    "style=%d, lockptr=%p, timeout=%ld, clients=%p, owner=%p\n",
1652*7c478bd9Sstevel@tonic-gate 		    lp->style, (void *)lp->lockptr, lp->timeout,
1653*7c478bd9Sstevel@tonic-gate 		    (void *)lp->clients, (void *)lp->owner);
1654*7c478bd9Sstevel@tonic-gate 
1655*7c478bd9Sstevel@tonic-gate 
1656*7c478bd9Sstevel@tonic-gate 		for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1657*7c478bd9Sstevel@tonic-gate 			cmn_err(CE_CONT, "  client %p%s, lp=%p, flag=%x, "
1658*7c478bd9Sstevel@tonic-gate 			    "process tag=%p, lockseg=%p, unlockseg=%p\n",
1659*7c478bd9Sstevel@tonic-gate 			    (void *)sdp, sdp == lp->owner ? " (owner)" : "",
1660*7c478bd9Sstevel@tonic-gate 			    (void *)sdp->lp, sdp->flag, (void *)ID(sdp),
1661*7c478bd9Sstevel@tonic-gate 			    (void *)sdp->lockseg, (void *)sdp->unlockseg);
1662*7c478bd9Sstevel@tonic-gate 		}
1663*7c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);
1664*7c478bd9Sstevel@tonic-gate 	}
1665*7c478bd9Sstevel@tonic-gate #endif
1666*7c478bd9Sstevel@tonic-gate 	mutex_exit(&winlock_mutex);
1667*7c478bd9Sstevel@tonic-gate }
1668*7c478bd9Sstevel@tonic-gate 
1669*7c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
1670*7c478bd9Sstevel@tonic-gate 
1671*7c478bd9Sstevel@tonic-gate static struct modldrv modldrv = {
1672*7c478bd9Sstevel@tonic-gate 	&mod_driverops,		/* Type of module.  This one is a driver */
1673*7c478bd9Sstevel@tonic-gate 	"Winlock Driver v%I%",	/* Name of the module */
1674*7c478bd9Sstevel@tonic-gate 	&winlock_ops,		/* driver ops */
1675*7c478bd9Sstevel@tonic-gate };
1676*7c478bd9Sstevel@tonic-gate 
1677*7c478bd9Sstevel@tonic-gate static struct modlinkage modlinkage = {
1678*7c478bd9Sstevel@tonic-gate 	MODREV_1,
1679*7c478bd9Sstevel@tonic-gate 	(void *)&modldrv,
1680*7c478bd9Sstevel@tonic-gate 	0,
1681*7c478bd9Sstevel@tonic-gate 	0,
1682*7c478bd9Sstevel@tonic-gate 	0
1683*7c478bd9Sstevel@tonic-gate };
1684*7c478bd9Sstevel@tonic-gate 
1685*7c478bd9Sstevel@tonic-gate int
1686*7c478bd9Sstevel@tonic-gate _init(void)
1687*7c478bd9Sstevel@tonic-gate {
1688*7c478bd9Sstevel@tonic-gate 	int e;
1689*7c478bd9Sstevel@tonic-gate 
1690*7c478bd9Sstevel@tonic-gate 	mutex_init(&winlock_mutex, NULL, MUTEX_DEFAULT, NULL);
1691*7c478bd9Sstevel@tonic-gate 	e = mod_install(&modlinkage);
1692*7c478bd9Sstevel@tonic-gate 	if (e) {
1693*7c478bd9Sstevel@tonic-gate 		mutex_destroy(&winlock_mutex);
1694*7c478bd9Sstevel@tonic-gate 	}
1695*7c478bd9Sstevel@tonic-gate 	return (e);
1696*7c478bd9Sstevel@tonic-gate }
1697*7c478bd9Sstevel@tonic-gate 
1698*7c478bd9Sstevel@tonic-gate 
1699*7c478bd9Sstevel@tonic-gate int
1700*7c478bd9Sstevel@tonic-gate _info(struct modinfo *modinfop)
1701*7c478bd9Sstevel@tonic-gate {
1702*7c478bd9Sstevel@tonic-gate 	return (mod_info(&modlinkage, modinfop));
1703*7c478bd9Sstevel@tonic-gate }
1704*7c478bd9Sstevel@tonic-gate 
1705*7c478bd9Sstevel@tonic-gate int
1706*7c478bd9Sstevel@tonic-gate _fini(void)
1707*7c478bd9Sstevel@tonic-gate {
1708*7c478bd9Sstevel@tonic-gate 	int	e;
1709*7c478bd9Sstevel@tonic-gate 
1710*7c478bd9Sstevel@tonic-gate 	e = mod_remove(&modlinkage);
1711*7c478bd9Sstevel@tonic-gate 	if (e == 0) {
1712*7c478bd9Sstevel@tonic-gate 		mutex_destroy(&winlock_mutex);
1713*7c478bd9Sstevel@tonic-gate 	}
1714*7c478bd9Sstevel@tonic-gate 	return (e);
1715*7c478bd9Sstevel@tonic-gate }
1716