xref: /titanic_53/usr/src/uts/common/syscall/poll.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
28*7c478bd9Sstevel@tonic-gate /*	  All Rights Reserved  	*/
29*7c478bd9Sstevel@tonic-gate 
30*7c478bd9Sstevel@tonic-gate /*
31*7c478bd9Sstevel@tonic-gate  * Portions of this source code were derived from Berkeley 4.3 BSD
32*7c478bd9Sstevel@tonic-gate  * under license from the Regents of the University of California.
33*7c478bd9Sstevel@tonic-gate  */
34*7c478bd9Sstevel@tonic-gate 
35*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
36*7c478bd9Sstevel@tonic-gate 
37*7c478bd9Sstevel@tonic-gate #include <sys/param.h>
38*7c478bd9Sstevel@tonic-gate #include <sys/isa_defs.h>
39*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
40*7c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
41*7c478bd9Sstevel@tonic-gate #include <sys/user.h>
42*7c478bd9Sstevel@tonic-gate #include <sys/systm.h>
43*7c478bd9Sstevel@tonic-gate #include <sys/errno.h>
44*7c478bd9Sstevel@tonic-gate #include <sys/time.h>
45*7c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
46*7c478bd9Sstevel@tonic-gate #include <sys/file.h>
47*7c478bd9Sstevel@tonic-gate #include <sys/mode.h>
48*7c478bd9Sstevel@tonic-gate #include <sys/proc.h>
49*7c478bd9Sstevel@tonic-gate #include <sys/uio.h>
50*7c478bd9Sstevel@tonic-gate #include <sys/poll_impl.h>
51*7c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
52*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
53*7c478bd9Sstevel@tonic-gate #include <sys/debug.h>
54*7c478bd9Sstevel@tonic-gate #include <sys/bitmap.h>
55*7c478bd9Sstevel@tonic-gate #include <sys/kstat.h>
56*7c478bd9Sstevel@tonic-gate #include <sys/rctl.h>
57*7c478bd9Sstevel@tonic-gate #include <sys/port_kernel.h>
58*7c478bd9Sstevel@tonic-gate #include <sys/schedctl.h>
59*7c478bd9Sstevel@tonic-gate 
#define	NPHLOCKS	64	/* Number of locks; must be power of 2 */
/*
 * Hash a pollhead address to one of the NPHLOCKS bucket locks below.
 * The >> 8 discards low address bits that are identical for most
 * pollheads (alignment/allocator granularity) before masking.
 */
#define	PHLOCKADDR(php)	&plocks[(((uintptr_t)(php)) >> 8) & (NPHLOCKS - 1)]
#define	PHLOCK(php)	PHLOCKADDR(php).pp_lock
#define	PH_ENTER(php)	mutex_enter(PHLOCK(php))
#define	PH_EXIT(php)	mutex_exit(PHLOCK(php))
/* Event bits a caller may legitimately request or receive from poll(2). */
#define	VALID_POLL_EVENTS	(POLLIN | POLLPRI | POLLOUT | POLLRDNORM \
	| POLLRDBAND | POLLWRBAND | POLLHUP | POLLERR | POLLNVAL)
67*7c478bd9Sstevel@tonic-gate 
68*7c478bd9Sstevel@tonic-gate /*
69*7c478bd9Sstevel@tonic-gate  * global counters to collect some stats
70*7c478bd9Sstevel@tonic-gate  */
71*7c478bd9Sstevel@tonic-gate static struct {
72*7c478bd9Sstevel@tonic-gate 	kstat_named_t	polllistmiss;	/* failed to find a cached poll list */
73*7c478bd9Sstevel@tonic-gate 	kstat_named_t	pollcachehit;	/* list matched 100% w/ cached one */
74*7c478bd9Sstevel@tonic-gate 	kstat_named_t	pollcachephit;	/* list matched < 100% w/ cached one */
75*7c478bd9Sstevel@tonic-gate 	kstat_named_t	pollcachemiss;	/* every list entry is dif from cache */
76*7c478bd9Sstevel@tonic-gate } pollstats = {
77*7c478bd9Sstevel@tonic-gate 	{ "polllistmiss",	KSTAT_DATA_UINT64 },
78*7c478bd9Sstevel@tonic-gate 	{ "pollcachehit",	KSTAT_DATA_UINT64 },
79*7c478bd9Sstevel@tonic-gate 	{ "pollcachephit",	KSTAT_DATA_UINT64 },
80*7c478bd9Sstevel@tonic-gate 	{ "pollcachemiss",	KSTAT_DATA_UINT64 }
81*7c478bd9Sstevel@tonic-gate };
82*7c478bd9Sstevel@tonic-gate 
83*7c478bd9Sstevel@tonic-gate kstat_named_t *pollstats_ptr = (kstat_named_t *)&pollstats;
84*7c478bd9Sstevel@tonic-gate uint_t pollstats_ndata = sizeof (pollstats) / sizeof (kstat_named_t);
85*7c478bd9Sstevel@tonic-gate 
86*7c478bd9Sstevel@tonic-gate struct pplock	{
87*7c478bd9Sstevel@tonic-gate 	kmutex_t	pp_lock;
88*7c478bd9Sstevel@tonic-gate 	short		pp_flag;
89*7c478bd9Sstevel@tonic-gate 	kcondvar_t	pp_wait_cv;
90*7c478bd9Sstevel@tonic-gate 	int32_t		pp_pad;		/* to a nice round 16 bytes */
91*7c478bd9Sstevel@tonic-gate };
92*7c478bd9Sstevel@tonic-gate 
93*7c478bd9Sstevel@tonic-gate static struct pplock plocks[NPHLOCKS];	/* Hash array of pollhead locks */
94*7c478bd9Sstevel@tonic-gate 
95*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
96*7c478bd9Sstevel@tonic-gate static int pollchecksanity(pollstate_t *, nfds_t);
97*7c478bd9Sstevel@tonic-gate static int pollcheckxref(pollstate_t *, int);
98*7c478bd9Sstevel@tonic-gate static void pollcheckphlist(void);
99*7c478bd9Sstevel@tonic-gate static int pollcheckrevents(pollstate_t *, int, int, int);
100*7c478bd9Sstevel@tonic-gate static void checkpolldat(pollstate_t *);
101*7c478bd9Sstevel@tonic-gate #endif	/* DEBUG */
102*7c478bd9Sstevel@tonic-gate static int plist_chkdupfd(file_t *, polldat_t *, pollstate_t *, pollfd_t *, int,
103*7c478bd9Sstevel@tonic-gate     int *);
104*7c478bd9Sstevel@tonic-gate 
105*7c478bd9Sstevel@tonic-gate /*
106*7c478bd9Sstevel@tonic-gate  * Data structure overview:
107*7c478bd9Sstevel@tonic-gate  * The per-thread poll state consists of
108*7c478bd9Sstevel@tonic-gate  *	one pollstate_t
109*7c478bd9Sstevel@tonic-gate  *	one pollcache_t
110*7c478bd9Sstevel@tonic-gate  *	one bitmap with one event bit per fd
111*7c478bd9Sstevel@tonic-gate  *	a (two-dimensional) hashed array of polldat_t structures - one entry
112*7c478bd9Sstevel@tonic-gate  *	per fd
113*7c478bd9Sstevel@tonic-gate  *
114*7c478bd9Sstevel@tonic-gate  * This conglomerate of data structures interact with
115*7c478bd9Sstevel@tonic-gate  *	the pollhead which is used by VOP_POLL and pollwakeup
116*7c478bd9Sstevel@tonic-gate  *	(protected by the PHLOCK, cached array of plocks), and
117*7c478bd9Sstevel@tonic-gate  *	the fpollinfo list hanging off the fi_list which is used to notify
118*7c478bd9Sstevel@tonic-gate  *	poll when a cached fd is closed. This is protected by uf_lock.
119*7c478bd9Sstevel@tonic-gate  *
120*7c478bd9Sstevel@tonic-gate  * Invariants:
121*7c478bd9Sstevel@tonic-gate  *	pd_php (pollhead pointer) is set iff (if and only if) the polldat
122*7c478bd9Sstevel@tonic-gate  *	is on that pollhead. This is modified atomically under pc_lock.
123*7c478bd9Sstevel@tonic-gate  *
124*7c478bd9Sstevel@tonic-gate  *	pd_fp (file_t pointer) is set iff the thread is on the fpollinfo
125*7c478bd9Sstevel@tonic-gate  *	list for that open file.
126*7c478bd9Sstevel@tonic-gate  *	This is modified atomically under pc_lock.
127*7c478bd9Sstevel@tonic-gate  *
128*7c478bd9Sstevel@tonic-gate  *	pd_count is the sum (over all values of i) of pd_ref[i].xf_refcnt.
129*7c478bd9Sstevel@tonic-gate  *	Iff pd_ref[i].xf_refcnt >= 1 then
130*7c478bd9Sstevel@tonic-gate  *		ps_pcacheset[i].pcs_pollfd[pd_ref[i].xf_position].fd == pd_fd
131*7c478bd9Sstevel@tonic-gate  *	Iff pd_ref[i].xf_refcnt > 1 then
132*7c478bd9Sstevel@tonic-gate  *		In ps_pcacheset[i].pcs_pollfd between index
133*7c478bd9Sstevel@tonic-gate  *		pd_ref[i].xf_position] and the end of the list
134*7c478bd9Sstevel@tonic-gate  *		there are xf_refcnt entries with .fd == pd_fd
135*7c478bd9Sstevel@tonic-gate  *
136*7c478bd9Sstevel@tonic-gate  * Locking design:
137*7c478bd9Sstevel@tonic-gate  * Whenever possible the design relies on the fact that the poll cache state
138*7c478bd9Sstevel@tonic-gate  * is per thread thus for both poll and exit it is self-synchronizing.
139*7c478bd9Sstevel@tonic-gate  * Thus the key interactions where other threads access the state are:
140*7c478bd9Sstevel@tonic-gate  *	pollwakeup (and polltime), and
141*7c478bd9Sstevel@tonic-gate  *	close cleaning up the cached references to an open file
142*7c478bd9Sstevel@tonic-gate  *
143*7c478bd9Sstevel@tonic-gate  * The two key locks in poll proper are ps_lock and pc_lock.
144*7c478bd9Sstevel@tonic-gate  *
145*7c478bd9Sstevel@tonic-gate  * The ps_lock is used for synchronization between poll, (lwp_)exit and close
146*7c478bd9Sstevel@tonic-gate  * to ensure that modifications to pollcacheset structure are serialized.
147*7c478bd9Sstevel@tonic-gate  * This lock is held through most of poll() except where poll sleeps
148*7c478bd9Sstevel@tonic-gate  * since there is little need to handle closes concurrently with the execution
149*7c478bd9Sstevel@tonic-gate  * of poll.
150*7c478bd9Sstevel@tonic-gate  * The pc_lock protects most of the fields in pollcache structure and polldat
151*7c478bd9Sstevel@tonic-gate  * structures (which are accessed by poll, pollwakeup, and polltime)
152*7c478bd9Sstevel@tonic-gate  * with the exception of fields that are only modified when only one thread
153*7c478bd9Sstevel@tonic-gate  * can access this per-thread state.
154*7c478bd9Sstevel@tonic-gate  * Those exceptions occur in poll when first allocating the per-thread state,
155*7c478bd9Sstevel@tonic-gate  * when poll grows the number of polldat (never shrinks), and when
156*7c478bd9Sstevel@tonic-gate  * exit/pollcleanup has ensured that there are no references from either
157*7c478bd9Sstevel@tonic-gate  * pollheads or fpollinfo to the threads poll state.
158*7c478bd9Sstevel@tonic-gate  *
159*7c478bd9Sstevel@tonic-gate  * Poll(2) system call is the only path which ps_lock and pc_lock are both
160*7c478bd9Sstevel@tonic-gate  * held, in that order. It needs ps_lock to synchronize with close and
161*7c478bd9Sstevel@tonic-gate  * lwp_exit; and pc_lock with pollwakeup.
162*7c478bd9Sstevel@tonic-gate  *
163*7c478bd9Sstevel@tonic-gate  * The locking interaction between pc_lock and PHLOCK take into account
164*7c478bd9Sstevel@tonic-gate  * that poll acquires these locks in the order of pc_lock and then PHLOCK
165*7c478bd9Sstevel@tonic-gate  * while pollwakeup does it in the reverse order. Thus pollwakeup implements
166*7c478bd9Sstevel@tonic-gate  * deadlock avoidance by dropping the locks and reacquiring them in the
167*7c478bd9Sstevel@tonic-gate  * reverse order. For this to work pollwakeup needs to prevent the thread
168*7c478bd9Sstevel@tonic-gate  * from exiting and freeing all of the poll related state. This is done
169*7c478bd9Sstevel@tonic-gate  * using
170*7c478bd9Sstevel@tonic-gate  *	the pc_no_exit lock
171*7c478bd9Sstevel@tonic-gate  *	the pc_busy counter
172*7c478bd9Sstevel@tonic-gate  *	the pc_busy_cv condition variable
173*7c478bd9Sstevel@tonic-gate  *
174*7c478bd9Sstevel@tonic-gate  * The locking interaction between pc_lock and uf_lock has similar
175*7c478bd9Sstevel@tonic-gate  * issues. Poll holds ps_lock and/or pc_lock across calls to getf/releasef
176*7c478bd9Sstevel@tonic-gate  * which acquire uf_lock. The poll cleanup in close needs to hold uf_lock
177*7c478bd9Sstevel@tonic-gate  * to prevent poll or exit from doing a delfpollinfo after which the thread
178*7c478bd9Sstevel@tonic-gate  * might exit. But the cleanup needs to acquire pc_lock when modifying
179*7c478bd9Sstevel@tonic-gate  * the poll cache state. The solution is to use pc_busy and do the close
180*7c478bd9Sstevel@tonic-gate  * cleanup in two phases:
181*7c478bd9Sstevel@tonic-gate  *	First close calls pollblockexit which increments pc_busy.
182*7c478bd9Sstevel@tonic-gate  *	This prevents the per-thread poll related state from being freed.
183*7c478bd9Sstevel@tonic-gate  *	Then close drops uf_lock and calls pollcacheclean.
184*7c478bd9Sstevel@tonic-gate  *	This routine can then acquire pc_lock and remove any references
185*7c478bd9Sstevel@tonic-gate  *	to the closing fd (as well as recording that it has been closed
186*7c478bd9Sstevel@tonic-gate  *	so that a POLLNVAL can be generated even if the fd is reused before
187*7c478bd9Sstevel@tonic-gate  *	poll has been woken up and checked getf() again).
188*7c478bd9Sstevel@tonic-gate  *
189*7c478bd9Sstevel@tonic-gate  * When removing a polled fd from poll cache, the fd is always removed
190*7c478bd9Sstevel@tonic-gate  * from pollhead list first and then from fpollinfo list, i.e.,
191*7c478bd9Sstevel@tonic-gate  * pollhead_delete() is called before delfpollinfo().
192*7c478bd9Sstevel@tonic-gate  *
193*7c478bd9Sstevel@tonic-gate  *
194*7c478bd9Sstevel@tonic-gate  * Locking hierarchy:
195*7c478bd9Sstevel@tonic-gate  *	pc_no_exit is a leaf level lock.
196*7c478bd9Sstevel@tonic-gate  *	ps_lock is held when acquiring pc_lock (except when pollwakeup
197*7c478bd9Sstevel@tonic-gate  *	acquires pc_lock).
198*7c478bd9Sstevel@tonic-gate  *	pc_lock might be held when acquiring PHLOCK (pollhead_insert/
199*7c478bd9Sstevel@tonic-gate  *	pollhead_delete)
200*7c478bd9Sstevel@tonic-gate  *	pc_lock is always held (but this is not required)
201*7c478bd9Sstevel@tonic-gate  *	when acquiring PHLOCK (in polladd/pollhead_delete and pollwakeup called
202*7c478bd9Sstevel@tonic-gate  *	from pcache_clean_entry).
203*7c478bd9Sstevel@tonic-gate  *	pc_lock is held across addfpollinfo/delfpollinfo which acquire
204*7c478bd9Sstevel@tonic-gate  *	uf_lock.
205*7c478bd9Sstevel@tonic-gate  *	pc_lock is held across getf/releasef which acquire uf_lock.
206*7c478bd9Sstevel@tonic-gate  *	ps_lock might be held across getf/releasef which acquire uf_lock.
207*7c478bd9Sstevel@tonic-gate  *	pollwakeup tries to acquire pc_lock while holding PHLOCK
208*7c478bd9Sstevel@tonic-gate  *	but drops the locks and reacquire them in reverse order to avoid
209*7c478bd9Sstevel@tonic-gate  *	deadlock.
210*7c478bd9Sstevel@tonic-gate  *
211*7c478bd9Sstevel@tonic-gate  * Note also that there is deadlock avoidance support for VOP_POLL routines
212*7c478bd9Sstevel@tonic-gate  * and pollwakeup involving a file system or driver lock.
213*7c478bd9Sstevel@tonic-gate  * See below.
214*7c478bd9Sstevel@tonic-gate  */
215*7c478bd9Sstevel@tonic-gate 
216*7c478bd9Sstevel@tonic-gate /*
217*7c478bd9Sstevel@tonic-gate  * Deadlock avoidance support for VOP_POLL() routines.  This is
218*7c478bd9Sstevel@tonic-gate  * sometimes necessary to prevent deadlock between polling threads
219*7c478bd9Sstevel@tonic-gate  * (which hold poll locks on entry to xx_poll(), then acquire foo)
220*7c478bd9Sstevel@tonic-gate  * and pollwakeup() threads (which hold foo, then acquire poll locks).
221*7c478bd9Sstevel@tonic-gate  *
222*7c478bd9Sstevel@tonic-gate  * pollunlock(void) releases whatever poll locks the current thread holds,
223*7c478bd9Sstevel@tonic-gate  *	returning a cookie for use by pollrelock();
224*7c478bd9Sstevel@tonic-gate  *
225*7c478bd9Sstevel@tonic-gate  * pollrelock(cookie) reacquires previously dropped poll locks;
226*7c478bd9Sstevel@tonic-gate  *
227*7c478bd9Sstevel@tonic-gate  * polllock(php, mutex) does the common case: pollunlock(),
228*7c478bd9Sstevel@tonic-gate  *	acquire the problematic mutex, pollrelock().
229*7c478bd9Sstevel@tonic-gate  */
230*7c478bd9Sstevel@tonic-gate int
231*7c478bd9Sstevel@tonic-gate pollunlock(void)
232*7c478bd9Sstevel@tonic-gate {
233*7c478bd9Sstevel@tonic-gate 	pollcache_t *pcp;
234*7c478bd9Sstevel@tonic-gate 	int lockstate = 0;
235*7c478bd9Sstevel@tonic-gate 
236*7c478bd9Sstevel@tonic-gate 	/*
237*7c478bd9Sstevel@tonic-gate 	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
238*7c478bd9Sstevel@tonic-gate 	 * If the pollrelock/pollunlock is called as a result of poll(2),
239*7c478bd9Sstevel@tonic-gate 	 * the t_pollcache should be NULL.
240*7c478bd9Sstevel@tonic-gate 	 */
241*7c478bd9Sstevel@tonic-gate 	if (curthread->t_pollcache == NULL)
242*7c478bd9Sstevel@tonic-gate 		pcp = curthread->t_pollstate->ps_pcache;
243*7c478bd9Sstevel@tonic-gate 	else
244*7c478bd9Sstevel@tonic-gate 		pcp = curthread->t_pollcache;
245*7c478bd9Sstevel@tonic-gate 
246*7c478bd9Sstevel@tonic-gate 	if (mutex_owned(&pcp->pc_lock)) {
247*7c478bd9Sstevel@tonic-gate 		lockstate = 1;
248*7c478bd9Sstevel@tonic-gate 		mutex_exit(&pcp->pc_lock);
249*7c478bd9Sstevel@tonic-gate 	}
250*7c478bd9Sstevel@tonic-gate 	return (lockstate);
251*7c478bd9Sstevel@tonic-gate }
252*7c478bd9Sstevel@tonic-gate 
253*7c478bd9Sstevel@tonic-gate void
254*7c478bd9Sstevel@tonic-gate pollrelock(int lockstate)
255*7c478bd9Sstevel@tonic-gate {
256*7c478bd9Sstevel@tonic-gate 	pollcache_t *pcp;
257*7c478bd9Sstevel@tonic-gate 
258*7c478bd9Sstevel@tonic-gate 	/*
259*7c478bd9Sstevel@tonic-gate 	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
260*7c478bd9Sstevel@tonic-gate 	 * If the pollrelock/pollunlock is called as a result of poll(2),
261*7c478bd9Sstevel@tonic-gate 	 * the t_pollcache should be NULL.
262*7c478bd9Sstevel@tonic-gate 	 */
263*7c478bd9Sstevel@tonic-gate 	if (curthread->t_pollcache == NULL)
264*7c478bd9Sstevel@tonic-gate 		pcp = curthread->t_pollstate->ps_pcache;
265*7c478bd9Sstevel@tonic-gate 	else
266*7c478bd9Sstevel@tonic-gate 		pcp = curthread->t_pollcache;
267*7c478bd9Sstevel@tonic-gate 
268*7c478bd9Sstevel@tonic-gate 	if (lockstate > 0)
269*7c478bd9Sstevel@tonic-gate 		mutex_enter(&pcp->pc_lock);
270*7c478bd9Sstevel@tonic-gate }
271*7c478bd9Sstevel@tonic-gate 
272*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
273*7c478bd9Sstevel@tonic-gate void
274*7c478bd9Sstevel@tonic-gate polllock(pollhead_t *php, kmutex_t *lp)
275*7c478bd9Sstevel@tonic-gate {
276*7c478bd9Sstevel@tonic-gate 	if (!mutex_tryenter(lp)) {
277*7c478bd9Sstevel@tonic-gate 		int lockstate = pollunlock();
278*7c478bd9Sstevel@tonic-gate 		mutex_enter(lp);
279*7c478bd9Sstevel@tonic-gate 		pollrelock(lockstate);
280*7c478bd9Sstevel@tonic-gate 	}
281*7c478bd9Sstevel@tonic-gate }
282*7c478bd9Sstevel@tonic-gate 
283*7c478bd9Sstevel@tonic-gate static int
284*7c478bd9Sstevel@tonic-gate poll_common(pollfd_t *fds, nfds_t nfds, timespec_t *tsp, k_sigset_t *ksetp)
285*7c478bd9Sstevel@tonic-gate {
286*7c478bd9Sstevel@tonic-gate 	kthread_t *t = curthread;
287*7c478bd9Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(t);
288*7c478bd9Sstevel@tonic-gate 	proc_t *p = ttoproc(t);
289*7c478bd9Sstevel@tonic-gate 	int fdcnt = 0;
290*7c478bd9Sstevel@tonic-gate 	int rval;
291*7c478bd9Sstevel@tonic-gate 	int i;
292*7c478bd9Sstevel@tonic-gate 	timespec_t *rqtp = NULL;
293*7c478bd9Sstevel@tonic-gate 	int timecheck = 0;
294*7c478bd9Sstevel@tonic-gate 	int imm_timeout = 0;
295*7c478bd9Sstevel@tonic-gate 	pollfd_t *pollfdp;
296*7c478bd9Sstevel@tonic-gate 	pollstate_t *ps;
297*7c478bd9Sstevel@tonic-gate 	pollcache_t *pcp;
298*7c478bd9Sstevel@tonic-gate 	int error = 0;
299*7c478bd9Sstevel@tonic-gate 	nfds_t old_nfds;
300*7c478bd9Sstevel@tonic-gate 	int cacheindex = 0;	/* which cache set is used */
301*7c478bd9Sstevel@tonic-gate 
302*7c478bd9Sstevel@tonic-gate 	/*
303*7c478bd9Sstevel@tonic-gate 	 * Determine the precise future time of the requested timeout, if any.
304*7c478bd9Sstevel@tonic-gate 	 */
305*7c478bd9Sstevel@tonic-gate 	if (tsp != NULL) {
306*7c478bd9Sstevel@tonic-gate 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
307*7c478bd9Sstevel@tonic-gate 			imm_timeout = 1;
308*7c478bd9Sstevel@tonic-gate 		else {
309*7c478bd9Sstevel@tonic-gate 			timespec_t now;
310*7c478bd9Sstevel@tonic-gate 			timecheck = timechanged;
311*7c478bd9Sstevel@tonic-gate 			gethrestime(&now);
312*7c478bd9Sstevel@tonic-gate 			rqtp = tsp;
313*7c478bd9Sstevel@tonic-gate 			timespecadd(rqtp, &now);
314*7c478bd9Sstevel@tonic-gate 		}
315*7c478bd9Sstevel@tonic-gate 	}
316*7c478bd9Sstevel@tonic-gate 
317*7c478bd9Sstevel@tonic-gate 	/*
318*7c478bd9Sstevel@tonic-gate 	 * Reset our signal mask, if requested.
319*7c478bd9Sstevel@tonic-gate 	 */
320*7c478bd9Sstevel@tonic-gate 	if (ksetp != NULL) {
321*7c478bd9Sstevel@tonic-gate 		mutex_enter(&p->p_lock);
322*7c478bd9Sstevel@tonic-gate 		schedctl_finish_sigblock(t);
323*7c478bd9Sstevel@tonic-gate 		lwp->lwp_sigoldmask = t->t_hold;
324*7c478bd9Sstevel@tonic-gate 		t->t_hold = *ksetp;
325*7c478bd9Sstevel@tonic-gate 		t->t_flag |= T_TOMASK;
326*7c478bd9Sstevel@tonic-gate 		/*
327*7c478bd9Sstevel@tonic-gate 		 * Call cv_timedwait_sig() just to check for signals.
328*7c478bd9Sstevel@tonic-gate 		 * We will return immediately with either 0 or -1.
329*7c478bd9Sstevel@tonic-gate 		 */
330*7c478bd9Sstevel@tonic-gate 		if (!cv_timedwait_sig(&t->t_delay_cv, &p->p_lock, lbolt)) {
331*7c478bd9Sstevel@tonic-gate 			mutex_exit(&p->p_lock);
332*7c478bd9Sstevel@tonic-gate 			error = EINTR;
333*7c478bd9Sstevel@tonic-gate 			goto pollout;
334*7c478bd9Sstevel@tonic-gate 		}
335*7c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
336*7c478bd9Sstevel@tonic-gate 	}
337*7c478bd9Sstevel@tonic-gate 
338*7c478bd9Sstevel@tonic-gate 	/*
339*7c478bd9Sstevel@tonic-gate 	 * Check to see if this guy just wants to use poll() as a timeout.
340*7c478bd9Sstevel@tonic-gate 	 * If yes then bypass all the other stuff and make him sleep.
341*7c478bd9Sstevel@tonic-gate 	 */
342*7c478bd9Sstevel@tonic-gate 	if (nfds == 0) {
343*7c478bd9Sstevel@tonic-gate 		/*
344*7c478bd9Sstevel@tonic-gate 		 * Sleep until we have passed the requested future
345*7c478bd9Sstevel@tonic-gate 		 * time or until interrupted by a signal.
346*7c478bd9Sstevel@tonic-gate 		 * Do not check for signals if we have a zero timeout.
347*7c478bd9Sstevel@tonic-gate 		 */
348*7c478bd9Sstevel@tonic-gate 		if (!imm_timeout) {
349*7c478bd9Sstevel@tonic-gate 			mutex_enter(&t->t_delay_lock);
350*7c478bd9Sstevel@tonic-gate 			while ((rval = cv_waituntil_sig(&t->t_delay_cv,
351*7c478bd9Sstevel@tonic-gate 			    &t->t_delay_lock, rqtp, timecheck)) > 0)
352*7c478bd9Sstevel@tonic-gate 				continue;
353*7c478bd9Sstevel@tonic-gate 			mutex_exit(&t->t_delay_lock);
354*7c478bd9Sstevel@tonic-gate 			if (rval == 0)
355*7c478bd9Sstevel@tonic-gate 				error = EINTR;
356*7c478bd9Sstevel@tonic-gate 		}
357*7c478bd9Sstevel@tonic-gate 		goto pollout;
358*7c478bd9Sstevel@tonic-gate 	}
359*7c478bd9Sstevel@tonic-gate 
360*7c478bd9Sstevel@tonic-gate 	if (nfds > p->p_fno_ctl) {
361*7c478bd9Sstevel@tonic-gate 		mutex_enter(&p->p_lock);
362*7c478bd9Sstevel@tonic-gate 		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
363*7c478bd9Sstevel@tonic-gate 		    p->p_rctls, p, RCA_SAFE);
364*7c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
365*7c478bd9Sstevel@tonic-gate 		error = EINVAL;
366*7c478bd9Sstevel@tonic-gate 		goto pollout;
367*7c478bd9Sstevel@tonic-gate 	}
368*7c478bd9Sstevel@tonic-gate 
369*7c478bd9Sstevel@tonic-gate 	/*
370*7c478bd9Sstevel@tonic-gate 	 * Need to allocate memory for pollstate before anything because
371*7c478bd9Sstevel@tonic-gate 	 * the mutex and cv are created in this space
372*7c478bd9Sstevel@tonic-gate 	 */
373*7c478bd9Sstevel@tonic-gate 	if ((ps = t->t_pollstate) == NULL) {
374*7c478bd9Sstevel@tonic-gate 		t->t_pollstate = pollstate_create();
375*7c478bd9Sstevel@tonic-gate 		ps = t->t_pollstate;
376*7c478bd9Sstevel@tonic-gate 	}
377*7c478bd9Sstevel@tonic-gate 
378*7c478bd9Sstevel@tonic-gate 	if (ps->ps_pcache == NULL)
379*7c478bd9Sstevel@tonic-gate 		ps->ps_pcache = pcache_alloc();
380*7c478bd9Sstevel@tonic-gate 	pcp = ps->ps_pcache;
381*7c478bd9Sstevel@tonic-gate 
382*7c478bd9Sstevel@tonic-gate 	/*
383*7c478bd9Sstevel@tonic-gate 	 * NOTE: for performance, buffers are saved across poll() calls.
384*7c478bd9Sstevel@tonic-gate 	 * The theory is that if a process polls heavily, it tends to poll
385*7c478bd9Sstevel@tonic-gate 	 * on the same set of descriptors.  Therefore, we only reallocate
386*7c478bd9Sstevel@tonic-gate 	 * buffers when nfds changes.  There is no hysteresis control,
387*7c478bd9Sstevel@tonic-gate 	 * because there is no data to suggest that this is necessary;
388*7c478bd9Sstevel@tonic-gate 	 * the penalty of reallocating is not *that* great in any event.
389*7c478bd9Sstevel@tonic-gate 	 */
390*7c478bd9Sstevel@tonic-gate 	old_nfds = ps->ps_nfds;
391*7c478bd9Sstevel@tonic-gate 	if (nfds != old_nfds) {
392*7c478bd9Sstevel@tonic-gate 
393*7c478bd9Sstevel@tonic-gate 		kmem_free(ps->ps_pollfd, old_nfds * sizeof (pollfd_t));
394*7c478bd9Sstevel@tonic-gate 		pollfdp = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
395*7c478bd9Sstevel@tonic-gate 		ps->ps_pollfd = pollfdp;
396*7c478bd9Sstevel@tonic-gate 		ps->ps_nfds = nfds;
397*7c478bd9Sstevel@tonic-gate 	}
398*7c478bd9Sstevel@tonic-gate 
399*7c478bd9Sstevel@tonic-gate 	pollfdp = ps->ps_pollfd;
400*7c478bd9Sstevel@tonic-gate 	if (copyin(fds, pollfdp, nfds * sizeof (pollfd_t))) {
401*7c478bd9Sstevel@tonic-gate 		error = EFAULT;
402*7c478bd9Sstevel@tonic-gate 		goto pollout;
403*7c478bd9Sstevel@tonic-gate 	}
404*7c478bd9Sstevel@tonic-gate 
405*7c478bd9Sstevel@tonic-gate 	if (fds == NULL) {
406*7c478bd9Sstevel@tonic-gate 		/*
407*7c478bd9Sstevel@tonic-gate 		 * If the process has page 0 mapped, then the copyin() above
408*7c478bd9Sstevel@tonic-gate 		 * will succeed even if fds is NULL.  However, our cached
409*7c478bd9Sstevel@tonic-gate 		 * poll lists are keyed by the address of the passed-in fds
410*7c478bd9Sstevel@tonic-gate 		 * structure, and we use the value NULL to indicate an unused
411*7c478bd9Sstevel@tonic-gate 		 * poll cache list entry.  As such, we elect not to support
412*7c478bd9Sstevel@tonic-gate 		 * NULL as a valid (user) memory address and fail the poll()
413*7c478bd9Sstevel@tonic-gate 		 * call.
414*7c478bd9Sstevel@tonic-gate 		 */
415*7c478bd9Sstevel@tonic-gate 		error = EINVAL;
416*7c478bd9Sstevel@tonic-gate 		goto pollout;
417*7c478bd9Sstevel@tonic-gate 	}
418*7c478bd9Sstevel@tonic-gate 
419*7c478bd9Sstevel@tonic-gate 	/*
420*7c478bd9Sstevel@tonic-gate 	 * If this thread polls for the first time, allocate ALL poll
421*7c478bd9Sstevel@tonic-gate 	 * cache data structures and cache the poll fd list. This
422*7c478bd9Sstevel@tonic-gate 	 * allocation is delayed till now because lwp's polling 0 fd
423*7c478bd9Sstevel@tonic-gate 	 * (i.e. using poll as timeout()) don't need this memory.
424*7c478bd9Sstevel@tonic-gate 	 */
425*7c478bd9Sstevel@tonic-gate 	mutex_enter(&ps->ps_lock);
426*7c478bd9Sstevel@tonic-gate 	pcp = ps->ps_pcache;
427*7c478bd9Sstevel@tonic-gate 	ASSERT(pcp != NULL);
428*7c478bd9Sstevel@tonic-gate 	if (pcp->pc_bitmap == NULL) {
429*7c478bd9Sstevel@tonic-gate 		pcache_create(pcp, nfds);
430*7c478bd9Sstevel@tonic-gate 		/*
431*7c478bd9Sstevel@tonic-gate 		 * poll and cache this poll fd list in ps_pcacheset[0].
432*7c478bd9Sstevel@tonic-gate 		 */
433*7c478bd9Sstevel@tonic-gate 		error = pcacheset_cache_list(ps, fds, &fdcnt, cacheindex);
434*7c478bd9Sstevel@tonic-gate 		if (fdcnt || error) {
435*7c478bd9Sstevel@tonic-gate 			mutex_exit(&ps->ps_lock);
436*7c478bd9Sstevel@tonic-gate 			goto pollout;
437*7c478bd9Sstevel@tonic-gate 		}
438*7c478bd9Sstevel@tonic-gate 	} else {
439*7c478bd9Sstevel@tonic-gate 		pollcacheset_t	*pcset = ps->ps_pcacheset;
440*7c478bd9Sstevel@tonic-gate 
441*7c478bd9Sstevel@tonic-gate 		/*
442*7c478bd9Sstevel@tonic-gate 		 * Not first time polling. Select a cached poll list by
443*7c478bd9Sstevel@tonic-gate 		 * matching user pollfd list buffer address.
444*7c478bd9Sstevel@tonic-gate 		 */
445*7c478bd9Sstevel@tonic-gate 		for (cacheindex = 0; cacheindex < ps->ps_nsets; cacheindex++) {
446*7c478bd9Sstevel@tonic-gate 			if (pcset[cacheindex].pcs_usradr == (uintptr_t)fds) {
447*7c478bd9Sstevel@tonic-gate 				if ((++pcset[cacheindex].pcs_count) == 0) {
448*7c478bd9Sstevel@tonic-gate 					/*
449*7c478bd9Sstevel@tonic-gate 					 * counter is wrapping around.
450*7c478bd9Sstevel@tonic-gate 					 */
451*7c478bd9Sstevel@tonic-gate 					pcacheset_reset_count(ps, cacheindex);
452*7c478bd9Sstevel@tonic-gate 				}
453*7c478bd9Sstevel@tonic-gate 				/*
454*7c478bd9Sstevel@tonic-gate 				 * examine and resolve possible
455*7c478bd9Sstevel@tonic-gate 				 * difference of the current poll
456*7c478bd9Sstevel@tonic-gate 				 * list and previously cached one.
457*7c478bd9Sstevel@tonic-gate 				 * If there is an error during resolve(),
458*7c478bd9Sstevel@tonic-gate 				 * the callee will guarantee the consistency
459*7c478bd9Sstevel@tonic-gate 				 * of cached poll list and cache content.
460*7c478bd9Sstevel@tonic-gate 				 */
461*7c478bd9Sstevel@tonic-gate 				error = pcacheset_resolve(ps, nfds, &fdcnt,
462*7c478bd9Sstevel@tonic-gate 				    cacheindex);
463*7c478bd9Sstevel@tonic-gate 				if (error) {
464*7c478bd9Sstevel@tonic-gate 					mutex_exit(&ps->ps_lock);
465*7c478bd9Sstevel@tonic-gate 					goto pollout;
466*7c478bd9Sstevel@tonic-gate 				}
467*7c478bd9Sstevel@tonic-gate 				break;
468*7c478bd9Sstevel@tonic-gate 			}
469*7c478bd9Sstevel@tonic-gate 
470*7c478bd9Sstevel@tonic-gate 			/*
471*7c478bd9Sstevel@tonic-gate 			 * Note that pcs_usradr field of an used entry won't be
472*7c478bd9Sstevel@tonic-gate 			 * NULL because it stores the address of passed-in fds,
473*7c478bd9Sstevel@tonic-gate 			 * and NULL fds will not be cached (Then it is either
474*7c478bd9Sstevel@tonic-gate 			 * the special timeout case when nfds is 0 or it returns
475*7c478bd9Sstevel@tonic-gate 			 * failure directly).
476*7c478bd9Sstevel@tonic-gate 			 */
477*7c478bd9Sstevel@tonic-gate 			if (pcset[cacheindex].pcs_usradr == NULL) {
478*7c478bd9Sstevel@tonic-gate 				/*
479*7c478bd9Sstevel@tonic-gate 				 * found an unused entry. Use it to cache
480*7c478bd9Sstevel@tonic-gate 				 * this poll list.
481*7c478bd9Sstevel@tonic-gate 				 */
482*7c478bd9Sstevel@tonic-gate 				error = pcacheset_cache_list(ps, fds, &fdcnt,
483*7c478bd9Sstevel@tonic-gate 				    cacheindex);
484*7c478bd9Sstevel@tonic-gate 				if (fdcnt || error) {
485*7c478bd9Sstevel@tonic-gate 					mutex_exit(&ps->ps_lock);
486*7c478bd9Sstevel@tonic-gate 					goto pollout;
487*7c478bd9Sstevel@tonic-gate 				}
488*7c478bd9Sstevel@tonic-gate 				break;
489*7c478bd9Sstevel@tonic-gate 			}
490*7c478bd9Sstevel@tonic-gate 		}
491*7c478bd9Sstevel@tonic-gate 		if (cacheindex == ps->ps_nsets) {
492*7c478bd9Sstevel@tonic-gate 			/*
493*7c478bd9Sstevel@tonic-gate 			 * We failed to find a matching cached poll fd list.
494*7c478bd9Sstevel@tonic-gate 			 * replace an old list.
495*7c478bd9Sstevel@tonic-gate 			 */
496*7c478bd9Sstevel@tonic-gate 			pollstats.polllistmiss.value.ui64++;
497*7c478bd9Sstevel@tonic-gate 			cacheindex = pcacheset_replace(ps);
498*7c478bd9Sstevel@tonic-gate 			ASSERT(cacheindex < ps->ps_nsets);
499*7c478bd9Sstevel@tonic-gate 			pcset[cacheindex].pcs_usradr = (uintptr_t)fds;
500*7c478bd9Sstevel@tonic-gate 			error = pcacheset_resolve(ps, nfds, &fdcnt, cacheindex);
501*7c478bd9Sstevel@tonic-gate 			if (error) {
502*7c478bd9Sstevel@tonic-gate 				mutex_exit(&ps->ps_lock);
503*7c478bd9Sstevel@tonic-gate 				goto pollout;
504*7c478bd9Sstevel@tonic-gate 			}
505*7c478bd9Sstevel@tonic-gate 		}
506*7c478bd9Sstevel@tonic-gate 	}
507*7c478bd9Sstevel@tonic-gate 
508*7c478bd9Sstevel@tonic-gate 	/*
509*7c478bd9Sstevel@tonic-gate 	 * Always scan the bitmap with the lock on the pollcache held.
510*7c478bd9Sstevel@tonic-gate 	 * This is to make sure that a wakeup does not come undetected.
511*7c478bd9Sstevel@tonic-gate 	 * If the lock is not held, a pollwakeup could have come for an
512*7c478bd9Sstevel@tonic-gate 	 * fd we already checked but before this thread sleeps, in which
513*7c478bd9Sstevel@tonic-gate 	 * case the wakeup is missed. Now we hold the pcache lock and
514*7c478bd9Sstevel@tonic-gate 	 * check the bitmap again. This will prevent wakeup from happening
515*7c478bd9Sstevel@tonic-gate 	 * while we hold pcache lock since pollwakeup() will also lock
516*7c478bd9Sstevel@tonic-gate 	 * the pcache before updating poll bitmap.
517*7c478bd9Sstevel@tonic-gate 	 */
518*7c478bd9Sstevel@tonic-gate 	mutex_enter(&pcp->pc_lock);
519*7c478bd9Sstevel@tonic-gate 	for (;;) {
520*7c478bd9Sstevel@tonic-gate 		pcp->pc_flag = 0;
521*7c478bd9Sstevel@tonic-gate 		error = pcache_poll(pollfdp, ps, nfds, &fdcnt, cacheindex);
522*7c478bd9Sstevel@tonic-gate 		if (fdcnt || error) {
523*7c478bd9Sstevel@tonic-gate 			mutex_exit(&pcp->pc_lock);
524*7c478bd9Sstevel@tonic-gate 			mutex_exit(&ps->ps_lock);
525*7c478bd9Sstevel@tonic-gate 			break;
526*7c478bd9Sstevel@tonic-gate 		}
527*7c478bd9Sstevel@tonic-gate 
528*7c478bd9Sstevel@tonic-gate 		/*
529*7c478bd9Sstevel@tonic-gate 		 * If T_POLLWAKE is set, a pollwakeup() was performed on
530*7c478bd9Sstevel@tonic-gate 		 * one of the file descriptors.  This can happen only if
531*7c478bd9Sstevel@tonic-gate 		 * one of the VOP_POLL() functions dropped pcp->pc_lock.
532*7c478bd9Sstevel@tonic-gate 		 * The only current cases of this is in procfs (prpoll())
533*7c478bd9Sstevel@tonic-gate 		 * and STREAMS (strpoll()).
534*7c478bd9Sstevel@tonic-gate 		 */
535*7c478bd9Sstevel@tonic-gate 		if (pcp->pc_flag & T_POLLWAKE)
536*7c478bd9Sstevel@tonic-gate 			continue;
537*7c478bd9Sstevel@tonic-gate 
538*7c478bd9Sstevel@tonic-gate 		/*
539*7c478bd9Sstevel@tonic-gate 		 * If you get here, the poll of fds was unsuccessful.
540*7c478bd9Sstevel@tonic-gate 		 * Wait until some fd becomes readable, writable, or gets
541*7c478bd9Sstevel@tonic-gate 		 * an exception, or until a signal or a timeout occurs.
542*7c478bd9Sstevel@tonic-gate 		 * Do not check for signals if we have a zero timeout.
543*7c478bd9Sstevel@tonic-gate 		 */
544*7c478bd9Sstevel@tonic-gate 		mutex_exit(&ps->ps_lock);
545*7c478bd9Sstevel@tonic-gate 		if (imm_timeout)
546*7c478bd9Sstevel@tonic-gate 			rval = -1;
547*7c478bd9Sstevel@tonic-gate 		else
548*7c478bd9Sstevel@tonic-gate 			rval = cv_waituntil_sig(&pcp->pc_cv, &pcp->pc_lock,
549*7c478bd9Sstevel@tonic-gate 				rqtp, timecheck);
550*7c478bd9Sstevel@tonic-gate 		mutex_exit(&pcp->pc_lock);
551*7c478bd9Sstevel@tonic-gate 		/*
552*7c478bd9Sstevel@tonic-gate 		 * If we have received a signal or timed out
553*7c478bd9Sstevel@tonic-gate 		 * then break out and return.
554*7c478bd9Sstevel@tonic-gate 		 */
555*7c478bd9Sstevel@tonic-gate 		if (rval <= 0) {
556*7c478bd9Sstevel@tonic-gate 			if (rval == 0)
557*7c478bd9Sstevel@tonic-gate 				error = EINTR;
558*7c478bd9Sstevel@tonic-gate 			break;
559*7c478bd9Sstevel@tonic-gate 		}
560*7c478bd9Sstevel@tonic-gate 		/*
561*7c478bd9Sstevel@tonic-gate 		 * We have not received a signal or timed out.
562*7c478bd9Sstevel@tonic-gate 		 * Continue around and poll fds again.
563*7c478bd9Sstevel@tonic-gate 		 */
564*7c478bd9Sstevel@tonic-gate 		mutex_enter(&ps->ps_lock);
565*7c478bd9Sstevel@tonic-gate 		mutex_enter(&pcp->pc_lock);
566*7c478bd9Sstevel@tonic-gate 	}
567*7c478bd9Sstevel@tonic-gate 
568*7c478bd9Sstevel@tonic-gate pollout:
569*7c478bd9Sstevel@tonic-gate 	/*
570*7c478bd9Sstevel@tonic-gate 	 * If we changed the signal mask but we received
571*7c478bd9Sstevel@tonic-gate 	 * no signal then restore the signal mask.
572*7c478bd9Sstevel@tonic-gate 	 * Otherwise psig() will deal with the signal mask.
573*7c478bd9Sstevel@tonic-gate 	 */
574*7c478bd9Sstevel@tonic-gate 	if (ksetp != NULL) {
575*7c478bd9Sstevel@tonic-gate 		mutex_enter(&p->p_lock);
576*7c478bd9Sstevel@tonic-gate 		if (lwp->lwp_cursig == 0) {
577*7c478bd9Sstevel@tonic-gate 			t->t_hold = lwp->lwp_sigoldmask;
578*7c478bd9Sstevel@tonic-gate 			t->t_flag &= ~T_TOMASK;
579*7c478bd9Sstevel@tonic-gate 		}
580*7c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
581*7c478bd9Sstevel@tonic-gate 	}
582*7c478bd9Sstevel@tonic-gate 
583*7c478bd9Sstevel@tonic-gate 	if (error)
584*7c478bd9Sstevel@tonic-gate 		return (set_errno(error));
585*7c478bd9Sstevel@tonic-gate 
586*7c478bd9Sstevel@tonic-gate 	/*
587*7c478bd9Sstevel@tonic-gate 	 * Copy out the events and return the fdcnt to the user.
588*7c478bd9Sstevel@tonic-gate 	 */
589*7c478bd9Sstevel@tonic-gate 	if (nfds != 0 &&
590*7c478bd9Sstevel@tonic-gate 	    copyout(pollfdp, fds, nfds * sizeof (pollfd_t)))
591*7c478bd9Sstevel@tonic-gate 		return (set_errno(EFAULT));
592*7c478bd9Sstevel@tonic-gate 
593*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
594*7c478bd9Sstevel@tonic-gate 	/*
595*7c478bd9Sstevel@tonic-gate 	 * Another sanity check:
596*7c478bd9Sstevel@tonic-gate 	 */
597*7c478bd9Sstevel@tonic-gate 	if (fdcnt) {
598*7c478bd9Sstevel@tonic-gate 		int	reventcnt = 0;
599*7c478bd9Sstevel@tonic-gate 
600*7c478bd9Sstevel@tonic-gate 		for (i = 0; i < nfds; i++) {
601*7c478bd9Sstevel@tonic-gate 			if (pollfdp[i].fd < 0) {
602*7c478bd9Sstevel@tonic-gate 				ASSERT(pollfdp[i].revents == 0);
603*7c478bd9Sstevel@tonic-gate 				continue;
604*7c478bd9Sstevel@tonic-gate 			}
605*7c478bd9Sstevel@tonic-gate 			if (pollfdp[i].revents) {
606*7c478bd9Sstevel@tonic-gate 				reventcnt++;
607*7c478bd9Sstevel@tonic-gate 			}
608*7c478bd9Sstevel@tonic-gate 		}
609*7c478bd9Sstevel@tonic-gate 		ASSERT(fdcnt == reventcnt);
610*7c478bd9Sstevel@tonic-gate 	} else {
611*7c478bd9Sstevel@tonic-gate 		for (i = 0; i < nfds; i++) {
612*7c478bd9Sstevel@tonic-gate 			ASSERT(pollfdp[i].revents == 0);
613*7c478bd9Sstevel@tonic-gate 		}
614*7c478bd9Sstevel@tonic-gate 	}
615*7c478bd9Sstevel@tonic-gate #endif	/* DEBUG */
616*7c478bd9Sstevel@tonic-gate 
617*7c478bd9Sstevel@tonic-gate 	return (fdcnt);
618*7c478bd9Sstevel@tonic-gate }
619*7c478bd9Sstevel@tonic-gate 
620*7c478bd9Sstevel@tonic-gate /*
621*7c478bd9Sstevel@tonic-gate  * This system call trap exists solely for binary compatibility with
622*7c478bd9Sstevel@tonic-gate  * old statically-linked applications.  It is not called from libc.
623*7c478bd9Sstevel@tonic-gate  * It should be removed in the next release.
624*7c478bd9Sstevel@tonic-gate  */
625*7c478bd9Sstevel@tonic-gate int
626*7c478bd9Sstevel@tonic-gate poll(pollfd_t *fds, nfds_t nfds, int time_out)
627*7c478bd9Sstevel@tonic-gate {
628*7c478bd9Sstevel@tonic-gate 	timespec_t ts;
629*7c478bd9Sstevel@tonic-gate 	timespec_t *tsp;
630*7c478bd9Sstevel@tonic-gate 
631*7c478bd9Sstevel@tonic-gate 	if (time_out < 0)
632*7c478bd9Sstevel@tonic-gate 		tsp = NULL;
633*7c478bd9Sstevel@tonic-gate 	else {
634*7c478bd9Sstevel@tonic-gate 		ts.tv_sec = time_out / MILLISEC;
635*7c478bd9Sstevel@tonic-gate 		ts.tv_nsec = (time_out % MILLISEC) * MICROSEC;
636*7c478bd9Sstevel@tonic-gate 		tsp = &ts;
637*7c478bd9Sstevel@tonic-gate 	}
638*7c478bd9Sstevel@tonic-gate 
639*7c478bd9Sstevel@tonic-gate 	return (poll_common(fds, nfds, tsp, NULL));
640*7c478bd9Sstevel@tonic-gate }
641*7c478bd9Sstevel@tonic-gate 
642*7c478bd9Sstevel@tonic-gate /*
643*7c478bd9Sstevel@tonic-gate  * This is the system call trap that poll(),
644*7c478bd9Sstevel@tonic-gate  * select() and pselect() are built upon.
645*7c478bd9Sstevel@tonic-gate  * It is a private interface between libc and the kernel.
646*7c478bd9Sstevel@tonic-gate  */
647*7c478bd9Sstevel@tonic-gate int
648*7c478bd9Sstevel@tonic-gate pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeoutp, sigset_t *setp)
649*7c478bd9Sstevel@tonic-gate {
650*7c478bd9Sstevel@tonic-gate 	timespec_t ts;
651*7c478bd9Sstevel@tonic-gate 	timespec_t *tsp;
652*7c478bd9Sstevel@tonic-gate 	sigset_t set;
653*7c478bd9Sstevel@tonic-gate 	k_sigset_t kset;
654*7c478bd9Sstevel@tonic-gate 	k_sigset_t *ksetp;
655*7c478bd9Sstevel@tonic-gate 	model_t datamodel = get_udatamodel();
656*7c478bd9Sstevel@tonic-gate 
657*7c478bd9Sstevel@tonic-gate 	if (timeoutp == NULL)
658*7c478bd9Sstevel@tonic-gate 		tsp = NULL;
659*7c478bd9Sstevel@tonic-gate 	else {
660*7c478bd9Sstevel@tonic-gate 		if (datamodel == DATAMODEL_NATIVE) {
661*7c478bd9Sstevel@tonic-gate 			if (copyin(timeoutp, &ts, sizeof (ts)))
662*7c478bd9Sstevel@tonic-gate 				return (set_errno(EFAULT));
663*7c478bd9Sstevel@tonic-gate 		} else {
664*7c478bd9Sstevel@tonic-gate 			timespec32_t ts32;
665*7c478bd9Sstevel@tonic-gate 
666*7c478bd9Sstevel@tonic-gate 			if (copyin(timeoutp, &ts32, sizeof (ts32)))
667*7c478bd9Sstevel@tonic-gate 				return (set_errno(EFAULT));
668*7c478bd9Sstevel@tonic-gate 			TIMESPEC32_TO_TIMESPEC(&ts, &ts32)
669*7c478bd9Sstevel@tonic-gate 		}
670*7c478bd9Sstevel@tonic-gate 
671*7c478bd9Sstevel@tonic-gate 		if (itimerspecfix(&ts))
672*7c478bd9Sstevel@tonic-gate 			return (set_errno(EINVAL));
673*7c478bd9Sstevel@tonic-gate 		tsp = &ts;
674*7c478bd9Sstevel@tonic-gate 	}
675*7c478bd9Sstevel@tonic-gate 
676*7c478bd9Sstevel@tonic-gate 	if (setp == NULL)
677*7c478bd9Sstevel@tonic-gate 		ksetp = NULL;
678*7c478bd9Sstevel@tonic-gate 	else {
679*7c478bd9Sstevel@tonic-gate 		if (copyin(setp, &set, sizeof (set)))
680*7c478bd9Sstevel@tonic-gate 			return (set_errno(EFAULT));
681*7c478bd9Sstevel@tonic-gate 		sigutok(&set, &kset);
682*7c478bd9Sstevel@tonic-gate 		ksetp = &kset;
683*7c478bd9Sstevel@tonic-gate 	}
684*7c478bd9Sstevel@tonic-gate 
685*7c478bd9Sstevel@tonic-gate 	return (poll_common(fds, nfds, tsp, ksetp));
686*7c478bd9Sstevel@tonic-gate }
687*7c478bd9Sstevel@tonic-gate 
/*
 * Clean up any state left around by poll(2). Called when a thread exits.
 * Tears down the thread's pollstate_t and, when present, the pollcache_t
 * hanging off of it (hash table entries, pollhead links, fpollinfo refs).
 */
void
pollcleanup()
{
	pollstate_t *ps = curthread->t_pollstate;
	pollcache_t *pcp;

	/* this thread never polled; nothing to tear down */
	if (ps == NULL)
		return;
	pcp = ps->ps_pcache;
	/*
	 * free up all cached poll fds
	 */
	if (pcp == NULL) {
		/* this pollstate is used by /dev/poll */
		goto pollcleanout;
	}

	if (pcp->pc_bitmap != NULL) {
		ASSERT(MUTEX_NOT_HELD(&ps->ps_lock));
		/*
		 * a close lwp can race with us when cleaning up a polldat
		 * entry. We hold the ps_lock when cleaning hash table.
		 * Since this pollcache is going away anyway, there is no
		 * need to hold the pc_lock.
		 */
		mutex_enter(&ps->ps_lock);
		pcache_clean(pcp);
		mutex_exit(&ps->ps_lock);
#ifdef DEBUG
		/*
		 * At this point, all fds cached by this lwp should be
		 * cleaned up. There should be no fd in fi_list still
		 * referencing this thread.
		 */
		checkfpollinfo();	/* sanity check */
		pollcheckphlist();	/* sanity check */
#endif	/* DEBUG */
	}
	/*
	 * Be sure no one is referencing thread before exiting.
	 * pollwakeup() raises pc_busy (under pc_no_exit) while it still
	 * needs the pollcache; wait here until all such users drain.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);
pollcleanout:
	pollstate_destroy(ps);
	curthread->t_pollstate = NULL;
}
741*7c478bd9Sstevel@tonic-gate 
/*
 * pollwakeup() - poke threads waiting in poll() for some event
 * on a particular object.
 *
 * The threads hanging off of the specified pollhead structure are scanned.
 * If their event mask matches the specified event(s), then pollnotify() is
 * called to poke the thread.
 *
 * Multiple events may be specified.  When POLLHUP or POLLERR are specified,
 * all waiting threads are poked.
 *
 * It is important that pollnotify() not drop the lock protecting the list
 * of threads.
 */
void
pollwakeup(pollhead_t *php, short events_arg)
{
	polldat_t	*pdp;
	int		events = (ushort_t)events_arg;

retry:
	PH_ENTER(php);

	/*
	 * About half of all pollwakeups don't do anything, because the
	 * pollhead list is empty (i.e, nobody is interested in the event).
	 * For this common case, we can optimize out locking overhead.
	 */
	if (php->ph_list == NULL) {
		PH_EXIT(php);
		return;
	}

	for (pdp = php->ph_list; pdp; pdp = pdp->pd_next) {
		if ((pdp->pd_events & events) ||
		    (events & (POLLHUP | POLLERR))) {

			pollcache_t 	*pcp;

			if (pdp->pd_portev != NULL) {
				port_kevent_t	*pkevp = pdp->pd_portev;
				/*
				 * Object (fd) is associated with an event port,
				 * => send event notification to the port.
				 * Deliver only the events the caller asked
				 * for, plus POLLHUP/POLLERR which are always
				 * reported.
				 */
				pkevp->portkev_events |= events &
				    (pdp->pd_events | POLLHUP | POLLERR);
				if (pkevp->portkev_flags & PORT_KEV_VALID) {
					pkevp->portkev_flags &= ~PORT_KEV_VALID;
					(void) port_send_event(pdp->pd_portev);
				}
				continue;
			}

			pcp = pdp->pd_pcache;

			/*
			 * Try to grab the lock for this thread. If
			 * we don't get it then we may deadlock so
			 * back out and restart all over again. Note
			 * that the failure rate is very very low.
			 */
			if (mutex_tryenter(&pcp->pc_lock)) {
				pollnotify(pcp, pdp->pd_fd);
				mutex_exit(&pcp->pc_lock);
			} else {
				/*
				 * We are here because:
				 *	1) This thread has been woken up
				 *	   and is trying to get out of poll().
				 *	2) Some other thread is also here
				 *	   but with a different pollhead lock.
				 *
				 * So, we need to drop the lock on pollhead
				 * because of (1) but we want to prevent
				 * that thread from doing lwp_exit() or
				 * devpoll close. We want to ensure that
				 * the pollcache pointer remains valid.
				 *
				 * Solution: Grab the pcp->pc_no_exit lock,
				 * increment the pc_busy counter, drop every
				 * lock in sight. Get out of the way and wait
				 * for type (2) threads to finish.
				 */

				mutex_enter(&pcp->pc_no_exit);
				pcp->pc_busy++;	/* prevents exit()'s */
				mutex_exit(&pcp->pc_no_exit);

				PH_EXIT(php);
				/*
				 * Enter and immediately release pc_lock:
				 * this only synchronizes with the current
				 * holder, we don't need to keep the lock.
				 */
				mutex_enter(&pcp->pc_lock);
				mutex_exit(&pcp->pc_lock);
				mutex_enter(&pcp->pc_no_exit);
				pcp->pc_busy--;
				if (pcp->pc_busy == 0) {
					/*
					 * Wakeup the thread waiting in
					 * thread_exit().
					 */
					cv_signal(&pcp->pc_busy_cv);
				}
				mutex_exit(&pcp->pc_no_exit);
				goto retry;
			}
		}
	}
	PH_EXIT(php);
}
850*7c478bd9Sstevel@tonic-gate 
/*
 * This function is called to inform a thread that
 * an event being polled for has occurred.
 * The pollstate lock on the thread should be held on entry.
 */
void
pollnotify(pollcache_t *pcp, int fd)
{
	ASSERT(fd < pcp->pc_mapsize);
	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	/* record the pending event for this fd in the poll bitmap */
	BT_SET(pcp->pc_bitmap, fd);
	/* flag that a wakeup occurred so the poller rescans the bitmap */
	pcp->pc_flag |= T_POLLWAKE;
	/* rouse the thread sleeping on pc_cv in poll_common() */
	cv_signal(&pcp->pc_cv);
}
865*7c478bd9Sstevel@tonic-gate 
866*7c478bd9Sstevel@tonic-gate /*
867*7c478bd9Sstevel@tonic-gate  * add a polldat entry to pollhead ph_list. The polldat struct is used
868*7c478bd9Sstevel@tonic-gate  * by pollwakeup to wake sleeping pollers when polled events has happened.
869*7c478bd9Sstevel@tonic-gate  */
870*7c478bd9Sstevel@tonic-gate void
871*7c478bd9Sstevel@tonic-gate pollhead_insert(pollhead_t *php, polldat_t *pdp)
872*7c478bd9Sstevel@tonic-gate {
873*7c478bd9Sstevel@tonic-gate 	PH_ENTER(php);
874*7c478bd9Sstevel@tonic-gate 	ASSERT(pdp->pd_next == NULL);
875*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
876*7c478bd9Sstevel@tonic-gate 	{
877*7c478bd9Sstevel@tonic-gate 		/*
878*7c478bd9Sstevel@tonic-gate 		 * the polldat should not be already on the list
879*7c478bd9Sstevel@tonic-gate 		 */
880*7c478bd9Sstevel@tonic-gate 		polldat_t *wp;
881*7c478bd9Sstevel@tonic-gate 		for (wp = php->ph_list; wp; wp = wp->pd_next) {
882*7c478bd9Sstevel@tonic-gate 			ASSERT(wp != pdp);
883*7c478bd9Sstevel@tonic-gate 		}
884*7c478bd9Sstevel@tonic-gate 	}
885*7c478bd9Sstevel@tonic-gate #endif	/* DEBUG */
886*7c478bd9Sstevel@tonic-gate 	pdp->pd_next = php->ph_list;
887*7c478bd9Sstevel@tonic-gate 	php->ph_list = pdp;
888*7c478bd9Sstevel@tonic-gate 	PH_EXIT(php);
889*7c478bd9Sstevel@tonic-gate }
890*7c478bd9Sstevel@tonic-gate 
891*7c478bd9Sstevel@tonic-gate /*
892*7c478bd9Sstevel@tonic-gate  * Delete the polldat entry from ph_list.
893*7c478bd9Sstevel@tonic-gate  */
894*7c478bd9Sstevel@tonic-gate void
895*7c478bd9Sstevel@tonic-gate pollhead_delete(pollhead_t *php, polldat_t *pdp)
896*7c478bd9Sstevel@tonic-gate {
897*7c478bd9Sstevel@tonic-gate 	polldat_t *wp;
898*7c478bd9Sstevel@tonic-gate 	polldat_t **wpp;
899*7c478bd9Sstevel@tonic-gate 
900*7c478bd9Sstevel@tonic-gate 	PH_ENTER(php);
901*7c478bd9Sstevel@tonic-gate 	for (wpp = &php->ph_list; (wp = *wpp) != NULL; wpp = &wp->pd_next) {
902*7c478bd9Sstevel@tonic-gate 		if (wp == pdp) {
903*7c478bd9Sstevel@tonic-gate 			*wpp = pdp->pd_next;
904*7c478bd9Sstevel@tonic-gate 			pdp->pd_next = NULL;
905*7c478bd9Sstevel@tonic-gate 			break;
906*7c478bd9Sstevel@tonic-gate 		}
907*7c478bd9Sstevel@tonic-gate 	}
908*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
909*7c478bd9Sstevel@tonic-gate 	/* assert that pdp is no longer in the list */
910*7c478bd9Sstevel@tonic-gate 	for (wp = *wpp; wp; wp = wp->pd_next) {
911*7c478bd9Sstevel@tonic-gate 		ASSERT(wp != pdp);
912*7c478bd9Sstevel@tonic-gate 	}
913*7c478bd9Sstevel@tonic-gate #endif	/* DEBUG */
914*7c478bd9Sstevel@tonic-gate 	PH_EXIT(php);
915*7c478bd9Sstevel@tonic-gate }
916*7c478bd9Sstevel@tonic-gate 
917*7c478bd9Sstevel@tonic-gate /*
918*7c478bd9Sstevel@tonic-gate  * walk through the poll fd lists to see if they are identical. This is an
919*7c478bd9Sstevel@tonic-gate  * expensive operation and should not be done more than once for each poll()
920*7c478bd9Sstevel@tonic-gate  * call.
921*7c478bd9Sstevel@tonic-gate  *
922*7c478bd9Sstevel@tonic-gate  * As an optimization (i.e., not having to go through the lists more than
923*7c478bd9Sstevel@tonic-gate  * once), this routine also clear the revents field of pollfd in 'current'.
924*7c478bd9Sstevel@tonic-gate  * Zeroing out the revents field of each entry in current poll list is
925*7c478bd9Sstevel@tonic-gate  * required by poll man page.
926*7c478bd9Sstevel@tonic-gate  *
927*7c478bd9Sstevel@tonic-gate  * Since the events field of cached list has illegal poll events filtered
928*7c478bd9Sstevel@tonic-gate  * out, the current list applies the same filtering before comparison.
929*7c478bd9Sstevel@tonic-gate  *
930*7c478bd9Sstevel@tonic-gate  * The routine stops when it detects a meaningful difference, or when it
931*7c478bd9Sstevel@tonic-gate  * exhausts the lists.
932*7c478bd9Sstevel@tonic-gate  */
933*7c478bd9Sstevel@tonic-gate int
934*7c478bd9Sstevel@tonic-gate pcacheset_cmp(pollfd_t *current, pollfd_t *cached, pollfd_t *newlist, int n)
935*7c478bd9Sstevel@tonic-gate {
936*7c478bd9Sstevel@tonic-gate 	int    ix;
937*7c478bd9Sstevel@tonic-gate 
938*7c478bd9Sstevel@tonic-gate 	for (ix = 0; ix < n; ix++) {
939*7c478bd9Sstevel@tonic-gate 		if (current[ix].fd == cached[ix].fd) {
940*7c478bd9Sstevel@tonic-gate 			/*
941*7c478bd9Sstevel@tonic-gate 			 * Filter out invalid poll events while we are in
942*7c478bd9Sstevel@tonic-gate 			 * inside the loop.
943*7c478bd9Sstevel@tonic-gate 			 */
944*7c478bd9Sstevel@tonic-gate 			if (current[ix].events & ~VALID_POLL_EVENTS) {
945*7c478bd9Sstevel@tonic-gate 				current[ix].events &= VALID_POLL_EVENTS;
946*7c478bd9Sstevel@tonic-gate 				if (newlist != NULL)
947*7c478bd9Sstevel@tonic-gate 					newlist[ix].events = current[ix].events;
948*7c478bd9Sstevel@tonic-gate 			}
949*7c478bd9Sstevel@tonic-gate 			if (current[ix].events == cached[ix].events) {
950*7c478bd9Sstevel@tonic-gate 				current[ix].revents = 0;
951*7c478bd9Sstevel@tonic-gate 				continue;
952*7c478bd9Sstevel@tonic-gate 			}
953*7c478bd9Sstevel@tonic-gate 		}
954*7c478bd9Sstevel@tonic-gate 		if ((current[ix].fd < 0) && (cached[ix].fd < 0)) {
955*7c478bd9Sstevel@tonic-gate 			current[ix].revents = 0;
956*7c478bd9Sstevel@tonic-gate 			continue;
957*7c478bd9Sstevel@tonic-gate 		}
958*7c478bd9Sstevel@tonic-gate 		return (ix);
959*7c478bd9Sstevel@tonic-gate 	}
960*7c478bd9Sstevel@tonic-gate 	return (ix);
961*7c478bd9Sstevel@tonic-gate }
962*7c478bd9Sstevel@tonic-gate 
963*7c478bd9Sstevel@tonic-gate /*
964*7c478bd9Sstevel@tonic-gate  * This routine returns a pointer to a cached poll fd entry, or NULL if it
965*7c478bd9Sstevel@tonic-gate  * does not find it in the hash table.
966*7c478bd9Sstevel@tonic-gate  */
967*7c478bd9Sstevel@tonic-gate polldat_t *
968*7c478bd9Sstevel@tonic-gate pcache_lookup_fd(pollcache_t *pcp, int fd)
969*7c478bd9Sstevel@tonic-gate {
970*7c478bd9Sstevel@tonic-gate 	int hashindex;
971*7c478bd9Sstevel@tonic-gate 	polldat_t *pdp;
972*7c478bd9Sstevel@tonic-gate 
973*7c478bd9Sstevel@tonic-gate 	hashindex = POLLHASH(pcp->pc_hashsize, fd);
974*7c478bd9Sstevel@tonic-gate 	pdp = pcp->pc_hash[hashindex];
975*7c478bd9Sstevel@tonic-gate 	while (pdp != NULL) {
976*7c478bd9Sstevel@tonic-gate 		if (pdp->pd_fd == fd)
977*7c478bd9Sstevel@tonic-gate 			break;
978*7c478bd9Sstevel@tonic-gate 		pdp = pdp->pd_hashnext;
979*7c478bd9Sstevel@tonic-gate 	}
980*7c478bd9Sstevel@tonic-gate 	return (pdp);
981*7c478bd9Sstevel@tonic-gate }
982*7c478bd9Sstevel@tonic-gate 
983*7c478bd9Sstevel@tonic-gate polldat_t *
984*7c478bd9Sstevel@tonic-gate pcache_alloc_fd(int nsets)
985*7c478bd9Sstevel@tonic-gate {
986*7c478bd9Sstevel@tonic-gate 	polldat_t *pdp;
987*7c478bd9Sstevel@tonic-gate 
988*7c478bd9Sstevel@tonic-gate 	pdp = kmem_zalloc(sizeof (polldat_t), KM_SLEEP);
989*7c478bd9Sstevel@tonic-gate 	if (nsets > 0) {
990*7c478bd9Sstevel@tonic-gate 		pdp->pd_ref = kmem_zalloc(sizeof (xref_t) * nsets, KM_SLEEP);
991*7c478bd9Sstevel@tonic-gate 		pdp->pd_nsets = nsets;
992*7c478bd9Sstevel@tonic-gate 	}
993*7c478bd9Sstevel@tonic-gate 	return (pdp);
994*7c478bd9Sstevel@tonic-gate }
995*7c478bd9Sstevel@tonic-gate 
996*7c478bd9Sstevel@tonic-gate /*
997*7c478bd9Sstevel@tonic-gate  * This routine  inserts a polldat into the pollcache's hash table. It
998*7c478bd9Sstevel@tonic-gate  * may be necessary to grow the size of the hash table.
999*7c478bd9Sstevel@tonic-gate  */
1000*7c478bd9Sstevel@tonic-gate void
1001*7c478bd9Sstevel@tonic-gate pcache_insert_fd(pollcache_t *pcp, polldat_t *pdp, nfds_t nfds)
1002*7c478bd9Sstevel@tonic-gate {
1003*7c478bd9Sstevel@tonic-gate 	int hashindex;
1004*7c478bd9Sstevel@tonic-gate 	int fd;
1005*7c478bd9Sstevel@tonic-gate 
1006*7c478bd9Sstevel@tonic-gate 	if ((pcp->pc_fdcount > pcp->pc_hashsize * POLLHASHTHRESHOLD) ||
1007*7c478bd9Sstevel@tonic-gate 	    (nfds > pcp->pc_hashsize * POLLHASHTHRESHOLD)) {
1008*7c478bd9Sstevel@tonic-gate 		pcache_grow_hashtbl(pcp, nfds);
1009*7c478bd9Sstevel@tonic-gate 	}
1010*7c478bd9Sstevel@tonic-gate 	fd = pdp->pd_fd;
1011*7c478bd9Sstevel@tonic-gate 	hashindex = POLLHASH(pcp->pc_hashsize, fd);
1012*7c478bd9Sstevel@tonic-gate 	pdp->pd_hashnext = pcp->pc_hash[hashindex];
1013*7c478bd9Sstevel@tonic-gate 	pcp->pc_hash[hashindex] = pdp;
1014*7c478bd9Sstevel@tonic-gate 	pcp->pc_fdcount++;
1015*7c478bd9Sstevel@tonic-gate 
1016*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
1017*7c478bd9Sstevel@tonic-gate 	{
1018*7c478bd9Sstevel@tonic-gate 		/*
1019*7c478bd9Sstevel@tonic-gate 		 * same fd should not appear on a hash list twice
1020*7c478bd9Sstevel@tonic-gate 		 */
1021*7c478bd9Sstevel@tonic-gate 		polldat_t *pdp1;
1022*7c478bd9Sstevel@tonic-gate 		for (pdp1 = pdp->pd_hashnext; pdp1; pdp1 = pdp1->pd_hashnext) {
1023*7c478bd9Sstevel@tonic-gate 			ASSERT(pdp->pd_fd != pdp1->pd_fd);
1024*7c478bd9Sstevel@tonic-gate 		}
1025*7c478bd9Sstevel@tonic-gate 	}
1026*7c478bd9Sstevel@tonic-gate #endif	/* DEBUG */
1027*7c478bd9Sstevel@tonic-gate }
1028*7c478bd9Sstevel@tonic-gate 
/*
 * Grow the hash table -- either double the table size or round it to the
 * nearest multiples of POLLHASHCHUNKSZ, whichever is bigger. Rehash all the
 * elements on the hash table.
 */
void
pcache_grow_hashtbl(pollcache_t *pcp, nfds_t nfds)
{
	int	oldsize;
	polldat_t **oldtbl;
	polldat_t *pdp, *pdp1;
	int	i;
#ifdef DEBUG
	int	count = 0;
#endif

	ASSERT(pcp->pc_hashsize % POLLHASHCHUNKSZ == 0);
	oldsize = pcp->pc_hashsize;
	oldtbl = pcp->pc_hash;
	/*
	 * New size: nfds rounded up to a multiple of POLLHASHCHUNKSZ if
	 * that exceeds POLLHASHINC-fold growth, otherwise grow by the
	 * POLLHASHINC factor.
	 */
	if (nfds > pcp->pc_hashsize * POLLHASHINC) {
		pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) &
		    ~(POLLHASHCHUNKSZ - 1);
	} else {
		pcp->pc_hashsize = pcp->pc_hashsize * POLLHASHINC;
	}
	pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *),
	    KM_SLEEP);
	/*
	 * rehash existing elements; pcache_insert_fd() recounts each one,
	 * so pc_fdcount must be reset first.
	 */
	pcp->pc_fdcount = 0;
	for (i = 0; i < oldsize; i++) {
		pdp = oldtbl[i];
		while (pdp != NULL) {
			/* save the chain link before the entry is relinked */
			pdp1 = pdp->pd_hashnext;
			pcache_insert_fd(pcp, pdp, nfds);
			pdp = pdp1;
#ifdef DEBUG
			count++;
#endif
		}
	}
	kmem_free(oldtbl, oldsize * sizeof (polldat_t *));
	ASSERT(pcp->pc_fdcount == count);
}
1074*7c478bd9Sstevel@tonic-gate 
/*
 * Grow the pollcache's event bitmap so that it covers 'fd', copying the
 * old map contents into the new, larger map.
 */
void
pcache_grow_map(pollcache_t *pcp, int fd)
{
	int  	newsize;
	ulong_t	*newmap;

	/*
	 * grow to nearest multiple of POLLMAPCHUNK, assuming POLLMAPCHUNK is
	 * power of 2.
	 */
	newsize = (fd + POLLMAPCHUNK) & ~(POLLMAPCHUNK - 1);
	newmap = kmem_zalloc((newsize / BT_NBIPUL) * sizeof (ulong_t),
	    KM_SLEEP);
	/*
	 * don't want pollwakeup to set a bit while growing the bitmap.
	 */
	ASSERT(mutex_owned(&pcp->pc_lock) == 0);
	mutex_enter(&pcp->pc_lock);
	/* copy the old bits across, then swap in the new map */
	bcopy(pcp->pc_bitmap, newmap,
	    (pcp->pc_mapsize / BT_NBIPUL) * sizeof (ulong_t));
	kmem_free(pcp->pc_bitmap,
	    (pcp->pc_mapsize /BT_NBIPUL) * sizeof (ulong_t));
	pcp->pc_bitmap = newmap;
	pcp->pc_mapsize = newsize;
	mutex_exit(&pcp->pc_lock);
}
1101*7c478bd9Sstevel@tonic-gate 
1102*7c478bd9Sstevel@tonic-gate /*
1103*7c478bd9Sstevel@tonic-gate  * remove all the reference from pollhead list and fpollinfo lists.
1104*7c478bd9Sstevel@tonic-gate  */
1105*7c478bd9Sstevel@tonic-gate void
1106*7c478bd9Sstevel@tonic-gate pcache_clean(pollcache_t *pcp)
1107*7c478bd9Sstevel@tonic-gate {
1108*7c478bd9Sstevel@tonic-gate 	int i;
1109*7c478bd9Sstevel@tonic-gate 	polldat_t **hashtbl;
1110*7c478bd9Sstevel@tonic-gate 	polldat_t *pdp;
1111*7c478bd9Sstevel@tonic-gate 
1112*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&curthread->t_pollstate->ps_lock));
1113*7c478bd9Sstevel@tonic-gate 	hashtbl = pcp->pc_hash;
1114*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < pcp->pc_hashsize; i++) {
1115*7c478bd9Sstevel@tonic-gate 		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
1116*7c478bd9Sstevel@tonic-gate 			if (pdp->pd_php != NULL) {
1117*7c478bd9Sstevel@tonic-gate 				pollhead_delete(pdp->pd_php, pdp);
1118*7c478bd9Sstevel@tonic-gate 				pdp->pd_php = NULL;
1119*7c478bd9Sstevel@tonic-gate 			}
1120*7c478bd9Sstevel@tonic-gate 			if (pdp->pd_fp != NULL) {
1121*7c478bd9Sstevel@tonic-gate 				delfpollinfo(pdp->pd_fd);
1122*7c478bd9Sstevel@tonic-gate 				pdp->pd_fp = NULL;
1123*7c478bd9Sstevel@tonic-gate 			}
1124*7c478bd9Sstevel@tonic-gate 		}
1125*7c478bd9Sstevel@tonic-gate 	}
1126*7c478bd9Sstevel@tonic-gate }
1127*7c478bd9Sstevel@tonic-gate 
/*
 * Invalidate a polldat whose cached fd has been closed: clear its
 * polled events and turn off every appearance of the fd in each
 * pollcacheset's cached pollfd list, driving the per-set xref counts
 * and the overall pd_count down to zero.
 */
void
pcacheset_invalidate(pollstate_t *ps, polldat_t *pdp)
{
	int 	i;
	int	fd = pdp->pd_fd;

	/*
	 * we come here because an earlier close() on this cached poll fd.
	 */
	ASSERT(pdp->pd_fp == NULL);
	ASSERT(MUTEX_HELD(&ps->ps_lock));
	pdp->pd_events = 0;
	for (i = 0; i < ps->ps_nsets; i++) {
		xref_t		*refp;
		pollcacheset_t	*pcsp;

		ASSERT(pdp->pd_ref != NULL);
		refp = &pdp->pd_ref[i];
		if (refp->xf_refcnt) {
			ASSERT(refp->xf_position >= 0);
			pcsp = &ps->ps_pcacheset[i];
			if (refp->xf_refcnt == 1) {
				/*
				 * Single appearance in this set; xf_position
				 * points directly at it.
				 */
				pcsp->pcs_pollfd[refp->xf_position].fd = -1;
				refp->xf_refcnt = 0;
				pdp->pd_count--;
			} else if (refp->xf_refcnt > 1) {
				int	j;

				/*
				 * turn off every appearance in pcs_pollfd list
				 * (xf_position records only the first one, so
				 * scan forward from there).
				 */
				for (j = refp->xf_position;
				    j < pcsp->pcs_nfds; j++) {
					if (pcsp->pcs_pollfd[j].fd == fd) {
						pcsp->pcs_pollfd[j].fd = -1;
						refp->xf_refcnt--;
						pdp->pd_count--;
					}
				}
			}
			ASSERT(refp->xf_refcnt == 0);
			refp->xf_position = POLLPOSINVAL;
		}
	}
	ASSERT(pdp->pd_count == 0);
}
1174*7c478bd9Sstevel@tonic-gate 
/*
 * Insert poll fd into the pollcache, and add poll registration.
 * This routine is called after getf() and before releasef(). So the vnode
 * can not disappear even if we block here.
 * If there is an error, the polled fd is not cached.
 *
 * 'pos' is the fd's position in the current poll list and 'which' selects
 * the pollcacheset being built; on immediate events *fdcntp is bumped.
 * Returns 0 on success or the error from VOP_POLL.
 */
int
pcache_insert(pollstate_t *ps, file_t *fp, pollfd_t *pollfdp, int *fdcntp,
    ssize_t pos, int which)
{
	pollcache_t	*pcp = ps->ps_pcache;
	polldat_t	*pdp;
	int		error;
	int		fd;
	pollhead_t	*memphp = NULL;
	xref_t		*refp;
	int		newpollfd = 0;	/* set when pdp is freshly allocated */

	ASSERT(MUTEX_HELD(&ps->ps_lock));
	/*
	 * The poll caching uses the existing VOP_POLL interface. If there
	 * is no polled events, we want the polled device to set its "some
	 * one is sleeping in poll" flag. When the polled events happen
	 * later, the driver will call pollwakeup(). We achieve this by
	 * always passing 0 in the third parameter ("anyyet") when calling
	 * VOP_POLL. This parameter is not looked at by drivers when the
	 * polled events exist. If a driver chooses to ignore this parameter
	 * and call pollwakeup whenever the polled events happen, that will
	 * be OK too.
	 */
	ASSERT(curthread->t_pollcache == NULL);
	error = VOP_POLL(fp->f_vnode, pollfdp->events, 0, &pollfdp->revents,
	    &memphp);
	if (error) {
		return (error);
	}
	if (pollfdp->revents) {
		(*fdcntp)++;
	}
	/*
	 * polling the underlying device succeeded. Now we can cache it.
	 * A close can't come in here because we have not done a releasef()
	 * yet.
	 */
	fd = pollfdp->fd;
	pdp = pcache_lookup_fd(pcp, fd);
	if (pdp == NULL) {
		ASSERT(ps->ps_nsets > 0);
		pdp = pcache_alloc_fd(ps->ps_nsets);
		newpollfd = 1;
	}
	/*
	 * If this entry was used to cache a poll fd which was closed, and
	 * this entry has not been cleaned, do it now.
	 */
	if ((pdp->pd_count > 0) && (pdp->pd_fp == NULL)) {
		pcacheset_invalidate(ps, pdp);
		ASSERT(pdp->pd_next == NULL);
	}
	if (pdp->pd_count == 0) {
		/* first reference: bind the entry to this fd/file/thread */
		pdp->pd_fd = fd;
		pdp->pd_fp = fp;
		addfpollinfo(fd);
		pdp->pd_thread = curthread;
		pdp->pd_pcache = pcp;
		/*
		 * the entry is never used or cleared by removing a cached
		 * pollfd (pcache_delete_fd). So all the fields should be clear.
		 */
		ASSERT(pdp->pd_next == NULL);
	}

	/*
	 * A polled fd is considered cached. So there should be a fpollinfo
	 * entry on uf_fpollinfo list.
	 */
	ASSERT(infpollinfo(fd));
	/*
	 * If there is an inconsistency, we want to know it here.
	 */
	ASSERT(pdp->pd_fp == fp);

	/*
	 * XXX pd_events is a union of all polled events on this fd, possibly
	 * by different threads. Unless this is a new first poll(), pd_events
	 * never shrinks. If an event is no longer polled by a process, there
	 * is no way to cancel that event. In that case, poll degrade to its
	 * old form -- polling on this fd every time poll() is called. The
	 * assumption is an app always polls the same type of events.
	 */
	pdp->pd_events |= pollfdp->events;

	pdp->pd_count++;
	/*
	 * There is not much special handling for multiple appearances of
	 * same fd other than xf_position always recording the first
	 * appearance in poll list. If this is called from pcacheset_cache_list,
	 * a VOP_POLL is called on every pollfd entry; therefore each
	 * revents and fdcnt should be set correctly. If this is called from
	 * pcacheset_resolve, we don't care about fdcnt here. Pollreadmap will
	 * pick up the right count and handle revents field of each pollfd
	 * entry.
	 */
	ASSERT(pdp->pd_ref != NULL);
	refp = &pdp->pd_ref[which];
	if (refp->xf_refcnt == 0) {
		refp->xf_position = pos;
	} else {
		/*
		 * xf_position records the fd's first appearance in poll list
		 */
		if (pos < refp->xf_position) {
			refp->xf_position = pos;
		}
	}
	ASSERT(pollfdp->fd == ps->ps_pollfd[refp->xf_position].fd);
	refp->xf_refcnt++;
	/* make sure the bitmap is large enough to hold this fd's bit */
	if (fd >= pcp->pc_mapsize) {
		pcache_grow_map(pcp, fd);
	}
	if (fd > pcp->pc_mapend) {
		pcp->pc_mapend = fd;
	}
	if (newpollfd != 0) {
		pcache_insert_fd(ps->ps_pcache, pdp, ps->ps_nfds);
	}
	if (memphp) {
		if (pdp->pd_php == NULL) {
			pollhead_insert(memphp, pdp);
			pdp->pd_php = memphp;
		} else {
			if (memphp != pdp->pd_php) {
				/*
				 * layered devices (e.g. console driver)
				 * may change the vnode and thus the pollhead
				 * pointer out from underneath us.
				 */
				pollhead_delete(pdp->pd_php, pdp);
				pollhead_insert(memphp, pdp);
				pdp->pd_php = memphp;
			}
		}
	}
	/*
	 * Since there is a considerable window between VOP_POLL and when
	 * we actually put the polldat struct on the pollhead list, we could
	 * miss a pollwakeup. In the case of polling additional events, we
	 * don't update the events until after VOP_POLL. So we could miss
	 * pollwakeup there too. So we always set the bit here just to be
	 * safe. The real performance gain is in subsequent pcache_poll.
	 */
	mutex_enter(&pcp->pc_lock);
	BT_SET(pcp->pc_bitmap, fd);
	mutex_exit(&pcp->pc_lock);
	return (0);
}
1331*7c478bd9Sstevel@tonic-gate 
/*
 * The entry is not really deleted. The fields are cleared so that the
 * entry is no longer useful, but it will remain in the hash table for reuse
 * later. It will be freed when the polling lwp exits.
 *
 * Returns 1 when the deleted appearance was the xref's recorded first
 * position and other appearances remain -- the caller must then supply a
 * new position via pcache_update_xref().  Returns 0 otherwise.
 */
int
pcache_delete_fd(pollstate_t *ps, int fd, size_t pos, int which, uint_t cevent)
{
	pollcache_t	*pcp = ps->ps_pcache;
	polldat_t	*pdp;
	xref_t		*refp;

	ASSERT(fd < pcp->pc_mapsize);
	ASSERT(MUTEX_HELD(&ps->ps_lock));

	pdp = pcache_lookup_fd(pcp, fd);
	ASSERT(pdp != NULL);
	ASSERT(pdp->pd_count > 0);
	ASSERT(pdp->pd_ref != NULL);
	refp = &pdp->pd_ref[which];
	if (pdp->pd_count == 1) {
		/*
		 * Last reference to this fd anywhere: tear down the whole
		 * entry (events, pollhead, fpollinfo, bitmap bit).
		 */
		pdp->pd_events = 0;
		refp->xf_position = POLLPOSINVAL;
		ASSERT(refp->xf_refcnt == 1);
		refp->xf_refcnt = 0;
		if (pdp->pd_php) {
			/*
			 * It is possible for a wakeup thread to get ahead
			 * of the following pollhead_delete and set the bit in
			 * bitmap.  It is OK because the bit will be cleared
			 * here anyway.
			 */
			pollhead_delete(pdp->pd_php, pdp);
			pdp->pd_php = NULL;
		}
		pdp->pd_count = 0;
		if (pdp->pd_fp != NULL) {
			pdp->pd_fp = NULL;
			delfpollinfo(fd);
		}
		mutex_enter(&pcp->pc_lock);
		BT_CLEAR(pcp->pc_bitmap, fd);
		mutex_exit(&pcp->pc_lock);
		return (0);
	}
	if ((cevent & POLLCLOSED) == POLLCLOSED) {
		/*
		 * fd cached here has been closed. This is the first
		 * pcache_delete_fd called after the close. Clean up the
		 * entire entry.
		 */
		pcacheset_invalidate(ps, pdp);
		ASSERT(pdp->pd_php == NULL);
		mutex_enter(&pcp->pc_lock);
		BT_CLEAR(pcp->pc_bitmap, fd);
		mutex_exit(&pcp->pc_lock);
		return (0);
	}
#ifdef DEBUG
	if (getf(fd) != NULL) {
		ASSERT(infpollinfo(fd));
		releasef(fd);
	}
#endif	/* DEBUG */
	pdp->pd_count--;
	ASSERT(refp->xf_refcnt > 0);
	if (--refp->xf_refcnt == 0) {
		refp->xf_position = POLLPOSINVAL;
	} else {
		ASSERT(pos >= refp->xf_position);
		if (pos == refp->xf_position) {
			/*
			 * The xref position is no longer valid.
			 * Reset it to a special value and let
			 * caller know it needs to updatexref()
			 * with a new xf_position value.
			 */
			refp->xf_position = POLLPOSTRANS;
			return (1);
		}
	}
	return (0);
}
1415*7c478bd9Sstevel@tonic-gate 
1416*7c478bd9Sstevel@tonic-gate void
1417*7c478bd9Sstevel@tonic-gate pcache_update_xref(pollcache_t *pcp, int fd, ssize_t pos, int which)
1418*7c478bd9Sstevel@tonic-gate {
1419*7c478bd9Sstevel@tonic-gate 	polldat_t	*pdp;
1420*7c478bd9Sstevel@tonic-gate 
1421*7c478bd9Sstevel@tonic-gate 	pdp = pcache_lookup_fd(pcp, fd);
1422*7c478bd9Sstevel@tonic-gate 	ASSERT(pdp != NULL);
1423*7c478bd9Sstevel@tonic-gate 	ASSERT(pdp->pd_ref != NULL);
1424*7c478bd9Sstevel@tonic-gate 	pdp->pd_ref[which].xf_position = pos;
1425*7c478bd9Sstevel@tonic-gate }
1426*7c478bd9Sstevel@tonic-gate 
1427*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
1428*7c478bd9Sstevel@tonic-gate /*
1429*7c478bd9Sstevel@tonic-gate  * For each polled fd, it's either in the bitmap or cached in
1430*7c478bd9Sstevel@tonic-gate  * pcache hash table. If this routine returns 0, something is wrong.
1431*7c478bd9Sstevel@tonic-gate  */
1432*7c478bd9Sstevel@tonic-gate static int
1433*7c478bd9Sstevel@tonic-gate pollchecksanity(pollstate_t *ps, nfds_t nfds)
1434*7c478bd9Sstevel@tonic-gate {
1435*7c478bd9Sstevel@tonic-gate 	int    		i;
1436*7c478bd9Sstevel@tonic-gate 	int		fd;
1437*7c478bd9Sstevel@tonic-gate 	pollcache_t	*pcp = ps->ps_pcache;
1438*7c478bd9Sstevel@tonic-gate 	polldat_t	*pdp;
1439*7c478bd9Sstevel@tonic-gate 	pollfd_t	*pollfdp = ps->ps_pollfd;
1440*7c478bd9Sstevel@tonic-gate 	file_t		*fp;
1441*7c478bd9Sstevel@tonic-gate 
1442*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1443*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < nfds; i++) {
1444*7c478bd9Sstevel@tonic-gate 		fd = pollfdp[i].fd;
1445*7c478bd9Sstevel@tonic-gate 		if (fd < 0) {
1446*7c478bd9Sstevel@tonic-gate 			ASSERT(pollfdp[i].revents == 0);
1447*7c478bd9Sstevel@tonic-gate 			continue;
1448*7c478bd9Sstevel@tonic-gate 		}
1449*7c478bd9Sstevel@tonic-gate 		if (pollfdp[i].revents == POLLNVAL)
1450*7c478bd9Sstevel@tonic-gate 			continue;
1451*7c478bd9Sstevel@tonic-gate 		if ((fp = getf(fd)) == NULL)
1452*7c478bd9Sstevel@tonic-gate 			continue;
1453*7c478bd9Sstevel@tonic-gate 		pdp = pcache_lookup_fd(pcp, fd);
1454*7c478bd9Sstevel@tonic-gate 		ASSERT(pdp != NULL);
1455*7c478bd9Sstevel@tonic-gate 		ASSERT(infpollinfo(fd));
1456*7c478bd9Sstevel@tonic-gate 		ASSERT(pdp->pd_fp == fp);
1457*7c478bd9Sstevel@tonic-gate 		releasef(fd);
1458*7c478bd9Sstevel@tonic-gate 		if (BT_TEST(pcp->pc_bitmap, fd))
1459*7c478bd9Sstevel@tonic-gate 			continue;
1460*7c478bd9Sstevel@tonic-gate 		if (pdp->pd_php == NULL)
1461*7c478bd9Sstevel@tonic-gate 			return (0);
1462*7c478bd9Sstevel@tonic-gate 	}
1463*7c478bd9Sstevel@tonic-gate 	return (1);
1464*7c478bd9Sstevel@tonic-gate }
1465*7c478bd9Sstevel@tonic-gate #endif	/* DEBUG */
1466*7c478bd9Sstevel@tonic-gate 
1467*7c478bd9Sstevel@tonic-gate /*
1468*7c478bd9Sstevel@tonic-gate  * resolve the difference between the current poll list and a cached one.
1469*7c478bd9Sstevel@tonic-gate  */
1470*7c478bd9Sstevel@tonic-gate int
1471*7c478bd9Sstevel@tonic-gate pcacheset_resolve(pollstate_t *ps, nfds_t nfds, int *fdcntp, int which)
1472*7c478bd9Sstevel@tonic-gate {
1473*7c478bd9Sstevel@tonic-gate 	int    		i;
1474*7c478bd9Sstevel@tonic-gate 	pollcache_t	*pcp = ps->ps_pcache;
1475*7c478bd9Sstevel@tonic-gate 	pollfd_t	*newlist = NULL;
1476*7c478bd9Sstevel@tonic-gate 	pollfd_t	*current = ps->ps_pollfd;
1477*7c478bd9Sstevel@tonic-gate 	pollfd_t	*cached;
1478*7c478bd9Sstevel@tonic-gate 	pollcacheset_t	*pcsp;
1479*7c478bd9Sstevel@tonic-gate 	int		common;
1480*7c478bd9Sstevel@tonic-gate 	int		count = 0;
1481*7c478bd9Sstevel@tonic-gate 	int		offset;
1482*7c478bd9Sstevel@tonic-gate 	int		remain;
1483*7c478bd9Sstevel@tonic-gate 	int		fd;
1484*7c478bd9Sstevel@tonic-gate 	file_t		*fp;
1485*7c478bd9Sstevel@tonic-gate 	int		fdcnt = 0;
1486*7c478bd9Sstevel@tonic-gate 	int		cnt = 0;
1487*7c478bd9Sstevel@tonic-gate 	nfds_t		old_nfds;
1488*7c478bd9Sstevel@tonic-gate 	int		error = 0;
1489*7c478bd9Sstevel@tonic-gate 	int		mismatch = 0;
1490*7c478bd9Sstevel@tonic-gate 
1491*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1492*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
1493*7c478bd9Sstevel@tonic-gate 	checkpolldat(ps);
1494*7c478bd9Sstevel@tonic-gate #endif
1495*7c478bd9Sstevel@tonic-gate 	pcsp = &ps->ps_pcacheset[which];
1496*7c478bd9Sstevel@tonic-gate 	old_nfds = pcsp->pcs_nfds;
1497*7c478bd9Sstevel@tonic-gate 	common = (nfds > old_nfds) ? old_nfds : nfds;
1498*7c478bd9Sstevel@tonic-gate 	if (nfds != old_nfds) {
1499*7c478bd9Sstevel@tonic-gate 		/*
1500*7c478bd9Sstevel@tonic-gate 		 * the length of poll list has changed. allocate a new
1501*7c478bd9Sstevel@tonic-gate 		 * pollfd list.
1502*7c478bd9Sstevel@tonic-gate 		 */
1503*7c478bd9Sstevel@tonic-gate 		newlist = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
1504*7c478bd9Sstevel@tonic-gate 		bcopy(current, newlist, sizeof (pollfd_t) * nfds);
1505*7c478bd9Sstevel@tonic-gate 	}
1506*7c478bd9Sstevel@tonic-gate 	/*
1507*7c478bd9Sstevel@tonic-gate 	 * Compare the overlapping part of the current fd list with the
1508*7c478bd9Sstevel@tonic-gate 	 * cached one. Whenever a difference is found, resolve it.
1509*7c478bd9Sstevel@tonic-gate 	 * The comparison is done on the current poll list and the
1510*7c478bd9Sstevel@tonic-gate 	 * cached list. But we may be setting up the newlist to be the
1511*7c478bd9Sstevel@tonic-gate 	 * cached list for next poll.
1512*7c478bd9Sstevel@tonic-gate 	 */
1513*7c478bd9Sstevel@tonic-gate 	cached = pcsp->pcs_pollfd;
1514*7c478bd9Sstevel@tonic-gate 	remain = common;
1515*7c478bd9Sstevel@tonic-gate 
1516*7c478bd9Sstevel@tonic-gate 	while (count < common) {
1517*7c478bd9Sstevel@tonic-gate 		int	tmpfd;
1518*7c478bd9Sstevel@tonic-gate 		pollfd_t *np;
1519*7c478bd9Sstevel@tonic-gate 
1520*7c478bd9Sstevel@tonic-gate 		np = (newlist != NULL) ? &newlist[count] : NULL;
1521*7c478bd9Sstevel@tonic-gate 		offset = pcacheset_cmp(&current[count], &cached[count], np,
1522*7c478bd9Sstevel@tonic-gate 		    remain);
1523*7c478bd9Sstevel@tonic-gate 		/*
1524*7c478bd9Sstevel@tonic-gate 		 * Collect stats. If lists are completed the first time,
1525*7c478bd9Sstevel@tonic-gate 		 * it's a hit. Otherwise, it's a partial hit or miss.
1526*7c478bd9Sstevel@tonic-gate 		 */
1527*7c478bd9Sstevel@tonic-gate 		if ((count == 0) && (offset == common)) {
1528*7c478bd9Sstevel@tonic-gate 			pollstats.pollcachehit.value.ui64++;
1529*7c478bd9Sstevel@tonic-gate 		} else {
1530*7c478bd9Sstevel@tonic-gate 			mismatch++;
1531*7c478bd9Sstevel@tonic-gate 		}
1532*7c478bd9Sstevel@tonic-gate 		count += offset;
1533*7c478bd9Sstevel@tonic-gate 		if (offset < remain) {
1534*7c478bd9Sstevel@tonic-gate 			ASSERT(count < common);
1535*7c478bd9Sstevel@tonic-gate 			ASSERT((current[count].fd != cached[count].fd) ||
1536*7c478bd9Sstevel@tonic-gate 			    (current[count].events != cached[count].events));
1537*7c478bd9Sstevel@tonic-gate 			/*
1538*7c478bd9Sstevel@tonic-gate 			 * Filter out invalid events.
1539*7c478bd9Sstevel@tonic-gate 			 */
1540*7c478bd9Sstevel@tonic-gate 			if (current[count].events & ~VALID_POLL_EVENTS) {
1541*7c478bd9Sstevel@tonic-gate 				if (newlist != NULL) {
1542*7c478bd9Sstevel@tonic-gate 					newlist[count].events =
1543*7c478bd9Sstevel@tonic-gate 						current[count].events &=
1544*7c478bd9Sstevel@tonic-gate 							VALID_POLL_EVENTS;
1545*7c478bd9Sstevel@tonic-gate 				} else {
1546*7c478bd9Sstevel@tonic-gate 					current[count].events &=
1547*7c478bd9Sstevel@tonic-gate 						VALID_POLL_EVENTS;
1548*7c478bd9Sstevel@tonic-gate 				}
1549*7c478bd9Sstevel@tonic-gate 			}
1550*7c478bd9Sstevel@tonic-gate 			/*
1551*7c478bd9Sstevel@tonic-gate 			 * when resolving a difference, we always remove the
1552*7c478bd9Sstevel@tonic-gate 			 * fd from cache before inserting one into cache.
1553*7c478bd9Sstevel@tonic-gate 			 */
1554*7c478bd9Sstevel@tonic-gate 			if (cached[count].fd >= 0) {
1555*7c478bd9Sstevel@tonic-gate 				tmpfd = cached[count].fd;
1556*7c478bd9Sstevel@tonic-gate 				if (pcache_delete_fd(ps, tmpfd, count, which,
1557*7c478bd9Sstevel@tonic-gate 				    (uint_t)cached[count].events)) {
1558*7c478bd9Sstevel@tonic-gate 					/*
1559*7c478bd9Sstevel@tonic-gate 					 * This should be rare but needed for
1560*7c478bd9Sstevel@tonic-gate 					 * correctness.
1561*7c478bd9Sstevel@tonic-gate 					 *
1562*7c478bd9Sstevel@tonic-gate 					 * The first appearance in cached list
1563*7c478bd9Sstevel@tonic-gate 					 * is being "turned off". The same fd
1564*7c478bd9Sstevel@tonic-gate 					 * appear more than once in the cached
1565*7c478bd9Sstevel@tonic-gate 					 * poll list. Find the next one on the
1566*7c478bd9Sstevel@tonic-gate 					 * list and update the cached
1567*7c478bd9Sstevel@tonic-gate 					 * xf_position field.
1568*7c478bd9Sstevel@tonic-gate 					 */
1569*7c478bd9Sstevel@tonic-gate 					for (i = count + 1; i < old_nfds; i++) {
1570*7c478bd9Sstevel@tonic-gate 						if (cached[i].fd == tmpfd) {
1571*7c478bd9Sstevel@tonic-gate 							pcache_update_xref(pcp,
1572*7c478bd9Sstevel@tonic-gate 							    tmpfd, (ssize_t)i,
1573*7c478bd9Sstevel@tonic-gate 							    which);
1574*7c478bd9Sstevel@tonic-gate 						    break;
1575*7c478bd9Sstevel@tonic-gate 						}
1576*7c478bd9Sstevel@tonic-gate 					}
1577*7c478bd9Sstevel@tonic-gate 					ASSERT(i <= old_nfds);
1578*7c478bd9Sstevel@tonic-gate 				}
1579*7c478bd9Sstevel@tonic-gate 				/*
1580*7c478bd9Sstevel@tonic-gate 				 * In case a new cache list is allocated,
1581*7c478bd9Sstevel@tonic-gate 				 * need to keep both cache lists in sync
1582*7c478bd9Sstevel@tonic-gate 				 * b/c the new one can be freed if we have
1583*7c478bd9Sstevel@tonic-gate 				 * an error later.
1584*7c478bd9Sstevel@tonic-gate 				 */
1585*7c478bd9Sstevel@tonic-gate 				cached[count].fd = -1;
1586*7c478bd9Sstevel@tonic-gate 				if (newlist != NULL) {
1587*7c478bd9Sstevel@tonic-gate 					newlist[count].fd = -1;
1588*7c478bd9Sstevel@tonic-gate 				}
1589*7c478bd9Sstevel@tonic-gate 			}
1590*7c478bd9Sstevel@tonic-gate 			if ((tmpfd = current[count].fd) >= 0) {
1591*7c478bd9Sstevel@tonic-gate 				/*
1592*7c478bd9Sstevel@tonic-gate 				 * add to the cached fd tbl and bitmap.
1593*7c478bd9Sstevel@tonic-gate 				 */
1594*7c478bd9Sstevel@tonic-gate 				if ((fp = getf(tmpfd)) == NULL) {
1595*7c478bd9Sstevel@tonic-gate 					current[count].revents = POLLNVAL;
1596*7c478bd9Sstevel@tonic-gate 					if (newlist != NULL) {
1597*7c478bd9Sstevel@tonic-gate 						newlist[count].fd = -1;
1598*7c478bd9Sstevel@tonic-gate 					}
1599*7c478bd9Sstevel@tonic-gate 					cached[count].fd = -1;
1600*7c478bd9Sstevel@tonic-gate 					fdcnt++;
1601*7c478bd9Sstevel@tonic-gate 				} else {
1602*7c478bd9Sstevel@tonic-gate 					/*
1603*7c478bd9Sstevel@tonic-gate 					 * Here we don't care about the
1604*7c478bd9Sstevel@tonic-gate 					 * fdcnt. We will examine the bitmap
1605*7c478bd9Sstevel@tonic-gate 					 * later and pick up the correct
1606*7c478bd9Sstevel@tonic-gate 					 * fdcnt there. So we never bother
1607*7c478bd9Sstevel@tonic-gate 					 * to check value of 'cnt'.
1608*7c478bd9Sstevel@tonic-gate 					 */
1609*7c478bd9Sstevel@tonic-gate 					error = pcache_insert(ps, fp,
1610*7c478bd9Sstevel@tonic-gate 					    &current[count], &cnt,
1611*7c478bd9Sstevel@tonic-gate 					    (ssize_t)count, which);
1612*7c478bd9Sstevel@tonic-gate 					/*
1613*7c478bd9Sstevel@tonic-gate 					 * if no error, we want to do releasef
1614*7c478bd9Sstevel@tonic-gate 					 * after we updated cache poll list
1615*7c478bd9Sstevel@tonic-gate 					 * entry so that close() won't race
1616*7c478bd9Sstevel@tonic-gate 					 * us.
1617*7c478bd9Sstevel@tonic-gate 					 */
1618*7c478bd9Sstevel@tonic-gate 					if (error) {
1619*7c478bd9Sstevel@tonic-gate 						/*
1620*7c478bd9Sstevel@tonic-gate 						 * If we encountered an error,
1621*7c478bd9Sstevel@tonic-gate 						 * we have invalidated an
1622*7c478bd9Sstevel@tonic-gate 						 * entry in cached poll list
1623*7c478bd9Sstevel@tonic-gate 						 * (in pcache_delete_fd() above)
1624*7c478bd9Sstevel@tonic-gate 						 * but failed to add one here.
1625*7c478bd9Sstevel@tonic-gate 						 * This is OK b/c what's in the
1626*7c478bd9Sstevel@tonic-gate 						 * cached list is consistent
1627*7c478bd9Sstevel@tonic-gate 						 * with content of cache.
1628*7c478bd9Sstevel@tonic-gate 						 * It will not have any ill
1629*7c478bd9Sstevel@tonic-gate 						 * effect on next poll().
1630*7c478bd9Sstevel@tonic-gate 						 */
1631*7c478bd9Sstevel@tonic-gate 						releasef(tmpfd);
1632*7c478bd9Sstevel@tonic-gate 						if (newlist != NULL) {
1633*7c478bd9Sstevel@tonic-gate 							kmem_free(newlist,
1634*7c478bd9Sstevel@tonic-gate 							    nfds *
1635*7c478bd9Sstevel@tonic-gate 							    sizeof (pollfd_t));
1636*7c478bd9Sstevel@tonic-gate 						}
1637*7c478bd9Sstevel@tonic-gate 						return (error);
1638*7c478bd9Sstevel@tonic-gate 					}
1639*7c478bd9Sstevel@tonic-gate 					/*
1640*7c478bd9Sstevel@tonic-gate 					 * If we have allocated a new(temp)
1641*7c478bd9Sstevel@tonic-gate 					 * cache list, we need to keep both
1642*7c478bd9Sstevel@tonic-gate 					 * in sync b/c the new one can be freed
1643*7c478bd9Sstevel@tonic-gate 					 * if we have an error later.
1644*7c478bd9Sstevel@tonic-gate 					 */
1645*7c478bd9Sstevel@tonic-gate 					if (newlist != NULL) {
1646*7c478bd9Sstevel@tonic-gate 						newlist[count].fd =
1647*7c478bd9Sstevel@tonic-gate 						    current[count].fd;
1648*7c478bd9Sstevel@tonic-gate 						newlist[count].events =
1649*7c478bd9Sstevel@tonic-gate 						    current[count].events;
1650*7c478bd9Sstevel@tonic-gate 					}
1651*7c478bd9Sstevel@tonic-gate 					cached[count].fd = current[count].fd;
1652*7c478bd9Sstevel@tonic-gate 					cached[count].events =
1653*7c478bd9Sstevel@tonic-gate 					    current[count].events;
1654*7c478bd9Sstevel@tonic-gate 					releasef(tmpfd);
1655*7c478bd9Sstevel@tonic-gate 				}
1656*7c478bd9Sstevel@tonic-gate 			} else {
1657*7c478bd9Sstevel@tonic-gate 				current[count].revents = 0;
1658*7c478bd9Sstevel@tonic-gate 			}
1659*7c478bd9Sstevel@tonic-gate 			count++;
1660*7c478bd9Sstevel@tonic-gate 			remain = common - count;
1661*7c478bd9Sstevel@tonic-gate 		}
1662*7c478bd9Sstevel@tonic-gate 	}
1663*7c478bd9Sstevel@tonic-gate 	if (mismatch != 0) {
1664*7c478bd9Sstevel@tonic-gate 		if (mismatch == common) {
1665*7c478bd9Sstevel@tonic-gate 			pollstats.pollcachemiss.value.ui64++;
1666*7c478bd9Sstevel@tonic-gate 		} else {
1667*7c478bd9Sstevel@tonic-gate 			pollstats.pollcachephit.value.ui64++;
1668*7c478bd9Sstevel@tonic-gate 		}
1669*7c478bd9Sstevel@tonic-gate 	}
1670*7c478bd9Sstevel@tonic-gate 	/*
1671*7c478bd9Sstevel@tonic-gate 	 * take care of the non overlapping part of a list
1672*7c478bd9Sstevel@tonic-gate 	 */
1673*7c478bd9Sstevel@tonic-gate 	if (nfds > old_nfds) {
1674*7c478bd9Sstevel@tonic-gate 		ASSERT(newlist != NULL);
1675*7c478bd9Sstevel@tonic-gate 		for (i = old_nfds; i < nfds; i++) {
1676*7c478bd9Sstevel@tonic-gate 			/* filter out invalid events */
1677*7c478bd9Sstevel@tonic-gate 			if (current[i].events & ~VALID_POLL_EVENTS) {
1678*7c478bd9Sstevel@tonic-gate 				newlist[i].events = current[i].events =
1679*7c478bd9Sstevel@tonic-gate 				current[i].events & VALID_POLL_EVENTS;
1680*7c478bd9Sstevel@tonic-gate 			}
1681*7c478bd9Sstevel@tonic-gate 			if ((fd = current[i].fd) < 0) {
1682*7c478bd9Sstevel@tonic-gate 				current[i].revents = 0;
1683*7c478bd9Sstevel@tonic-gate 				continue;
1684*7c478bd9Sstevel@tonic-gate 			}
1685*7c478bd9Sstevel@tonic-gate 			/*
1686*7c478bd9Sstevel@tonic-gate 			 * add to the cached fd tbl and bitmap.
1687*7c478bd9Sstevel@tonic-gate 			 */
1688*7c478bd9Sstevel@tonic-gate 			if ((fp = getf(fd)) == NULL) {
1689*7c478bd9Sstevel@tonic-gate 				current[i].revents = POLLNVAL;
1690*7c478bd9Sstevel@tonic-gate 				newlist[i].fd = -1;
1691*7c478bd9Sstevel@tonic-gate 				fdcnt++;
1692*7c478bd9Sstevel@tonic-gate 				continue;
1693*7c478bd9Sstevel@tonic-gate 			}
1694*7c478bd9Sstevel@tonic-gate 			/*
1695*7c478bd9Sstevel@tonic-gate 			 * Here we don't care about the
1696*7c478bd9Sstevel@tonic-gate 			 * fdcnt. We will examine the bitmap
1697*7c478bd9Sstevel@tonic-gate 			 * later and pick up the correct
1698*7c478bd9Sstevel@tonic-gate 			 * fdcnt there. So we never bother to
1699*7c478bd9Sstevel@tonic-gate 			 * check 'cnt'.
1700*7c478bd9Sstevel@tonic-gate 			 */
1701*7c478bd9Sstevel@tonic-gate 			error = pcache_insert(ps, fp, &current[i], &cnt,
1702*7c478bd9Sstevel@tonic-gate 			    (ssize_t)i, which);
1703*7c478bd9Sstevel@tonic-gate 			releasef(fd);
1704*7c478bd9Sstevel@tonic-gate 			if (error) {
1705*7c478bd9Sstevel@tonic-gate 				/*
1706*7c478bd9Sstevel@tonic-gate 				 * Here we are half way through adding newly
1707*7c478bd9Sstevel@tonic-gate 				 * polled fd. Undo enough to keep the cache
1708*7c478bd9Sstevel@tonic-gate 				 * list consistent with the cache content.
1709*7c478bd9Sstevel@tonic-gate 				 */
1710*7c478bd9Sstevel@tonic-gate 				pcacheset_remove_list(ps, current, old_nfds,
1711*7c478bd9Sstevel@tonic-gate 				    i, which, 0);
1712*7c478bd9Sstevel@tonic-gate 				kmem_free(newlist, nfds * sizeof (pollfd_t));
1713*7c478bd9Sstevel@tonic-gate 				return (error);
1714*7c478bd9Sstevel@tonic-gate 			}
1715*7c478bd9Sstevel@tonic-gate 		}
1716*7c478bd9Sstevel@tonic-gate 	}
1717*7c478bd9Sstevel@tonic-gate 	if (old_nfds > nfds) {
1718*7c478bd9Sstevel@tonic-gate 		/*
1719*7c478bd9Sstevel@tonic-gate 		 * remove the fd's which are no longer polled.
1720*7c478bd9Sstevel@tonic-gate 		 */
1721*7c478bd9Sstevel@tonic-gate 		pcacheset_remove_list(ps, pcsp->pcs_pollfd, nfds, old_nfds,
1722*7c478bd9Sstevel@tonic-gate 		    which, 1);
1723*7c478bd9Sstevel@tonic-gate 	}
1724*7c478bd9Sstevel@tonic-gate 	/*
1725*7c478bd9Sstevel@tonic-gate 	 * set difference resolved. update nfds and cachedlist
1726*7c478bd9Sstevel@tonic-gate 	 * in pollstate struct.
1727*7c478bd9Sstevel@tonic-gate 	 */
1728*7c478bd9Sstevel@tonic-gate 	if (newlist != NULL) {
1729*7c478bd9Sstevel@tonic-gate 		kmem_free(pcsp->pcs_pollfd, old_nfds * sizeof (pollfd_t));
1730*7c478bd9Sstevel@tonic-gate 		/*
1731*7c478bd9Sstevel@tonic-gate 		 * By now, the pollfd.revents field should
1732*7c478bd9Sstevel@tonic-gate 		 * all be zeroed.
1733*7c478bd9Sstevel@tonic-gate 		 */
1734*7c478bd9Sstevel@tonic-gate 		pcsp->pcs_pollfd = newlist;
1735*7c478bd9Sstevel@tonic-gate 		pcsp->pcs_nfds = nfds;
1736*7c478bd9Sstevel@tonic-gate 	}
1737*7c478bd9Sstevel@tonic-gate 	ASSERT(*fdcntp == 0);
1738*7c478bd9Sstevel@tonic-gate 	*fdcntp = fdcnt;
1739*7c478bd9Sstevel@tonic-gate 	/*
1740*7c478bd9Sstevel@tonic-gate 	 * By now for every fd in pollfdp, one of the following should be
1741*7c478bd9Sstevel@tonic-gate 	 * true. Otherwise we will miss a polled event.
1742*7c478bd9Sstevel@tonic-gate 	 *
1743*7c478bd9Sstevel@tonic-gate 	 * 1. the bit corresponding to the fd in bitmap is set. So VOP_POLL
1744*7c478bd9Sstevel@tonic-gate 	 *    will be called on this fd in next poll.
1745*7c478bd9Sstevel@tonic-gate 	 * 2. the fd is cached in the pcache (i.e. pd_php is set). So
1746*7c478bd9Sstevel@tonic-gate 	 *    pollnotify will happen.
1747*7c478bd9Sstevel@tonic-gate 	 */
1748*7c478bd9Sstevel@tonic-gate 	ASSERT(pollchecksanity(ps, nfds));
1749*7c478bd9Sstevel@tonic-gate 	/*
1750*7c478bd9Sstevel@tonic-gate 	 * make sure cross reference between cached poll lists and cached
1751*7c478bd9Sstevel@tonic-gate 	 * poll fds are correct.
1752*7c478bd9Sstevel@tonic-gate 	 */
1753*7c478bd9Sstevel@tonic-gate 	ASSERT(pollcheckxref(ps, which));
1754*7c478bd9Sstevel@tonic-gate 	/*
1755*7c478bd9Sstevel@tonic-gate 	 * ensure each polldat in pollcache reference a polled fd in
1756*7c478bd9Sstevel@tonic-gate 	 * pollcacheset.
1757*7c478bd9Sstevel@tonic-gate 	 */
1758*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
1759*7c478bd9Sstevel@tonic-gate 	checkpolldat(ps);
1760*7c478bd9Sstevel@tonic-gate #endif
1761*7c478bd9Sstevel@tonic-gate 	return (0);
1762*7c478bd9Sstevel@tonic-gate }
1763*7c478bd9Sstevel@tonic-gate 
#ifdef DEBUG
/*
 * Debug helper: count how many pollfd entries are reporting revents.
 * Also verifies two invariants of the cached state:
 *  - an entry with a negative fd never reports events;
 *  - an entry with live (non-POLLNVAL) revents still has its bit set
 *    in the pollcache bitmap, so it will be re-polled next time.
 */
static int
pollscanrevents(pollcache_t *pcp, pollfd_t *pollfdp, nfds_t nfds)
{
	int	idx;
	int	nrevents = 0;

	for (idx = 0; idx < nfds; idx++) {
		if (pollfdp[idx].fd < 0) {
			/* skipped slots must stay quiet */
			ASSERT(pollfdp[idx].revents == 0);
			continue;
		}
		if (pollfdp[idx].revents != 0) {
			nrevents++;
			if (pollfdp[idx].revents != POLLNVAL) {
				/* a live event implies the bitmap bit is on */
				ASSERT(BT_TEST(pcp->pc_bitmap,
				    pollfdp[idx].fd));
			}
		}
	}
	return (nrevents);
}
#endif	/* DEBUG */
1786*7c478bd9Sstevel@tonic-gate 
/*
 * Scan the pollcache bitmap and call VOP_POLL on each fd whose bit is set
 * and which belongs to the poll list selected by 'which'.  On success the
 * number of fds with reported events is added to *fdcntp.
 *
 * Entered with both ps->ps_lock and the pollcache's pc_lock held (asserted
 * below).  VOP_POLL may block and drop locks; the 'retry' label restarts
 * the whole scan whenever cached state may have changed underneath us
 * (pollhead moved, first-time pollhead insert, or duplicate-fd rescan).
 */
int
pcache_poll(pollfd_t *pollfdp, pollstate_t *ps, nfds_t nfds, int *fdcntp,
    int which)
{
	int		i;
	pollcache_t	*pcp;
	int 		fd;
	int 		begin, end, done;
	pollhead_t	*php;
	int		fdcnt;
	int		error = 0;
	file_t		*fp;
	polldat_t	*pdp;
	xref_t		*refp;
	int		entry;

	pcp = ps->ps_pcache;
	ASSERT(MUTEX_HELD(&ps->ps_lock));
	ASSERT(MUTEX_HELD(&pcp->pc_lock));
retry:
	/* restart point: rescan the whole bitmap from the beginning */
	done = 0;
	begin = 0;
	fdcnt = 0;
	end = pcp->pc_mapend;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		/*
		 * only poll fds which may have events
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, begin, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			ASSERT(pollcheckrevents(ps, begin, fd, which));
			/*
			 * adjust map pointers for next round
			 */
			if (fd == end) {
				done = 1;
			} else {
				begin = fd + 1;
			}
			/*
			 * A bitmap caches poll state information of
			 * multiple poll lists. Call VOP_POLL only if
			 * the bit corresponds to an fd in this poll
			 * list.
			 */
			pdp = pcache_lookup_fd(pcp, fd);
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_ref != NULL);
			refp = &pdp->pd_ref[which];
			if (refp->xf_refcnt == 0)
				continue;
			entry = refp->xf_position;
			ASSERT((entry >= 0) && (entry < nfds));
			ASSERT(pollfdp[entry].fd == fd);
			/*
			 * we are in this routine implies that we have
			 * successfully polled this fd in the past.
			 * Check to see this fd is closed while we are
			 * blocked in poll. This ensures that we don't
			 * miss a close on the fd in the case this fd is
			 * reused.
			 */
			if (pdp->pd_fp == NULL) {
				ASSERT(pdp->pd_count > 0);
				pollfdp[entry].revents = POLLNVAL;
				fdcnt++;
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple time
					 * in the poll list. Find all of them.
					 */
					for (i = entry + 1; i < nfds; i++) {
						if (pollfdp[i].fd == fd) {
							pollfdp[i].revents =
							    POLLNVAL;
							fdcnt++;
						}
					}
				}
				pcacheset_invalidate(ps, pdp);
				continue;
			}
			/*
			 * We can be here polling a device that is being
			 * closed (i.e. the file pointer is set to NULL,
			 * but pollcacheclean has not happened yet).
			 */
			if ((fp = getf(fd)) == NULL) {
				pollfdp[entry].revents = POLLNVAL;
				fdcnt++;
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple time
					 * in the poll list. Find all of them.
					 */
					for (i = entry + 1; i < nfds; i++) {
						if (pollfdp[i].fd == fd) {
							pollfdp[i].revents =
							    POLLNVAL;
							fdcnt++;
						}
					}
				}
				continue;
			}
			ASSERT(pdp->pd_fp == fp);
			ASSERT(infpollinfo(fd));
			/*
			 * Since we no longer hold poll head lock across
			 * VOP_POLL, pollunlock logic can be simplifed.
			 */
			ASSERT(pdp->pd_php == NULL ||
			    MUTEX_NOT_HELD(PHLOCK(pdp->pd_php)));
			/*
			 * underlying file systems may set a "pollpending"
			 * flag when it sees the poll may block. Pollwakeup()
			 * is called by wakeup thread if pollpending is set.
			 * Pass a 0 fdcnt so that the underlying file system
			 * will set the "pollpending" flag set when there is
			 * no polled events.
			 *
			 * Use pollfdp[].events for actual polling because
			 * the pd_events is union of all cached poll events
			 * on this fd. The events parameter also affects
			 * how the polled device sets the "poll pending"
			 * flag.
			 */
			ASSERT(curthread->t_pollcache == NULL);
			error = VOP_POLL(fp->f_vnode, pollfdp[entry].events, 0,
			    &pollfdp[entry].revents, &php);
			/*
			 * releasef after completely done with this cached
			 * poll entry. To prevent close() coming in to clear
			 * this entry.
			 */
			if (error) {
				releasef(fd);
				break;
			}
			/*
			 * layered devices (e.g. console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				releasef(fd);
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * We could have missed a wakeup on the new
				 * target device. Make sure the new target
				 * gets polled once.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				goto retry;
			}

			if (pollfdp[entry].revents) {
				ASSERT(refp->xf_refcnt >= 1);
				fdcnt++;
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple time
					 * in the poll list. This is rare but
					 * we have to look at all of them for
					 * correctness.
					 */
					error = plist_chkdupfd(fp, pdp, ps,
					    pollfdp, entry, &fdcnt);
					if (error > 0) {
						releasef(fd);
						break;
					}
					if (error < 0) {
						/* list changed; rescan */
						goto retry;
					}
				}
				releasef(fd);
			} else {
				/*
				 * VOP_POLL didn't return any revents. We can
				 * clear the bit in bitmap only if we have the
				 * pollhead ptr cached and no other cached
				 * entry is polling different events on this fd.
				 * VOP_POLL may have dropped the ps_lock. Make
				 * sure pollwakeup has not happened before clear
				 * the bit.
				 */
				if ((pdp->pd_php != NULL) &&
				    (pollfdp[entry].events == pdp->pd_events) &&
				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				/*
				 * if the fd can be cached now but not before,
				 * do it now.
				 */
				if ((pdp->pd_php == NULL) && (php != NULL)) {
					pdp->pd_php = php;
					pollhead_insert(php, pdp);
					/*
					 * We are inserting a polldat struct for
					 * the first time. We may have missed a
					 * wakeup on this device. Re-poll once.
					 * This should be a rare event.
					 */
					releasef(fd);
					goto retry;
				}
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple time
					 * in the poll list. This is rare but
					 * we have to look at all of them for
					 * correctness.
					 */
					error = plist_chkdupfd(fp, pdp, ps,
					    pollfdp, entry, &fdcnt);
					if (error > 0) {
						releasef(fd);
						break;
					}
					if (error < 0) {
						/* list changed; rescan */
						goto retry;
					}
				}
				releasef(fd);
			}
		} else {
			/* no more bits set in the bitmap; scan complete */
			done = 1;
			ASSERT(pollcheckrevents(ps, begin, end + 1, which));
		}
	}
	if (!error) {
		/* cross-check fd count against a full rescan (DEBUG only) */
		ASSERT(*fdcntp + fdcnt == pollscanrevents(pcp, pollfdp, nfds));
		*fdcntp += fdcnt;
	}
	return (error);
}
2034*7c478bd9Sstevel@tonic-gate 
2035*7c478bd9Sstevel@tonic-gate /*
2036*7c478bd9Sstevel@tonic-gate  * Going through the poll list without much locking. Poll all fds and
2037*7c478bd9Sstevel@tonic-gate  * cache all valid fds in the pollcache.
2038*7c478bd9Sstevel@tonic-gate  */
2039*7c478bd9Sstevel@tonic-gate int
2040*7c478bd9Sstevel@tonic-gate pcacheset_cache_list(pollstate_t *ps, pollfd_t *fds, int *fdcntp, int which)
2041*7c478bd9Sstevel@tonic-gate {
2042*7c478bd9Sstevel@tonic-gate 	pollfd_t	*pollfdp = ps->ps_pollfd;
2043*7c478bd9Sstevel@tonic-gate 	pollcacheset_t	*pcacheset = ps->ps_pcacheset;
2044*7c478bd9Sstevel@tonic-gate 	pollfd_t	*newfdlist;
2045*7c478bd9Sstevel@tonic-gate 	int		i;
2046*7c478bd9Sstevel@tonic-gate 	int		fd;
2047*7c478bd9Sstevel@tonic-gate 	file_t		*fp;
2048*7c478bd9Sstevel@tonic-gate 	int		error = 0;
2049*7c478bd9Sstevel@tonic-gate 
2050*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2051*7c478bd9Sstevel@tonic-gate 	ASSERT(which < ps->ps_nsets);
2052*7c478bd9Sstevel@tonic-gate 	ASSERT(pcacheset != NULL);
2053*7c478bd9Sstevel@tonic-gate 	ASSERT(pcacheset[which].pcs_pollfd == NULL);
2054*7c478bd9Sstevel@tonic-gate 	newfdlist  = kmem_alloc(ps->ps_nfds * sizeof (pollfd_t), KM_SLEEP);
2055*7c478bd9Sstevel@tonic-gate 	/*
2056*7c478bd9Sstevel@tonic-gate 	 * cache the new poll list in pollcachset.
2057*7c478bd9Sstevel@tonic-gate 	 */
2058*7c478bd9Sstevel@tonic-gate 	bcopy(pollfdp, newfdlist, sizeof (pollfd_t) * ps->ps_nfds);
2059*7c478bd9Sstevel@tonic-gate 
2060*7c478bd9Sstevel@tonic-gate 	pcacheset[which].pcs_pollfd = newfdlist;
2061*7c478bd9Sstevel@tonic-gate 	pcacheset[which].pcs_nfds = ps->ps_nfds;
2062*7c478bd9Sstevel@tonic-gate 	pcacheset[which].pcs_usradr = (uintptr_t)fds;
2063*7c478bd9Sstevel@tonic-gate 
2064*7c478bd9Sstevel@tonic-gate 	/*
2065*7c478bd9Sstevel@tonic-gate 	 * We have saved a copy of current poll fd list in one pollcacheset.
2066*7c478bd9Sstevel@tonic-gate 	 * The 'revents' field of the new list is not yet set to 0. Loop
2067*7c478bd9Sstevel@tonic-gate 	 * through the new list just to do that is expensive. We do that
2068*7c478bd9Sstevel@tonic-gate 	 * while polling the list.
2069*7c478bd9Sstevel@tonic-gate 	 */
2070*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < ps->ps_nfds; i++) {
2071*7c478bd9Sstevel@tonic-gate 		fd = pollfdp[i].fd;
2072*7c478bd9Sstevel@tonic-gate 		/*
2073*7c478bd9Sstevel@tonic-gate 		 * We also filter out the illegal poll events in the event
2074*7c478bd9Sstevel@tonic-gate 		 * field for the cached poll list/set.
2075*7c478bd9Sstevel@tonic-gate 		 */
2076*7c478bd9Sstevel@tonic-gate 		if (pollfdp[i].events & ~VALID_POLL_EVENTS) {
2077*7c478bd9Sstevel@tonic-gate 			newfdlist[i].events = pollfdp[i].events =
2078*7c478bd9Sstevel@tonic-gate 			pollfdp[i].events & VALID_POLL_EVENTS;
2079*7c478bd9Sstevel@tonic-gate 		}
2080*7c478bd9Sstevel@tonic-gate 		if (fd < 0) {
2081*7c478bd9Sstevel@tonic-gate 			pollfdp[i].revents = 0;
2082*7c478bd9Sstevel@tonic-gate 			continue;
2083*7c478bd9Sstevel@tonic-gate 		}
2084*7c478bd9Sstevel@tonic-gate 		if ((fp = getf(fd)) == NULL) {
2085*7c478bd9Sstevel@tonic-gate 			pollfdp[i].revents = POLLNVAL;
2086*7c478bd9Sstevel@tonic-gate 			/*
2087*7c478bd9Sstevel@tonic-gate 			 * invalidate this cache entry in the cached poll list
2088*7c478bd9Sstevel@tonic-gate 			 */
2089*7c478bd9Sstevel@tonic-gate 			newfdlist[i].fd = -1;
2090*7c478bd9Sstevel@tonic-gate 			(*fdcntp)++;
2091*7c478bd9Sstevel@tonic-gate 			continue;
2092*7c478bd9Sstevel@tonic-gate 		}
2093*7c478bd9Sstevel@tonic-gate 		/*
2094*7c478bd9Sstevel@tonic-gate 		 * cache this fd.
2095*7c478bd9Sstevel@tonic-gate 		 */
2096*7c478bd9Sstevel@tonic-gate 		error = pcache_insert(ps, fp, &pollfdp[i], fdcntp, (ssize_t)i,
2097*7c478bd9Sstevel@tonic-gate 		    which);
2098*7c478bd9Sstevel@tonic-gate 		releasef(fd);
2099*7c478bd9Sstevel@tonic-gate 		if (error) {
2100*7c478bd9Sstevel@tonic-gate 			/*
2101*7c478bd9Sstevel@tonic-gate 			 * Here we are half way through caching a new
2102*7c478bd9Sstevel@tonic-gate 			 * poll list. Undo every thing.
2103*7c478bd9Sstevel@tonic-gate 			 */
2104*7c478bd9Sstevel@tonic-gate 			pcacheset_remove_list(ps, pollfdp, 0, i, which, 0);
2105*7c478bd9Sstevel@tonic-gate 			kmem_free(newfdlist, ps->ps_nfds * sizeof (pollfd_t));
2106*7c478bd9Sstevel@tonic-gate 			pcacheset[which].pcs_pollfd = NULL;
2107*7c478bd9Sstevel@tonic-gate 			pcacheset[which].pcs_usradr = NULL;
2108*7c478bd9Sstevel@tonic-gate 			break;
2109*7c478bd9Sstevel@tonic-gate 		}
2110*7c478bd9Sstevel@tonic-gate 	}
2111*7c478bd9Sstevel@tonic-gate 	return (error);
2112*7c478bd9Sstevel@tonic-gate }
2113*7c478bd9Sstevel@tonic-gate 
2114*7c478bd9Sstevel@tonic-gate /*
2115*7c478bd9Sstevel@tonic-gate  * called by pollcacheclean() to set the fp NULL. It also sets polled events
2116*7c478bd9Sstevel@tonic-gate  * in pcacheset entries to a special events 'POLLCLOSED'. Do a pollwakeup to
2117*7c478bd9Sstevel@tonic-gate  * wake any sleeping poller, then remove the polldat from the driver.
2118*7c478bd9Sstevel@tonic-gate  * The routine is called with ps_pcachelock held.
2119*7c478bd9Sstevel@tonic-gate  */
2120*7c478bd9Sstevel@tonic-gate void
2121*7c478bd9Sstevel@tonic-gate pcache_clean_entry(pollstate_t *ps, int fd)
2122*7c478bd9Sstevel@tonic-gate {
2123*7c478bd9Sstevel@tonic-gate 	pollcache_t	*pcp;
2124*7c478bd9Sstevel@tonic-gate 	polldat_t	*pdp;
2125*7c478bd9Sstevel@tonic-gate 	int		i;
2126*7c478bd9Sstevel@tonic-gate 
2127*7c478bd9Sstevel@tonic-gate 	ASSERT(ps != NULL);
2128*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2129*7c478bd9Sstevel@tonic-gate 	pcp = ps->ps_pcache;
2130*7c478bd9Sstevel@tonic-gate 	ASSERT(pcp);
2131*7c478bd9Sstevel@tonic-gate 	pdp = pcache_lookup_fd(pcp, fd);
2132*7c478bd9Sstevel@tonic-gate 	ASSERT(pdp != NULL);
2133*7c478bd9Sstevel@tonic-gate 	/*
2134*7c478bd9Sstevel@tonic-gate 	 * the corresponding fpollinfo in fi_list has been removed by
2135*7c478bd9Sstevel@tonic-gate 	 * a close on this fd. Reset the cached fp ptr here.
2136*7c478bd9Sstevel@tonic-gate 	 */
2137*7c478bd9Sstevel@tonic-gate 	pdp->pd_fp = NULL;
2138*7c478bd9Sstevel@tonic-gate 	/*
2139*7c478bd9Sstevel@tonic-gate 	 * XXX - This routine also touches data in pcacheset struct.
2140*7c478bd9Sstevel@tonic-gate 	 *
2141*7c478bd9Sstevel@tonic-gate 	 * set the event in cached poll lists to POLLCLOSED. This invalidate
2142*7c478bd9Sstevel@tonic-gate 	 * the cached poll fd entry in that poll list, which will force a
2143*7c478bd9Sstevel@tonic-gate 	 * removal of this cached entry in next poll(). The cleanup is done
2144*7c478bd9Sstevel@tonic-gate 	 * at the removal time.
2145*7c478bd9Sstevel@tonic-gate 	 */
2146*7c478bd9Sstevel@tonic-gate 	ASSERT(pdp->pd_ref != NULL);
2147*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < ps->ps_nsets; i++) {
2148*7c478bd9Sstevel@tonic-gate 		xref_t		*refp;
2149*7c478bd9Sstevel@tonic-gate 		pollcacheset_t	*pcsp;
2150*7c478bd9Sstevel@tonic-gate 
2151*7c478bd9Sstevel@tonic-gate 		refp = &pdp->pd_ref[i];
2152*7c478bd9Sstevel@tonic-gate 		if (refp->xf_refcnt) {
2153*7c478bd9Sstevel@tonic-gate 			ASSERT(refp->xf_position >= 0);
2154*7c478bd9Sstevel@tonic-gate 			pcsp = &ps->ps_pcacheset[i];
2155*7c478bd9Sstevel@tonic-gate 			if (refp->xf_refcnt == 1) {
2156*7c478bd9Sstevel@tonic-gate 				pcsp->pcs_pollfd[refp->xf_position].events =
2157*7c478bd9Sstevel@tonic-gate 				    (short)POLLCLOSED;
2158*7c478bd9Sstevel@tonic-gate 			}
2159*7c478bd9Sstevel@tonic-gate 			if (refp->xf_refcnt > 1) {
2160*7c478bd9Sstevel@tonic-gate 				int	j;
2161*7c478bd9Sstevel@tonic-gate 				/*
2162*7c478bd9Sstevel@tonic-gate 				 * mark every matching entry in pcs_pollfd
2163*7c478bd9Sstevel@tonic-gate 				 */
2164*7c478bd9Sstevel@tonic-gate 				for (j = refp->xf_position;
2165*7c478bd9Sstevel@tonic-gate 				    j < pcsp->pcs_nfds; j++) {
2166*7c478bd9Sstevel@tonic-gate 					if (pcsp->pcs_pollfd[j].fd == fd) {
2167*7c478bd9Sstevel@tonic-gate 						pcsp->pcs_pollfd[j].events =
2168*7c478bd9Sstevel@tonic-gate 						    (short)POLLCLOSED;
2169*7c478bd9Sstevel@tonic-gate 					}
2170*7c478bd9Sstevel@tonic-gate 				}
2171*7c478bd9Sstevel@tonic-gate 			}
2172*7c478bd9Sstevel@tonic-gate 		}
2173*7c478bd9Sstevel@tonic-gate 	}
2174*7c478bd9Sstevel@tonic-gate 	if (pdp->pd_php) {
2175*7c478bd9Sstevel@tonic-gate 		pollwakeup(pdp->pd_php, POLLHUP);
2176*7c478bd9Sstevel@tonic-gate 		pollhead_delete(pdp->pd_php, pdp);
2177*7c478bd9Sstevel@tonic-gate 		pdp->pd_php = NULL;
2178*7c478bd9Sstevel@tonic-gate 	}
2179*7c478bd9Sstevel@tonic-gate }
2180*7c478bd9Sstevel@tonic-gate 
2181*7c478bd9Sstevel@tonic-gate /*
2182*7c478bd9Sstevel@tonic-gate  * This is the first time this thread has ever polled,
2183*7c478bd9Sstevel@tonic-gate  * so we have to create its pollstate structure.
2184*7c478bd9Sstevel@tonic-gate  * This will persist for the life of the thread,
2185*7c478bd9Sstevel@tonic-gate  * until it calls pollcleanup().
2186*7c478bd9Sstevel@tonic-gate  */
2187*7c478bd9Sstevel@tonic-gate pollstate_t *
2188*7c478bd9Sstevel@tonic-gate pollstate_create(void)
2189*7c478bd9Sstevel@tonic-gate {
2190*7c478bd9Sstevel@tonic-gate 	pollstate_t *ps;
2191*7c478bd9Sstevel@tonic-gate 
2192*7c478bd9Sstevel@tonic-gate 	ps = kmem_zalloc(sizeof (pollstate_t), KM_SLEEP);
2193*7c478bd9Sstevel@tonic-gate 	ps->ps_nsets = POLLFDSETS;
2194*7c478bd9Sstevel@tonic-gate 	ps->ps_pcacheset = pcacheset_create(ps->ps_nsets);
2195*7c478bd9Sstevel@tonic-gate 	return (ps);
2196*7c478bd9Sstevel@tonic-gate }
2197*7c478bd9Sstevel@tonic-gate 
2198*7c478bd9Sstevel@tonic-gate void
2199*7c478bd9Sstevel@tonic-gate pollstate_destroy(pollstate_t *ps)
2200*7c478bd9Sstevel@tonic-gate {
2201*7c478bd9Sstevel@tonic-gate 	if (ps->ps_pollfd != NULL) {
2202*7c478bd9Sstevel@tonic-gate 		kmem_free(ps->ps_pollfd, ps->ps_nfds * sizeof (pollfd_t));
2203*7c478bd9Sstevel@tonic-gate 		ps->ps_pollfd = NULL;
2204*7c478bd9Sstevel@tonic-gate 	}
2205*7c478bd9Sstevel@tonic-gate 	if (ps->ps_pcache != NULL) {
2206*7c478bd9Sstevel@tonic-gate 		pcache_destroy(ps->ps_pcache);
2207*7c478bd9Sstevel@tonic-gate 		ps->ps_pcache = NULL;
2208*7c478bd9Sstevel@tonic-gate 	}
2209*7c478bd9Sstevel@tonic-gate 	pcacheset_destroy(ps->ps_pcacheset, ps->ps_nsets);
2210*7c478bd9Sstevel@tonic-gate 	ps->ps_pcacheset = NULL;
2211*7c478bd9Sstevel@tonic-gate 	if (ps->ps_dpbuf != NULL) {
2212*7c478bd9Sstevel@tonic-gate 		kmem_free(ps->ps_dpbuf, ps->ps_dpbufsize * sizeof (pollfd_t));
2213*7c478bd9Sstevel@tonic-gate 		ps->ps_dpbuf = NULL;
2214*7c478bd9Sstevel@tonic-gate 	}
2215*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&ps->ps_lock);
2216*7c478bd9Sstevel@tonic-gate 	kmem_free(ps, sizeof (pollstate_t));
2217*7c478bd9Sstevel@tonic-gate }
2218*7c478bd9Sstevel@tonic-gate 
2219*7c478bd9Sstevel@tonic-gate /*
2220*7c478bd9Sstevel@tonic-gate  * We are holding the appropriate uf_lock entering this routine.
2221*7c478bd9Sstevel@tonic-gate  * Bump up the ps_busy count to prevent the thread from exiting.
2222*7c478bd9Sstevel@tonic-gate  */
2223*7c478bd9Sstevel@tonic-gate void
2224*7c478bd9Sstevel@tonic-gate pollblockexit(fpollinfo_t *fpip)
2225*7c478bd9Sstevel@tonic-gate {
2226*7c478bd9Sstevel@tonic-gate 	for (; fpip; fpip = fpip->fp_next) {
2227*7c478bd9Sstevel@tonic-gate 		pollcache_t *pcp = fpip->fp_thread->t_pollstate->ps_pcache;
2228*7c478bd9Sstevel@tonic-gate 
2229*7c478bd9Sstevel@tonic-gate 		mutex_enter(&pcp->pc_no_exit);
2230*7c478bd9Sstevel@tonic-gate 		pcp->pc_busy++;  /* prevents exit()'s */
2231*7c478bd9Sstevel@tonic-gate 		mutex_exit(&pcp->pc_no_exit);
2232*7c478bd9Sstevel@tonic-gate 	}
2233*7c478bd9Sstevel@tonic-gate }
2234*7c478bd9Sstevel@tonic-gate 
2235*7c478bd9Sstevel@tonic-gate /*
2236*7c478bd9Sstevel@tonic-gate  * Complete phase 2 of cached poll fd cleanup. Call pcache_clean_entry to mark
2237*7c478bd9Sstevel@tonic-gate  * the pcacheset events field POLLCLOSED to force the next poll() to remove
2238*7c478bd9Sstevel@tonic-gate  * this cache entry. We can't clean the polldat entry clean up here because
2239*7c478bd9Sstevel@tonic-gate  * lwp block in poll() needs the info to return. Wakeup anyone blocked in
2240*7c478bd9Sstevel@tonic-gate  * poll and let exiting lwp go. No lock is help upon entry. So it's OK for
2241*7c478bd9Sstevel@tonic-gate  * pcache_clean_entry to call pollwakeup().
2242*7c478bd9Sstevel@tonic-gate  */
2243*7c478bd9Sstevel@tonic-gate void
2244*7c478bd9Sstevel@tonic-gate pollcacheclean(fpollinfo_t *fip, int fd)
2245*7c478bd9Sstevel@tonic-gate {
2246*7c478bd9Sstevel@tonic-gate 	struct fpollinfo	*fpip, *fpip2;
2247*7c478bd9Sstevel@tonic-gate 
2248*7c478bd9Sstevel@tonic-gate 	fpip = fip;
2249*7c478bd9Sstevel@tonic-gate 	while (fpip) {
2250*7c478bd9Sstevel@tonic-gate 		pollstate_t *ps = fpip->fp_thread->t_pollstate;
2251*7c478bd9Sstevel@tonic-gate 		pollcache_t *pcp = ps->ps_pcache;
2252*7c478bd9Sstevel@tonic-gate 
2253*7c478bd9Sstevel@tonic-gate 		mutex_enter(&ps->ps_lock);
2254*7c478bd9Sstevel@tonic-gate 		pcache_clean_entry(ps, fd);
2255*7c478bd9Sstevel@tonic-gate 		mutex_exit(&ps->ps_lock);
2256*7c478bd9Sstevel@tonic-gate 		mutex_enter(&pcp->pc_no_exit);
2257*7c478bd9Sstevel@tonic-gate 		pcp->pc_busy--;
2258*7c478bd9Sstevel@tonic-gate 		if (pcp->pc_busy == 0) {
2259*7c478bd9Sstevel@tonic-gate 			/*
2260*7c478bd9Sstevel@tonic-gate 			 * Wakeup the thread waiting in
2261*7c478bd9Sstevel@tonic-gate 			 * thread_exit().
2262*7c478bd9Sstevel@tonic-gate 			 */
2263*7c478bd9Sstevel@tonic-gate 			cv_signal(&pcp->pc_busy_cv);
2264*7c478bd9Sstevel@tonic-gate 		}
2265*7c478bd9Sstevel@tonic-gate 		mutex_exit(&pcp->pc_no_exit);
2266*7c478bd9Sstevel@tonic-gate 
2267*7c478bd9Sstevel@tonic-gate 		fpip2 = fpip;
2268*7c478bd9Sstevel@tonic-gate 		fpip = fpip->fp_next;
2269*7c478bd9Sstevel@tonic-gate 		kmem_free(fpip2, sizeof (fpollinfo_t));
2270*7c478bd9Sstevel@tonic-gate 	}
2271*7c478bd9Sstevel@tonic-gate }
2272*7c478bd9Sstevel@tonic-gate 
2273*7c478bd9Sstevel@tonic-gate /*
2274*7c478bd9Sstevel@tonic-gate  * one of the cache line's counter is wrapping around. Reset all cache line
2275*7c478bd9Sstevel@tonic-gate  * counters to zero except one. This is simplistic, but probably works
2276*7c478bd9Sstevel@tonic-gate  * effectively.
2277*7c478bd9Sstevel@tonic-gate  */
2278*7c478bd9Sstevel@tonic-gate void
2279*7c478bd9Sstevel@tonic-gate pcacheset_reset_count(pollstate_t *ps, int index)
2280*7c478bd9Sstevel@tonic-gate {
2281*7c478bd9Sstevel@tonic-gate 	int	i;
2282*7c478bd9Sstevel@tonic-gate 
2283*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2284*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < ps->ps_nsets; i++) {
2285*7c478bd9Sstevel@tonic-gate 		if (ps->ps_pcacheset[i].pcs_pollfd != NULL) {
2286*7c478bd9Sstevel@tonic-gate 			ps->ps_pcacheset[i].pcs_count = 0;
2287*7c478bd9Sstevel@tonic-gate 		}
2288*7c478bd9Sstevel@tonic-gate 	}
2289*7c478bd9Sstevel@tonic-gate 	ps->ps_pcacheset[index].pcs_count = 1;
2290*7c478bd9Sstevel@tonic-gate }
2291*7c478bd9Sstevel@tonic-gate 
2292*7c478bd9Sstevel@tonic-gate /*
2293*7c478bd9Sstevel@tonic-gate  * this routine implements poll cache list replacement policy.
2294*7c478bd9Sstevel@tonic-gate  * It is currently choose the "least used".
2295*7c478bd9Sstevel@tonic-gate  */
2296*7c478bd9Sstevel@tonic-gate int
2297*7c478bd9Sstevel@tonic-gate pcacheset_replace(pollstate_t *ps)
2298*7c478bd9Sstevel@tonic-gate {
2299*7c478bd9Sstevel@tonic-gate 	int i;
2300*7c478bd9Sstevel@tonic-gate 	int index = 0;
2301*7c478bd9Sstevel@tonic-gate 
2302*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2303*7c478bd9Sstevel@tonic-gate 	for (i = 1; i < ps->ps_nsets; i++) {
2304*7c478bd9Sstevel@tonic-gate 		if (ps->ps_pcacheset[index].pcs_count >
2305*7c478bd9Sstevel@tonic-gate 		    ps->ps_pcacheset[i].pcs_count) {
2306*7c478bd9Sstevel@tonic-gate 			index = i;
2307*7c478bd9Sstevel@tonic-gate 		}
2308*7c478bd9Sstevel@tonic-gate 	}
2309*7c478bd9Sstevel@tonic-gate 	ps->ps_pcacheset[index].pcs_count = 0;
2310*7c478bd9Sstevel@tonic-gate 	return (index);
2311*7c478bd9Sstevel@tonic-gate }
2312*7c478bd9Sstevel@tonic-gate 
/*
 * This routine is called by strclose to remove any remaining polldat
 * structures on the pollhead list of the device being closed. There are
 * two reasons why polldat structures can still be on the pollhead list:
 *
 * (1) The layered device (e.g. the console driver).
 * In this case, the existence of a polldat implies that the thread putting
 * the polldat on this list has not exited yet. Before the thread exits, it
 * will have to hold this pollhead lock to remove the polldat. So holding the
 * pollhead lock here effectively prevents the thread which put the polldat
 * on this list from exiting.
 *
 * (2) /dev/poll.
 * When a polled fd is cached in /dev/poll, its polldat will remain on the
 * pollhead list if the process has not done a POLLREMOVE before closing the
 * polled fd. We just unlink it here.
 */
void
pollhead_clean(pollhead_t *php)
{
	polldat_t	*pdp;

	/*
	 * In case(1), while we must prevent the thread in question from
	 * exiting, we must also obey the proper locking order, i.e.
	 * (ps_lock -> phlock).
	 */
	PH_ENTER(php);
	while (php->ph_list != NULL) {
		pollstate_t	*ps;
		pollcache_t	*pcp;

		pdp = php->ph_list;
		ASSERT(pdp->pd_php == php);
		if (pdp->pd_thread == NULL) {
			/*
			 * This is case(2). Since the ph_lock is sufficient
			 * to synchronize this lwp with any other /dev/poll
			 * lwp, just unlink the polldat.
			 */
			php->ph_list = pdp->pd_next;
			pdp->pd_php = NULL;
			pdp->pd_next = NULL;
			continue;
		}
		ps = pdp->pd_thread->t_pollstate;
		ASSERT(ps != NULL);
		pcp = pdp->pd_pcache;
		ASSERT(pcp != NULL);
		/* hold pc_busy so the polling thread cannot exit on us */
		mutex_enter(&pcp->pc_no_exit);
		pcp->pc_busy++;  /* prevents exit()'s */
		mutex_exit(&pcp->pc_no_exit);
		/*
		 * Now get the locks in proper order to avoid deadlock.
		 */
		PH_EXIT(php);
		mutex_enter(&ps->ps_lock);
		/*
		 * while we dropped the pollhead lock, the element could be
		 * taken off the list already.  Only unlink it if it is
		 * still the head and still bound to this pollhead.
		 */
		PH_ENTER(php);
		if (pdp->pd_php == php) {
			ASSERT(pdp == php->ph_list);
			php->ph_list = pdp->pd_next;
			pdp->pd_php = NULL;
			pdp->pd_next = NULL;
		}
		PH_EXIT(php);
		mutex_exit(&ps->ps_lock);
		/* release our hold; last one out wakes the exiting thread */
		mutex_enter(&pcp->pc_no_exit);
		pcp->pc_busy--;
		if (pcp->pc_busy == 0) {
			/*
			 * Wakeup the thread waiting in
			 * thread_exit().
			 */
			cv_signal(&pcp->pc_busy_cv);
		}
		mutex_exit(&pcp->pc_no_exit);
		PH_ENTER(php);
	}
	PH_EXIT(php);
}
2397*7c478bd9Sstevel@tonic-gate 
2398*7c478bd9Sstevel@tonic-gate /*
2399*7c478bd9Sstevel@tonic-gate  * The remove_list is called to cleanup a partially cached 'current' list or
2400*7c478bd9Sstevel@tonic-gate  * to remove a partial list which is no longer cached. The flag value of 1
2401*7c478bd9Sstevel@tonic-gate  * indicates the second case.
2402*7c478bd9Sstevel@tonic-gate  */
2403*7c478bd9Sstevel@tonic-gate void
2404*7c478bd9Sstevel@tonic-gate pcacheset_remove_list(pollstate_t *ps, pollfd_t *pollfdp, int start, int end,
2405*7c478bd9Sstevel@tonic-gate     int cacheindex, int flag)
2406*7c478bd9Sstevel@tonic-gate {
2407*7c478bd9Sstevel@tonic-gate 	int i;
2408*7c478bd9Sstevel@tonic-gate 
2409*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2410*7c478bd9Sstevel@tonic-gate 	for (i = start; i < end; i++) {
2411*7c478bd9Sstevel@tonic-gate 		if ((pollfdp[i].fd >= 0) &&
2412*7c478bd9Sstevel@tonic-gate 		    (flag || !(pollfdp[i].revents & POLLNVAL))) {
2413*7c478bd9Sstevel@tonic-gate 			if (pcache_delete_fd(ps, pollfdp[i].fd, i, cacheindex,
2414*7c478bd9Sstevel@tonic-gate 			    (uint_t)pollfdp[i].events)) {
2415*7c478bd9Sstevel@tonic-gate 				int j;
2416*7c478bd9Sstevel@tonic-gate 				int fd = pollfdp[i].fd;
2417*7c478bd9Sstevel@tonic-gate 
2418*7c478bd9Sstevel@tonic-gate 				for (j = i + 1; j < end; j++) {
2419*7c478bd9Sstevel@tonic-gate 					if (pollfdp[j].fd == fd) {
2420*7c478bd9Sstevel@tonic-gate 						pcache_update_xref(
2421*7c478bd9Sstevel@tonic-gate 						    ps->ps_pcache, fd,
2422*7c478bd9Sstevel@tonic-gate 						    (ssize_t)j, cacheindex);
2423*7c478bd9Sstevel@tonic-gate 						break;
2424*7c478bd9Sstevel@tonic-gate 					}
2425*7c478bd9Sstevel@tonic-gate 				}
2426*7c478bd9Sstevel@tonic-gate 				ASSERT(j <= end);
2427*7c478bd9Sstevel@tonic-gate 			}
2428*7c478bd9Sstevel@tonic-gate 		}
2429*7c478bd9Sstevel@tonic-gate 	}
2430*7c478bd9Sstevel@tonic-gate }
2431*7c478bd9Sstevel@tonic-gate 
2432*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
2433*7c478bd9Sstevel@tonic-gate 
2434*7c478bd9Sstevel@tonic-gate #include<sys/strsubr.h>
2435*7c478bd9Sstevel@tonic-gate /*
2436*7c478bd9Sstevel@tonic-gate  * make sure curthread is not on anyone's pollhead list any more.
2437*7c478bd9Sstevel@tonic-gate  */
2438*7c478bd9Sstevel@tonic-gate static void
2439*7c478bd9Sstevel@tonic-gate pollcheckphlist()
2440*7c478bd9Sstevel@tonic-gate {
2441*7c478bd9Sstevel@tonic-gate 	int i;
2442*7c478bd9Sstevel@tonic-gate 	file_t *fp;
2443*7c478bd9Sstevel@tonic-gate 	uf_entry_t *ufp;
2444*7c478bd9Sstevel@tonic-gate 	uf_info_t *fip = P_FINFO(curproc);
2445*7c478bd9Sstevel@tonic-gate 	struct stdata *stp;
2446*7c478bd9Sstevel@tonic-gate 	polldat_t *pdp;
2447*7c478bd9Sstevel@tonic-gate 
2448*7c478bd9Sstevel@tonic-gate 	mutex_enter(&fip->fi_lock);
2449*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < fip->fi_nfiles; i++) {
2450*7c478bd9Sstevel@tonic-gate 		UF_ENTER(ufp, fip, i);
2451*7c478bd9Sstevel@tonic-gate 		if ((fp = ufp->uf_file) != NULL) {
2452*7c478bd9Sstevel@tonic-gate 			if ((stp = fp->f_vnode->v_stream) != NULL) {
2453*7c478bd9Sstevel@tonic-gate 				PH_ENTER(&stp->sd_pollist);
2454*7c478bd9Sstevel@tonic-gate 				pdp = stp->sd_pollist.ph_list;
2455*7c478bd9Sstevel@tonic-gate 				while (pdp) {
2456*7c478bd9Sstevel@tonic-gate 					ASSERT(pdp->pd_thread != curthread);
2457*7c478bd9Sstevel@tonic-gate 					pdp = pdp->pd_next;
2458*7c478bd9Sstevel@tonic-gate 				}
2459*7c478bd9Sstevel@tonic-gate 				PH_EXIT(&stp->sd_pollist);
2460*7c478bd9Sstevel@tonic-gate 			}
2461*7c478bd9Sstevel@tonic-gate 		}
2462*7c478bd9Sstevel@tonic-gate 		UF_EXIT(ufp);
2463*7c478bd9Sstevel@tonic-gate 	}
2464*7c478bd9Sstevel@tonic-gate 	mutex_exit(&fip->fi_lock);
2465*7c478bd9Sstevel@tonic-gate }
2466*7c478bd9Sstevel@tonic-gate 
/*
 * For a resolved set poll list, the xref info in the pcache must be
 * consistent with this poll list.  Returns 1 so the call can sit inside
 * an ASSERT().
 */
static int
pollcheckxref(pollstate_t *ps, int cacheindex)
{
	pollfd_t *pollfdp = ps->ps_pcacheset[cacheindex].pcs_pollfd;
	pollcache_t *pcp = ps->ps_pcache;
	polldat_t *pdp;
	int	i;
	xref_t	*refp;

	for (i = 0; i < ps->ps_pcacheset[cacheindex].pcs_nfds; i++) {
		/* negative fd slots carry no cache entry; skip them */
		if (pollfdp[i].fd < 0) {
			continue;
		}
		pdp = pcache_lookup_fd(pcp, pollfdp[i].fd);
		ASSERT(pdp != NULL);
		ASSERT(pdp->pd_ref != NULL);
		refp = &pdp->pd_ref[cacheindex];
		if (refp->xf_position >= 0) {
			ASSERT(refp->xf_refcnt >= 1);
			/* xf_position must index a slot holding this fd */
			ASSERT(pollfdp[refp->xf_position].fd == pdp->pd_fd);
			if (refp->xf_refcnt > 1) {
				int	j;
				int	count = 0;

				/*
				 * Duplicated fd: the number of occurrences
				 * from xf_position onward must match the
				 * recorded reference count.
				 */
				for (j = refp->xf_position;
				    j < ps->ps_pcacheset[cacheindex].pcs_nfds;
				    j++) {
					if (pollfdp[j].fd == pdp->pd_fd) {
						count++;
					}
				}
				ASSERT(count == refp->xf_refcnt);
			}
		}
	}
	return (1);
}
2508*7c478bd9Sstevel@tonic-gate 
2509*7c478bd9Sstevel@tonic-gate /*
2510*7c478bd9Sstevel@tonic-gate  * For every cached pollfd, its polldat struct should be consistent with
2511*7c478bd9Sstevel@tonic-gate  * what is in the pcacheset lists.
2512*7c478bd9Sstevel@tonic-gate  */
2513*7c478bd9Sstevel@tonic-gate static void
2514*7c478bd9Sstevel@tonic-gate checkpolldat(pollstate_t *ps)
2515*7c478bd9Sstevel@tonic-gate {
2516*7c478bd9Sstevel@tonic-gate 	pollcache_t	*pcp = ps->ps_pcache;
2517*7c478bd9Sstevel@tonic-gate 	polldat_t	**hashtbl;
2518*7c478bd9Sstevel@tonic-gate 	int		i;
2519*7c478bd9Sstevel@tonic-gate 
2520*7c478bd9Sstevel@tonic-gate 	hashtbl = pcp->pc_hash;
2521*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < pcp->pc_hashsize; i++) {
2522*7c478bd9Sstevel@tonic-gate 		polldat_t	*pdp;
2523*7c478bd9Sstevel@tonic-gate 
2524*7c478bd9Sstevel@tonic-gate 		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
2525*7c478bd9Sstevel@tonic-gate 			ASSERT(pdp->pd_ref != NULL);
2526*7c478bd9Sstevel@tonic-gate 			if (pdp->pd_count > 0) {
2527*7c478bd9Sstevel@tonic-gate 				xref_t		*refp;
2528*7c478bd9Sstevel@tonic-gate 				int		j;
2529*7c478bd9Sstevel@tonic-gate 				pollcacheset_t	*pcsp;
2530*7c478bd9Sstevel@tonic-gate 				pollfd_t	*pollfd;
2531*7c478bd9Sstevel@tonic-gate 
2532*7c478bd9Sstevel@tonic-gate 				for (j = 0; j < ps->ps_nsets; j++) {
2533*7c478bd9Sstevel@tonic-gate 					refp = &pdp->pd_ref[j];
2534*7c478bd9Sstevel@tonic-gate 					if (refp->xf_refcnt > 0) {
2535*7c478bd9Sstevel@tonic-gate 						pcsp = &ps->ps_pcacheset[j];
2536*7c478bd9Sstevel@tonic-gate 				ASSERT(refp->xf_position < pcsp->pcs_nfds);
2537*7c478bd9Sstevel@tonic-gate 						pollfd = pcsp->pcs_pollfd;
2538*7c478bd9Sstevel@tonic-gate 			ASSERT(pdp->pd_fd == pollfd[refp->xf_position].fd);
2539*7c478bd9Sstevel@tonic-gate 					}
2540*7c478bd9Sstevel@tonic-gate 				}
2541*7c478bd9Sstevel@tonic-gate 			}
2542*7c478bd9Sstevel@tonic-gate 		}
2543*7c478bd9Sstevel@tonic-gate 	}
2544*7c478bd9Sstevel@tonic-gate }
2545*7c478bd9Sstevel@tonic-gate 
2546*7c478bd9Sstevel@tonic-gate /*
2547*7c478bd9Sstevel@tonic-gate  * every wfd element on ph_list must have a corresponding fpollinfo on the
2548*7c478bd9Sstevel@tonic-gate  * uf_fpollinfo list. This is a variation of infpollinfo() w/o holding locks.
2549*7c478bd9Sstevel@tonic-gate  */
2550*7c478bd9Sstevel@tonic-gate void
2551*7c478bd9Sstevel@tonic-gate checkwfdlist(vnode_t *vp, fpollinfo_t *fpip)
2552*7c478bd9Sstevel@tonic-gate {
2553*7c478bd9Sstevel@tonic-gate 	stdata_t *stp;
2554*7c478bd9Sstevel@tonic-gate 	polldat_t *pdp;
2555*7c478bd9Sstevel@tonic-gate 	fpollinfo_t *fpip2;
2556*7c478bd9Sstevel@tonic-gate 
2557*7c478bd9Sstevel@tonic-gate 	if ((stp = vp->v_stream) == NULL) {
2558*7c478bd9Sstevel@tonic-gate 		return;
2559*7c478bd9Sstevel@tonic-gate 	}
2560*7c478bd9Sstevel@tonic-gate 	PH_ENTER(&stp->sd_pollist);
2561*7c478bd9Sstevel@tonic-gate 	for (pdp = stp->sd_pollist.ph_list; pdp; pdp = pdp->pd_next) {
2562*7c478bd9Sstevel@tonic-gate 		if (pdp->pd_thread->t_procp == curthread->t_procp) {
2563*7c478bd9Sstevel@tonic-gate 			for (fpip2 = fpip; fpip2; fpip2 = fpip2->fp_next) {
2564*7c478bd9Sstevel@tonic-gate 				if (pdp->pd_thread == fpip2->fp_thread) {
2565*7c478bd9Sstevel@tonic-gate 					break;
2566*7c478bd9Sstevel@tonic-gate 				}
2567*7c478bd9Sstevel@tonic-gate 			}
2568*7c478bd9Sstevel@tonic-gate 			ASSERT(fpip2 != NULL);
2569*7c478bd9Sstevel@tonic-gate 		}
2570*7c478bd9Sstevel@tonic-gate 	}
2571*7c478bd9Sstevel@tonic-gate 	PH_EXIT(&stp->sd_pollist);
2572*7c478bd9Sstevel@tonic-gate }
2573*7c478bd9Sstevel@tonic-gate 
2574*7c478bd9Sstevel@tonic-gate /*
2575*7c478bd9Sstevel@tonic-gate  * For each cached fd whose bit is not set in bitmap, its revents field in
2576*7c478bd9Sstevel@tonic-gate  * current poll list should be 0.
2577*7c478bd9Sstevel@tonic-gate  */
2578*7c478bd9Sstevel@tonic-gate static int
2579*7c478bd9Sstevel@tonic-gate pollcheckrevents(pollstate_t *ps, int begin, int end, int cacheindex)
2580*7c478bd9Sstevel@tonic-gate {
2581*7c478bd9Sstevel@tonic-gate 	pollcache_t	*pcp = ps->ps_pcache;
2582*7c478bd9Sstevel@tonic-gate 	pollfd_t	*pollfdp = ps->ps_pollfd;
2583*7c478bd9Sstevel@tonic-gate 	int		i;
2584*7c478bd9Sstevel@tonic-gate 
2585*7c478bd9Sstevel@tonic-gate 	for (i = begin; i < end; i++) {
2586*7c478bd9Sstevel@tonic-gate 		polldat_t	*pdp;
2587*7c478bd9Sstevel@tonic-gate 
2588*7c478bd9Sstevel@tonic-gate 		ASSERT(!BT_TEST(pcp->pc_bitmap, i));
2589*7c478bd9Sstevel@tonic-gate 		pdp = pcache_lookup_fd(pcp, i);
2590*7c478bd9Sstevel@tonic-gate 		if (pdp && pdp->pd_fp != NULL) {
2591*7c478bd9Sstevel@tonic-gate 			xref_t *refp;
2592*7c478bd9Sstevel@tonic-gate 			int entry;
2593*7c478bd9Sstevel@tonic-gate 
2594*7c478bd9Sstevel@tonic-gate 			ASSERT(pdp->pd_ref != NULL);
2595*7c478bd9Sstevel@tonic-gate 			refp = &pdp->pd_ref[cacheindex];
2596*7c478bd9Sstevel@tonic-gate 			if (refp->xf_refcnt == 0) {
2597*7c478bd9Sstevel@tonic-gate 				continue;
2598*7c478bd9Sstevel@tonic-gate 			}
2599*7c478bd9Sstevel@tonic-gate 			entry = refp->xf_position;
2600*7c478bd9Sstevel@tonic-gate 			ASSERT(entry >= 0);
2601*7c478bd9Sstevel@tonic-gate 			ASSERT(pollfdp[entry].revents == 0);
2602*7c478bd9Sstevel@tonic-gate 			if (refp->xf_refcnt > 1) {
2603*7c478bd9Sstevel@tonic-gate 				int j;
2604*7c478bd9Sstevel@tonic-gate 
2605*7c478bd9Sstevel@tonic-gate 				for (j = entry + 1; j < ps->ps_nfds; j++) {
2606*7c478bd9Sstevel@tonic-gate 					if (pollfdp[j].fd == i) {
2607*7c478bd9Sstevel@tonic-gate 						ASSERT(pollfdp[j].revents == 0);
2608*7c478bd9Sstevel@tonic-gate 					}
2609*7c478bd9Sstevel@tonic-gate 				}
2610*7c478bd9Sstevel@tonic-gate 			}
2611*7c478bd9Sstevel@tonic-gate 		}
2612*7c478bd9Sstevel@tonic-gate 	}
2613*7c478bd9Sstevel@tonic-gate 	return (1);
2614*7c478bd9Sstevel@tonic-gate }
2615*7c478bd9Sstevel@tonic-gate 
2616*7c478bd9Sstevel@tonic-gate #endif	/* DEBUG */
2617*7c478bd9Sstevel@tonic-gate 
2618*7c478bd9Sstevel@tonic-gate pollcache_t *
2619*7c478bd9Sstevel@tonic-gate pcache_alloc()
2620*7c478bd9Sstevel@tonic-gate {
2621*7c478bd9Sstevel@tonic-gate 	return (kmem_zalloc(sizeof (pollcache_t), KM_SLEEP));
2622*7c478bd9Sstevel@tonic-gate }
2623*7c478bd9Sstevel@tonic-gate 
2624*7c478bd9Sstevel@tonic-gate void
2625*7c478bd9Sstevel@tonic-gate pcache_create(pollcache_t *pcp, nfds_t nfds)
2626*7c478bd9Sstevel@tonic-gate {
2627*7c478bd9Sstevel@tonic-gate 	size_t	mapsize;
2628*7c478bd9Sstevel@tonic-gate 
2629*7c478bd9Sstevel@tonic-gate 	/*
2630*7c478bd9Sstevel@tonic-gate 	 * allocate enough bits for the poll fd list
2631*7c478bd9Sstevel@tonic-gate 	 */
2632*7c478bd9Sstevel@tonic-gate 	if ((mapsize = POLLMAPCHUNK) <= nfds) {
2633*7c478bd9Sstevel@tonic-gate 		mapsize = (nfds + POLLMAPCHUNK - 1) & ~(POLLMAPCHUNK - 1);
2634*7c478bd9Sstevel@tonic-gate 	}
2635*7c478bd9Sstevel@tonic-gate 	pcp->pc_bitmap = kmem_zalloc((mapsize / BT_NBIPUL) * sizeof (ulong_t),
2636*7c478bd9Sstevel@tonic-gate 	    KM_SLEEP);
2637*7c478bd9Sstevel@tonic-gate 	pcp->pc_mapsize = mapsize;
2638*7c478bd9Sstevel@tonic-gate 	/*
2639*7c478bd9Sstevel@tonic-gate 	 * The hash size is at least POLLHASHCHUNKSZ. If user polls a large
2640*7c478bd9Sstevel@tonic-gate 	 * number of fd to start with, allocate a bigger hash table (to the
2641*7c478bd9Sstevel@tonic-gate 	 * nearest multiple of POLLHASHCHUNKSZ) because dynamically growing a
2642*7c478bd9Sstevel@tonic-gate 	 * hash table is expensive.
2643*7c478bd9Sstevel@tonic-gate 	 */
2644*7c478bd9Sstevel@tonic-gate 	if (nfds < POLLHASHCHUNKSZ) {
2645*7c478bd9Sstevel@tonic-gate 		pcp->pc_hashsize = POLLHASHCHUNKSZ;
2646*7c478bd9Sstevel@tonic-gate 	} else {
2647*7c478bd9Sstevel@tonic-gate 		pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) &
2648*7c478bd9Sstevel@tonic-gate 		    ~(POLLHASHCHUNKSZ - 1);
2649*7c478bd9Sstevel@tonic-gate 	}
2650*7c478bd9Sstevel@tonic-gate 	pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *),
2651*7c478bd9Sstevel@tonic-gate 	    KM_SLEEP);
2652*7c478bd9Sstevel@tonic-gate }
2653*7c478bd9Sstevel@tonic-gate 
2654*7c478bd9Sstevel@tonic-gate void
2655*7c478bd9Sstevel@tonic-gate pcache_destroy(pollcache_t *pcp)
2656*7c478bd9Sstevel@tonic-gate {
2657*7c478bd9Sstevel@tonic-gate 	polldat_t	**hashtbl;
2658*7c478bd9Sstevel@tonic-gate 	int i;
2659*7c478bd9Sstevel@tonic-gate 
2660*7c478bd9Sstevel@tonic-gate 	hashtbl = pcp->pc_hash;
2661*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < pcp->pc_hashsize; i++) {
2662*7c478bd9Sstevel@tonic-gate 		if (hashtbl[i] != NULL) {
2663*7c478bd9Sstevel@tonic-gate 			polldat_t *pdp, *pdp2;
2664*7c478bd9Sstevel@tonic-gate 
2665*7c478bd9Sstevel@tonic-gate 			pdp = hashtbl[i];
2666*7c478bd9Sstevel@tonic-gate 			while (pdp != NULL) {
2667*7c478bd9Sstevel@tonic-gate 				pdp2 = pdp->pd_hashnext;
2668*7c478bd9Sstevel@tonic-gate 				if (pdp->pd_ref != NULL) {
2669*7c478bd9Sstevel@tonic-gate 					kmem_free(pdp->pd_ref, sizeof (xref_t) *
2670*7c478bd9Sstevel@tonic-gate 					    pdp->pd_nsets);
2671*7c478bd9Sstevel@tonic-gate 				}
2672*7c478bd9Sstevel@tonic-gate 				kmem_free(pdp, sizeof (polldat_t));
2673*7c478bd9Sstevel@tonic-gate 				pdp = pdp2;
2674*7c478bd9Sstevel@tonic-gate 				pcp->pc_fdcount--;
2675*7c478bd9Sstevel@tonic-gate 			}
2676*7c478bd9Sstevel@tonic-gate 		}
2677*7c478bd9Sstevel@tonic-gate 	}
2678*7c478bd9Sstevel@tonic-gate 	ASSERT(pcp->pc_fdcount == 0);
2679*7c478bd9Sstevel@tonic-gate 	kmem_free(pcp->pc_hash, sizeof (polldat_t *) * pcp->pc_hashsize);
2680*7c478bd9Sstevel@tonic-gate 	kmem_free(pcp->pc_bitmap,
2681*7c478bd9Sstevel@tonic-gate 	    sizeof (ulong_t) * (pcp->pc_mapsize/BT_NBIPUL));
2682*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&pcp->pc_no_exit);
2683*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&pcp->pc_lock);
2684*7c478bd9Sstevel@tonic-gate 	cv_destroy(&pcp->pc_cv);
2685*7c478bd9Sstevel@tonic-gate 	cv_destroy(&pcp->pc_busy_cv);
2686*7c478bd9Sstevel@tonic-gate 	kmem_free(pcp, sizeof (pollcache_t));
2687*7c478bd9Sstevel@tonic-gate }
2688*7c478bd9Sstevel@tonic-gate 
2689*7c478bd9Sstevel@tonic-gate pollcacheset_t *
2690*7c478bd9Sstevel@tonic-gate pcacheset_create(int nsets)
2691*7c478bd9Sstevel@tonic-gate {
2692*7c478bd9Sstevel@tonic-gate 	return (kmem_zalloc(sizeof (pollcacheset_t) * nsets, KM_SLEEP));
2693*7c478bd9Sstevel@tonic-gate }
2694*7c478bd9Sstevel@tonic-gate 
2695*7c478bd9Sstevel@tonic-gate void
2696*7c478bd9Sstevel@tonic-gate pcacheset_destroy(pollcacheset_t *pcsp, int nsets)
2697*7c478bd9Sstevel@tonic-gate {
2698*7c478bd9Sstevel@tonic-gate 	int i;
2699*7c478bd9Sstevel@tonic-gate 
2700*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < nsets; i++) {
2701*7c478bd9Sstevel@tonic-gate 		if (pcsp[i].pcs_pollfd != NULL) {
2702*7c478bd9Sstevel@tonic-gate 			kmem_free(pcsp[i].pcs_pollfd, pcsp[i].pcs_nfds *
2703*7c478bd9Sstevel@tonic-gate 			    sizeof (pollfd_t));
2704*7c478bd9Sstevel@tonic-gate 		}
2705*7c478bd9Sstevel@tonic-gate 	}
2706*7c478bd9Sstevel@tonic-gate 	kmem_free(pcsp, sizeof (pollcacheset_t) * nsets);
2707*7c478bd9Sstevel@tonic-gate }
2708*7c478bd9Sstevel@tonic-gate 
/*
 * Check each duplicated poll fd in the poll list. It may be necessary to
 * VOP_POLL the same fd again using different poll events. getf() has been
 * done by the caller. This routine returns 0 if it can successfully process
 * the entire poll fd list. It returns -1 if the underlying vnode has changed
 * during a VOP_POLL, in which case the caller has to repoll. It returns a
 * positive value if VOP_POLL failed.
 */
static int
plist_chkdupfd(file_t *fp, polldat_t *pdp, pollstate_t *psp, pollfd_t *pollfdp,
    int entry, int *fdcntp)
{
	int	i;
	int	fd;
	nfds_t	nfds = psp->ps_nfds;

	fd = pollfdp[entry].fd;
	/* scan the rest of the list for later occurrences of this fd */
	for (i = entry + 1; i < nfds; i++) {
		if (pollfdp[i].fd == fd) {
			if (pollfdp[i].events == pollfdp[entry].events) {
				/*
				 * Same events requested: reuse the revents
				 * already gathered for 'entry', counting
				 * this slot if anything fired.
				 */
				if ((pollfdp[i].revents =
				    pollfdp[entry].revents) != 0) {
					(*fdcntp)++;
				}
			} else {

				int	error;
				pollhead_t *php;
				pollcache_t *pcp = psp->ps_pcache;

				/*
				 * the events are different. VOP_POLL on this
				 * fd so that we don't miss any revents.
				 */
				php = NULL;
				ASSERT(curthread->t_pollcache == NULL);
				error = VOP_POLL(fp->f_vnode,
				    pollfdp[i].events, 0,
				    &pollfdp[i].revents, &php);
				if (error) {
					return (error);
				}
				/*
				 * layered devices (e.g. console driver)
				 * may change the vnode and thus the pollhead
				 * pointer out from underneath us.
				 */
				if (php != NULL && pdp->pd_php != NULL &&
				    php != pdp->pd_php) {
					pollhead_delete(pdp->pd_php, pdp);
					pdp->pd_php = php;
					pollhead_insert(php, pdp);
					/*
					 * We could have missed a wakeup on the
					 * new target device. Make sure the new
					 * target gets polled once.
					 */
					BT_SET(pcp->pc_bitmap, fd);
					return (-1);
				}
				if (pollfdp[i].revents) {
					(*fdcntp)++;
				}
			}
		}
	}
	return (0);
}
2777