xref: /titanic_50/usr/src/uts/common/syscall/poll.c (revision 554ff184129088135ad2643c1c9832174a17be88)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
28 /*	  All Rights Reserved  	*/
29 
30 /*
31  * Portions of this source code were derived from Berkeley 4.3 BSD
32  * under license from the Regents of the University of California.
33  */
34 
35 #pragma ident	"%Z%%M%	%I%	%E% SMI"
36 
37 #include <sys/param.h>
38 #include <sys/isa_defs.h>
39 #include <sys/types.h>
40 #include <sys/sysmacros.h>
41 #include <sys/user.h>
42 #include <sys/systm.h>
43 #include <sys/errno.h>
44 #include <sys/time.h>
45 #include <sys/vnode.h>
46 #include <sys/file.h>
47 #include <sys/mode.h>
48 #include <sys/proc.h>
49 #include <sys/uio.h>
50 #include <sys/poll_impl.h>
51 #include <sys/kmem.h>
52 #include <sys/cmn_err.h>
53 #include <sys/debug.h>
54 #include <sys/bitmap.h>
55 #include <sys/kstat.h>
56 #include <sys/rctl.h>
57 #include <sys/port_kernel.h>
58 #include <sys/schedctl.h>
59 
60 #define	NPHLOCKS	64	/* Number of locks; must be power of 2 */
61 #define	PHLOCKADDR(php)	&plocks[(((uintptr_t)(php)) >> 8) & (NPHLOCKS - 1)]
62 #define	PHLOCK(php)	PHLOCKADDR(php).pp_lock
63 #define	PH_ENTER(php)	mutex_enter(PHLOCK(php))
64 #define	PH_EXIT(php)	mutex_exit(PHLOCK(php))
65 #define	VALID_POLL_EVENTS	(POLLIN | POLLPRI | POLLOUT | POLLRDNORM \
66 	| POLLRDBAND | POLLWRBAND | POLLHUP | POLLERR | POLLNVAL)
67 
68 /*
69  * global counters to collect some stats
70  */
71 static struct {
72 	kstat_named_t	polllistmiss;	/* failed to find a cached poll list */
73 	kstat_named_t	pollcachehit;	/* list matched 100% w/ cached one */
74 	kstat_named_t	pollcachephit;	/* list matched < 100% w/ cached one */
75 	kstat_named_t	pollcachemiss;	/* every list entry is dif from cache */
76 } pollstats = {
77 	{ "polllistmiss",	KSTAT_DATA_UINT64 },
78 	{ "pollcachehit",	KSTAT_DATA_UINT64 },
79 	{ "pollcachephit",	KSTAT_DATA_UINT64 },
80 	{ "pollcachemiss",	KSTAT_DATA_UINT64 }
81 };
82 
83 kstat_named_t *pollstats_ptr = (kstat_named_t *)&pollstats;
84 uint_t pollstats_ndata = sizeof (pollstats) / sizeof (kstat_named_t);
85 
86 struct pplock	{
87 	kmutex_t	pp_lock;
88 	short		pp_flag;
89 	kcondvar_t	pp_wait_cv;
90 	int32_t		pp_pad;		/* to a nice round 16 bytes */
91 };
92 
93 static struct pplock plocks[NPHLOCKS];	/* Hash array of pollhead locks */
94 
95 #ifdef DEBUG
96 static int pollchecksanity(pollstate_t *, nfds_t);
97 static int pollcheckxref(pollstate_t *, int);
98 static void pollcheckphlist(void);
99 static int pollcheckrevents(pollstate_t *, int, int, int);
100 static void checkpolldat(pollstate_t *);
101 #endif	/* DEBUG */
102 static int plist_chkdupfd(file_t *, polldat_t *, pollstate_t *, pollfd_t *, int,
103     int *);
104 
105 /*
106  * Data structure overview:
107  * The per-thread poll state consists of
108  *	one pollstate_t
109  *	one pollcache_t
110  *	one bitmap with one event bit per fd
111  *	a (two-dimensional) hashed array of polldat_t structures - one entry
112  *	per fd
113  *
114  * This conglomerate of data structures interact with
115  *	the pollhead which is used by VOP_POLL and pollwakeup
116  *	(protected by the PHLOCK, cached array of plocks), and
117  *	the fpollinfo list hanging off the fi_list which is used to notify
118  *	poll when a cached fd is closed. This is protected by uf_lock.
119  *
120  * Invariants:
121  *	pd_php (pollhead pointer) is set iff (if and only if) the polldat
122  *	is on that pollhead. This is modified atomically under pc_lock.
123  *
124  *	pd_fp (file_t pointer) is set iff the thread is on the fpollinfo
125  *	list for that open file.
126  *	This is modified atomically under pc_lock.
127  *
128  *	pd_count is the sum (over all values of i) of pd_ref[i].xf_refcnt.
129  *	Iff pd_ref[i].xf_refcnt >= 1 then
130  *		ps_pcacheset[i].pcs_pollfd[pd_ref[i].xf_position].fd == pd_fd
131  *	Iff pd_ref[i].xf_refcnt > 1 then
132  *		In ps_pcacheset[i].pcs_pollfd between index
133  *		pd_ref[i].xf_position] and the end of the list
134  *		there are xf_refcnt entries with .fd == pd_fd
135  *
136  * Locking design:
137  * Whenever possible the design relies on the fact that the poll cache state
138  * is per thread; thus, for both poll and exit, it is self-synchronizing.
139  * Thus the key interactions where other threads access the state are:
140  *	pollwakeup (and polltime), and
141  *	close cleaning up the cached references to an open file
142  *
143  * The two key locks in poll proper are ps_lock and pc_lock.
144  *
145  * The ps_lock is used for synchronization between poll, (lwp_)exit and close
146  * to ensure that modifications to pollcacheset structure are serialized.
147  * This lock is held through most of poll() except where poll sleeps
148  * since there is little need to handle closes concurrently with the execution
149  * of poll.
150  * The pc_lock protects most of the fields in pollcache structure and polldat
151  * structures (which are accessed by poll, pollwakeup, and polltime)
152  * with the exception of fields that are only modified when only one thread
153  * can access this per-thread state.
154  * Those exceptions occur in poll when first allocating the per-thread state,
155  * when poll grows the number of polldat (never shrinks), and when
156  * exit/pollcleanup has ensured that there are no references from either
157  * pollheads or fpollinfo to the thread's poll state.
158  *
159  * The poll(2) system call is the only path in which ps_lock and pc_lock are both
160  * held, in that order. It needs ps_lock to synchronize with close and
161  * lwp_exit; and pc_lock with pollwakeup.
162  *
163  * The locking interaction between pc_lock and PHLOCK takes into account
164  * that poll acquires these locks in the order of pc_lock and then PHLOCK
165  * while pollwakeup does it in the reverse order. Thus pollwakeup implements
166  * deadlock avoidance by dropping the locks and reacquiring them in the
167  * reverse order. For this to work pollwakeup needs to prevent the thread
168  * from exiting and freeing all of the poll related state. This is done
169  * using
170  *	the pc_no_exit lock
171  *	the pc_busy counter
172  *	the pc_busy_cv condition variable
173  *
174  * The locking interaction between pc_lock and uf_lock has similar
175  * issues. Poll holds ps_lock and/or pc_lock across calls to getf/releasef
176  * which acquire uf_lock. The poll cleanup in close needs to hold uf_lock
177  * to prevent poll or exit from doing a delfpollinfo after which the thread
178  * might exit. But the cleanup needs to acquire pc_lock when modifying
179  * the poll cache state. The solution is to use pc_busy and do the close
180  * cleanup in two phases:
181  *	First close calls pollblockexit which increments pc_busy.
182  *	This prevents the per-thread poll related state from being freed.
183  *	Then close drops uf_lock and calls pollcacheclean.
184  *	This routine can then acquire pc_lock and remove any references
185  *	to the closing fd (as well as recording that it has been closed
186  *	so that a POLLNVAL can be generated even if the fd is reused before
187  *	poll has been woken up and checked getf() again).
188  *
189  * When removing a polled fd from poll cache, the fd is always removed
190  * from pollhead list first and then from fpollinfo list, i.e.,
191  * pollhead_delete() is called before delfpollinfo().
192  *
193  *
194  * Locking hierarchy:
195  *	pc_no_exit is a leaf level lock.
196  *	ps_lock is held when acquiring pc_lock (except when pollwakeup
197  *	acquires pc_lock).
198  *	pc_lock might be held when acquiring PHLOCK (pollhead_insert/
199  *	pollhead_delete)
200  *	pc_lock is always held (but this is not required)
201  *	when acquiring PHLOCK (in polladd/pollhead_delete and pollwakeup called
202  *	from pcache_clean_entry).
203  *	pc_lock is held across addfpollinfo/delfpollinfo which acquire
204  *	uf_lock.
205  *	pc_lock is held across getf/releasef which acquire uf_lock.
206  *	ps_lock might be held across getf/releasef which acquire uf_lock.
207  *	pollwakeup tries to acquire pc_lock while holding PHLOCK
208  *	but drops the locks and reacquire them in reverse order to avoid
209  *	deadlock.
210  *
211  * Note also that there is deadlock avoidance support for VOP_POLL routines
212  * and pollwakeup involving a file system or driver lock.
213  * See below.
214  */
215 
216 /*
217  * Deadlock avoidance support for VOP_POLL() routines.  This is
218  * sometimes necessary to prevent deadlock between polling threads
219  * (which hold poll locks on entry to xx_poll(), then acquire foo)
220  * and pollwakeup() threads (which hold foo, then acquire poll locks).
221  *
222  * pollunlock(void) releases whatever poll locks the current thread holds,
223  *	returning a cookie for use by pollrelock();
224  *
225  * pollrelock(cookie) reacquires previously dropped poll locks;
226  *
227  * polllock(php, mutex) does the common case: pollunlock(),
228  *	acquire the problematic mutex, pollrelock().
229  */
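/*
 * Illustrative sketch only (xsp, xx_mutex and xx_pollhead are hypothetical
 * driver names): a VOP_POLL routine that must take a lock which is also held
 * around its pollwakeup() calls would enter it via
 *
 *	polllock(&xsp->xx_pollhead, &xsp->xx_mutex);
 *	... examine device state, set *reventsp ...
 *	mutex_exit(&xsp->xx_mutex);
 *
 * rather than calling mutex_enter() directly, so that a pollwakeup() thread
 * already holding xx_mutex cannot deadlock against the poll locks held here.
 */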
230 int
231 pollunlock(void)
232 {
233 	pollcache_t *pcp;
234 	int lockstate = 0;
235 
236 	/*
237 	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
238 	 * If pollrelock()/pollunlock() is called as a result of poll(2),
239 	 * t_pollcache should be NULL.
240 	 */
241 	if (curthread->t_pollcache == NULL)
242 		pcp = curthread->t_pollstate->ps_pcache;
243 	else
244 		pcp = curthread->t_pollcache;
245 
246 	if (mutex_owned(&pcp->pc_lock)) {
247 		lockstate = 1;
248 		mutex_exit(&pcp->pc_lock);
249 	}
250 	return (lockstate);
251 }
252 
253 void
254 pollrelock(int lockstate)
255 {
256 	pollcache_t *pcp;
257 
258 	/*
259 	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
260 	 * If pollrelock()/pollunlock() is called as a result of poll(2),
261 	 * t_pollcache should be NULL.
262 	 */
263 	if (curthread->t_pollcache == NULL)
264 		pcp = curthread->t_pollstate->ps_pcache;
265 	else
266 		pcp = curthread->t_pollcache;
267 
268 	if (lockstate > 0)
269 		mutex_enter(&pcp->pc_lock);
270 }
271 
272 /* ARGSUSED */
273 void
274 polllock(pollhead_t *php, kmutex_t *lp)
275 {
276 	if (!mutex_tryenter(lp)) {
277 		int lockstate = pollunlock();
278 		mutex_enter(lp);
279 		pollrelock(lockstate);
280 	}
281 }
282 
283 static int
284 poll_common(pollfd_t *fds, nfds_t nfds, timespec_t *tsp, k_sigset_t *ksetp)
285 {
286 	kthread_t *t = curthread;
287 	klwp_t *lwp = ttolwp(t);
288 	proc_t *p = ttoproc(t);
289 	int fdcnt = 0;
290 	int rval;
291 	int i;
292 	timespec_t *rqtp = NULL;
293 	int timecheck = 0;
294 	int imm_timeout = 0;
295 	pollfd_t *pollfdp;
296 	pollstate_t *ps;
297 	pollcache_t *pcp;
298 	int error = 0;
299 	nfds_t old_nfds;
300 	int cacheindex = 0;	/* which cache set is used */
301 
302 	/*
303 	 * Determine the precise future time of the requested timeout, if any.
304 	 */
305 	if (tsp != NULL) {
306 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
307 			imm_timeout = 1;
308 		else {
309 			timespec_t now;
310 			timecheck = timechanged;
311 			gethrestime(&now);
312 			rqtp = tsp;
313 			timespecadd(rqtp, &now);
314 		}
315 	}
316 
317 	/*
318 	 * Reset our signal mask, if requested.
319 	 */
320 	if (ksetp != NULL) {
321 		mutex_enter(&p->p_lock);
322 		schedctl_finish_sigblock(t);
323 		lwp->lwp_sigoldmask = t->t_hold;
324 		t->t_hold = *ksetp;
325 		t->t_flag |= T_TOMASK;
326 		/*
327 		 * Call cv_timedwait_sig() just to check for signals.
328 		 * We will return immediately with either 0 or -1.
329 		 */
330 		if (!cv_timedwait_sig(&t->t_delay_cv, &p->p_lock, lbolt)) {
331 			mutex_exit(&p->p_lock);
332 			error = EINTR;
333 			goto pollout;
334 		}
335 		mutex_exit(&p->p_lock);
336 	}
337 
338 	/*
339 	 * Check to see if the caller just wants to use poll() as a timeout.
340 	 * If so, bypass all the other work and simply sleep.
341 	 */
342 	if (nfds == 0) {
343 		/*
344 		 * Sleep until we have passed the requested future
345 		 * time or until interrupted by a signal.
346 		 * Do not check for signals if we have a zero timeout.
347 		 */
348 		if (!imm_timeout) {
349 			mutex_enter(&t->t_delay_lock);
350 			while ((rval = cv_waituntil_sig(&t->t_delay_cv,
351 			    &t->t_delay_lock, rqtp, timecheck)) > 0)
352 				continue;
353 			mutex_exit(&t->t_delay_lock);
354 			if (rval == 0)
355 				error = EINTR;
356 		}
357 		goto pollout;
358 	}
359 
360 	if (nfds > p->p_fno_ctl) {
361 		mutex_enter(&p->p_lock);
362 		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
363 		    p->p_rctls, p, RCA_SAFE);
364 		mutex_exit(&p->p_lock);
365 		error = EINVAL;
366 		goto pollout;
367 	}
368 
369 	/*
370 	 * We need to allocate memory for the pollstate before anything else,
371 	 * because the mutex and cv are created in this space.
372 	 */
373 	if ((ps = t->t_pollstate) == NULL) {
374 		t->t_pollstate = pollstate_create();
375 		ps = t->t_pollstate;
376 	}
377 
378 	if (ps->ps_pcache == NULL)
379 		ps->ps_pcache = pcache_alloc();
380 	pcp = ps->ps_pcache;
381 
382 	/*
383 	 * NOTE: for performance, buffers are saved across poll() calls.
384 	 * The theory is that if a process polls heavily, it tends to poll
385 	 * on the same set of descriptors.  Therefore, we only reallocate
386 	 * buffers when nfds changes.  There is no hysteresis control,
387 	 * because there is no data to suggest that this is necessary;
388 	 * the penalty of reallocating is not *that* great in any event.
389 	 */
390 	old_nfds = ps->ps_nfds;
391 	if (nfds != old_nfds) {
392 
393 		kmem_free(ps->ps_pollfd, old_nfds * sizeof (pollfd_t));
394 		pollfdp = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
395 		ps->ps_pollfd = pollfdp;
396 		ps->ps_nfds = nfds;
397 	}
398 
399 	pollfdp = ps->ps_pollfd;
400 	if (copyin(fds, pollfdp, nfds * sizeof (pollfd_t))) {
401 		error = EFAULT;
402 		goto pollout;
403 	}
404 
405 	if (fds == NULL) {
406 		/*
407 		 * If the process has page 0 mapped, then the copyin() above
408 		 * will succeed even if fds is NULL.  However, our cached
409 		 * poll lists are keyed by the address of the passed-in fds
410 		 * structure, and we use the value NULL to indicate an unused
411 		 * poll cache list entry.  As such, we elect not to support
412 		 * NULL as a valid (user) memory address and fail the poll()
413 		 * call.
414 		 */
415 		error = EINVAL;
416 		goto pollout;
417 	}
418 
419 	/*
420 	 * If this thread is polling for the first time, allocate ALL the poll
421 	 * cache data structures and cache the poll fd list. This
422 	 * allocation is delayed until now because lwps polling 0 fds
423 	 * (i.e., using poll() as a timeout) don't need this memory.
424 	 */
425 	mutex_enter(&ps->ps_lock);
426 	pcp = ps->ps_pcache;
427 	ASSERT(pcp != NULL);
428 	if (pcp->pc_bitmap == NULL) {
429 		pcache_create(pcp, nfds);
430 		/*
431 		 * poll and cache this poll fd list in ps_pcacheset[0].
432 		 */
433 		error = pcacheset_cache_list(ps, fds, &fdcnt, cacheindex);
434 		if (fdcnt || error) {
435 			mutex_exit(&ps->ps_lock);
436 			goto pollout;
437 		}
438 	} else {
439 		pollcacheset_t	*pcset = ps->ps_pcacheset;
440 
441 		/*
442 		 * Not first time polling. Select a cached poll list by
443 		 * matching user pollfd list buffer address.
444 		 */
445 		for (cacheindex = 0; cacheindex < ps->ps_nsets; cacheindex++) {
446 			if (pcset[cacheindex].pcs_usradr == (uintptr_t)fds) {
447 				if ((++pcset[cacheindex].pcs_count) == 0) {
448 					/*
449 					 * counter is wrapping around.
450 					 */
451 					pcacheset_reset_count(ps, cacheindex);
452 				}
453 				/*
454 				 * examine and resolve any
455 				 * differences between the current poll
456 				 * list and the previously cached one.
457 				 * If there is an error during resolve(),
458 				 * the callee will guarantee the consistency
459 				 * of cached poll list and cache content.
460 				 */
461 				error = pcacheset_resolve(ps, nfds, &fdcnt,
462 				    cacheindex);
463 				if (error) {
464 					mutex_exit(&ps->ps_lock);
465 					goto pollout;
466 				}
467 				break;
468 			}
469 
470 			/*
471 			 * Note that the pcs_usradr field of a used entry won't be
472 			 * NULL because it stores the address of the passed-in fds,
473 			 * and a NULL fds is never cached (that is either the
474 			 * special timeout case when nfds is 0, or the call fails
475 			 * outright).
476 			 */
477 			if (pcset[cacheindex].pcs_usradr == NULL) {
478 				/*
479 				 * found an unused entry. Use it to cache
480 				 * this poll list.
481 				 */
482 				error = pcacheset_cache_list(ps, fds, &fdcnt,
483 				    cacheindex);
484 				if (fdcnt || error) {
485 					mutex_exit(&ps->ps_lock);
486 					goto pollout;
487 				}
488 				break;
489 			}
490 		}
491 		if (cacheindex == ps->ps_nsets) {
492 			/*
493 			 * We failed to find a matching cached poll fd list.
494 			 * Replace an old one.
495 			 */
496 			pollstats.polllistmiss.value.ui64++;
497 			cacheindex = pcacheset_replace(ps);
498 			ASSERT(cacheindex < ps->ps_nsets);
499 			pcset[cacheindex].pcs_usradr = (uintptr_t)fds;
500 			error = pcacheset_resolve(ps, nfds, &fdcnt, cacheindex);
501 			if (error) {
502 				mutex_exit(&ps->ps_lock);
503 				goto pollout;
504 			}
505 		}
506 	}
507 
508 	/*
509 	 * Always scan the bitmap with the lock on the pollcache held.
510 	 * This is to make sure that a wakeup does not come undetected.
511 	 * If the lock is not held, a pollwakeup could have come for an
512 	 * fd we already checked but before this thread sleeps, in which
513 	 * case the wakeup is missed. Now we hold the pcache lock and
514 	 * check the bitmap again. This will prevent wakeup from happening
515 	 * while we hold pcache lock since pollwakeup() will also lock
516 	 * the pcache before updating poll bitmap.
517 	 */
518 	mutex_enter(&pcp->pc_lock);
519 	for (;;) {
520 		pcp->pc_flag = 0;
521 		error = pcache_poll(pollfdp, ps, nfds, &fdcnt, cacheindex);
522 		if (fdcnt || error) {
523 			mutex_exit(&pcp->pc_lock);
524 			mutex_exit(&ps->ps_lock);
525 			break;
526 		}
527 
528 		/*
529 		 * If T_POLLWAKE is set, a pollwakeup() was performed on
530 		 * one of the file descriptors.  This can happen only if
531 		 * one of the VOP_POLL() functions dropped pcp->pc_lock.
532 		 * The only current cases of this are procfs (prpoll())
533 		 * and STREAMS (strpoll()).
534 		 */
535 		if (pcp->pc_flag & T_POLLWAKE)
536 			continue;
537 
538 		/*
539 		 * If you get here, the poll of fds was unsuccessful.
540 		 * Wait until some fd becomes readable, writable, or gets
541 		 * an exception, or until a signal or a timeout occurs.
542 		 * Do not check for signals if we have a zero timeout.
543 		 */
544 		mutex_exit(&ps->ps_lock);
545 		if (imm_timeout)
546 			rval = -1;
547 		else
548 			rval = cv_waituntil_sig(&pcp->pc_cv, &pcp->pc_lock,
549 				rqtp, timecheck);
550 		mutex_exit(&pcp->pc_lock);
551 		/*
552 		 * If we have received a signal or timed out
553 		 * then break out and return.
554 		 */
555 		if (rval <= 0) {
556 			if (rval == 0)
557 				error = EINTR;
558 			break;
559 		}
560 		/*
561 		 * We have not received a signal or timed out.
562 		 * Continue around and poll fds again.
563 		 */
564 		mutex_enter(&ps->ps_lock);
565 		mutex_enter(&pcp->pc_lock);
566 	}
567 
568 pollout:
569 	/*
570 	 * If we changed the signal mask but we received
571 	 * no signal then restore the signal mask.
572 	 * Otherwise psig() will deal with the signal mask.
573 	 */
574 	if (ksetp != NULL) {
575 		mutex_enter(&p->p_lock);
576 		if (lwp->lwp_cursig == 0) {
577 			t->t_hold = lwp->lwp_sigoldmask;
578 			t->t_flag &= ~T_TOMASK;
579 		}
580 		mutex_exit(&p->p_lock);
581 	}
582 
583 	if (error)
584 		return (set_errno(error));
585 
586 	/*
587 	 * Copy out the events and return the fdcnt to the user.
588 	 */
589 	if (nfds != 0 &&
590 	    copyout(pollfdp, fds, nfds * sizeof (pollfd_t)))
591 		return (set_errno(EFAULT));
592 
593 #ifdef DEBUG
594 	/*
595 	 * Another sanity check:
596 	 */
597 	if (fdcnt) {
598 		int	reventcnt = 0;
599 
600 		for (i = 0; i < nfds; i++) {
601 			if (pollfdp[i].fd < 0) {
602 				ASSERT(pollfdp[i].revents == 0);
603 				continue;
604 			}
605 			if (pollfdp[i].revents) {
606 				reventcnt++;
607 			}
608 		}
609 		ASSERT(fdcnt == reventcnt);
610 	} else {
611 		for (i = 0; i < nfds; i++) {
612 			ASSERT(pollfdp[i].revents == 0);
613 		}
614 	}
615 #endif	/* DEBUG */
616 
617 	return (fdcnt);
618 }
619 
620 /*
621  * This system call trap exists solely for binary compatibility with
622  * old statically-linked applications.  It is not called from libc.
623  * It should be removed in the next release.
624  */
625 int
626 poll(pollfd_t *fds, nfds_t nfds, int time_out)
627 {
628 	timespec_t ts;
629 	timespec_t *tsp;
630 
631 	if (time_out < 0)
632 		tsp = NULL;
633 	else {
634 		ts.tv_sec = time_out / MILLISEC;
635 		ts.tv_nsec = (time_out % MILLISEC) * MICROSEC;
636 		tsp = &ts;
637 	}
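	/*
	 * Worked example of the conversion above: a time_out of 2500 (ms)
	 * yields ts.tv_sec = 2500 / MILLISEC = 2 and
	 * ts.tv_nsec = (2500 % MILLISEC) * MICROSEC = 500000000 (0.5 sec),
	 * given MILLISEC of 1000 and MICROSEC of 1000000.
	 */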
638 
639 	return (poll_common(fds, nfds, tsp, NULL));
640 }
641 
642 /*
643  * This is the system call trap that poll(),
644  * select() and pselect() are built upon.
645  * It is a private interface between libc and the kernel.
646  */
647 int
648 pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeoutp, sigset_t *setp)
649 {
650 	timespec_t ts;
651 	timespec_t *tsp;
652 	sigset_t set;
653 	k_sigset_t kset;
654 	k_sigset_t *ksetp;
655 	model_t datamodel = get_udatamodel();
656 
657 	if (timeoutp == NULL)
658 		tsp = NULL;
659 	else {
660 		if (datamodel == DATAMODEL_NATIVE) {
661 			if (copyin(timeoutp, &ts, sizeof (ts)))
662 				return (set_errno(EFAULT));
663 		} else {
664 			timespec32_t ts32;
665 
666 			if (copyin(timeoutp, &ts32, sizeof (ts32)))
667 				return (set_errno(EFAULT));
668 			TIMESPEC32_TO_TIMESPEC(&ts, &ts32)
669 		}
670 
671 		if (itimerspecfix(&ts))
672 			return (set_errno(EINVAL));
673 		tsp = &ts;
674 	}
675 
676 	if (setp == NULL)
677 		ksetp = NULL;
678 	else {
679 		if (copyin(setp, &set, sizeof (set)))
680 			return (set_errno(EFAULT));
681 		sigutok(&set, &kset);
682 		ksetp = &kset;
683 	}
684 
685 	return (poll_common(fds, nfds, tsp, ksetp));
686 }
687 
688 /*
689  * Clean up any state left around by poll(2). Called when a thread exits.
690  */
691 void
692 pollcleanup()
693 {
694 	pollstate_t *ps = curthread->t_pollstate;
695 	pollcache_t *pcp;
696 
697 	if (ps == NULL)
698 		return;
699 	pcp = ps->ps_pcache;
700 	/*
701 	 * free up all cached poll fds
702 	 */
703 	if (pcp == NULL) {
704 		/* this pollstate is used by /dev/poll */
705 		goto pollcleanout;
706 	}
707 
708 	if (pcp->pc_bitmap != NULL) {
709 		ASSERT(MUTEX_NOT_HELD(&ps->ps_lock));
710 		/*
711 		 * A closing lwp can race with us when cleaning up a polldat
712 		 * entry. We hold the ps_lock when cleaning the hash table.
713 		 * Since this pollcache is going away anyway, there is no
714 		 * need to hold the pc_lock.
715 		 */
716 		mutex_enter(&ps->ps_lock);
717 		pcache_clean(pcp);
718 		mutex_exit(&ps->ps_lock);
719 #ifdef DEBUG
720 		/*
721 		 * At this point, all fds cached by this lwp should be
722 		 * cleaned up. There should be no fd in fi_list still
723 		 * referencing this thread.
724 		 */
725 		checkfpollinfo();	/* sanity check */
726 		pollcheckphlist();	/* sanity check */
727 #endif	/* DEBUG */
728 	}
729 	/*
730 	 * Be sure no one is referencing this thread before exiting
731 	 */
732 	mutex_enter(&pcp->pc_no_exit);
733 	ASSERT(pcp->pc_busy >= 0);
734 	while (pcp->pc_busy > 0)
735 		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
736 	mutex_exit(&pcp->pc_no_exit);
737 pollcleanout:
738 	pollstate_destroy(ps);
739 	curthread->t_pollstate = NULL;
740 }
741 
742 /*
743  * pollwakeup() - poke threads waiting in poll() for some event
744  * on a particular object.
745  *
746  * The threads hanging off of the specified pollhead structure are scanned.
747  * If their event mask matches the specified event(s), then pollnotify() is
748  * called to poke the thread.
749  *
750  * Multiple events may be specified.  When POLLHUP or POLLERR are specified,
751  * all waiting threads are poked.
752  *
753  * It is important that pollnotify() not drop the lock protecting the list
754  * of threads.
755  */
756 void
757 pollwakeup(pollhead_t *php, short events_arg)
758 {
759 	polldat_t	*pdp;
760 	int		events = (ushort_t)events_arg;
761 
762 retry:
763 	PH_ENTER(php);
764 
765 	/*
766 	 * About half of all pollwakeups don't do anything, because the
767 	 * pollhead list is empty (i.e., nobody is interested in the event).
768 	 * For this common case, we can optimize out locking overhead.
769 	 */
770 	if (php->ph_list == NULL) {
771 		PH_EXIT(php);
772 		return;
773 	}
774 
775 	for (pdp = php->ph_list; pdp; pdp = pdp->pd_next) {
776 		if ((pdp->pd_events & events) ||
777 		    (events & (POLLHUP | POLLERR))) {
778 
779 			pollcache_t 	*pcp;
780 
781 			if (pdp->pd_portev != NULL) {
782 				port_kevent_t	*pkevp = pdp->pd_portev;
783 				/*
784 				 * Object (fd) is associated with an event port,
785 				 * => send event notification to the port.
786 				 */
787 				pkevp->portkev_events |= events &
788 				    (pdp->pd_events | POLLHUP | POLLERR);
789 				if (pkevp->portkev_flags & PORT_KEV_VALID) {
790 					pkevp->portkev_flags &= ~PORT_KEV_VALID;
791 					(void) port_send_event(pdp->pd_portev);
792 				}
793 				continue;
794 			}
795 
796 			pcp = pdp->pd_pcache;
797 
798 			/*
799 			 * Try to grab the lock for this thread. If
800 			 * we don't get it then we may deadlock so
801 			 * back out and restart all over again. Note
802 			 * that the failure rate is very very low.
803 			 */
804 			if (mutex_tryenter(&pcp->pc_lock)) {
805 				pollnotify(pcp, pdp->pd_fd);
806 				mutex_exit(&pcp->pc_lock);
807 			} else {
808 				/*
809 				 * We are here because:
810 				 *	1) This thread has been woken up
811 				 *	   and is trying to get out of poll().
812 				 *	2) Some other thread is also here
813 				 *	   but with a different pollhead lock.
814 				 *
815 				 * So, we need to drop the lock on pollhead
816 				 * because of (1) but we want to prevent
817 				 * that thread from doing lwp_exit() or
818 				 * devpoll close. We want to ensure that
819 				 * the pollcache pointer remains valid.
820 				 *
821 				 * Solution: Grab the pcp->pc_no_exit lock,
822 				 * increment the pc_busy counter, drop every
823 				 * lock in sight. Get out of the way and wait
824 				 * for type (2) threads to finish.
825 				 */
826 
827 				mutex_enter(&pcp->pc_no_exit);
828 				pcp->pc_busy++;	/* prevents exit()'s */
829 				mutex_exit(&pcp->pc_no_exit);
830 
831 				PH_EXIT(php);
832 				mutex_enter(&pcp->pc_lock);
833 				mutex_exit(&pcp->pc_lock);
834 				mutex_enter(&pcp->pc_no_exit);
835 				pcp->pc_busy--;
836 				if (pcp->pc_busy == 0) {
837 					/*
838 					 * Wakeup the thread waiting in
839 					 * thread_exit().
840 					 */
841 					cv_signal(&pcp->pc_busy_cv);
842 				}
843 				mutex_exit(&pcp->pc_no_exit);
844 				goto retry;
845 			}
846 		}
847 	}
848 	PH_EXIT(php);
849 }
850 
851 /*
852  * This function is called to inform a thread that
853  * an event being polled for has occurred.
854  * The pollstate lock on the thread should be held on entry.
855  */
856 void
857 pollnotify(pollcache_t *pcp, int fd)
858 {
859 	ASSERT(fd < pcp->pc_mapsize);
860 	ASSERT(MUTEX_HELD(&pcp->pc_lock));
861 	BT_SET(pcp->pc_bitmap, fd);
862 	pcp->pc_flag |= T_POLLWAKE;
863 	cv_signal(&pcp->pc_cv);
864 }
865 
866 /*
867  * Add a polldat entry to the pollhead's ph_list. The polldat struct is used
868  * by pollwakeup to wake sleeping pollers when polled events have happened.
869  */
870 void
871 pollhead_insert(pollhead_t *php, polldat_t *pdp)
872 {
873 	PH_ENTER(php);
874 	ASSERT(pdp->pd_next == NULL);
875 #ifdef DEBUG
876 	{
877 		/*
878 		 * the polldat should not be already on the list
879 		 */
880 		polldat_t *wp;
881 		for (wp = php->ph_list; wp; wp = wp->pd_next) {
882 			ASSERT(wp != pdp);
883 		}
884 	}
885 #endif	/* DEBUG */
886 	pdp->pd_next = php->ph_list;
887 	php->ph_list = pdp;
888 	PH_EXIT(php);
889 }
890 
891 /*
892  * Delete the polldat entry from ph_list.
893  */
894 void
895 pollhead_delete(pollhead_t *php, polldat_t *pdp)
896 {
897 	polldat_t *wp;
898 	polldat_t **wpp;
899 
900 	PH_ENTER(php);
901 	for (wpp = &php->ph_list; (wp = *wpp) != NULL; wpp = &wp->pd_next) {
902 		if (wp == pdp) {
903 			*wpp = pdp->pd_next;
904 			pdp->pd_next = NULL;
905 			break;
906 		}
907 	}
908 #ifdef DEBUG
909 	/* assert that pdp is no longer in the list */
910 	for (wp = *wpp; wp; wp = wp->pd_next) {
911 		ASSERT(wp != pdp);
912 	}
913 #endif	/* DEBUG */
914 	PH_EXIT(php);
915 }
916 
917 /*
918  * walk through the poll fd lists to see if they are identical. This is an
919  * expensive operation and should not be done more than once for each poll()
920  * call.
921  *
922  * As an optimization (i.e., not having to go through the lists more than
923  * once), this routine also clears the revents field of each pollfd in
924  * 'current'. Zeroing out the revents field of each entry in the current
925  * poll list is required by the poll man page.
926  *
927  * Since the events field of the cached list has illegal poll events filtered
928  * out, the current list applies the same filtering before the comparison.
929  *
930  * The routine stops when it detects a meaningful difference, or when it
931  * exhausts the lists.
932  */
933 int
934 pcacheset_cmp(pollfd_t *current, pollfd_t *cached, pollfd_t *newlist, int n)
935 {
936 	int    ix;
937 
938 	for (ix = 0; ix < n; ix++) {
939 		if (current[ix].fd == cached[ix].fd) {
940 			/*
941 			 * Filter out invalid poll events while we are
942 			 * inside the loop.
943 			 */
944 			if (current[ix].events & ~VALID_POLL_EVENTS) {
945 				current[ix].events &= VALID_POLL_EVENTS;
946 				if (newlist != NULL)
947 					newlist[ix].events = current[ix].events;
948 			}
949 			if (current[ix].events == cached[ix].events) {
950 				current[ix].revents = 0;
951 				continue;
952 			}
953 		}
954 		if ((current[ix].fd < 0) && (cached[ix].fd < 0)) {
955 			current[ix].revents = 0;
956 			continue;
957 		}
958 		return (ix);
959 	}
960 	return (ix);
961 }
962 
963 /*
964  * This routine returns a pointer to a cached poll fd entry, or NULL if it
965  * does not find it in the hash table.
966  */
967 polldat_t *
968 pcache_lookup_fd(pollcache_t *pcp, int fd)
969 {
970 	int hashindex;
971 	polldat_t *pdp;
972 
973 	hashindex = POLLHASH(pcp->pc_hashsize, fd);
974 	pdp = pcp->pc_hash[hashindex];
975 	while (pdp != NULL) {
976 		if (pdp->pd_fd == fd)
977 			break;
978 		pdp = pdp->pd_hashnext;
979 	}
980 	return (pdp);
981 }
982 
983 polldat_t *
984 pcache_alloc_fd(int nsets)
985 {
986 	polldat_t *pdp;
987 
988 	pdp = kmem_zalloc(sizeof (polldat_t), KM_SLEEP);
989 	if (nsets > 0) {
990 		pdp->pd_ref = kmem_zalloc(sizeof (xref_t) * nsets, KM_SLEEP);
991 		pdp->pd_nsets = nsets;
992 	}
993 	return (pdp);
994 }
995 
996 /*
997  * This routine inserts a polldat into the pollcache's hash table. It
998  * may be necessary to grow the size of the hash table.
999  */
1000 void
1001 pcache_insert_fd(pollcache_t *pcp, polldat_t *pdp, nfds_t nfds)
1002 {
1003 	int hashindex;
1004 	int fd;
1005 
1006 	if ((pcp->pc_fdcount > pcp->pc_hashsize * POLLHASHTHRESHOLD) ||
1007 	    (nfds > pcp->pc_hashsize * POLLHASHTHRESHOLD)) {
1008 		pcache_grow_hashtbl(pcp, nfds);
1009 	}
1010 	fd = pdp->pd_fd;
1011 	hashindex = POLLHASH(pcp->pc_hashsize, fd);
1012 	pdp->pd_hashnext = pcp->pc_hash[hashindex];
1013 	pcp->pc_hash[hashindex] = pdp;
1014 	pcp->pc_fdcount++;
1015 
1016 #ifdef DEBUG
1017 	{
1018 		/*
1019 		 * same fd should not appear on a hash list twice
1020 		 */
1021 		polldat_t *pdp1;
1022 		for (pdp1 = pdp->pd_hashnext; pdp1; pdp1 = pdp1->pd_hashnext) {
1023 			ASSERT(pdp->pd_fd != pdp1->pd_fd);
1024 		}
1025 	}
1026 #endif	/* DEBUG */
1027 }
1028 
1029 /*
1030  * Grow the hash table -- either double the table size or round it to the
1031  * nearest multiple of POLLHASHCHUNKSZ, whichever is bigger. Rehash all the
1032  * elements on the hash table.
1033  */
1034 void
1035 pcache_grow_hashtbl(pollcache_t *pcp, nfds_t nfds)
1036 {
1037 	int	oldsize;
1038 	polldat_t **oldtbl;
1039 	polldat_t *pdp, *pdp1;
1040 	int	i;
1041 #ifdef DEBUG
1042 	int	count = 0;
1043 #endif
1044 
1045 	ASSERT(pcp->pc_hashsize % POLLHASHCHUNKSZ == 0);
1046 	oldsize = pcp->pc_hashsize;
1047 	oldtbl = pcp->pc_hash;
1048 	if (nfds > pcp->pc_hashsize * POLLHASHINC) {
1049 		pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) &
1050 		    ~(POLLHASHCHUNKSZ - 1);
1051 	} else {
1052 		pcp->pc_hashsize = pcp->pc_hashsize * POLLHASHINC;
1053 	}
1054 	pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *),
1055 	    KM_SLEEP);
1056 	/*
1057 	 * rehash existing elements
1058 	 */
1059 	pcp->pc_fdcount = 0;
1060 	for (i = 0; i < oldsize; i++) {
1061 		pdp = oldtbl[i];
1062 		while (pdp != NULL) {
1063 			pdp1 = pdp->pd_hashnext;
1064 			pcache_insert_fd(pcp, pdp, nfds);
1065 			pdp = pdp1;
1066 #ifdef DEBUG
1067 			count++;
1068 #endif
1069 		}
1070 	}
1071 	kmem_free(oldtbl, oldsize * sizeof (polldat_t *));
1072 	ASSERT(pcp->pc_fdcount == count);
1073 }
1074 
1075 void
1076 pcache_grow_map(pollcache_t *pcp, int fd)
1077 {
1078 	int  	newsize;
1079 	ulong_t	*newmap;
1080 
1081 	/*
1082 	 * grow to the nearest multiple of POLLMAPCHUNK, assuming POLLMAPCHUNK is
1083 	 * a power of 2.
1084 	 */
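	/*
	 * Illustrative arithmetic, using a hypothetical chunk size of 256 for
	 * concreteness: fd 300 gives newsize = (300 + 256) & ~255 = 512, the
	 * smallest multiple of the chunk size strictly greater than fd.
	 */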
1085 	newsize = (fd + POLLMAPCHUNK) & ~(POLLMAPCHUNK - 1);
1086 	newmap = kmem_zalloc((newsize / BT_NBIPUL) * sizeof (ulong_t),
1087 	    KM_SLEEP);
1088 	/*
1089 	 * don't want pollwakeup to set a bit while growing the bitmap.
1090 	 */
1091 	ASSERT(mutex_owned(&pcp->pc_lock) == 0);
1092 	mutex_enter(&pcp->pc_lock);
1093 	bcopy(pcp->pc_bitmap, newmap,
1094 	    (pcp->pc_mapsize / BT_NBIPUL) * sizeof (ulong_t));
1095 	kmem_free(pcp->pc_bitmap,
1096 	    (pcp->pc_mapsize / BT_NBIPUL) * sizeof (ulong_t));
1097 	pcp->pc_bitmap = newmap;
1098 	pcp->pc_mapsize = newsize;
1099 	mutex_exit(&pcp->pc_lock);
1100 }
1101 
1102 /*
1103  * Remove all the references from the pollhead and fpollinfo lists.
1104  */
1105 void
1106 pcache_clean(pollcache_t *pcp)
1107 {
1108 	int i;
1109 	polldat_t **hashtbl;
1110 	polldat_t *pdp;
1111 
1112 	ASSERT(MUTEX_HELD(&curthread->t_pollstate->ps_lock));
1113 	hashtbl = pcp->pc_hash;
1114 	for (i = 0; i < pcp->pc_hashsize; i++) {
1115 		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
1116 			if (pdp->pd_php != NULL) {
1117 				pollhead_delete(pdp->pd_php, pdp);
1118 				pdp->pd_php = NULL;
1119 			}
1120 			if (pdp->pd_fp != NULL) {
1121 				delfpollinfo(pdp->pd_fd);
1122 				pdp->pd_fp = NULL;
1123 			}
1124 		}
1125 	}
1126 }
1127 
1128 void
1129 pcacheset_invalidate(pollstate_t *ps, polldat_t *pdp)
1130 {
1131 	int 	i;
1132 	int	fd = pdp->pd_fd;
1133 
1134 	/*
1135 	 * We come here because of an earlier close() on this cached poll fd.
1136 	 */
1137 	ASSERT(pdp->pd_fp == NULL);
1138 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1139 	pdp->pd_events = 0;
1140 	for (i = 0; i < ps->ps_nsets; i++) {
1141 		xref_t		*refp;
1142 		pollcacheset_t	*pcsp;
1143 
1144 		ASSERT(pdp->pd_ref != NULL);
1145 		refp = &pdp->pd_ref[i];
1146 		if (refp->xf_refcnt) {
1147 			ASSERT(refp->xf_position >= 0);
1148 			pcsp = &ps->ps_pcacheset[i];
1149 			if (refp->xf_refcnt == 1) {
1150 				pcsp->pcs_pollfd[refp->xf_position].fd = -1;
1151 				refp->xf_refcnt = 0;
1152 				pdp->pd_count--;
1153 			} else if (refp->xf_refcnt > 1) {
1154 				int	j;
1155 
1156 				/*
1157 				 * turn off every appearance in pcs_pollfd list
1158 				 */
1159 				for (j = refp->xf_position;
1160 				    j < pcsp->pcs_nfds; j++) {
1161 					if (pcsp->pcs_pollfd[j].fd == fd) {
1162 						pcsp->pcs_pollfd[j].fd = -1;
1163 						refp->xf_refcnt--;
1164 						pdp->pd_count--;
1165 					}
1166 				}
1167 			}
1168 			ASSERT(refp->xf_refcnt == 0);
1169 			refp->xf_position = POLLPOSINVAL;
1170 		}
1171 	}
1172 	ASSERT(pdp->pd_count == 0);
1173 }
1174 
1175 /*
1176  * Insert poll fd into the pollcache, and add poll registration.
1177  * This routine is called after getf() and before releasef(). So the vnode
1178  * cannot disappear even if we block here.
1179  * If there is an error, the polled fd is not cached.
1180  */
1181 int
1182 pcache_insert(pollstate_t *ps, file_t *fp, pollfd_t *pollfdp, int *fdcntp,
1183     ssize_t pos, int which)
1184 {
1185 	pollcache_t	*pcp = ps->ps_pcache;
1186 	polldat_t	*pdp;
1187 	int		error;
1188 	int		fd;
1189 	pollhead_t	*memphp = NULL;
1190 	xref_t		*refp;
1191 	int		newpollfd = 0;
1192 
1193 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1194 	/*
1195 	 * The poll caching uses the existing VOP_POLL interface. If there
1196 	 * are no polled events, we want the polled device to set its
1197 	 * "someone is sleeping in poll" flag. When the polled events happen
1198 	 * later, the driver will call pollwakeup(). We achieve this by
1199 	 * always passing 0 in the third parameter ("anyyet") when calling
1200 	 * VOP_POLL. This parameter is not looked at by drivers when the
1201 	 * polled events exist. If a driver chooses to ignore this parameter
1202 	 * and call pollwakeup whenever the polled events happen, that will
1203 	 * be OK too.
1204 	 */
1205 	ASSERT(curthread->t_pollcache == NULL);
1206 	error = VOP_POLL(fp->f_vnode, pollfdp->events, 0, &pollfdp->revents,
1207 	    &memphp);
1208 	if (error) {
1209 		return (error);
1210 	}
1211 	if (pollfdp->revents) {
1212 		(*fdcntp)++;
1213 	}
1214 	/*
1215 	 * polling the underlying device succeeded. Now we can cache it.
1216 	 * A close can't come in here because we have not done a releasef()
1217 	 * yet.
1218 	 */
1219 	fd = pollfdp->fd;
1220 	pdp = pcache_lookup_fd(pcp, fd);
1221 	if (pdp == NULL) {
1222 		ASSERT(ps->ps_nsets > 0);
1223 		pdp = pcache_alloc_fd(ps->ps_nsets);
1224 		newpollfd = 1;
1225 	}
1226 	/*
1227 	 * If this entry was used to cache a poll fd which was closed, and
1228 	 * this entry has not been cleaned, do it now.
1229 	 */
1230 	if ((pdp->pd_count > 0) && (pdp->pd_fp == NULL)) {
1231 		pcacheset_invalidate(ps, pdp);
1232 		ASSERT(pdp->pd_next == NULL);
1233 	}
1234 	if (pdp->pd_count == 0) {
1235 		pdp->pd_fd = fd;
1236 		pdp->pd_fp = fp;
1237 		addfpollinfo(fd);
1238 		pdp->pd_thread = curthread;
1239 		pdp->pd_pcache = pcp;
1240 		/*
1241 		 * the entry is never used or cleared by removing a cached
1242 		 * pollfd (pcache_delete_fd). So all the fields should be clear.
1243 		 */
1244 		ASSERT(pdp->pd_next == NULL);
1245 	}
1246 
1247 	/*
1248 	 * A polled fd is considered cached. So there should be a fpollinfo
1249 	 * entry on uf_fpollinfo list.
1250 	 */
1251 	ASSERT(infpollinfo(fd));
1252 	/*
1253 	 * If there is an inconsistency, we want to know it here.
1254 	 */
1255 	ASSERT(pdp->pd_fp == fp);
1256 
1257 	/*
1258 	 * XXX pd_events is a union of all polled events on this fd, possibly
1259 	 * by different threads. Unless this is a new first poll(), pd_events
1260 	 * never shrinks. If an event is no longer polled by a process, there
1261  * is no way to cancel that event. In that case, poll degrades to its
1262  * old form -- polling on this fd every time poll() is called. The
1263  * assumption is that an app always polls the same type of events.
1264 	 */
1265 	pdp->pd_events |= pollfdp->events;
1266 
1267 	pdp->pd_count++;
1268 	/*
1269 	 * There is not much special handling for multiple appearances of
1270 	 * the same fd, other than xf_position always recording the first
1271 	 * appearance in the poll list. If this is called from pcacheset_cache_list,
1272 	 * a VOP_POLL is called on every pollfd entry; therefore each
1273 	 * revents and fdcnt should be set correctly. If this is called from
1274 	 * pcacheset_resolve, we don't care about fdcnt here. Pollreadmap will
1275 	 * pick up the right count and handle revents field of each pollfd
1276 	 * entry.
1277 	 */
1278 	ASSERT(pdp->pd_ref != NULL);
1279 	refp = &pdp->pd_ref[which];
1280 	if (refp->xf_refcnt == 0) {
1281 		refp->xf_position = pos;
1282 	} else {
1283 		/*
1284 		 * xf_position records the fd's first appearance in the poll list
1285 		 */
1286 		if (pos < refp->xf_position) {
1287 			refp->xf_position = pos;
1288 		}
1289 	}
1290 	ASSERT(pollfdp->fd == ps->ps_pollfd[refp->xf_position].fd);
1291 	refp->xf_refcnt++;
1292 	if (fd >= pcp->pc_mapsize) {
1293 		pcache_grow_map(pcp, fd);
1294 	}
1295 	if (fd > pcp->pc_mapend) {
1296 		pcp->pc_mapend = fd;
1297 	}
1298 	if (newpollfd != 0) {
1299 		pcache_insert_fd(ps->ps_pcache, pdp, ps->ps_nfds);
1300 	}
1301 	if (memphp) {
1302 		if (pdp->pd_php == NULL) {
1303 			pollhead_insert(memphp, pdp);
1304 			pdp->pd_php = memphp;
1305 		} else {
1306 			if (memphp != pdp->pd_php) {
1307 				/*
1308 				 * layered devices (e.g. console driver)
1309 				 * may change the vnode and thus the pollhead
1310 				 * pointer out from underneath us.
1311 				 */
1312 				pollhead_delete(pdp->pd_php, pdp);
1313 				pollhead_insert(memphp, pdp);
1314 				pdp->pd_php = memphp;
1315 			}
1316 		}
1317 	}
1318 	/*
1319 	 * Since there is a considerable window between VOP_POLL and when
1320 	 * we actually put the polldat struct on the pollhead list, we could
1321 	 * miss a pollwakeup. In the case of polling additional events, we
1322 	 * don't update the events until after VOP_POLL. So we could miss
1323 	 * pollwakeup there too. So we always set the bit here just to be
1324 	 * safe. The real performance gain is in subsequent pcache_poll.
1325 	 */
1326 	mutex_enter(&pcp->pc_lock);
1327 	BT_SET(pcp->pc_bitmap, fd);
1328 	mutex_exit(&pcp->pc_lock);
1329 	return (0);
1330 }
1331 
1332 /*
1333  * The entry is not really deleted. The fields are cleared so that the
1334  * entry is no longer useful, but it will remain in the hash table for reuse
1335  * later. It will be freed when the polling lwp exits.
1336  */
1337 int
1338 pcache_delete_fd(pollstate_t *ps, int fd, size_t pos, int which, uint_t cevent)
1339 {
1340 	pollcache_t	*pcp = ps->ps_pcache;
1341 	polldat_t	*pdp;
1342 	xref_t		*refp;
1343 
1344 	ASSERT(fd < pcp->pc_mapsize);
1345 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1346 
1347 	pdp = pcache_lookup_fd(pcp, fd);
1348 	ASSERT(pdp != NULL);
1349 	ASSERT(pdp->pd_count > 0);
1350 	ASSERT(pdp->pd_ref != NULL);
1351 	refp = &pdp->pd_ref[which];
1352 	if (pdp->pd_count == 1) {
1353 		pdp->pd_events = 0;
1354 		refp->xf_position = POLLPOSINVAL;
1355 		ASSERT(refp->xf_refcnt == 1);
1356 		refp->xf_refcnt = 0;
1357 		if (pdp->pd_php) {
1358 			/*
1359 			 * It is possible for a wakeup thread to get ahead
1360 			 * of the following pollhead_delete and set the bit in
1361 			 * bitmap.  It is OK because the bit will be cleared
1362 			 * here anyway.
1363 			 */
1364 			pollhead_delete(pdp->pd_php, pdp);
1365 			pdp->pd_php = NULL;
1366 		}
1367 		pdp->pd_count = 0;
1368 		if (pdp->pd_fp != NULL) {
1369 			pdp->pd_fp = NULL;
1370 			delfpollinfo(fd);
1371 		}
1372 		mutex_enter(&pcp->pc_lock);
1373 		BT_CLEAR(pcp->pc_bitmap, fd);
1374 		mutex_exit(&pcp->pc_lock);
1375 		return (0);
1376 	}
1377 	if ((cevent & POLLCLOSED) == POLLCLOSED) {
1378 		/*
1379 		 * fd cached here has been closed. This is the first
1380 		 * pcache_delete_fd called after the close. Clean up the
1381 		 * entire entry.
1382 		 */
1383 		pcacheset_invalidate(ps, pdp);
1384 		ASSERT(pdp->pd_php == NULL);
1385 		mutex_enter(&pcp->pc_lock);
1386 		BT_CLEAR(pcp->pc_bitmap, fd);
1387 		mutex_exit(&pcp->pc_lock);
1388 		return (0);
1389 	}
1390 #ifdef DEBUG
1391 	if (getf(fd) != NULL) {
1392 		ASSERT(infpollinfo(fd));
1393 		releasef(fd);
1394 	}
1395 #endif	/* DEBUG */
1396 	pdp->pd_count--;
1397 	ASSERT(refp->xf_refcnt > 0);
1398 	if (--refp->xf_refcnt == 0) {
1399 		refp->xf_position = POLLPOSINVAL;
1400 	} else {
1401 		ASSERT(pos >= refp->xf_position);
1402 		if (pos == refp->xf_position) {
1403 			/*
1404 			 * The xref position is no longer valid.
1405 			 * Reset it to a special value and let the
1406 			 * caller know that it needs to update the xref
1407 			 * with a new xf_position value.
1408 			 */
1409 			refp->xf_position = POLLPOSTRANS;
1410 			return (1);
1411 		}
1412 	}
1413 	return (0);
1414 }
1415 
1416 void
1417 pcache_update_xref(pollcache_t *pcp, int fd, ssize_t pos, int which)
1418 {
1419 	polldat_t	*pdp;
1420 
1421 	pdp = pcache_lookup_fd(pcp, fd);
1422 	ASSERT(pdp != NULL);
1423 	ASSERT(pdp->pd_ref != NULL);
1424 	pdp->pd_ref[which].xf_position = pos;
1425 }
1426 
1427 #ifdef DEBUG
1428 /*
1429  * For each polled fd, it's either in the bitmap or cached in
1430  * the pcache hash table. If this routine returns 0, something is wrong.
1431  */
1432 static int
1433 pollchecksanity(pollstate_t *ps, nfds_t nfds)
1434 {
1435 	int    		i;
1436 	int		fd;
1437 	pollcache_t	*pcp = ps->ps_pcache;
1438 	polldat_t	*pdp;
1439 	pollfd_t	*pollfdp = ps->ps_pollfd;
1440 	file_t		*fp;
1441 
1442 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1443 	for (i = 0; i < nfds; i++) {
1444 		fd = pollfdp[i].fd;
1445 		if (fd < 0) {
1446 			ASSERT(pollfdp[i].revents == 0);
1447 			continue;
1448 		}
1449 		if (pollfdp[i].revents == POLLNVAL)
1450 			continue;
1451 		if ((fp = getf(fd)) == NULL)
1452 			continue;
1453 		pdp = pcache_lookup_fd(pcp, fd);
1454 		ASSERT(pdp != NULL);
1455 		ASSERT(infpollinfo(fd));
1456 		ASSERT(pdp->pd_fp == fp);
1457 		releasef(fd);
1458 		if (BT_TEST(pcp->pc_bitmap, fd))
1459 			continue;
1460 		if (pdp->pd_php == NULL)
1461 			return (0);
1462 	}
1463 	return (1);
1464 }
1465 #endif	/* DEBUG */
1466 
1467 /*
1468  * resolve the difference between the current poll list and a cached one.
1469  */
1470 int
1471 pcacheset_resolve(pollstate_t *ps, nfds_t nfds, int *fdcntp, int which)
1472 {
1473 	int    		i;
1474 	pollcache_t	*pcp = ps->ps_pcache;
1475 	pollfd_t	*newlist = NULL;
1476 	pollfd_t	*current = ps->ps_pollfd;
1477 	pollfd_t	*cached;
1478 	pollcacheset_t	*pcsp;
1479 	int		common;
1480 	int		count = 0;
1481 	int		offset;
1482 	int		remain;
1483 	int		fd;
1484 	file_t		*fp;
1485 	int		fdcnt = 0;
1486 	int		cnt = 0;
1487 	nfds_t		old_nfds;
1488 	int		error = 0;
1489 	int		mismatch = 0;
1490 
1491 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1492 #ifdef DEBUG
1493 	checkpolldat(ps);
1494 #endif
1495 	pcsp = &ps->ps_pcacheset[which];
1496 	old_nfds = pcsp->pcs_nfds;
1497 	common = (nfds > old_nfds) ? old_nfds : nfds;
1498 	if (nfds != old_nfds) {
1499 		/*
1500 		 * The length of the poll list has changed. Allocate a new
1501 		 * pollfd list.
1502 		 */
1503 		newlist = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
1504 		bcopy(current, newlist, sizeof (pollfd_t) * nfds);
1505 	}
1506 	/*
1507 	 * Compare the overlapping part of the current fd list with the
1508 	 * cached one. Whenever a difference is found, resolve it.
1509 	 * The comparison is done on the current poll list and the
1510 	 * cached list. But we may be setting up the newlist to be the
1511 	 * cached list for next poll.
1512 	 */
1513 	cached = pcsp->pcs_pollfd;
1514 	remain = common;
1515 
1516 	while (count < common) {
1517 		int	tmpfd;
1518 		pollfd_t *np;
1519 
1520 		np = (newlist != NULL) ? &newlist[count] : NULL;
1521 		offset = pcacheset_cmp(&current[count], &cached[count], np,
1522 		    remain);
1523 		/*
1524 		 * Collect stats. If the lists match completely on the first
1525 		 * pass, it's a hit. Otherwise, it's a partial hit or miss.
1526 		 */
1527 		if ((count == 0) && (offset == common)) {
1528 			pollstats.pollcachehit.value.ui64++;
1529 		} else {
1530 			mismatch++;
1531 		}
1532 		count += offset;
1533 		if (offset < remain) {
1534 			ASSERT(count < common);
1535 			ASSERT((current[count].fd != cached[count].fd) ||
1536 			    (current[count].events != cached[count].events));
1537 			/*
1538 			 * Filter out invalid events.
1539 			 */
1540 			if (current[count].events & ~VALID_POLL_EVENTS) {
1541 				if (newlist != NULL) {
1542 					newlist[count].events =
1543 						current[count].events &=
1544 							VALID_POLL_EVENTS;
1545 				} else {
1546 					current[count].events &=
1547 						VALID_POLL_EVENTS;
1548 				}
1549 			}
1550 			/*
1551 			 * when resolving a difference, we always remove the
1552 			 * fd from cache before inserting one into cache.
1553 			 */
1554 			if (cached[count].fd >= 0) {
1555 				tmpfd = cached[count].fd;
1556 				if (pcache_delete_fd(ps, tmpfd, count, which,
1557 				    (uint_t)cached[count].events)) {
1558 					/*
1559 					 * This should be rare but needed for
1560 					 * correctness.
1561 					 *
1562 					 * The first appearance in cached list
1563 					 * is being "turned off". The same fd
1564 					 * appear more than once in the cached
1565 					 * poll list. Find the next one on the
1566 					 * list and update the cached
1567 					 * xf_position field.
1568 					 */
1569 					for (i = count + 1; i < old_nfds; i++) {
1570 						if (cached[i].fd == tmpfd) {
1571 							pcache_update_xref(pcp,
1572 							    tmpfd, (ssize_t)i,
1573 							    which);
1574 							break;
1575 						}
1576 					}
1577 					ASSERT(i <= old_nfds);
1578 				}
1579 				/*
1580 				 * In case a new cache list is allocated,
1581 				 * need to keep both cache lists in sync
1582 				 * b/c the new one can be freed if we have
1583 				 * an error later.
1584 				 */
1585 				cached[count].fd = -1;
1586 				if (newlist != NULL) {
1587 					newlist[count].fd = -1;
1588 				}
1589 			}
1590 			if ((tmpfd = current[count].fd) >= 0) {
1591 				/*
1592 				 * add to the cached fd tbl and bitmap.
1593 				 */
1594 				if ((fp = getf(tmpfd)) == NULL) {
1595 					current[count].revents = POLLNVAL;
1596 					if (newlist != NULL) {
1597 						newlist[count].fd = -1;
1598 					}
1599 					cached[count].fd = -1;
1600 					fdcnt++;
1601 				} else {
1602 					/*
1603 					 * Here we don't care about the
1604 					 * fdcnt. We will examine the bitmap
1605 					 * later and pick up the correct
1606 					 * fdcnt there. So we never bother
1607 					 * to check value of 'cnt'.
1608 					 */
1609 					error = pcache_insert(ps, fp,
1610 					    &current[count], &cnt,
1611 					    (ssize_t)count, which);
1612 					/*
1613 					 * if no error, we want to do releasef
1614 					 * after we updated cache poll list
1615 					 * entry so that close() won't race
1616 					 * us.
1617 					 */
1618 					if (error) {
1619 						/*
1620 						 * If we encountered an error,
1621 						 * we have invalidated an
1622 						 * entry in cached poll list
1623 						 * (in pcache_delete_fd() above)
1624 						 * but failed to add one here.
1625 						 * This is OK b/c what's in the
1626 						 * cached list is consistent
1627 						 * with content of cache.
1628 						 * It will not have any ill
1629 						 * effect on next poll().
1630 						 */
1631 						releasef(tmpfd);
1632 						if (newlist != NULL) {
1633 							kmem_free(newlist,
1634 							    nfds *
1635 							    sizeof (pollfd_t));
1636 						}
1637 						return (error);
1638 					}
1639 					/*
1640 					 * If we have allocated a new(temp)
1641 					 * cache list, we need to keep both
1642 					 * in sync b/c the new one can be freed
1643 					 * if we have an error later.
1644 					 */
1645 					if (newlist != NULL) {
1646 						newlist[count].fd =
1647 						    current[count].fd;
1648 						newlist[count].events =
1649 						    current[count].events;
1650 					}
1651 					cached[count].fd = current[count].fd;
1652 					cached[count].events =
1653 					    current[count].events;
1654 					releasef(tmpfd);
1655 				}
1656 			} else {
1657 				current[count].revents = 0;
1658 			}
1659 			count++;
1660 			remain = common - count;
1661 		}
1662 	}
1663 	if (mismatch != 0) {
1664 		if (mismatch == common) {
1665 			pollstats.pollcachemiss.value.ui64++;
1666 		} else {
1667 			pollstats.pollcachephit.value.ui64++;
1668 		}
1669 	}
1670 	/*
1671 	 * take care of the non-overlapping part of the list
1672 	 */
1673 	if (nfds > old_nfds) {
1674 		ASSERT(newlist != NULL);
1675 		for (i = old_nfds; i < nfds; i++) {
1676 			/* filter out invalid events */
1677 			if (current[i].events & ~VALID_POLL_EVENTS) {
1678 				newlist[i].events = current[i].events =
1679 				current[i].events & VALID_POLL_EVENTS;
1680 			}
1681 			if ((fd = current[i].fd) < 0) {
1682 				current[i].revents = 0;
1683 				continue;
1684 			}
1685 			/*
1686 			 * add to the cached fd tbl and bitmap.
1687 			 */
1688 			if ((fp = getf(fd)) == NULL) {
1689 				current[i].revents = POLLNVAL;
1690 				newlist[i].fd = -1;
1691 				fdcnt++;
1692 				continue;
1693 			}
1694 			/*
1695 			 * Here we don't care about the
1696 			 * fdcnt. We will examine the bitmap
1697 			 * later and pick up the correct
1698 			 * fdcnt there. So we never bother to
1699 			 * check 'cnt'.
1700 			 */
1701 			error = pcache_insert(ps, fp, &current[i], &cnt,
1702 			    (ssize_t)i, which);
1703 			releasef(fd);
1704 			if (error) {
1705 				/*
1706 				 * Here we are half way through adding newly
1707 				 * polled fd. Undo enough to keep the cache
1708 				 * list consistent with the cache content.
1709 				 */
1710 				pcacheset_remove_list(ps, current, old_nfds,
1711 				    i, which, 0);
1712 				kmem_free(newlist, nfds * sizeof (pollfd_t));
1713 				return (error);
1714 			}
1715 		}
1716 	}
1717 	if (old_nfds > nfds) {
1718 		/*
1719 		 * Remove the fds which are no longer polled.
1720 		 */
1721 		pcacheset_remove_list(ps, pcsp->pcs_pollfd, nfds, old_nfds,
1722 		    which, 1);
1723 	}
1724 	/*
1725 	 * Set difference resolved. Update nfds and the cached list
1726 	 * in the pollstate struct.
1727 	 */
1728 	if (newlist != NULL) {
1729 		kmem_free(pcsp->pcs_pollfd, old_nfds * sizeof (pollfd_t));
1730 		/*
1731 		 * By now, the pollfd.revents field should
1732 		 * all be zeroed.
1733 		 */
1734 		pcsp->pcs_pollfd = newlist;
1735 		pcsp->pcs_nfds = nfds;
1736 	}
1737 	ASSERT(*fdcntp == 0);
1738 	*fdcntp = fdcnt;
1739 	/*
1740 	 * By now for every fd in pollfdp, one of the following should be
1741 	 * true. Otherwise we will miss a polled event.
1742 	 *
1743 	 * 1. the bit corresponding to the fd in bitmap is set. So VOP_POLL
1744 	 *    will be called on this fd in next poll.
1745 	 * 2. the fd is cached in the pcache (i.e. pd_php is set). So
1746 	 *    pollnotify will happen.
1747 	 */
1748 	ASSERT(pollchecksanity(ps, nfds));
1749 	/*
1750 	 * make sure cross reference between cached poll lists and cached
1751 	 * poll fds are correct.
1752 	 */
1753 	ASSERT(pollcheckxref(ps, which));
1754 	/*
1755 	 * ensure each polldat in pollcache reference a polled fd in
1756 	 * pollcacheset.
1757 	 */
1758 #ifdef DEBUG
1759 	checkpolldat(ps);
1760 #endif
1761 	return (0);
1762 }
1763 
1764 #ifdef DEBUG
1765 static int
1766 pollscanrevents(pollcache_t *pcp, pollfd_t *pollfdp, nfds_t nfds)
1767 {
1768 	int i;
1769 	int reventcnt = 0;
1770 
1771 	for (i = 0; i < nfds; i++) {
1772 		if (pollfdp[i].fd < 0) {
1773 			ASSERT(pollfdp[i].revents == 0);
1774 			continue;
1775 		}
1776 		if (pollfdp[i].revents) {
1777 			reventcnt++;
1778 		}
1779 		if (pollfdp[i].revents && (pollfdp[i].revents != POLLNVAL)) {
1780 			ASSERT(BT_TEST(pcp->pc_bitmap, pollfdp[i].fd));
1781 		}
1782 	}
1783 	return (reventcnt);
1784 }
1785 #endif	/* DEBUG */
1786 
1787 /*
1788  * read the bitmap and poll on fds corresponding to the '1' bits. The ps_lock
1789  * is held upon entry.
1790  */
1791 int
1792 pcache_poll(pollfd_t *pollfdp, pollstate_t *ps, nfds_t nfds, int *fdcntp,
1793     int which)
1794 {
1795 	int		i;
1796 	pollcache_t	*pcp;
1797 	int 		fd;
1798 	int 		begin, end, done;
1799 	pollhead_t	*php;
1800 	int		fdcnt;
1801 	int		error = 0;
1802 	file_t		*fp;
1803 	polldat_t	*pdp;
1804 	xref_t		*refp;
1805 	int		entry;
1806 
1807 	pcp = ps->ps_pcache;
1808 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1809 	ASSERT(MUTEX_HELD(&pcp->pc_lock));
1810 retry:
1811 	done = 0;
1812 	begin = 0;
1813 	fdcnt = 0;
1814 	end = pcp->pc_mapend;
1815 	while ((fdcnt < nfds) && !done) {
1816 		php = NULL;
1817 		/*
1818 		 * only poll fds which may have events
1819 		 */
1820 		fd = bt_getlowbit(pcp->pc_bitmap, begin, end);
1821 		ASSERT(fd <= end);
1822 		if (fd >= 0) {
1823 			ASSERT(pollcheckrevents(ps, begin, fd, which));
1824 			/*
1825 			 * adjust map pointers for next round
1826 			 */
1827 			if (fd == end) {
1828 				done = 1;
1829 			} else {
1830 				begin = fd + 1;
1831 			}
1832 			/*
1833 			 * A bitmap caches poll state information of
1834 			 * multiple poll lists. Call VOP_POLL only if
1835 			 * the bit corresponds to an fd in this poll
1836 			 * list.
1837 			 */
1838 			pdp = pcache_lookup_fd(pcp, fd);
1839 			ASSERT(pdp != NULL);
1840 			ASSERT(pdp->pd_ref != NULL);
1841 			refp = &pdp->pd_ref[which];
1842 			if (refp->xf_refcnt == 0)
1843 				continue;
1844 			entry = refp->xf_position;
1845 			ASSERT((entry >= 0) && (entry < nfds));
1846 			ASSERT(pollfdp[entry].fd == fd);
1847 			/*
1848 			 * Being in this routine implies that we have
1849 			 * successfully polled this fd in the past.
1850 			 * Check whether this fd was closed while we were
1851 			 * blocked in poll. This ensures that we don't
1852 			 * miss a close on the fd in case this fd is
1853 			 * reused.
1854 			 */
1855 			if (pdp->pd_fp == NULL) {
1856 				ASSERT(pdp->pd_count > 0);
1857 				pollfdp[entry].revents = POLLNVAL;
1858 				fdcnt++;
1859 				if (refp->xf_refcnt > 1) {
1860 					/*
1861 					 * this fd appeared multiple times
1862 					 * in the poll list. Find all of them.
1863 					 */
1864 					for (i = entry + 1; i < nfds; i++) {
1865 						if (pollfdp[i].fd == fd) {
1866 							pollfdp[i].revents =
1867 							    POLLNVAL;
1868 							fdcnt++;
1869 						}
1870 					}
1871 				}
1872 				pcacheset_invalidate(ps, pdp);
1873 				continue;
1874 			}
1875 			/*
1876 			 * We can be here polling a device that is being
1877 			 * closed (i.e. the file pointer is set to NULL,
1878 			 * but pollcacheclean has not happened yet).
1879 			 */
1880 			if ((fp = getf(fd)) == NULL) {
1881 				pollfdp[entry].revents = POLLNVAL;
1882 				fdcnt++;
1883 				if (refp->xf_refcnt > 1) {
1884 					/*
1885 					 * this fd appeared multiple times
1886 					 * in the poll list. Find all of them.
1887 					 */
1888 					for (i = entry + 1; i < nfds; i++) {
1889 						if (pollfdp[i].fd == fd) {
1890 							pollfdp[i].revents =
1891 							    POLLNVAL;
1892 							fdcnt++;
1893 						}
1894 					}
1895 				}
1896 				continue;
1897 			}
1898 			ASSERT(pdp->pd_fp == fp);
1899 			ASSERT(infpollinfo(fd));
1900 			/*
1901 			 * Since we no longer hold the pollhead lock across
1902 			 * VOP_POLL, the pollunlock logic can be simplified.
1903 			 */
1904 			ASSERT(pdp->pd_php == NULL ||
1905 			    MUTEX_NOT_HELD(PHLOCK(pdp->pd_php)));
1906 			/*
1907 			 * The underlying file system may set a "pollpending"
1908 			 * flag when it sees that the poll may block. The
1909 			 * wakeup thread calls pollwakeup() if pollpending
1910 			 * is set. Pass a 0 fdcnt so that the underlying
1911 			 * file system will set the "pollpending" flag when
1912 			 * there are no polled events.
1913 			 *
1914 			 * Use pollfdp[].events for the actual poll because
1915 			 * pd_events is the union of all cached poll events
1916 			 * on this fd. The events parameter also affects
1917 			 * how the polled device sets the "pollpending"
1918 			 * flag.
1919 			 */
1920 			ASSERT(curthread->t_pollcache == NULL);
1921 			error = VOP_POLL(fp->f_vnode, pollfdp[entry].events, 0,
1922 			    &pollfdp[entry].revents, &php);
1923 			/*
1924 			 * releasef() only after we are completely done with
1925 			 * this cached poll entry, to prevent a close() from
1926 			 * coming in and clearing this entry.
1927 			 */
1928 			if (error) {
1929 				releasef(fd);
1930 				break;
1931 			}
1932 			/*
1933 			 * layered devices (e.g. console driver)
1934 			 * may change the vnode and thus the pollhead
1935 			 * pointer out from underneath us.
1936 			 */
1937 			if (php != NULL && pdp->pd_php != NULL &&
1938 			    php != pdp->pd_php) {
1939 				releasef(fd);
1940 				pollhead_delete(pdp->pd_php, pdp);
1941 				pdp->pd_php = php;
1942 				pollhead_insert(php, pdp);
1943 				/*
1944 				 * We could have missed a wakeup on the new
1945 				 * target device. Make sure the new target
1946 				 * gets polled once.
1947 				 */
1948 				BT_SET(pcp->pc_bitmap, fd);
1949 				goto retry;
1950 			}
1951 
1952 			if (pollfdp[entry].revents) {
1953 				ASSERT(refp->xf_refcnt >= 1);
1954 				fdcnt++;
1955 				if (refp->xf_refcnt > 1) {
1956 					/*
1957 					 * this fd appeared multiple times
1958 					 * in the poll list. This is rare but
1959 					 * we have to look at all of them for
1960 					 * correctness.
1961 					 */
1962 					error = plist_chkdupfd(fp, pdp, ps,
1963 					    pollfdp, entry, &fdcnt);
1964 					if (error > 0) {
1965 						releasef(fd);
1966 						break;
1967 					}
1968 					if (error < 0) {
1969 						goto retry;
1970 					}
1971 				}
1972 				releasef(fd);
1973 			} else {
1974 				/*
1975 				 * VOP_POLL didn't return any revents. We can
1976 				 * clear the bit in bitmap only if we have the
1977 				 * pollhead ptr cached and no other cached
1978 				 * entry is polling different events on this fd.
1979 				 * VOP_POLL may have dropped the ps_lock. Make
1980 				 * sure a pollwakeup has not happened before
1981 				 * clearing the bit.
1982 				 */
1983 				if ((pdp->pd_php != NULL) &&
1984 				    (pollfdp[entry].events == pdp->pd_events) &&
1985 				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
1986 					BT_CLEAR(pcp->pc_bitmap, fd);
1987 				}
1988 				/*
1989 				 * if the fd can be cached now but not before,
1990 				 * do it now.
1991 				 */
1992 				if ((pdp->pd_php == NULL) && (php != NULL)) {
1993 					pdp->pd_php = php;
1994 					pollhead_insert(php, pdp);
1995 					/*
1996 					 * We are inserting a polldat struct for
1997 					 * the first time. We may have missed a
1998 					 * wakeup on this device. Re-poll once.
1999 					 * This should be a rare event.
2000 					 */
2001 					releasef(fd);
2002 					goto retry;
2003 				}
2004 				if (refp->xf_refcnt > 1) {
2005 					/*
2006 					 * this fd appeared multiple times
2007 					 * in the poll list. This is rare but
2008 					 * we have to look at all of them for
2009 					 * correctness.
2010 					 */
2011 					error = plist_chkdupfd(fp, pdp, ps,
2012 					    pollfdp, entry, &fdcnt);
2013 					if (error > 0) {
2014 						releasef(fd);
2015 						break;
2016 					}
2017 					if (error < 0) {
2018 						goto retry;
2019 					}
2020 				}
2021 				releasef(fd);
2022 			}
2023 		} else {
2024 			done = 1;
2025 			ASSERT(pollcheckrevents(ps, begin, end + 1, which));
2026 		}
2027 	}
2028 	if (!error) {
2029 		ASSERT(*fdcntp + fdcnt == pollscanrevents(pcp, pollfdp, nfds));
2030 		*fdcntp += fdcnt;
2031 	}
2032 	return (error);
2033 }
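
/*
 * Illustrative sketch, not part of this file's build (hence the #if 0):
 * pcache_poll() above visits only the fds whose bits are set in pc_bitmap,
 * using a bt_getlowbit()-style "lowest set bit in [begin, end]" scan and
 * advancing 'begin' past each hit.  The standalone userland model below
 * shows the same loop shape over a plain bitmap; the names
 * (bitmap_getlowbit, NWORDS) are hypothetical and exist only for this
 * example.
 */
#if 0
#include <stdio.h>
#include <limits.h>

#define	NBITS	(sizeof (unsigned long) * CHAR_BIT)
#define	NWORDS	4

/* Return the lowest set bit in [begin, end], or -1 if none. */
static int
bitmap_getlowbit(const unsigned long *map, int begin, int end)
{
	int i;

	/* linear model of the word-at-a-time kernel primitive */
	for (i = begin; i <= end; i++) {
		if (map[i / NBITS] & (1UL << (i % NBITS)))
			return (i);
	}
	return (-1);
}

int
main(void)
{
	unsigned long map[NWORDS] = { 0 };
	int begin = 0, end = NWORDS * NBITS - 1, fd;

	map[3 / NBITS] |= 1UL << (3 % NBITS);	/* pretend fds 3 and 70 */
	map[70 / NBITS] |= 1UL << (70 % NBITS);	/* may have events */

	while ((fd = bitmap_getlowbit(map, begin, end)) >= 0) {
		printf("would VOP_POLL fd %d\n", fd);
		if (fd == end)
			break;
		begin = fd + 1;		/* adjust map pointer for next round */
	}
	return (0);
}
#endif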
2034 
2035 /*
2036  * Going through the poll list without much locking. Poll all fds and
2037  * cache all valid fds in the pollcache.
2038  */
2039 int
2040 pcacheset_cache_list(pollstate_t *ps, pollfd_t *fds, int *fdcntp, int which)
2041 {
2042 	pollfd_t	*pollfdp = ps->ps_pollfd;
2043 	pollcacheset_t	*pcacheset = ps->ps_pcacheset;
2044 	pollfd_t	*newfdlist;
2045 	int		i;
2046 	int		fd;
2047 	file_t		*fp;
2048 	int		error = 0;
2049 
2050 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2051 	ASSERT(which < ps->ps_nsets);
2052 	ASSERT(pcacheset != NULL);
2053 	ASSERT(pcacheset[which].pcs_pollfd == NULL);
2054 	newfdlist  = kmem_alloc(ps->ps_nfds * sizeof (pollfd_t), KM_SLEEP);
2055 	/*
2056 	 * cache the new poll list in the pollcacheset.
2057 	 */
2058 	bcopy(pollfdp, newfdlist, sizeof (pollfd_t) * ps->ps_nfds);
2059 
2060 	pcacheset[which].pcs_pollfd = newfdlist;
2061 	pcacheset[which].pcs_nfds = ps->ps_nfds;
2062 	pcacheset[which].pcs_usradr = (uintptr_t)fds;
2063 
2064 	/*
2065 	 * We saved a copy of the current poll fd list in one pollcacheset.
2066 	 * The 'revents' field of the new list is not yet set to 0. Looping
2067 	 * through the new list just to do that is expensive, so we do it
2068 	 * while polling the list.
2069 	 */
2070 	for (i = 0; i < ps->ps_nfds; i++) {
2071 		fd = pollfdp[i].fd;
2072 		/*
2073 		 * We also filter out the illegal poll events in the event
2074 		 * field for the cached poll list/set.
2075 		 */
2076 		if (pollfdp[i].events & ~VALID_POLL_EVENTS) {
2077 			newfdlist[i].events = pollfdp[i].events =
2078 			    pollfdp[i].events & VALID_POLL_EVENTS;
2079 		}
2080 		if (fd < 0) {
2081 			pollfdp[i].revents = 0;
2082 			continue;
2083 		}
2084 		if ((fp = getf(fd)) == NULL) {
2085 			pollfdp[i].revents = POLLNVAL;
2086 			/*
2087 			 * invalidate this cache entry in the cached poll list
2088 			 */
2089 			newfdlist[i].fd = -1;
2090 			(*fdcntp)++;
2091 			continue;
2092 		}
2093 		/*
2094 		 * cache this fd.
2095 		 */
2096 		error = pcache_insert(ps, fp, &pollfdp[i], fdcntp, (ssize_t)i,
2097 		    which);
2098 		releasef(fd);
2099 		if (error) {
2100 			/*
2101 			 * Here we are halfway through caching a new
2102 			 * poll list. Undo everything.
2103 			 */
2104 			pcacheset_remove_list(ps, pollfdp, 0, i, which, 0);
2105 			kmem_free(newfdlist, ps->ps_nfds * sizeof (pollfd_t));
2106 			pcacheset[which].pcs_pollfd = NULL;
2107 			pcacheset[which].pcs_usradr = NULL;
2108 			break;
2109 		}
2110 	}
2111 	return (error);
2112 }
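
/*
 * Illustrative sketch, not part of this file's build (hence the #if 0):
 * the loop above strips unsupported bits from pollfd.events with a simple
 * mask before the request is cached.  The userland model below shows the
 * same filtering; the mask value and helper name are hypothetical and
 * chosen only for this example.
 */
#if 0
#include <poll.h>
#include <stdio.h>

#define	EXAMPLE_VALID_EVENTS	(POLLIN | POLLPRI | POLLOUT)

static short
filter_events(short events)
{
	/* keep only the bits this example knows how to poll */
	return (events & EXAMPLE_VALID_EVENTS);
}

int
main(void)
{
	short requested = POLLIN | 0x4000;	/* 0x4000: a bogus bit */

	printf("requested 0x%hx -> cached 0x%hx\n",
	    requested, filter_events(requested));
	return (0);
}
#endif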
2113 
2114 /*
2115  * Called by pollcacheclean() to set the fp to NULL. It also sets the polled
2116  * events in pcacheset entries to the special event 'POLLCLOSED'. Do a
2117  * pollwakeup to wake any sleeping poller, then remove the polldat from the
2118  * driver. The routine is called with ps_lock held.
2119  */
2120 void
2121 pcache_clean_entry(pollstate_t *ps, int fd)
2122 {
2123 	pollcache_t	*pcp;
2124 	polldat_t	*pdp;
2125 	int		i;
2126 
2127 	ASSERT(ps != NULL);
2128 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2129 	pcp = ps->ps_pcache;
2130 	ASSERT(pcp);
2131 	pdp = pcache_lookup_fd(pcp, fd);
2132 	ASSERT(pdp != NULL);
2133 	/*
2134 	 * the corresponding fpollinfo in fi_list has been removed by
2135 	 * a close on this fd. Reset the cached fp ptr here.
2136 	 */
2137 	pdp->pd_fp = NULL;
2138 	/*
2139 	 * XXX - This routine also touches data in pcacheset struct.
2140 	 *
2141 	 * set the event in cached poll lists to POLLCLOSED. This invalidates
2142 	 * the cached poll fd entry in that poll list, which will force a
2143 	 * removal of this cached entry in the next poll(). The cleanup is done
2144 	 * at removal time.
2145 	 */
2146 	ASSERT(pdp->pd_ref != NULL);
2147 	for (i = 0; i < ps->ps_nsets; i++) {
2148 		xref_t		*refp;
2149 		pollcacheset_t	*pcsp;
2150 
2151 		refp = &pdp->pd_ref[i];
2152 		if (refp->xf_refcnt) {
2153 			ASSERT(refp->xf_position >= 0);
2154 			pcsp = &ps->ps_pcacheset[i];
2155 			if (refp->xf_refcnt == 1) {
2156 				pcsp->pcs_pollfd[refp->xf_position].events =
2157 				    (short)POLLCLOSED;
2158 			}
2159 			if (refp->xf_refcnt > 1) {
2160 				int	j;
2161 				/*
2162 				 * mark every matching entry in pcs_pollfd
2163 				 */
2164 				for (j = refp->xf_position;
2165 				    j < pcsp->pcs_nfds; j++) {
2166 					if (pcsp->pcs_pollfd[j].fd == fd) {
2167 						pcsp->pcs_pollfd[j].events =
2168 						    (short)POLLCLOSED;
2169 					}
2170 				}
2171 			}
2172 		}
2173 	}
2174 	if (pdp->pd_php) {
2175 		pollwakeup(pdp->pd_php, POLLHUP);
2176 		pollhead_delete(pdp->pd_php, pdp);
2177 		pdp->pd_php = NULL;
2178 	}
2179 }
2180 
2181 /*
2182  * This is the first time this thread has ever polled,
2183  * so we have to create its pollstate structure.
2184  * This will persist for the life of the thread,
2185  * until it calls pollcleanup().
2186  */
2187 pollstate_t *
2188 pollstate_create(void)
2189 {
2190 	pollstate_t *ps;
2191 
2192 	ps = kmem_zalloc(sizeof (pollstate_t), KM_SLEEP);
2193 	ps->ps_nsets = POLLFDSETS;
2194 	ps->ps_pcacheset = pcacheset_create(ps->ps_nsets);
2195 	return (ps);
2196 }
2197 
2198 void
2199 pollstate_destroy(pollstate_t *ps)
2200 {
2201 	if (ps->ps_pollfd != NULL) {
2202 		kmem_free(ps->ps_pollfd, ps->ps_nfds * sizeof (pollfd_t));
2203 		ps->ps_pollfd = NULL;
2204 	}
2205 	if (ps->ps_pcache != NULL) {
2206 		pcache_destroy(ps->ps_pcache);
2207 		ps->ps_pcache = NULL;
2208 	}
2209 	pcacheset_destroy(ps->ps_pcacheset, ps->ps_nsets);
2210 	ps->ps_pcacheset = NULL;
2211 	if (ps->ps_dpbuf != NULL) {
2212 		kmem_free(ps->ps_dpbuf, ps->ps_dpbufsize * sizeof (pollfd_t));
2213 		ps->ps_dpbuf = NULL;
2214 	}
2215 	mutex_destroy(&ps->ps_lock);
2216 	kmem_free(ps, sizeof (pollstate_t));
2217 }
2218 
2219 /*
2220  * We are holding the appropriate uf_lock entering this routine.
2221  * Bump up the pc_busy count to prevent the thread from exiting.
2222  */
2223 void
2224 pollblockexit(fpollinfo_t *fpip)
2225 {
2226 	for (; fpip; fpip = fpip->fp_next) {
2227 		pollcache_t *pcp = fpip->fp_thread->t_pollstate->ps_pcache;
2228 
2229 		mutex_enter(&pcp->pc_no_exit);
2230 		pcp->pc_busy++;  /* prevents exit()'s */
2231 		mutex_exit(&pcp->pc_no_exit);
2232 	}
2233 }
2234 
2235 /*
2236  * Complete phase 2 of cached poll fd cleanup. Call pcache_clean_entry to mark
2237  * the pcacheset events field POLLCLOSED to force the next poll() to remove
2238  * this cache entry. We can't clean up the polldat entry here because an
2239  * lwp blocked in poll() needs the info to return. Wake up anyone blocked in
2240  * poll and let the exiting lwp go. No lock is held upon entry, so it's OK
2241  * for pcache_clean_entry to call pollwakeup().
2242  */
2243 void
2244 pollcacheclean(fpollinfo_t *fip, int fd)
2245 {
2246 	struct fpollinfo	*fpip, *fpip2;
2247 
2248 	fpip = fip;
2249 	while (fpip) {
2250 		pollstate_t *ps = fpip->fp_thread->t_pollstate;
2251 		pollcache_t *pcp = ps->ps_pcache;
2252 
2253 		mutex_enter(&ps->ps_lock);
2254 		pcache_clean_entry(ps, fd);
2255 		mutex_exit(&ps->ps_lock);
2256 		mutex_enter(&pcp->pc_no_exit);
2257 		pcp->pc_busy--;
2258 		if (pcp->pc_busy == 0) {
2259 			/*
2260 			 * Wakeup the thread waiting in
2261 			 * thread_exit().
2262 			 */
2263 			cv_signal(&pcp->pc_busy_cv);
2264 		}
2265 		mutex_exit(&pcp->pc_no_exit);
2266 
2267 		fpip2 = fpip;
2268 		fpip = fpip->fp_next;
2269 		kmem_free(fpip2, sizeof (fpollinfo_t));
2270 	}
2271 }
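
/*
 * Illustrative sketch, not part of this file's build (hence the #if 0):
 * pollblockexit() and pollcacheclean() above bracket the cleanup with the
 * pc_busy counter, and the exiting thread waits on pc_busy_cv until that
 * count drops back to zero.  The pthread model below shows the same
 * hold/release/wait-for-zero pattern; every name in it is hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t busy_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t busy_cv = PTHREAD_COND_INITIALIZER;
static int busy;			/* number of in-flight cleanups */

static void
busy_hold(void)				/* cf. pollblockexit() */
{
	pthread_mutex_lock(&busy_lock);
	busy++;				/* prevents the exit from completing */
	pthread_mutex_unlock(&busy_lock);
}

static void
busy_release(void)			/* cf. the tail of pollcacheclean() */
{
	pthread_mutex_lock(&busy_lock);
	if (--busy == 0)
		pthread_cond_signal(&busy_cv);	/* wake the exiting thread */
	pthread_mutex_unlock(&busy_lock);
}

static void
busy_wait_for_zero(void)		/* cf. the wait in thread_exit() */
{
	pthread_mutex_lock(&busy_lock);
	while (busy != 0)
		pthread_cond_wait(&busy_cv, &busy_lock);
	pthread_mutex_unlock(&busy_lock);
}

int
main(void)
{
	busy_hold();
	busy_release();
	busy_wait_for_zero();		/* returns immediately: busy == 0 */
	return (0);
}
#endif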
2272 
2273 /*
2274  * One of the cache line counters is wrapping around. Reset all cache line
2275  * counters to zero except one. This is simplistic, but probably works
2276  * effectively.
2277  */
2278 void
2279 pcacheset_reset_count(pollstate_t *ps, int index)
2280 {
2281 	int	i;
2282 
2283 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2284 	for (i = 0; i < ps->ps_nsets; i++) {
2285 		if (ps->ps_pcacheset[i].pcs_pollfd != NULL) {
2286 			ps->ps_pcacheset[i].pcs_count = 0;
2287 		}
2288 	}
2289 	ps->ps_pcacheset[index].pcs_count = 1;
2290 }
2291 
2292 /*
2293  * this routine implements the poll cache list replacement policy.
2294  * It currently chooses the "least used" entry.
2295  */
2296 int
2297 pcacheset_replace(pollstate_t *ps)
2298 {
2299 	int i;
2300 	int index = 0;
2301 
2302 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2303 	for (i = 1; i < ps->ps_nsets; i++) {
2304 		if (ps->ps_pcacheset[index].pcs_count >
2305 		    ps->ps_pcacheset[i].pcs_count) {
2306 			index = i;
2307 		}
2308 	}
2309 	ps->ps_pcacheset[index].pcs_count = 0;
2310 	return (index);
2311 }
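
/*
 * Illustrative sketch, not part of this file's build (hence the #if 0):
 * pcacheset_replace() above picks the cached set with the smallest
 * pcs_count as the victim, i.e. a "least used" replacement policy.  The
 * standalone model below does the same scan over a plain array of use
 * counts; the names are hypothetical.
 */
#if 0
#include <stdio.h>

/* Return the index of the smallest counter; that slot becomes the victim. */
static int
pick_least_used(const unsigned int *count, int nsets)
{
	int i, victim = 0;

	for (i = 1; i < nsets; i++) {
		if (count[victim] > count[i])
			victim = i;
	}
	return (victim);
}

int
main(void)
{
	unsigned int counts[] = { 7, 2, 9, 2 };

	/* ties resolve to the earliest slot, as in the scan above */
	printf("replace set %d\n", pick_least_used(counts, 4));
	return (0);
}
#endif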
2312 
2313 /*
2314  * this routine is called by strclose to remove any remaining polldat structs
2315  * on the pollhead list of the device being closed. There are two reasons why
2316  * the polldat structures may still remain on the pollhead list:
2317  *
2318  * (1) The layered device (e.g. the console driver).
2319  * In this case, the existence of a polldat implies that the thread putting
2320  * the polldat on this list has not exited yet. Before the thread exits, it
2321  * will have to hold this pollhead lock to remove the polldat. So holding the
2322  * pollhead lock here effectively prevents the thread which put the polldat
2323  * on this list from exiting.
2324  *
2325  * (2) /dev/poll.
2326  * When a polled fd is cached in /dev/poll, its polldat will remain on the
2327  * pollhead list if the process has not done a POLLREMOVE before closing the
2328  * polled fd. We just unlink it here.
2329  */
2330 void
2331 pollhead_clean(pollhead_t *php)
2332 {
2333 	polldat_t	*pdp;
2334 
2335 	/*
2336 	 * In case (1), while we must prevent the thread in question from
2337 	 * exiting, we must also obey the proper locking order, i.e.
2338 	 * (ps_lock -> phlock).
2339 	 */
2340 	PH_ENTER(php);
2341 	while (php->ph_list != NULL) {
2342 		pollstate_t	*ps;
2343 		pollcache_t	*pcp;
2344 
2345 		pdp = php->ph_list;
2346 		ASSERT(pdp->pd_php == php);
2347 		if (pdp->pd_thread == NULL) {
2348 			/*
2349 			 * This is case (2). Since the ph_lock is sufficient
2350 			 * to synchronize this lwp with any other /dev/poll
2351 			 * lwp, just unlink the polldat.
2352 			 */
2353 			php->ph_list = pdp->pd_next;
2354 			pdp->pd_php = NULL;
2355 			pdp->pd_next = NULL;
2356 			continue;
2357 		}
2358 		ps = pdp->pd_thread->t_pollstate;
2359 		ASSERT(ps != NULL);
2360 		pcp = pdp->pd_pcache;
2361 		ASSERT(pcp != NULL);
2362 		mutex_enter(&pcp->pc_no_exit);
2363 		pcp->pc_busy++;  /* prevents exit()'s */
2364 		mutex_exit(&pcp->pc_no_exit);
2365 		/*
2366 		 * Now get the locks in proper order to avoid deadlock.
2367 		 */
2368 		PH_EXIT(php);
2369 		mutex_enter(&ps->ps_lock);
2370 		/*
2371 		 * while the pollhead lock was dropped, the element could
2372 		 * already have been taken off the list.
2373 		 */
2374 		PH_ENTER(php);
2375 		if (pdp->pd_php == php) {
2376 			ASSERT(pdp == php->ph_list);
2377 			php->ph_list = pdp->pd_next;
2378 			pdp->pd_php = NULL;
2379 			pdp->pd_next = NULL;
2380 		}
2381 		PH_EXIT(php);
2382 		mutex_exit(&ps->ps_lock);
2383 		mutex_enter(&pcp->pc_no_exit);
2384 		pcp->pc_busy--;
2385 		if (pcp->pc_busy == 0) {
2386 			/*
2387 			 * Wakeup the thread waiting in
2388 			 * thread_exit().
2389 			 */
2390 			cv_signal(&pcp->pc_busy_cv);
2391 		}
2392 		mutex_exit(&pcp->pc_no_exit);
2393 		PH_ENTER(php);
2394 	}
2395 	PH_EXIT(php);
2396 }
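
/*
 * Illustrative sketch, not part of this file's build (hence the #if 0):
 * pollhead_clean() above starts out holding only the pollhead lock, but the
 * required order is ps_lock -> phlock.  So it drops the pollhead lock, takes
 * ps_lock, re-takes the pollhead lock, and then re-validates the element,
 * since the list may have changed while no lock was held (the kernel pins
 * the polldat via pc_busy first, so it cannot disappear).  The pthread model
 * below shows the same drop/reacquire-in-order/revalidate pattern; every
 * name in it is hypothetical.
 */
#if 0
#include <pthread.h>
#include <stddef.h>

struct node {
	struct node	*next;
	int		on_list;	/* still linked? */
};

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;	/* "ps_lock" */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* "phlock" */
static struct node *list_head;

/* Called with list_lock held; returns with list_lock still held. */
static void
remove_head_in_order(void)
{
	struct node *np = list_head;

	if (np == NULL)
		return;
	pthread_mutex_unlock(&list_lock);	/* drop the inner lock */
	pthread_mutex_lock(&outer_lock);	/* now take locks in order */
	pthread_mutex_lock(&list_lock);
	if (np->on_list && np == list_head) {	/* revalidate after the gap */
		list_head = np->next;
		np->on_list = 0;
		np->next = NULL;
	}
	pthread_mutex_unlock(&outer_lock);
}

int
main(void)
{
	static struct node n = { NULL, 1 };

	list_head = &n;
	pthread_mutex_lock(&list_lock);
	remove_head_in_order();
	pthread_mutex_unlock(&list_lock);
	return (list_head != NULL);		/* 0 on success */
}
#endif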
2397 
2398 /*
2399  * The remove_list is called to clean up a partially cached 'current' list or
2400  * to remove a partial list which is no longer cached. The flag value of 1
2401  * indicates the second case.
2402  */
2403 void
2404 pcacheset_remove_list(pollstate_t *ps, pollfd_t *pollfdp, int start, int end,
2405     int cacheindex, int flag)
2406 {
2407 	int i;
2408 
2409 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2410 	for (i = start; i < end; i++) {
2411 		if ((pollfdp[i].fd >= 0) &&
2412 		    (flag || !(pollfdp[i].revents & POLLNVAL))) {
2413 			if (pcache_delete_fd(ps, pollfdp[i].fd, i, cacheindex,
2414 			    (uint_t)pollfdp[i].events)) {
2415 				int j;
2416 				int fd = pollfdp[i].fd;
2417 
2418 				for (j = i + 1; j < end; j++) {
2419 					if (pollfdp[j].fd == fd) {
2420 						pcache_update_xref(
2421 						    ps->ps_pcache, fd,
2422 						    (ssize_t)j, cacheindex);
2423 						break;
2424 					}
2425 				}
2426 				ASSERT(j <= end);
2427 			}
2428 		}
2429 	}
2430 }
2431 
2432 #ifdef DEBUG
2433 
2434 #include <sys/strsubr.h>
2435 /*
2436  * make sure curthread is not on anyone's pollhead list any more.
2437  */
2438 static void
2439 pollcheckphlist()
2440 {
2441 	int i;
2442 	file_t *fp;
2443 	uf_entry_t *ufp;
2444 	uf_info_t *fip = P_FINFO(curproc);
2445 	struct stdata *stp;
2446 	polldat_t *pdp;
2447 
2448 	mutex_enter(&fip->fi_lock);
2449 	for (i = 0; i < fip->fi_nfiles; i++) {
2450 		UF_ENTER(ufp, fip, i);
2451 		if ((fp = ufp->uf_file) != NULL) {
2452 			if ((stp = fp->f_vnode->v_stream) != NULL) {
2453 				PH_ENTER(&stp->sd_pollist);
2454 				pdp = stp->sd_pollist.ph_list;
2455 				while (pdp) {
2456 					ASSERT(pdp->pd_thread != curthread);
2457 					pdp = pdp->pd_next;
2458 				}
2459 				PH_EXIT(&stp->sd_pollist);
2460 			}
2461 		}
2462 		UF_EXIT(ufp);
2463 	}
2464 	mutex_exit(&fip->fi_lock);
2465 }
2466 
2467 /*
2468  * for a resolved poll list in a pcacheset, the xref info in the pcache
2469  * should be consistent with that poll list.
2470  */
2471 static int
2472 pollcheckxref(pollstate_t *ps, int cacheindex)
2473 {
2474 	pollfd_t *pollfdp = ps->ps_pcacheset[cacheindex].pcs_pollfd;
2475 	pollcache_t *pcp = ps->ps_pcache;
2476 	polldat_t *pdp;
2477 	int	i;
2478 	xref_t	*refp;
2479 
2480 	for (i = 0; i < ps->ps_pcacheset[cacheindex].pcs_nfds; i++) {
2481 		if (pollfdp[i].fd < 0) {
2482 			continue;
2483 		}
2484 		pdp = pcache_lookup_fd(pcp, pollfdp[i].fd);
2485 		ASSERT(pdp != NULL);
2486 		ASSERT(pdp->pd_ref != NULL);
2487 		refp = &pdp->pd_ref[cacheindex];
2488 		if (refp->xf_position >= 0) {
2489 			ASSERT(refp->xf_refcnt >= 1);
2490 			ASSERT(pollfdp[refp->xf_position].fd == pdp->pd_fd);
2491 			if (refp->xf_refcnt > 1) {
2492 				int	j;
2493 				int	count = 0;
2494 
2495 				for (j = refp->xf_position;
2496 				    j < ps->ps_pcacheset[cacheindex].pcs_nfds;
2497 				    j++) {
2498 					if (pollfdp[j].fd == pdp->pd_fd) {
2499 						count++;
2500 					}
2501 				}
2502 				ASSERT(count == refp->xf_refcnt);
2503 			}
2504 		}
2505 	}
2506 	return (1);
2507 }
2508 
2509 /*
2510  * For every cached pollfd, its polldat struct should be consistent with
2511  * what is in the pcacheset lists.
2512  */
2513 static void
2514 checkpolldat(pollstate_t *ps)
2515 {
2516 	pollcache_t	*pcp = ps->ps_pcache;
2517 	polldat_t	**hashtbl;
2518 	int		i;
2519 
2520 	hashtbl = pcp->pc_hash;
2521 	for (i = 0; i < pcp->pc_hashsize; i++) {
2522 		polldat_t	*pdp;
2523 
2524 		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
2525 			ASSERT(pdp->pd_ref != NULL);
2526 			if (pdp->pd_count > 0) {
2527 				xref_t		*refp;
2528 				int		j;
2529 				pollcacheset_t	*pcsp;
2530 				pollfd_t	*pollfd;
2531 
2532 				for (j = 0; j < ps->ps_nsets; j++) {
2533 					refp = &pdp->pd_ref[j];
2534 					if (refp->xf_refcnt > 0) {
2535 						pcsp = &ps->ps_pcacheset[j];
2536 				ASSERT(refp->xf_position < pcsp->pcs_nfds);
2537 						pollfd = pcsp->pcs_pollfd;
2538 			ASSERT(pdp->pd_fd == pollfd[refp->xf_position].fd);
2539 					}
2540 				}
2541 			}
2542 		}
2543 	}
2544 }
2545 
2546 /*
2547  * every wfd element on ph_list must have a corresponding fpollinfo on the
2548  * uf_fpollinfo list. This is a variation of infpollinfo() w/o holding locks.
2549  */
2550 void
2551 checkwfdlist(vnode_t *vp, fpollinfo_t *fpip)
2552 {
2553 	stdata_t *stp;
2554 	polldat_t *pdp;
2555 	fpollinfo_t *fpip2;
2556 
2557 	if ((stp = vp->v_stream) == NULL) {
2558 		return;
2559 	}
2560 	PH_ENTER(&stp->sd_pollist);
2561 	for (pdp = stp->sd_pollist.ph_list; pdp; pdp = pdp->pd_next) {
2562 		if (pdp->pd_thread->t_procp == curthread->t_procp) {
2563 			for (fpip2 = fpip; fpip2; fpip2 = fpip2->fp_next) {
2564 				if (pdp->pd_thread == fpip2->fp_thread) {
2565 					break;
2566 				}
2567 			}
2568 			ASSERT(fpip2 != NULL);
2569 		}
2570 	}
2571 	PH_EXIT(&stp->sd_pollist);
2572 }
2573 
2574 /*
2575  * For each cached fd whose bit is not set in bitmap, its revents field in
2576  * current poll list should be 0.
2577  */
2578 static int
2579 pollcheckrevents(pollstate_t *ps, int begin, int end, int cacheindex)
2580 {
2581 	pollcache_t	*pcp = ps->ps_pcache;
2582 	pollfd_t	*pollfdp = ps->ps_pollfd;
2583 	int		i;
2584 
2585 	for (i = begin; i < end; i++) {
2586 		polldat_t	*pdp;
2587 
2588 		ASSERT(!BT_TEST(pcp->pc_bitmap, i));
2589 		pdp = pcache_lookup_fd(pcp, i);
2590 		if (pdp && pdp->pd_fp != NULL) {
2591 			xref_t *refp;
2592 			int entry;
2593 
2594 			ASSERT(pdp->pd_ref != NULL);
2595 			refp = &pdp->pd_ref[cacheindex];
2596 			if (refp->xf_refcnt == 0) {
2597 				continue;
2598 			}
2599 			entry = refp->xf_position;
2600 			ASSERT(entry >= 0);
2601 			ASSERT(pollfdp[entry].revents == 0);
2602 			if (refp->xf_refcnt > 1) {
2603 				int j;
2604 
2605 				for (j = entry + 1; j < ps->ps_nfds; j++) {
2606 					if (pollfdp[j].fd == i) {
2607 						ASSERT(pollfdp[j].revents == 0);
2608 					}
2609 				}
2610 			}
2611 		}
2612 	}
2613 	return (1);
2614 }
2615 
2616 #endif	/* DEBUG */
2617 
2618 pollcache_t *
2619 pcache_alloc()
2620 {
2621 	return (kmem_zalloc(sizeof (pollcache_t), KM_SLEEP));
2622 }
2623 
2624 void
2625 pcache_create(pollcache_t *pcp, nfds_t nfds)
2626 {
2627 	size_t	mapsize;
2628 
2629 	/*
2630 	 * allocate enough bits for the poll fd list
2631 	 */
2632 	if ((mapsize = POLLMAPCHUNK) <= nfds) {
2633 		mapsize = (nfds + POLLMAPCHUNK - 1) & ~(POLLMAPCHUNK - 1);
2634 	}
2635 	pcp->pc_bitmap = kmem_zalloc((mapsize / BT_NBIPUL) * sizeof (ulong_t),
2636 	    KM_SLEEP);
2637 	pcp->pc_mapsize = mapsize;
2638 	/*
2639 	 * The hash size is at least POLLHASHCHUNKSZ. If the user polls a large
2640 	 * number of fds to start with, allocate a bigger hash table (to the
2641 	 * nearest multiple of POLLHASHCHUNKSZ) because dynamically growing a
2642 	 * hash table is expensive.
2643 	 */
2644 	if (nfds < POLLHASHCHUNKSZ) {
2645 		pcp->pc_hashsize = POLLHASHCHUNKSZ;
2646 	} else {
2647 		pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) &
2648 		    ~(POLLHASHCHUNKSZ - 1);
2649 	}
2650 	pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *),
2651 	    KM_SLEEP);
2652 }
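
/*
 * Illustrative sketch, not part of this file's build (hence the #if 0):
 * pcache_create() above rounds the bitmap and hash sizes up to the next
 * multiple of a power-of-two chunk using the (n + chunk - 1) & ~(chunk - 1)
 * idiom.  The standalone model below shows the same arithmetic; the chunk
 * value is hypothetical.
 */
#if 0
#include <stdio.h>

#define	EXAMPLE_CHUNK	128u		/* must be a power of two */

static unsigned int
roundup_chunk(unsigned int n)
{
	return ((n + EXAMPLE_CHUNK - 1) & ~(EXAMPLE_CHUNK - 1));
}

int
main(void)
{
	/* prints: 128 128 256 */
	printf("%u %u %u\n", roundup_chunk(1), roundup_chunk(128),
	    roundup_chunk(129));
	return (0);
}
#endif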
2653 
2654 void
2655 pcache_destroy(pollcache_t *pcp)
2656 {
2657 	polldat_t	**hashtbl;
2658 	int i;
2659 
2660 	hashtbl = pcp->pc_hash;
2661 	for (i = 0; i < pcp->pc_hashsize; i++) {
2662 		if (hashtbl[i] != NULL) {
2663 			polldat_t *pdp, *pdp2;
2664 
2665 			pdp = hashtbl[i];
2666 			while (pdp != NULL) {
2667 				pdp2 = pdp->pd_hashnext;
2668 				if (pdp->pd_ref != NULL) {
2669 					kmem_free(pdp->pd_ref, sizeof (xref_t) *
2670 					    pdp->pd_nsets);
2671 				}
2672 				kmem_free(pdp, sizeof (polldat_t));
2673 				pdp = pdp2;
2674 				pcp->pc_fdcount--;
2675 			}
2676 		}
2677 	}
2678 	ASSERT(pcp->pc_fdcount == 0);
2679 	kmem_free(pcp->pc_hash, sizeof (polldat_t *) * pcp->pc_hashsize);
2680 	kmem_free(pcp->pc_bitmap,
2681 	    sizeof (ulong_t) * (pcp->pc_mapsize/BT_NBIPUL));
2682 	mutex_destroy(&pcp->pc_no_exit);
2683 	mutex_destroy(&pcp->pc_lock);
2684 	cv_destroy(&pcp->pc_cv);
2685 	cv_destroy(&pcp->pc_busy_cv);
2686 	kmem_free(pcp, sizeof (pollcache_t));
2687 }
2688 
2689 pollcacheset_t *
2690 pcacheset_create(int nsets)
2691 {
2692 	return (kmem_zalloc(sizeof (pollcacheset_t) * nsets, KM_SLEEP));
2693 }
2694 
2695 void
2696 pcacheset_destroy(pollcacheset_t *pcsp, int nsets)
2697 {
2698 	int i;
2699 
2700 	for (i = 0; i < nsets; i++) {
2701 		if (pcsp[i].pcs_pollfd != NULL) {
2702 			kmem_free(pcsp[i].pcs_pollfd, pcsp[i].pcs_nfds *
2703 			    sizeof (pollfd_t));
2704 		}
2705 	}
2706 	kmem_free(pcsp, sizeof (pollcacheset_t) * nsets);
2707 }
2708 
2709 /*
2710  * Check each duplicated poll fd in the poll list. It may be necessary to
2711  * VOP_POLL the same fd again using different poll events. getf() has been
2712  * done by the caller. This routine returns 0 if it can successfully process
2713  * the entire poll fd list. It returns -1 if the underlying vnode has changed
2714  * during a VOP_POLL, in which case the caller has to repoll. It returns a
2715  * positive value if VOP_POLL failed.
2716  */
2717 static int
2718 plist_chkdupfd(file_t *fp, polldat_t *pdp, pollstate_t *psp, pollfd_t *pollfdp,
2719     int entry, int *fdcntp)
2720 {
2721 	int	i;
2722 	int	fd;
2723 	nfds_t	nfds = psp->ps_nfds;
2724 
2725 	fd = pollfdp[entry].fd;
2726 	for (i = entry + 1; i < nfds; i++) {
2727 		if (pollfdp[i].fd == fd) {
2728 			if (pollfdp[i].events == pollfdp[entry].events) {
2729 				if ((pollfdp[i].revents =
2730 				    pollfdp[entry].revents) != 0) {
2731 					(*fdcntp)++;
2732 				}
2733 			} else {
2734 
2735 				int	error;
2736 				pollhead_t *php;
2737 				pollcache_t *pcp = psp->ps_pcache;
2738 
2739 				/*
2740 				 * the events are different. VOP_POLL on this
2741 				 * fd so that we don't miss any revents.
2742 				 */
2743 				php = NULL;
2744 				ASSERT(curthread->t_pollcache == NULL);
2745 				error = VOP_POLL(fp->f_vnode,
2746 				    pollfdp[i].events, 0,
2747 				    &pollfdp[i].revents, &php);
2748 				if (error) {
2749 					return (error);
2750 				}
2751 				/*
2752 				 * layered devices (e.g. console driver)
2753 				 * may change the vnode and thus the pollhead
2754 				 * pointer out from underneath us.
2755 				 */
2756 				if (php != NULL && pdp->pd_php != NULL &&
2757 				    php != pdp->pd_php) {
2758 					pollhead_delete(pdp->pd_php, pdp);
2759 					pdp->pd_php = php;
2760 					pollhead_insert(php, pdp);
2761 					/*
2762 					 * We could have missed a wakeup on the
2763 					 * new target device. Make sure the new
2764 					 * target gets polled once.
2765 					 */
2766 					BT_SET(pcp->pc_bitmap, fd);
2767 					return (-1);
2768 				}
2769 				if (pollfdp[i].revents) {
2770 					(*fdcntp)++;
2771 				}
2772 			}
2773 		}
2774 	}
2775 	return (0);
2776 }
2777