xref: /illumos-gate/usr/src/uts/common/syscall/poll.c (revision 3893cb7fe5bfa1c9a4f7954517a917367f6cf081)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
28 /*	  All Rights Reserved  	*/
29 
30 /*
31  * Portions of this source code were derived from Berkeley 4.3 BSD
32  * under license from the Regents of the University of California.
33  */
34 
35 #pragma ident	"%Z%%M%	%I%	%E% SMI"
36 
37 #include <sys/param.h>
38 #include <sys/isa_defs.h>
39 #include <sys/types.h>
40 #include <sys/sysmacros.h>
41 #include <sys/user.h>
42 #include <sys/systm.h>
43 #include <sys/errno.h>
44 #include <sys/time.h>
45 #include <sys/vnode.h>
46 #include <sys/file.h>
47 #include <sys/mode.h>
48 #include <sys/proc.h>
49 #include <sys/uio.h>
50 #include <sys/poll_impl.h>
51 #include <sys/kmem.h>
52 #include <sys/cmn_err.h>
53 #include <sys/debug.h>
54 #include <sys/bitmap.h>
55 #include <sys/kstat.h>
56 #include <sys/rctl.h>
57 #include <sys/port_impl.h>
58 #include <sys/schedctl.h>
59 
60 #define	NPHLOCKS	64	/* Number of locks; must be power of 2 */
61 #define	PHLOCKADDR(php)	&plocks[(((uintptr_t)(php)) >> 8) & (NPHLOCKS - 1)]
62 #define	PHLOCK(php)	PHLOCKADDR(php).pp_lock
63 #define	PH_ENTER(php)	mutex_enter(PHLOCK(php))
64 #define	PH_EXIT(php)	mutex_exit(PHLOCK(php))
65 #define	VALID_POLL_EVENTS	(POLLIN | POLLPRI | POLLOUT | POLLRDNORM \
66 	| POLLRDBAND | POLLWRBAND | POLLHUP | POLLERR | POLLNVAL)
67 
68 /*
69  * global counters to collect some stats
70  */
71 static struct {
72 	kstat_named_t	polllistmiss;	/* failed to find a cached poll list */
73 	kstat_named_t	pollcachehit;	/* list matched 100% w/ cached one */
74 	kstat_named_t	pollcachephit;	/* list matched < 100% w/ cached one */
75 	kstat_named_t	pollcachemiss;	/* every list entry is dif from cache */
76 } pollstats = {
77 	{ "polllistmiss",	KSTAT_DATA_UINT64 },
78 	{ "pollcachehit",	KSTAT_DATA_UINT64 },
79 	{ "pollcachephit",	KSTAT_DATA_UINT64 },
80 	{ "pollcachemiss",	KSTAT_DATA_UINT64 }
81 };
82 
83 kstat_named_t *pollstats_ptr = (kstat_named_t *)&pollstats;
84 uint_t pollstats_ndata = sizeof (pollstats) / sizeof (kstat_named_t);
85 
86 struct pplock	{
87 	kmutex_t	pp_lock;
88 	short		pp_flag;
89 	kcondvar_t	pp_wait_cv;
90 	int32_t		pp_pad;		/* to a nice round 16 bytes */
91 };
92 
93 static struct pplock plocks[NPHLOCKS];	/* Hash array of pollhead locks */
94 
95 #ifdef DEBUG
96 static int pollchecksanity(pollstate_t *, nfds_t);
97 static int pollcheckxref(pollstate_t *, int);
98 static void pollcheckphlist(void);
99 static int pollcheckrevents(pollstate_t *, int, int, int);
100 static void checkpolldat(pollstate_t *);
101 #endif	/* DEBUG */
102 static int plist_chkdupfd(file_t *, polldat_t *, pollstate_t *, pollfd_t *, int,
103     int *);
104 
105 /*
106  * Data structure overview:
107  * The per-thread poll state consists of
108  *	one pollstate_t
109  *	one pollcache_t
110  *	one bitmap with one event bit per fd
111  *	a (two-dimensional) hashed array of polldat_t structures - one entry
112  *	per fd
113  *
114  * This conglomerate of data structures interacts with
115  *	the pollhead which is used by VOP_POLL and pollwakeup
116  *	(protected by the PHLOCK, cached array of plocks), and
117  *	the fpollinfo list hanging off the fi_list which is used to notify
118  *	poll when a cached fd is closed. This is protected by uf_lock.
119  *
120  * Invariants:
121  *	pd_php (pollhead pointer) is set iff (if and only if) the polldat
122  *	is on that pollhead. This is modified atomically under pc_lock.
123  *
124  *	pd_fp (file_t pointer) is set iff the thread is on the fpollinfo
125  *	list for that open file.
126  *	This is modified atomically under pc_lock.
127  *
128  *	pd_count is the sum (over all values of i) of pd_ref[i].xf_refcnt.
129  *	Iff pd_ref[i].xf_refcnt >= 1 then
130  *		ps_pcacheset[i].pcs_pollfd[pd_ref[i].xf_position].fd == pd_fd
131  *	Iff pd_ref[i].xf_refcnt > 1 then
132  *		In ps_pcacheset[i].pcs_pollfd between index
133  *		pd_ref[i].xf_position and the end of the list
134  *		there are xf_refcnt entries with .fd == pd_fd
135  *
136  * Locking design:
137  * Whenever possible the design relies on the fact that the poll cache state
138  * is per thread and thus, for both poll and exit, it is self-synchronizing.
139  * Thus the key interactions where other threads access the state are:
140  *	pollwakeup (and polltime), and
141  *	close cleaning up the cached references to an open file
142  *
143  * The two key locks in poll proper are ps_lock and pc_lock.
144  *
145  * The ps_lock is used for synchronization between poll, (lwp_)exit and close
146  * to ensure that modifications to pollcacheset structure are serialized.
147  * This lock is held through most of poll() except where poll sleeps
148  * since there is little need to handle closes concurrently with the execution
149  * of poll.
150  * The pc_lock protects most of the fields in the pollcache and polldat
151  * structures (which are accessed by poll, pollwakeup, and polltime)
152  * with the exception of fields that are only modified when only one thread
153  * can access this per-thread state.
154  * Those exceptions occur in poll when first allocating the per-thread state,
155  * when poll grows the number of polldat (never shrinks), and when
156  * exit/pollcleanup has ensured that there are no references from either
157  * pollheads or fpollinfo to the thread's poll state.
158  *
159  * The poll(2) system call is the only path in which ps_lock and pc_lock are
160  * held, in that order. It needs ps_lock to synchronize with close and
161  * lwp_exit; and pc_lock with pollwakeup.
162  *
163  * The locking interaction between pc_lock and PHLOCK takes into account
164  * that poll acquires these locks in the order of pc_lock and then PHLOCK
165  * while pollwakeup does it in the reverse order. Thus pollwakeup implements
166  * deadlock avoidance by dropping the locks and reacquiring them in the
167  * reverse order. For this to work pollwakeup needs to prevent the thread
168  * from exiting and freeing all of the poll related state. This is done
169  * using
170  *	the pc_no_exit lock
171  *	the pc_busy counter
172  *	the pc_busy_cv condition variable
173  *
174  * The locking interaction between pc_lock and uf_lock has similar
175  * issues. Poll holds ps_lock and/or pc_lock across calls to getf/releasef
176  * which acquire uf_lock. The poll cleanup in close needs to hold uf_lock
177  * to prevent poll or exit from doing a delfpollinfo after which the thread
178  * might exit. But the cleanup needs to acquire pc_lock when modifying
179  * the poll cache state. The solution is to use pc_busy and do the close
180  * cleanup in two phases:
181  *	First close calls pollblockexit which increments pc_busy.
182  *	This prevents the per-thread poll related state from being freed.
183  *	Then close drops uf_lock and calls pollcacheclean.
184  *	This routine can then acquire pc_lock and remove any references
185  *	to the closing fd (as well as recording that it has been closed
186  *	so that a POLLNVAL can be generated even if the fd is reused before
187  *	poll has been woken up and checked getf() again).
188  *
189  * When removing a polled fd from poll cache, the fd is always removed
190  * from pollhead list first and then from fpollinfo list, i.e.,
191  * pollhead_delete() is called before delfpollinfo().
192  *
193  *
194  * Locking hierarchy:
195  *	pc_no_exit is a leaf level lock.
196  *	ps_lock is held when acquiring pc_lock (except when pollwakeup
197  *	acquires pc_lock).
198  *	pc_lock might be held when acquiring PHLOCK (pollhead_insert/
199  *	pollhead_delete)
200  *	pc_lock is always held (but this is not required)
201  *	when acquiring PHLOCK (in polladd/pollhead_delete and pollwakeup called
202  *	from pcache_clean_entry).
203  *	pc_lock is held across addfpollinfo/delfpollinfo which acquire
204  *	uf_lock.
205  *	pc_lock is held across getf/releasef which acquire uf_lock.
206  *	ps_lock might be held across getf/releasef which acquire uf_lock.
207  *	pollwakeup tries to acquire pc_lock while holding PHLOCK
208  *	but drops the locks and reacquire them in reverse order to avoid
209  *	deadlock.
210  *
211  * Note also that there is deadlock avoidance support for VOP_POLL routines
212  * and pollwakeup involving a file system or driver lock.
213  * See below.
214  */
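
/*
 * Illustrative sketch (not part of the original source): the ps_lock and
 * pc_lock ordering described above, as it appears in poll_common() below,
 * where ps is the per-thread pollstate_t and pcp is its pollcache_t:
 *
 *	mutex_enter(&ps->ps_lock);	(serializes against close and lwp_exit)
 *	mutex_enter(&pcp->pc_lock);	(protects the bitmap and polldat state)
 *	... scan the bitmap, calling VOP_POLL as needed ...
 *	mutex_exit(&pcp->pc_lock);
 *	mutex_exit(&ps->ps_lock);
 *
 * pollwakeup() acquires PHLOCK first and then pc_lock, which is why it uses
 * mutex_tryenter() on pc_lock and backs off on failure; see pollwakeup()
 * below.
 */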
215 
216 /*
217  * Deadlock avoidance support for VOP_POLL() routines.  This is
218  * sometimes necessary to prevent deadlock between polling threads
219  * (which hold poll locks on entry to xx_poll(), then acquire foo)
220  * and pollwakeup() threads (which hold foo, then acquire poll locks).
221  *
222  * pollunlock(void) releases whatever poll locks the current thread holds,
223  *	returning a cookie for use by pollrelock();
224  *
225  * pollrelock(cookie) reacquires previously dropped poll locks;
226  *
227  * polllock(php, mutex) does the common case: pollunlock(),
228  *	acquire the problematic mutex, pollrelock().
229  */
230 int
231 pollunlock(void)
232 {
233 	pollcache_t *pcp;
234 	int lockstate = 0;
235 
236 	/*
237 	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
238 	 * If the pollrelock/pollunlock is called as a result of poll(2),
239 	 * the t_pollcache should be NULL.
240 	 */
241 	if (curthread->t_pollcache == NULL)
242 		pcp = curthread->t_pollstate->ps_pcache;
243 	else
244 		pcp = curthread->t_pollcache;
245 
246 	if (mutex_owned(&pcp->pc_lock)) {
247 		lockstate = 1;
248 		mutex_exit(&pcp->pc_lock);
249 	}
250 	return (lockstate);
251 }
252 
253 void
254 pollrelock(int lockstate)
255 {
256 	pollcache_t *pcp;
257 
258 	/*
259 	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
260 	 * If the pollrelock/pollunlock is called as a result of poll(2),
261 	 * the t_pollcache should be NULL.
262 	 */
263 	if (curthread->t_pollcache == NULL)
264 		pcp = curthread->t_pollstate->ps_pcache;
265 	else
266 		pcp = curthread->t_pollcache;
267 
268 	if (lockstate > 0)
269 		mutex_enter(&pcp->pc_lock);
270 }
271 
272 /* ARGSUSED */
273 void
274 polllock(pollhead_t *php, kmutex_t *lp)
275 {
276 	if (!mutex_tryenter(lp)) {
277 		int lockstate = pollunlock();
278 		mutex_enter(lp);
279 		pollrelock(lockstate);
280 	}
281 }
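
/*
 * Usage sketch (an assumption, not from this file): a driver whose wakeup
 * path runs under its own softstate lock would use polllock() rather than
 * mutex_enter() in its chpoll(9E) entry point, avoiding the deadlock
 * described above.  All of the mydev and md names below are hypothetical.
 *
 *	static int
 *	mydev_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
 *	    struct pollhead **phpp)
 *	{
 *		mydev_state_t *mp = mydev_lookup_state(dev);
 *
 *		polllock(&mp->md_pollhead, &mp->md_lock);
 *		*reventsp = mydev_ready_events(mp) & events;
 *		if (*reventsp == 0 && !anyyet)
 *			*phpp = &mp->md_pollhead;
 *		mutex_exit(&mp->md_lock);
 *		return (0);
 *	}
 */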
282 
283 static int
284 poll_common(pollfd_t *fds, nfds_t nfds, timespec_t *tsp, k_sigset_t *ksetp)
285 {
286 	kthread_t *t = curthread;
287 	klwp_t *lwp = ttolwp(t);
288 	proc_t *p = ttoproc(t);
289 	int fdcnt = 0;
290 	int rval;
291 	int i;
292 	timespec_t *rqtp = NULL;
293 	int timecheck = 0;
294 	int imm_timeout = 0;
295 	pollfd_t *pollfdp;
296 	pollstate_t *ps;
297 	pollcache_t *pcp;
298 	int error = 0;
299 	nfds_t old_nfds;
300 	int cacheindex = 0;	/* which cache set is used */
301 
302 	/*
303 	 * Determine the precise future time of the requested timeout, if any.
304 	 */
305 	if (tsp != NULL) {
306 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
307 			imm_timeout = 1;
308 		else {
309 			timespec_t now;
310 			timecheck = timechanged;
311 			gethrestime(&now);
312 			rqtp = tsp;
313 			timespecadd(rqtp, &now);
314 		}
315 	}
316 
317 	/*
318 	 * Reset our signal mask, if requested.
319 	 */
320 	if (ksetp != NULL) {
321 		mutex_enter(&p->p_lock);
322 		schedctl_finish_sigblock(t);
323 		lwp->lwp_sigoldmask = t->t_hold;
324 		t->t_hold = *ksetp;
325 		t->t_flag |= T_TOMASK;
326 		/*
327 		 * Call cv_timedwait_sig() just to check for signals.
328 		 * We will return immediately with either 0 or -1.
329 		 */
330 		if (!cv_timedwait_sig(&t->t_delay_cv, &p->p_lock, lbolt)) {
331 			mutex_exit(&p->p_lock);
332 			error = EINTR;
333 			goto pollout;
334 		}
335 		mutex_exit(&p->p_lock);
336 	}
337 
338 	/*
339 	 * Check to see if this guy just wants to use poll() as a timeout.
340 	 * If yes then bypass all the other stuff and make him sleep.
341 	 */
342 	if (nfds == 0) {
343 		/*
344 		 * Sleep until we have passed the requested future
345 		 * time or until interrupted by a signal.
346 		 * Do not check for signals if we have a zero timeout.
347 		 */
348 		if (!imm_timeout) {
349 			mutex_enter(&t->t_delay_lock);
350 			while ((rval = cv_waituntil_sig(&t->t_delay_cv,
351 			    &t->t_delay_lock, rqtp, timecheck)) > 0)
352 				continue;
353 			mutex_exit(&t->t_delay_lock);
354 			if (rval == 0)
355 				error = EINTR;
356 		}
357 		goto pollout;
358 	}
359 
360 	if (nfds > p->p_fno_ctl) {
361 		mutex_enter(&p->p_lock);
362 		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
363 		    p->p_rctls, p, RCA_SAFE);
364 		mutex_exit(&p->p_lock);
365 		error = EINVAL;
366 		goto pollout;
367 	}
368 
369 	/*
370 	 * Need to allocate memory for pollstate before anything because
371 	 * the mutex and cv are created in this space
372 	 */
373 	if ((ps = t->t_pollstate) == NULL) {
374 		t->t_pollstate = pollstate_create();
375 		ps = t->t_pollstate;
376 	}
377 
378 	if (ps->ps_pcache == NULL)
379 		ps->ps_pcache = pcache_alloc();
380 	pcp = ps->ps_pcache;
381 
382 	/*
383 	 * NOTE: for performance, buffers are saved across poll() calls.
384 	 * The theory is that if a process polls heavily, it tends to poll
385 	 * on the same set of descriptors.  Therefore, we only reallocate
386 	 * buffers when nfds changes.  There is no hysteresis control,
387 	 * because there is no data to suggest that this is necessary;
388 	 * the penalty of reallocating is not *that* great in any event.
389 	 */
390 	old_nfds = ps->ps_nfds;
391 	if (nfds != old_nfds) {
392 
393 		kmem_free(ps->ps_pollfd, old_nfds * sizeof (pollfd_t));
394 		pollfdp = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
395 		ps->ps_pollfd = pollfdp;
396 		ps->ps_nfds = nfds;
397 	}
398 
399 	pollfdp = ps->ps_pollfd;
400 	if (copyin(fds, pollfdp, nfds * sizeof (pollfd_t))) {
401 		error = EFAULT;
402 		goto pollout;
403 	}
404 
405 	if (fds == NULL) {
406 		/*
407 		 * If the process has page 0 mapped, then the copyin() above
408 		 * will succeed even if fds is NULL.  However, our cached
409 		 * poll lists are keyed by the address of the passed-in fds
410 		 * structure, and we use the value NULL to indicate an unused
411 		 * poll cache list entry.  As such, we elect not to support
412 		 * NULL as a valid (user) memory address and fail the poll()
413 		 * call.
414 		 */
415 		error = EINVAL;
416 		goto pollout;
417 	}
418 
419 	/*
420 	 * If this thread polls for the first time, allocate ALL poll
421 	 * cache data structures and cache the poll fd list. This
422 	 * allocation is delayed till now because lwps polling 0 fds
423 	 * (i.e. using poll() as a timeout) don't need this memory.
424 	 */
425 	mutex_enter(&ps->ps_lock);
426 	pcp = ps->ps_pcache;
427 	ASSERT(pcp != NULL);
428 	if (pcp->pc_bitmap == NULL) {
429 		pcache_create(pcp, nfds);
430 		/*
431 		 * poll and cache this poll fd list in ps_pcacheset[0].
432 		 */
433 		error = pcacheset_cache_list(ps, fds, &fdcnt, cacheindex);
434 		if (fdcnt || error) {
435 			mutex_exit(&ps->ps_lock);
436 			goto pollout;
437 		}
438 	} else {
439 		pollcacheset_t	*pcset = ps->ps_pcacheset;
440 
441 		/*
442 		 * Not first time polling. Select a cached poll list by
443 		 * matching user pollfd list buffer address.
444 		 */
445 		for (cacheindex = 0; cacheindex < ps->ps_nsets; cacheindex++) {
446 			if (pcset[cacheindex].pcs_usradr == (uintptr_t)fds) {
447 				if ((++pcset[cacheindex].pcs_count) == 0) {
448 					/*
449 					 * counter is wrapping around.
450 					 */
451 					pcacheset_reset_count(ps, cacheindex);
452 				}
453 				/*
454 				 * examine and resolve possible
455 				 * difference of the current poll
456 				 * list and previously cached one.
457 				 * If there is an error during resolve(),
458 				 * the callee will guarantee the consistency
459 				 * of cached poll list and cache content.
460 				 */
461 				error = pcacheset_resolve(ps, nfds, &fdcnt,
462 				    cacheindex);
463 				if (error) {
464 					mutex_exit(&ps->ps_lock);
465 					goto pollout;
466 				}
467 				break;
468 			}
469 
470 			/*
471 			 * Note that the pcs_usradr field of a used entry won't
472 			 * be NULL because it stores the address of the passed-in
473 			 * fds, and a NULL fds is never cached (it is either the
474 			 * special timeout case when nfds is 0, or the call fails
475 			 * immediately).
476 			 */
477 			if (pcset[cacheindex].pcs_usradr == NULL) {
478 				/*
479 				 * found an unused entry. Use it to cache
480 				 * this poll list.
481 				 */
482 				error = pcacheset_cache_list(ps, fds, &fdcnt,
483 				    cacheindex);
484 				if (fdcnt || error) {
485 					mutex_exit(&ps->ps_lock);
486 					goto pollout;
487 				}
488 				break;
489 			}
490 		}
491 		if (cacheindex == ps->ps_nsets) {
492 			/*
493 			 * We failed to find a matching cached poll fd list.
494 			 * Replace an old list.
495 			 */
496 			pollstats.polllistmiss.value.ui64++;
497 			cacheindex = pcacheset_replace(ps);
498 			ASSERT(cacheindex < ps->ps_nsets);
499 			pcset[cacheindex].pcs_usradr = (uintptr_t)fds;
500 			error = pcacheset_resolve(ps, nfds, &fdcnt, cacheindex);
501 			if (error) {
502 				mutex_exit(&ps->ps_lock);
503 				goto pollout;
504 			}
505 		}
506 	}
507 
508 	/*
509 	 * Always scan the bitmap with the lock on the pollcache held.
510 	 * This is to make sure that a wakeup does not come undetected.
511 	 * If the lock is not held, a pollwakeup could have come for an
512 	 * fd we already checked but before this thread sleeps, in which
513 	 * case the wakeup is missed. Now we hold the pcache lock and
514 	 * check the bitmap again. This will prevent wakeup from happening
515 	 * while we hold pcache lock since pollwakeup() will also lock
516 	 * the pcache before updating poll bitmap.
517 	 */
518 	mutex_enter(&pcp->pc_lock);
519 	for (;;) {
520 		pcp->pc_flag = 0;
521 		error = pcache_poll(pollfdp, ps, nfds, &fdcnt, cacheindex);
522 		if (fdcnt || error) {
523 			mutex_exit(&pcp->pc_lock);
524 			mutex_exit(&ps->ps_lock);
525 			break;
526 		}
527 
528 		/*
529 		 * If T_POLLWAKE is set, a pollwakeup() was performed on
530 		 * one of the file descriptors.  This can happen only if
531 		 * one of the VOP_POLL() functions dropped pcp->pc_lock.
532 		 * The only current cases of this are in procfs (prpoll())
533 		 * and STREAMS (strpoll()).
534 		 */
535 		if (pcp->pc_flag & T_POLLWAKE)
536 			continue;
537 
538 		/*
539 		 * If you get here, the poll of fds was unsuccessful.
540 		 * Wait until some fd becomes readable, writable, or gets
541 		 * an exception, or until a signal or a timeout occurs.
542 		 * Do not check for signals if we have a zero timeout.
543 		 */
544 		mutex_exit(&ps->ps_lock);
545 		if (imm_timeout)
546 			rval = -1;
547 		else
548 			rval = cv_waituntil_sig(&pcp->pc_cv, &pcp->pc_lock,
549 				rqtp, timecheck);
550 		mutex_exit(&pcp->pc_lock);
551 		/*
552 		 * If we have received a signal or timed out
553 		 * then break out and return.
554 		 */
555 		if (rval <= 0) {
556 			if (rval == 0)
557 				error = EINTR;
558 			break;
559 		}
560 		/*
561 		 * We have not received a signal or timed out.
562 		 * Continue around and poll fds again.
563 		 */
564 		mutex_enter(&ps->ps_lock);
565 		mutex_enter(&pcp->pc_lock);
566 	}
567 
568 pollout:
569 	/*
570 	 * If we changed the signal mask but we received
571 	 * no signal then restore the signal mask.
572 	 * Otherwise psig() will deal with the signal mask.
573 	 */
574 	if (ksetp != NULL) {
575 		mutex_enter(&p->p_lock);
576 		if (lwp->lwp_cursig == 0) {
577 			t->t_hold = lwp->lwp_sigoldmask;
578 			t->t_flag &= ~T_TOMASK;
579 		}
580 		mutex_exit(&p->p_lock);
581 	}
582 
583 	if (error)
584 		return (set_errno(error));
585 
586 	/*
587 	 * Copy out the events and return the fdcnt to the user.
588 	 */
589 	if (nfds != 0 &&
590 	    copyout(pollfdp, fds, nfds * sizeof (pollfd_t)))
591 		return (set_errno(EFAULT));
592 
593 #ifdef DEBUG
594 	/*
595 	 * Another sanity check:
596 	 */
597 	if (fdcnt) {
598 		int	reventcnt = 0;
599 
600 		for (i = 0; i < nfds; i++) {
601 			if (pollfdp[i].fd < 0) {
602 				ASSERT(pollfdp[i].revents == 0);
603 				continue;
604 			}
605 			if (pollfdp[i].revents) {
606 				reventcnt++;
607 			}
608 		}
609 		ASSERT(fdcnt == reventcnt);
610 	} else {
611 		for (i = 0; i < nfds; i++) {
612 			ASSERT(pollfdp[i].revents == 0);
613 		}
614 	}
615 #endif	/* DEBUG */
616 
617 	return (fdcnt);
618 }
619 
620 /*
621  * This system call trap exists solely for binary compatibility with
622  * old statically-linked applications.  It is not called from libc.
623  * It should be removed in the next release.
624  */
625 int
626 poll(pollfd_t *fds, nfds_t nfds, int time_out)
627 {
628 	timespec_t ts;
629 	timespec_t *tsp;
630 
631 	if (time_out < 0)
632 		tsp = NULL;
633 	else {
634 		ts.tv_sec = time_out / MILLISEC;
635 		ts.tv_nsec = (time_out % MILLISEC) * MICROSEC;
636 		tsp = &ts;
637 	}
638 
639 	return (poll_common(fds, nfds, tsp, NULL));
640 }
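
/*
 * Worked example (illustration only): time_out == 1500 converts above to
 * ts.tv_sec = 1500 / MILLISEC = 1 and
 * ts.tv_nsec = (1500 % MILLISEC) * MICROSEC = 500000000,
 * i.e. one and a half seconds.  A negative time_out leaves tsp NULL,
 * which poll_common() treats as an infinite timeout.
 */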
641 
642 /*
643  * This is the system call trap that poll(),
644  * select() and pselect() are built upon.
645  * It is a private interface between libc and the kernel.
646  */
647 int
648 pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeoutp, sigset_t *setp)
649 {
650 	timespec_t ts;
651 	timespec_t *tsp;
652 	sigset_t set;
653 	k_sigset_t kset;
654 	k_sigset_t *ksetp;
655 	model_t datamodel = get_udatamodel();
656 
657 	if (timeoutp == NULL)
658 		tsp = NULL;
659 	else {
660 		if (datamodel == DATAMODEL_NATIVE) {
661 			if (copyin(timeoutp, &ts, sizeof (ts)))
662 				return (set_errno(EFAULT));
663 		} else {
664 			timespec32_t ts32;
665 
666 			if (copyin(timeoutp, &ts32, sizeof (ts32)))
667 				return (set_errno(EFAULT));
668 			TIMESPEC32_TO_TIMESPEC(&ts, &ts32)
669 		}
670 
671 		if (itimerspecfix(&ts))
672 			return (set_errno(EINVAL));
673 		tsp = &ts;
674 	}
675 
676 	if (setp == NULL)
677 		ksetp = NULL;
678 	else {
679 		if (copyin(setp, &set, sizeof (set)))
680 			return (set_errno(EFAULT));
681 		sigutok(&set, &kset);
682 		ksetp = &kset;
683 	}
684 
685 	return (poll_common(fds, nfds, tsp, ksetp));
686 }
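
/*
 * Rough sketch of the libc side (an assumption, simplified): poll()
 * converts its millisecond timeout to a timespec and traps to pollsys()
 * with a NULL signal mask, while pselect() builds a pollfd array from its
 * fd_sets and passes its signal mask through:
 *
 *	pollsys(fds, nfds, timeout < 0 ? NULL : &ts, NULL);	(poll)
 *	pollsys(pfds, npfds, tsp, sigmaskp);			(pselect)
 */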
687 
688 /*
689  * Clean up any state left around by poll(2). Called when a thread exits.
690  */
691 void
692 pollcleanup()
693 {
694 	pollstate_t *ps = curthread->t_pollstate;
695 	pollcache_t *pcp;
696 
697 	if (ps == NULL)
698 		return;
699 	pcp = ps->ps_pcache;
700 	/*
701 	 * free up all cached poll fds
702 	 */
703 	if (pcp == NULL) {
704 		/* this pollstate is used by /dev/poll */
705 		goto pollcleanout;
706 	}
707 
708 	if (pcp->pc_bitmap != NULL) {
709 		ASSERT(MUTEX_NOT_HELD(&ps->ps_lock));
710 		/*
711 		 * a closing lwp can race with us when cleaning up a polldat
712 		 * entry. We hold the ps_lock when cleaning the hash table.
713 		 * Since this pollcache is going away anyway, there is no
714 		 * need to hold the pc_lock.
715 		 */
716 		mutex_enter(&ps->ps_lock);
717 		pcache_clean(pcp);
718 		mutex_exit(&ps->ps_lock);
719 #ifdef DEBUG
720 		/*
721 		 * At this point, all fds cached by this lwp should be
722 		 * cleaned up. There should be no fd in fi_list still
723 		 * referencing this thread.
724 		 */
725 		checkfpollinfo();	/* sanity check */
726 		pollcheckphlist();	/* sanity check */
727 #endif	/* DEBUG */
728 	}
729 	/*
730 	 * Be sure no one is referencing this thread before exiting
731 	 */
732 	mutex_enter(&pcp->pc_no_exit);
733 	ASSERT(pcp->pc_busy >= 0);
734 	while (pcp->pc_busy > 0)
735 		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
736 	mutex_exit(&pcp->pc_no_exit);
737 pollcleanout:
738 	pollstate_destroy(ps);
739 	curthread->t_pollstate = NULL;
740 }
741 
742 /*
743  * pollwakeup() - poke threads waiting in poll() for some event
744  * on a particular object.
745  *
746  * The threads hanging off of the specified pollhead structure are scanned.
747  * If their event mask matches the specified event(s), then pollnotify() is
748  * called to poke the thread.
749  *
750  * Multiple events may be specified.  When POLLHUP or POLLERR are specified,
751  * all waiting threads are poked.
752  *
753  * It is important that pollnotify() not drop the lock protecting the list
754  * of threads.
755  */
756 void
757 pollwakeup(pollhead_t *php, short events_arg)
758 {
759 	polldat_t	*pdp;
760 	int		events = (ushort_t)events_arg;
761 	struct plist {
762 		port_t *pp;
763 		int	pevents;
764 		struct plist *next;
765 		};
766 	struct plist *plhead = NULL, *pltail = NULL;
767 
768 retry:
769 	PH_ENTER(php);
770 
771 	for (pdp = php->ph_list; pdp; pdp = pdp->pd_next) {
772 		if ((pdp->pd_events & events) ||
773 		    (events & (POLLHUP | POLLERR))) {
774 
775 			pollcache_t 	*pcp;
776 
777 			if (pdp->pd_portev != NULL) {
778 				port_kevent_t	*pkevp = pdp->pd_portev;
779 				/*
780 				 * Object (fd) is associated with an event port,
781 				 * => send event notification to the port.
782 				 */
783 				ASSERT(pkevp->portkev_source == PORT_SOURCE_FD);
784 				mutex_enter(&pkevp->portkev_lock);
785 				if (pkevp->portkev_flags & PORT_KEV_VALID) {
786 					int pevents;
787 
788 					pkevp->portkev_flags &= ~PORT_KEV_VALID;
789 					pkevp->portkev_events |= events &
790 					    (pdp->pd_events | POLLHUP |
791 					    POLLERR);
792 					/*
793 					 * portkev_lock mutex will be released
794 					 * by port_send_event().
795 					 */
796 					port_send_event(pkevp);
797 
798 					/*
799 					 * If we have some thread polling the
800 					 * port's fd, add it to the list. They
801 					 * will be notified later.
802 					 * The port_pollwkup() will flag the
803 					 * port_t so that it will not disappear
804 					 * till port_pollwkdone() is called.
805 					 */
806 					pevents =
807 					    port_pollwkup(pkevp->portkev_port);
808 					if (pevents) {
809 						struct plist *t;
810 						t = kmem_zalloc(
811 							sizeof (struct plist),
812 							    KM_SLEEP);
813 						t->pp = pkevp->portkev_port;
814 						t->pevents = pevents;
815 						if (plhead == NULL) {
816 							plhead = t;
817 						} else {
818 							pltail->next = t;
819 						}
820 						pltail = t;
821 					}
822 				} else {
823 					mutex_exit(&pkevp->portkev_lock);
824 				}
825 				continue;
826 			}
827 
828 			pcp = pdp->pd_pcache;
829 
830 			/*
831 			 * Try to grab the lock for this thread. If
832 			 * we don't get it then we may deadlock so
833 			 * back out and restart all over again. Note
834 			 * that the failure rate is very very low.
835 			 */
836 			if (mutex_tryenter(&pcp->pc_lock)) {
837 				pollnotify(pcp, pdp->pd_fd);
838 				mutex_exit(&pcp->pc_lock);
839 			} else {
840 				/*
841 				 * We are here because:
842 				 *	1) This thread has been woken up
843 				 *	   and is trying to get out of poll().
844 				 *	2) Some other thread is also here
845 				 *	   but with a different pollhead lock.
846 				 *
847 				 * So, we need to drop the lock on pollhead
848 				 * because of (1) but we want to prevent
849 				 * that thread from doing lwp_exit() or
850 				 * devpoll close. We want to ensure that
851 				 * the pollcache pointer stays valid.
852 				 *
853 				 * Solution: Grab the pcp->pc_no_exit lock,
854 				 * increment the pc_busy counter, drop every
855 				 * lock in sight. Get out of the way and wait
856 				 * for type (2) threads to finish.
857 				 */
858 
859 				mutex_enter(&pcp->pc_no_exit);
860 				pcp->pc_busy++;	/* prevents exit()'s */
861 				mutex_exit(&pcp->pc_no_exit);
862 
863 				PH_EXIT(php);
864 				mutex_enter(&pcp->pc_lock);
865 				mutex_exit(&pcp->pc_lock);
866 				mutex_enter(&pcp->pc_no_exit);
867 				pcp->pc_busy--;
868 				if (pcp->pc_busy == 0) {
869 					/*
870 					 * Wakeup the thread waiting in
871 					 * thread_exit().
872 					 */
873 					cv_signal(&pcp->pc_busy_cv);
874 				}
875 				mutex_exit(&pcp->pc_no_exit);
876 				goto retry;
877 			}
878 		}
879 	}
880 
881 
882 	/*
883 	 * Event ports - If this php is that of the port at the head of the list,
884 	 * call port_pollwkdone() to release it. The port_pollwkdone()
885 	 * needs to be called before dropping the PH lock so that any new
886 	 * thread attempting to poll this port is blocked. There can be
887 	 * only one thread here in pollwakeup notifying this port's fd.
888 	 */
889 	if (plhead != NULL && &plhead->pp->port_pollhd == php) {
890 		struct plist *t;
891 		port_pollwkdone(plhead->pp);
892 		t = plhead;
893 		plhead = plhead->next;
894 		kmem_free(t, sizeof (struct plist));
895 	}
896 	PH_EXIT(php);
897 
898 	/*
899 	 * Event ports - Notify threads polling the event port's fd.
900 	 * This is normally done in port_send_event() where it calls
901 	 * pollwakeup() on the port. But, for PORT_SOURCE_FD source alone,
902 	 * we do it here in pollwakeup() to avoid a recursive call.
903 	 */
904 	if (plhead != NULL) {
905 		php = &plhead->pp->port_pollhd;
906 		events = plhead->pevents;
907 		goto retry;
908 	}
909 }
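
/*
 * Usage sketch (an assumption, hypothetical driver names): a driver calls
 * pollwakeup() on its pollhead once a polled condition becomes true, e.g.
 * from its interrupt or service routine:
 *
 *	mutex_enter(&mp->md_lock);
 *	... enqueue the newly arrived data ...
 *	mutex_exit(&mp->md_lock);
 *	pollwakeup(&mp->md_pollhead, POLLIN | POLLRDNORM);
 *
 * If the driver must call pollwakeup() while still holding md_lock, its
 * chpoll routine should take that lock with polllock() (see above) so that
 * the deadlock avoidance path here can make progress.
 */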
910 
911 /*
912  * This function is called to inform a thread that
913  * an event being polled for has occurred.
914  * The pollstate lock on the thread should be held on entry.
915  */
916 void
917 pollnotify(pollcache_t *pcp, int fd)
918 {
919 	ASSERT(fd < pcp->pc_mapsize);
920 	ASSERT(MUTEX_HELD(&pcp->pc_lock));
921 	BT_SET(pcp->pc_bitmap, fd);
922 	pcp->pc_flag |= T_POLLWAKE;
923 	cv_signal(&pcp->pc_cv);
924 }
925 
926 /*
927  * add a polldat entry to pollhead ph_list. The polldat struct is used
928  * by pollwakeup to wake sleeping pollers when polled events have happened.
929  */
930 void
931 pollhead_insert(pollhead_t *php, polldat_t *pdp)
932 {
933 	PH_ENTER(php);
934 	ASSERT(pdp->pd_next == NULL);
935 #ifdef DEBUG
936 	{
937 		/*
938 		 * the polldat should not be already on the list
939 		 */
940 		polldat_t *wp;
941 		for (wp = php->ph_list; wp; wp = wp->pd_next) {
942 			ASSERT(wp != pdp);
943 		}
944 	}
945 #endif	/* DEBUG */
946 	pdp->pd_next = php->ph_list;
947 	php->ph_list = pdp;
948 	PH_EXIT(php);
949 }
950 
951 /*
952  * Delete the polldat entry from ph_list.
953  */
954 void
955 pollhead_delete(pollhead_t *php, polldat_t *pdp)
956 {
957 	polldat_t *wp;
958 	polldat_t **wpp;
959 
960 	PH_ENTER(php);
961 	for (wpp = &php->ph_list; (wp = *wpp) != NULL; wpp = &wp->pd_next) {
962 		if (wp == pdp) {
963 			*wpp = pdp->pd_next;
964 			pdp->pd_next = NULL;
965 			break;
966 		}
967 	}
968 #ifdef DEBUG
969 	/* assert that pdp is no longer in the list */
970 	for (wp = *wpp; wp; wp = wp->pd_next) {
971 		ASSERT(wp != pdp);
972 	}
973 #endif	/* DEBUG */
974 	PH_EXIT(php);
975 }
976 
977 /*
978  * walk through the poll fd lists to see if they are identical. This is an
979  * expensive operation and should not be done more than once for each poll()
980  * call.
981  *
982  * As an optimization (i.e., not having to go through the lists more than
983  * once), this routine also clears the revents field of pollfd in 'current'.
984  * Zeroing out the revents field of each entry in the current poll list is
985  * required by the poll man page.
986  *
987  * Since the events field of cached list has illegal poll events filtered
988  * out, the current list applies the same filtering before comparison.
989  *
990  * The routine stops when it detects a meaningful difference, or when it
991  * exhausts the lists.
992  */
993 int
994 pcacheset_cmp(pollfd_t *current, pollfd_t *cached, pollfd_t *newlist, int n)
995 {
996 	int    ix;
997 
998 	for (ix = 0; ix < n; ix++) {
999 		if (current[ix].fd == cached[ix].fd) {
1000 			/*
1001 			 * Filter out invalid poll events while we are in
1002 			 * inside the loop.
1003 			 */
1004 			if (current[ix].events & ~VALID_POLL_EVENTS) {
1005 				current[ix].events &= VALID_POLL_EVENTS;
1006 				if (newlist != NULL)
1007 					newlist[ix].events = current[ix].events;
1008 			}
1009 			if (current[ix].events == cached[ix].events) {
1010 				current[ix].revents = 0;
1011 				continue;
1012 			}
1013 		}
1014 		if ((current[ix].fd < 0) && (cached[ix].fd < 0)) {
1015 			current[ix].revents = 0;
1016 			continue;
1017 		}
1018 		return (ix);
1019 	}
1020 	return (ix);
1021 }
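
/*
 * Worked example (illustration only), with n = 3:
 *	current: {5, POLLIN} {7, POLLOUT} {9, POLLIN}
 *	cached:  {5, POLLIN} {7, POLLIN}  {9, POLLIN}
 * The routine clears current[0].revents and returns 1, the index of the
 * first meaningful difference (fd 7 is now polled for different events).
 * When the lists match throughout, the return value equals n.
 */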
1022 
1023 /*
1024  * This routine returns a pointer to a cached poll fd entry, or NULL if it
1025  * does not find it in the hash table.
1026  */
1027 polldat_t *
1028 pcache_lookup_fd(pollcache_t *pcp, int fd)
1029 {
1030 	int hashindex;
1031 	polldat_t *pdp;
1032 
1033 	hashindex = POLLHASH(pcp->pc_hashsize, fd);
1034 	pdp = pcp->pc_hash[hashindex];
1035 	while (pdp != NULL) {
1036 		if (pdp->pd_fd == fd)
1037 			break;
1038 		pdp = pdp->pd_hashnext;
1039 	}
1040 	return (pdp);
1041 }
1042 
1043 polldat_t *
1044 pcache_alloc_fd(int nsets)
1045 {
1046 	polldat_t *pdp;
1047 
1048 	pdp = kmem_zalloc(sizeof (polldat_t), KM_SLEEP);
1049 	if (nsets > 0) {
1050 		pdp->pd_ref = kmem_zalloc(sizeof (xref_t) * nsets, KM_SLEEP);
1051 		pdp->pd_nsets = nsets;
1052 	}
1053 	return (pdp);
1054 }
1055 
1056 /*
1057  * This routine  inserts a polldat into the pollcache's hash table. It
1058  * may be necessary to grow the size of the hash table.
1059  */
1060 void
1061 pcache_insert_fd(pollcache_t *pcp, polldat_t *pdp, nfds_t nfds)
1062 {
1063 	int hashindex;
1064 	int fd;
1065 
1066 	if ((pcp->pc_fdcount > pcp->pc_hashsize * POLLHASHTHRESHOLD) ||
1067 	    (nfds > pcp->pc_hashsize * POLLHASHTHRESHOLD)) {
1068 		pcache_grow_hashtbl(pcp, nfds);
1069 	}
1070 	fd = pdp->pd_fd;
1071 	hashindex = POLLHASH(pcp->pc_hashsize, fd);
1072 	pdp->pd_hashnext = pcp->pc_hash[hashindex];
1073 	pcp->pc_hash[hashindex] = pdp;
1074 	pcp->pc_fdcount++;
1075 
1076 #ifdef DEBUG
1077 	{
1078 		/*
1079 		 * same fd should not appear on a hash list twice
1080 		 */
1081 		polldat_t *pdp1;
1082 		for (pdp1 = pdp->pd_hashnext; pdp1; pdp1 = pdp1->pd_hashnext) {
1083 			ASSERT(pdp->pd_fd != pdp1->pd_fd);
1084 		}
1085 	}
1086 #endif	/* DEBUG */
1087 }
1088 
1089 /*
1090  * Grow the hash table -- either double the table size or round it to the
1091  * nearest multiple of POLLHASHCHUNKSZ, whichever is bigger. Rehash all the
1092  * elements on the hash table.
1093  */
1094 void
1095 pcache_grow_hashtbl(pollcache_t *pcp, nfds_t nfds)
1096 {
1097 	int	oldsize;
1098 	polldat_t **oldtbl;
1099 	polldat_t *pdp, *pdp1;
1100 	int	i;
1101 #ifdef DEBUG
1102 	int	count = 0;
1103 #endif
1104 
1105 	ASSERT(pcp->pc_hashsize % POLLHASHCHUNKSZ == 0);
1106 	oldsize = pcp->pc_hashsize;
1107 	oldtbl = pcp->pc_hash;
1108 	if (nfds > pcp->pc_hashsize * POLLHASHINC) {
1109 		pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) &
1110 		    ~(POLLHASHCHUNKSZ - 1);
1111 	} else {
1112 		pcp->pc_hashsize = pcp->pc_hashsize * POLLHASHINC;
1113 	}
1114 	pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *),
1115 	    KM_SLEEP);
1116 	/*
1117 	 * rehash existing elements
1118 	 */
1119 	pcp->pc_fdcount = 0;
1120 	for (i = 0; i < oldsize; i++) {
1121 		pdp = oldtbl[i];
1122 		while (pdp != NULL) {
1123 			pdp1 = pdp->pd_hashnext;
1124 			pcache_insert_fd(pcp, pdp, nfds);
1125 			pdp = pdp1;
1126 #ifdef DEBUG
1127 			count++;
1128 #endif
1129 		}
1130 	}
1131 	kmem_free(oldtbl, oldsize * sizeof (polldat_t *));
1132 	ASSERT(pcp->pc_fdcount == count);
1133 }
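
/*
 * Worked example (illustration only; the real POLLHASHCHUNKSZ and
 * POLLHASHINC values live in <sys/poll_impl.h>).  Assuming
 * POLLHASHCHUNKSZ == 256 and POLLHASHINC == 2, with pc_hashsize == 256:
 * nfds == 300 does not exceed 256 * 2, so the table doubles to 512;
 * nfds == 2000 exceeds 512, so the size is rounded up to
 * (2000 + 255) & ~255 == 2048.
 */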
1134 
1135 void
1136 pcache_grow_map(pollcache_t *pcp, int fd)
1137 {
1138 	int  	newsize;
1139 	ulong_t	*newmap;
1140 
1141 	/*
1142 	 * grow to nearest multiple of POLLMAPCHUNK, assuming POLLMAPCHUNK is
1143 	 * power of 2.
1144 	 */
1145 	newsize = (fd + POLLMAPCHUNK) & ~(POLLMAPCHUNK - 1);
1146 	newmap = kmem_zalloc((newsize / BT_NBIPUL) * sizeof (ulong_t),
1147 	    KM_SLEEP);
1148 	/*
1149 	 * don't want pollwakeup to set a bit while growing the bitmap.
1150 	 */
1151 	ASSERT(mutex_owned(&pcp->pc_lock) == 0);
1152 	mutex_enter(&pcp->pc_lock);
1153 	bcopy(pcp->pc_bitmap, newmap,
1154 	    (pcp->pc_mapsize / BT_NBIPUL) * sizeof (ulong_t));
1155 	kmem_free(pcp->pc_bitmap,
1156 	    (pcp->pc_mapsize /BT_NBIPUL) * sizeof (ulong_t));
1157 	pcp->pc_bitmap = newmap;
1158 	pcp->pc_mapsize = newsize;
1159 	mutex_exit(&pcp->pc_lock);
1160 }
1161 
1162 /*
1163  * remove all the references from the pollhead and fpollinfo lists.
1164  */
1165 void
1166 pcache_clean(pollcache_t *pcp)
1167 {
1168 	int i;
1169 	polldat_t **hashtbl;
1170 	polldat_t *pdp;
1171 
1172 	ASSERT(MUTEX_HELD(&curthread->t_pollstate->ps_lock));
1173 	hashtbl = pcp->pc_hash;
1174 	for (i = 0; i < pcp->pc_hashsize; i++) {
1175 		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
1176 			if (pdp->pd_php != NULL) {
1177 				pollhead_delete(pdp->pd_php, pdp);
1178 				pdp->pd_php = NULL;
1179 			}
1180 			if (pdp->pd_fp != NULL) {
1181 				delfpollinfo(pdp->pd_fd);
1182 				pdp->pd_fp = NULL;
1183 			}
1184 		}
1185 	}
1186 }
1187 
1188 void
1189 pcacheset_invalidate(pollstate_t *ps, polldat_t *pdp)
1190 {
1191 	int 	i;
1192 	int	fd = pdp->pd_fd;
1193 
1194 	/*
1195 	 * we come here because of an earlier close() on this cached poll fd.
1196 	 */
1197 	ASSERT(pdp->pd_fp == NULL);
1198 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1199 	pdp->pd_events = 0;
1200 	for (i = 0; i < ps->ps_nsets; i++) {
1201 		xref_t		*refp;
1202 		pollcacheset_t	*pcsp;
1203 
1204 		ASSERT(pdp->pd_ref != NULL);
1205 		refp = &pdp->pd_ref[i];
1206 		if (refp->xf_refcnt) {
1207 			ASSERT(refp->xf_position >= 0);
1208 			pcsp = &ps->ps_pcacheset[i];
1209 			if (refp->xf_refcnt == 1) {
1210 				pcsp->pcs_pollfd[refp->xf_position].fd = -1;
1211 				refp->xf_refcnt = 0;
1212 				pdp->pd_count--;
1213 			} else if (refp->xf_refcnt > 1) {
1214 				int	j;
1215 
1216 				/*
1217 				 * turn off every appearance in pcs_pollfd list
1218 				 */
1219 				for (j = refp->xf_position;
1220 				    j < pcsp->pcs_nfds; j++) {
1221 					if (pcsp->pcs_pollfd[j].fd == fd) {
1222 						pcsp->pcs_pollfd[j].fd = -1;
1223 						refp->xf_refcnt--;
1224 						pdp->pd_count--;
1225 					}
1226 				}
1227 			}
1228 			ASSERT(refp->xf_refcnt == 0);
1229 			refp->xf_position = POLLPOSINVAL;
1230 		}
1231 	}
1232 	ASSERT(pdp->pd_count == 0);
1233 }
1234 
1235 /*
1236  * Insert poll fd into the pollcache, and add poll registration.
1237  * This routine is called after getf() and before releasef(). So the vnode
1238  * can not disappear even if we block here.
1239  * If there is an error, the polled fd is not cached.
1240  */
1241 int
1242 pcache_insert(pollstate_t *ps, file_t *fp, pollfd_t *pollfdp, int *fdcntp,
1243     ssize_t pos, int which)
1244 {
1245 	pollcache_t	*pcp = ps->ps_pcache;
1246 	polldat_t	*pdp;
1247 	int		error;
1248 	int		fd;
1249 	pollhead_t	*memphp = NULL;
1250 	xref_t		*refp;
1251 	int		newpollfd = 0;
1252 
1253 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1254 	/*
1255 	 * The poll caching uses the existing VOP_POLL interface. If there
1256 	 * are no polled events, we want the polled device to set its
1257 	 * "someone is sleeping in poll" flag. When the polled events happen
1258 	 * later, the driver will call pollwakeup(). We achieve this by
1259 	 * always passing 0 in the third parameter ("anyyet") when calling
1260 	 * VOP_POLL. This parameter is not looked at by drivers when the
1261 	 * polled events exist. If a driver chooses to ignore this parameter
1262 	 * and call pollwakeup whenever the polled events happen, that will
1263 	 * be OK too.
1264 	 */
1265 	ASSERT(curthread->t_pollcache == NULL);
1266 	error = VOP_POLL(fp->f_vnode, pollfdp->events, 0, &pollfdp->revents,
1267 	    &memphp);
1268 	if (error) {
1269 		return (error);
1270 	}
1271 	if (pollfdp->revents) {
1272 		(*fdcntp)++;
1273 	}
1274 	/*
1275 	 * polling the underlying device succeeded. Now we can cache it.
1276 	 * A close can't come in here because we have not done a releasef()
1277 	 * yet.
1278 	 */
1279 	fd = pollfdp->fd;
1280 	pdp = pcache_lookup_fd(pcp, fd);
1281 	if (pdp == NULL) {
1282 		ASSERT(ps->ps_nsets > 0);
1283 		pdp = pcache_alloc_fd(ps->ps_nsets);
1284 		newpollfd = 1;
1285 	}
1286 	/*
1287 	 * If this entry was used to cache a poll fd which was closed, and
1288 	 * this entry has not been cleaned, do it now.
1289 	 */
1290 	if ((pdp->pd_count > 0) && (pdp->pd_fp == NULL)) {
1291 		pcacheset_invalidate(ps, pdp);
1292 		ASSERT(pdp->pd_next == NULL);
1293 	}
1294 	if (pdp->pd_count == 0) {
1295 		pdp->pd_fd = fd;
1296 		pdp->pd_fp = fp;
1297 		addfpollinfo(fd);
1298 		pdp->pd_thread = curthread;
1299 		pdp->pd_pcache = pcp;
1300 		/*
1301 		 * the entry is never used or cleared by removing a cached
1302 		 * pollfd (pcache_delete_fd). So all the fields should be clear.
1303 		 */
1304 		ASSERT(pdp->pd_next == NULL);
1305 	}
1306 
1307 	/*
1308 	 * A polled fd is considered cached. So there should be a fpollinfo
1309 	 * entry on uf_fpollinfo list.
1310 	 */
1311 	ASSERT(infpollinfo(fd));
1312 	/*
1313 	 * If there is an inconsistency, we want to know it here.
1314 	 */
1315 	ASSERT(pdp->pd_fp == fp);
1316 
1317 	/*
1318 	 * XXX pd_events is a union of all polled events on this fd, possibly
1319 	 * by different threads. Unless this is a new first poll(), pd_events
1320 	 * never shrinks. If an event is no longer polled by a process, there
1321 	 * is no way to cancel that event. In that case, poll degrades to its
1322 	 * old form -- polling on this fd every time poll() is called. The
1323 	 * assumption is an app always polls the same type of events.
1324 	 */
1325 	pdp->pd_events |= pollfdp->events;
1326 
1327 	pdp->pd_count++;
1328 	/*
1329 	 * There is not much special handling for multiple appearances of
1330 	 * the same fd other than xf_position always recording the first
1331 	 * appearance in the poll list. If this is called from pcacheset_cache_list,
1332 	 * a VOP_POLL is called on every pollfd entry; therefore each
1333 	 * revents and fdcnt should be set correctly. If this is called from
1334 	 * pcacheset_resolve, we don't care about fdcnt here. Pollreadmap will
1335 	 * pick up the right count and handle revents field of each pollfd
1336 	 * entry.
1337 	 */
1338 	ASSERT(pdp->pd_ref != NULL);
1339 	refp = &pdp->pd_ref[which];
1340 	if (refp->xf_refcnt == 0) {
1341 		refp->xf_position = pos;
1342 	} else {
1343 		/*
1344 		 * xf_position records the fd's first appearance in poll list
1345 		 */
1346 		if (pos < refp->xf_position) {
1347 			refp->xf_position = pos;
1348 		}
1349 	}
1350 	ASSERT(pollfdp->fd == ps->ps_pollfd[refp->xf_position].fd);
1351 	refp->xf_refcnt++;
1352 	if (fd >= pcp->pc_mapsize) {
1353 		pcache_grow_map(pcp, fd);
1354 	}
1355 	if (fd > pcp->pc_mapend) {
1356 		pcp->pc_mapend = fd;
1357 	}
1358 	if (newpollfd != 0) {
1359 		pcache_insert_fd(ps->ps_pcache, pdp, ps->ps_nfds);
1360 	}
1361 	if (memphp) {
1362 		if (pdp->pd_php == NULL) {
1363 			pollhead_insert(memphp, pdp);
1364 			pdp->pd_php = memphp;
1365 		} else {
1366 			if (memphp != pdp->pd_php) {
1367 				/*
1368 				 * layered devices (e.g. console driver)
1369 				 * may change the vnode and thus the pollhead
1370 				 * pointer out from underneath us.
1371 				 */
1372 				pollhead_delete(pdp->pd_php, pdp);
1373 				pollhead_insert(memphp, pdp);
1374 				pdp->pd_php = memphp;
1375 			}
1376 		}
1377 	}
1378 	/*
1379 	 * Since there is a considerable window between VOP_POLL and when
1380 	 * we actually put the polldat struct on the pollhead list, we could
1381 	 * miss a pollwakeup. In the case of polling additional events, we
1382 	 * don't update the events until after VOP_POLL. So we could miss
1383 	 * pollwakeup there too. So we always set the bit here just to be
1384 	 * safe. The real performance gain is in subsequent pcache_poll.
1385 	 */
1386 	mutex_enter(&pcp->pc_lock);
1387 	BT_SET(pcp->pc_bitmap, fd);
1388 	mutex_exit(&pcp->pc_lock);
1389 	return (0);
1390 }
1391 
1392 /*
1393  * The entry is not really deleted. The fields are cleared so that the
1394  * entry is no longer useful, but it will remain in the hash table for reuse
1395  * later. It will be freed when the polling lwp exits.
1396  */
1397 int
1398 pcache_delete_fd(pollstate_t *ps, int fd, size_t pos, int which, uint_t cevent)
1399 {
1400 	pollcache_t	*pcp = ps->ps_pcache;
1401 	polldat_t	*pdp;
1402 	xref_t		*refp;
1403 
1404 	ASSERT(fd < pcp->pc_mapsize);
1405 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1406 
1407 	pdp = pcache_lookup_fd(pcp, fd);
1408 	ASSERT(pdp != NULL);
1409 	ASSERT(pdp->pd_count > 0);
1410 	ASSERT(pdp->pd_ref != NULL);
1411 	refp = &pdp->pd_ref[which];
1412 	if (pdp->pd_count == 1) {
1413 		pdp->pd_events = 0;
1414 		refp->xf_position = POLLPOSINVAL;
1415 		ASSERT(refp->xf_refcnt == 1);
1416 		refp->xf_refcnt = 0;
1417 		if (pdp->pd_php) {
1418 			/*
1419 			 * It is possible for a wakeup thread to get ahead
1420 			 * of the following pollhead_delete and set the bit in
1421 			 * bitmap.  It is OK because the bit will be cleared
1422 			 * here anyway.
1423 			 */
1424 			pollhead_delete(pdp->pd_php, pdp);
1425 			pdp->pd_php = NULL;
1426 		}
1427 		pdp->pd_count = 0;
1428 		if (pdp->pd_fp != NULL) {
1429 			pdp->pd_fp = NULL;
1430 			delfpollinfo(fd);
1431 		}
1432 		mutex_enter(&pcp->pc_lock);
1433 		BT_CLEAR(pcp->pc_bitmap, fd);
1434 		mutex_exit(&pcp->pc_lock);
1435 		return (0);
1436 	}
1437 	if ((cevent & POLLCLOSED) == POLLCLOSED) {
1438 		/*
1439 		 * fd cached here has been closed. This is the first
1440 		 * pcache_delete_fd called after the close. Clean up the
1441 		 * entire entry.
1442 		 */
1443 		pcacheset_invalidate(ps, pdp);
1444 		ASSERT(pdp->pd_php == NULL);
1445 		mutex_enter(&pcp->pc_lock);
1446 		BT_CLEAR(pcp->pc_bitmap, fd);
1447 		mutex_exit(&pcp->pc_lock);
1448 		return (0);
1449 	}
1450 #ifdef DEBUG
1451 	if (getf(fd) != NULL) {
1452 		ASSERT(infpollinfo(fd));
1453 		releasef(fd);
1454 	}
1455 #endif	/* DEBUG */
1456 	pdp->pd_count--;
1457 	ASSERT(refp->xf_refcnt > 0);
1458 	if (--refp->xf_refcnt == 0) {
1459 		refp->xf_position = POLLPOSINVAL;
1460 	} else {
1461 		ASSERT(pos >= refp->xf_position);
1462 		if (pos == refp->xf_position) {
1463 			/*
1464 			 * The xref position is no longer valid.
1465 			 * Reset it to a special value and let
1466 			 * the caller know it needs to call pcache_update_xref()
1467 			 * with a new xf_position value.
1468 			 */
1469 			refp->xf_position = POLLPOSTRANS;
1470 			return (1);
1471 		}
1472 	}
1473 	return (0);
1474 }
1475 
1476 void
1477 pcache_update_xref(pollcache_t *pcp, int fd, ssize_t pos, int which)
1478 {
1479 	polldat_t	*pdp;
1480 
1481 	pdp = pcache_lookup_fd(pcp, fd);
1482 	ASSERT(pdp != NULL);
1483 	ASSERT(pdp->pd_ref != NULL);
1484 	pdp->pd_ref[which].xf_position = pos;
1485 }
1486 
1487 #ifdef DEBUG
1488 /*
1489  * For each polled fd, it's either in the bitmap or cached in
1490  * pcache hash table. If this routine returns 0, something is wrong.
1491  */
1492 static int
1493 pollchecksanity(pollstate_t *ps, nfds_t nfds)
1494 {
1495 	int    		i;
1496 	int		fd;
1497 	pollcache_t	*pcp = ps->ps_pcache;
1498 	polldat_t	*pdp;
1499 	pollfd_t	*pollfdp = ps->ps_pollfd;
1500 	file_t		*fp;
1501 
1502 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1503 	for (i = 0; i < nfds; i++) {
1504 		fd = pollfdp[i].fd;
1505 		if (fd < 0) {
1506 			ASSERT(pollfdp[i].revents == 0);
1507 			continue;
1508 		}
1509 		if (pollfdp[i].revents == POLLNVAL)
1510 			continue;
1511 		if ((fp = getf(fd)) == NULL)
1512 			continue;
1513 		pdp = pcache_lookup_fd(pcp, fd);
1514 		ASSERT(pdp != NULL);
1515 		ASSERT(infpollinfo(fd));
1516 		ASSERT(pdp->pd_fp == fp);
1517 		releasef(fd);
1518 		if (BT_TEST(pcp->pc_bitmap, fd))
1519 			continue;
1520 		if (pdp->pd_php == NULL)
1521 			return (0);
1522 	}
1523 	return (1);
1524 }
1525 #endif	/* DEBUG */
1526 
1527 /*
1528  * resolve the difference between the current poll list and a cached one.
1529  */
1530 int
1531 pcacheset_resolve(pollstate_t *ps, nfds_t nfds, int *fdcntp, int which)
1532 {
1533 	int    		i;
1534 	pollcache_t	*pcp = ps->ps_pcache;
1535 	pollfd_t	*newlist = NULL;
1536 	pollfd_t	*current = ps->ps_pollfd;
1537 	pollfd_t	*cached;
1538 	pollcacheset_t	*pcsp;
1539 	int		common;
1540 	int		count = 0;
1541 	int		offset;
1542 	int		remain;
1543 	int		fd;
1544 	file_t		*fp;
1545 	int		fdcnt = 0;
1546 	int		cnt = 0;
1547 	nfds_t		old_nfds;
1548 	int		error = 0;
1549 	int		mismatch = 0;
1550 
1551 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1552 #ifdef DEBUG
1553 	checkpolldat(ps);
1554 #endif
1555 	pcsp = &ps->ps_pcacheset[which];
1556 	old_nfds = pcsp->pcs_nfds;
1557 	common = (nfds > old_nfds) ? old_nfds : nfds;
1558 	if (nfds != old_nfds) {
1559 		/*
1560 		 * the length of poll list has changed. allocate a new
1561 		 * pollfd list.
1562 		 */
1563 		newlist = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
1564 		bcopy(current, newlist, sizeof (pollfd_t) * nfds);
1565 	}
1566 	/*
1567 	 * Compare the overlapping part of the current fd list with the
1568 	 * cached one. Whenever a difference is found, resolve it.
1569 	 * The comparison is done on the current poll list and the
1570 	 * cached list. But we may be setting up the newlist to be the
1571 	 * cached list for next poll.
1572 	 */
1573 	cached = pcsp->pcs_pollfd;
1574 	remain = common;
1575 
1576 	while (count < common) {
1577 		int	tmpfd;
1578 		pollfd_t *np;
1579 
1580 		np = (newlist != NULL) ? &newlist[count] : NULL;
1581 		offset = pcacheset_cmp(&current[count], &cached[count], np,
1582 		    remain);
1583 		/*
1584 		 * Collect stats. If the lists match completely on the first pass,
1585 		 * it's a hit. Otherwise, it's a partial hit or miss.
1586 		 */
1587 		if ((count == 0) && (offset == common)) {
1588 			pollstats.pollcachehit.value.ui64++;
1589 		} else {
1590 			mismatch++;
1591 		}
1592 		count += offset;
1593 		if (offset < remain) {
1594 			ASSERT(count < common);
1595 			ASSERT((current[count].fd != cached[count].fd) ||
1596 			    (current[count].events != cached[count].events));
1597 			/*
1598 			 * Filter out invalid events.
1599 			 */
1600 			if (current[count].events & ~VALID_POLL_EVENTS) {
1601 				if (newlist != NULL) {
1602 					newlist[count].events =
1603 						current[count].events &=
1604 							VALID_POLL_EVENTS;
1605 				} else {
1606 					current[count].events &=
1607 						VALID_POLL_EVENTS;
1608 				}
1609 			}
1610 			/*
1611 			 * when resolving a difference, we always remove the
1612 			 * fd from cache before inserting one into cache.
1613 			 */
1614 			if (cached[count].fd >= 0) {
1615 				tmpfd = cached[count].fd;
1616 				if (pcache_delete_fd(ps, tmpfd, count, which,
1617 				    (uint_t)cached[count].events)) {
1618 					/*
1619 					 * This should be rare but needed for
1620 					 * correctness.
1621 					 *
1622 					 * The first appearance in cached list
1623 					 * is being "turned off". The same fd
1624 					 * appears more than once in the cached
1625 					 * poll list. Find the next one on the
1626 					 * list and update the cached
1627 					 * xf_position field.
1628 					 */
1629 					for (i = count + 1; i < old_nfds; i++) {
1630 						if (cached[i].fd == tmpfd) {
1631 							pcache_update_xref(pcp,
1632 							    tmpfd, (ssize_t)i,
1633 							    which);
1634 							break;
1635 						}
1636 					}
1637 					ASSERT(i <= old_nfds);
1638 				}
1639 				/*
1640 				 * In case a new cache list is allocated,
1641 				 * need to keep both cache lists in sync
1642 				 * b/c the new one can be freed if we have
1643 				 * an error later.
1644 				 */
1645 				cached[count].fd = -1;
1646 				if (newlist != NULL) {
1647 					newlist[count].fd = -1;
1648 				}
1649 			}
1650 			if ((tmpfd = current[count].fd) >= 0) {
1651 				/*
1652 				 * add to the cached fd tbl and bitmap.
1653 				 */
1654 				if ((fp = getf(tmpfd)) == NULL) {
1655 					current[count].revents = POLLNVAL;
1656 					if (newlist != NULL) {
1657 						newlist[count].fd = -1;
1658 					}
1659 					cached[count].fd = -1;
1660 					fdcnt++;
1661 				} else {
1662 					/*
1663 					 * Here we don't care about the
1664 					 * fdcnt. We will examine the bitmap
1665 					 * later and pick up the correct
1666 					 * fdcnt there. So we never bother
1667 					 * to check value of 'cnt'.
1668 					 */
1669 					error = pcache_insert(ps, fp,
1670 					    &current[count], &cnt,
1671 					    (ssize_t)count, which);
1672 					/*
1673 					 * if no error, we want to do releasef
1674 					 * after we updated cache poll list
1675 					 * entry so that close() won't race
1676 					 * us.
1677 					 */
1678 					if (error) {
1679 						/*
1680 						 * If we encountered an error,
1681 						 * we have invalidated an
1682 						 * entry in cached poll list
1683 						 * (in pcache_delete_fd() above)
1684 						 * but failed to add one here.
1685 						 * This is OK b/c what's in the
1686 						 * cached list is consistent
1687 						 * with content of cache.
1688 						 * It will not have any ill
1689 						 * effect on next poll().
1690 						 */
1691 						releasef(tmpfd);
1692 						if (newlist != NULL) {
1693 							kmem_free(newlist,
1694 							    nfds *
1695 							    sizeof (pollfd_t));
1696 						}
1697 						return (error);
1698 					}
1699 					/*
1700 					 * If we have allocated a new(temp)
1701 					 * cache list, we need to keep both
1702 					 * in sync b/c the new one can be freed
1703 					 * if we have an error later.
1704 					 */
1705 					if (newlist != NULL) {
1706 						newlist[count].fd =
1707 						    current[count].fd;
1708 						newlist[count].events =
1709 						    current[count].events;
1710 					}
1711 					cached[count].fd = current[count].fd;
1712 					cached[count].events =
1713 					    current[count].events;
1714 					releasef(tmpfd);
1715 				}
1716 			} else {
1717 				current[count].revents = 0;
1718 			}
1719 			count++;
1720 			remain = common - count;
1721 		}
1722 	}
1723 	if (mismatch != 0) {
1724 		if (mismatch == common) {
1725 			pollstats.pollcachemiss.value.ui64++;
1726 		} else {
1727 			pollstats.pollcachephit.value.ui64++;
1728 		}
1729 	}
1730 	/*
1731 	 * take care of the non-overlapping part of the list
1732 	 */
1733 	if (nfds > old_nfds) {
1734 		ASSERT(newlist != NULL);
1735 		for (i = old_nfds; i < nfds; i++) {
1736 			/* filter out invalid events */
1737 			if (current[i].events & ~VALID_POLL_EVENTS) {
1738 				newlist[i].events = current[i].events =
1739 				current[i].events & VALID_POLL_EVENTS;
1740 			}
1741 			if ((fd = current[i].fd) < 0) {
1742 				current[i].revents = 0;
1743 				continue;
1744 			}
1745 			/*
1746 			 * add to the cached fd tbl and bitmap.
1747 			 */
1748 			if ((fp = getf(fd)) == NULL) {
1749 				current[i].revents = POLLNVAL;
1750 				newlist[i].fd = -1;
1751 				fdcnt++;
1752 				continue;
1753 			}
1754 			/*
1755 			 * Here we don't care about the
1756 			 * fdcnt. We will examine the bitmap
1757 			 * later and pick up the correct
1758 			 * fdcnt there. So we never bother to
1759 			 * check 'cnt'.
1760 			 */
1761 			error = pcache_insert(ps, fp, &current[i], &cnt,
1762 			    (ssize_t)i, which);
1763 			releasef(fd);
1764 			if (error) {
1765 				/*
1766 				 * Here we are halfway through adding newly
1767 				 * polled fds. Undo enough to keep the cache
1768 				 * list consistent with the cache content.
1769 				 */
1770 				pcacheset_remove_list(ps, current, old_nfds,
1771 				    i, which, 0);
1772 				kmem_free(newlist, nfds * sizeof (pollfd_t));
1773 				return (error);
1774 			}
1775 		}
1776 	}
1777 	if (old_nfds > nfds) {
1778 		/*
1779 		 * remove the fd's which are no longer polled.
1780 		 */
1781 		pcacheset_remove_list(ps, pcsp->pcs_pollfd, nfds, old_nfds,
1782 		    which, 1);
1783 	}
1784 	/*
1785 	 * Set difference resolved. Update nfds and the cached list
1786 	 * in pollstate struct.
1787 	 */
1788 	if (newlist != NULL) {
1789 		kmem_free(pcsp->pcs_pollfd, old_nfds * sizeof (pollfd_t));
1790 		/*
1791 		 * By now, the pollfd.revents field should
1792 		 * all be zeroed.
1793 		 */
1794 		pcsp->pcs_pollfd = newlist;
1795 		pcsp->pcs_nfds = nfds;
1796 	}
1797 	ASSERT(*fdcntp == 0);
1798 	*fdcntp = fdcnt;
1799 	/*
1800 	 * By now for every fd in pollfdp, one of the following should be
1801 	 * true. Otherwise we will miss a polled event.
1802 	 *
1803 	 * 1. the bit corresponding to the fd in bitmap is set. So VOP_POLL
1804 	 *    will be called on this fd in next poll.
1805 	 * 2. the fd is cached in the pcache (i.e. pd_php is set). So
1806 	 *    pollnotify will happen.
1807 	 */
1808 	ASSERT(pollchecksanity(ps, nfds));
1809 	/*
1810 	 * make sure the cross references between cached poll lists and cached
1811 	 * poll fds are correct.
1812 	 */
1813 	ASSERT(pollcheckxref(ps, which));
1814 	/*
1815 	 * ensure that each polldat in the pollcache references a polled fd in
1816 	 * the pollcacheset.
1817 	 */
1818 #ifdef DEBUG
1819 	checkpolldat(ps);
1820 #endif
1821 	return (0);
1822 }
1823 
1824 #ifdef DEBUG
1825 static int
1826 pollscanrevents(pollcache_t *pcp, pollfd_t *pollfdp, nfds_t nfds)
1827 {
1828 	int i;
1829 	int reventcnt = 0;
1830 
1831 	for (i = 0; i < nfds; i++) {
1832 		if (pollfdp[i].fd < 0) {
1833 			ASSERT(pollfdp[i].revents == 0);
1834 			continue;
1835 		}
1836 		if (pollfdp[i].revents) {
1837 			reventcnt++;
1838 		}
1839 		if (pollfdp[i].revents && (pollfdp[i].revents != POLLNVAL)) {
1840 			ASSERT(BT_TEST(pcp->pc_bitmap, pollfdp[i].fd));
1841 		}
1842 	}
1843 	return (reventcnt);
1844 }
1845 #endif	/* DEBUG */
1846 
1847 /*
1848  * read the bitmap and poll on fds corresponding to the '1' bits. The ps_lock
1849  * is held upon entry.
1850  */
1851 int
1852 pcache_poll(pollfd_t *pollfdp, pollstate_t *ps, nfds_t nfds, int *fdcntp,
1853     int which)
1854 {
1855 	int		i;
1856 	pollcache_t	*pcp;
1857 	int 		fd;
1858 	int 		begin, end, done;
1859 	pollhead_t	*php;
1860 	int		fdcnt;
1861 	int		error = 0;
1862 	file_t		*fp;
1863 	polldat_t	*pdp;
1864 	xref_t		*refp;
1865 	int		entry;
1866 
1867 	pcp = ps->ps_pcache;
1868 	ASSERT(MUTEX_HELD(&ps->ps_lock));
1869 	ASSERT(MUTEX_HELD(&pcp->pc_lock));
1870 retry:
1871 	done = 0;
1872 	begin = 0;
1873 	fdcnt = 0;
1874 	end = pcp->pc_mapend;
1875 	while ((fdcnt < nfds) && !done) {
1876 		php = NULL;
1877 		/*
1878 		 * only poll fds which may have events
1879 		 */
1880 		fd = bt_getlowbit(pcp->pc_bitmap, begin, end);
1881 		ASSERT(fd <= end);
1882 		if (fd >= 0) {
1883 			ASSERT(pollcheckrevents(ps, begin, fd, which));
1884 			/*
1885 			 * adjust map pointers for next round
1886 			 */
1887 			if (fd == end) {
1888 				done = 1;
1889 			} else {
1890 				begin = fd + 1;
1891 			}
1892 			/*
1893 			 * A bitmap caches poll state information of
1894 			 * multiple poll lists. Call VOP_POLL only if
1895 			 * the bit corresponds to an fd in this poll
1896 			 * list.
1897 			 */
1898 			pdp = pcache_lookup_fd(pcp, fd);
1899 			ASSERT(pdp != NULL);
1900 			ASSERT(pdp->pd_ref != NULL);
1901 			refp = &pdp->pd_ref[which];
1902 			if (refp->xf_refcnt == 0)
1903 				continue;
1904 			entry = refp->xf_position;
1905 			ASSERT((entry >= 0) && (entry < nfds));
1906 			ASSERT(pollfdp[entry].fd == fd);
1907 			/*
1908 			 * Being in this routine implies that we have
1909 			 * successfully polled this fd in the past.
1910 			 * Check to see if this fd was closed while we were
1911 			 * blocked in poll. This ensures that we don't
1912 			 * miss a close on the fd in case this fd is
1913 			 * reused.
1914 			 */
1915 			if (pdp->pd_fp == NULL) {
1916 				ASSERT(pdp->pd_count > 0);
1917 				pollfdp[entry].revents = POLLNVAL;
1918 				fdcnt++;
1919 				if (refp->xf_refcnt > 1) {
1920 					/*
1921 					 * this fd appeared multiple times
1922 					 * in the poll list. Find all of them.
1923 					 */
1924 					for (i = entry + 1; i < nfds; i++) {
1925 						if (pollfdp[i].fd == fd) {
1926 							pollfdp[i].revents =
1927 							    POLLNVAL;
1928 							fdcnt++;
1929 						}
1930 					}
1931 				}
1932 				pcacheset_invalidate(ps, pdp);
1933 				continue;
1934 			}
1935 			/*
1936 			 * We can be here polling a device that is being
1937 			 * closed (i.e. the file pointer is set to NULL,
1938 			 * but pollcacheclean has not happened yet).
1939 			 */
1940 			if ((fp = getf(fd)) == NULL) {
1941 				pollfdp[entry].revents = POLLNVAL;
1942 				fdcnt++;
1943 				if (refp->xf_refcnt > 1) {
1944 					/*
1945 					 * this fd appeared multiple times
1946 					 * in the poll list. Find all of them.
1947 					 */
1948 					for (i = entry + 1; i < nfds; i++) {
1949 						if (pollfdp[i].fd == fd) {
1950 							pollfdp[i].revents =
1951 							    POLLNVAL;
1952 							fdcnt++;
1953 						}
1954 					}
1955 				}
1956 				continue;
1957 			}
1958 			ASSERT(pdp->pd_fp == fp);
1959 			ASSERT(infpollinfo(fd));
1960 			/*
1961 			 * Since we no longer hold poll head lock across
1962 			 * VOP_POLL, pollunlock logic can be simplified.
1963 			 */
1964 			ASSERT(pdp->pd_php == NULL ||
1965 			    MUTEX_NOT_HELD(PHLOCK(pdp->pd_php)));
1966 			/*
1967 			 * An underlying file system may set a "pollpending"
1968 			 * flag when it sees that the poll may block.
1969 			 * Pollwakeup() is called by the wakeup thread if
1970 			 * pollpending is set. Pass a 0 fdcnt so that the
1971 			 * underlying file system will set the "pollpending"
1972 			 * flag when there are no polled events.
1973 			 *
1974 			 * Use pollfdp[].events for actual polling because
1975 			 * pd_events is the union of all cached poll events
1976 			 * on this fd. The events parameter also affects
1977 			 * how the polled device sets the "poll pending"
1978 			 * flag.
1979 			 */
1980 			ASSERT(curthread->t_pollcache == NULL);
1981 			error = VOP_POLL(fp->f_vnode, pollfdp[entry].events, 0,
1982 			    &pollfdp[entry].revents, &php);
1983 			/*
1984 			 * releasef only after we are completely done with
1985 			 * this cached poll entry, to prevent a close() from
1986 			 * coming in and clearing the entry.
1987 			 */
1988 			if (error) {
1989 				releasef(fd);
1990 				break;
1991 			}
1992 			/*
1993 			 * layered devices (e.g. console driver)
1994 			 * may change the vnode and thus the pollhead
1995 			 * pointer out from underneath us.
1996 			 */
1997 			if (php != NULL && pdp->pd_php != NULL &&
1998 			    php != pdp->pd_php) {
1999 				releasef(fd);
2000 				pollhead_delete(pdp->pd_php, pdp);
2001 				pdp->pd_php = php;
2002 				pollhead_insert(php, pdp);
2003 				/*
2004 				 * We could have missed a wakeup on the new
2005 				 * target device. Make sure the new target
2006 				 * gets polled once.
2007 				 */
2008 				BT_SET(pcp->pc_bitmap, fd);
2009 				goto retry;
2010 			}
2011 
2012 			if (pollfdp[entry].revents) {
2013 				ASSERT(refp->xf_refcnt >= 1);
2014 				fdcnt++;
2015 				if (refp->xf_refcnt > 1) {
2016 					/*
2017 					 * this fd appeared multiple times
2018 					 * in the poll list. This is rare but
2019 					 * we have to look at all of them for
2020 					 * correctness.
2021 					 */
2022 					error = plist_chkdupfd(fp, pdp, ps,
2023 					    pollfdp, entry, &fdcnt);
2024 					if (error > 0) {
2025 						releasef(fd);
2026 						break;
2027 					}
2028 					if (error < 0) {
2029 						goto retry;
2030 					}
2031 				}
2032 				releasef(fd);
2033 			} else {
2034 				/*
2035 				 * VOP_POLL didn't return any revents. We can
2036 				 * clear the bit in the bitmap only if we have
2037 				 * the pollhead ptr cached and no other cached
2038 				 * entry is polling different events on this fd.
2039 				 * VOP_POLL may have dropped the ps_lock. Make
2040 				 * sure a pollwakeup has not happened before
2041 				 * clearing the bit.
2042 				 */
2043 				if ((pdp->pd_php != NULL) &&
2044 				    (pollfdp[entry].events == pdp->pd_events) &&
2045 				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
2046 					BT_CLEAR(pcp->pc_bitmap, fd);
2047 				}
2048 				/*
2049 				 * if the fd can be cached now but not before,
2050 				 * do it now.
2051 				 */
2052 				if ((pdp->pd_php == NULL) && (php != NULL)) {
2053 					pdp->pd_php = php;
2054 					pollhead_insert(php, pdp);
2055 					/*
2056 					 * We are inserting a polldat struct for
2057 					 * the first time. We may have missed a
2058 					 * wakeup on this device. Re-poll once.
2059 					 * This should be a rare event.
2060 					 */
2061 					releasef(fd);
2062 					goto retry;
2063 				}
2064 				if (refp->xf_refcnt > 1) {
2065 					/*
2066 					 * this fd appeared multiple times
2067 					 * in the poll list. This is rare but
2068 					 * we have to look at all of them for
2069 					 * correctness.
2070 					 */
2071 					error = plist_chkdupfd(fp, pdp, ps,
2072 					    pollfdp, entry, &fdcnt);
2073 					if (error > 0) {
2074 						releasef(fd);
2075 						break;
2076 					}
2077 					if (error < 0) {
2078 						goto retry;
2079 					}
2080 				}
2081 				releasef(fd);
2082 			}
2083 		} else {
2084 			done = 1;
2085 			ASSERT(pollcheckrevents(ps, begin, end + 1, which));
2086 		}
2087 	}
2088 	if (!error) {
2089 		ASSERT(*fdcntp + fdcnt == pollscanrevents(pcp, pollfdp, nfds));
2090 		*fdcntp += fdcnt;
2091 	}
2092 	return (error);
2093 }
2094 
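/*
 * Illustrative sketch (not part of the original file): the heart of
 * pcache_poll() above is a lowest-set-bit scan over pc_bitmap, where a set
 * bit means "this fd may have events pending, so VOP_POLL it".  A
 * simplified, stand-alone analogue of that scan over a plain ulong_t
 * array could look like:
 *
 *	int
 *	next_candidate_fd(ulong_t *map, int begin, int end)
 *	{
 *		int i;
 *
 *		for (i = begin; i <= end; i++) {
 *			if (map[i / BT_NBIPUL] & (1UL << (i % BT_NBIPUL)))
 *				return (i);
 *		}
 *		return (-1);
 *	}
 *
 * bt_getlowbit() does the same job a word at a time; when it returns -1
 * the loop above sets 'done' and stops issuing VOP_POLLs.
 */
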
2095 /*
2096  * Going through the poll list without much locking. Poll all fds and
2097  * cache all valid fds in the pollcache.
2098  */
2099 int
2100 pcacheset_cache_list(pollstate_t *ps, pollfd_t *fds, int *fdcntp, int which)
2101 {
2102 	pollfd_t	*pollfdp = ps->ps_pollfd;
2103 	pollcacheset_t	*pcacheset = ps->ps_pcacheset;
2104 	pollfd_t	*newfdlist;
2105 	int		i;
2106 	int		fd;
2107 	file_t		*fp;
2108 	int		error = 0;
2109 
2110 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2111 	ASSERT(which < ps->ps_nsets);
2112 	ASSERT(pcacheset != NULL);
2113 	ASSERT(pcacheset[which].pcs_pollfd == NULL);
2114 	newfdlist = kmem_alloc(ps->ps_nfds * sizeof (pollfd_t), KM_SLEEP);
2115 	/*
2116 	 * cache the new poll list in the pollcacheset.
2117 	 */
2118 	bcopy(pollfdp, newfdlist, sizeof (pollfd_t) * ps->ps_nfds);
2119 
2120 	pcacheset[which].pcs_pollfd = newfdlist;
2121 	pcacheset[which].pcs_nfds = ps->ps_nfds;
2122 	pcacheset[which].pcs_usradr = (uintptr_t)fds;
2123 
2124 	/*
2125 	 * We saved a copy of the current poll fd list in one pollcacheset.
2126 	 * The 'revents' field of the new list is not yet set to 0. Looping
2127 	 * through the new list just to do that is expensive, so we do it
2128 	 * while polling the list.
2129 	 */
2130 	for (i = 0; i < ps->ps_nfds; i++) {
2131 		fd = pollfdp[i].fd;
2132 		/*
2133 		 * We also filter out the illegal poll events in the event
2134 		 * field for the cached poll list/set.
2135 		 */
2136 		if (pollfdp[i].events & ~VALID_POLL_EVENTS) {
2137 			newfdlist[i].events = pollfdp[i].events =
2138 			pollfdp[i].events & VALID_POLL_EVENTS;
2139 		}
2140 		if (fd < 0) {
2141 			pollfdp[i].revents = 0;
2142 			continue;
2143 		}
2144 		if ((fp = getf(fd)) == NULL) {
2145 			pollfdp[i].revents = POLLNVAL;
2146 			/*
2147 			 * invalidate this cache entry in the cached poll list
2148 			 */
2149 			newfdlist[i].fd = -1;
2150 			(*fdcntp)++;
2151 			continue;
2152 		}
2153 		/*
2154 		 * cache this fd.
2155 		 */
2156 		error = pcache_insert(ps, fp, &pollfdp[i], fdcntp, (ssize_t)i,
2157 		    which);
2158 		releasef(fd);
2159 		if (error) {
2160 			/*
2161 			 * Here we are halfway through caching a new
2162 			 * poll list. Undo everything.
2163 			 */
2164 			pcacheset_remove_list(ps, pollfdp, 0, i, which, 0);
2165 			kmem_free(newfdlist, ps->ps_nfds * sizeof (pollfd_t));
2166 			pcacheset[which].pcs_pollfd = NULL;
2167 			pcacheset[which].pcs_usradr = (uintptr_t)NULL;
2168 			break;
2169 		}
2170 	}
2171 	return (error);
2172 }
2173 
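/*
 * A small, illustrative example (not from this file) of the event
 * filtering performed in pcacheset_cache_list() above: any bit outside
 * VALID_POLL_EVENTS is silently dropped from both the user's pollfd and
 * the cached copy.  Assuming a caller passed a stray bit such as 0x0800
 * along with POLLIN:
 *
 *	short events = POLLIN | 0x0800;
 *
 *	if (events & ~VALID_POLL_EVENTS)
 *		events &= VALID_POLL_EVENTS;
 *
 * events is reduced to plain POLLIN, so the cached list never records
 * event bits that VOP_POLL would not honor.
 */
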
2174 /*
2175  * called by pollcacheclean() to set the fp to NULL. It also sets the polled
2176  * events in pcacheset entries to the special event POLLCLOSED. Do a
2177  * pollwakeup to wake any sleeping poller, then remove the polldat from the
2178  * driver. The routine is called with ps_lock held.
2179  */
2180 void
2181 pcache_clean_entry(pollstate_t *ps, int fd)
2182 {
2183 	pollcache_t	*pcp;
2184 	polldat_t	*pdp;
2185 	int		i;
2186 
2187 	ASSERT(ps != NULL);
2188 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2189 	pcp = ps->ps_pcache;
2190 	ASSERT(pcp);
2191 	pdp = pcache_lookup_fd(pcp, fd);
2192 	ASSERT(pdp != NULL);
2193 	/*
2194 	 * the corresponding fpollinfo in fi_list has been removed by
2195 	 * a close on this fd. Reset the cached fp ptr here.
2196 	 */
2197 	pdp->pd_fp = NULL;
2198 	/*
2199 	 * XXX - This routine also touches data in pcacheset struct.
2200 	 *
2201 	 * set the event in cached poll lists to POLLCLOSED. This invalidates
2202 	 * the cached poll fd entry in that poll list, which will force a
2203 	 * removal of this cached entry in the next poll(). The cleanup is
2204 	 * done at removal time.
2205 	 */
2206 	ASSERT(pdp->pd_ref != NULL);
2207 	for (i = 0; i < ps->ps_nsets; i++) {
2208 		xref_t		*refp;
2209 		pollcacheset_t	*pcsp;
2210 
2211 		refp = &pdp->pd_ref[i];
2212 		if (refp->xf_refcnt) {
2213 			ASSERT(refp->xf_position >= 0);
2214 			pcsp = &ps->ps_pcacheset[i];
2215 			if (refp->xf_refcnt == 1) {
2216 				pcsp->pcs_pollfd[refp->xf_position].events =
2217 				    (short)POLLCLOSED;
2218 			}
2219 			if (refp->xf_refcnt > 1) {
2220 				int	j;
2221 				/*
2222 				 * mark every matching entry in pcs_pollfd
2223 				 */
2224 				for (j = refp->xf_position;
2225 				    j < pcsp->pcs_nfds; j++) {
2226 					if (pcsp->pcs_pollfd[j].fd == fd) {
2227 						pcsp->pcs_pollfd[j].events =
2228 						    (short)POLLCLOSED;
2229 					}
2230 				}
2231 			}
2232 		}
2233 	}
2234 	if (pdp->pd_php) {
2235 		pollwakeup(pdp->pd_php, POLLHUP);
2236 		pollhead_delete(pdp->pd_php, pdp);
2237 		pdp->pd_php = NULL;
2238 	}
2239 }
2240 
2241 /*
2242  * This is the first time this thread has ever polled,
2243  * so we have to create its pollstate structure.
2244  * This will persist for the life of the thread,
2245  * until it calls pollcleanup().
2246  */
2247 pollstate_t *
2248 pollstate_create(void)
2249 {
2250 	pollstate_t *ps;
2251 
2252 	ps = kmem_zalloc(sizeof (pollstate_t), KM_SLEEP);
2253 	ps->ps_nsets = POLLFDSETS;
2254 	ps->ps_pcacheset = pcacheset_create(ps->ps_nsets);
2255 	return (ps);
2256 }
2257 
2258 void
2259 pollstate_destroy(pollstate_t *ps)
2260 {
2261 	if (ps->ps_pollfd != NULL) {
2262 		kmem_free(ps->ps_pollfd, ps->ps_nfds * sizeof (pollfd_t));
2263 		ps->ps_pollfd = NULL;
2264 	}
2265 	if (ps->ps_pcache != NULL) {
2266 		pcache_destroy(ps->ps_pcache);
2267 		ps->ps_pcache = NULL;
2268 	}
2269 	pcacheset_destroy(ps->ps_pcacheset, ps->ps_nsets);
2270 	ps->ps_pcacheset = NULL;
2271 	if (ps->ps_dpbuf != NULL) {
2272 		kmem_free(ps->ps_dpbuf, ps->ps_dpbufsize * sizeof (pollfd_t));
2273 		ps->ps_dpbuf = NULL;
2274 	}
2275 	mutex_destroy(&ps->ps_lock);
2276 	kmem_free(ps, sizeof (pollstate_t));
2277 }
2278 
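/*
 * A hedged sketch (assumed calling convention, not code from this file) of
 * the lazy creation pattern that pollstate_create() serves: the first poll
 * by a thread allocates its pollstate, which then persists until the
 * thread exits.
 *
 *	pollstate_t *ps;
 *
 *	if ((ps = curthread->t_pollstate) == NULL) {
 *		ps = pollstate_create();
 *		curthread->t_pollstate = ps;
 *	}
 *	mutex_enter(&ps->ps_lock);
 *	... poll work that requires ps_lock ...
 *	mutex_exit(&ps->ps_lock);
 *
 * pollstate_destroy() is the matching teardown for thread exit.
 */
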
2279 /*
2280  * We are holding the appropriate uf_lock entering this routine.
2281  * Bump up the pc_busy count to prevent the thread from exiting.
2282  */
2283 void
2284 pollblockexit(fpollinfo_t *fpip)
2285 {
2286 	for (; fpip; fpip = fpip->fp_next) {
2287 		pollcache_t *pcp = fpip->fp_thread->t_pollstate->ps_pcache;
2288 
2289 		mutex_enter(&pcp->pc_no_exit);
2290 		pcp->pc_busy++;  /* prevents exit()'s */
2291 		mutex_exit(&pcp->pc_no_exit);
2292 	}
2293 }
2294 
2295 /*
2296  * Complete phase 2 of cached poll fd cleanup. Call pcache_clean_entry to mark
2297  * this cache entry. We can't clean up the polldat entry here because an
2298  * lwp blocked in poll() needs the info to return. Wake up anyone blocked
2299  * in poll and let the exiting lwp go. No lock is held upon entry, so it is
2300  * OK for pcache_clean_entry to call pollwakeup().
2301  * pcache_clean_entry to call pollwakeup().
2302  */
2303 void
2304 pollcacheclean(fpollinfo_t *fip, int fd)
2305 {
2306 	struct fpollinfo	*fpip, *fpip2;
2307 
2308 	fpip = fip;
2309 	while (fpip) {
2310 		pollstate_t *ps = fpip->fp_thread->t_pollstate;
2311 		pollcache_t *pcp = ps->ps_pcache;
2312 
2313 		mutex_enter(&ps->ps_lock);
2314 		pcache_clean_entry(ps, fd);
2315 		mutex_exit(&ps->ps_lock);
2316 		mutex_enter(&pcp->pc_no_exit);
2317 		pcp->pc_busy--;
2318 		if (pcp->pc_busy == 0) {
2319 			/*
2320 			 * Wake up the thread waiting in
2321 			 * thread_exit().
2322 			 */
2323 			cv_signal(&pcp->pc_busy_cv);
2324 		}
2325 		mutex_exit(&pcp->pc_no_exit);
2326 
2327 		fpip2 = fpip;
2328 		fpip = fpip->fp_next;
2329 		kmem_free(fpip2, sizeof (fpollinfo_t));
2330 	}
2331 }
2332 
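/*
 * Sketch of the other half of the pc_busy handshake (the exact wait loop
 * is assumed here, not copied from this file): the exiting thread blocks
 * until every pollblockexit()/pollcacheclean() pairing has drained,
 * roughly:
 *
 *	mutex_enter(&pcp->pc_no_exit);
 *	while (pcp->pc_busy > 0)
 *		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
 *	mutex_exit(&pcp->pc_no_exit);
 *
 * which is why pollcacheclean() above signals pc_busy_cv once pc_busy
 * drops to zero.
 */
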
2333 /*
2334  * One of the cache lines' counters is wrapping around. Reset all cache line
2335  * counters to zero except one. This is simplistic, but probably works
2336  * effectively.
2337  */
2338 void
2339 pcacheset_reset_count(pollstate_t *ps, int index)
2340 {
2341 	int	i;
2342 
2343 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2344 	for (i = 0; i < ps->ps_nsets; i++) {
2345 		if (ps->ps_pcacheset[i].pcs_pollfd != NULL) {
2346 			ps->ps_pcacheset[i].pcs_count = 0;
2347 		}
2348 	}
2349 	ps->ps_pcacheset[index].pcs_count = 1;
2350 }
2351 
2352 /*
2353  * this routine implements the poll cache list replacement policy.
2354  * It currently chooses the "least used" list.
2355  */
2356 int
2357 pcacheset_replace(pollstate_t *ps)
2358 {
2359 	int i;
2360 	int index = 0;
2361 
2362 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2363 	for (i = 1; i < ps->ps_nsets; i++) {
2364 		if (ps->ps_pcacheset[index].pcs_count >
2365 		    ps->ps_pcacheset[i].pcs_count) {
2366 			index = i;
2367 		}
2368 	}
2369 	ps->ps_pcacheset[index].pcs_count = 0;
2370 	return (index);
2371 }
2372 
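/*
 * A hypothetical usage sketch (wrapper and locals are illustrative, not
 * from this file) showing how the two routines above fit together: the
 * caller bumps the chosen set's use counter on every selection and lets
 * pcacheset_reset_count() handle counter wrap.
 *
 *	int
 *	pick_cacheset(pollstate_t *ps, int matched)
 *	{
 *		int which;
 *
 *		which = (matched >= 0) ? matched : pcacheset_replace(ps);
 *		ps->ps_pcacheset[which].pcs_count++;
 *		if (ps->ps_pcacheset[which].pcs_count == 0)
 *			pcacheset_reset_count(ps, which);
 *		return (which);
 *	}
 *
 * The real selection logic lives in the poll() entry path; this only
 * illustrates the wrap handling that pcacheset_reset_count() exists for.
 */
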
2373 /*
2374  * this routine is called by strclose to remove any remaining polldat structs
2375  * on the pollhead list of the device being closed. There are two reasons why
2376  * the polldat structures still remain on the pollhead list:
2377  *
2378  * (1) The layered device (e.g. the console driver).
2379  * In this case, the existence of a polldat implies that the thread putting
2380  * the polldat on this list has not exited yet. Before the thread exits, it
2381  * will have to hold this pollhead lock to remove the polldat. So holding the
2382  * pollhead lock here effectively prevents the thread which put the polldat
2383  * on this list from exiting.
2384  *
2385  * (2) /dev/poll.
2386  * When a polled fd is cached in /dev/poll, its polldat will remain on the
2387  * pollhead list if the process has not done a POLLREMOVE before closing the
2388  * polled fd. We just unlink it here.
2389  */
2390 void
2391 pollhead_clean(pollhead_t *php)
2392 {
2393 	polldat_t	*pdp;
2394 
2395 	/*
2396 	 * In case (1), while we must prevent the thread in question from
2397 	 * exiting, we must also obey the proper locking order, i.e.
2398 	 * (ps_lock -> phlock).
2399 	 */
2400 	PH_ENTER(php);
2401 	while (php->ph_list != NULL) {
2402 		pollstate_t	*ps;
2403 		pollcache_t	*pcp;
2404 
2405 		pdp = php->ph_list;
2406 		ASSERT(pdp->pd_php == php);
2407 		if (pdp->pd_thread == NULL) {
2408 			/*
2409 			 * This is case (2). Since the ph_lock is sufficient
2410 			 * to synchronize this lwp with any other /dev/poll
2411 			 * lwp, just unlink the polldat.
2412 			 */
2413 			php->ph_list = pdp->pd_next;
2414 			pdp->pd_php = NULL;
2415 			pdp->pd_next = NULL;
2416 			continue;
2417 		}
2418 		ps = pdp->pd_thread->t_pollstate;
2419 		ASSERT(ps != NULL);
2420 		pcp = pdp->pd_pcache;
2421 		ASSERT(pcp != NULL);
2422 		mutex_enter(&pcp->pc_no_exit);
2423 		pcp->pc_busy++;  /* prevents exit()'s */
2424 		mutex_exit(&pcp->pc_no_exit);
2425 		/*
2426 		 * Now get the locks in proper order to avoid deadlock.
2427 		 */
2428 		PH_EXIT(php);
2429 		mutex_enter(&ps->ps_lock);
2430 		/*
2431 		 * While we dropped the pollhead lock, the element could have
2432 		 * been taken off the list already.
2433 		 */
2434 		PH_ENTER(php);
2435 		if (pdp->pd_php == php) {
2436 			ASSERT(pdp == php->ph_list);
2437 			php->ph_list = pdp->pd_next;
2438 			pdp->pd_php = NULL;
2439 			pdp->pd_next = NULL;
2440 		}
2441 		PH_EXIT(php);
2442 		mutex_exit(&ps->ps_lock);
2443 		mutex_enter(&pcp->pc_no_exit);
2444 		pcp->pc_busy--;
2445 		if (pcp->pc_busy == 0) {
2446 			/*
2447 			 * Wake up the thread waiting in
2448 			 * thread_exit().
2449 			 */
2450 			cv_signal(&pcp->pc_busy_cv);
2451 		}
2452 		mutex_exit(&pcp->pc_no_exit);
2453 		PH_ENTER(php);
2454 	}
2455 	PH_EXIT(php);
2456 }
2457 
2458 /*
2459  * pcacheset_remove_list() is called to clean up a partially cached 'current'
2460  * list or to remove a partial list which is no longer cached. A flag value
2461  * of 1 indicates the second case.
2462  */
2463 void
2464 pcacheset_remove_list(pollstate_t *ps, pollfd_t *pollfdp, int start, int end,
2465     int cacheindex, int flag)
2466 {
2467 	int i;
2468 
2469 	ASSERT(MUTEX_HELD(&ps->ps_lock));
2470 	for (i = start; i < end; i++) {
2471 		if ((pollfdp[i].fd >= 0) &&
2472 		    (flag || !(pollfdp[i].revents & POLLNVAL))) {
2473 			if (pcache_delete_fd(ps, pollfdp[i].fd, i, cacheindex,
2474 			    (uint_t)pollfdp[i].events)) {
2475 				int j;
2476 				int fd = pollfdp[i].fd;
2477 
2478 				for (j = i + 1; j < end; j++) {
2479 					if (pollfdp[j].fd == fd) {
2480 						pcache_update_xref(
2481 						    ps->ps_pcache, fd,
2482 						    (ssize_t)j, cacheindex);
2483 						break;
2484 					}
2485 				}
2486 				ASSERT(j <= end);
2487 			}
2488 		}
2489 	}
2490 }
2491 
2492 #ifdef DEBUG
2493 
2494 #include <sys/strsubr.h>
2495 /*
2496  * make sure curthread is not on anyone's pollhead list any more.
2497  */
2498 static void
2499 pollcheckphlist()
2500 {
2501 	int i;
2502 	file_t *fp;
2503 	uf_entry_t *ufp;
2504 	uf_info_t *fip = P_FINFO(curproc);
2505 	struct stdata *stp;
2506 	polldat_t *pdp;
2507 
2508 	mutex_enter(&fip->fi_lock);
2509 	for (i = 0; i < fip->fi_nfiles; i++) {
2510 		UF_ENTER(ufp, fip, i);
2511 		if ((fp = ufp->uf_file) != NULL) {
2512 			if ((stp = fp->f_vnode->v_stream) != NULL) {
2513 				PH_ENTER(&stp->sd_pollist);
2514 				pdp = stp->sd_pollist.ph_list;
2515 				while (pdp) {
2516 					ASSERT(pdp->pd_thread != curthread);
2517 					pdp = pdp->pd_next;
2518 				}
2519 				PH_EXIT(&stp->sd_pollist);
2520 			}
2521 		}
2522 		UF_EXIT(ufp);
2523 	}
2524 	mutex_exit(&fip->fi_lock);
2525 }
2526 
2527 /*
2528  * For a resolved set's poll list, the xref info in the pcache should be
2529  * consistent with this poll list.
2530  */
2531 static int
2532 pollcheckxref(pollstate_t *ps, int cacheindex)
2533 {
2534 	pollfd_t *pollfdp = ps->ps_pcacheset[cacheindex].pcs_pollfd;
2535 	pollcache_t *pcp = ps->ps_pcache;
2536 	polldat_t *pdp;
2537 	int	i;
2538 	xref_t	*refp;
2539 
2540 	for (i = 0; i < ps->ps_pcacheset[cacheindex].pcs_nfds; i++) {
2541 		if (pollfdp[i].fd < 0) {
2542 			continue;
2543 		}
2544 		pdp = pcache_lookup_fd(pcp, pollfdp[i].fd);
2545 		ASSERT(pdp != NULL);
2546 		ASSERT(pdp->pd_ref != NULL);
2547 		refp = &pdp->pd_ref[cacheindex];
2548 		if (refp->xf_position >= 0) {
2549 			ASSERT(refp->xf_refcnt >= 1);
2550 			ASSERT(pollfdp[refp->xf_position].fd == pdp->pd_fd);
2551 			if (refp->xf_refcnt > 1) {
2552 				int	j;
2553 				int	count = 0;
2554 
2555 				for (j = refp->xf_position;
2556 				    j < ps->ps_pcacheset[cacheindex].pcs_nfds;
2557 				    j++) {
2558 					if (pollfdp[j].fd == pdp->pd_fd) {
2559 						count++;
2560 					}
2561 				}
2562 				ASSERT(count == refp->xf_refcnt);
2563 			}
2564 		}
2565 	}
2566 	return (1);
2567 }
2568 
2569 /*
2570  * For every cached pollfd, its polldat struct should be consistent with
2571  * what is in the pcacheset lists.
2572  */
2573 static void
2574 checkpolldat(pollstate_t *ps)
2575 {
2576 	pollcache_t	*pcp = ps->ps_pcache;
2577 	polldat_t	**hashtbl;
2578 	int		i;
2579 
2580 	hashtbl = pcp->pc_hash;
2581 	for (i = 0; i < pcp->pc_hashsize; i++) {
2582 		polldat_t	*pdp;
2583 
2584 		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
2585 			ASSERT(pdp->pd_ref != NULL);
2586 			if (pdp->pd_count > 0) {
2587 				xref_t		*refp;
2588 				int		j;
2589 				pollcacheset_t	*pcsp;
2590 				pollfd_t	*pollfd;
2591 
2592 				for (j = 0; j < ps->ps_nsets; j++) {
2593 					refp = &pdp->pd_ref[j];
2594 					if (refp->xf_refcnt > 0) {
2595 						pcsp = &ps->ps_pcacheset[j];
2596 				ASSERT(refp->xf_position < pcsp->pcs_nfds);
2597 						pollfd = pcsp->pcs_pollfd;
2598 			ASSERT(pdp->pd_fd == pollfd[refp->xf_position].fd);
2599 					}
2600 				}
2601 			}
2602 		}
2603 	}
2604 }
2605 
2606 /*
2607  * every wfd element on ph_list must have a corresponding fpollinfo on the
2608  * uf_fpollinfo list. This is a variation of infpollinfo() w/o holding locks.
2609  */
2610 void
2611 checkwfdlist(vnode_t *vp, fpollinfo_t *fpip)
2612 {
2613 	stdata_t *stp;
2614 	polldat_t *pdp;
2615 	fpollinfo_t *fpip2;
2616 
2617 	if ((stp = vp->v_stream) == NULL) {
2618 		return;
2619 	}
2620 	PH_ENTER(&stp->sd_pollist);
2621 	for (pdp = stp->sd_pollist.ph_list; pdp; pdp = pdp->pd_next) {
2622 		if (pdp->pd_thread->t_procp == curthread->t_procp) {
2623 			for (fpip2 = fpip; fpip2; fpip2 = fpip2->fp_next) {
2624 				if (pdp->pd_thread == fpip2->fp_thread) {
2625 					break;
2626 				}
2627 			}
2628 			ASSERT(fpip2 != NULL);
2629 		}
2630 	}
2631 	PH_EXIT(&stp->sd_pollist);
2632 }
2633 
2634 /*
2635  * For each cached fd whose bit is not set in the bitmap, its revents field
2636  * in the current poll list should be 0.
2637  */
2638 static int
2639 pollcheckrevents(pollstate_t *ps, int begin, int end, int cacheindex)
2640 {
2641 	pollcache_t	*pcp = ps->ps_pcache;
2642 	pollfd_t	*pollfdp = ps->ps_pollfd;
2643 	int		i;
2644 
2645 	for (i = begin; i < end; i++) {
2646 		polldat_t	*pdp;
2647 
2648 		ASSERT(!BT_TEST(pcp->pc_bitmap, i));
2649 		pdp = pcache_lookup_fd(pcp, i);
2650 		if (pdp && pdp->pd_fp != NULL) {
2651 			xref_t *refp;
2652 			int entry;
2653 
2654 			ASSERT(pdp->pd_ref != NULL);
2655 			refp = &pdp->pd_ref[cacheindex];
2656 			if (refp->xf_refcnt == 0) {
2657 				continue;
2658 			}
2659 			entry = refp->xf_position;
2660 			ASSERT(entry >= 0);
2661 			ASSERT(pollfdp[entry].revents == 0);
2662 			if (refp->xf_refcnt > 1) {
2663 				int j;
2664 
2665 				for (j = entry + 1; j < ps->ps_nfds; j++) {
2666 					if (pollfdp[j].fd == i) {
2667 						ASSERT(pollfdp[j].revents == 0);
2668 					}
2669 				}
2670 			}
2671 		}
2672 	}
2673 	return (1);
2674 }
2675 
2676 #endif	/* DEBUG */
2677 
2678 pollcache_t *
2679 pcache_alloc()
2680 {
2681 	return (kmem_zalloc(sizeof (pollcache_t), KM_SLEEP));
2682 }
2683 
2684 void
2685 pcache_create(pollcache_t *pcp, nfds_t nfds)
2686 {
2687 	size_t	mapsize;
2688 
2689 	/*
2690 	 * allocate enough bits for the poll fd list
2691 	 */
2692 	if ((mapsize = POLLMAPCHUNK) <= nfds) {
2693 		mapsize = (nfds + POLLMAPCHUNK - 1) & ~(POLLMAPCHUNK - 1);
2694 	}
2695 	pcp->pc_bitmap = kmem_zalloc((mapsize / BT_NBIPUL) * sizeof (ulong_t),
2696 	    KM_SLEEP);
2697 	pcp->pc_mapsize = mapsize;
2698 	/*
2699 	 * The hash size is at least POLLHASHCHUNKSZ. If the user polls a large
2700 	 * number of fds to start with, allocate a bigger hash table (to the
2701 	 * nearest multiple of POLLHASHCHUNKSZ) because dynamically growing a
2702 	 * hash table is expensive.
2703 	 */
2704 	if (nfds < POLLHASHCHUNKSZ) {
2705 		pcp->pc_hashsize = POLLHASHCHUNKSZ;
2706 	} else {
2707 		pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) &
2708 		    ~(POLLHASHCHUNKSZ - 1);
2709 	}
2710 	pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *),
2711 	    KM_SLEEP);
2712 }
2713 
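/*
 * Worked example of the round-up arithmetic used in pcache_create() above.
 * The chunk value below (128) is assumed purely for illustration; the real
 * POLLHASHCHUNKSZ and POLLMAPCHUNK values are defined in poll_impl.h.
 * For a power-of-two CHUNK, the expression
 *
 *	(nfds + CHUNK - 1) & ~(CHUNK - 1)
 *
 * rounds nfds up to the next multiple of CHUNK.  With CHUNK = 128 and
 * nfds = 300:
 *
 *	(300 + 127) & ~127  ==  427 & ~127  ==  384  ==  3 * 128
 *
 * The same trick sizes the bitmap from POLLMAPCHUNK.
 */
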
2714 void
2715 pcache_destroy(pollcache_t *pcp)
2716 {
2717 	polldat_t	**hashtbl;
2718 	int i;
2719 
2720 	hashtbl = pcp->pc_hash;
2721 	for (i = 0; i < pcp->pc_hashsize; i++) {
2722 		if (hashtbl[i] != NULL) {
2723 			polldat_t *pdp, *pdp2;
2724 
2725 			pdp = hashtbl[i];
2726 			while (pdp != NULL) {
2727 				pdp2 = pdp->pd_hashnext;
2728 				if (pdp->pd_ref != NULL) {
2729 					kmem_free(pdp->pd_ref, sizeof (xref_t) *
2730 					    pdp->pd_nsets);
2731 				}
2732 				kmem_free(pdp, sizeof (polldat_t));
2733 				pdp = pdp2;
2734 				pcp->pc_fdcount--;
2735 			}
2736 		}
2737 	}
2738 	ASSERT(pcp->pc_fdcount == 0);
2739 	kmem_free(pcp->pc_hash, sizeof (polldat_t *) * pcp->pc_hashsize);
2740 	kmem_free(pcp->pc_bitmap,
2741 	    sizeof (ulong_t) * (pcp->pc_mapsize/BT_NBIPUL));
2742 	mutex_destroy(&pcp->pc_no_exit);
2743 	mutex_destroy(&pcp->pc_lock);
2744 	cv_destroy(&pcp->pc_cv);
2745 	cv_destroy(&pcp->pc_busy_cv);
2746 	kmem_free(pcp, sizeof (pollcache_t));
2747 }
2748 
2749 pollcacheset_t *
2750 pcacheset_create(int nsets)
2751 {
2752 	return (kmem_zalloc(sizeof (pollcacheset_t) * nsets, KM_SLEEP));
2753 }
2754 
2755 void
2756 pcacheset_destroy(pollcacheset_t *pcsp, int nsets)
2757 {
2758 	int i;
2759 
2760 	for (i = 0; i < nsets; i++) {
2761 		if (pcsp[i].pcs_pollfd != NULL) {
2762 			kmem_free(pcsp[i].pcs_pollfd, pcsp[i].pcs_nfds *
2763 			    sizeof (pollfd_t));
2764 		}
2765 	}
2766 	kmem_free(pcsp, sizeof (pollcacheset_t) * nsets);
2767 }
2768 
2769 /*
2770  * Check each duplicated poll fd in the poll list. It may be necessary to
2771  * VOP_POLL the same fd again using different poll events. getf() has been
2772  * done by the caller. This routine returns 0 if it can successfully process
2773  * the entire poll fd list. It returns -1 if the underlying vnode has changed
2774  * during a VOP_POLL, in which case the caller has to repoll. It returns a
2775  * positive value if VOP_POLL failed.
2776  */
2777 static int
2778 plist_chkdupfd(file_t *fp, polldat_t *pdp, pollstate_t *psp, pollfd_t *pollfdp,
2779     int entry, int *fdcntp)
2780 {
2781 	int	i;
2782 	int	fd;
2783 	nfds_t	nfds = psp->ps_nfds;
2784 
2785 	fd = pollfdp[entry].fd;
2786 	for (i = entry + 1; i < nfds; i++) {
2787 		if (pollfdp[i].fd == fd) {
2788 			if (pollfdp[i].events == pollfdp[entry].events) {
2789 				if ((pollfdp[i].revents =
2790 				    pollfdp[entry].revents) != 0) {
2791 					(*fdcntp)++;
2792 				}
2793 			} else {
2794 
2795 				int	error;
2796 				pollhead_t *php;
2797 				pollcache_t *pcp = psp->ps_pcache;
2798 
2799 				/*
2800 				 * the events are different. VOP_POLL on this
2801 				 * fd so that we don't miss any revents.
2802 				 */
2803 				php = NULL;
2804 				ASSERT(curthread->t_pollcache == NULL);
2805 				error = VOP_POLL(fp->f_vnode,
2806 				    pollfdp[i].events, 0,
2807 				    &pollfdp[i].revents, &php);
2808 				if (error) {
2809 					return (error);
2810 				}
2811 				/*
2812 				 * layered devices (e.g. console driver)
2813 				 * may change the vnode and thus the pollhead
2814 				 * pointer out from underneath us.
2815 				 */
2816 				if (php != NULL && pdp->pd_php != NULL &&
2817 				    php != pdp->pd_php) {
2818 					pollhead_delete(pdp->pd_php, pdp);
2819 					pdp->pd_php = php;
2820 					pollhead_insert(php, pdp);
2821 					/*
2822 					 * We could have missed a wakeup on the
2823 					 * new target device. Make sure the new
2824 					 * target gets polled once.
2825 					 */
2826 					BT_SET(pcp->pc_bitmap, fd);
2827 					return (-1);
2828 				}
2829 				if (pollfdp[i].revents) {
2830 					(*fdcntp)++;
2831 				}
2832 			}
2833 		}
2834 	}
2835 	return (0);
2836 }
2837
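/*
 * For illustration only (user-level code, not part of this file): the
 * duplicate-fd case handled by plist_chkdupfd() arises when a caller
 * lists the same descriptor more than once, possibly with different
 * event masks, e.g. with a hypothetical descriptor 'sock':
 *
 *	struct pollfd fds[2];
 *
 *	fds[0].fd = sock;
 *	fds[0].events = POLLIN;
 *	fds[1].fd = sock;
 *	fds[1].events = POLLOUT;
 *
 *	(void) poll(fds, 2, -1);
 *
 * With identical event masks the first entry's revents is simply copied;
 * with different masks the fd must be VOP_POLLed again so the second
 * entry's revents reflects its own event set.
 */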