/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2015, Joyent, Inc.
 */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#include <sys/param.h>
#include <sys/isa_defs.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/poll_impl.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/bitmap.h>
#include <sys/kstat.h>
#include <sys/rctl.h>
#include <sys/port_impl.h>
#include <sys/schedctl.h>
#include <sys/cpu.h>

#define	NPHLOCKS	64	/* Number of locks; must be power of 2 */
#define	PHLOCKADDR(php)	&plocks[(((uintptr_t)(php)) >> 8) & (NPHLOCKS - 1)]
#define	PHLOCK(php)	PHLOCKADDR(php).pp_lock
#define	PH_ENTER(php)	mutex_enter(PHLOCK(php))
#define	PH_EXIT(php)	mutex_exit(PHLOCK(php))
#define	VALID_POLL_EVENTS	(POLLIN | POLLPRI | POLLOUT | POLLRDNORM \
	| POLLRDBAND | POLLWRBAND | POLLHUP | POLLERR | POLLNVAL)

/*
 * global counters to collect some stats
 */
static struct {
	kstat_named_t	polllistmiss;	/* failed to find a cached poll list */
	kstat_named_t	pollcachehit;	/* list matched 100% w/ cached one */
	kstat_named_t	pollcachephit;	/* list matched < 100% w/ cached one */
	kstat_named_t	pollcachemiss;	/* every list entry differs from cache */
	kstat_named_t	pollunlockfail;	/* failed to perform pollunlock */
} pollstats = {
	{ "polllistmiss",	KSTAT_DATA_UINT64 },
	{ "pollcachehit",	KSTAT_DATA_UINT64 },
	{ "pollcachephit",	KSTAT_DATA_UINT64 },
	{ "pollcachemiss",	KSTAT_DATA_UINT64 },
	{ "pollunlockfail",	KSTAT_DATA_UINT64 }
};

kstat_named_t *pollstats_ptr = (kstat_named_t *)&pollstats;
uint_t pollstats_ndata = sizeof (pollstats) / sizeof (kstat_named_t);
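
/*
 * Illustrative sketch (an assumption for clarity, not the actual
 * registration code, which lives outside this file): pollstats_ptr and
 * pollstats_ndata are laid out so that a named kstat can simply point at
 * the counters above, e.g.:
 *
 *	kstat_t *ksp = kstat_create("poll", 0, "pollstats", "misc",
 *	    KSTAT_TYPE_NAMED, pollstats_ndata, KSTAT_FLAG_VIRTUAL);
 *	if (ksp != NULL) {
 *		ksp->ks_data = pollstats_ptr;
 *		kstat_install(ksp);
 *	}
 */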

struct pplock	{
	kmutex_t	pp_lock;
	short		pp_flag;
	kcondvar_t	pp_wait_cv;
	int32_t		pp_pad;		/* to a nice round 16 bytes */
};

static struct pplock plocks[NPHLOCKS];	/* Hash array of pollhead locks */

/* Contention lock & list for preventing deadlocks in recursive /dev/poll. */
static	kmutex_t	pollstate_contenders_lock;
static	pollstate_t	*pollstate_contenders = NULL;

#ifdef DEBUG
static int pollchecksanity(pollstate_t *, nfds_t);
static int pollcheckxref(pollstate_t *, int);
static void pollcheckphlist(void);
static int pollcheckrevents(pollstate_t *, int, int, int);
static void checkpolldat(pollstate_t *);
#endif	/* DEBUG */
static int plist_chkdupfd(file_t *, polldat_t *, pollstate_t *, pollfd_t *, int,
    int *);

/*
 * Data structure overview:
 * The per-thread poll state consists of
 *	one pollstate_t
 *	one pollcache_t
 *	one bitmap with one event bit per fd
 *	a (two-dimensional) hashed array of polldat_t structures - one entry
 *	per fd
 *
 * This conglomerate of data structures interacts with
 *	the pollhead which is used by VOP_POLL and pollwakeup
 *	(protected by the PHLOCK, cached array of plocks), and
 *	the fpollinfo list hanging off the fi_list which is used to notify
 *	poll when a cached fd is closed. This is protected by uf_lock.
 *
 * Invariants:
 *	pd_php (pollhead pointer) is set iff (if and only if) the polldat
 *	is on that pollhead. This is modified atomically under pc_lock.
 *
 *	pd_fp (file_t pointer) is set iff the thread is on the fpollinfo
 *	list for that open file.
 *	This is modified atomically under pc_lock.
 *
 *	pd_count is the sum (over all values of i) of pd_ref[i].xf_refcnt.
 *	Iff pd_ref[i].xf_refcnt >= 1 then
 *		ps_pcacheset[i].pcs_pollfd[pd_ref[i].xf_position].fd == pd_fd
 *	Iff pd_ref[i].xf_refcnt > 1 then
 *		In ps_pcacheset[i].pcs_pollfd between index
 *		pd_ref[i].xf_position and the end of the list
 *		there are xf_refcnt entries with .fd == pd_fd
 *
 * Locking design:
 * Whenever possible the design relies on the fact that the poll cache state
 * is per thread and thus, for both poll and exit, self-synchronizing.
 * Thus the key interactions where other threads access the state are:
 *	pollwakeup (and polltime), and
 *	close cleaning up the cached references to an open file
 *
 * The two key locks in poll proper are ps_lock and pc_lock.
 *
 * The ps_lock is used for synchronization between poll, (lwp_)exit and close
 * to ensure that modifications to pollcacheset structure are serialized.
 * This lock is held through most of poll() except where poll sleeps
 * since there is little need to handle closes concurrently with the execution
 * of poll.
 * The pc_lock protects most of the fields in pollcache structure and polldat
 * structures (which are accessed by poll, pollwakeup, and polltime)
 * with the exception of fields that are only modified when only one thread
 * can access this per-thread state.
 * Those exceptions occur in poll when first allocating the per-thread state,
 * when poll grows the number of polldat (never shrinks), and when
 * exit/pollcleanup has ensured that there are no references from either
 * pollheads or fpollinfo to the thread's poll state.
 *
 * The poll(2) system call is the only path on which ps_lock and pc_lock are
 * both held, in that order. It needs ps_lock to synchronize with close and
 * lwp_exit; and pc_lock with pollwakeup.
 *
 * The locking interaction between pc_lock and PHLOCK takes into account
 * that poll acquires these locks in the order of pc_lock and then PHLOCK
 * while pollwakeup does it in the reverse order. Thus pollwakeup implements
 * deadlock avoidance by dropping the locks and reacquiring them in the
 * reverse order. For this to work pollwakeup needs to prevent the thread
 * from exiting and freeing all of the poll related state. This is done
 * using
 *	the pc_no_exit lock
 *	the pc_busy counter
 *	the pc_busy_cv condition variable
 *
 * The locking interaction between pc_lock and uf_lock has similar
 * issues. Poll holds ps_lock and/or pc_lock across calls to getf/releasef
 * which acquire uf_lock. The poll cleanup in close needs to hold uf_lock
 * to prevent poll or exit from doing a delfpollinfo after which the thread
 * might exit. But the cleanup needs to acquire pc_lock when modifying
 * the poll cache state.
 * The solution is to use pc_busy and do the close
 * cleanup in two phases:
 *	First close calls pollblockexit which increments pc_busy.
 *	This prevents the per-thread poll related state from being freed.
 *	Then close drops uf_lock and calls pollcacheclean.
 *	This routine can then acquire pc_lock and remove any references
 *	to the closing fd (as well as recording that it has been closed
 *	so that a POLLNVAL can be generated even if the fd is reused before
 *	poll has been woken up and checked getf() again).
 *
 * When removing a polled fd from poll cache, the fd is always removed
 * from pollhead list first and then from fpollinfo list, i.e.,
 * pollhead_delete() is called before delfpollinfo().
 *
 *
 * Locking hierarchy:
 *	pc_no_exit is a leaf level lock.
 *	ps_lock is held when acquiring pc_lock (except when pollwakeup
 *	acquires pc_lock).
 *	pc_lock might be held when acquiring PHLOCK (pollhead_insert/
 *	pollhead_delete)
 *	pc_lock is always held (but this is not required)
 *	when acquiring PHLOCK (in polladd/pollhead_delete and pollwakeup called
 *	from pcache_clean_entry).
 *	pc_lock is held across addfpollinfo/delfpollinfo which acquire
 *	uf_lock.
 *	pc_lock is held across getf/releasef which acquire uf_lock.
 *	ps_lock might be held across getf/releasef which acquire uf_lock.
 *	pollwakeup tries to acquire pc_lock while holding PHLOCK
 *	but drops the locks and reacquires them in reverse order to avoid
 *	deadlock.
 *
 * Note also that there is deadlock avoidance support for VOP_POLL routines
 * and pollwakeup involving a file system or driver lock.
 * See below.
 */

/*
 * Deadlock avoidance support for VOP_POLL() routines.  This is
 * sometimes necessary to prevent deadlock between polling threads
 * (which hold poll locks on entry to xx_poll(), then acquire foo)
 * and pollwakeup() threads (which hold foo, then acquire poll locks).
 *
 * pollunlock(*cookie) releases whatever poll locks the current thread holds,
 *	setting a cookie for use by pollrelock();
 *
 * pollrelock(cookie) reacquires previously dropped poll locks;
 *
 * polllock(php, mutex) does the common case: pollunlock(),
 *	acquire the problematic mutex, pollrelock().
 *
 * If polllock() or pollunlock() returns non-zero, it indicates that a
 * recursive /dev/poll is in progress and the pollcache locks cannot be
 * dropped.  Callers must handle this by indicating a POLLNVAL in the
 * revents of the VOP_POLL.
 */
int
pollunlock(int *lockstate)
{
	pollstate_t *ps = curthread->t_pollstate;
	pollcache_t *pcp;

	ASSERT(lockstate != NULL);

	/*
	 * There is no way to safely perform a pollunlock() while in the depths
	 * of a recursive /dev/poll operation.
	 */
	if (ps != NULL && ps->ps_depth > 1) {
		ps->ps_flags |= POLLSTATE_ULFAIL;
		pollstats.pollunlockfail.value.ui64++;
		return (-1);
	}

	/*
	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
	 * If the pollrelock/pollunlock is called as a result of poll(2),
	 * the t_pollcache should be NULL.
	 */
	if (curthread->t_pollcache == NULL)
		pcp = ps->ps_pcache;
	else
		pcp = curthread->t_pollcache;

	if (!mutex_owned(&pcp->pc_lock)) {
		*lockstate = 0;
	} else {
		*lockstate = 1;
		mutex_exit(&pcp->pc_lock);
	}
	return (0);
}

void
pollrelock(int lockstate)
{
	pollstate_t *ps = curthread->t_pollstate;
	pollcache_t *pcp;

	/* Skip this whole ordeal if the pollcache was not locked to begin with */
	if (lockstate == 0)
		return;

	/*
	 * t_pollcache is set by /dev/poll and event ports (port_fd.c).
	 * If the pollrelock/pollunlock is called as a result of poll(2),
	 * the t_pollcache should be NULL.
	 */
	if (curthread->t_pollcache == NULL)
		pcp = ps->ps_pcache;
	else
		pcp = curthread->t_pollcache;

	mutex_enter(&pcp->pc_lock);
}

/* ARGSUSED */
int
polllock(pollhead_t *php, kmutex_t *lp)
{
	if (mutex_tryenter(lp) == 0) {
		int state;

		if (pollunlock(&state) != 0) {
			return (-1);
		}
		mutex_enter(lp);
		pollrelock(state);
	}
	return (0);
}
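
/*
 * Illustrative sketch (a hypothetical driver, not part of this file): a
 * chpoll/VOP_POLL routine that must take its own softstate lock can use
 * polllock() to do so without deadlocking against pollwakeup(), and must
 * report POLLNVAL if the lock cannot be safely taken because a recursive
 * /dev/poll operation is in progress.  The names xx_softstate_t, xx_lookup,
 * xx_lock, xx_pollhead and xx_events() are assumptions for the example.
 *
 *	static int
 *	xx_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
 *	    struct pollhead **phpp)
 *	{
 *		xx_softstate_t *xsp = xx_lookup(dev);
 *
 *		if (polllock(&xsp->xx_pollhead, &xsp->xx_lock) != 0) {
 *			*reventsp = POLLNVAL;
 *			return (0);
 *		}
 *		*reventsp = xx_events(xsp) & events;
 *		if (*reventsp == 0 && !anyyet)
 *			*phpp = &xsp->xx_pollhead;
 *		mutex_exit(&xsp->xx_lock);
 *		return (0);
 *	}
 *
 * The corresponding event path would take xx_lock, update the state behind
 * xx_events() and then call pollwakeup(&xsp->xx_pollhead, POLLIN | POLLRDNORM),
 * dropping xx_lock before or after the call as appropriate for the driver.
 */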

static int
poll_common(pollfd_t *fds, nfds_t nfds, timespec_t *tsp, k_sigset_t *ksetp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int fdcnt = 0;
	int i;
	hrtime_t deadline;	/* hrtime value when we want to return */
	pollfd_t *pollfdp;
	pollstate_t *ps;
	pollcache_t *pcp;
	int error = 0;
	nfds_t old_nfds;
	int cacheindex = 0;	/* which cache set is used */

	/*
	 * Determine the precise future time of the requested timeout, if any.
	 */
	if (tsp == NULL) {
		deadline = -1;
	} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
		deadline = 0;
	} else {
		/* They must wait at least a tick. */
		deadline = ((hrtime_t)tsp->tv_sec * NANOSEC) + tsp->tv_nsec;
		deadline = MAX(deadline, nsec_per_tick);
		deadline += gethrtime();
	}

	/*
	 * Reset our signal mask, if requested.
	 */
	if (ksetp != NULL) {
		mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(t);
		lwp->lwp_sigoldmask = t->t_hold;
		t->t_hold = *ksetp;
		t->t_flag |= T_TOMASK;
		/*
		 * Call cv_reltimedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		if (!cv_reltimedwait_sig(&t->t_delay_cv, &p->p_lock, 0,
		    TR_CLOCK_TICK)) {
			mutex_exit(&p->p_lock);
			error = EINTR;
			goto pollout;
		}
		mutex_exit(&p->p_lock);
	}

	/*
	 * Check to see if this guy just wants to use poll() as a timeout.
	 * If yes then bypass all the other stuff and make him sleep.
	 */
	if (nfds == 0) {
		/*
		 * Sleep until we have passed the requested future
		 * time or until interrupted by a signal.
		 * Do not check for signals if we do not want to wait.
		 */
		if (deadline != 0) {
			mutex_enter(&t->t_delay_lock);
			while ((error = cv_timedwait_sig_hrtime(&t->t_delay_cv,
			    &t->t_delay_lock, deadline)) > 0)
				continue;
			mutex_exit(&t->t_delay_lock);
			error = (error == 0) ? EINTR : 0;
		}
		goto pollout;
	}

	if (nfds > p->p_fno_ctl) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
		    p->p_rctls, p, RCA_SAFE);
		mutex_exit(&p->p_lock);
		error = EINVAL;
		goto pollout;
	}

	/*
	 * Need to allocate memory for pollstate before anything else because
	 * the mutex and cv are created in this space.
	 */
	ps = pollstate_create();

	if (ps->ps_pcache == NULL)
		ps->ps_pcache = pcache_alloc();
	pcp = ps->ps_pcache;

	/*
	 * NOTE: for performance, buffers are saved across poll() calls.
	 * The theory is that if a process polls heavily, it tends to poll
	 * on the same set of descriptors.  Therefore, we only reallocate
	 * buffers when nfds changes.  There is no hysteresis control,
	 * because there is no data to suggest that this is necessary;
	 * the penalty of reallocating is not *that* great in any event.
	 */
	old_nfds = ps->ps_nfds;
	if (nfds != old_nfds) {

		kmem_free(ps->ps_pollfd, old_nfds * sizeof (pollfd_t));
		pollfdp = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
		ps->ps_pollfd = pollfdp;
		ps->ps_nfds = nfds;
	}

	pollfdp = ps->ps_pollfd;
	if (copyin(fds, pollfdp, nfds * sizeof (pollfd_t))) {
		error = EFAULT;
		goto pollout;
	}

	if (fds == NULL) {
		/*
		 * If the process has page 0 mapped, then the copyin() above
		 * will succeed even if fds is NULL.  However, our cached
		 * poll lists are keyed by the address of the passed-in fds
		 * structure, and we use the value NULL to indicate an unused
		 * poll cache list entry.  As such, we elect not to support
		 * NULL as a valid (user) memory address and fail the poll()
		 * call.
		 */
		error = EINVAL;
		goto pollout;
	}

	/*
	 * If this thread polls for the first time, allocate ALL poll
	 * cache data structures and cache the poll fd list.  This
	 * allocation is delayed till now because lwps polling 0 fds
	 * (i.e. using poll as a timeout) do not need this memory.
	 */
	mutex_enter(&ps->ps_lock);
	pcp = ps->ps_pcache;
	ASSERT(pcp != NULL);
	if (pcp->pc_bitmap == NULL) {
		pcache_create(pcp, nfds);
		/*
		 * poll and cache this poll fd list in ps_pcacheset[0].
		 */
		error = pcacheset_cache_list(ps, fds, &fdcnt, cacheindex);
		if (fdcnt || error) {
			mutex_exit(&ps->ps_lock);
			goto pollout;
		}
	} else {
		pollcacheset_t *pcset = ps->ps_pcacheset;

		/*
		 * Not first time polling.  Select a cached poll list by
		 * matching user pollfd list buffer address.
		 */
		for (cacheindex = 0; cacheindex < ps->ps_nsets; cacheindex++) {
			if (pcset[cacheindex].pcs_usradr == (uintptr_t)fds) {
				if ((++pcset[cacheindex].pcs_count) == 0) {
					/*
					 * counter is wrapping around.
					 */
					pcacheset_reset_count(ps, cacheindex);
				}
				/*
				 * Examine and resolve possible
				 * differences between the current poll
				 * list and the previously cached one.
				 * If there is an error during resolve(),
				 * the callee will guarantee the consistency
				 * of the cached poll list and cache content.
				 */
				error = pcacheset_resolve(ps, nfds, &fdcnt,
				    cacheindex);
				if (error) {
					mutex_exit(&ps->ps_lock);
					goto pollout;
				}
				break;
			}

			/*
			 * Note that the pcs_usradr field of a used entry won't
			 * be NULL, because it stores the address of the
			 * passed-in fds and NULL fds are not cached (then it
			 * is either the special timeout case when nfds is 0,
			 * or poll returns failure directly).
			 */
			if (pcset[cacheindex].pcs_usradr == NULL) {
				/*
				 * found an unused entry. Use it to cache
				 * this poll list.
				 */
				error = pcacheset_cache_list(ps, fds, &fdcnt,
				    cacheindex);
				if (fdcnt || error) {
					mutex_exit(&ps->ps_lock);
					goto pollout;
				}
				break;
			}
		}
		if (cacheindex == ps->ps_nsets) {
			/*
			 * We failed to find a matching cached poll fd list.
			 * Replace an old list.
			 */
			pollstats.polllistmiss.value.ui64++;
			cacheindex = pcacheset_replace(ps);
			ASSERT(cacheindex < ps->ps_nsets);
			pcset[cacheindex].pcs_usradr = (uintptr_t)fds;
			error = pcacheset_resolve(ps, nfds, &fdcnt, cacheindex);
			if (error) {
				mutex_exit(&ps->ps_lock);
				goto pollout;
			}
		}
	}

	/*
	 * Always scan the bitmap with the lock on the pollcache held.
	 * This is to make sure that a wakeup does not come undetected.
	 * If the lock is not held, a pollwakeup could have come for an
	 * fd we already checked but before this thread sleeps, in which
	 * case the wakeup is missed.
	 * Now we hold the pcache lock and check the bitmap again.  This
	 * prevents a wakeup from happening while we hold the pcache lock,
	 * since pollwakeup() will also lock the pcache before updating the
	 * poll bitmap.
	 */
	mutex_enter(&pcp->pc_lock);
	for (;;) {
		pcp->pc_flag = 0;
		error = pcache_poll(pollfdp, ps, nfds, &fdcnt, cacheindex);
		if (fdcnt || error) {
			mutex_exit(&pcp->pc_lock);
			mutex_exit(&ps->ps_lock);
			break;
		}

		/*
		 * If PC_POLLWAKE is set, a pollwakeup() was performed on
		 * one of the file descriptors.  This can happen only if
		 * one of the VOP_POLL() functions dropped pcp->pc_lock.
		 * The only current cases of this are in procfs (prpoll())
		 * and STREAMS (strpoll()).
		 */
		if (pcp->pc_flag & PC_POLLWAKE)
			continue;

		/*
		 * If you get here, the poll of fds was unsuccessful.
		 * Wait until some fd becomes readable, writable, or gets
		 * an exception, or until a signal or a timeout occurs.
		 * Do not check for signals if we have a zero timeout.
		 */
		mutex_exit(&ps->ps_lock);
		if (deadline == 0) {
			error = -1;
		} else {
			error = cv_timedwait_sig_hrtime(&pcp->pc_cv,
			    &pcp->pc_lock, deadline);
		}
		mutex_exit(&pcp->pc_lock);
		/*
		 * If we have received a signal or timed out
		 * then break out and return.
		 */
		if (error <= 0) {
			error = (error == 0) ? EINTR : 0;
			break;
		}
		/*
		 * We have not received a signal or timed out.
		 * Continue around and poll fds again.
		 */
		mutex_enter(&ps->ps_lock);
		mutex_enter(&pcp->pc_lock);
	}

pollout:
	/*
	 * If we changed the signal mask but we received
	 * no signal then restore the signal mask.
	 * Otherwise psig() will deal with the signal mask.
	 */
	if (ksetp != NULL) {
		mutex_enter(&p->p_lock);
		if (lwp->lwp_cursig == 0) {
			t->t_hold = lwp->lwp_sigoldmask;
			t->t_flag &= ~T_TOMASK;
		}
		mutex_exit(&p->p_lock);
	}

	if (error)
		return (set_errno(error));

	/*
	 * Copy out the events and return the fdcnt to the user.
	 */
	if (nfds != 0 &&
	    copyout(pollfdp, fds, nfds * sizeof (pollfd_t)))
		return (set_errno(EFAULT));

#ifdef DEBUG
	/*
	 * Another sanity check:
	 */
	if (fdcnt) {
		int reventcnt = 0;

		for (i = 0; i < nfds; i++) {
			if (pollfdp[i].fd < 0) {
				ASSERT(pollfdp[i].revents == 0);
				continue;
			}
			if (pollfdp[i].revents) {
				reventcnt++;
			}
		}
		ASSERT(fdcnt == reventcnt);
	} else {
		for (i = 0; i < nfds; i++) {
			ASSERT(pollfdp[i].revents == 0);
		}
	}
#endif	/* DEBUG */

	return (fdcnt);
}

/*
 * This is the system call trap that poll(),
 * select() and pselect() are built upon.
 * It is a private interface between libc and the kernel.
 */
int
pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeoutp, sigset_t *setp)
{
	timespec_t ts;
	timespec_t *tsp;
	sigset_t set;
	k_sigset_t kset;
	k_sigset_t *ksetp;
	model_t datamodel = get_udatamodel();

	if (timeoutp == NULL)
		tsp = NULL;
	else {
		if (datamodel == DATAMODEL_NATIVE) {
			if (copyin(timeoutp, &ts, sizeof (ts)))
				return (set_errno(EFAULT));
		} else {
			timespec32_t ts32;

			if (copyin(timeoutp, &ts32, sizeof (ts32)))
				return (set_errno(EFAULT));
			TIMESPEC32_TO_TIMESPEC(&ts, &ts32)
		}

		if (itimerspecfix(&ts))
			return (set_errno(EINVAL));
		tsp = &ts;
	}

	if (setp == NULL)
		ksetp = NULL;
	else {
		if (copyin(setp, &set, sizeof (set)))
			return (set_errno(EFAULT));
		sigutok(&set, &kset);
		ksetp = &kset;
	}

	return (poll_common(fds, nfds, tsp, ksetp));
}
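
/*
 * Illustrative sketch (an assumption about the userland side; the real
 * wrappers live in libc, not here): poll(2)'s millisecond timeout maps onto
 * the timespec taken by pollsys(), with a negative timeout meaning "wait
 * forever" (a NULL timespec) and no signal mask supplied.
 *
 *	int
 *	poll(struct pollfd *fds, nfds_t nfds, int timeout)
 *	{
 *		timespec_t ts, *tsp = NULL;
 *
 *		if (timeout >= 0) {
 *			ts.tv_sec = timeout / MILLISEC;
 *			ts.tv_nsec = (timeout % MILLISEC) * MICROSEC;
 *			tsp = &ts;
 *		}
 *		return (pollsys(fds, nfds, tsp, NULL));
 *	}
 */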

/*
 * Clean up any state left around by poll(2). Called when a thread exits.
 */
void
pollcleanup()
{
	pollstate_t *ps = curthread->t_pollstate;
	pollcache_t *pcp;

	if (ps == NULL)
		return;
	pcp = ps->ps_pcache;
	/*
	 * free up all cached poll fds
	 */
	if (pcp == NULL) {
		/* this pollstate is used by /dev/poll */
		goto pollcleanout;
	}

	if (pcp->pc_bitmap != NULL) {
		ASSERT(MUTEX_NOT_HELD(&ps->ps_lock));
		/*
		 * a close lwp can race with us when cleaning up a polldat
		 * entry.  We hold the ps_lock when cleaning the hash table.
		 * Since this pollcache is going away anyway, there is no
		 * need to hold the pc_lock.
		 */
		mutex_enter(&ps->ps_lock);
		pcache_clean(pcp);
		mutex_exit(&ps->ps_lock);
#ifdef DEBUG
		/*
		 * At this point, all fds cached by this lwp should be
		 * cleaned up.  There should be no fd in fi_list still
		 * referencing this thread.
		 */
		checkfpollinfo();	/* sanity check */
		pollcheckphlist();	/* sanity check */
#endif	/* DEBUG */
	}
	/*
	 * Be sure no one is referencing this thread before exiting.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);
pollcleanout:
	pollstate_destroy(ps);
	curthread->t_pollstate = NULL;
}

/*
 * pollwakeup() - poke threads waiting in poll() for some event
 * on a particular object.
 *
 * The threads hanging off of the specified pollhead structure are scanned.
 * If their event mask matches the specified event(s), then pollnotify() is
 * called to poke the thread.
 *
 * Multiple events may be specified.  When POLLHUP or POLLERR are specified,
 * all waiting threads are poked.
 *
 * It is important that pollnotify() not drop the lock protecting the list
 * of threads.
 */
void
pollwakeup(pollhead_t *php, short events_arg)
{
	polldat_t *pdp;
	int events = (ushort_t)events_arg;
	struct plist {
		port_t *pp;
		int pevents;
		struct plist *next;
	};
	struct plist *plhead = NULL, *pltail = NULL;

retry:
	PH_ENTER(php);

	for (pdp = php->ph_list; pdp; pdp = pdp->pd_next) {
		if ((pdp->pd_events & events) ||
		    (events & (POLLHUP | POLLERR))) {

			pollcache_t *pcp;

			if (pdp->pd_portev != NULL) {
				port_kevent_t *pkevp = pdp->pd_portev;
				/*
				 * Object (fd) is associated with an event port,
				 * => send event notification to the port.
				 */
				ASSERT(pkevp->portkev_source == PORT_SOURCE_FD);
				mutex_enter(&pkevp->portkev_lock);
				if (pkevp->portkev_flags & PORT_KEV_VALID) {
					int pevents;

					pkevp->portkev_flags &= ~PORT_KEV_VALID;
					pkevp->portkev_events |= events &
					    (pdp->pd_events | POLLHUP |
					    POLLERR);
					/*
					 * portkev_lock mutex will be released
					 * by port_send_event().
					 */
					port_send_event(pkevp);

					/*
					 * If we have some thread polling the
					 * port's fd, add it to the list. They
					 * will be notified later.
					 * The port_pollwkup() will flag the
					 * port_t so that it will not disappear
					 * till port_pollwkdone() is called.
					 */
					pevents =
					    port_pollwkup(pkevp->portkev_port);
					if (pevents) {
						struct plist *t;
						t = kmem_zalloc(
						    sizeof (struct plist),
						    KM_SLEEP);
						t->pp = pkevp->portkev_port;
						t->pevents = pevents;
						if (plhead == NULL) {
							plhead = t;
						} else {
							pltail->next = t;
						}
						pltail = t;
					}
				} else {
					mutex_exit(&pkevp->portkev_lock);
				}
				continue;
			}

			pcp = pdp->pd_pcache;

			/*
			 * Try to grab the lock for this thread. If
			 * we don't get it then we may deadlock so
			 * back out and restart all over again. Note
			 * that the failure rate is very very low.
			 */
			if (mutex_tryenter(&pcp->pc_lock)) {
				pollnotify(pcp, pdp->pd_fd);
				mutex_exit(&pcp->pc_lock);
			} else {
				/*
				 * We are here because:
				 *	1) This thread has been woken up
				 *	   and is trying to get out of poll().
				 *	2) Some other thread is also here
				 *	   but with a different pollhead lock.
				 *
				 * So, we need to drop the lock on pollhead
				 * because of (1) but we want to prevent
				 * that thread from doing lwp_exit() or
				 * devpoll close. We want to ensure that
				 * the pollcache pointer is still valid.
				 *
				 * Solution: Grab the pcp->pc_no_exit lock,
				 * increment the pc_busy counter, drop every
				 * lock in sight. Get out of the way and wait
				 * for type (2) threads to finish.
				 */

				mutex_enter(&pcp->pc_no_exit);
				pcp->pc_busy++;	/* prevents exit()'s */
				mutex_exit(&pcp->pc_no_exit);

				PH_EXIT(php);
				mutex_enter(&pcp->pc_lock);
				mutex_exit(&pcp->pc_lock);
				mutex_enter(&pcp->pc_no_exit);
				pcp->pc_busy--;
				if (pcp->pc_busy == 0) {
					/*
					 * Wakeup the thread waiting in
					 * thread_exit().
					 */
					cv_signal(&pcp->pc_busy_cv);
				}
				mutex_exit(&pcp->pc_no_exit);
				goto retry;
			}
		}
	}

	/*
	 * Event ports - If this php belongs to the port at the head of the
	 * list, call port_pollwkdone() to release it. The port_pollwkdone()
	 * needs to be called before dropping the PH lock so that any new
	 * thread attempting to poll this port is blocked. There can be
	 * only one thread here in pollwakeup notifying this port's fd.
	 */
	if (plhead != NULL && &plhead->pp->port_pollhd == php) {
		struct plist *t;
		port_pollwkdone(plhead->pp);
		t = plhead;
		plhead = plhead->next;
		kmem_free(t, sizeof (struct plist));
	}
	PH_EXIT(php);

	/*
	 * Event ports - Notify threads polling the event port's fd.
	 * This is normally done in port_send_event() where it calls
	 * pollwakeup() on the port.
	 * But, for the PORT_SOURCE_FD source alone, we do it here in
	 * pollwakeup() to avoid a recursive call.
	 */
	if (plhead != NULL) {
		php = &plhead->pp->port_pollhd;
		events = plhead->pevents;
		goto retry;
	}
}

/*
 * This function is called to inform a thread (or threads) that an event being
 * polled on has occurred.  The pollstate lock on the thread should be held
 * on entry.
 */
void
pollnotify(pollcache_t *pcp, int fd)
{
	ASSERT(fd < pcp->pc_mapsize);
	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	BT_SET(pcp->pc_bitmap, fd);
	pcp->pc_flag |= PC_POLLWAKE;
	cv_broadcast(&pcp->pc_cv);
	pcache_wake_parents(pcp);
}

/*
 * add a polldat entry to pollhead ph_list. The polldat struct is used
 * by pollwakeup to wake sleeping pollers when polled events have happened.
 */
void
pollhead_insert(pollhead_t *php, polldat_t *pdp)
{
	PH_ENTER(php);
	ASSERT(pdp->pd_next == NULL);
#ifdef DEBUG
	{
		/*
		 * the polldat should not already be on the list
		 */
		polldat_t *wp;
		for (wp = php->ph_list; wp; wp = wp->pd_next) {
			ASSERT(wp != pdp);
		}
	}
#endif	/* DEBUG */
	pdp->pd_next = php->ph_list;
	php->ph_list = pdp;
	PH_EXIT(php);
}

/*
 * Delete the polldat entry from ph_list.
 */
void
pollhead_delete(pollhead_t *php, polldat_t *pdp)
{
	polldat_t *wp;
	polldat_t **wpp;

	PH_ENTER(php);
	for (wpp = &php->ph_list; (wp = *wpp) != NULL; wpp = &wp->pd_next) {
		if (wp == pdp) {
			*wpp = pdp->pd_next;
			pdp->pd_next = NULL;
			break;
		}
	}
#ifdef DEBUG
	/* assert that pdp is no longer in the list */
	for (wp = *wpp; wp; wp = wp->pd_next) {
		ASSERT(wp != pdp);
	}
#endif	/* DEBUG */
	PH_EXIT(php);
}

/*
 * walk through the poll fd lists to see if they are identical. This is an
 * expensive operation and should not be done more than once for each poll()
 * call.
 *
 * As an optimization (i.e., not having to go through the lists more than
 * once), this routine also clears the revents field of pollfd in 'current'.
 * Zeroing out the revents field of each entry in the current poll list is
 * required by the poll man page.
 *
 * Since the events field of the cached list has illegal poll events filtered
 * out, the current list applies the same filtering before comparison.
 *
 * The routine stops when it detects a meaningful difference, or when it
 * exhausts the lists.
 */
int
pcacheset_cmp(pollfd_t *current, pollfd_t *cached, pollfd_t *newlist, int n)
{
	int ix;

	for (ix = 0; ix < n; ix++) {
		/* Prefetch 64 bytes worth of 8-byte elements */
		if ((ix & 0x7) == 0) {
			prefetch_write_many((caddr_t)&current[ix + 8]);
			prefetch_write_many((caddr_t)&cached[ix + 8]);
		}
		if (current[ix].fd == cached[ix].fd) {
			/*
			 * Filter out invalid poll events while we are
			 * inside the loop.
			 */
			if (current[ix].events & ~VALID_POLL_EVENTS) {
				current[ix].events &= VALID_POLL_EVENTS;
				if (newlist != NULL)
					newlist[ix].events = current[ix].events;
			}
			if (current[ix].events == cached[ix].events) {
				current[ix].revents = 0;
				continue;
			}
		}
		if ((current[ix].fd < 0) && (cached[ix].fd < 0)) {
			current[ix].revents = 0;
			continue;
		}
		return (ix);
	}
	return (ix);
}

/*
 * This routine returns a pointer to a cached poll fd entry, or NULL if it
 * does not find it in the hash table.
 */
polldat_t *
pcache_lookup_fd(pollcache_t *pcp, int fd)
{
	int hashindex;
	polldat_t *pdp;

	hashindex = POLLHASH(pcp->pc_hashsize, fd);
	pdp = pcp->pc_hash[hashindex];
	while (pdp != NULL) {
		if (pdp->pd_fd == fd)
			break;
		pdp = pdp->pd_hashnext;
	}
	return (pdp);
}

polldat_t *
pcache_alloc_fd(int nsets)
{
	polldat_t *pdp;

	pdp = kmem_zalloc(sizeof (polldat_t), KM_SLEEP);
	if (nsets > 0) {
		pdp->pd_ref = kmem_zalloc(sizeof (xref_t) * nsets, KM_SLEEP);
		pdp->pd_nsets = nsets;
	}
	return (pdp);
}

/*
 * This routine inserts a polldat into the pollcache's hash table. It
 * may be necessary to grow the size of the hash table.
polldat_t *
pcache_alloc_fd(int nsets)
{
	polldat_t *pdp;

	pdp = kmem_zalloc(sizeof (polldat_t), KM_SLEEP);
	if (nsets > 0) {
		pdp->pd_ref = kmem_zalloc(sizeof (xref_t) * nsets, KM_SLEEP);
		pdp->pd_nsets = nsets;
	}
	return (pdp);
}

/*
 * This routine inserts a polldat into the pollcache's hash table. It
 * may be necessary to grow the size of the hash table.
 */
void
pcache_insert_fd(pollcache_t *pcp, polldat_t *pdp, nfds_t nfds)
{
	int hashindex;
	int fd;

	if ((pcp->pc_fdcount > pcp->pc_hashsize * POLLHASHTHRESHOLD) ||
	    (nfds > pcp->pc_hashsize * POLLHASHTHRESHOLD)) {
		pcache_grow_hashtbl(pcp, nfds);
	}
	fd = pdp->pd_fd;
	hashindex = POLLHASH(pcp->pc_hashsize, fd);
	pdp->pd_hashnext = pcp->pc_hash[hashindex];
	pcp->pc_hash[hashindex] = pdp;
	pcp->pc_fdcount++;

#ifdef DEBUG
	{
		/*
		 * same fd should not appear on a hash list twice
		 */
		polldat_t *pdp1;
		for (pdp1 = pdp->pd_hashnext; pdp1; pdp1 = pdp1->pd_hashnext) {
			ASSERT(pdp->pd_fd != pdp1->pd_fd);
		}
	}
#endif	/* DEBUG */
}

/*
 * Grow the hash table -- either double the table size or round it up to the
 * nearest multiple of POLLHASHCHUNKSZ, whichever is bigger. Rehash all the
 * elements on the hash table.
 */
void
pcache_grow_hashtbl(pollcache_t *pcp, nfds_t nfds)
{
	int oldsize;
	polldat_t **oldtbl;
	polldat_t *pdp, *pdp1;
	int i;
#ifdef DEBUG
	int count = 0;
#endif

	ASSERT(pcp->pc_hashsize % POLLHASHCHUNKSZ == 0);
	oldsize = pcp->pc_hashsize;
	oldtbl = pcp->pc_hash;
	if (nfds > pcp->pc_hashsize * POLLHASHINC) {
		pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) &
		    ~(POLLHASHCHUNKSZ - 1);
	} else {
		pcp->pc_hashsize = pcp->pc_hashsize * POLLHASHINC;
	}
	pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *),
	    KM_SLEEP);
	/*
	 * rehash existing elements
	 */
	pcp->pc_fdcount = 0;
	for (i = 0; i < oldsize; i++) {
		pdp = oldtbl[i];
		while (pdp != NULL) {
			pdp1 = pdp->pd_hashnext;
			pcache_insert_fd(pcp, pdp, nfds);
			pdp = pdp1;
#ifdef DEBUG
			count++;
#endif
		}
	}
	kmem_free(oldtbl, oldsize * sizeof (polldat_t *));
	ASSERT(pcp->pc_fdcount == count);
}

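/*
 * Grow the pollcache bitmap so that it covers file descriptor 'fd'. The
 * bitmap records which cached fds may have pending events and therefore
 * need a VOP_POLL on the next scan.
 */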
void
pcache_grow_map(pollcache_t *pcp, int fd)
{
	int newsize;
	ulong_t *newmap;

	/*
	 * grow to the nearest multiple of POLLMAPCHUNK, assuming POLLMAPCHUNK
	 * is a power of 2.
	 */
	newsize = (fd + POLLMAPCHUNK) & ~(POLLMAPCHUNK - 1);
	newmap = kmem_zalloc((newsize / BT_NBIPUL) * sizeof (ulong_t),
	    KM_SLEEP);
	/*
	 * we don't want pollwakeup to set a bit while growing the bitmap.
	 */
	ASSERT(mutex_owned(&pcp->pc_lock) == 0);
	mutex_enter(&pcp->pc_lock);
	bcopy(pcp->pc_bitmap, newmap,
	    (pcp->pc_mapsize / BT_NBIPUL) * sizeof (ulong_t));
	kmem_free(pcp->pc_bitmap,
	    (pcp->pc_mapsize / BT_NBIPUL) * sizeof (ulong_t));
	pcp->pc_bitmap = newmap;
	pcp->pc_mapsize = newsize;
	mutex_exit(&pcp->pc_lock);
}

/*
 * Remove all the references from the pollhead and fpollinfo lists.
 */
void
pcache_clean(pollcache_t *pcp)
{
	int i;
	polldat_t **hashtbl;
	polldat_t *pdp;

	ASSERT(MUTEX_HELD(&curthread->t_pollstate->ps_lock));
	hashtbl = pcp->pc_hash;
	for (i = 0; i < pcp->pc_hashsize; i++) {
		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			if (pdp->pd_fp != NULL) {
				delfpollinfo(pdp->pd_fd);
				pdp->pd_fp = NULL;
			}
		}
	}
}

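/*
 * Invalidate every cached appearance of a closed poll fd: clear pd_events,
 * mark each matching pcs_pollfd slot in every cached poll set as unused
 * (fd = -1), and drop the corresponding cross references.
 */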
void
pcacheset_invalidate(pollstate_t *ps, polldat_t *pdp)
{
	int i;
	int fd = pdp->pd_fd;

	/*
	 * we come here because of an earlier close() on this cached poll fd.
	 */
	ASSERT(pdp->pd_fp == NULL);
	ASSERT(MUTEX_HELD(&ps->ps_lock));
	pdp->pd_events = 0;
	for (i = 0; i < ps->ps_nsets; i++) {
		xref_t *refp;
		pollcacheset_t *pcsp;

		ASSERT(pdp->pd_ref != NULL);
		refp = &pdp->pd_ref[i];
		if (refp->xf_refcnt) {
			ASSERT(refp->xf_position >= 0);
			pcsp = &ps->ps_pcacheset[i];
			if (refp->xf_refcnt == 1) {
				pcsp->pcs_pollfd[refp->xf_position].fd = -1;
				refp->xf_refcnt = 0;
				pdp->pd_count--;
			} else if (refp->xf_refcnt > 1) {
				int j;

				/*
				 * turn off every appearance in pcs_pollfd list
				 */
				for (j = refp->xf_position;
				    j < pcsp->pcs_nfds; j++) {
					if (pcsp->pcs_pollfd[j].fd == fd) {
						pcsp->pcs_pollfd[j].fd = -1;
						refp->xf_refcnt--;
						pdp->pd_count--;
					}
				}
			}
			ASSERT(refp->xf_refcnt == 0);
			refp->xf_position = POLLPOSINVAL;
		}
	}
	ASSERT(pdp->pd_count == 0);
}

/*
 * Insert a poll fd into the pollcache, and add a poll registration.
 * This routine is called after getf() and before releasef(). So the vnode
 * cannot disappear even if we block here.
 * If there is an error, the polled fd is not cached.
 */
int
pcache_insert(pollstate_t *ps, file_t *fp, pollfd_t *pollfdp, int *fdcntp,
    ssize_t pos, int which)
{
	pollcache_t *pcp = ps->ps_pcache;
	polldat_t *pdp;
	int error;
	int fd;
	pollhead_t *memphp = NULL;
	xref_t *refp;
	int newpollfd = 0;

	ASSERT(MUTEX_HELD(&ps->ps_lock));
	/*
	 * The poll caching uses the existing VOP_POLL interface. If there
	 * are no polled events, we want the polled device to set its
	 * "someone is sleeping in poll" flag. When the polled events happen
	 * later, the driver will call pollwakeup(). We achieve this by
	 * always passing 0 in the third parameter ("anyyet") when calling
	 * VOP_POLL. This parameter is not looked at by drivers when the
	 * polled events exist. If a driver chooses to ignore this parameter
	 * and call pollwakeup whenever the polled events happen, that will
	 * be OK too.
	 */
	ASSERT(curthread->t_pollcache == NULL);
	error = VOP_POLL(fp->f_vnode, pollfdp->events, 0, &pollfdp->revents,
	    &memphp, NULL);
	if (error) {
		return (error);
	}
	if (pollfdp->revents) {
		(*fdcntp)++;
	}
	/*
	 * Polling the underlying device succeeded. Now we can cache it.
	 * A close can't come in here because we have not done a releasef()
	 * yet.
	 */
	fd = pollfdp->fd;
	pdp = pcache_lookup_fd(pcp, fd);
	if (pdp == NULL) {
		ASSERT(ps->ps_nsets > 0);
		pdp = pcache_alloc_fd(ps->ps_nsets);
		newpollfd = 1;
	}
	/*
	 * If this entry was used to cache a poll fd which was closed, and
	 * this entry has not been cleaned, do it now.
	 */
	if ((pdp->pd_count > 0) && (pdp->pd_fp == NULL)) {
		pcacheset_invalidate(ps, pdp);
		ASSERT(pdp->pd_next == NULL);
	}
	if (pdp->pd_count == 0) {
		pdp->pd_fd = fd;
		pdp->pd_fp = fp;
		addfpollinfo(fd);
		pdp->pd_thread = curthread;
		pdp->pd_pcache = pcp;
		/*
		 * the entry is never used or cleared by removing a cached
		 * pollfd (pcache_delete_fd). So all the fields should be clear.
		 */
		ASSERT(pdp->pd_next == NULL);
	}

	/*
	 * A polled fd is considered cached. So there should be an fpollinfo
	 * entry on the uf_fpollinfo list.
	 */
	ASSERT(infpollinfo(fd));
	/*
	 * If there is an inconsistency, we want to know it here.
	 */
	ASSERT(pdp->pd_fp == fp);

	/*
	 * XXX pd_events is a union of all polled events on this fd, possibly
	 * by different threads. Unless this is a new first poll(), pd_events
	 * never shrinks. If an event is no longer polled by a process, there
	 * is no way to cancel that event. In that case, poll degrades to its
	 * old form -- polling on this fd every time poll() is called. The
	 * assumption is that an app always polls the same type of events.
	 */
	pdp->pd_events |= pollfdp->events;

	pdp->pd_count++;
	/*
	 * There is not much special handling for multiple appearances of
	 * the same fd other than xf_position always recording the first
	 * appearance in the poll list. If this is called from
	 * pcacheset_cache_list, a VOP_POLL is called on every pollfd entry;
	 * therefore each revents and fdcnt should be set correctly. If this
	 * is called from pcacheset_resolve, we don't care about fdcnt here.
	 * Pollreadmap will pick up the right count and handle the revents
	 * field of each pollfd entry.
	 */
	ASSERT(pdp->pd_ref != NULL);
	refp = &pdp->pd_ref[which];
	if (refp->xf_refcnt == 0) {
		refp->xf_position = pos;
	} else {
		/*
		 * xf_position records the fd's first appearance in poll list
		 */
		if (pos < refp->xf_position) {
			refp->xf_position = pos;
		}
	}
	ASSERT(pollfdp->fd == ps->ps_pollfd[refp->xf_position].fd);
	refp->xf_refcnt++;
	if (fd >= pcp->pc_mapsize) {
		pcache_grow_map(pcp, fd);
	}
	if (fd > pcp->pc_mapend) {
		pcp->pc_mapend = fd;
	}
	if (newpollfd != 0) {
		pcache_insert_fd(ps->ps_pcache, pdp, ps->ps_nfds);
	}
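	/*
	 * Hook this polldat onto the pollhead returned by VOP_POLL (if any)
	 * so that a later pollwakeup() on the device can find us.
	 */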
	if (memphp) {
		if (pdp->pd_php == NULL) {
			pollhead_insert(memphp, pdp);
			pdp->pd_php = memphp;
		} else {
			if (memphp != pdp->pd_php) {
				/*
				 * layered devices (e.g. console driver)
				 * may change the vnode and thus the pollhead
				 * pointer out from underneath us.
				 */
				pollhead_delete(pdp->pd_php, pdp);
				pollhead_insert(memphp, pdp);
				pdp->pd_php = memphp;
			}
		}
	}
	/*
	 * Since there is a considerable window between VOP_POLL and when
	 * we actually put the polldat struct on the pollhead list, we could
	 * miss a pollwakeup. In the case of polling additional events, we
	 * don't update the events until after VOP_POLL. So we could miss
	 * pollwakeup there too. So we always set the bit here just to be
	 * safe. The real performance gain is in subsequent pcache_poll.
	 */
	mutex_enter(&pcp->pc_lock);
	BT_SET(pcp->pc_bitmap, fd);
	mutex_exit(&pcp->pc_lock);
	return (0);
}

/*
 * The entry is not really deleted. The fields are cleared so that the
 * entry is no longer useful, but it will remain in the hash table for reuse
 * later. It will be freed when the polling lwp exits.
 */
int
pcache_delete_fd(pollstate_t *ps, int fd, size_t pos, int which, uint_t cevent)
{
	pollcache_t *pcp = ps->ps_pcache;
	polldat_t *pdp;
	xref_t *refp;

	ASSERT(fd < pcp->pc_mapsize);
	ASSERT(MUTEX_HELD(&ps->ps_lock));

	pdp = pcache_lookup_fd(pcp, fd);
	ASSERT(pdp != NULL);
	ASSERT(pdp->pd_count > 0);
	ASSERT(pdp->pd_ref != NULL);
	refp = &pdp->pd_ref[which];
	if (pdp->pd_count == 1) {
		pdp->pd_events = 0;
		refp->xf_position = POLLPOSINVAL;
		ASSERT(refp->xf_refcnt == 1);
		refp->xf_refcnt = 0;
		if (pdp->pd_php) {
			/*
			 * It is possible for a wakeup thread to get ahead
			 * of the following pollhead_delete and set the bit
			 * in the bitmap. It is OK because the bit will be
			 * cleared here anyway.
			 */
			pollhead_delete(pdp->pd_php, pdp);
			pdp->pd_php = NULL;
		}
		pdp->pd_count = 0;
		if (pdp->pd_fp != NULL) {
			pdp->pd_fp = NULL;
			delfpollinfo(fd);
		}
		mutex_enter(&pcp->pc_lock);
		BT_CLEAR(pcp->pc_bitmap, fd);
		mutex_exit(&pcp->pc_lock);
		return (0);
	}
	if ((cevent & POLLCLOSED) == POLLCLOSED) {
		/*
		 * fd cached here has been closed. This is the first
		 * pcache_delete_fd called after the close. Clean up the
		 * entire entry.
		 */
		pcacheset_invalidate(ps, pdp);
		ASSERT(pdp->pd_php == NULL);
		mutex_enter(&pcp->pc_lock);
		BT_CLEAR(pcp->pc_bitmap, fd);
		mutex_exit(&pcp->pc_lock);
		return (0);
	}
#ifdef DEBUG
	if (getf(fd) != NULL) {
		ASSERT(infpollinfo(fd));
		releasef(fd);
	}
#endif	/* DEBUG */
	pdp->pd_count--;
	ASSERT(refp->xf_refcnt > 0);
	if (--refp->xf_refcnt == 0) {
		refp->xf_position = POLLPOSINVAL;
	} else {
		ASSERT(pos >= refp->xf_position);
		if (pos == refp->xf_position) {
			/*
			 * The xref position is no longer valid.
			 * Reset it to a special value and let the
			 * caller know it needs to call pcache_update_xref()
			 * with a new xf_position value.
			 */
			refp->xf_position = POLLPOSTRANS;
			return (1);
		}
	}
	return (0);
}

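/*
 * Record a new "first appearance" position for a cached fd in the given
 * cached poll set. Called after pcache_delete_fd() returns 1 (POLLPOSTRANS),
 * once the caller has located the fd's next occurrence in the list.
 */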
void
pcache_update_xref(pollcache_t *pcp, int fd, ssize_t pos, int which)
{
	polldat_t *pdp;

	pdp = pcache_lookup_fd(pcp, fd);
	ASSERT(pdp != NULL);
	ASSERT(pdp->pd_ref != NULL);
	pdp->pd_ref[which].xf_position = pos;
}

#ifdef DEBUG
/*
 * For each polled fd, it's either in the bitmap or cached in the pcache
 * hash table. If this routine returns 0, something is wrong.
 */
static int
pollchecksanity(pollstate_t *ps, nfds_t nfds)
{
	int i;
	int fd;
	pollcache_t *pcp = ps->ps_pcache;
	polldat_t *pdp;
	pollfd_t *pollfdp = ps->ps_pollfd;
	file_t *fp;

	ASSERT(MUTEX_HELD(&ps->ps_lock));
	for (i = 0; i < nfds; i++) {
		fd = pollfdp[i].fd;
		if (fd < 0) {
			ASSERT(pollfdp[i].revents == 0);
			continue;
		}
		if (pollfdp[i].revents == POLLNVAL)
			continue;
		if ((fp = getf(fd)) == NULL)
			continue;
		pdp = pcache_lookup_fd(pcp, fd);
		ASSERT(pdp != NULL);
		ASSERT(infpollinfo(fd));
		ASSERT(pdp->pd_fp == fp);
		releasef(fd);
		if (BT_TEST(pcp->pc_bitmap, fd))
			continue;
		if (pdp->pd_php == NULL)
			return (0);
	}
	return (1);
}
#endif	/* DEBUG */

/*
 * Resolve the difference between the current poll list and a cached one.
 */
int
pcacheset_resolve(pollstate_t *ps, nfds_t nfds, int *fdcntp, int which)
{
	int i;
	pollcache_t *pcp = ps->ps_pcache;
	pollfd_t *newlist = NULL;
	pollfd_t *current = ps->ps_pollfd;
	pollfd_t *cached;
	pollcacheset_t *pcsp;
	int common;
	int count = 0;
	int offset;
	int remain;
	int fd;
	file_t *fp;
	int fdcnt = 0;
	int cnt = 0;
	nfds_t old_nfds;
	int error = 0;
	int mismatch = 0;

	ASSERT(MUTEX_HELD(&ps->ps_lock));
#ifdef DEBUG
	checkpolldat(ps);
#endif
	pcsp = &ps->ps_pcacheset[which];
	old_nfds = pcsp->pcs_nfds;
	common = (nfds > old_nfds) ? old_nfds : nfds;
	if (nfds != old_nfds) {
		/*
		 * the length of the poll list has changed. Allocate a new
		 * pollfd list.
		 */
		newlist = kmem_alloc(nfds * sizeof (pollfd_t), KM_SLEEP);
		bcopy(current, newlist, sizeof (pollfd_t) * nfds);
	}
	/*
	 * Compare the overlapping part of the current fd list with the
	 * cached one. Whenever a difference is found, resolve it.
	 * The comparison is done on the current poll list and the
	 * cached list. But we may be setting up the newlist to be the
	 * cached list for the next poll.
	 */
	cached = pcsp->pcs_pollfd;
	remain = common;

	while (count < common) {
		int tmpfd;
		pollfd_t *np;

		np = (newlist != NULL) ? &newlist[count] : NULL;
		offset = pcacheset_cmp(&current[count], &cached[count], np,
		    remain);
		/*
		 * Collect stats. If the lists match completely the first
		 * time, it's a hit. Otherwise, it's a partial hit or a miss.
		 */
		if ((count == 0) && (offset == common)) {
			pollstats.pollcachehit.value.ui64++;
		} else {
			mismatch++;
		}
		count += offset;
		if (offset < remain) {
			ASSERT(count < common);
			ASSERT((current[count].fd != cached[count].fd) ||
			    (current[count].events != cached[count].events));
			/*
			 * Filter out invalid events.
			 */
			if (current[count].events & ~VALID_POLL_EVENTS) {
				if (newlist != NULL) {
					newlist[count].events =
					    current[count].events &=
					    VALID_POLL_EVENTS;
				} else {
					current[count].events &=
					    VALID_POLL_EVENTS;
				}
			}
			/*
			 * when resolving a difference, we always remove the
			 * fd from the cache before inserting one into the
			 * cache.
			 */
			if (cached[count].fd >= 0) {
				tmpfd = cached[count].fd;
				if (pcache_delete_fd(ps, tmpfd, count, which,
				    (uint_t)cached[count].events)) {
					/*
					 * This should be rare but needed for
					 * correctness.
					 *
					 * The first appearance in the cached
					 * list is being "turned off". The
					 * same fd appears more than once in
					 * the cached poll list. Find the next
					 * one on the list and update the
					 * cached xf_position field.
					 */
					for (i = count + 1; i < old_nfds; i++) {
						if (cached[i].fd == tmpfd) {
							pcache_update_xref(pcp,
							    tmpfd, (ssize_t)i,
							    which);
							break;
						}
					}
					ASSERT(i <= old_nfds);
				}
				/*
				 * In case a new cache list is allocated,
				 * we need to keep both cache lists in sync
				 * b/c the new one can be freed if we have
				 * an error later.
				 */
				cached[count].fd = -1;
				if (newlist != NULL) {
					newlist[count].fd = -1;
				}
			}
			if ((tmpfd = current[count].fd) >= 0) {
				/*
				 * add to the cached fd tbl and bitmap.
				 */
				if ((fp = getf(tmpfd)) == NULL) {
					current[count].revents = POLLNVAL;
					if (newlist != NULL) {
						newlist[count].fd = -1;
					}
					cached[count].fd = -1;
					fdcnt++;
				} else {
					/*
					 * Here we don't care about the
					 * fdcnt. We will examine the bitmap
					 * later and pick up the correct
					 * fdcnt there. So we never bother
					 * to check the value of 'cnt'.
					 */
					error = pcache_insert(ps, fp,
					    &current[count], &cnt,
					    (ssize_t)count, which);
					/*
					 * if no error, we want to do releasef
					 * after we have updated the cached
					 * poll list entry so that close()
					 * won't race us.
					 */
					if (error) {
						/*
						 * If we encountered an error,
						 * we have invalidated an
						 * entry in the cached poll
						 * list (in pcache_delete_fd()
						 * above) but failed to add
						 * one here. This is OK b/c
						 * what's in the cached list
						 * is consistent with the
						 * content of the cache.
						 * It will not have any ill
						 * effect on the next poll().
						 */
						releasef(tmpfd);
						if (newlist != NULL) {
							kmem_free(newlist,
							    nfds *
							    sizeof (pollfd_t));
						}
						return (error);
					}
					/*
					 * If we have allocated a new (temp)
					 * cache list, we need to keep both
					 * in sync b/c the new one can be
					 * freed if we have an error later.
					 */
					if (newlist != NULL) {
						newlist[count].fd =
						    current[count].fd;
						newlist[count].events =
						    current[count].events;
					}
					cached[count].fd = current[count].fd;
					cached[count].events =
					    current[count].events;
					releasef(tmpfd);
				}
			} else {
				current[count].revents = 0;
			}
			count++;
			remain = common - count;
		}
	}
	if (mismatch != 0) {
		if (mismatch == common) {
			pollstats.pollcachemiss.value.ui64++;
		} else {
			pollstats.pollcachephit.value.ui64++;
		}
	}
	/*
	 * take care of the non-overlapping part of a list
	 */
	if (nfds > old_nfds) {
		ASSERT(newlist != NULL);
		for (i = old_nfds; i < nfds; i++) {
			/* filter out invalid events */
			if (current[i].events & ~VALID_POLL_EVENTS) {
				newlist[i].events = current[i].events =
				    current[i].events & VALID_POLL_EVENTS;
			}
			if ((fd = current[i].fd) < 0) {
				current[i].revents = 0;
				continue;
			}
			/*
			 * add to the cached fd tbl and bitmap.
			 */
			if ((fp = getf(fd)) == NULL) {
				current[i].revents = POLLNVAL;
				newlist[i].fd = -1;
				fdcnt++;
				continue;
			}
			/*
			 * Here we don't care about the
			 * fdcnt. We will examine the bitmap
			 * later and pick up the correct
			 * fdcnt there. So we never bother to
			 * check 'cnt'.
			 */
			error = pcache_insert(ps, fp, &current[i], &cnt,
			    (ssize_t)i, which);
			releasef(fd);
			if (error) {
				/*
				 * Here we are halfway through adding newly
				 * polled fds. Undo enough to keep the cache
				 * list consistent with the cache content.
				 */
				pcacheset_remove_list(ps, current, old_nfds,
				    i, which, 0);
				kmem_free(newlist, nfds * sizeof (pollfd_t));
				return (error);
			}
		}
	}
	if (old_nfds > nfds) {
		/*
		 * remove the fds which are no longer polled.
		 */
		pcacheset_remove_list(ps, pcsp->pcs_pollfd, nfds, old_nfds,
		    which, 1);
	}
	/*
	 * Set difference resolved. Update nfds and the cached list
	 * in the pollstate struct.
	 */
	if (newlist != NULL) {
		kmem_free(pcsp->pcs_pollfd, old_nfds * sizeof (pollfd_t));
		/*
		 * By now, the pollfd.revents field should
		 * all be zeroed.
		 */
		pcsp->pcs_pollfd = newlist;
		pcsp->pcs_nfds = nfds;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	/*
	 * By now, for every fd in pollfdp, one of the following should be
	 * true. Otherwise we will miss a polled event.
	 *
	 * 1. the bit corresponding to the fd in the bitmap is set. So
	 *    VOP_POLL will be called on this fd in the next poll.
	 * 2. the fd is cached in the pcache (i.e. pd_php is set). So
	 *    pollnotify will happen.
	 */
	ASSERT(pollchecksanity(ps, nfds));
	/*
	 * make sure the cross references between the cached poll lists and
	 * the cached poll fds are correct.
	 */
	ASSERT(pollcheckxref(ps, which));
	/*
	 * ensure each polldat in the pollcache references a polled fd in
	 * the pollcacheset.
	 */
#ifdef DEBUG
	checkpolldat(ps);
#endif
	return (0);
}

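/*
 * DEBUG helper: count the pollfd entries with a non-zero revents field and
 * verify that every entry reporting real events (i.e. not POLLNVAL) still
 * has its bit set in the pollcache bitmap.
 */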
#ifdef DEBUG
static int
pollscanrevents(pollcache_t *pcp, pollfd_t *pollfdp, nfds_t nfds)
{
	int i;
	int reventcnt = 0;

	for (i = 0; i < nfds; i++) {
		if (pollfdp[i].fd < 0) {
			ASSERT(pollfdp[i].revents == 0);
			continue;
		}
		if (pollfdp[i].revents) {
			reventcnt++;
		}
		if (pollfdp[i].revents && (pollfdp[i].revents != POLLNVAL)) {
			ASSERT(BT_TEST(pcp->pc_bitmap, pollfdp[i].fd));
		}
	}
	return (reventcnt);
}
#endif	/* DEBUG */

/*
 * read the bitmap and poll on fds corresponding to the '1' bits. The ps_lock
 * is held upon entry.
 */
int
pcache_poll(pollfd_t *pollfdp, pollstate_t *ps, nfds_t nfds, int *fdcntp,
    int which)
{
	int i;
	pollcache_t *pcp;
	int fd;
	int begin, end, done;
	pollhead_t *php;
	int fdcnt;
	int error = 0;
	file_t *fp;
	polldat_t *pdp;
	xref_t *refp;
	int entry;

	pcp = ps->ps_pcache;
	ASSERT(MUTEX_HELD(&ps->ps_lock));
	ASSERT(MUTEX_HELD(&pcp->pc_lock));
retry:
	done = 0;
	begin = 0;
	fdcnt = 0;
	end = pcp->pc_mapend;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		/*
		 * only poll fds which may have events
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, begin, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			ASSERT(pollcheckrevents(ps, begin, fd, which));
			/*
			 * adjust map pointers for next round
			 */
			if (fd == end) {
				done = 1;
			} else {
				begin = fd + 1;
			}
			/*
			 * A bitmap caches poll state information of
			 * multiple poll lists. Call VOP_POLL only if
			 * the bit corresponds to an fd in this poll
			 * list.
			 */
			pdp = pcache_lookup_fd(pcp, fd);
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_ref != NULL);
			refp = &pdp->pd_ref[which];
			if (refp->xf_refcnt == 0)
				continue;
			entry = refp->xf_position;
			ASSERT((entry >= 0) && (entry < nfds));
			ASSERT(pollfdp[entry].fd == fd);
			/*
			 * Being in this routine implies that we have
			 * successfully polled this fd in the past.
			 * Check to see whether this fd was closed while we
			 * were blocked in poll. This ensures that we don't
			 * miss a close on the fd in the case this fd is
			 * reused.
			 */
			if (pdp->pd_fp == NULL) {
				ASSERT(pdp->pd_count > 0);
				pollfdp[entry].revents = POLLNVAL;
				fdcnt++;
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple times
					 * in the poll list. Find all of them.
					 */
					for (i = entry + 1; i < nfds; i++) {
						if (pollfdp[i].fd == fd) {
							pollfdp[i].revents =
							    POLLNVAL;
							fdcnt++;
						}
					}
				}
				pcacheset_invalidate(ps, pdp);
				continue;
			}
			/*
			 * We can be here polling a device that is being
			 * closed (i.e. the file pointer is set to NULL,
			 * but pollcacheclean has not happened yet).
			 */
			if ((fp = getf(fd)) == NULL) {
				pollfdp[entry].revents = POLLNVAL;
				fdcnt++;
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple times
					 * in the poll list. Find all of them.
					 */
					for (i = entry + 1; i < nfds; i++) {
						if (pollfdp[i].fd == fd) {
							pollfdp[i].revents =
							    POLLNVAL;
							fdcnt++;
						}
					}
				}
				continue;
			}
			ASSERT(pdp->pd_fp == fp);
			ASSERT(infpollinfo(fd));
			/*
			 * Since we no longer hold the pollhead lock across
			 * VOP_POLL, the pollunlock logic can be simplified.
			 */
			ASSERT(pdp->pd_php == NULL ||
			    MUTEX_NOT_HELD(PHLOCK(pdp->pd_php)));
			/*
			 * underlying file systems may set a "pollpending"
			 * flag when they see that the poll may block.
			 * Pollwakeup() is called by the wakeup thread if
			 * pollpending is set. Pass a 0 fdcnt so that the
			 * underlying file system will set the "pollpending"
			 * flag when there are no polled events.
			 *
			 * Use pollfdp[].events for actual polling because
			 * pd_events is a union of all cached poll events
			 * on this fd. The events parameter also affects
			 * how the polled device sets the "poll pending"
			 * flag.
			 */
			ASSERT(curthread->t_pollcache == NULL);
			error = VOP_POLL(fp->f_vnode, pollfdp[entry].events, 0,
			    &pollfdp[entry].revents, &php, NULL);
			/*
			 * releasef only after we are completely done with
			 * this cached poll entry, to prevent a close() from
			 * coming in and clearing the entry.
			 */
			if (error) {
				releasef(fd);
				break;
			}
			/*
			 * layered devices (e.g. console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				releasef(fd);
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * We could have missed a wakeup on the new
				 * target device. Make sure the new target
				 * gets polled once.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				goto retry;
			}

			if (pollfdp[entry].revents) {
				ASSERT(refp->xf_refcnt >= 1);
				fdcnt++;
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple times
					 * in the poll list. This is rare but
					 * we have to look at all of them for
					 * correctness.
					 */
					error = plist_chkdupfd(fp, pdp, ps,
					    pollfdp, entry, &fdcnt);
					if (error > 0) {
						releasef(fd);
						break;
					}
					if (error < 0) {
						goto retry;
					}
				}
				releasef(fd);
			} else {
				/*
				 * VOP_POLL didn't return any revents. We can
				 * clear the bit in the bitmap only if we have
				 * the pollhead ptr cached and no other cached
				 * entry is polling different events on this
				 * fd. VOP_POLL may have dropped the ps_lock.
				 * Make sure pollwakeup has not happened
				 * before clearing the bit.
				 */
				if ((pdp->pd_php != NULL) &&
				    (pollfdp[entry].events == pdp->pd_events) &&
				    ((pcp->pc_flag & PC_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				/*
				 * if the fd can be cached now but not before,
				 * do it now.
				 */
				if ((pdp->pd_php == NULL) && (php != NULL)) {
					pdp->pd_php = php;
					pollhead_insert(php, pdp);
					/*
					 * We are inserting a polldat struct for
					 * the first time. We may have missed a
					 * wakeup on this device. Re-poll once.
					 * This should be a rare event.
					 */
					releasef(fd);
					goto retry;
				}
				if (refp->xf_refcnt > 1) {
					/*
					 * this fd appeared multiple times
					 * in the poll list. This is rare but
					 * we have to look at all of them for
					 * correctness.
					 */
					error = plist_chkdupfd(fp, pdp, ps,
					    pollfdp, entry, &fdcnt);
					if (error > 0) {
						releasef(fd);
						break;
					}
					if (error < 0) {
						goto retry;
					}
				}
				releasef(fd);
			}
		} else {
			done = 1;
			ASSERT(pollcheckrevents(ps, begin, end + 1, which));
		}
	}
	if (!error) {
		ASSERT(*fdcntp + fdcnt == pollscanrevents(pcp, pollfdp, nfds));
		*fdcntp += fdcnt;
	}
	return (error);
}

/*
 * Going through the poll list without much locking. Poll all fds and
 * cache all valid fds in the pollcache.
 */
int
pcacheset_cache_list(pollstate_t *ps, pollfd_t *fds, int *fdcntp, int which)
{
	pollfd_t *pollfdp = ps->ps_pollfd;
	pollcacheset_t *pcacheset = ps->ps_pcacheset;
	pollfd_t *newfdlist;
	int i;
	int fd;
	file_t *fp;
	int error = 0;

	ASSERT(MUTEX_HELD(&ps->ps_lock));
	ASSERT(which < ps->ps_nsets);
	ASSERT(pcacheset != NULL);
	ASSERT(pcacheset[which].pcs_pollfd == NULL);
	newfdlist = kmem_alloc(ps->ps_nfds * sizeof (pollfd_t), KM_SLEEP);
	/*
	 * cache the new poll list in the pollcacheset.
	 */
	bcopy(pollfdp, newfdlist, sizeof (pollfd_t) * ps->ps_nfds);

	pcacheset[which].pcs_pollfd = newfdlist;
	pcacheset[which].pcs_nfds = ps->ps_nfds;
	pcacheset[which].pcs_usradr = (uintptr_t)fds;

	/*
	 * We have saved a copy of the current poll fd list in one
	 * pollcacheset. The 'revents' field of the new list is not yet set
	 * to 0. Looping through the new list just to do that is expensive.
	 * We do that while polling the list.
	 */
	for (i = 0; i < ps->ps_nfds; i++) {
		fd = pollfdp[i].fd;
		/*
		 * We also filter out the illegal poll events in the event
		 * field for the cached poll list/set.
21497c478bd9Sstevel@tonic-gate */ 21507c478bd9Sstevel@tonic-gate if (pollfdp[i].events & ~VALID_POLL_EVENTS) { 21517c478bd9Sstevel@tonic-gate newfdlist[i].events = pollfdp[i].events = 21527c478bd9Sstevel@tonic-gate pollfdp[i].events & VALID_POLL_EVENTS; 21537c478bd9Sstevel@tonic-gate } 21547c478bd9Sstevel@tonic-gate if (fd < 0) { 21557c478bd9Sstevel@tonic-gate pollfdp[i].revents = 0; 21567c478bd9Sstevel@tonic-gate continue; 21577c478bd9Sstevel@tonic-gate } 21587c478bd9Sstevel@tonic-gate if ((fp = getf(fd)) == NULL) { 21597c478bd9Sstevel@tonic-gate pollfdp[i].revents = POLLNVAL; 21607c478bd9Sstevel@tonic-gate /* 21617c478bd9Sstevel@tonic-gate * invalidate this cache entry in the cached poll list 21627c478bd9Sstevel@tonic-gate */ 21637c478bd9Sstevel@tonic-gate newfdlist[i].fd = -1; 21647c478bd9Sstevel@tonic-gate (*fdcntp)++; 21657c478bd9Sstevel@tonic-gate continue; 21667c478bd9Sstevel@tonic-gate } 21677c478bd9Sstevel@tonic-gate /* 21687c478bd9Sstevel@tonic-gate * cache this fd. 21697c478bd9Sstevel@tonic-gate */ 21707c478bd9Sstevel@tonic-gate error = pcache_insert(ps, fp, &pollfdp[i], fdcntp, (ssize_t)i, 21717c478bd9Sstevel@tonic-gate which); 21727c478bd9Sstevel@tonic-gate releasef(fd); 21737c478bd9Sstevel@tonic-gate if (error) { 21747c478bd9Sstevel@tonic-gate /* 21757c478bd9Sstevel@tonic-gate * Here we are halfway through caching a new 21767c478bd9Sstevel@tonic-gate * poll list. Undo everything. 21777c478bd9Sstevel@tonic-gate */ 21787c478bd9Sstevel@tonic-gate pcacheset_remove_list(ps, pollfdp, 0, i, which, 0); 21797c478bd9Sstevel@tonic-gate kmem_free(newfdlist, ps->ps_nfds * sizeof (pollfd_t)); 21807c478bd9Sstevel@tonic-gate pcacheset[which].pcs_pollfd = NULL; 21817c478bd9Sstevel@tonic-gate pcacheset[which].pcs_usradr = NULL; 21827c478bd9Sstevel@tonic-gate break; 21837c478bd9Sstevel@tonic-gate } 21847c478bd9Sstevel@tonic-gate } 21857c478bd9Sstevel@tonic-gate return (error); 21867c478bd9Sstevel@tonic-gate } 21877c478bd9Sstevel@tonic-gate 21887c478bd9Sstevel@tonic-gate /* 21897c478bd9Sstevel@tonic-gate * Called by pollcacheclean() to set the fp to NULL. It also sets the polled 21907c478bd9Sstevel@tonic-gate * events in the pcacheset entries to a special event, 'POLLCLOSED'. Do a 21917c478bd9Sstevel@tonic-gate * pollwakeup to wake any sleeping poller, then remove the polldat from the 21927c478bd9Sstevel@tonic-gate * driver. The routine is called with ps_lock held. 21937c478bd9Sstevel@tonic-gate */ 21947c478bd9Sstevel@tonic-gate void 21957c478bd9Sstevel@tonic-gate pcache_clean_entry(pollstate_t *ps, int fd) 21967c478bd9Sstevel@tonic-gate { 21977c478bd9Sstevel@tonic-gate pollcache_t *pcp; 21987c478bd9Sstevel@tonic-gate polldat_t *pdp; 21997c478bd9Sstevel@tonic-gate int i; 22007c478bd9Sstevel@tonic-gate 22017c478bd9Sstevel@tonic-gate ASSERT(ps != NULL); 22027c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ps->ps_lock)); 22037c478bd9Sstevel@tonic-gate pcp = ps->ps_pcache; 22047c478bd9Sstevel@tonic-gate ASSERT(pcp); 22057c478bd9Sstevel@tonic-gate pdp = pcache_lookup_fd(pcp, fd); 22067c478bd9Sstevel@tonic-gate ASSERT(pdp != NULL); 22077c478bd9Sstevel@tonic-gate /* 22087c478bd9Sstevel@tonic-gate * the corresponding fpollinfo in fi_list has been removed by 22097c478bd9Sstevel@tonic-gate * a close on this fd. Reset the cached fp ptr here. 22107c478bd9Sstevel@tonic-gate */ 22117c478bd9Sstevel@tonic-gate pdp->pd_fp = NULL; 22127c478bd9Sstevel@tonic-gate /* 22137c478bd9Sstevel@tonic-gate * XXX - This routine also touches data in the pcacheset struct.
22147c478bd9Sstevel@tonic-gate * 22157c478bd9Sstevel@tonic-gate * Set the event in the cached poll lists to POLLCLOSED. This invalidates 22167c478bd9Sstevel@tonic-gate * the cached poll fd entry in that poll list, which will force a 22177c478bd9Sstevel@tonic-gate * removal of this cached entry in the next poll(). The cleanup is done 22187c478bd9Sstevel@tonic-gate * at removal time. 22197c478bd9Sstevel@tonic-gate */ 22207c478bd9Sstevel@tonic-gate ASSERT(pdp->pd_ref != NULL); 22217c478bd9Sstevel@tonic-gate for (i = 0; i < ps->ps_nsets; i++) { 22227c478bd9Sstevel@tonic-gate xref_t *refp; 22237c478bd9Sstevel@tonic-gate pollcacheset_t *pcsp; 22247c478bd9Sstevel@tonic-gate 22257c478bd9Sstevel@tonic-gate refp = &pdp->pd_ref[i]; 22267c478bd9Sstevel@tonic-gate if (refp->xf_refcnt) { 22277c478bd9Sstevel@tonic-gate ASSERT(refp->xf_position >= 0); 22287c478bd9Sstevel@tonic-gate pcsp = &ps->ps_pcacheset[i]; 22297c478bd9Sstevel@tonic-gate if (refp->xf_refcnt == 1) { 22307c478bd9Sstevel@tonic-gate pcsp->pcs_pollfd[refp->xf_position].events = 22317c478bd9Sstevel@tonic-gate (short)POLLCLOSED; 22327c478bd9Sstevel@tonic-gate } 22337c478bd9Sstevel@tonic-gate if (refp->xf_refcnt > 1) { 22347c478bd9Sstevel@tonic-gate int j; 22357c478bd9Sstevel@tonic-gate /* 22367c478bd9Sstevel@tonic-gate * mark every matching entry in pcs_pollfd 22377c478bd9Sstevel@tonic-gate */ 22387c478bd9Sstevel@tonic-gate for (j = refp->xf_position; 22397c478bd9Sstevel@tonic-gate j < pcsp->pcs_nfds; j++) { 22407c478bd9Sstevel@tonic-gate if (pcsp->pcs_pollfd[j].fd == fd) { 22417c478bd9Sstevel@tonic-gate pcsp->pcs_pollfd[j].events = 22427c478bd9Sstevel@tonic-gate (short)POLLCLOSED; 22437c478bd9Sstevel@tonic-gate } 22447c478bd9Sstevel@tonic-gate } 22457c478bd9Sstevel@tonic-gate } 22467c478bd9Sstevel@tonic-gate } 22477c478bd9Sstevel@tonic-gate } 22487c478bd9Sstevel@tonic-gate if (pdp->pd_php) { 22497c478bd9Sstevel@tonic-gate pollwakeup(pdp->pd_php, POLLHUP); 22507c478bd9Sstevel@tonic-gate pollhead_delete(pdp->pd_php, pdp); 22517c478bd9Sstevel@tonic-gate pdp->pd_php = NULL; 22527c478bd9Sstevel@tonic-gate } 22537c478bd9Sstevel@tonic-gate } 22547c478bd9Sstevel@tonic-gate 2255*f3bb54f3SPatrick Mooney void 2256*f3bb54f3SPatrick Mooney pcache_wake_parents(pollcache_t *pcp) 2257*f3bb54f3SPatrick Mooney { 2258*f3bb54f3SPatrick Mooney pcachelink_t *pl, *pln; 2259*f3bb54f3SPatrick Mooney 2260*f3bb54f3SPatrick Mooney ASSERT(MUTEX_HELD(&pcp->pc_lock)); 2261*f3bb54f3SPatrick Mooney 2262*f3bb54f3SPatrick Mooney for (pl = pcp->pc_parents; pl != NULL; pl = pln) { 2263*f3bb54f3SPatrick Mooney mutex_enter(&pl->pcl_lock); 2264*f3bb54f3SPatrick Mooney if (pl->pcl_state == PCL_VALID) { 2265*f3bb54f3SPatrick Mooney ASSERT(pl->pcl_parent_pc != NULL); 2266*f3bb54f3SPatrick Mooney cv_broadcast(&pl->pcl_parent_pc->pc_cv); 2267*f3bb54f3SPatrick Mooney } 2268*f3bb54f3SPatrick Mooney pln = pl->pcl_parent_next; 2269*f3bb54f3SPatrick Mooney mutex_exit(&pl->pcl_lock); 2270*f3bb54f3SPatrick Mooney } 2271*f3bb54f3SPatrick Mooney } 2272*f3bb54f3SPatrick Mooney 22737c478bd9Sstevel@tonic-gate /* 2274*f3bb54f3SPatrick Mooney * Initialize the thread's pollstate structure. 2275*f3bb54f3SPatrick Mooney * It will persist for the life of the thread, until it calls pollcleanup().
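 *
 * As an illustrative sketch only (the real callers are the poll() and
 * /dev/poll codepaths; nothing below introduces any new interface), a
 * thread needing its per-thread poll state does roughly:
 *
 *	pollstate_t *ps = pollstate_create();
 *
 *	mutex_enter(&ps->ps_lock);
 *	... work with ps->ps_pcache and ps->ps_pcacheset ...
 *	mutex_exit(&ps->ps_lock);
 *
 * The same structure is then reused by every later poll from this thread.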
22767c478bd9Sstevel@tonic-gate */ 22777c478bd9Sstevel@tonic-gate pollstate_t * 2278*f3bb54f3SPatrick Mooney pollstate_create() 22797c478bd9Sstevel@tonic-gate { 2280*f3bb54f3SPatrick Mooney pollstate_t *ps = curthread->t_pollstate; 22817c478bd9Sstevel@tonic-gate 2282*f3bb54f3SPatrick Mooney if (ps == NULL) { 2283*f3bb54f3SPatrick Mooney /* 2284*f3bb54f3SPatrick Mooney * This is the first time this thread has ever polled, so we 2285*f3bb54f3SPatrick Mooney * have to create its pollstate structure. 2286*f3bb54f3SPatrick Mooney */ 22877c478bd9Sstevel@tonic-gate ps = kmem_zalloc(sizeof (pollstate_t), KM_SLEEP); 22887c478bd9Sstevel@tonic-gate ps->ps_nsets = POLLFDSETS; 22897c478bd9Sstevel@tonic-gate ps->ps_pcacheset = pcacheset_create(ps->ps_nsets); 2290*f3bb54f3SPatrick Mooney curthread->t_pollstate = ps; 2291*f3bb54f3SPatrick Mooney } else { 2292*f3bb54f3SPatrick Mooney ASSERT(ps->ps_depth == 0); 2293*f3bb54f3SPatrick Mooney ASSERT(ps->ps_flags == 0); 2294*f3bb54f3SPatrick Mooney ASSERT(ps->ps_pc_stack[0] == 0); 2295*f3bb54f3SPatrick Mooney } 22967c478bd9Sstevel@tonic-gate return (ps); 22977c478bd9Sstevel@tonic-gate } 22987c478bd9Sstevel@tonic-gate 22997c478bd9Sstevel@tonic-gate void 23007c478bd9Sstevel@tonic-gate pollstate_destroy(pollstate_t *ps) 23017c478bd9Sstevel@tonic-gate { 23027c478bd9Sstevel@tonic-gate if (ps->ps_pollfd != NULL) { 23037c478bd9Sstevel@tonic-gate kmem_free(ps->ps_pollfd, ps->ps_nfds * sizeof (pollfd_t)); 23047c478bd9Sstevel@tonic-gate ps->ps_pollfd = NULL; 23057c478bd9Sstevel@tonic-gate } 23067c478bd9Sstevel@tonic-gate if (ps->ps_pcache != NULL) { 23077c478bd9Sstevel@tonic-gate pcache_destroy(ps->ps_pcache); 23087c478bd9Sstevel@tonic-gate ps->ps_pcache = NULL; 23097c478bd9Sstevel@tonic-gate } 23107c478bd9Sstevel@tonic-gate pcacheset_destroy(ps->ps_pcacheset, ps->ps_nsets); 23117c478bd9Sstevel@tonic-gate ps->ps_pcacheset = NULL; 23127c478bd9Sstevel@tonic-gate if (ps->ps_dpbuf != NULL) { 2313a5eb7107SBryan Cantrill kmem_free(ps->ps_dpbuf, ps->ps_dpbufsize); 23147c478bd9Sstevel@tonic-gate ps->ps_dpbuf = NULL; 23157c478bd9Sstevel@tonic-gate } 23167c478bd9Sstevel@tonic-gate mutex_destroy(&ps->ps_lock); 23177c478bd9Sstevel@tonic-gate kmem_free(ps, sizeof (pollstate_t)); 23187c478bd9Sstevel@tonic-gate } 23197c478bd9Sstevel@tonic-gate 2320*f3bb54f3SPatrick Mooney static int 2321*f3bb54f3SPatrick Mooney pollstate_contend(pollstate_t *ps, pollcache_t *pcp) 2322*f3bb54f3SPatrick Mooney { 2323*f3bb54f3SPatrick Mooney pollstate_t *rem, *next; 2324*f3bb54f3SPatrick Mooney pollcache_t *desired_pc; 2325*f3bb54f3SPatrick Mooney int result = 0, depth_total; 2326*f3bb54f3SPatrick Mooney 2327*f3bb54f3SPatrick Mooney mutex_enter(&pollstate_contenders_lock); 2328*f3bb54f3SPatrick Mooney /* 2329*f3bb54f3SPatrick Mooney * There is a small chance that the pollcache of interest became 2330*f3bb54f3SPatrick Mooney * available while we were waiting on the contenders lock. 2331*f3bb54f3SPatrick Mooney */ 2332*f3bb54f3SPatrick Mooney if (mutex_tryenter(&pcp->pc_lock) != 0) { 2333*f3bb54f3SPatrick Mooney goto out; 2334*f3bb54f3SPatrick Mooney } 2335*f3bb54f3SPatrick Mooney 2336*f3bb54f3SPatrick Mooney /* 2337*f3bb54f3SPatrick Mooney * Walk the list of contended pollstates, searching for evidence of a 2338*f3bb54f3SPatrick Mooney * deadlock condition. 
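 *
 * A minimal example of what this walk is looking for (illustrative only):
 * thread A holds pcA->pc_lock and is contending here for pcB, while thread
 * B already holds pcB->pc_lock and sits on this list contending for pcA.
 * Starting from A, the walk finds B holding the desired pollcache while
 * blocked on one that A holds, so B is flagged POLLSTATE_STALEMATE and A
 * reports failure (-1) instead of deadlocking.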
2339*f3bb54f3SPatrick Mooney */ 2340*f3bb54f3SPatrick Mooney depth_total = ps->ps_depth; 2341*f3bb54f3SPatrick Mooney desired_pc = pcp; 2342*f3bb54f3SPatrick Mooney for (rem = pollstate_contenders; rem != NULL; rem = next) { 2343*f3bb54f3SPatrick Mooney int i, j; 2344*f3bb54f3SPatrick Mooney next = rem->ps_contend_nextp; 2345*f3bb54f3SPatrick Mooney 2346*f3bb54f3SPatrick Mooney /* Is this pollstate holding the pollcache of interest? */ 2347*f3bb54f3SPatrick Mooney for (i = 0; i < rem->ps_depth; i++) { 2348*f3bb54f3SPatrick Mooney if (rem->ps_pc_stack[i] != desired_pc) { 2349*f3bb54f3SPatrick Mooney continue; 2350*f3bb54f3SPatrick Mooney } 2351*f3bb54f3SPatrick Mooney 2352*f3bb54f3SPatrick Mooney /* 2353*f3bb54f3SPatrick Mooney * The remote pollstate holds the pollcache lock we 2354*f3bb54f3SPatrick Mooney * desire. If it is waiting on a pollcache we hold, 2355*f3bb54f3SPatrick Mooney * then we can report the obvious deadlock. 2356*f3bb54f3SPatrick Mooney */ 2357*f3bb54f3SPatrick Mooney ASSERT(rem->ps_contend_pc != NULL); 2358*f3bb54f3SPatrick Mooney for (j = 0; j < ps->ps_depth; j++) { 2359*f3bb54f3SPatrick Mooney if (rem->ps_contend_pc == ps->ps_pc_stack[j]) { 2360*f3bb54f3SPatrick Mooney rem->ps_flags |= POLLSTATE_STALEMATE; 2361*f3bb54f3SPatrick Mooney result = -1; 2362*f3bb54f3SPatrick Mooney goto out; 2363*f3bb54f3SPatrick Mooney } 2364*f3bb54f3SPatrick Mooney } 2365*f3bb54f3SPatrick Mooney 2366*f3bb54f3SPatrick Mooney /* 2367*f3bb54f3SPatrick Mooney * The remote pollstate is not blocking on a pollcache 2368*f3bb54f3SPatrick Mooney * which would deadlock against us. That pollcache 2369*f3bb54f3SPatrick Mooney * may, however, be held by a pollstate which would 2370*f3bb54f3SPatrick Mooney * result in a deadlock. 2371*f3bb54f3SPatrick Mooney * 2372*f3bb54f3SPatrick Mooney * To detect such a condition, we continue walking 2373*f3bb54f3SPatrick Mooney * through the list using the pollcache blocking the 2374*f3bb54f3SPatrick Mooney * remote thread as our new search target. 2375*f3bb54f3SPatrick Mooney * 2376*f3bb54f3SPatrick Mooney * Return to the front of pollstate_contenders since it 2377*f3bb54f3SPatrick Mooney * is not ordered to guarantee complete dependency 2378*f3bb54f3SPatrick Mooney * traversal. The below depth tracking places an upper 2379*f3bb54f3SPatrick Mooney * bound on iterations. 2380*f3bb54f3SPatrick Mooney */ 2381*f3bb54f3SPatrick Mooney desired_pc = rem->ps_contend_pc; 2382*f3bb54f3SPatrick Mooney next = pollstate_contenders; 2383*f3bb54f3SPatrick Mooney 2384*f3bb54f3SPatrick Mooney /* 2385*f3bb54f3SPatrick Mooney * The recursion depth of the remote pollstate is used 2386*f3bb54f3SPatrick Mooney * to calculate a final depth for the local /dev/poll 2387*f3bb54f3SPatrick Mooney * recursion, since those locks will be acquired 2388*f3bb54f3SPatrick Mooney * eventually. If that value exceeds the defined 2389*f3bb54f3SPatrick Mooney * limit, we can report the failure now instead of 2390*f3bb54f3SPatrick Mooney * recursing to that failure depth. 2391*f3bb54f3SPatrick Mooney */ 2392*f3bb54f3SPatrick Mooney depth_total += (rem->ps_depth - i); 2393*f3bb54f3SPatrick Mooney if (depth_total >= POLLMAXDEPTH) { 2394*f3bb54f3SPatrick Mooney result = -1; 2395*f3bb54f3SPatrick Mooney goto out; 2396*f3bb54f3SPatrick Mooney } 2397*f3bb54f3SPatrick Mooney } 2398*f3bb54f3SPatrick Mooney } 2399*f3bb54f3SPatrick Mooney 2400*f3bb54f3SPatrick Mooney /* 2401*f3bb54f3SPatrick Mooney * No deadlock partner was found. 
The only course of action is to 2402*f3bb54f3SPatrick Mooney * record ourself as a contended pollstate and wait for the pollcache 2403*f3bb54f3SPatrick Mooney * mutex to become available. 2404*f3bb54f3SPatrick Mooney */ 2405*f3bb54f3SPatrick Mooney ps->ps_contend_pc = pcp; 2406*f3bb54f3SPatrick Mooney ps->ps_contend_nextp = pollstate_contenders; 2407*f3bb54f3SPatrick Mooney ps->ps_contend_pnextp = &pollstate_contenders; 2408*f3bb54f3SPatrick Mooney if (pollstate_contenders != NULL) { 2409*f3bb54f3SPatrick Mooney pollstate_contenders->ps_contend_pnextp = 2410*f3bb54f3SPatrick Mooney &ps->ps_contend_nextp; 2411*f3bb54f3SPatrick Mooney } 2412*f3bb54f3SPatrick Mooney pollstate_contenders = ps; 2413*f3bb54f3SPatrick Mooney 2414*f3bb54f3SPatrick Mooney mutex_exit(&pollstate_contenders_lock); 2415*f3bb54f3SPatrick Mooney mutex_enter(&pcp->pc_lock); 2416*f3bb54f3SPatrick Mooney mutex_enter(&pollstate_contenders_lock); 2417*f3bb54f3SPatrick Mooney 2418*f3bb54f3SPatrick Mooney /* 2419*f3bb54f3SPatrick Mooney * Our acquisition of the pollcache mutex may be due to another thread 2420*f3bb54f3SPatrick Mooney * giving up in the face of deadlock with us. If that is the case, 2421*f3bb54f3SPatrick Mooney * we too should report the failure. 2422*f3bb54f3SPatrick Mooney */ 2423*f3bb54f3SPatrick Mooney if ((ps->ps_flags & POLLSTATE_STALEMATE) != 0) { 2424*f3bb54f3SPatrick Mooney result = -1; 2425*f3bb54f3SPatrick Mooney ps->ps_flags &= ~POLLSTATE_STALEMATE; 2426*f3bb54f3SPatrick Mooney mutex_exit(&pcp->pc_lock); 2427*f3bb54f3SPatrick Mooney } 2428*f3bb54f3SPatrick Mooney 2429*f3bb54f3SPatrick Mooney /* Remove ourself from the contenders list. */ 2430*f3bb54f3SPatrick Mooney if (ps->ps_contend_nextp != NULL) { 2431*f3bb54f3SPatrick Mooney ps->ps_contend_nextp->ps_contend_pnextp = 2432*f3bb54f3SPatrick Mooney ps->ps_contend_pnextp; 2433*f3bb54f3SPatrick Mooney } 2434*f3bb54f3SPatrick Mooney *ps->ps_contend_pnextp = ps->ps_contend_nextp; 2435*f3bb54f3SPatrick Mooney ps->ps_contend_pc = NULL; 2436*f3bb54f3SPatrick Mooney ps->ps_contend_nextp = NULL; 2437*f3bb54f3SPatrick Mooney ps->ps_contend_pnextp = NULL; 2438*f3bb54f3SPatrick Mooney 2439*f3bb54f3SPatrick Mooney out: 2440*f3bb54f3SPatrick Mooney mutex_exit(&pollstate_contenders_lock); 2441*f3bb54f3SPatrick Mooney return (result); 2442*f3bb54f3SPatrick Mooney } 2443*f3bb54f3SPatrick Mooney 2444*f3bb54f3SPatrick Mooney int 2445*f3bb54f3SPatrick Mooney pollstate_enter(pollcache_t *pcp) 2446*f3bb54f3SPatrick Mooney { 2447*f3bb54f3SPatrick Mooney pollstate_t *ps = curthread->t_pollstate; 2448*f3bb54f3SPatrick Mooney int i; 2449*f3bb54f3SPatrick Mooney 2450*f3bb54f3SPatrick Mooney if (ps == NULL) { 2451*f3bb54f3SPatrick Mooney /* 2452*f3bb54f3SPatrick Mooney * The thread pollstate may not be initialized if VOP_POLL is 2453*f3bb54f3SPatrick Mooney * called on a recursion-enabled /dev/poll handle from outside 2454*f3bb54f3SPatrick Mooney * the poll() or /dev/poll codepaths. 2455*f3bb54f3SPatrick Mooney */ 2456*f3bb54f3SPatrick Mooney return (PSE_FAIL_POLLSTATE); 2457*f3bb54f3SPatrick Mooney } 2458*f3bb54f3SPatrick Mooney if (ps->ps_depth >= POLLMAXDEPTH) { 2459*f3bb54f3SPatrick Mooney return (PSE_FAIL_DEPTH); 2460*f3bb54f3SPatrick Mooney } 2461*f3bb54f3SPatrick Mooney /* 2462*f3bb54f3SPatrick Mooney * Check the desired pollcache against pollcaches we already have 2463*f3bb54f3SPatrick Mooney * locked. Such a loop is the most simple deadlock scenario. 
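 *
 * As a concrete (illustrative) example: if recursion-enabled /dev/poll
 * handle A has handle B cached and B in turn has A cached, polling A
 * recurses into VOP_POLL on B and then back toward A.  By then A's
 * pollcache is already on ps_pc_stack, so the cycle is caught here and
 * reported as PSE_FAIL_LOOP rather than self-deadlocking on pc_lock.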
2464*f3bb54f3SPatrick Mooney */ 2465*f3bb54f3SPatrick Mooney for (i = 0; i < ps->ps_depth; i++) { 2466*f3bb54f3SPatrick Mooney if (ps->ps_pc_stack[i] == pcp) { 2467*f3bb54f3SPatrick Mooney return (PSE_FAIL_LOOP); 2468*f3bb54f3SPatrick Mooney } 2469*f3bb54f3SPatrick Mooney } 2470*f3bb54f3SPatrick Mooney ASSERT(ps->ps_pc_stack[i] == NULL); 2471*f3bb54f3SPatrick Mooney 2472*f3bb54f3SPatrick Mooney if (ps->ps_depth == 0) { 2473*f3bb54f3SPatrick Mooney /* Locking the initial pollcache requires no caution */ 2474*f3bb54f3SPatrick Mooney mutex_enter(&pcp->pc_lock); 2475*f3bb54f3SPatrick Mooney } else if (mutex_tryenter(&pcp->pc_lock) == 0) { 2476*f3bb54f3SPatrick Mooney if (pollstate_contend(ps, pcp) != 0) { 2477*f3bb54f3SPatrick Mooney /* This pollcache cannot safely be locked. */ 2478*f3bb54f3SPatrick Mooney return (PSE_FAIL_DEADLOCK); 2479*f3bb54f3SPatrick Mooney } 2480*f3bb54f3SPatrick Mooney } 2481*f3bb54f3SPatrick Mooney 2482*f3bb54f3SPatrick Mooney ps->ps_pc_stack[ps->ps_depth++] = pcp; 2483*f3bb54f3SPatrick Mooney return (PSE_SUCCESS); 2484*f3bb54f3SPatrick Mooney } 2485*f3bb54f3SPatrick Mooney 2486*f3bb54f3SPatrick Mooney void 2487*f3bb54f3SPatrick Mooney pollstate_exit(pollcache_t *pcp) 2488*f3bb54f3SPatrick Mooney { 2489*f3bb54f3SPatrick Mooney pollstate_t *ps = curthread->t_pollstate; 2490*f3bb54f3SPatrick Mooney 2491*f3bb54f3SPatrick Mooney VERIFY(ps != NULL); 2492*f3bb54f3SPatrick Mooney VERIFY(ps->ps_pc_stack[ps->ps_depth - 1] == pcp); 2493*f3bb54f3SPatrick Mooney 2494*f3bb54f3SPatrick Mooney mutex_exit(&pcp->pc_lock); 2495*f3bb54f3SPatrick Mooney ps->ps_pc_stack[--ps->ps_depth] = NULL; 2496*f3bb54f3SPatrick Mooney VERIFY(ps->ps_depth >= 0); 2497*f3bb54f3SPatrick Mooney } 2498*f3bb54f3SPatrick Mooney 2499*f3bb54f3SPatrick Mooney 25007c478bd9Sstevel@tonic-gate /* 25017c478bd9Sstevel@tonic-gate * We are holding the appropriate uf_lock entering this routine. 25027c478bd9Sstevel@tonic-gate * Bump up the pc_busy count to prevent the thread from exiting. 25037c478bd9Sstevel@tonic-gate */ 25047c478bd9Sstevel@tonic-gate void 25057c478bd9Sstevel@tonic-gate pollblockexit(fpollinfo_t *fpip) 25067c478bd9Sstevel@tonic-gate { 25077c478bd9Sstevel@tonic-gate for (; fpip; fpip = fpip->fp_next) { 25087c478bd9Sstevel@tonic-gate pollcache_t *pcp = fpip->fp_thread->t_pollstate->ps_pcache; 25097c478bd9Sstevel@tonic-gate 25107c478bd9Sstevel@tonic-gate mutex_enter(&pcp->pc_no_exit); 25117c478bd9Sstevel@tonic-gate pcp->pc_busy++; /* prevents exit()'s */ 25127c478bd9Sstevel@tonic-gate mutex_exit(&pcp->pc_no_exit); 25137c478bd9Sstevel@tonic-gate } 25147c478bd9Sstevel@tonic-gate } 25157c478bd9Sstevel@tonic-gate 25167c478bd9Sstevel@tonic-gate /* 25177c478bd9Sstevel@tonic-gate * Complete phase 2 of cached poll fd cleanup. Call pcache_clean_entry to mark 25187c478bd9Sstevel@tonic-gate * the pcacheset events field POLLCLOSED to force the next poll() to remove 25197c478bd9Sstevel@tonic-gate * this cache entry. We can't clean up the polldat entry here because an 25207c478bd9Sstevel@tonic-gate * lwp blocked in poll() needs the info to return. Wake up anyone blocked in 25217c478bd9Sstevel@tonic-gate * poll and let the exiting lwp go. No lock is held upon entry. So it's OK for 25227c478bd9Sstevel@tonic-gate * pcache_clean_entry to call pollwakeup().
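 *
 * A rough sketch of how pollblockexit() and this routine pair up on the
 * close path (illustrative only; the actual caller is the fd close code,
 * which is not in this file):
 *
 *	pollblockexit(fpip);		with uf_lock held: bump pc_busy so
 *					the polling threads cannot exit
 *	... drop uf_lock ...
 *	pollcacheclean(fpip, fd);	no locks held: mark POLLCLOSED,
 *					pollwakeup(), then drop pc_busy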
25237c478bd9Sstevel@tonic-gate */ 25247c478bd9Sstevel@tonic-gate void 25257c478bd9Sstevel@tonic-gate pollcacheclean(fpollinfo_t *fip, int fd) 25267c478bd9Sstevel@tonic-gate { 25277c478bd9Sstevel@tonic-gate struct fpollinfo *fpip, *fpip2; 25287c478bd9Sstevel@tonic-gate 25297c478bd9Sstevel@tonic-gate fpip = fip; 25307c478bd9Sstevel@tonic-gate while (fpip) { 25317c478bd9Sstevel@tonic-gate pollstate_t *ps = fpip->fp_thread->t_pollstate; 25327c478bd9Sstevel@tonic-gate pollcache_t *pcp = ps->ps_pcache; 25337c478bd9Sstevel@tonic-gate 25347c478bd9Sstevel@tonic-gate mutex_enter(&ps->ps_lock); 25357c478bd9Sstevel@tonic-gate pcache_clean_entry(ps, fd); 25367c478bd9Sstevel@tonic-gate mutex_exit(&ps->ps_lock); 25377c478bd9Sstevel@tonic-gate mutex_enter(&pcp->pc_no_exit); 25387c478bd9Sstevel@tonic-gate pcp->pc_busy--; 25397c478bd9Sstevel@tonic-gate if (pcp->pc_busy == 0) { 25407c478bd9Sstevel@tonic-gate /* 25417c478bd9Sstevel@tonic-gate * Wake up the thread waiting in 25427c478bd9Sstevel@tonic-gate * thread_exit(). 25437c478bd9Sstevel@tonic-gate */ 25447c478bd9Sstevel@tonic-gate cv_signal(&pcp->pc_busy_cv); 25457c478bd9Sstevel@tonic-gate } 25467c478bd9Sstevel@tonic-gate mutex_exit(&pcp->pc_no_exit); 25477c478bd9Sstevel@tonic-gate 25487c478bd9Sstevel@tonic-gate fpip2 = fpip; 25497c478bd9Sstevel@tonic-gate fpip = fpip->fp_next; 25507c478bd9Sstevel@tonic-gate kmem_free(fpip2, sizeof (fpollinfo_t)); 25517c478bd9Sstevel@tonic-gate } 25527c478bd9Sstevel@tonic-gate } 25537c478bd9Sstevel@tonic-gate 25547c478bd9Sstevel@tonic-gate /* 25557c478bd9Sstevel@tonic-gate * One of the cache line counters is wrapping around. Reset all cache line 25567c478bd9Sstevel@tonic-gate * counters to zero except one. This is simplistic, but probably works 25577c478bd9Sstevel@tonic-gate * effectively. 25587c478bd9Sstevel@tonic-gate */ 25597c478bd9Sstevel@tonic-gate void 25607c478bd9Sstevel@tonic-gate pcacheset_reset_count(pollstate_t *ps, int index) 25617c478bd9Sstevel@tonic-gate { 25627c478bd9Sstevel@tonic-gate int i; 25637c478bd9Sstevel@tonic-gate 25647c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ps->ps_lock)); 25657c478bd9Sstevel@tonic-gate for (i = 0; i < ps->ps_nsets; i++) { 25667c478bd9Sstevel@tonic-gate if (ps->ps_pcacheset[i].pcs_pollfd != NULL) { 25677c478bd9Sstevel@tonic-gate ps->ps_pcacheset[i].pcs_count = 0; 25687c478bd9Sstevel@tonic-gate } 25697c478bd9Sstevel@tonic-gate } 25707c478bd9Sstevel@tonic-gate ps->ps_pcacheset[index].pcs_count = 1; 25717c478bd9Sstevel@tonic-gate } 25727c478bd9Sstevel@tonic-gate 25737c478bd9Sstevel@tonic-gate /* 25747c478bd9Sstevel@tonic-gate * This routine implements the poll cache list replacement policy. 25757c478bd9Sstevel@tonic-gate * It currently chooses the "least used" entry.
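 *
 * For instance (made-up counts): with cached sets whose pcs_count values
 * are { 5, 2, 7 }, the set at index 1 is picked, its pcs_count is reset
 * to 0, and the returned index tells the caller which slot to reuse for
 * the new poll list.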
25767c478bd9Sstevel@tonic-gate */ 25777c478bd9Sstevel@tonic-gate int 25787c478bd9Sstevel@tonic-gate pcacheset_replace(pollstate_t *ps) 25797c478bd9Sstevel@tonic-gate { 25807c478bd9Sstevel@tonic-gate int i; 25817c478bd9Sstevel@tonic-gate int index = 0; 25827c478bd9Sstevel@tonic-gate 25837c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ps->ps_lock)); 25847c478bd9Sstevel@tonic-gate for (i = 1; i < ps->ps_nsets; i++) { 25857c478bd9Sstevel@tonic-gate if (ps->ps_pcacheset[index].pcs_count > 25867c478bd9Sstevel@tonic-gate ps->ps_pcacheset[i].pcs_count) { 25877c478bd9Sstevel@tonic-gate index = i; 25887c478bd9Sstevel@tonic-gate } 25897c478bd9Sstevel@tonic-gate } 25907c478bd9Sstevel@tonic-gate ps->ps_pcacheset[index].pcs_count = 0; 25917c478bd9Sstevel@tonic-gate return (index); 25927c478bd9Sstevel@tonic-gate } 25937c478bd9Sstevel@tonic-gate 25947c478bd9Sstevel@tonic-gate /* 25957c478bd9Sstevel@tonic-gate * This routine is called by strclose to remove any remaining polldat structs 25967c478bd9Sstevel@tonic-gate * on the pollhead list of the device being closed. There are two reasons why 25977c478bd9Sstevel@tonic-gate * the polldat structures may still remain on the pollhead list: 25987c478bd9Sstevel@tonic-gate * 25997c478bd9Sstevel@tonic-gate * (1) The layered device (e.g. the console driver). 26007c478bd9Sstevel@tonic-gate * In this case, the existence of a polldat implies that the thread putting 26017c478bd9Sstevel@tonic-gate * the polldat on this list has not exited yet. Before the thread exits, it 26027c478bd9Sstevel@tonic-gate * will have to hold this pollhead lock to remove the polldat. So holding the 26037c478bd9Sstevel@tonic-gate * pollhead lock here effectively prevents the thread which put the polldat 26047c478bd9Sstevel@tonic-gate * on this list from exiting. 26057c478bd9Sstevel@tonic-gate * 26067c478bd9Sstevel@tonic-gate * (2) /dev/poll. 26077c478bd9Sstevel@tonic-gate * When a polled fd is cached in /dev/poll, its polldat will remain on the 26087c478bd9Sstevel@tonic-gate * pollhead list if the process has not done a POLLREMOVE before closing the 26097c478bd9Sstevel@tonic-gate * polled fd. We just unlink it here. 26107c478bd9Sstevel@tonic-gate */ 26117c478bd9Sstevel@tonic-gate void 26127c478bd9Sstevel@tonic-gate pollhead_clean(pollhead_t *php) 26137c478bd9Sstevel@tonic-gate { 26147c478bd9Sstevel@tonic-gate polldat_t *pdp; 26157c478bd9Sstevel@tonic-gate 26167c478bd9Sstevel@tonic-gate /* 26177c478bd9Sstevel@tonic-gate * In case (1), while we must prevent the thread in question from 26187c478bd9Sstevel@tonic-gate * exiting, we must also obey the proper locking order, i.e. 26197c478bd9Sstevel@tonic-gate * (ps_lock -> phlock). 26207c478bd9Sstevel@tonic-gate */ 26217c478bd9Sstevel@tonic-gate PH_ENTER(php); 26227c478bd9Sstevel@tonic-gate while (php->ph_list != NULL) { 26237c478bd9Sstevel@tonic-gate pollstate_t *ps; 26247c478bd9Sstevel@tonic-gate pollcache_t *pcp; 26257c478bd9Sstevel@tonic-gate 26267c478bd9Sstevel@tonic-gate pdp = php->ph_list; 26277c478bd9Sstevel@tonic-gate ASSERT(pdp->pd_php == php); 26287c478bd9Sstevel@tonic-gate if (pdp->pd_thread == NULL) { 26297c478bd9Sstevel@tonic-gate /* 26307c478bd9Sstevel@tonic-gate * This is case (2). Since the ph_lock is sufficient 26317c478bd9Sstevel@tonic-gate * to synchronize this lwp with any other /dev/poll 26327c478bd9Sstevel@tonic-gate * lwp, just unlink the polldat.
26337c478bd9Sstevel@tonic-gate */ 26347c478bd9Sstevel@tonic-gate php->ph_list = pdp->pd_next; 26357c478bd9Sstevel@tonic-gate pdp->pd_php = NULL; 26367c478bd9Sstevel@tonic-gate pdp->pd_next = NULL; 26377c478bd9Sstevel@tonic-gate continue; 26387c478bd9Sstevel@tonic-gate } 26397c478bd9Sstevel@tonic-gate ps = pdp->pd_thread->t_pollstate; 26407c478bd9Sstevel@tonic-gate ASSERT(ps != NULL); 26417c478bd9Sstevel@tonic-gate pcp = pdp->pd_pcache; 26427c478bd9Sstevel@tonic-gate ASSERT(pcp != NULL); 26437c478bd9Sstevel@tonic-gate mutex_enter(&pcp->pc_no_exit); 26447c478bd9Sstevel@tonic-gate pcp->pc_busy++; /* prevents exit()'s */ 26457c478bd9Sstevel@tonic-gate mutex_exit(&pcp->pc_no_exit); 26467c478bd9Sstevel@tonic-gate /* 26477c478bd9Sstevel@tonic-gate * Now get the locks in proper order to avoid deadlock. 26487c478bd9Sstevel@tonic-gate */ 26497c478bd9Sstevel@tonic-gate PH_EXIT(php); 26507c478bd9Sstevel@tonic-gate mutex_enter(&ps->ps_lock); 26517c478bd9Sstevel@tonic-gate /* 26527c478bd9Sstevel@tonic-gate * while we dropped the pollhead lock, the element could be 26537c478bd9Sstevel@tonic-gate * taken off the list already. 26547c478bd9Sstevel@tonic-gate */ 26557c478bd9Sstevel@tonic-gate PH_ENTER(php); 26567c478bd9Sstevel@tonic-gate if (pdp->pd_php == php) { 26577c478bd9Sstevel@tonic-gate ASSERT(pdp == php->ph_list); 26587c478bd9Sstevel@tonic-gate php->ph_list = pdp->pd_next; 26597c478bd9Sstevel@tonic-gate pdp->pd_php = NULL; 26607c478bd9Sstevel@tonic-gate pdp->pd_next = NULL; 26617c478bd9Sstevel@tonic-gate } 26627c478bd9Sstevel@tonic-gate PH_EXIT(php); 26637c478bd9Sstevel@tonic-gate mutex_exit(&ps->ps_lock); 26647c478bd9Sstevel@tonic-gate mutex_enter(&pcp->pc_no_exit); 26657c478bd9Sstevel@tonic-gate pcp->pc_busy--; 26667c478bd9Sstevel@tonic-gate if (pcp->pc_busy == 0) { 26677c478bd9Sstevel@tonic-gate /* 26687c478bd9Sstevel@tonic-gate * Wakeup the thread waiting in 26697c478bd9Sstevel@tonic-gate * thread_exit(). 26707c478bd9Sstevel@tonic-gate */ 26717c478bd9Sstevel@tonic-gate cv_signal(&pcp->pc_busy_cv); 26727c478bd9Sstevel@tonic-gate } 26737c478bd9Sstevel@tonic-gate mutex_exit(&pcp->pc_no_exit); 26747c478bd9Sstevel@tonic-gate PH_ENTER(php); 26757c478bd9Sstevel@tonic-gate } 26767c478bd9Sstevel@tonic-gate PH_EXIT(php); 26777c478bd9Sstevel@tonic-gate } 26787c478bd9Sstevel@tonic-gate 26797c478bd9Sstevel@tonic-gate /* 26807c478bd9Sstevel@tonic-gate * The remove_list is called to cleanup a partially cached 'current' list or 26817c478bd9Sstevel@tonic-gate * to remove a partial list which is no longer cached. The flag value of 1 26827c478bd9Sstevel@tonic-gate * indicates the second case. 
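 *
 * For example, the error path of pcacheset_cache_list() above uses the
 * first form to undo a half-built cache:
 *
 *	pcacheset_remove_list(ps, pollfdp, 0, i, which, 0);
 *
 * With flag == 1 (those callers are not shown in this part of the file),
 * entries whose revents carry POLLNVAL are torn down as well, per the
 * flag check below.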
26837c478bd9Sstevel@tonic-gate */ 26847c478bd9Sstevel@tonic-gate void 26857c478bd9Sstevel@tonic-gate pcacheset_remove_list(pollstate_t *ps, pollfd_t *pollfdp, int start, int end, 26867c478bd9Sstevel@tonic-gate int cacheindex, int flag) 26877c478bd9Sstevel@tonic-gate { 26887c478bd9Sstevel@tonic-gate int i; 26897c478bd9Sstevel@tonic-gate 26907c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ps->ps_lock)); 26917c478bd9Sstevel@tonic-gate for (i = start; i < end; i++) { 26927c478bd9Sstevel@tonic-gate if ((pollfdp[i].fd >= 0) && 26937c478bd9Sstevel@tonic-gate (flag || !(pollfdp[i].revents & POLLNVAL))) { 26947c478bd9Sstevel@tonic-gate if (pcache_delete_fd(ps, pollfdp[i].fd, i, cacheindex, 26957c478bd9Sstevel@tonic-gate (uint_t)pollfdp[i].events)) { 26967c478bd9Sstevel@tonic-gate int j; 26977c478bd9Sstevel@tonic-gate int fd = pollfdp[i].fd; 26987c478bd9Sstevel@tonic-gate 26997c478bd9Sstevel@tonic-gate for (j = i + 1; j < end; j++) { 27007c478bd9Sstevel@tonic-gate if (pollfdp[j].fd == fd) { 27017c478bd9Sstevel@tonic-gate pcache_update_xref( 27027c478bd9Sstevel@tonic-gate ps->ps_pcache, fd, 27037c478bd9Sstevel@tonic-gate (ssize_t)j, cacheindex); 27047c478bd9Sstevel@tonic-gate break; 27057c478bd9Sstevel@tonic-gate } 27067c478bd9Sstevel@tonic-gate } 27077c478bd9Sstevel@tonic-gate ASSERT(j <= end); 27087c478bd9Sstevel@tonic-gate } 27097c478bd9Sstevel@tonic-gate } 27107c478bd9Sstevel@tonic-gate } 27117c478bd9Sstevel@tonic-gate } 27127c478bd9Sstevel@tonic-gate 27137c478bd9Sstevel@tonic-gate #ifdef DEBUG 27147c478bd9Sstevel@tonic-gate 27157c478bd9Sstevel@tonic-gate #include<sys/strsubr.h> 27167c478bd9Sstevel@tonic-gate /* 27177c478bd9Sstevel@tonic-gate * make sure curthread is not on anyone's pollhead list any more. 27187c478bd9Sstevel@tonic-gate */ 27197c478bd9Sstevel@tonic-gate static void 27207c478bd9Sstevel@tonic-gate pollcheckphlist() 27217c478bd9Sstevel@tonic-gate { 27227c478bd9Sstevel@tonic-gate int i; 27237c478bd9Sstevel@tonic-gate file_t *fp; 27247c478bd9Sstevel@tonic-gate uf_entry_t *ufp; 27257c478bd9Sstevel@tonic-gate uf_info_t *fip = P_FINFO(curproc); 27267c478bd9Sstevel@tonic-gate struct stdata *stp; 27277c478bd9Sstevel@tonic-gate polldat_t *pdp; 27287c478bd9Sstevel@tonic-gate 27297c478bd9Sstevel@tonic-gate mutex_enter(&fip->fi_lock); 27307c478bd9Sstevel@tonic-gate for (i = 0; i < fip->fi_nfiles; i++) { 27317c478bd9Sstevel@tonic-gate UF_ENTER(ufp, fip, i); 27327c478bd9Sstevel@tonic-gate if ((fp = ufp->uf_file) != NULL) { 27337c478bd9Sstevel@tonic-gate if ((stp = fp->f_vnode->v_stream) != NULL) { 27347c478bd9Sstevel@tonic-gate PH_ENTER(&stp->sd_pollist); 27357c478bd9Sstevel@tonic-gate pdp = stp->sd_pollist.ph_list; 27367c478bd9Sstevel@tonic-gate while (pdp) { 27377c478bd9Sstevel@tonic-gate ASSERT(pdp->pd_thread != curthread); 27387c478bd9Sstevel@tonic-gate pdp = pdp->pd_next; 27397c478bd9Sstevel@tonic-gate } 27407c478bd9Sstevel@tonic-gate PH_EXIT(&stp->sd_pollist); 27417c478bd9Sstevel@tonic-gate } 27427c478bd9Sstevel@tonic-gate } 27437c478bd9Sstevel@tonic-gate UF_EXIT(ufp); 27447c478bd9Sstevel@tonic-gate } 27457c478bd9Sstevel@tonic-gate mutex_exit(&fip->fi_lock); 27467c478bd9Sstevel@tonic-gate } 27477c478bd9Sstevel@tonic-gate 27487c478bd9Sstevel@tonic-gate /* 27497c478bd9Sstevel@tonic-gate * for resolved set poll list, the xref info in the pcache should be 27507c478bd9Sstevel@tonic-gate * consistent with this poll list. 
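 *
 * Concretely (made-up positions): if fd 4 appears at positions 3 and 7
 * of the cached list for this cacheindex, its polldat is expected to
 * carry xf_position == 3 (the first occurrence) and xf_refcnt == 2,
 * which is exactly what the walk below asserts.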
27517c478bd9Sstevel@tonic-gate */ 27527c478bd9Sstevel@tonic-gate static int 27537c478bd9Sstevel@tonic-gate pollcheckxref(pollstate_t *ps, int cacheindex) 27547c478bd9Sstevel@tonic-gate { 27557c478bd9Sstevel@tonic-gate pollfd_t *pollfdp = ps->ps_pcacheset[cacheindex].pcs_pollfd; 27567c478bd9Sstevel@tonic-gate pollcache_t *pcp = ps->ps_pcache; 27577c478bd9Sstevel@tonic-gate polldat_t *pdp; 27587c478bd9Sstevel@tonic-gate int i; 27597c478bd9Sstevel@tonic-gate xref_t *refp; 27607c478bd9Sstevel@tonic-gate 27617c478bd9Sstevel@tonic-gate for (i = 0; i < ps->ps_pcacheset[cacheindex].pcs_nfds; i++) { 27627c478bd9Sstevel@tonic-gate if (pollfdp[i].fd < 0) { 27637c478bd9Sstevel@tonic-gate continue; 27647c478bd9Sstevel@tonic-gate } 27657c478bd9Sstevel@tonic-gate pdp = pcache_lookup_fd(pcp, pollfdp[i].fd); 27667c478bd9Sstevel@tonic-gate ASSERT(pdp != NULL); 27677c478bd9Sstevel@tonic-gate ASSERT(pdp->pd_ref != NULL); 27687c478bd9Sstevel@tonic-gate refp = &pdp->pd_ref[cacheindex]; 27697c478bd9Sstevel@tonic-gate if (refp->xf_position >= 0) { 27707c478bd9Sstevel@tonic-gate ASSERT(refp->xf_refcnt >= 1); 27717c478bd9Sstevel@tonic-gate ASSERT(pollfdp[refp->xf_position].fd == pdp->pd_fd); 27727c478bd9Sstevel@tonic-gate if (refp->xf_refcnt > 1) { 27737c478bd9Sstevel@tonic-gate int j; 27747c478bd9Sstevel@tonic-gate int count = 0; 27757c478bd9Sstevel@tonic-gate 27767c478bd9Sstevel@tonic-gate for (j = refp->xf_position; 27777c478bd9Sstevel@tonic-gate j < ps->ps_pcacheset[cacheindex].pcs_nfds; 27787c478bd9Sstevel@tonic-gate j++) { 27797c478bd9Sstevel@tonic-gate if (pollfdp[j].fd == pdp->pd_fd) { 27807c478bd9Sstevel@tonic-gate count++; 27817c478bd9Sstevel@tonic-gate } 27827c478bd9Sstevel@tonic-gate } 27837c478bd9Sstevel@tonic-gate ASSERT(count == refp->xf_refcnt); 27847c478bd9Sstevel@tonic-gate } 27857c478bd9Sstevel@tonic-gate } 27867c478bd9Sstevel@tonic-gate } 27877c478bd9Sstevel@tonic-gate return (1); 27887c478bd9Sstevel@tonic-gate } 27897c478bd9Sstevel@tonic-gate 27907c478bd9Sstevel@tonic-gate /* 27917c478bd9Sstevel@tonic-gate * For every cached pollfd, its polldat struct should be consistent with 27927c478bd9Sstevel@tonic-gate * what is in the pcacheset lists. 
27937c478bd9Sstevel@tonic-gate */ 27947c478bd9Sstevel@tonic-gate static void 27957c478bd9Sstevel@tonic-gate checkpolldat(pollstate_t *ps) 27967c478bd9Sstevel@tonic-gate { 27977c478bd9Sstevel@tonic-gate pollcache_t *pcp = ps->ps_pcache; 27987c478bd9Sstevel@tonic-gate polldat_t **hashtbl; 27997c478bd9Sstevel@tonic-gate int i; 28007c478bd9Sstevel@tonic-gate 28017c478bd9Sstevel@tonic-gate hashtbl = pcp->pc_hash; 28027c478bd9Sstevel@tonic-gate for (i = 0; i < pcp->pc_hashsize; i++) { 28037c478bd9Sstevel@tonic-gate polldat_t *pdp; 28047c478bd9Sstevel@tonic-gate 28057c478bd9Sstevel@tonic-gate for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) { 28067c478bd9Sstevel@tonic-gate ASSERT(pdp->pd_ref != NULL); 28077c478bd9Sstevel@tonic-gate if (pdp->pd_count > 0) { 28087c478bd9Sstevel@tonic-gate xref_t *refp; 28097c478bd9Sstevel@tonic-gate int j; 28107c478bd9Sstevel@tonic-gate pollcacheset_t *pcsp; 28117c478bd9Sstevel@tonic-gate pollfd_t *pollfd; 28127c478bd9Sstevel@tonic-gate 28137c478bd9Sstevel@tonic-gate for (j = 0; j < ps->ps_nsets; j++) { 28147c478bd9Sstevel@tonic-gate refp = &pdp->pd_ref[j]; 28157c478bd9Sstevel@tonic-gate if (refp->xf_refcnt > 0) { 28167c478bd9Sstevel@tonic-gate pcsp = &ps->ps_pcacheset[j]; 28177c478bd9Sstevel@tonic-gate ASSERT(refp->xf_position < pcsp->pcs_nfds); 28187c478bd9Sstevel@tonic-gate pollfd = pcsp->pcs_pollfd; 28197c478bd9Sstevel@tonic-gate ASSERT(pdp->pd_fd == pollfd[refp->xf_position].fd); 28207c478bd9Sstevel@tonic-gate } 28217c478bd9Sstevel@tonic-gate } 28227c478bd9Sstevel@tonic-gate } 28237c478bd9Sstevel@tonic-gate } 28247c478bd9Sstevel@tonic-gate } 28257c478bd9Sstevel@tonic-gate } 28267c478bd9Sstevel@tonic-gate 28277c478bd9Sstevel@tonic-gate /* 28287c478bd9Sstevel@tonic-gate * every wfd element on ph_list must have a corresponding fpollinfo on the 28297c478bd9Sstevel@tonic-gate * uf_fpollinfo list. This is a variation of infpollinfo() w/o holding locks. 28307c478bd9Sstevel@tonic-gate */ 28317c478bd9Sstevel@tonic-gate void 28327c478bd9Sstevel@tonic-gate checkwfdlist(vnode_t *vp, fpollinfo_t *fpip) 28337c478bd9Sstevel@tonic-gate { 28347c478bd9Sstevel@tonic-gate stdata_t *stp; 28357c478bd9Sstevel@tonic-gate polldat_t *pdp; 28367c478bd9Sstevel@tonic-gate fpollinfo_t *fpip2; 28377c478bd9Sstevel@tonic-gate 28387c478bd9Sstevel@tonic-gate if ((stp = vp->v_stream) == NULL) { 28397c478bd9Sstevel@tonic-gate return; 28407c478bd9Sstevel@tonic-gate } 28417c478bd9Sstevel@tonic-gate PH_ENTER(&stp->sd_pollist); 28427c478bd9Sstevel@tonic-gate for (pdp = stp->sd_pollist.ph_list; pdp; pdp = pdp->pd_next) { 2843d61dd0c1SPrakash Sangappa if (pdp->pd_thread != NULL && 2844d61dd0c1SPrakash Sangappa pdp->pd_thread->t_procp == curthread->t_procp) { 28457c478bd9Sstevel@tonic-gate for (fpip2 = fpip; fpip2; fpip2 = fpip2->fp_next) { 28467c478bd9Sstevel@tonic-gate if (pdp->pd_thread == fpip2->fp_thread) { 28477c478bd9Sstevel@tonic-gate break; 28487c478bd9Sstevel@tonic-gate } 28497c478bd9Sstevel@tonic-gate } 28507c478bd9Sstevel@tonic-gate ASSERT(fpip2 != NULL); 28517c478bd9Sstevel@tonic-gate } 28527c478bd9Sstevel@tonic-gate } 28537c478bd9Sstevel@tonic-gate PH_EXIT(&stp->sd_pollist); 28547c478bd9Sstevel@tonic-gate } 28557c478bd9Sstevel@tonic-gate 28567c478bd9Sstevel@tonic-gate /* 28577c478bd9Sstevel@tonic-gate * For each cached fd whose bit is not set in bitmap, its revents field in 28587c478bd9Sstevel@tonic-gate * current poll list should be 0. 
28597c478bd9Sstevel@tonic-gate */ 28607c478bd9Sstevel@tonic-gate static int 28617c478bd9Sstevel@tonic-gate pollcheckrevents(pollstate_t *ps, int begin, int end, int cacheindex) 28627c478bd9Sstevel@tonic-gate { 28637c478bd9Sstevel@tonic-gate pollcache_t *pcp = ps->ps_pcache; 28647c478bd9Sstevel@tonic-gate pollfd_t *pollfdp = ps->ps_pollfd; 28657c478bd9Sstevel@tonic-gate int i; 28667c478bd9Sstevel@tonic-gate 28677c478bd9Sstevel@tonic-gate for (i = begin; i < end; i++) { 28687c478bd9Sstevel@tonic-gate polldat_t *pdp; 28697c478bd9Sstevel@tonic-gate 28707c478bd9Sstevel@tonic-gate ASSERT(!BT_TEST(pcp->pc_bitmap, i)); 28717c478bd9Sstevel@tonic-gate pdp = pcache_lookup_fd(pcp, i); 28727c478bd9Sstevel@tonic-gate if (pdp && pdp->pd_fp != NULL) { 28737c478bd9Sstevel@tonic-gate xref_t *refp; 28747c478bd9Sstevel@tonic-gate int entry; 28757c478bd9Sstevel@tonic-gate 28767c478bd9Sstevel@tonic-gate ASSERT(pdp->pd_ref != NULL); 28777c478bd9Sstevel@tonic-gate refp = &pdp->pd_ref[cacheindex]; 28787c478bd9Sstevel@tonic-gate if (refp->xf_refcnt == 0) { 28797c478bd9Sstevel@tonic-gate continue; 28807c478bd9Sstevel@tonic-gate } 28817c478bd9Sstevel@tonic-gate entry = refp->xf_position; 28827c478bd9Sstevel@tonic-gate ASSERT(entry >= 0); 28837c478bd9Sstevel@tonic-gate ASSERT(pollfdp[entry].revents == 0); 28847c478bd9Sstevel@tonic-gate if (refp->xf_refcnt > 1) { 28857c478bd9Sstevel@tonic-gate int j; 28867c478bd9Sstevel@tonic-gate 28877c478bd9Sstevel@tonic-gate for (j = entry + 1; j < ps->ps_nfds; j++) { 28887c478bd9Sstevel@tonic-gate if (pollfdp[j].fd == i) { 28897c478bd9Sstevel@tonic-gate ASSERT(pollfdp[j].revents == 0); 28907c478bd9Sstevel@tonic-gate } 28917c478bd9Sstevel@tonic-gate } 28927c478bd9Sstevel@tonic-gate } 28937c478bd9Sstevel@tonic-gate } 28947c478bd9Sstevel@tonic-gate } 28957c478bd9Sstevel@tonic-gate return (1); 28967c478bd9Sstevel@tonic-gate } 28977c478bd9Sstevel@tonic-gate 28987c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 28997c478bd9Sstevel@tonic-gate 29007c478bd9Sstevel@tonic-gate pollcache_t * 29017c478bd9Sstevel@tonic-gate pcache_alloc() 29027c478bd9Sstevel@tonic-gate { 29037c478bd9Sstevel@tonic-gate return (kmem_zalloc(sizeof (pollcache_t), KM_SLEEP)); 29047c478bd9Sstevel@tonic-gate } 29057c478bd9Sstevel@tonic-gate 29067c478bd9Sstevel@tonic-gate void 29077c478bd9Sstevel@tonic-gate pcache_create(pollcache_t *pcp, nfds_t nfds) 29087c478bd9Sstevel@tonic-gate { 29097c478bd9Sstevel@tonic-gate size_t mapsize; 29107c478bd9Sstevel@tonic-gate 29117c478bd9Sstevel@tonic-gate /* 29127c478bd9Sstevel@tonic-gate * allocate enough bits for the poll fd list 29137c478bd9Sstevel@tonic-gate */ 29147c478bd9Sstevel@tonic-gate if ((mapsize = POLLMAPCHUNK) <= nfds) { 29157c478bd9Sstevel@tonic-gate mapsize = (nfds + POLLMAPCHUNK - 1) & ~(POLLMAPCHUNK - 1); 29167c478bd9Sstevel@tonic-gate } 29177c478bd9Sstevel@tonic-gate pcp->pc_bitmap = kmem_zalloc((mapsize / BT_NBIPUL) * sizeof (ulong_t), 29187c478bd9Sstevel@tonic-gate KM_SLEEP); 29197c478bd9Sstevel@tonic-gate pcp->pc_mapsize = mapsize; 29207c478bd9Sstevel@tonic-gate /* 29217c478bd9Sstevel@tonic-gate * The hash size is at least POLLHASHCHUNKSZ. If user polls a large 29227c478bd9Sstevel@tonic-gate * number of fd to start with, allocate a bigger hash table (to the 29237c478bd9Sstevel@tonic-gate * nearest multiple of POLLHASHCHUNKSZ) because dynamically growing a 29247c478bd9Sstevel@tonic-gate * hash table is expensive. 
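 *
 * For example, assuming a chunk size of 128 (the real POLLHASHCHUNKSZ is
 * defined in poll_impl.h and may differ), nfds == 300 rounds up as
 *
 *	(300 + 128 - 1) & ~(128 - 1) == 384
 *
 * Note that this masking form only lands on an exact multiple when the
 * chunk size is a power of two.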
29257c478bd9Sstevel@tonic-gate */ 29267c478bd9Sstevel@tonic-gate if (nfds < POLLHASHCHUNKSZ) { 29277c478bd9Sstevel@tonic-gate pcp->pc_hashsize = POLLHASHCHUNKSZ; 29287c478bd9Sstevel@tonic-gate } else { 29297c478bd9Sstevel@tonic-gate pcp->pc_hashsize = (nfds + POLLHASHCHUNKSZ - 1) & 29307c478bd9Sstevel@tonic-gate ~(POLLHASHCHUNKSZ - 1); 29317c478bd9Sstevel@tonic-gate } 29327c478bd9Sstevel@tonic-gate pcp->pc_hash = kmem_zalloc(pcp->pc_hashsize * sizeof (polldat_t *), 29337c478bd9Sstevel@tonic-gate KM_SLEEP); 29347c478bd9Sstevel@tonic-gate } 29357c478bd9Sstevel@tonic-gate 29367c478bd9Sstevel@tonic-gate void 29377c478bd9Sstevel@tonic-gate pcache_destroy(pollcache_t *pcp) 29387c478bd9Sstevel@tonic-gate { 29397c478bd9Sstevel@tonic-gate polldat_t **hashtbl; 29407c478bd9Sstevel@tonic-gate int i; 29417c478bd9Sstevel@tonic-gate 29427c478bd9Sstevel@tonic-gate hashtbl = pcp->pc_hash; 29437c478bd9Sstevel@tonic-gate for (i = 0; i < pcp->pc_hashsize; i++) { 29447c478bd9Sstevel@tonic-gate if (hashtbl[i] != NULL) { 29457c478bd9Sstevel@tonic-gate polldat_t *pdp, *pdp2; 29467c478bd9Sstevel@tonic-gate 29477c478bd9Sstevel@tonic-gate pdp = hashtbl[i]; 29487c478bd9Sstevel@tonic-gate while (pdp != NULL) { 29497c478bd9Sstevel@tonic-gate pdp2 = pdp->pd_hashnext; 29507c478bd9Sstevel@tonic-gate if (pdp->pd_ref != NULL) { 29517c478bd9Sstevel@tonic-gate kmem_free(pdp->pd_ref, sizeof (xref_t) * 29527c478bd9Sstevel@tonic-gate pdp->pd_nsets); 29537c478bd9Sstevel@tonic-gate } 29547c478bd9Sstevel@tonic-gate kmem_free(pdp, sizeof (polldat_t)); 29557c478bd9Sstevel@tonic-gate pdp = pdp2; 29567c478bd9Sstevel@tonic-gate pcp->pc_fdcount--; 29577c478bd9Sstevel@tonic-gate } 29587c478bd9Sstevel@tonic-gate } 29597c478bd9Sstevel@tonic-gate } 29607c478bd9Sstevel@tonic-gate ASSERT(pcp->pc_fdcount == 0); 29617c478bd9Sstevel@tonic-gate kmem_free(pcp->pc_hash, sizeof (polldat_t *) * pcp->pc_hashsize); 29627c478bd9Sstevel@tonic-gate kmem_free(pcp->pc_bitmap, 29637c478bd9Sstevel@tonic-gate sizeof (ulong_t) * (pcp->pc_mapsize/BT_NBIPUL)); 29647c478bd9Sstevel@tonic-gate mutex_destroy(&pcp->pc_no_exit); 29657c478bd9Sstevel@tonic-gate mutex_destroy(&pcp->pc_lock); 29667c478bd9Sstevel@tonic-gate cv_destroy(&pcp->pc_cv); 29677c478bd9Sstevel@tonic-gate cv_destroy(&pcp->pc_busy_cv); 29687c478bd9Sstevel@tonic-gate kmem_free(pcp, sizeof (pollcache_t)); 29697c478bd9Sstevel@tonic-gate } 29707c478bd9Sstevel@tonic-gate 29717c478bd9Sstevel@tonic-gate pollcacheset_t * 29727c478bd9Sstevel@tonic-gate pcacheset_create(int nsets) 29737c478bd9Sstevel@tonic-gate { 29747c478bd9Sstevel@tonic-gate return (kmem_zalloc(sizeof (pollcacheset_t) * nsets, KM_SLEEP)); 29757c478bd9Sstevel@tonic-gate } 29767c478bd9Sstevel@tonic-gate 29777c478bd9Sstevel@tonic-gate void 29787c478bd9Sstevel@tonic-gate pcacheset_destroy(pollcacheset_t *pcsp, int nsets) 29797c478bd9Sstevel@tonic-gate { 29807c478bd9Sstevel@tonic-gate int i; 29817c478bd9Sstevel@tonic-gate 29827c478bd9Sstevel@tonic-gate for (i = 0; i < nsets; i++) { 29837c478bd9Sstevel@tonic-gate if (pcsp[i].pcs_pollfd != NULL) { 29847c478bd9Sstevel@tonic-gate kmem_free(pcsp[i].pcs_pollfd, pcsp[i].pcs_nfds * 29857c478bd9Sstevel@tonic-gate sizeof (pollfd_t)); 29867c478bd9Sstevel@tonic-gate } 29877c478bd9Sstevel@tonic-gate } 29887c478bd9Sstevel@tonic-gate kmem_free(pcsp, sizeof (pollcacheset_t) * nsets); 29897c478bd9Sstevel@tonic-gate } 29907c478bd9Sstevel@tonic-gate 29917c478bd9Sstevel@tonic-gate /* 29927c478bd9Sstevel@tonic-gate * Check each duplicated poll fd in the poll list. 
It may be necessary to 29937c478bd9Sstevel@tonic-gate * VOP_POLL the same fd again using different poll events. getf() has been 29947c478bd9Sstevel@tonic-gate * done by the caller. This routine returns 0 if it can successfully process 29957c478bd9Sstevel@tonic-gate * the entire poll fd list. It returns -1 if the underlying vnode has changed 29967c478bd9Sstevel@tonic-gate * during a VOP_POLL, in which case the caller has to repoll. It returns a 29977c478bd9Sstevel@tonic-gate * positive value if VOP_POLL failed. 29987c478bd9Sstevel@tonic-gate */ 29997c478bd9Sstevel@tonic-gate static int 30007c478bd9Sstevel@tonic-gate plist_chkdupfd(file_t *fp, polldat_t *pdp, pollstate_t *psp, pollfd_t *pollfdp, 30017c478bd9Sstevel@tonic-gate int entry, int *fdcntp) 30027c478bd9Sstevel@tonic-gate { 30037c478bd9Sstevel@tonic-gate int i; 30047c478bd9Sstevel@tonic-gate int fd; 30057c478bd9Sstevel@tonic-gate nfds_t nfds = psp->ps_nfds; 30067c478bd9Sstevel@tonic-gate 30077c478bd9Sstevel@tonic-gate fd = pollfdp[entry].fd; 30087c478bd9Sstevel@tonic-gate for (i = entry + 1; i < nfds; i++) { 30097c478bd9Sstevel@tonic-gate if (pollfdp[i].fd == fd) { 30107c478bd9Sstevel@tonic-gate if (pollfdp[i].events == pollfdp[entry].events) { 30117c478bd9Sstevel@tonic-gate if ((pollfdp[i].revents = 30127c478bd9Sstevel@tonic-gate pollfdp[entry].revents) != 0) { 30137c478bd9Sstevel@tonic-gate (*fdcntp)++; 30147c478bd9Sstevel@tonic-gate } 30157c478bd9Sstevel@tonic-gate } else { 30167c478bd9Sstevel@tonic-gate 30177c478bd9Sstevel@tonic-gate int error; 30187c478bd9Sstevel@tonic-gate pollhead_t *php; 30197c478bd9Sstevel@tonic-gate pollcache_t *pcp = psp->ps_pcache; 30207c478bd9Sstevel@tonic-gate 30217c478bd9Sstevel@tonic-gate /* 30227c478bd9Sstevel@tonic-gate * The events are different. Do a VOP_POLL on 30237c478bd9Sstevel@tonic-gate * this fd so that we don't miss any revents. 30247c478bd9Sstevel@tonic-gate */ 30257c478bd9Sstevel@tonic-gate php = NULL; 30267c478bd9Sstevel@tonic-gate ASSERT(curthread->t_pollcache == NULL); 30277c478bd9Sstevel@tonic-gate error = VOP_POLL(fp->f_vnode, 30287c478bd9Sstevel@tonic-gate pollfdp[i].events, 0, 3029da6c28aaSamw &pollfdp[i].revents, &php, NULL); 30307c478bd9Sstevel@tonic-gate if (error) { 30317c478bd9Sstevel@tonic-gate return (error); 30327c478bd9Sstevel@tonic-gate } 30337c478bd9Sstevel@tonic-gate /* 30347c478bd9Sstevel@tonic-gate * Layered devices (e.g. the console driver) 30357c478bd9Sstevel@tonic-gate * may change the vnode and thus the pollhead 30367c478bd9Sstevel@tonic-gate * pointer out from underneath us. 30377c478bd9Sstevel@tonic-gate */ 30387c478bd9Sstevel@tonic-gate if (php != NULL && pdp->pd_php != NULL && 30397c478bd9Sstevel@tonic-gate php != pdp->pd_php) { 30407c478bd9Sstevel@tonic-gate pollhead_delete(pdp->pd_php, pdp); 30417c478bd9Sstevel@tonic-gate pdp->pd_php = php; 30427c478bd9Sstevel@tonic-gate pollhead_insert(php, pdp); 30437c478bd9Sstevel@tonic-gate /* 30447c478bd9Sstevel@tonic-gate * We could have missed a wakeup on the 30457c478bd9Sstevel@tonic-gate * new target device. Make sure the new 30467c478bd9Sstevel@tonic-gate * target gets polled once.
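 *
 * Returning -1 below feeds the repoll contract described in this
 * routine's header comment: the caller (see the "error < 0" handling in
 * the scan loop earlier in this file) jumps back to its retry label, so
 * the freshly set bitmap bit is noticed and the fd is polled again.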
30477c478bd9Sstevel@tonic-gate */ 30487c478bd9Sstevel@tonic-gate BT_SET(pcp->pc_bitmap, fd); 30497c478bd9Sstevel@tonic-gate return (-1); 30507c478bd9Sstevel@tonic-gate } 30517c478bd9Sstevel@tonic-gate if (pollfdp[i].revents) { 30527c478bd9Sstevel@tonic-gate (*fdcntp)++; 30537c478bd9Sstevel@tonic-gate } 30547c478bd9Sstevel@tonic-gate } 30557c478bd9Sstevel@tonic-gate } 30567c478bd9Sstevel@tonic-gate } 30577c478bd9Sstevel@tonic-gate return (0); 30587c478bd9Sstevel@tonic-gate } 3059
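
#ifdef POLL_LOCK_EXAMPLE	/* never defined; illustrative sketch only */
/*
 * A minimal, hypothetical consumer of the pollstate_enter()/pollstate_exit()
 * protocol above, kept compiled out.  It uses only interfaces visible in
 * this file; the function and the guarding macro are not part of the kernel
 * proper and exist purely to show the expected calling pattern.
 */
static int
example_locked_pollcache_op(pollcache_t *pcp)
{
	int rv = pollstate_enter(pcp);

	if (rv != PSE_SUCCESS) {
		/*
		 * PSE_FAIL_POLLSTATE, PSE_FAIL_DEPTH, PSE_FAIL_LOOP and
		 * PSE_FAIL_DEADLOCK all mean pc_lock cannot be taken safely
		 * right now; bail out and let the caller decide what to do.
		 */
		return (rv);
	}

	/* pcp->pc_lock is held here, so the pollcache may be examined. */
	pcache_wake_parents(pcp);

	pollstate_exit(pcp);
	return (PSE_SUCCESS);
}
#endif	/* POLL_LOCK_EXAMPLE */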