/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/devops.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/stat.h>
#include <sys/poll_impl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/devpoll.h>
#include <sys/rctl.h>
#include <sys/resource.h>

#define	RESERVED	1

/* local data struct */
static dp_entry_t	**devpolltbl;	/* dev poll entries */
static size_t		dptblsize;

static kmutex_t		devpoll_lock;	/* lock protecting dev tbl */
int			devpoll_init;	/* is /dev/poll initialized already */

/* device local functions */

static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp);
static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp);
static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp);
static int dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp);
static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp);
static dev_info_t *dpdevi;


static struct cb_ops dp_cb_ops = {
	dpopen,			/* open */
	dpclose,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	dpwrite,		/* write */
	dpioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	dppoll,			/* poll */
	nodev,			/* prop_op */
	(struct streamtab *)0,	/* streamtab */
	D_NEW | D_MP		/* flags */
};

static int dpattach(dev_info_t *, ddi_attach_cmd_t);
static int dpdetach(dev_info_t *, ddi_detach_cmd_t);
static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

static struct dev_ops dp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dpinfo,			/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dpattach,		/* attach */
	dpdetach,		/* detach */
	nodev,			/* reset */
	&dp_cb_ops,		/* driver operations */
	(struct bus_ops *)NULL,	/* bus operations */
	nulldev			/* power */
};


static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - a driver */
	"Dev Poll driver %I%",
	&dp_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

/*
 * Locking Design
 *
 * The /dev/poll driver shares most of its code with the poll system call,
 * whose code is in common/syscall/poll.c. In the poll(2) design, the
 * pollcache structure is per lwp. An implicit assumption is made there
 * that some portion of the pollcache will never be touched by other lwps.
 * E.g., in the poll(2) design, no lwp will ever need to grow the bitmap
 * of another lwp. This assumption is not true for /dev/poll; hence the
 * need for extra locking.
 *
 * To allow more parallelism, each /dev/poll file descriptor (indexed by
 * minor number) has its own lock. Since a read (dpioctl) is a much more
 * frequent operation than a write, we want to allow multiple reads on the
 * same /dev/poll fd. However, we prevent writes from being starved by
 * giving priority to write operations. Theoretically writes can starve
 * reads as well. But in a practical sense this is not important because
 * (1) writes happen less often than reads, and (2) a write operation
 * defines the contents of the cached fd set. If writes happen so often
 * that they can starve reads, that means the cached set is very unstable.
 * It may not make sense to read an unstable cache set anyway. Therefore,
 * the writers-starving-readers case is not handled in this design.
 */
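
/*
 * Illustrative userland sketch (not part of the driver): the interface
 * implemented below is consumed by opening /dev/poll, writing pollfd_t
 * entries to cache the fds of interest, and then issuing the DP_POLL
 * ioctl to harvest ready fds (see the note above dpioctl() further down).
 * The variable names (wfd, sock) are made up for this sketch, and error
 * handling and headers are omitted.
 *
 *	int		wfd;
 *	pollfd_t	pfd;
 *
 *	wfd = open("/dev/poll", O_RDWR);
 *	pfd.fd = sock;				// an fd the caller watches
 *	pfd.events = POLLIN;
 *	pfd.revents = 0;
 *	(void) write(wfd, &pfd, sizeof (pfd));	// cache the fd
 */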

int
_init()
{
	int	error;

	dptblsize = DEVPOLLSIZE;
	devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
	mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL);
	devpoll_init = 1;
	if ((error = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&devpoll_lock);
		kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
		devpoll_init = 0;
	}
	return (error);
}

int
_fini()
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}
	mutex_destroy(&devpoll_lock);
	kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL)
	    == DDI_FAILURE) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}
	dpdevi = devi;
	return (DDI_SUCCESS);
}

static int
dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ddi_remove_minor_node(devi, NULL);
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dpdevi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

/*
 * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major
 * differences are: (1) /dev/poll requires scanning the bitmap starting at
 * where it was stopped last time, instead of always starting from 0;
 * (2) since the user may not have cleaned up the cached fds when they are
 * closed, some polldats in the cache may refer to closed or reused fds.
 * We need to check for those cases.
 *
 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
 *	 poll(2) caches but NOT for /dev/poll caches. So expect some
 *	 stale entries!
 */
static int
dp_pcache_poll(pollfd_t *pfdp, pollcache_t *pcp, nfds_t nfds, int *fdcntp)
{
	int		start, ostart, end;
	int		fdcnt, fd;
	boolean_t	done;
	file_t		*fp;
	short		revent;
	boolean_t	no_wrap;
	pollhead_t	*php;
	polldat_t	*pdp;
	int		error = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pcp->pc_bitmap == NULL) {
		/*
		 * No need to search because no poll fd
		 * has been cached.
		 */
		return (error);
	}
retry:
	start = ostart = pcp->pc_mapstart;
	end = pcp->pc_mapend;
	php = NULL;

	if (start == 0) {
		/*
		 * Started from the very beginning, no need to wrap around.
		 */
		no_wrap = B_TRUE;
	} else {
		no_wrap = B_FALSE;
	}
	done = B_FALSE;
	fdcnt = 0;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		revent = 0;
		/*
		 * Examine the bit map in a circular fashion
		 * to avoid starvation. Always resume from
		 * the last stop. Scan till the end of the map,
		 * then wrap around.
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			if (fd == end) {
				if (no_wrap) {
					done = B_TRUE;
				} else {
					start = 0;
					end = ostart - 1;
					no_wrap = B_TRUE;
				}
			} else {
				start = fd + 1;
			}
			pdp = pcache_lookup_fd(pcp, fd);
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_fd == fd);
			if (pdp->pd_fp == NULL) {
				/*
				 * The fd is POLLREMOVEd. This fd is
				 * logically no longer cached. So move
				 * on to the next one.
				 */
				continue;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd has been closed, but the user has
				 * not done a POLLREMOVE on this fd yet.
				 * Instead of cleaning it up here implicitly,
				 * we return POLLNVAL. This is consistent with
				 * poll(2) polling a closed fd. Hopefully this
				 * will remind the user to do a POLLREMOVE.
				 */
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].revents = POLLNVAL;
				fdcnt++;
				continue;
			}
			if (fp != pdp->pd_fp) {
				/*
				 * The user is polling on a cached fd which
				 * was closed and then reused. Unfortunately
				 * there is no good way to inform the user.
				 * If the file struct is also reused, we
				 * may not be able to detect the fd reuse
				 * at all. As long as this does not
				 * cause a system failure and/or a memory
				 * leak, we will play along. The man page
				 * states that if the user does not clean up
				 * closed fds, the polling results are
				 * indeterminate.
				 *
				 * XXX - perhaps log the detection of fd
				 * reuse?
				 */
				pdp->pd_fp = fp;
			}
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a
			 * cleaner solution if we could pass pcp as
			 * an argument in the VOP_POLL interface instead
			 * of implicitly passing it using the thread_t
			 * struct. On the other hand, changing the VOP_POLL
			 * interface will require all driver/file system
			 * poll routines to change. May want to revisit
			 * the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
			    &revent, &php);
			curthread->t_pollcache = NULL;
			releasef(fd);
			if (error != 0) {
				break;
			}
			/*
			 * Layered devices (e.g. the console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * The bit should still be set.
				 */
				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
				goto retry;
			}

			if (revent != 0) {
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].events = pdp->pd_events;
				pfdp[fdcnt].revents = revent;
				fdcnt++;
			} else if (php != NULL) {
				/*
				 * We clear a bit or cache a poll fd if
				 * the driver returns a poll head ptr,
				 * which is expected in the case of 0
				 * revents. Some buggy drivers may return
				 * a NULL php pointer with 0 revents. In
				 * this case, we just treat the driver as
				 * "non-cacheable" and do not clear the bit
				 * in the bitmap.
				 */
				if ((pdp->pd_php != NULL) &&
				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				}
			}
		} else {
			/*
			 * No bit set in the range. Check for wrap around.
			 */
			if (!no_wrap) {
				start = 0;
				end = ostart - 1;
				no_wrap = B_TRUE;
			} else {
				done = B_TRUE;
			}
		}
	}

	if (!done) {
		pcp->pc_mapstart = start;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	return (error);
}
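
/*
 * Illustrative note (not part of the driver): as described above, an fd
 * that was closed without a POLLREMOVE comes back from DP_POLL with
 * revents set to POLLNVAL. A consumer typically reacts by removing the
 * stale entry. The names wfd and ready are made up and follow the usage
 * sketch near the top of this file.
 *
 *	pollfd_t	pfd;
 *
 *	if (ready[i].revents & POLLNVAL) {
 *		pfd.fd = ready[i].fd;
 *		pfd.events = POLLREMOVE;
 *		pfd.revents = 0;
 *		(void) write(wfd, &pfd, sizeof (pfd));
 *	}
 */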

/*ARGSUSED*/
static int
dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t		minordev;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;

	ASSERT(devpoll_init);
	ASSERT(dptblsize <= MAXMIN);
	mutex_enter(&devpoll_lock);
	for (minordev = 0; minordev < dptblsize; minordev++) {
		if (devpolltbl[minordev] == NULL) {
			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
			break;
		}
	}
	if (minordev == dptblsize) {
		dp_entry_t	**newtbl;
		size_t		oldsize;

		/*
		 * Used up every entry in the existing devpoll table.
		 * Grow the table by DEVPOLLSIZE.
		 */
		if ((oldsize = dptblsize) >= MAXMIN) {
			mutex_exit(&devpoll_lock);
			return (ENXIO);
		}
		dptblsize += DEVPOLLSIZE;
		if (dptblsize > MAXMIN) {
			dptblsize = MAXMIN;
		}
		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
		devpolltbl = newtbl;
		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
	}
	mutex_exit(&devpoll_lock);

	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
	/*
	 * Allocate a pollcache skeleton here. Delay allocating bitmap
	 * structures until dpwrite() time, since we don't know the
	 * optimal size yet.
	 */
	pcp = pcache_alloc();
	dpep->dpe_pcache = pcp;
	pcp->pc_pid = curproc->p_pid;
	*devp = makedevice(getmajor(*devp), minordev); /* clone the driver */
	mutex_enter(&devpoll_lock);
	ASSERT(minordev < dptblsize);
	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
	devpolltbl[minordev] = dpep;
	mutex_exit(&devpoll_lock);
	return (0);
}
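
/*
 * Illustrative note (not part of the driver): dpwrite() below merges each
 * pollfd_t written into the cached set, OR-ing the supplied events into
 * any existing entry for that fd. Narrowing the watched events therefore
 * takes a POLLREMOVE followed by a fresh add, which may be done in a
 * single write since entries are processed in order. The names wfd and
 * sock are made up and follow the sketch near the top of this file.
 *
 *	pollfd_t	pfd[2];
 *
 *	pfd[0].fd = sock;
 *	pfd[0].events = POLLREMOVE;	// drop the cached entry
 *	pfd[0].revents = 0;
 *	pfd[1].fd = sock;
 *	pfd[1].events = POLLOUT;	// re-add, now watching POLLOUT only
 *	pfd[1].revents = 0;
 *	(void) write(wfd, pfd, sizeof (pfd));
 */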

/*
 * A write to /dev/poll adds fds to or removes them from the cached poll
 * fd set, or changes the poll events for a watched fd.
 */
/*ARGSUSED*/
static int
dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	pollfd_t	*pollfdp, *pfdp;
	int		error;
	ssize_t		uiosize;
	nfds_t		pollfdnum;
	struct pollhead	*php = NULL;
	polldat_t	*pdp;
	int		fd;
	file_t		*fp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	if (curproc->p_pid != pcp->pc_pid) {
		return (EACCES);
	}
	uiosize = uiop->uio_resid;
	pollfdnum = uiosize / sizeof (pollfd_t);
	mutex_enter(&curproc->p_lock);
	if (pollfdnum > (uint_t)rctl_enforced_value(
	    rctlproc_legacy[RLIMIT_NOFILE], curproc->p_rctls, curproc)) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
		    curproc->p_rctls, curproc, RCA_SAFE);
		mutex_exit(&curproc->p_lock);
		return (set_errno(EINVAL));
	}
	mutex_exit(&curproc->p_lock);
	/*
	 * Copy in the pollfd array. Walk through the array and add
	 * each polled fd to the cached set.
	 */
	pollfdp = kmem_alloc(uiosize, KM_SLEEP);

	/*
	 * Although /dev/poll uses the write(2) interface to cache fds, it's
	 * not supposed to function as a seekable device. To prevent the
	 * offset from growing and eventually exceeding the maximum, reset
	 * the offset here for every call.
	 */
	uiop->uio_loffset = 0;
	if ((error = uiomove((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop))
	    != 0) {
		kmem_free(pollfdp, uiosize);
		return (error);
	}
	/*
	 * We are about to enter the core portion of dpwrite(). Make sure this
	 * write has exclusive access in this portion of the code, i.e., no
	 * other writers in this code and no other readers in dpioctl.
	 */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_writerwait++;
	while (dpep->dpe_refcnt != 0) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			dpep->dpe_writerwait--;
			mutex_exit(&dpep->dpe_lock);
			kmem_free(pollfdp, uiosize);
			return (set_errno(EINTR));
		}
	}
	dpep->dpe_writerwait--;
	dpep->dpe_flag |= DP_WRITER_PRESENT;
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	mutex_enter(&pcp->pc_lock);
	if (pcp->pc_bitmap == NULL) {
		pcache_create(pcp, pollfdnum);
	}
	for (pfdp = pollfdp; pfdp < pollfdp + pollfdnum; pfdp++) {
		fd = pfdp->fd;
		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles)
			continue;
		pdp = pcache_lookup_fd(pcp, fd);
		if (pfdp->events != POLLREMOVE) {
			if (pdp == NULL) {
				pdp = pcache_alloc_fd(0);
				pdp->pd_fd = fd;
				pdp->pd_pcache = pcp;
				pcache_insert_fd(pcp, pdp, pollfdnum);
			}
			ASSERT(pdp->pd_fd == fd);
			ASSERT(pdp->pd_pcache == pcp);
			if (fd >= pcp->pc_mapsize) {
				mutex_exit(&pcp->pc_lock);
				pcache_grow_map(pcp, fd);
				mutex_enter(&pcp->pc_lock);
			}
			if (fd > pcp->pc_mapend) {
				pcp->pc_mapend = fd;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd is not valid. Since we can't pass
				 * this error back in the write() call, set
				 * the bit in the bitmap to force the DP_POLL
				 * ioctl to examine it.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				pdp->pd_events |= pfdp->events;
				continue;
			}
			/*
			 * Don't do VOP_POLL for an already cached fd with
			 * the same poll events.
			 */
			if ((pdp->pd_events == pfdp->events) &&
			    (pdp->pd_fp != NULL)) {
				/*
				 * The events are already cached.
				 */
				releasef(fd);
				continue;
			}

			/*
			 * Do VOP_POLL and cache this poll fd.
			 */
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a
			 * cleaner solution if we could pass pcp as
			 * an argument in the VOP_POLL interface instead
			 * of implicitly passing it using the thread_t
			 * struct. On the other hand, changing the VOP_POLL
			 * interface will require all driver/file system
			 * poll routines to change. May want to revisit
			 * the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
			    &pfdp->revents, &php);
			curthread->t_pollcache = NULL;
			/*
			 * We always set the bit when this fd is cached,
			 * so we don't have to worry about missing a
			 * pollwakeup between VOP_POLL and pollhead_insert.
			 * This forces the first DP_POLL to poll this fd.
			 * The real performance gain comes from subsequent
			 * DP_POLL calls.
			 */
			BT_SET(pcp->pc_bitmap, fd);
			if (error != 0) {
				releasef(fd);
				break;
			}
			pdp->pd_fp = fp;
			pdp->pd_events |= pfdp->events;
			if (php != NULL) {
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				} else {
					if (pdp->pd_php != php) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pollhead_insert(php, pdp);
						pdp->pd_php = php;
					}
				}
			}
			releasef(fd);
		} else {
			if (pdp == NULL) {
				continue;
			}
			ASSERT(pdp->pd_fd == fd);
			pdp->pd_fp = NULL;
			pdp->pd_events = 0;
			ASSERT(pdp->pd_thread == NULL);
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			BT_CLEAR(pcp->pc_bitmap, fd);
		}
	}
	mutex_exit(&pcp->pc_lock);
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
	ASSERT(dpep->dpe_refcnt == 1);
	dpep->dpe_refcnt--;
	cv_broadcast(&dpep->dpe_cv);
	mutex_exit(&dpep->dpe_lock);
	kmem_free(pollfdp, uiosize);
	return (error);
}
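
/*
 * Illustrative note (not part of the driver): the ioctls handled by
 * dpioctl() below are used roughly as follows. dp_fds, dp_nfds and
 * dp_timeout are the struct dvpoll fields from <sys/devpoll.h>; NRESULTS,
 * wfd, sock and ready are made up for this sketch.
 *
 *	struct dvpoll	dvp;
 *	pollfd_t	ready[NRESULTS];
 *	pollfd_t	pfd;
 *	int		n;
 *
 *	dvp.dp_fds = ready;			// buffer for results
 *	dvp.dp_nfds = NRESULTS;			// at most this many results
 *	dvp.dp_timeout = -1;			// block; 0 returns at once,
 *						// > 0 is in milliseconds
 *	n = ioctl(wfd, DP_POLL, &dvp);		// count of ready fds, or -1
 *
 *	pfd.fd = sock;
 *	n = ioctl(wfd, DP_ISPOLLED, &pfd);	// 1 if sock is cached, 0 if
 *						// not; on 1, revents holds
 *						// the cached events
 */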

/*ARGSUSED*/
static int
dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	timestruc_t	now;
	timestruc_t	rqtime;
	timestruc_t	*rqtp = NULL;
	int		timecheck = 0;
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		error = 0;
	STRUCT_DECL(dvpoll, dvpoll);

	if (cmd == DP_POLL) {
		/* do this now, before we sleep on DP_WRITER_PRESENT below */
		timecheck = timechanged;
		gethrestime(&now);
	}
	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	mutex_exit(&devpoll_lock);
	ASSERT(dpep != NULL);
	pcp = dpep->dpe_pcache;
	if (curproc->p_pid != pcp->pc_pid)
		return (EACCES);

	mutex_enter(&dpep->dpe_lock);
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) ||
	    (dpep->dpe_writerwait != 0)) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			mutex_exit(&dpep->dpe_lock);
			return (EINTR);
		}
	}
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	switch (cmd) {
	case DP_POLL:
	{
		pollstate_t	*ps;
		nfds_t		nfds;
		int		fdcnt = 0;
		int		time_out;
		int		rval;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll),
		    STRUCT_SIZE(dvpoll));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}

		time_out = STRUCT_FGET(dvpoll, dp_timeout);
		if (time_out > 0) {
			/*
			 * Determine the future time of the requested timeout.
			 */
			rqtp = &rqtime;
			rqtp->tv_sec = time_out / MILLISEC;
			rqtp->tv_nsec = (time_out % MILLISEC) * MICROSEC;
			timespecadd(rqtp, &now);
		}

		if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) {
			/*
			 * We are just using DP_POLL to sleep, so
			 * we don't need any of the devpoll apparatus.
			 * Do not check for signals if we have a zero timeout.
			 */
			DP_REFRELE(dpep);
			if (time_out == 0)
				return (0);
			mutex_enter(&curthread->t_delay_lock);
			while ((rval = cv_waituntil_sig(&curthread->t_delay_cv,
			    &curthread->t_delay_lock, rqtp, timecheck)) > 0)
				continue;
			mutex_exit(&curthread->t_delay_lock);
			return ((rval == 0) ? EINTR : 0);
		}

		/*
		 * XXX It'd be nice not to have to alloc each time.
		 * But it requires another per-thread structure hook.
		 * Do it later if data suggests that it is worthwhile.
		 */
		if ((ps = curthread->t_pollstate) == NULL) {
			curthread->t_pollstate = pollstate_create();
			ps = curthread->t_pollstate;
		}
		if (ps->ps_dpbufsize < nfds) {
			struct proc *p = ttoproc(curthread);
			/*
			 * The maximum size should be no larger than the
			 * current maximum open file count.
			 */
			mutex_enter(&p->p_lock);
			if (nfds >= p->p_fno_ctl) {
				mutex_exit(&p->p_lock);
				DP_REFRELE(dpep);
				return (EINVAL);
			}
			mutex_exit(&p->p_lock);
			kmem_free(ps->ps_dpbuf, sizeof (pollfd_t) *
			    ps->ps_dpbufsize);
			ps->ps_dpbuf = kmem_zalloc(sizeof (pollfd_t) *
			    nfds, KM_SLEEP);
			ps->ps_dpbufsize = nfds;
		}

		mutex_enter(&pcp->pc_lock);
		for (;;) {
			pcp->pc_flag = 0;
			error = dp_pcache_poll(ps->ps_dpbuf, pcp, nfds, &fdcnt);
			if (fdcnt > 0 || error != 0)
				break;

			/*
			 * A pollwake has happened since we last polled
			 * the cache; poll it again.
			 */
			if (pcp->pc_flag & T_POLLWAKE)
				continue;

			/*
			 * Sleep until we are notified, signalled, or timed
			 * out. Do not check for signals if we have a zero
			 * timeout.
			 */
			if (time_out == 0)	/* immediate timeout */
				break;
			rval = cv_waituntil_sig(&pcp->pc_cv, &pcp->pc_lock,
			    rqtp, timecheck);
			/*
			 * If we were awakened by a signal or timeout
			 * then break the loop, else poll again.
			 */
			if (rval <= 0) {
				if (rval == 0)	/* signal */
					error = EINTR;
				break;
			}
		}
		mutex_exit(&pcp->pc_lock);

		if (error == 0 && fdcnt > 0) {
			if (copyout(ps->ps_dpbuf, STRUCT_FGETP(dvpoll,
			    dp_fds), sizeof (pollfd_t) * fdcnt)) {
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = fdcnt;
		}
		break;
	}

	case DP_ISPOLLED:
	{
		pollfd_t	pollfd;
		polldat_t	*pdp;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}
		mutex_enter(&pcp->pc_lock);
		if (pcp->pc_hash == NULL) {
			/*
			 * No need to search because no poll fd
			 * has been cached.
			 */
			mutex_exit(&pcp->pc_lock);
			DP_REFRELE(dpep);
			return (0);
		}
		if (pollfd.fd < 0) {
			mutex_exit(&pcp->pc_lock);
			break;
		}
		pdp = pcache_lookup_fd(pcp, pollfd.fd);
		if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) &&
		    (pdp->pd_fp != NULL)) {
			pollfd.revents = pdp->pd_events;
			if (copyout(&pollfd, (caddr_t)arg, sizeof (pollfd_t))) {
				mutex_exit(&pcp->pc_lock);
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = 1;
		}
		mutex_exit(&pcp->pc_lock);
		break;
	}

	default:
		DP_REFRELE(dpep);
		return (EINVAL);
	}
	DP_REFRELE(dpep);
	return (error);
}

/*ARGSUSED*/
static int
dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	/*
	 * Polling on a /dev/poll fd is not fully supported yet.
	 */
	*reventsp = POLLERR;
	return (0);
}

/*
 * devpoll close should do enough cleanup before the pollcache is deleted,
 * i.e., it should ensure that no one still references the pollcache later.
 * There is no "permission" check in here. Any process holding the last
 * reference to this /dev/poll fd can close it.
 */
/*ARGSUSED*/
static int
dpclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		i;
	polldat_t	**hashtbl;
	polldat_t	*pdp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	devpolltbl[minor] = NULL;
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	ASSERT(pcp != NULL);
	/*
	 * At this point, no other lwp can access this pollcache via the
	 * /dev/poll fd. This pollcache is going away, so do the cleanup
	 * without the pc_lock.
	 */
	hashtbl = pcp->pc_hash;
	for (i = 0; i < pcp->pc_hashsize; i++) {
		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
				pdp->pd_fp = NULL;
			}
		}
	}
	/*
	 * pollwakeup() may still interact with this pollcache. Wait until
	 * it is done.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);
	pcache_destroy(pcp);
	ASSERT(dpep->dpe_refcnt == 0);
	kmem_free(dpep, sizeof (dp_entry_t));
	return (0);
}