xref: /titanic_41/usr/src/uts/common/io/devpoll.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/devops.h>
31 #include <sys/conf.h>
32 #include <sys/modctl.h>
33 #include <sys/sunddi.h>
34 #include <sys/stat.h>
35 #include <sys/poll_impl.h>
36 #include <sys/errno.h>
37 #include <sys/kmem.h>
38 #include <sys/mkdev.h>
39 #include <sys/debug.h>
40 #include <sys/file.h>
41 #include <sys/sysmacros.h>
42 #include <sys/systm.h>
43 #include <sys/bitmap.h>
44 #include <sys/devpoll.h>
45 #include <sys/rctl.h>
46 #include <sys/resource.h>
47 
48 #define	RESERVED	1
49 
50 /* local data struct */
51 static	dp_entry_t	**devpolltbl; 	/* dev poll entries */
52 static	size_t		dptblsize;
53 
54 static	kmutex_t	devpoll_lock;	/* lock protecting dev tbl */
55 int			devpoll_init;	/* is /dev/poll initialized already */
56 
57 /* device local functions */
58 
59 static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp);
60 static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp);
61 static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
62     int *rvalp);
63 static int dppoll(dev_t dev, short events, int anyyet, short *reventsp,
64     struct pollhead **phpp);
65 static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp);
66 static dev_info_t *dpdevi;
67 
68 
69 static struct cb_ops    dp_cb_ops = {
70 	dpopen,			/* open */
71 	dpclose,		/* close */
72 	nodev,			/* strategy */
73 	nodev,			/* print */
74 	nodev,			/* dump */
75 	nodev,			/* read */
76 	dpwrite,		/* write */
77 	dpioctl,		/* ioctl */
78 	nodev,			/* devmap */
79 	nodev,			/* mmap */
80 	nodev,			/* segmap */
81 	dppoll,			/* poll */
82 	nodev,			/* prop_op */
83 	(struct streamtab *)0,	/* streamtab */
84 	D_NEW | D_MP		/* flags */
85 };
86 
87 static int dpattach(dev_info_t *, ddi_attach_cmd_t);
88 static int dpdetach(dev_info_t *, ddi_detach_cmd_t);
89 static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
90 
91 static struct dev_ops dp_ops = {
92 	DEVO_REV,		/* devo_rev */
93 	0,			/* refcnt */
94 	dpinfo,			/* info */
95 	nulldev,		/* identify */
96 	nulldev,		/* probe */
97 	dpattach,		/* attach */
98 	dpdetach,		/* detach */
99 	nodev,			/* reset */
100 	&dp_cb_ops,		/* driver operations */
101 	(struct bus_ops *)NULL, /* bus operations */
102 	nulldev			/* power */
103 };
104 
105 
106 static struct modldrv modldrv = {
107 	&mod_driverops,		/* type of module - a driver */
108 	"Dev Poll driver %I%",
109 	&dp_ops,
110 };
111 
112 static struct modlinkage modlinkage = {
113 	MODREV_1,
114 	(void *)&modldrv,
115 	NULL
116 };
117 
118 /*
119  * Locking Design
120  *
121  * The /dev/poll driver shares most of its code with the poll(2) system
122  * call, whose code is in common/syscall/poll.c. In the poll(2) design,
123  * the pollcache structure is per lwp. An implicit assumption is made
124  * there that some portion of the pollcache will never be touched by
125  * other lwps; e.g., no lwp will ever need to grow the bitmap of another
126  * lwp. This assumption does not hold for /dev/poll; hence the need for
127  * extra locking.
128  *
129  * To allow more parallelism, each /dev/poll file descriptor (indexed by
130  * minor number) has its own lock. Since a read (dpioctl) is a much more
131  * frequent operation than a write, we want to allow multiple reads on
132  * the same /dev/poll fd. However, we prevent writes from being starved
133  * by giving priority to write operations. Theoretically writes can
134  * starve reads as well, but in practice this is not important because
135  * (1) writes happen less often than reads, and (2) a write operation
136  * defines the contents of the cached poll fd set. If writes happened so
137  * often that they could starve reads, the cached set would be very
138  * unstable, and it would hardly make sense to read it anyway. Therefore,
139  * the writers-starving-readers case is not handled in this design.
140  */
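/*
 * Illustrative sketch (not additional driver code): the scheme above is
 * implemented with the dp_entry_t fields dpe_lock, dpe_refcnt,
 * dpe_writerwait and the DP_WRITER_PRESENT flag. Simplified, and ignoring
 * signal handling, a reader (dpioctl) and a writer (dpwrite) enter the
 * protected region roughly as follows:
 *
 *	reader entry (dpioctl):
 *		mutex_enter(&dpep->dpe_lock);
 *		while ((dpep->dpe_flag & DP_WRITER_PRESENT) ||
 *		    (dpep->dpe_writerwait != 0))
 *			cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock);
 *		dpep->dpe_refcnt++;
 *		mutex_exit(&dpep->dpe_lock);
 *
 *	writer entry (dpwrite):
 *		mutex_enter(&dpep->dpe_lock);
 *		dpep->dpe_writerwait++;
 *		while (dpep->dpe_refcnt != 0)
 *			cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock);
 *		dpep->dpe_writerwait--;
 *		dpep->dpe_flag |= DP_WRITER_PRESENT;
 *		dpep->dpe_refcnt++;
 *		mutex_exit(&dpep->dpe_lock);
 *
 * Both sides drop their reference when done (DP_REFRELE() in dpioctl, the
 * equivalent inline code at the end of dpwrite): dpe_refcnt is decremented
 * and dpe_cv is broadcast so that waiters re-evaluate their condition.
 */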
141 
142 int
143 _init()
144 {
145 	int	error;
146 
147 	dptblsize = DEVPOLLSIZE;
148 	devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
149 	mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL);
150 	devpoll_init = 1;
151 	if ((error = mod_install(&modlinkage)) != 0) {
152 		mutex_destroy(&devpoll_lock);
153 		kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
154 		devpoll_init = 0;
155 	}
156 	return (error);
157 }
158 
159 int
160 _fini()
161 {
162 	int error;
163 
164 	if ((error = mod_remove(&modlinkage)) != 0) {
165 		return (error);
166 	}
167 	mutex_destroy(&devpoll_lock);
168 	kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
169 	return (0);
170 }
171 
172 int
173 _info(struct modinfo *modinfop)
174 {
175 	return (mod_info(&modlinkage, modinfop));
176 }
177 
178 /*ARGSUSED*/
179 static int
180 dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
181 {
182 	if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL)
183 	    == DDI_FAILURE) {
184 		ddi_remove_minor_node(devi, NULL);
185 		return (DDI_FAILURE);
186 	}
187 	dpdevi = devi;
188 	return (DDI_SUCCESS);
189 }
190 
191 static int
192 dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
193 {
194 	if (cmd != DDI_DETACH)
195 		return (DDI_FAILURE);
196 
197 	ddi_remove_minor_node(devi, NULL);
198 	return (DDI_SUCCESS);
199 }
200 
201 /* ARGSUSED */
202 static int
203 dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
204 {
205 	int error;
206 
207 	switch (infocmd) {
208 	case DDI_INFO_DEVT2DEVINFO:
209 		*result = (void *)dpdevi;
210 		error = DDI_SUCCESS;
211 		break;
212 	case DDI_INFO_DEVT2INSTANCE:
213 		*result = (void *)0;
214 		error = DDI_SUCCESS;
215 		break;
216 	default:
217 		error = DDI_FAILURE;
218 	}
219 	return (error);
220 }
221 
222 /*
223  * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major
224  * differences are: (1) /dev/poll requires scanning the bitmap starting
225  * from where it stopped last time, instead of always starting from 0;
226  * (2) since the user may not have cleaned up cached fds when they were
227  * closed, some polldats in the cache may refer to closed or reused fds,
228  * and we need to check for those cases.
229  *
230  * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
231  *	 poll(2) caches but NOT for /dev/poll caches. So expect some
232  *	 stale entries!
233  */
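/*
 * A worked example (illustrative only): if pc_mapstart is 5 and pc_mapend
 * is 9, the scan below first examines bits 5 through 9 and then, because
 * it did not start at bit 0, wraps around to examine bits 0 through 4
 * (ostart - 1), so no cached fd is favored over another.
 */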
234 static int
235 dp_pcache_poll(pollfd_t *pfdp, pollcache_t *pcp, nfds_t nfds, int *fdcntp)
236 {
237 	int		start, ostart, end;
238 	int		fdcnt, fd;
239 	boolean_t 	done;
240 	file_t		*fp;
241 	short		revent;
242 	boolean_t	no_wrap;
243 	pollhead_t	*php;
244 	polldat_t	*pdp;
245 	int		error = 0;
246 
247 	ASSERT(MUTEX_HELD(&pcp->pc_lock));
248 	if (pcp->pc_bitmap == NULL) {
249 		/*
250 		 * No need to search because no poll fd
251 		 * has been cached.
252 		 */
253 		return (error);
254 	}
255 retry:
256 	start = ostart = pcp->pc_mapstart;
257 	end = pcp->pc_mapend;
258 	php = NULL;
259 
260 	if (start == 0) {
261 		/*
262 		 * started from the very beginning, no need to wrap around.
263 		 */
264 		no_wrap = B_TRUE;
265 	} else {
266 		no_wrap = B_FALSE;
267 	}
268 	done = B_FALSE;
269 	fdcnt = 0;
270 	while ((fdcnt < nfds) && !done) {
271 		php = NULL;
272 		revent = 0;
273 		/*
274 		 * Examine the bitmap in a circular fashion
275 		 * to avoid starvation. Always resume from
276 		 * the last stop. Scan to the end of the map,
277 		 * then wrap around.
278 		 */
279 		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
280 		ASSERT(fd <= end);
281 		if (fd >= 0) {
282 			if (fd == end) {
283 				if (no_wrap) {
284 					done = B_TRUE;
285 				} else {
286 					start = 0;
287 					end = ostart - 1;
288 					no_wrap = B_TRUE;
289 				}
290 			} else {
291 				start = fd + 1;
292 			}
293 			pdp = pcache_lookup_fd(pcp, fd);
294 			ASSERT(pdp != NULL);
295 			ASSERT(pdp->pd_fd == fd);
296 			if (pdp->pd_fp == NULL) {
297 				/*
298 				 * The fd is POLLREMOVed. This fd is
299 				 * logically no longer cached. So move
300 				 * on to the next one.
301 				 */
302 				continue;
303 			}
304 			if ((fp = getf(fd)) == NULL) {
305 				/*
306 			 * The fd has been closed, but the user has not
307 			 * done a POLLREMOVE on this fd yet. Instead
308 			 * of cleaning it up here implicitly, we return
309 			 * POLLNVAL. This is consistent with poll(2)
310 			 * polling a closed fd. Hopefully this will
311 			 * remind the user to do a POLLREMOVE.
312 				 */
313 				pfdp[fdcnt].fd = fd;
314 				pfdp[fdcnt].revents = POLLNVAL;
315 				fdcnt++;
316 				continue;
317 			}
318 			if (fp != pdp->pd_fp) {
319 				/*
320 			 * The user is polling on a cached fd which was
321 			 * closed and then reused. Unfortunately there
322 			 * is no good way to inform the user. If the
323 			 * file struct is also reused, we may not be
324 			 * able to detect the fd reuse at all. As long
325 			 * as this does not cause a system failure
326 			 * and/or a memory leak, we will play along. The
327 			 * man page states that if the user does not
328 			 * clean up closed fds, polling results are
329 			 * indeterminate.
330 				 *
331 				 * XXX - perhaps log the detection of fd
332 				 *	 reuse?
333 				 */
334 				pdp->pd_fp = fp;
335 			}
336 			/*
337 			 * XXX - pollrelock() logic needs to know
338 			 * which pollcache lock to grab. It'd be a
339 			 * cleaner solution if we could pass pcp as
340 			 * an argument in the VOP_POLL interface
341 			 * instead of implicitly passing it via the
342 			 * thread_t struct. On the other hand, changing
343 			 * the VOP_POLL interface would require every
344 			 * driver/file system poll routine to change.
345 			 * May want to revisit the tradeoff later.
346 			 */
347 			curthread->t_pollcache = pcp;
348 			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
349 			    &revent, &php);
350 			curthread->t_pollcache = NULL;
351 			releasef(fd);
352 			if (error != 0) {
353 				break;
354 			}
355 			/*
356 			 * layered devices (e.g. console driver)
357 			 * may change the vnode and thus the pollhead
358 			 * pointer out from underneath us.
359 			 */
360 			if (php != NULL && pdp->pd_php != NULL &&
361 			    php != pdp->pd_php) {
362 				pollhead_delete(pdp->pd_php, pdp);
363 				pdp->pd_php = php;
364 				pollhead_insert(php, pdp);
365 				/*
366 				 * The bit should still be set.
367 				 */
368 				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
369 				goto retry;
370 			}
371 
372 			if (revent != 0) {
373 				pfdp[fdcnt].fd = fd;
374 				pfdp[fdcnt].events = pdp->pd_events;
375 				pfdp[fdcnt].revents = revent;
376 				fdcnt++;
377 			} else if (php != NULL) {
378 				/*
379 				 * We clear a bit or cache a poll fd if
380 				 * the driver returns a pollhead pointer,
381 				 * which is expected in the case of 0
382 				 * revents. Some buggy drivers may return a
383 				 * NULL php pointer with 0 revents. In
384 				 * that case, we just treat the driver as
385 				 * "noncachable" and do not clear the bit
386 				 * in the bitmap.
387 				 */
388 				if ((pdp->pd_php != NULL) &&
389 				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
390 					BT_CLEAR(pcp->pc_bitmap, fd);
391 				}
392 				if (pdp->pd_php == NULL) {
393 					pollhead_insert(php, pdp);
394 					pdp->pd_php = php;
395 				}
396 			}
397 		} else {
398 			/*
399 			 * No bit set in the range. Check for wrap around.
400 			 */
401 			if (!no_wrap) {
402 				start = 0;
403 				end = ostart - 1;
404 				no_wrap = B_TRUE;
405 			} else {
406 				done = B_TRUE;
407 			}
408 		}
409 	}
410 
411 	if (!done) {
412 		pcp->pc_mapstart = start;
413 	}
414 	ASSERT(*fdcntp == 0);
415 	*fdcntp = fdcnt;
416 	return (error);
417 }
418 
419 /*ARGSUSED*/
420 static int
421 dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
422 {
423 	minor_t		minordev;
424 	dp_entry_t	*dpep;
425 	pollcache_t	*pcp;
426 
427 	ASSERT(devpoll_init);
428 	ASSERT(dptblsize <= MAXMIN);
429 	mutex_enter(&devpoll_lock);
430 	for (minordev = 0; minordev < dptblsize; minordev++) {
431 		if (devpolltbl[minordev] == NULL) {
432 			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
433 			break;
434 		}
435 	}
436 	if (minordev == dptblsize) {
437 		dp_entry_t	**newtbl;
438 		size_t		oldsize;
439 
440 		/*
441 		 * Used up every entry in the existing devpoll table.
442 		 * Grow the table by DEVPOLLSIZE.
443 		 */
444 		if ((oldsize = dptblsize) >= MAXMIN) {
445 			mutex_exit(&devpoll_lock);
446 			return (ENXIO);
447 		}
448 		dptblsize += DEVPOLLSIZE;
449 		if (dptblsize > MAXMIN) {
450 			dptblsize = MAXMIN;
451 		}
452 		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
453 		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
454 		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
455 		devpolltbl = newtbl;
456 		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
457 	}
458 	mutex_exit(&devpoll_lock);
459 
460 	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
461 	/*
462 	 * allocate a pollcache skeleton here. Delay allocating bitmap
463 	 * structures until dpwrite() time, since we don't know the
464 	 * optimal size yet.
465 	 */
466 	pcp = pcache_alloc();
467 	dpep->dpe_pcache = pcp;
468 	pcp->pc_pid = curproc->p_pid;
469 	*devp = makedevice(getmajor(*devp), minordev);  /* clone the driver */
470 	mutex_enter(&devpoll_lock);
471 	ASSERT(minordev < dptblsize);
472 	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
473 	devpolltbl[minordev] = dpep;
474 	mutex_exit(&devpoll_lock);
475 	return (0);
476 }
477 
478 /*
479  * A write to /dev/poll adds fds to or removes fds from the cached poll
480  * fd set, or changes the poll events for an already-watched fd.
481  */
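/*
 * For reference, a rough sketch of the expected userland usage of this
 * interface, based on the POLLREMOVE and DP_POLL handling in this file
 * (error checking omitted; dpfd, sockfd, MAXREADY and n are placeholder
 * names, not part of the driver):
 *
 *	#include <sys/devpoll.h>
 *
 *	int		dpfd = open("/dev/poll", O_RDWR);
 *	pollfd_t	pfd;
 *	pollfd_t	ready[MAXREADY];
 *	struct dvpoll	dvp;
 *	int		n;
 *
 *	pfd.fd = sockfd;
 *	pfd.events = POLLIN;
 *	pfd.revents = 0;
 *	(void) write(dpfd, &pfd, sizeof (pfd));	   add fd to the cached set
 *
 *	dvp.dp_fds = ready;
 *	dvp.dp_nfds = MAXREADY;
 *	dvp.dp_timeout = 1000;			   wait up to one second
 *	n = ioctl(dpfd, DP_POLL, &dvp);		   n ready fds in ready[]
 *
 *	pfd.events = POLLREMOVE;
 *	(void) write(dpfd, &pfd, sizeof (pfd));	   drop fd from the set
 */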
482 /*ARGSUSED*/
483 static int
484 dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
485 {
486 	minor_t 	minor;
487 	dp_entry_t	*dpep;
488 	pollcache_t	*pcp;
489 	pollfd_t	*pollfdp, *pfdp;
490 	int		error;
491 	ssize_t		uiosize;
492 	nfds_t		pollfdnum;
493 	struct pollhead	*php = NULL;
494 	polldat_t	*pdp;
495 	int		fd;
496 	file_t		*fp;
497 
498 	minor = getminor(dev);
499 
500 	mutex_enter(&devpoll_lock);
501 	ASSERT(minor < dptblsize);
502 	dpep = devpolltbl[minor];
503 	ASSERT(dpep != NULL);
504 	mutex_exit(&devpoll_lock);
505 	pcp = dpep->dpe_pcache;
506 	if (curproc->p_pid != pcp->pc_pid) {
507 		return (EACCES);
508 	}
509 	uiosize = uiop->uio_resid;
510 	pollfdnum = uiosize / sizeof (pollfd_t);
511 	mutex_enter(&curproc->p_lock);
512 	if (pollfdnum > (uint_t)rctl_enforced_value(
513 	    rctlproc_legacy[RLIMIT_NOFILE], curproc->p_rctls, curproc)) {
514 		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
515 		    curproc->p_rctls, curproc, RCA_SAFE);
516 		mutex_exit(&curproc->p_lock);
517 		return (set_errno(EINVAL));
518 	}
519 	mutex_exit(&curproc->p_lock);
520 	/*
521 	 * Copy in the pollfd array.  Walk through the array and add
522 	 * each polled fd to the cached set.
523 	 */
524 	pollfdp = kmem_alloc(uiosize, KM_SLEEP);
525 
526 	/*
527 	 * Although /dev/poll uses the write(2) interface to cache fds, it's
528 	 * not supposed to function as a seekable device. To prevent the
529 	 * offset from growing and eventually exceeding the maximum, reset
530 	 * the offset here on every call.
531 	 */
532 	uiop->uio_loffset = 0;
533 	if ((error = uiomove((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop))
534 	    != 0) {
535 		kmem_free(pollfdp, uiosize);
536 		return (error);
537 	}
538 	/*
539 	 * We are about to enter the core portion of dpwrite(). Make sure this
540 	 * write has exclusive access in this portion of the code, i.e., no
541 	 * other writers in this code and no other readers in dpioctl.
542 	 */
543 	mutex_enter(&dpep->dpe_lock);
544 	dpep->dpe_writerwait++;
545 	while (dpep->dpe_refcnt != 0) {
546 		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
547 			dpep->dpe_writerwait--;
548 			mutex_exit(&dpep->dpe_lock);
549 			kmem_free(pollfdp, uiosize);
550 			return (set_errno(EINTR));
551 		}
552 	}
553 	dpep->dpe_writerwait--;
554 	dpep->dpe_flag |= DP_WRITER_PRESENT;
555 	dpep->dpe_refcnt++;
556 	mutex_exit(&dpep->dpe_lock);
557 
558 	mutex_enter(&pcp->pc_lock);
559 	if (pcp->pc_bitmap == NULL) {
560 		pcache_create(pcp, pollfdnum);
561 	}
562 	for (pfdp = pollfdp; pfdp < pollfdp + pollfdnum; pfdp++) {
563 		fd = pfdp->fd;
564 		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles)
565 			continue;
566 		pdp = pcache_lookup_fd(pcp, fd);
567 		if (pfdp->events != POLLREMOVE) {
568 			if (pdp == NULL) {
569 				pdp = pcache_alloc_fd(0);
570 				pdp->pd_fd = fd;
571 				pdp->pd_pcache = pcp;
572 				pcache_insert_fd(pcp, pdp, pollfdnum);
573 			}
574 			ASSERT(pdp->pd_fd == fd);
575 			ASSERT(pdp->pd_pcache == pcp);
576 			if (fd >= pcp->pc_mapsize) {
577 				mutex_exit(&pcp->pc_lock);
578 				pcache_grow_map(pcp, fd);
579 				mutex_enter(&pcp->pc_lock);
580 			}
581 			if (fd > pcp->pc_mapend) {
582 				pcp->pc_mapend = fd;
583 			}
584 			if ((fp = getf(fd)) == NULL) {
585 				/*
586 				 * The fd is not valid. Since we can't pass
587 				 * this error back in the write() call, set
588 				 * the bit in the bitmap to force the DP_POLL
589 				 * ioctl to examine it.
590 				 */
591 				BT_SET(pcp->pc_bitmap, fd);
592 				pdp->pd_events |= pfdp->events;
593 				continue;
594 			}
595 			/*
596 			 * Don't do VOP_POLL for an already cached fd with
597 			 * the same poll events.
598 			 */
599 			if ((pdp->pd_events == pfdp->events) &&
600 			    (pdp->pd_fp != NULL)) {
601 				/*
602 				 * the events are already cached
603 				 */
604 				releasef(fd);
605 				continue;
606 			}
607 
608 			/*
609 			 * do VOP_POLL and cache this poll fd.
610 			 */
611 			/*
612 			 * XXX - pollrelock() logic needs to know
613 			 * which pollcache lock to grab. It'd be a
614 			 * cleaner solution if we could pass pcp as
615 			 * an argument in the VOP_POLL interface
616 			 * instead of implicitly passing it via the
617 			 * thread_t struct. On the other hand, changing
618 			 * the VOP_POLL interface would require every
619 			 * driver/file system poll routine to change.
620 			 * May want to revisit the tradeoff later.
621 			 */
622 			curthread->t_pollcache = pcp;
623 			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
624 			    &pfdp->revents, &php);
625 			curthread->t_pollcache = NULL;
626 			/*
627 			 * We always set the bit when this fd is cached.
628 			 * So we don't have to worry about missing a
629 			 * pollwakeup between VOP_POLL and pollhead_insert.
630 			 * This forces the first DP_POLL to poll this fd.
631 			 * The real performance gain comes from subsequent
632 			 * DP_POLL calls.
633 			 */
634 			BT_SET(pcp->pc_bitmap, fd);
635 			if (error != 0) {
636 				releasef(fd);
637 				break;
638 			}
639 			pdp->pd_fp = fp;
640 			pdp->pd_events |= pfdp->events;
641 			if (php != NULL) {
642 				if (pdp->pd_php == NULL) {
643 					pollhead_insert(php, pdp);
644 					pdp->pd_php = php;
645 				} else {
646 					if (pdp->pd_php != php) {
647 						pollhead_delete(pdp->pd_php,
648 						    pdp);
649 						pollhead_insert(php, pdp);
650 						pdp->pd_php = php;
651 					}
652 				}
653 
654 			}
655 			releasef(fd);
656 		} else {
657 			if (pdp == NULL) {
658 				continue;
659 			}
660 			ASSERT(pdp->pd_fd == fd);
661 			pdp->pd_fp = NULL;
662 			pdp->pd_events = 0;
663 			ASSERT(pdp->pd_thread == NULL);
664 			if (pdp->pd_php != NULL) {
665 				pollhead_delete(pdp->pd_php, pdp);
666 				pdp->pd_php = NULL;
667 			}
668 			BT_CLEAR(pcp->pc_bitmap, fd);
669 		}
670 	}
671 	mutex_exit(&pcp->pc_lock);
672 	mutex_enter(&dpep->dpe_lock);
673 	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
674 	ASSERT(dpep->dpe_refcnt == 1);
675 	dpep->dpe_refcnt--;
676 	cv_broadcast(&dpep->dpe_cv);
677 	mutex_exit(&dpep->dpe_lock);
678 	kmem_free(pollfdp, uiosize);
679 	return (error);
680 }
681 
682 /*ARGSUSED*/
683 static int
684 dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
685 {
686 	timestruc_t	now;
687 	timestruc_t	rqtime;
688 	timestruc_t	*rqtp = NULL;
689 	int		timecheck = 0;
690 	minor_t 	minor;
691 	dp_entry_t	*dpep;
692 	pollcache_t	*pcp;
693 	int 		error = 0;
694 	STRUCT_DECL(dvpoll, dvpoll);
695 
696 	if (cmd == DP_POLL) {
697 		/* do this now, before we sleep on DP_WRITER_PRESENT below */
698 		timecheck = timechanged;
699 		gethrestime(&now);
700 	}
701 	minor = getminor(dev);
702 	mutex_enter(&devpoll_lock);
703 	ASSERT(minor < dptblsize);
704 	dpep = devpolltbl[minor];
705 	mutex_exit(&devpoll_lock);
706 	ASSERT(dpep != NULL);
707 	pcp = dpep->dpe_pcache;
708 	if (curproc->p_pid != pcp->pc_pid)
709 		return (EACCES);
710 
711 	mutex_enter(&dpep->dpe_lock);
712 	while ((dpep->dpe_flag & DP_WRITER_PRESENT) ||
713 	    (dpep->dpe_writerwait != 0)) {
714 		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
715 			mutex_exit(&dpep->dpe_lock);
716 			return (EINTR);
717 		}
718 	}
719 	dpep->dpe_refcnt++;
720 	mutex_exit(&dpep->dpe_lock);
721 
722 	switch (cmd) {
723 	case	DP_POLL:
724 	{
725 		pollstate_t *ps;
726 		nfds_t	nfds;
727 		int	fdcnt = 0;
728 		int	time_out;
729 		int	rval;
730 
731 		STRUCT_INIT(dvpoll, mode);
732 		error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll),
733 		    STRUCT_SIZE(dvpoll));
734 		if (error) {
735 			DP_REFRELE(dpep);
736 			return (EFAULT);
737 		}
738 
739 		time_out = STRUCT_FGET(dvpoll, dp_timeout);
740 		if (time_out > 0) {
741 			/*
742 			 * Determine the future time of the requested timeout.
743 			 */
744 			rqtp = &rqtime;
745 			rqtp->tv_sec = time_out / MILLISEC;
746 			rqtp->tv_nsec = (time_out % MILLISEC) * MICROSEC;
747 			timespecadd(rqtp, &now);
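			/*
			 * Worked example (illustrative): a dp_timeout of
			 * 2500 ms yields rqtime.tv_sec = 2 and
			 * rqtime.tv_nsec = 500000000, to which the current
			 * time is then added by timespecadd().
			 */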
748 		}
749 
750 		if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) {
751 			/*
752 			 * We are just using DP_POLL to sleep, so
753 			 * we don't need any of the devpoll apparatus.
754 			 * Do not check for signals if we have a zero timeout.
755 			 */
756 			DP_REFRELE(dpep);
757 			if (time_out == 0)
758 				return (0);
759 			mutex_enter(&curthread->t_delay_lock);
760 			while ((rval = cv_waituntil_sig(&curthread->t_delay_cv,
761 			    &curthread->t_delay_lock, rqtp, timecheck)) > 0)
762 				continue;
763 			mutex_exit(&curthread->t_delay_lock);
764 			return ((rval == 0)? EINTR : 0);
765 		}
766 
767 		/*
768 		 * XXX It'd be nice not to have to alloc each time, but
769 		 * that requires another per-thread structure hook.
770 		 * Do it later if there is data to suggest it is worthwhile.
771 		 */
772 		if ((ps = curthread->t_pollstate) == NULL) {
773 			curthread->t_pollstate = pollstate_create();
774 			ps = curthread->t_pollstate;
775 		}
776 		if (ps->ps_dpbufsize < nfds) {
777 			struct proc *p = ttoproc(curthread);
778 			/*
779 			 * The maximum size should be no larger than the
780 			 * current maximum open file count.
781 			 */
782 			mutex_enter(&p->p_lock);
783 			if (nfds >= p->p_fno_ctl) {
784 				mutex_exit(&p->p_lock);
785 				DP_REFRELE(dpep);
786 				return (EINVAL);
787 			}
788 			mutex_exit(&p->p_lock);
789 			kmem_free(ps->ps_dpbuf, sizeof (pollfd_t) *
790 			    ps->ps_dpbufsize);
791 			ps->ps_dpbuf = kmem_zalloc(sizeof (pollfd_t) *
792 			    nfds, KM_SLEEP);
793 			ps->ps_dpbufsize = nfds;
794 		}
795 
796 		mutex_enter(&pcp->pc_lock);
797 		for (;;) {
798 			pcp->pc_flag = 0;
799 			error = dp_pcache_poll(ps->ps_dpbuf, pcp, nfds, &fdcnt);
800 			if (fdcnt > 0 || error != 0)
801 				break;
802 
803 			/*
804 			 * A pollwakeup has happened since the cache was polled.
805 			 */
806 			if (pcp->pc_flag & T_POLLWAKE)
807 				continue;
808 
809 			/*
810 			 * Sleep until we are notified, signalled, or timed out.
811 			 * Do not check for signals if we have a zero timeout.
812 			 */
813 			if (time_out == 0)	/* immediate timeout */
814 				break;
815 			rval = cv_waituntil_sig(&pcp->pc_cv, &pcp->pc_lock,
816 				rqtp, timecheck);
817 			/*
818 			 * If we were awakened by a signal or timeout
819 			 * then break the loop, else poll again.
820 			 */
821 			if (rval <= 0) {
822 				if (rval == 0)	/* signal */
823 					error = EINTR;
824 				break;
825 			}
826 		}
827 		mutex_exit(&pcp->pc_lock);
828 
829 		if (error == 0 && fdcnt > 0) {
830 			if (copyout(ps->ps_dpbuf, STRUCT_FGETP(dvpoll,
831 			    dp_fds), sizeof (pollfd_t) * fdcnt)) {
832 				DP_REFRELE(dpep);
833 				return (EFAULT);
834 			}
835 			*rvalp = fdcnt;
836 		}
837 		break;
838 	}
839 
840 	case	DP_ISPOLLED:
841 	{
842 		pollfd_t	pollfd;
843 		polldat_t	*pdp;
844 
845 		STRUCT_INIT(dvpoll, mode);
846 		error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t));
847 		if (error) {
848 			DP_REFRELE(dpep);
849 			return (EFAULT);
850 		}
851 		mutex_enter(&pcp->pc_lock);
852 		if (pcp->pc_hash == NULL) {
853 			/*
854 			 * No need to search because no poll fd
855 			 * has been cached.
856 			 */
857 			mutex_exit(&pcp->pc_lock);
858 			DP_REFRELE(dpep);
859 			return (0);
860 		}
861 		if (pollfd.fd < 0) {
862 			mutex_exit(&pcp->pc_lock);
863 			break;
864 		}
865 		pdp = pcache_lookup_fd(pcp, pollfd.fd);
866 		if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) &&
867 		    (pdp->pd_fp != NULL)) {
868 			pollfd.revents = pdp->pd_events;
869 			if (copyout(&pollfd, (caddr_t)arg, sizeof (pollfd_t))) {
870 				mutex_exit(&pcp->pc_lock);
871 				DP_REFRELE(dpep);
872 				return (EFAULT);
873 			}
874 			*rvalp = 1;
875 		}
876 		mutex_exit(&pcp->pc_lock);
877 		break;
878 	}
879 
880 	default:
881 		DP_REFRELE(dpep);
882 		return (EINVAL);
883 	}
884 	DP_REFRELE(dpep);
885 	return (error);
886 }
887 
888 /*ARGSUSED*/
889 static int
890 dppoll(dev_t dev, short events, int anyyet, short *reventsp,
891     struct pollhead **phpp)
892 {
893 	/*
894 	 * Polling on a /dev/poll fd is not fully supported yet.
895 	 */
896 	*reventsp = POLLERR;
897 	return (0);
898 }
899 
900 /*
901  * devpoll close should do enough cleanup before the pollcache is deleted,
902  * i.e., it should ensure no one still references the pollcache afterwards.
903  * There is no "permission" check here. Any process holding the last
904  * reference to this /dev/poll fd can close it.
905  */
906 /*ARGSUSED*/
907 static int
908 dpclose(dev_t dev, int flag, int otyp, cred_t *credp)
909 {
910 	minor_t 	minor;
911 	dp_entry_t	*dpep;
912 	pollcache_t	*pcp;
913 	int		i;
914 	polldat_t	**hashtbl;
915 	polldat_t	*pdp;
916 
917 	minor = getminor(dev);
918 
919 	mutex_enter(&devpoll_lock);
920 	dpep = devpolltbl[minor];
921 	ASSERT(dpep != NULL);
922 	devpolltbl[minor] = NULL;
923 	mutex_exit(&devpoll_lock);
924 	pcp = dpep->dpe_pcache;
925 	ASSERT(pcp != NULL);
926 	/*
927 	 * At this point, no other lwp can access this pollcache via the
928 	 * /dev/poll fd. This pollcache is going away, so do the
929 	 * cleanup without holding pc_lock.
930 	 */
931 	hashtbl = pcp->pc_hash;
932 	for (i = 0; i < pcp->pc_hashsize; i++) {
933 		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
934 			if (pdp->pd_php != NULL) {
935 				pollhead_delete(pdp->pd_php, pdp);
936 				pdp->pd_php = NULL;
937 				pdp->pd_fp = NULL;
938 			}
939 		}
940 	}
941 	/*
942 	 * pollwakeup() may still interact with this pollcache. Wait until
943 	 * it is done.
944 	 */
945 	mutex_enter(&pcp->pc_no_exit);
946 	ASSERT(pcp->pc_busy >= 0);
947 	while (pcp->pc_busy > 0)
948 		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
949 	mutex_exit(&pcp->pc_no_exit);
950 	pcache_destroy(pcp);
951 	ASSERT(dpep->dpe_refcnt == 0);
952 	kmem_free(dpep, sizeof (dp_entry_t));
953 	return (0);
954 }
955