/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 2011 by Delphix. All rights reserved. */

#include <sys/types.h>
#include <sys/devops.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/stat.h>
#include <sys/poll_impl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/devpoll.h>
#include <sys/rctl.h>
#include <sys/resource.h>

#define	RESERVED	1

/* local data struct */
static	dp_entry_t	**devpolltbl;	/* dev poll entries */
static	size_t		dptblsize;

static	kmutex_t	devpoll_lock;	/* lock protecting dev tbl */
int			devpoll_init;	/* is /dev/poll initialized already */

/* device local functions */

static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp);
static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp);
static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp);
static int dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp);
static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp);
static dev_info_t *dpdevi;


static struct cb_ops dp_cb_ops = {
	dpopen,			/* open */
	dpclose,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	dpwrite,		/* write */
	dpioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	dppoll,			/* poll */
	ddi_prop_op,		/* prop_op */
	(struct streamtab *)0,	/* streamtab */
	D_MP,			/* flags */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};

static int dpattach(dev_info_t *, ddi_attach_cmd_t);
static int dpdetach(dev_info_t *, ddi_detach_cmd_t);
static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

static struct dev_ops dp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dpinfo,			/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dpattach,		/* attach */
	dpdetach,		/* detach */
	nodev,			/* reset */
	&dp_cb_ops,		/* driver operations */
	(struct bus_ops *)NULL,	/* bus operations */
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};


static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - a driver */
	"/dev/poll driver",
	&dp_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};
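
/*
 * For orientation before the implementation below: a minimal userland
 * sketch of the /dev/poll interface this driver provides. An fd is cached
 * into the polled set via write(2) and readiness is harvested via the
 * DP_POLL ioctl. The names wfd/sfd and the result-buffer size are
 * illustrative assumptions, not part of this file:
 *
 *	int wfd = open("/dev/poll", O_RDWR);	-- one cached set per open
 *	pollfd_t pfd;
 *	pfd.fd = sfd;				-- fd to watch, e.g. a socket
 *	pfd.events = POLLIN;
 *	pfd.revents = 0;
 *	(void) write(wfd, &pfd, sizeof (pfd));	-- cache sfd in the set
 *
 *	pollfd_t ready[16];
 *	struct dvpoll dvp;
 *	dvp.dp_fds = ready;			-- ready fds are copied here
 *	dvp.dp_nfds = 16;			-- at most this many results
 *	dvp.dp_timeout = 1000;			-- milliseconds; -1 blocks
 *	int nfds = ioctl(wfd, DP_POLL, &dvp);	-- # of ready fds, or -1
 */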

/*
 * Locking Design
 *
 * The /dev/poll driver shares most of its code with the poll(2) system
 * call, whose code lives in common/syscall/poll.c. In the poll(2) design,
 * the pollcache structure is per lwp, and an implicit assumption is made
 * there that some portion of the pollcache will never be touched by other
 * lwps. For example, in the poll(2) design no lwp ever needs to grow the
 * bitmap of another lwp. That assumption does not hold for /dev/poll;
 * hence the need for extra locking.
 *
 * To allow more parallelism, each /dev/poll file descriptor (indexed by
 * minor number) has its own lock. Since read (dpioctl) is a much more
 * frequent operation than write, we want to allow multiple reads on the
 * same /dev/poll fd. However, we prevent writes from being starved by
 * giving priority to write operations. Theoretically writes can starve
 * reads as well, but in practice this is unimportant because (1) writes
 * happen less often than reads, and (2) a write operation defines the
 * contents of the cached fd set. If writes happen so often that they can
 * starve reads, the cached set is very unstable and it makes little sense
 * to read it anyway. Therefore, the writers-starving-readers case is not
 * handled in this design.
 */

int
_init()
{
	int	error;

	dptblsize = DEVPOLLSIZE;
	devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
	mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL);
	devpoll_init = 1;
	if ((error = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&devpoll_lock);
		kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
		devpoll_init = 0;
	}
	return (error);
}

int
_fini()
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}
	mutex_destroy(&devpoll_lock);
	kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL)
	    == DDI_FAILURE) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}
	dpdevi = devi;
	return (DDI_SUCCESS);
}

static int
dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ddi_remove_minor_node(devi, NULL);
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dpdevi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
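
/*
 * A worked illustration (assumed values, not code from this driver) of the
 * circular bitmap scan performed by dp_pcache_poll() below. With
 * pc_mapstart == 5 and pc_mapend == 9, the first pass scans bits 5..9;
 * if that pass reaches the end without exhausting the caller's array, the
 * scan wraps and covers bits 0..4 exactly once:
 *
 *	start = ostart = 5;		-- resume where the last scan stopped
 *	end = 9;
 *	fd = bt_getlowbit(map, start, end);
 *	if (fd < 0) {			-- no bit set in [start, end]
 *		start = 0;		-- wrap around ...
 *		end = ostart - 1;	-- ... but stop short of ostart
 *	}
 */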
/*
 * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major
 * differences are: (1) /dev/poll requires scanning the bitmap starting at
 * where it was stopped last time, instead of always starting from 0;
 * (2) since the user may not have cleaned up cached fds when they are
 * closed, some polldats in the cache may refer to closed or reused fds.
 * We need to check for those cases.
 *
 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
 *	 poll(2) caches but NOT for /dev/poll caches. So expect some
 *	 stale entries!
 */
static int
dp_pcache_poll(pollfd_t *pfdp, pollcache_t *pcp, nfds_t nfds, int *fdcntp)
{
	int		start, ostart, end;
	int		fdcnt, fd;
	boolean_t	done;
	file_t		*fp;
	short		revent;
	boolean_t	no_wrap;
	pollhead_t	*php;
	polldat_t	*pdp;
	int		error = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pcp->pc_bitmap == NULL) {
		/*
		 * No need to search because no poll fd
		 * has been cached.
		 */
		return (error);
	}
retry:
	start = ostart = pcp->pc_mapstart;
	end = pcp->pc_mapend;
	php = NULL;

	if (start == 0) {
		/*
		 * Started from the very beginning; no need to wrap around.
		 */
		no_wrap = B_TRUE;
	} else {
		no_wrap = B_FALSE;
	}
	done = B_FALSE;
	fdcnt = 0;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		revent = 0;
		/*
		 * Examine the bitmap in a circular fashion
		 * to avoid starvation. Always resume from
		 * the last stop. Scan till the end of the
		 * map, then wrap around.
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			if (fd == end) {
				if (no_wrap) {
					done = B_TRUE;
				} else {
					start = 0;
					end = ostart - 1;
					no_wrap = B_TRUE;
				}
			} else {
				start = fd + 1;
			}
			pdp = pcache_lookup_fd(pcp, fd);
repoll:
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_fd == fd);
			if (pdp->pd_fp == NULL) {
				/*
				 * The fd has been POLLREMOVEd, so it is
				 * logically no longer cached. Move on
				 * to the next one.
				 */
				continue;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd has been closed, but the user has
				 * not done a POLLREMOVE on it yet. Instead
				 * of cleaning it up here implicitly, we
				 * return POLLNVAL. This is consistent with
				 * poll(2) polling a closed fd, and it should
				 * remind the user to do a POLLREMOVE.
				 */
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].revents = POLLNVAL;
				fdcnt++;
				continue;
			}
			if (fp != pdp->pd_fp) {
				/*
				 * The user is polling on a cached fd which
				 * was closed and then reused. Unfortunately
				 * there is no good way to inform the user.
				 * If the file struct is also reused, we may
				 * not be able to detect the fd reuse at all.
				 * As long as this does not cause a system
				 * failure and/or a memory leak, we will play
				 * along. The man page states that if the
				 * user does not clean up closed fds, polling
				 * results will be indeterministic.
				 *
				 * XXX - perhaps log the detection of fd
				 * reuse?
				 */
				pdp->pd_fp = fp;
			}
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it via the thread_t struct. On the
			 * other hand, changing the VOP_POLL interface
			 * would require every driver/file system poll
			 * routine to change. May want to revisit the
			 * tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
			    &revent, &php, NULL);
			curthread->t_pollcache = NULL;
			releasef(fd);
			if (error != 0) {
				break;
			}
			/*
			 * Layered devices (e.g. the console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * The bit should still be set.
				 */
				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
				goto retry;
			}

			if (revent != 0) {
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].events = pdp->pd_events;
				pfdp[fdcnt].revents = revent;
				fdcnt++;
			} else if (php != NULL) {
				/*
				 * We clear a bit or cache a poll fd if
				 * the driver returns a poll head ptr,
				 * which is expected in the case of 0
				 * revents. Some buggy drivers may return
				 * a NULL php pointer with 0 revents. In
				 * that case, we just treat the driver as
				 * "noncachable" and do not clear the bit
				 * in the bitmap.
				 */
				if ((pdp->pd_php != NULL) &&
				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
					/*
					 * An event of interest may have
					 * arrived between the VOP_POLL() and
					 * the pollhead_insert(); check again.
					 */
					goto repoll;
				}
			}
		} else {
			/*
			 * No bit set in the range. Check for wrap around.
			 */
			if (!no_wrap) {
				start = 0;
				end = ostart - 1;
				no_wrap = B_TRUE;
			} else {
				done = B_TRUE;
			}
		}
	}

	if (!done) {
		pcp->pc_mapstart = start;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	return (error);
}

/*ARGSUSED*/
static int
dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t		minordev;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;

	ASSERT(devpoll_init);
	ASSERT(dptblsize <= MAXMIN);
	mutex_enter(&devpoll_lock);
	for (minordev = 0; minordev < dptblsize; minordev++) {
		if (devpolltbl[minordev] == NULL) {
			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
			break;
		}
	}
	if (minordev == dptblsize) {
		dp_entry_t	**newtbl;
		size_t		oldsize;

		/*
		 * Used up every entry in the existing devpoll table.
		 * Grow the table by DEVPOLLSIZE.
		 */
		if ((oldsize = dptblsize) >= MAXMIN) {
			mutex_exit(&devpoll_lock);
			return (ENXIO);
		}
		dptblsize += DEVPOLLSIZE;
		if (dptblsize > MAXMIN) {
			dptblsize = MAXMIN;
		}
		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
		devpolltbl = newtbl;
		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
	}
	mutex_exit(&devpoll_lock);

	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
	/*
	 * Allocate a pollcache skeleton here. Delay allocating bitmap
	 * structures until dpwrite() time, since we don't know the
	 * optimal size yet.
	 */
	pcp = pcache_alloc();
	dpep->dpe_pcache = pcp;
	pcp->pc_pid = curproc->p_pid;
	*devp = makedevice(getmajor(*devp), minordev); /* clone the driver */
	mutex_enter(&devpoll_lock);
	ASSERT(minordev < dptblsize);
	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
	devpolltbl[minordev] = dpep;
	mutex_exit(&devpoll_lock);
	return (0);
}

/*
 * Writing to /dev/poll adds fds to or removes fds from the cached poll fd
 * set, or changes the poll events for an already-watched fd.
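 *
 * For illustration, a minimal userland sketch of this write interface,
 * assuming wfd is an open /dev/poll descriptor and fd0/fd1 are watched
 * fds (all of these names are hypothetical):
 *
 *	pollfd_t pfd[2];
 *	pfd[0].fd = fd0;
 *	pfd[0].events = POLLOUT;	-- OR'd into fd0's cached events
 *	pfd[1].fd = fd1;
 *	pfd[1].events = POLLREMOVE;	-- drop fd1 from the cached set
 *	(void) write(wfd, pfd, sizeof (pfd));
 *
 * Note that events accumulate: as the code below shows (pd_events |=
 * pfdp->events), re-writing an fd ORs the new events into the cached
 * ones rather than replacing them.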
 */
/*ARGSUSED*/
static int
dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	pollfd_t	*pollfdp, *pfdp;
	int		error;
	ssize_t		uiosize;
	nfds_t		pollfdnum;
	struct pollhead	*php = NULL;
	polldat_t	*pdp;
	int		fd;
	file_t		*fp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	if (curproc->p_pid != pcp->pc_pid) {
		return (EACCES);
	}
	uiosize = uiop->uio_resid;
	pollfdnum = uiosize / sizeof (pollfd_t);
	mutex_enter(&curproc->p_lock);
	if (pollfdnum > (uint_t)rctl_enforced_value(
	    rctlproc_legacy[RLIMIT_NOFILE], curproc->p_rctls, curproc)) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
		    curproc->p_rctls, curproc, RCA_SAFE);
		mutex_exit(&curproc->p_lock);
		return (set_errno(EINVAL));
	}
	mutex_exit(&curproc->p_lock);
	/*
	 * Copy in the pollfd array. Walk through the array and add
	 * each polled fd to the cached set.
	 */
	pollfdp = kmem_alloc(uiosize, KM_SLEEP);

	/*
	 * Although /dev/poll uses the write(2) interface to cache fds, it's
	 * not supposed to function as a seekable device. To prevent the
	 * offset from growing and eventually exceeding the maximum, reset
	 * the offset here for every call.
	 */
	uiop->uio_loffset = 0;
	if ((error = uiomove((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop))
	    != 0) {
		kmem_free(pollfdp, uiosize);
		return (error);
	}
	/*
	 * We are about to enter the core portion of dpwrite(). Make sure this
	 * write has exclusive access in this portion of the code, i.e., no
	 * other writers in this code and no other readers in dpioctl.
	 */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_writerwait++;
	while (dpep->dpe_refcnt != 0) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			dpep->dpe_writerwait--;
			mutex_exit(&dpep->dpe_lock);
			kmem_free(pollfdp, uiosize);
			return (set_errno(EINTR));
		}
	}
	dpep->dpe_writerwait--;
	dpep->dpe_flag |= DP_WRITER_PRESENT;
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	mutex_enter(&pcp->pc_lock);
	if (pcp->pc_bitmap == NULL) {
		pcache_create(pcp, pollfdnum);
	}
	for (pfdp = pollfdp; pfdp < pollfdp + pollfdnum; pfdp++) {
		fd = pfdp->fd;
		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles)
			continue;
		pdp = pcache_lookup_fd(pcp, fd);
		if (pfdp->events != POLLREMOVE) {
			if (pdp == NULL) {
				pdp = pcache_alloc_fd(0);
				pdp->pd_fd = fd;
				pdp->pd_pcache = pcp;
				pcache_insert_fd(pcp, pdp, pollfdnum);
			}
			ASSERT(pdp->pd_fd == fd);
			ASSERT(pdp->pd_pcache == pcp);
			if (fd >= pcp->pc_mapsize) {
				mutex_exit(&pcp->pc_lock);
				pcache_grow_map(pcp, fd);
				mutex_enter(&pcp->pc_lock);
			}
			if (fd > pcp->pc_mapend) {
				pcp->pc_mapend = fd;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd is not valid. Since we can't pass
				 * this error back in the write() call, set
				 * the bit in the bitmap to force the DP_POLL
				 * ioctl to examine it.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				pdp->pd_events |= pfdp->events;
				continue;
			}
			/*
			 * Don't do VOP_POLL for an already cached fd with
			 * the same poll events.
			 */
			if ((pdp->pd_events == pfdp->events) &&
			    (pdp->pd_fp != NULL)) {
				/*
				 * The events are already cached.
				 */
				releasef(fd);
				continue;
			}

			/*
			 * Do VOP_POLL and cache this poll fd.
			 */
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it via the thread_t struct. On the
			 * other hand, changing the VOP_POLL interface
			 * would require every driver/file system poll
			 * routine to change. May want to revisit the
			 * tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
			    &pfdp->revents, &php, NULL);
			curthread->t_pollcache = NULL;
			/*
			 * We always set the bit when this fd is cached;
			 * this forces the first DP_POLL to poll this fd.
			 * The real performance gain comes from subsequent
			 * DP_POLLs. We also attempt a pollhead_insert();
			 * if it's not possible, we'll do it in dpioctl().
			 */
			BT_SET(pcp->pc_bitmap, fd);
			if (error != 0) {
				releasef(fd);
				break;
			}
			pdp->pd_fp = fp;
			pdp->pd_events |= pfdp->events;
			if (php != NULL) {
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				} else {
					if (pdp->pd_php != php) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pollhead_insert(php, pdp);
						pdp->pd_php = php;
					}
				}

			}
			releasef(fd);
		} else {
			if (pdp == NULL) {
				continue;
			}
			ASSERT(pdp->pd_fd == fd);
			pdp->pd_fp = NULL;
			pdp->pd_events = 0;
			ASSERT(pdp->pd_thread == NULL);
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			BT_CLEAR(pcp->pc_bitmap, fd);
		}
	}
	mutex_exit(&pcp->pc_lock);
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
	ASSERT(dpep->dpe_refcnt == 1);
	dpep->dpe_refcnt--;
	cv_broadcast(&dpep->dpe_cv);
	mutex_exit(&dpep->dpe_lock);
	kmem_free(pollfdp, uiosize);
	return (error);
}

/*ARGSUSED*/
static int
dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		error = 0;
	STRUCT_DECL(dvpoll, dvpoll);

	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	mutex_exit(&devpoll_lock);
	ASSERT(dpep != NULL);
	pcp = dpep->dpe_pcache;
	if (curproc->p_pid != pcp->pc_pid)
		return (EACCES);

	mutex_enter(&dpep->dpe_lock);
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) ||
	    (dpep->dpe_writerwait != 0)) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			mutex_exit(&dpep->dpe_lock);
			return (EINTR);
		}
	}
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	switch (cmd) {
	case DP_POLL:
	{
		pollstate_t	*ps;
		nfds_t		nfds;
		int		fdcnt = 0;
		int		time_out;
		clock_t		*deltap = NULL;
		clock_t		delta;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll),
		    STRUCT_SIZE(dvpoll));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}

		time_out = STRUCT_FGET(dvpoll, dp_timeout);
		if (time_out > 0) {
			/*
			 * cv_relwaituntil_sig operates at the tick
			 * granularity, which by default is 10 ms.
			 * This results in rounding user-specified
			 * timeouts up, but prevents the system from
			 * being flooded with small high resolution
			 * timers.
			 */
			delta = MSEC_TO_TICK_ROUNDUP(time_out);
			deltap = &delta;
		}
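
		/*
		 * A side note on the timeout semantics, as a hypothetical
		 * userland sketch (wfd as in the usage comment at the top
		 * of this file): DP_POLL with dp_nfds == 0 degenerates
		 * into a pure tick-granularity sleep, handled just below
		 * without touching the pollcache at all.
		 *
		 *	struct dvpoll dvp;
		 *	dvp.dp_fds = NULL;
		 *	dvp.dp_nfds = 0;
		 *	dvp.dp_timeout = 50;	-- sleep ~50 ms, poll nothing
		 *	(void) ioctl(wfd, DP_POLL, &dvp);
		 */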

		if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) {
			/*
			 * We are just using DP_POLL to sleep, so
			 * we don't need any of the devpoll apparatus.
			 * Do not check for signals if we have a zero timeout.
			 */
			DP_REFRELE(dpep);
			if (time_out == 0)
				return (0);
			mutex_enter(&curthread->t_delay_lock);
			while ((delta = cv_relwaituntil_sig(
			    &curthread->t_delay_cv, &curthread->t_delay_lock,
			    deltap, TR_MILLISEC)) > 0) {
				continue;
			}
			mutex_exit(&curthread->t_delay_lock);
			return (delta == 0 ? EINTR : 0);
		}

		/*
		 * XXX It would be nice not to have to alloc each time, but it
		 * requires another per-thread structure hook. This can be
		 * implemented later if data suggests that it's necessary.
		 */
		if ((ps = curthread->t_pollstate) == NULL) {
			curthread->t_pollstate = pollstate_create();
			ps = curthread->t_pollstate;
		}
		if (ps->ps_dpbufsize < nfds) {
			struct proc *p = ttoproc(curthread);
			/*
			 * The maximum size should be no larger than
			 * the current maximum open file count.
			 */
			mutex_enter(&p->p_lock);
			if (nfds > p->p_fno_ctl) {
				mutex_exit(&p->p_lock);
				DP_REFRELE(dpep);
				return (EINVAL);
			}
			mutex_exit(&p->p_lock);
			kmem_free(ps->ps_dpbuf, sizeof (pollfd_t) *
			    ps->ps_dpbufsize);
			ps->ps_dpbuf = kmem_zalloc(sizeof (pollfd_t) *
			    nfds, KM_SLEEP);
			ps->ps_dpbufsize = nfds;
		}

		mutex_enter(&pcp->pc_lock);
		for (;;) {
			pcp->pc_flag = 0;
			error = dp_pcache_poll(ps->ps_dpbuf, pcp, nfds, &fdcnt);
			if (fdcnt > 0 || error != 0)
				break;

			/*
			 * A pollwake has happened since we last polled
			 * the cache; poll again.
			 */
			if (pcp->pc_flag & T_POLLWAKE)
				continue;

			/*
			 * Sleep until we are notified, signaled, or timed out.
			 * Do not check for signals if we have a zero timeout.
			 */
			if (time_out == 0)	/* immediate timeout */
				break;

			delta = cv_relwaituntil_sig(&pcp->pc_cv, &pcp->pc_lock,
			    deltap, TR_MILLISEC);
			/*
			 * If we were awakened by a signal or a timeout then
			 * break the loop, else poll again.
			 */
			if (delta <= 0) {
				if (delta == 0)	/* signal */
					error = EINTR;
				break;
			}
		}
		mutex_exit(&pcp->pc_lock);

		if (error == 0 && fdcnt > 0) {
			if (copyout(ps->ps_dpbuf, STRUCT_FGETP(dvpoll,
			    dp_fds), sizeof (pollfd_t) * fdcnt)) {
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = fdcnt;
		}
		break;
	}

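	/*
	 * DP_ISPOLLED queries whether a single fd is currently in this
	 * cached set. An illustrative userland sketch (wfd and fd0 are
	 * hypothetical, as above); note that on success the cached
	 * events, not revents, are handed back in the revents field:
	 *
	 *	pollfd_t pfd;
	 *	pfd.fd = fd0;
	 *	if (ioctl(wfd, DP_ISPOLLED, &pfd) == 1)
	 *		-- pfd.revents now holds fd0's cached events
	 */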
	case DP_ISPOLLED:
	{
		pollfd_t	pollfd;
		polldat_t	*pdp;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}
		mutex_enter(&pcp->pc_lock);
		if (pcp->pc_hash == NULL) {
			/*
			 * No need to search because no poll fd
			 * has been cached.
			 */
			mutex_exit(&pcp->pc_lock);
			DP_REFRELE(dpep);
			return (0);
		}
		if (pollfd.fd < 0) {
			mutex_exit(&pcp->pc_lock);
			break;
		}
		pdp = pcache_lookup_fd(pcp, pollfd.fd);
		if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) &&
		    (pdp->pd_fp != NULL)) {
			pollfd.revents = pdp->pd_events;
			if (copyout(&pollfd, (caddr_t)arg, sizeof (pollfd_t))) {
				mutex_exit(&pcp->pc_lock);
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = 1;
		}
		mutex_exit(&pcp->pc_lock);
		break;
	}

	default:
		DP_REFRELE(dpep);
		return (EINVAL);
	}
	DP_REFRELE(dpep);
	return (error);
}

/*ARGSUSED*/
static int
dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	/*
	 * Polling on a /dev/poll fd is not fully supported yet.
	 */
	*reventsp = POLLERR;
	return (0);
}

/*
 * devpoll close should do enough cleanup before the pollcache is deleted,
 * i.e., it should ensure that no one still references the pollcache later.
 * There is no "permission" check in here. Any process holding the last
 * reference to this /dev/poll fd can close it.
 */
/*ARGSUSED*/
static int
dpclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		i;
	polldat_t	**hashtbl;
	polldat_t	*pdp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	devpolltbl[minor] = NULL;
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	ASSERT(pcp != NULL);
	/*
	 * At this point, no other lwp can access this pollcache via the
	 * /dev/poll fd. This pollcache is going away, so do the cleanup
	 * without the pc_lock.
	 */
	hashtbl = pcp->pc_hash;
	for (i = 0; i < pcp->pc_hashsize; i++) {
		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
				pdp->pd_fp = NULL;
			}
		}
	}
	/*
	 * pollwakeup() may still interact with this pollcache. Wait until
	 * it is done.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);
	pcache_destroy(pcp);
	ASSERT(dpep->dpe_refcnt == 0);
	kmem_free(dpep, sizeof (dp_entry_t));
	return (0);
}