1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * Copyright (c) 2012 by Delphix. All rights reserved. 28 * Copyright 2016 Joyent, Inc. 29 */ 30 31 #include <sys/types.h> 32 #include <sys/devops.h> 33 #include <sys/conf.h> 34 #include <sys/modctl.h> 35 #include <sys/sunddi.h> 36 #include <sys/stat.h> 37 #include <sys/poll_impl.h> 38 #include <sys/errno.h> 39 #include <sys/kmem.h> 40 #include <sys/mkdev.h> 41 #include <sys/debug.h> 42 #include <sys/file.h> 43 #include <sys/sysmacros.h> 44 #include <sys/systm.h> 45 #include <sys/bitmap.h> 46 #include <sys/devpoll.h> 47 #include <sys/rctl.h> 48 #include <sys/resource.h> 49 #include <sys/schedctl.h> 50 #include <sys/epoll.h> 51 52 #define RESERVED 1 53 54 /* local data struct */ 55 static dp_entry_t **devpolltbl; /* dev poll entries */ 56 static size_t dptblsize; 57 58 static kmutex_t devpoll_lock; /* lock protecting dev tbl */ 59 int devpoll_init; /* is /dev/poll initialized already */ 60 61 /* device local functions */ 62 63 static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp); 64 static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp); 65 static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 66 int *rvalp); 67 static int dppoll(dev_t dev, short events, int anyyet, short *reventsp, 68 struct pollhead **phpp); 69 static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp); 70 static dev_info_t *dpdevi; 71 72 73 static struct cb_ops dp_cb_ops = { 74 dpopen, /* open */ 75 dpclose, /* close */ 76 nodev, /* strategy */ 77 nodev, /* print */ 78 nodev, /* dump */ 79 nodev, /* read */ 80 dpwrite, /* write */ 81 dpioctl, /* ioctl */ 82 nodev, /* devmap */ 83 nodev, /* mmap */ 84 nodev, /* segmap */ 85 dppoll, /* poll */ 86 ddi_prop_op, /* prop_op */ 87 (struct streamtab *)0, /* streamtab */ 88 D_MP, /* flags */ 89 CB_REV, /* cb_ops revision */ 90 nodev, /* aread */ 91 nodev /* awrite */ 92 }; 93 94 static int dpattach(dev_info_t *, ddi_attach_cmd_t); 95 static int dpdetach(dev_info_t *, ddi_detach_cmd_t); 96 static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 97 98 static struct dev_ops dp_ops = { 99 DEVO_REV, /* devo_rev */ 100 0, /* refcnt */ 101 dpinfo, /* info */ 102 nulldev, /* identify */ 103 nulldev, /* probe */ 104 dpattach, /* attach */ 105 dpdetach, /* detach */ 106 nodev, /* reset */ 107 &dp_cb_ops, /* driver operations */ 108 (struct bus_ops *)NULL, /* bus operations */ 109 nulldev, /* power */ 110 ddi_quiesce_not_needed, /* quiesce */ 111 }; 112 113 114 static struct modldrv modldrv = { 115 &mod_driverops, /* type of module - a driver */ 116 "/dev/poll driver", 117 &dp_ops, 118 }; 119 120 static struct modlinkage modlinkage = { 121 MODREV_1, 122 (void *)&modldrv, 123 NULL 124 }; 125 126 static void pcachelink_assoc(pollcache_t *, pollcache_t *); 127 static void pcachelink_mark_stale(pollcache_t *); 128 static void pcachelink_purge_stale(pollcache_t *); 129 static void pcachelink_purge_all(pollcache_t *); 130 131 132 /* 133 * Locking Design 134 * 135 * The /dev/poll driver shares most of its code with poll sys call whose 136 * code is in common/syscall/poll.c. In poll(2) design, the pollcache 137 * structure is per lwp. An implicit assumption is made there that some 138 * portion of pollcache will never be touched by other lwps. E.g., in 139 * poll(2) design, no lwp will ever need to grow bitmap of other lwp. 140 * This assumption is not true for /dev/poll; hence the need for extra 141 * locking. 142 * 143 * To allow more parallelism, each /dev/poll file descriptor (indexed by 144 * minor number) has its own lock. Since read (dpioctl) is a much more 145 * frequent operation than write, we want to allow multiple reads on same 146 * /dev/poll fd. However, we prevent writes from being starved by giving 147 * priority to write operation. Theoretically writes can starve reads as 148 * well. But in practical sense this is not important because (1) writes 149 * happens less often than reads, and (2) write operation defines the 150 * content of poll fd a cache set. If writes happens so often that they 151 * can starve reads, that means the cached set is very unstable. It may 152 * not make sense to read an unstable cache set anyway. Therefore, the 153 * writers starving readers case is not handled in this design. 154 */ 155 156 int 157 _init() 158 { 159 int error; 160 161 dptblsize = DEVPOLLSIZE; 162 devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP); 163 mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL); 164 devpoll_init = 1; 165 if ((error = mod_install(&modlinkage)) != 0) { 166 kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize); 167 devpoll_init = 0; 168 } 169 return (error); 170 } 171 172 int 173 _fini() 174 { 175 int error; 176 177 if ((error = mod_remove(&modlinkage)) != 0) { 178 return (error); 179 } 180 mutex_destroy(&devpoll_lock); 181 kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize); 182 return (0); 183 } 184 185 int 186 _info(struct modinfo *modinfop) 187 { 188 return (mod_info(&modlinkage, modinfop)); 189 } 190 191 /*ARGSUSED*/ 192 static int 193 dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 194 { 195 if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL) 196 == DDI_FAILURE) { 197 ddi_remove_minor_node(devi, NULL); 198 return (DDI_FAILURE); 199 } 200 dpdevi = devi; 201 return (DDI_SUCCESS); 202 } 203 204 static int 205 dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 206 { 207 if (cmd != DDI_DETACH) 208 return (DDI_FAILURE); 209 210 ddi_remove_minor_node(devi, NULL); 211 return (DDI_SUCCESS); 212 } 213 214 /* ARGSUSED */ 215 static int 216 dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 217 { 218 int error; 219 220 switch (infocmd) { 221 case DDI_INFO_DEVT2DEVINFO: 222 *result = (void *)dpdevi; 223 error = DDI_SUCCESS; 224 break; 225 case DDI_INFO_DEVT2INSTANCE: 226 *result = (void *)0; 227 error = DDI_SUCCESS; 228 break; 229 default: 230 error = DDI_FAILURE; 231 } 232 return (error); 233 } 234 235 /* 236 * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major 237 * differences are: (1) /dev/poll requires scanning the bitmap starting at 238 * where it was stopped last time, instead of always starting from 0, 239 * (2) since user may not have cleaned up the cached fds when they are 240 * closed, some polldats in cache may refer to closed or reused fds. We 241 * need to check for those cases. 242 * 243 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for 244 * poll(2) caches but NOT for /dev/poll caches. So expect some 245 * stale entries! 246 */ 247 static int 248 dp_pcache_poll(dp_entry_t *dpep, void *dpbuf, 249 pollcache_t *pcp, nfds_t nfds, int *fdcntp) 250 { 251 int start, ostart, end; 252 int fdcnt, fd; 253 boolean_t done; 254 file_t *fp; 255 short revent; 256 boolean_t no_wrap; 257 pollhead_t *php; 258 polldat_t *pdp; 259 pollfd_t *pfdp; 260 epoll_event_t *epoll; 261 int error = 0; 262 short mask = POLLRDHUP | POLLWRBAND; 263 boolean_t is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0; 264 265 ASSERT(MUTEX_HELD(&pcp->pc_lock)); 266 if (pcp->pc_bitmap == NULL) { 267 /* 268 * No Need to search because no poll fd 269 * has been cached. 270 */ 271 return (error); 272 } 273 274 if (is_epoll) { 275 pfdp = NULL; 276 epoll = (epoll_event_t *)dpbuf; 277 } else { 278 pfdp = (pollfd_t *)dpbuf; 279 epoll = NULL; 280 } 281 retry: 282 start = ostart = pcp->pc_mapstart; 283 end = pcp->pc_mapend; 284 php = NULL; 285 286 if (start == 0) { 287 /* 288 * started from every begining, no need to wrap around. 289 */ 290 no_wrap = B_TRUE; 291 } else { 292 no_wrap = B_FALSE; 293 } 294 done = B_FALSE; 295 fdcnt = 0; 296 while ((fdcnt < nfds) && !done) { 297 php = NULL; 298 revent = 0; 299 /* 300 * Examine the bit map in a circular fashion 301 * to avoid starvation. Always resume from 302 * last stop. Scan till end of the map. Then 303 * wrap around. 304 */ 305 fd = bt_getlowbit(pcp->pc_bitmap, start, end); 306 ASSERT(fd <= end); 307 if (fd >= 0) { 308 if (fd == end) { 309 if (no_wrap) { 310 done = B_TRUE; 311 } else { 312 start = 0; 313 end = ostart - 1; 314 no_wrap = B_TRUE; 315 } 316 } else { 317 start = fd + 1; 318 } 319 pdp = pcache_lookup_fd(pcp, fd); 320 repoll: 321 ASSERT(pdp != NULL); 322 ASSERT(pdp->pd_fd == fd); 323 if (pdp->pd_fp == NULL) { 324 /* 325 * The fd is POLLREMOVed. This fd is 326 * logically no longer cached. So move 327 * on to the next one. 328 */ 329 continue; 330 } 331 if ((fp = getf(fd)) == NULL) { 332 /* 333 * The fd has been closed, but user has not 334 * done a POLLREMOVE on this fd yet. Instead 335 * of cleaning it here implicitly, we return 336 * POLLNVAL. This is consistent with poll(2) 337 * polling a closed fd. Hope this will remind 338 * user to do a POLLREMOVE. 339 */ 340 if (!is_epoll && pfdp != NULL) { 341 pfdp[fdcnt].fd = fd; 342 pfdp[fdcnt].revents = POLLNVAL; 343 fdcnt++; 344 continue; 345 } 346 347 /* 348 * In the epoll compatibility case, we actually 349 * perform the implicit removal to remain 350 * closer to the epoll semantics. 351 */ 352 if (is_epoll) { 353 pdp->pd_fp = NULL; 354 pdp->pd_events = 0; 355 356 if (pdp->pd_php != NULL) { 357 pollhead_delete(pdp->pd_php, 358 pdp); 359 pdp->pd_php = NULL; 360 } 361 362 BT_CLEAR(pcp->pc_bitmap, fd); 363 continue; 364 } 365 } 366 367 if (fp != pdp->pd_fp) { 368 /* 369 * user is polling on a cached fd which was 370 * closed and then reused. Unfortunately 371 * there is no good way to inform user. 372 * If the file struct is also reused, we 373 * may not be able to detect the fd reuse 374 * at all. As long as this does not 375 * cause system failure and/or memory leak, 376 * we will play along. Man page states if 377 * user does not clean up closed fds, polling 378 * results will be indeterministic. 379 * 380 * XXX - perhaps log the detection of fd 381 * reuse? 382 */ 383 pdp->pd_fp = fp; 384 } 385 /* 386 * XXX - pollrelock() logic needs to know which 387 * which pollcache lock to grab. It'd be a 388 * cleaner solution if we could pass pcp as 389 * an arguement in VOP_POLL interface instead 390 * of implicitly passing it using thread_t 391 * struct. On the other hand, changing VOP_POLL 392 * interface will require all driver/file system 393 * poll routine to change. May want to revisit 394 * the tradeoff later. 395 */ 396 curthread->t_pollcache = pcp; 397 error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0, 398 &revent, &php, NULL); 399 curthread->t_pollcache = NULL; 400 releasef(fd); 401 if (error != 0) { 402 break; 403 } 404 405 /* 406 * layered devices (e.g. console driver) 407 * may change the vnode and thus the pollhead 408 * pointer out from underneath us. 409 */ 410 if (php != NULL && pdp->pd_php != NULL && 411 php != pdp->pd_php) { 412 pollhead_delete(pdp->pd_php, pdp); 413 pdp->pd_php = php; 414 pollhead_insert(php, pdp); 415 /* 416 * The bit should still be set. 417 */ 418 ASSERT(BT_TEST(pcp->pc_bitmap, fd)); 419 goto retry; 420 } 421 422 if (revent != 0) { 423 if (pfdp != NULL) { 424 pfdp[fdcnt].fd = fd; 425 pfdp[fdcnt].events = pdp->pd_events; 426 pfdp[fdcnt].revents = revent; 427 } else if (epoll != NULL) { 428 epoll_event_t *ep = &epoll[fdcnt]; 429 430 ASSERT(epoll != NULL); 431 ep->data.u64 = pdp->pd_epolldata; 432 433 /* 434 * If any of the event bits are set for 435 * which poll and epoll representations 436 * differ, swizzle in the native epoll 437 * values. 438 */ 439 if (revent & mask) { 440 ep->events = (revent & ~mask) | 441 ((revent & POLLRDHUP) ? 442 EPOLLRDHUP : 0) | 443 ((revent & POLLWRBAND) ? 444 EPOLLWRBAND : 0); 445 } else { 446 ep->events = revent; 447 } 448 449 /* 450 * We define POLLWRNORM to be POLLOUT, 451 * but epoll has separate definitions 452 * for them; if POLLOUT is set and the 453 * user has asked for EPOLLWRNORM, set 454 * that as well. 455 */ 456 if ((revent & POLLOUT) && 457 (pdp->pd_events & EPOLLWRNORM)) { 458 ep->events |= EPOLLWRNORM; 459 } 460 } else { 461 pollstate_t *ps = 462 curthread->t_pollstate; 463 /* 464 * The devpoll handle itself is being 465 * polled. Notify the caller of any 466 * readable event(s), leaving as much 467 * state as possible untouched. 468 */ 469 VERIFY(fdcnt == 0); 470 VERIFY(ps != NULL); 471 472 /* 473 * If a call to pollunlock() fails 474 * during VOP_POLL, skip over the fd 475 * and continue polling. 476 * 477 * Otherwise, report that there is an 478 * event pending. 479 */ 480 if ((ps->ps_flags & POLLSTATE_ULFAIL) 481 != 0) { 482 ps->ps_flags &= 483 ~POLLSTATE_ULFAIL; 484 continue; 485 } else { 486 fdcnt++; 487 break; 488 } 489 } 490 491 /* 492 * If POLLET is set, clear the bit in the 493 * bitmap -- which effectively latches the 494 * edge on a pollwakeup() from the driver. 495 */ 496 if (pdp->pd_events & POLLET) 497 BT_CLEAR(pcp->pc_bitmap, fd); 498 499 /* 500 * If POLLONESHOT is set, perform the implicit 501 * POLLREMOVE. 502 */ 503 if (pdp->pd_events & POLLONESHOT) { 504 pdp->pd_fp = NULL; 505 pdp->pd_events = 0; 506 507 if (pdp->pd_php != NULL) { 508 pollhead_delete(pdp->pd_php, 509 pdp); 510 pdp->pd_php = NULL; 511 } 512 513 BT_CLEAR(pcp->pc_bitmap, fd); 514 } 515 516 fdcnt++; 517 } else if (php != NULL) { 518 /* 519 * We clear a bit or cache a poll fd if 520 * the driver returns a poll head ptr, 521 * which is expected in the case of 0 522 * revents. Some buggy driver may return 523 * NULL php pointer with 0 revents. In 524 * this case, we just treat the driver as 525 * "noncachable" and not clearing the bit 526 * in bitmap. 527 */ 528 if ((pdp->pd_php != NULL) && 529 ((pcp->pc_flag & PC_POLLWAKE) == 0)) { 530 BT_CLEAR(pcp->pc_bitmap, fd); 531 } 532 if (pdp->pd_php == NULL) { 533 pollhead_insert(php, pdp); 534 pdp->pd_php = php; 535 /* 536 * An event of interest may have 537 * arrived between the VOP_POLL() and 538 * the pollhead_insert(); check again. 539 */ 540 goto repoll; 541 } 542 } 543 } else { 544 /* 545 * No bit set in the range. Check for wrap around. 546 */ 547 if (!no_wrap) { 548 start = 0; 549 end = ostart - 1; 550 no_wrap = B_TRUE; 551 } else { 552 done = B_TRUE; 553 } 554 } 555 } 556 557 if (!done) { 558 pcp->pc_mapstart = start; 559 } 560 ASSERT(*fdcntp == 0); 561 *fdcntp = fdcnt; 562 return (error); 563 } 564 565 /*ARGSUSED*/ 566 static int 567 dpopen(dev_t *devp, int flag, int otyp, cred_t *credp) 568 { 569 minor_t minordev; 570 dp_entry_t *dpep; 571 pollcache_t *pcp; 572 573 ASSERT(devpoll_init); 574 ASSERT(dptblsize <= MAXMIN); 575 mutex_enter(&devpoll_lock); 576 for (minordev = 0; minordev < dptblsize; minordev++) { 577 if (devpolltbl[minordev] == NULL) { 578 devpolltbl[minordev] = (dp_entry_t *)RESERVED; 579 break; 580 } 581 } 582 if (minordev == dptblsize) { 583 dp_entry_t **newtbl; 584 size_t oldsize; 585 586 /* 587 * Used up every entry in the existing devpoll table. 588 * Grow the table by DEVPOLLSIZE. 589 */ 590 if ((oldsize = dptblsize) >= MAXMIN) { 591 mutex_exit(&devpoll_lock); 592 return (ENXIO); 593 } 594 dptblsize += DEVPOLLSIZE; 595 if (dptblsize > MAXMIN) { 596 dptblsize = MAXMIN; 597 } 598 newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP); 599 bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize); 600 kmem_free(devpolltbl, sizeof (caddr_t) * oldsize); 601 devpolltbl = newtbl; 602 devpolltbl[minordev] = (dp_entry_t *)RESERVED; 603 } 604 mutex_exit(&devpoll_lock); 605 606 dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP); 607 /* 608 * allocate a pollcache skeleton here. Delay allocating bitmap 609 * structures until dpwrite() time, since we don't know the 610 * optimal size yet. We also delay setting the pid until either 611 * dpwrite() or attempt to poll on the instance, allowing parents 612 * to create instances of /dev/poll for their children. (In the 613 * epoll compatibility case, this check isn't performed to maintain 614 * semantic compatibility.) 615 */ 616 pcp = pcache_alloc(); 617 dpep->dpe_pcache = pcp; 618 pcp->pc_pid = -1; 619 *devp = makedevice(getmajor(*devp), minordev); /* clone the driver */ 620 mutex_enter(&devpoll_lock); 621 ASSERT(minordev < dptblsize); 622 ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED); 623 devpolltbl[minordev] = dpep; 624 mutex_exit(&devpoll_lock); 625 return (0); 626 } 627 628 /* 629 * Write to dev/poll add/remove fd's to/from a cached poll fd set, 630 * or change poll events for a watched fd. 631 */ 632 /*ARGSUSED*/ 633 static int 634 dpwrite(dev_t dev, struct uio *uiop, cred_t *credp) 635 { 636 minor_t minor; 637 dp_entry_t *dpep; 638 pollcache_t *pcp; 639 pollfd_t *pollfdp, *pfdp; 640 dvpoll_epollfd_t *epfdp; 641 uintptr_t limit; 642 int error, size; 643 ssize_t uiosize; 644 size_t copysize; 645 nfds_t pollfdnum; 646 struct pollhead *php = NULL; 647 polldat_t *pdp; 648 int fd; 649 file_t *fp; 650 boolean_t is_epoll, fds_added = B_FALSE; 651 652 minor = getminor(dev); 653 654 mutex_enter(&devpoll_lock); 655 ASSERT(minor < dptblsize); 656 dpep = devpolltbl[minor]; 657 ASSERT(dpep != NULL); 658 mutex_exit(&devpoll_lock); 659 660 mutex_enter(&dpep->dpe_lock); 661 pcp = dpep->dpe_pcache; 662 is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0; 663 size = (is_epoll) ? sizeof (dvpoll_epollfd_t) : sizeof (pollfd_t); 664 mutex_exit(&dpep->dpe_lock); 665 666 if (!is_epoll && curproc->p_pid != pcp->pc_pid) { 667 if (pcp->pc_pid != -1) { 668 return (EACCES); 669 } 670 671 pcp->pc_pid = curproc->p_pid; 672 } 673 674 uiosize = uiop->uio_resid; 675 pollfdnum = uiosize / size; 676 677 /* 678 * We want to make sure that pollfdnum isn't large enough to DoS us, 679 * but we also don't want to grab p_lock unnecessarily -- so we 680 * perform the full check against our resource limits if and only if 681 * pollfdnum is larger than the known-to-be-sane value of UINT8_MAX. 682 */ 683 if (pollfdnum > UINT8_MAX) { 684 mutex_enter(&curproc->p_lock); 685 if (pollfdnum > 686 (uint_t)rctl_enforced_value(rctlproc_legacy[RLIMIT_NOFILE], 687 curproc->p_rctls, curproc)) { 688 (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE], 689 curproc->p_rctls, curproc, RCA_SAFE); 690 mutex_exit(&curproc->p_lock); 691 return (EINVAL); 692 } 693 mutex_exit(&curproc->p_lock); 694 } 695 696 /* 697 * Copy in the pollfd array. Walk through the array and add 698 * each polled fd to the cached set. 699 */ 700 pollfdp = kmem_alloc(uiosize, KM_SLEEP); 701 limit = (uintptr_t)pollfdp + (pollfdnum * size); 702 703 /* 704 * Although /dev/poll uses the write(2) interface to cache fds, it's 705 * not supposed to function as a seekable device. To prevent offset 706 * from growing and eventually exceed the maximum, reset the offset 707 * here for every call. 708 */ 709 uiop->uio_loffset = 0; 710 711 /* 712 * Use uiocopy instead of uiomove when populating pollfdp, keeping 713 * uio_resid untouched for now. Write syscalls will translate EINTR 714 * into a success if they detect "successfully transfered" data via an 715 * updated uio_resid. Falsely suppressing such errors is disastrous. 716 */ 717 if ((error = uiocopy((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop, 718 ©size)) != 0) { 719 kmem_free(pollfdp, uiosize); 720 return (error); 721 } 722 723 /* 724 * We are about to enter the core portion of dpwrite(). Make sure this 725 * write has exclusive access in this portion of the code, i.e., no 726 * other writers in this code. 727 * 728 * Waiting for all readers to drop their references to the dpe is 729 * unecessary since the pollcache itself is protected by pc_lock. 730 */ 731 mutex_enter(&dpep->dpe_lock); 732 dpep->dpe_writerwait++; 733 while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0) { 734 ASSERT(dpep->dpe_refcnt != 0); 735 736 if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) { 737 dpep->dpe_writerwait--; 738 mutex_exit(&dpep->dpe_lock); 739 kmem_free(pollfdp, uiosize); 740 return (EINTR); 741 } 742 } 743 dpep->dpe_writerwait--; 744 dpep->dpe_flag |= DP_WRITER_PRESENT; 745 dpep->dpe_refcnt++; 746 747 if (!is_epoll && (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0) { 748 /* 749 * The epoll compat mode was enabled while we were waiting to 750 * establish write access. It is not safe to continue since 751 * state was prepared for non-epoll operation. 752 */ 753 error = EBUSY; 754 goto bypass; 755 } 756 mutex_exit(&dpep->dpe_lock); 757 758 /* 759 * Since the dpwrite() may recursively walk an added /dev/poll handle, 760 * pollstate_enter() deadlock and loop detection must be used. 761 */ 762 (void) pollstate_create(); 763 VERIFY(pollstate_enter(pcp) == PSE_SUCCESS); 764 765 if (pcp->pc_bitmap == NULL) { 766 pcache_create(pcp, pollfdnum); 767 } 768 for (pfdp = pollfdp; (uintptr_t)pfdp < limit; 769 pfdp = (pollfd_t *)((uintptr_t)pfdp + size)) { 770 fd = pfdp->fd; 771 if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles) { 772 /* 773 * epoll semantics demand that we return EBADF if our 774 * specified fd is invalid. 775 */ 776 if (is_epoll) { 777 error = EBADF; 778 break; 779 } 780 781 continue; 782 } 783 784 pdp = pcache_lookup_fd(pcp, fd); 785 if (pfdp->events != POLLREMOVE) { 786 787 fp = NULL; 788 789 if (pdp == NULL) { 790 /* 791 * If we're in epoll compatibility mode, check 792 * that the fd is valid before allocating 793 * anything for it; epoll semantics demand that 794 * we return EBADF if our specified fd is 795 * invalid. 796 */ 797 if (is_epoll) { 798 if ((fp = getf(fd)) == NULL) { 799 error = EBADF; 800 break; 801 } 802 } 803 804 pdp = pcache_alloc_fd(0); 805 pdp->pd_fd = fd; 806 pdp->pd_pcache = pcp; 807 pcache_insert_fd(pcp, pdp, pollfdnum); 808 } else { 809 /* 810 * epoll semantics demand that we error out if 811 * a file descriptor is added twice, which we 812 * check (imperfectly) by checking if we both 813 * have the file descriptor cached and the 814 * file pointer that correponds to the file 815 * descriptor matches our cached value. If 816 * there is a pointer mismatch, the file 817 * descriptor was closed without being removed. 818 * The converse is clearly not true, however, 819 * so to narrow the window by which a spurious 820 * EEXIST may be returned, we also check if 821 * this fp has been added to an epoll control 822 * descriptor in the past; if it hasn't, we 823 * know that this is due to fp reuse -- it's 824 * not a true EEXIST case. (By performing this 825 * additional check, we limit the window of 826 * spurious EEXIST to situations where a single 827 * file descriptor is being used across two or 828 * more epoll control descriptors -- and even 829 * then, the file descriptor must be closed and 830 * reused in a relatively tight time span.) 831 */ 832 if (is_epoll) { 833 if (pdp->pd_fp != NULL && 834 (fp = getf(fd)) != NULL && 835 fp == pdp->pd_fp && 836 (fp->f_flag2 & FEPOLLED)) { 837 error = EEXIST; 838 releasef(fd); 839 break; 840 } 841 842 /* 843 * We have decided that the cached 844 * information was stale: it either 845 * didn't match, or the fp had never 846 * actually been epoll()'d on before. 847 * We need to now clear our pd_events 848 * to assure that we don't mistakenly 849 * operate on cached event disposition. 850 */ 851 pdp->pd_events = 0; 852 } 853 } 854 855 if (is_epoll) { 856 epfdp = (dvpoll_epollfd_t *)pfdp; 857 pdp->pd_epolldata = epfdp->dpep_data; 858 } 859 860 ASSERT(pdp->pd_fd == fd); 861 ASSERT(pdp->pd_pcache == pcp); 862 if (fd >= pcp->pc_mapsize) { 863 mutex_exit(&pcp->pc_lock); 864 pcache_grow_map(pcp, fd); 865 mutex_enter(&pcp->pc_lock); 866 } 867 if (fd > pcp->pc_mapend) { 868 pcp->pc_mapend = fd; 869 } 870 if (fp == NULL && (fp = getf(fd)) == NULL) { 871 /* 872 * The fd is not valid. Since we can't pass 873 * this error back in the write() call, set 874 * the bit in bitmap to force DP_POLL ioctl 875 * to examine it. 876 */ 877 BT_SET(pcp->pc_bitmap, fd); 878 pdp->pd_events |= pfdp->events; 879 continue; 880 } 881 882 /* 883 * To (greatly) reduce EEXIST false positives, we 884 * denote that this fp has been epoll()'d. We do this 885 * regardless of epoll compatibility mode, as the flag 886 * is harmless if not in epoll compatibility mode. 887 */ 888 fp->f_flag2 |= FEPOLLED; 889 890 /* 891 * Don't do VOP_POLL for an already cached fd with 892 * same poll events. 893 */ 894 if ((pdp->pd_events == pfdp->events) && 895 (pdp->pd_fp == fp)) { 896 /* 897 * the events are already cached 898 */ 899 releasef(fd); 900 continue; 901 } 902 903 /* 904 * do VOP_POLL and cache this poll fd. 905 */ 906 /* 907 * XXX - pollrelock() logic needs to know which 908 * which pollcache lock to grab. It'd be a 909 * cleaner solution if we could pass pcp as 910 * an arguement in VOP_POLL interface instead 911 * of implicitly passing it using thread_t 912 * struct. On the other hand, changing VOP_POLL 913 * interface will require all driver/file system 914 * poll routine to change. May want to revisit 915 * the tradeoff later. 916 */ 917 curthread->t_pollcache = pcp; 918 error = VOP_POLL(fp->f_vnode, pfdp->events, 0, 919 &pfdp->revents, &php, NULL); 920 curthread->t_pollcache = NULL; 921 /* 922 * We always set the bit when this fd is cached; 923 * this forces the first DP_POLL to poll this fd. 924 * Real performance gain comes from subsequent 925 * DP_POLL. We also attempt a pollhead_insert(); 926 * if it's not possible, we'll do it in dpioctl(). 927 */ 928 BT_SET(pcp->pc_bitmap, fd); 929 if (error != 0) { 930 releasef(fd); 931 break; 932 } 933 pdp->pd_fp = fp; 934 pdp->pd_events |= pfdp->events; 935 if (php != NULL) { 936 if (pdp->pd_php == NULL) { 937 pollhead_insert(php, pdp); 938 pdp->pd_php = php; 939 } else { 940 if (pdp->pd_php != php) { 941 pollhead_delete(pdp->pd_php, 942 pdp); 943 pollhead_insert(php, pdp); 944 pdp->pd_php = php; 945 } 946 } 947 } 948 fds_added = B_TRUE; 949 releasef(fd); 950 } else { 951 if (pdp == NULL || pdp->pd_fp == NULL) { 952 if (is_epoll) { 953 /* 954 * As with the add case (above), epoll 955 * semantics demand that we error out 956 * in this case. 957 */ 958 error = ENOENT; 959 break; 960 } 961 962 continue; 963 } 964 ASSERT(pdp->pd_fd == fd); 965 pdp->pd_fp = NULL; 966 pdp->pd_events = 0; 967 ASSERT(pdp->pd_thread == NULL); 968 if (pdp->pd_php != NULL) { 969 pollhead_delete(pdp->pd_php, pdp); 970 pdp->pd_php = NULL; 971 } 972 BT_CLEAR(pcp->pc_bitmap, fd); 973 } 974 } 975 /* 976 * Wake any pollcache waiters so they can check the new descriptors. 977 * 978 * Any fds added to an recursive-capable pollcache could themselves be 979 * /dev/poll handles. To ensure that proper event propagation occurs, 980 * parent pollcaches are woken too, so that they can create any needed 981 * pollcache links. 982 */ 983 if (fds_added) { 984 cv_broadcast(&pcp->pc_cv); 985 pcache_wake_parents(pcp); 986 } 987 pollstate_exit(pcp); 988 mutex_enter(&dpep->dpe_lock); 989 bypass: 990 dpep->dpe_flag &= ~DP_WRITER_PRESENT; 991 dpep->dpe_refcnt--; 992 cv_broadcast(&dpep->dpe_cv); 993 mutex_exit(&dpep->dpe_lock); 994 kmem_free(pollfdp, uiosize); 995 if (error == 0) { 996 /* 997 * The state of uio_resid is updated only after the pollcache 998 * is successfully modified. 999 */ 1000 uioskip(uiop, copysize); 1001 } 1002 return (error); 1003 } 1004 1005 #define DP_SIGMASK_RESTORE(ksetp) { \ 1006 if (ksetp != NULL) { \ 1007 mutex_enter(&p->p_lock); \ 1008 if (lwp->lwp_cursig == 0) { \ 1009 t->t_hold = lwp->lwp_sigoldmask; \ 1010 t->t_flag &= ~T_TOMASK; \ 1011 } \ 1012 mutex_exit(&p->p_lock); \ 1013 } \ 1014 } 1015 1016 /*ARGSUSED*/ 1017 static int 1018 dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp) 1019 { 1020 minor_t minor; 1021 dp_entry_t *dpep; 1022 pollcache_t *pcp; 1023 hrtime_t now; 1024 int error = 0; 1025 boolean_t is_epoll; 1026 STRUCT_DECL(dvpoll, dvpoll); 1027 1028 if (cmd == DP_POLL || cmd == DP_PPOLL) { 1029 /* do this now, before we sleep on DP_WRITER_PRESENT */ 1030 now = gethrtime(); 1031 } 1032 1033 minor = getminor(dev); 1034 mutex_enter(&devpoll_lock); 1035 ASSERT(minor < dptblsize); 1036 dpep = devpolltbl[minor]; 1037 mutex_exit(&devpoll_lock); 1038 ASSERT(dpep != NULL); 1039 pcp = dpep->dpe_pcache; 1040 1041 mutex_enter(&dpep->dpe_lock); 1042 is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0; 1043 1044 if (cmd == DP_EPOLLCOMPAT) { 1045 if (dpep->dpe_refcnt != 0) { 1046 /* 1047 * We can't turn on epoll compatibility while there 1048 * are outstanding operations. 1049 */ 1050 mutex_exit(&dpep->dpe_lock); 1051 return (EBUSY); 1052 } 1053 1054 /* 1055 * epoll compatibility is a one-way street: there's no way 1056 * to turn it off for a particular open. 1057 */ 1058 dpep->dpe_flag |= DP_ISEPOLLCOMPAT; 1059 mutex_exit(&dpep->dpe_lock); 1060 1061 return (0); 1062 } 1063 1064 if (!is_epoll && curproc->p_pid != pcp->pc_pid) { 1065 if (pcp->pc_pid != -1) { 1066 mutex_exit(&dpep->dpe_lock); 1067 return (EACCES); 1068 } 1069 1070 pcp->pc_pid = curproc->p_pid; 1071 } 1072 1073 /* Wait until all writers have cleared the handle before continuing */ 1074 while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0 || 1075 (dpep->dpe_writerwait != 0)) { 1076 if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) { 1077 mutex_exit(&dpep->dpe_lock); 1078 return (EINTR); 1079 } 1080 } 1081 dpep->dpe_refcnt++; 1082 mutex_exit(&dpep->dpe_lock); 1083 1084 switch (cmd) { 1085 case DP_POLL: 1086 case DP_PPOLL: 1087 { 1088 pollstate_t *ps; 1089 nfds_t nfds; 1090 int fdcnt = 0; 1091 size_t size, fdsize, dpsize; 1092 hrtime_t deadline = 0; 1093 k_sigset_t *ksetp = NULL; 1094 k_sigset_t kset; 1095 sigset_t set; 1096 kthread_t *t = curthread; 1097 klwp_t *lwp = ttolwp(t); 1098 struct proc *p = ttoproc(curthread); 1099 1100 STRUCT_INIT(dvpoll, mode); 1101 1102 /* 1103 * The dp_setp member is only required/consumed for DP_PPOLL, 1104 * which otherwise uses the same structure as DP_POLL. 1105 */ 1106 if (cmd == DP_POLL) { 1107 dpsize = (uintptr_t)STRUCT_FADDR(dvpoll, dp_setp) - 1108 (uintptr_t)STRUCT_FADDR(dvpoll, dp_fds); 1109 } else { 1110 ASSERT(cmd == DP_PPOLL); 1111 dpsize = STRUCT_SIZE(dvpoll); 1112 } 1113 1114 if ((mode & FKIOCTL) != 0) { 1115 /* Kernel-internal ioctl call */ 1116 bcopy((caddr_t)arg, STRUCT_BUF(dvpoll), dpsize); 1117 error = 0; 1118 } else { 1119 error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll), 1120 dpsize); 1121 } 1122 1123 if (error) { 1124 DP_REFRELE(dpep); 1125 return (EFAULT); 1126 } 1127 1128 deadline = STRUCT_FGET(dvpoll, dp_timeout); 1129 if (deadline > 0) { 1130 /* 1131 * Convert the deadline from relative milliseconds 1132 * to absolute nanoseconds. They must wait for at 1133 * least a tick. 1134 */ 1135 deadline = MSEC2NSEC(deadline); 1136 deadline = MAX(deadline, nsec_per_tick); 1137 deadline += now; 1138 } 1139 1140 if (cmd == DP_PPOLL) { 1141 void *setp = STRUCT_FGETP(dvpoll, dp_setp); 1142 1143 if (setp != NULL) { 1144 if ((mode & FKIOCTL) != 0) { 1145 /* Use the signal set directly */ 1146 ksetp = (k_sigset_t *)setp; 1147 } else { 1148 if (copyin(setp, &set, sizeof (set))) { 1149 DP_REFRELE(dpep); 1150 return (EFAULT); 1151 } 1152 sigutok(&set, &kset); 1153 ksetp = &kset; 1154 } 1155 1156 mutex_enter(&p->p_lock); 1157 schedctl_finish_sigblock(t); 1158 lwp->lwp_sigoldmask = t->t_hold; 1159 t->t_hold = *ksetp; 1160 t->t_flag |= T_TOMASK; 1161 1162 /* 1163 * Like ppoll() with a non-NULL sigset, we'll 1164 * call cv_reltimedwait_sig() just to check for 1165 * signals. This call will return immediately 1166 * with either 0 (signalled) or -1 (no signal). 1167 * There are some conditions whereby we can 1168 * get 0 from cv_reltimedwait_sig() without 1169 * a true signal (e.g., a directed stop), so 1170 * we restore our signal mask in the unlikely 1171 * event that lwp_cursig is 0. 1172 */ 1173 if (!cv_reltimedwait_sig(&t->t_delay_cv, 1174 &p->p_lock, 0, TR_CLOCK_TICK)) { 1175 if (lwp->lwp_cursig == 0) { 1176 t->t_hold = lwp->lwp_sigoldmask; 1177 t->t_flag &= ~T_TOMASK; 1178 } 1179 1180 mutex_exit(&p->p_lock); 1181 1182 DP_REFRELE(dpep); 1183 return (EINTR); 1184 } 1185 1186 mutex_exit(&p->p_lock); 1187 } 1188 } 1189 1190 if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) { 1191 /* 1192 * We are just using DP_POLL to sleep, so 1193 * we don't any of the devpoll apparatus. 1194 * Do not check for signals if we have a zero timeout. 1195 */ 1196 DP_REFRELE(dpep); 1197 if (deadline == 0) { 1198 DP_SIGMASK_RESTORE(ksetp); 1199 return (0); 1200 } 1201 1202 mutex_enter(&curthread->t_delay_lock); 1203 while ((error = 1204 cv_timedwait_sig_hrtime(&curthread->t_delay_cv, 1205 &curthread->t_delay_lock, deadline)) > 0) 1206 continue; 1207 mutex_exit(&curthread->t_delay_lock); 1208 1209 DP_SIGMASK_RESTORE(ksetp); 1210 1211 return (error == 0 ? EINTR : 0); 1212 } 1213 1214 if (is_epoll) { 1215 size = nfds * (fdsize = sizeof (epoll_event_t)); 1216 } else { 1217 size = nfds * (fdsize = sizeof (pollfd_t)); 1218 } 1219 1220 /* 1221 * XXX It would be nice not to have to alloc each time, but it 1222 * requires another per thread structure hook. This can be 1223 * implemented later if data suggests that it's necessary. 1224 */ 1225 ps = pollstate_create(); 1226 1227 if (ps->ps_dpbufsize < size) { 1228 /* 1229 * If nfds is larger than twice the current maximum 1230 * open file count, we'll silently clamp it. This 1231 * only limits our exposure to allocating an 1232 * inordinate amount of kernel memory; it doesn't 1233 * otherwise affect the semantics. (We have this 1234 * check at twice the maximum instead of merely the 1235 * maximum because some applications pass an nfds that 1236 * is only slightly larger than their limit.) 1237 */ 1238 mutex_enter(&p->p_lock); 1239 if ((nfds >> 1) > p->p_fno_ctl) { 1240 nfds = p->p_fno_ctl; 1241 size = nfds * fdsize; 1242 } 1243 mutex_exit(&p->p_lock); 1244 1245 if (ps->ps_dpbufsize < size) { 1246 kmem_free(ps->ps_dpbuf, ps->ps_dpbufsize); 1247 ps->ps_dpbuf = kmem_zalloc(size, KM_SLEEP); 1248 ps->ps_dpbufsize = size; 1249 } 1250 } 1251 1252 VERIFY(pollstate_enter(pcp) == PSE_SUCCESS); 1253 for (;;) { 1254 pcp->pc_flag &= ~PC_POLLWAKE; 1255 1256 /* 1257 * Mark all child pcachelinks as stale. 1258 * Those which are still part of the tree will be 1259 * marked as valid during the poll. 1260 */ 1261 pcachelink_mark_stale(pcp); 1262 1263 error = dp_pcache_poll(dpep, ps->ps_dpbuf, 1264 pcp, nfds, &fdcnt); 1265 if (fdcnt > 0 || error != 0) 1266 break; 1267 1268 /* Purge still-stale child pcachelinks */ 1269 pcachelink_purge_stale(pcp); 1270 1271 /* 1272 * A pollwake has happened since we polled cache. 1273 */ 1274 if (pcp->pc_flag & PC_POLLWAKE) 1275 continue; 1276 1277 /* 1278 * Sleep until we are notified, signaled, or timed out. 1279 */ 1280 if (deadline == 0) { 1281 /* immediate timeout; do not check signals */ 1282 break; 1283 } 1284 1285 error = cv_timedwait_sig_hrtime(&pcp->pc_cv, 1286 &pcp->pc_lock, deadline); 1287 1288 /* 1289 * If we were awakened by a signal or timeout then 1290 * break the loop, else poll again. 1291 */ 1292 if (error <= 0) { 1293 error = (error == 0) ? EINTR : 0; 1294 break; 1295 } else { 1296 error = 0; 1297 } 1298 } 1299 pollstate_exit(pcp); 1300 1301 DP_SIGMASK_RESTORE(ksetp); 1302 1303 if (error == 0 && fdcnt > 0) { 1304 /* 1305 * It should be noted that FKIOCTL does not influence 1306 * the copyout (vs bcopy) of dp_fds at this time. 1307 */ 1308 if (copyout(ps->ps_dpbuf, 1309 STRUCT_FGETP(dvpoll, dp_fds), fdcnt * fdsize)) { 1310 DP_REFRELE(dpep); 1311 return (EFAULT); 1312 } 1313 *rvalp = fdcnt; 1314 } 1315 break; 1316 } 1317 1318 case DP_ISPOLLED: 1319 { 1320 pollfd_t pollfd; 1321 polldat_t *pdp; 1322 1323 STRUCT_INIT(dvpoll, mode); 1324 error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t)); 1325 if (error) { 1326 DP_REFRELE(dpep); 1327 return (EFAULT); 1328 } 1329 mutex_enter(&pcp->pc_lock); 1330 if (pcp->pc_hash == NULL) { 1331 /* 1332 * No Need to search because no poll fd 1333 * has been cached. 1334 */ 1335 mutex_exit(&pcp->pc_lock); 1336 DP_REFRELE(dpep); 1337 return (0); 1338 } 1339 if (pollfd.fd < 0) { 1340 mutex_exit(&pcp->pc_lock); 1341 break; 1342 } 1343 pdp = pcache_lookup_fd(pcp, pollfd.fd); 1344 if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) && 1345 (pdp->pd_fp != NULL)) { 1346 pollfd.revents = pdp->pd_events; 1347 if (copyout(&pollfd, (caddr_t)arg, sizeof (pollfd_t))) { 1348 mutex_exit(&pcp->pc_lock); 1349 DP_REFRELE(dpep); 1350 return (EFAULT); 1351 } 1352 *rvalp = 1; 1353 } 1354 mutex_exit(&pcp->pc_lock); 1355 break; 1356 } 1357 1358 default: 1359 DP_REFRELE(dpep); 1360 return (EINVAL); 1361 } 1362 DP_REFRELE(dpep); 1363 return (error); 1364 } 1365 1366 /* 1367 * Overview of Recursive Polling 1368 * 1369 * It is possible for /dev/poll to poll for events on file descriptors which 1370 * themselves are /dev/poll handles. Pending events in the child handle are 1371 * represented as readable data via the POLLIN flag. To limit surface area, 1372 * this recursion is presently allowed on only /dev/poll handles which have 1373 * been placed in epoll mode via the DP_EPOLLCOMPAT ioctl. Recursion depth is 1374 * limited to 5 in order to be consistent with Linux epoll. 1375 * 1376 * Extending dppoll() for VOP_POLL: 1377 * 1378 * The recursive /dev/poll implementation begins by extending dppoll() to 1379 * report when resources contained in the pollcache have relevant event state. 1380 * At the highest level, it means calling dp_pcache_poll() so it indicates if 1381 * fd events are present without consuming them or altering the pollcache 1382 * bitmap. This ensures that a subsequent DP_POLL operation on the bitmap will 1383 * yield the initiating event. Additionally, the VOP_POLL should return in 1384 * such a way that dp_pcache_poll() does not clear the parent bitmap entry 1385 * which corresponds to the child /dev/poll fd. This means that child 1386 * pollcaches will be checked during every poll which facilitates wake-up 1387 * behavior detailed below. 1388 * 1389 * Pollcache Links and Wake Events: 1390 * 1391 * Recursive /dev/poll avoids complicated pollcache locking constraints during 1392 * pollwakeup events by eschewing the traditional pollhead mechanism in favor 1393 * of a different approach. For each pollcache at the root of a recursive 1394 * /dev/poll "tree", pcachelink_t structures are established to all child 1395 * /dev/poll pollcaches. During pollnotify() in a child pollcache, the 1396 * linked list of pcachelink_t entries is walked, where those marked as valid 1397 * incur a cv_broadcast to their parent pollcache. Most notably, these 1398 * pcachelink_t cv wakeups are performed without acquiring pc_lock on the 1399 * parent pollcache (which would require careful deadlock avoidance). This 1400 * still allows the woken poll on the parent to discover the pertinent events 1401 * due to the fact that bitmap entires for the child pollcache are always 1402 * maintained by the dppoll() logic above. 1403 * 1404 * Depth Limiting and Loop Prevention: 1405 * 1406 * As each pollcache is encountered (either via DP_POLL or dppoll()), depth and 1407 * loop constraints are enforced via pollstate_enter(). The pollcache_t 1408 * pointer is compared against any existing entries in ps_pc_stack and is added 1409 * to the end if no match (and therefore loop) is found. Once poll operations 1410 * for a given pollcache_t are complete, pollstate_exit() clears the pointer 1411 * from the list. The pollstate_enter() and pollstate_exit() functions are 1412 * responsible for acquiring and releasing pc_lock, respectively. 1413 * 1414 * Deadlock Safety: 1415 * 1416 * Descending through a tree of recursive /dev/poll handles involves the tricky 1417 * business of sequentially entering multiple pollcache locks. This tree 1418 * topology cannot define a lock acquisition order in such a way that it is 1419 * immune to deadlocks between threads. The pollstate_enter() and 1420 * pollstate_exit() functions provide an interface for recursive /dev/poll 1421 * operations to safely lock pollcaches while failing gracefully in the face of 1422 * deadlocking topologies. (See pollstate_contend() for more detail about how 1423 * deadlocks are detected and resolved.) 1424 */ 1425 1426 /*ARGSUSED*/ 1427 static int 1428 dppoll(dev_t dev, short events, int anyyet, short *reventsp, 1429 struct pollhead **phpp) 1430 { 1431 minor_t minor; 1432 dp_entry_t *dpep; 1433 pollcache_t *pcp; 1434 int res, rc = 0; 1435 1436 minor = getminor(dev); 1437 mutex_enter(&devpoll_lock); 1438 ASSERT(minor < dptblsize); 1439 dpep = devpolltbl[minor]; 1440 ASSERT(dpep != NULL); 1441 mutex_exit(&devpoll_lock); 1442 1443 mutex_enter(&dpep->dpe_lock); 1444 if ((dpep->dpe_flag & DP_ISEPOLLCOMPAT) == 0) { 1445 /* Poll recursion is not yet supported for non-epoll handles */ 1446 *reventsp = POLLERR; 1447 mutex_exit(&dpep->dpe_lock); 1448 return (0); 1449 } else { 1450 dpep->dpe_refcnt++; 1451 pcp = dpep->dpe_pcache; 1452 mutex_exit(&dpep->dpe_lock); 1453 } 1454 1455 res = pollstate_enter(pcp); 1456 if (res == PSE_SUCCESS) { 1457 nfds_t nfds = 1; 1458 int fdcnt = 0; 1459 pollstate_t *ps = curthread->t_pollstate; 1460 1461 rc = dp_pcache_poll(dpep, NULL, pcp, nfds, &fdcnt); 1462 if (rc == 0) { 1463 *reventsp = (fdcnt > 0) ? POLLIN : 0; 1464 } 1465 pcachelink_assoc(pcp, ps->ps_pc_stack[0]); 1466 pollstate_exit(pcp); 1467 } else { 1468 switch (res) { 1469 case PSE_FAIL_DEPTH: 1470 rc = EINVAL; 1471 break; 1472 case PSE_FAIL_LOOP: 1473 case PSE_FAIL_DEADLOCK: 1474 rc = ELOOP; 1475 break; 1476 default: 1477 /* 1478 * If anything else has gone awry, such as being polled 1479 * from an unexpected context, fall back to the 1480 * recursion-intolerant response. 1481 */ 1482 *reventsp = POLLERR; 1483 rc = 0; 1484 break; 1485 } 1486 } 1487 1488 DP_REFRELE(dpep); 1489 return (rc); 1490 } 1491 1492 /* 1493 * devpoll close should do enough clean up before the pollcache is deleted, 1494 * i.e., it should ensure no one still references the pollcache later. 1495 * There is no "permission" check in here. Any process having the last 1496 * reference of this /dev/poll fd can close. 1497 */ 1498 /*ARGSUSED*/ 1499 static int 1500 dpclose(dev_t dev, int flag, int otyp, cred_t *credp) 1501 { 1502 minor_t minor; 1503 dp_entry_t *dpep; 1504 pollcache_t *pcp; 1505 int i; 1506 polldat_t **hashtbl; 1507 polldat_t *pdp; 1508 1509 minor = getminor(dev); 1510 1511 mutex_enter(&devpoll_lock); 1512 dpep = devpolltbl[minor]; 1513 ASSERT(dpep != NULL); 1514 devpolltbl[minor] = NULL; 1515 mutex_exit(&devpoll_lock); 1516 pcp = dpep->dpe_pcache; 1517 ASSERT(pcp != NULL); 1518 /* 1519 * At this point, no other lwp can access this pollcache via the 1520 * /dev/poll fd. This pollcache is going away, so do the clean 1521 * up without the pc_lock. 1522 */ 1523 hashtbl = pcp->pc_hash; 1524 for (i = 0; i < pcp->pc_hashsize; i++) { 1525 for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) { 1526 if (pdp->pd_php != NULL) { 1527 pollhead_delete(pdp->pd_php, pdp); 1528 pdp->pd_php = NULL; 1529 pdp->pd_fp = NULL; 1530 } 1531 } 1532 } 1533 /* 1534 * pollwakeup() may still interact with this pollcache. Wait until 1535 * it is done. 1536 */ 1537 mutex_enter(&pcp->pc_no_exit); 1538 ASSERT(pcp->pc_busy >= 0); 1539 while (pcp->pc_busy > 0) 1540 cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit); 1541 mutex_exit(&pcp->pc_no_exit); 1542 1543 /* Clean up any pollcache links created via recursive /dev/poll */ 1544 if (pcp->pc_parents != NULL || pcp->pc_children != NULL) { 1545 /* 1546 * Because of the locking rules for pcachelink manipulation, 1547 * acquring pc_lock is required for this step. 1548 */ 1549 mutex_enter(&pcp->pc_lock); 1550 pcachelink_purge_all(pcp); 1551 mutex_exit(&pcp->pc_lock); 1552 } 1553 1554 pcache_destroy(pcp); 1555 ASSERT(dpep->dpe_refcnt == 0); 1556 kmem_free(dpep, sizeof (dp_entry_t)); 1557 return (0); 1558 } 1559 1560 static void 1561 pcachelink_locked_rele(pcachelink_t *pl) 1562 { 1563 ASSERT(MUTEX_HELD(&pl->pcl_lock)); 1564 VERIFY(pl->pcl_refcnt >= 1); 1565 1566 pl->pcl_refcnt--; 1567 if (pl->pcl_refcnt == 0) { 1568 VERIFY(pl->pcl_state == PCL_INVALID); 1569 ASSERT(pl->pcl_parent_pc == NULL); 1570 ASSERT(pl->pcl_child_pc == NULL); 1571 ASSERT(pl->pcl_parent_next == NULL); 1572 ASSERT(pl->pcl_child_next == NULL); 1573 1574 pl->pcl_state = PCL_FREE; 1575 mutex_destroy(&pl->pcl_lock); 1576 kmem_free(pl, sizeof (pcachelink_t)); 1577 } else { 1578 mutex_exit(&pl->pcl_lock); 1579 } 1580 } 1581 1582 /* 1583 * Associate parent and child pollcaches via a pcachelink_t. If an existing 1584 * link (stale or valid) between the two is found, it will be reused. If a 1585 * suitable link is not found for reuse, a new one will be allocated. 1586 */ 1587 static void 1588 pcachelink_assoc(pollcache_t *child, pollcache_t *parent) 1589 { 1590 pcachelink_t *pl, **plpn; 1591 1592 ASSERT(MUTEX_HELD(&child->pc_lock)); 1593 ASSERT(MUTEX_HELD(&parent->pc_lock)); 1594 1595 /* Search for an existing link we can reuse. */ 1596 plpn = &child->pc_parents; 1597 for (pl = child->pc_parents; pl != NULL; pl = *plpn) { 1598 mutex_enter(&pl->pcl_lock); 1599 if (pl->pcl_state == PCL_INVALID) { 1600 /* Clean any invalid links while walking the list */ 1601 *plpn = pl->pcl_parent_next; 1602 pl->pcl_child_pc = NULL; 1603 pl->pcl_parent_next = NULL; 1604 pcachelink_locked_rele(pl); 1605 } else if (pl->pcl_parent_pc == parent) { 1606 /* Successfully found parent link */ 1607 ASSERT(pl->pcl_state == PCL_VALID || 1608 pl->pcl_state == PCL_STALE); 1609 pl->pcl_state = PCL_VALID; 1610 mutex_exit(&pl->pcl_lock); 1611 return; 1612 } else { 1613 plpn = &pl->pcl_parent_next; 1614 mutex_exit(&pl->pcl_lock); 1615 } 1616 } 1617 1618 /* No existing link to the parent was found. Create a fresh one. */ 1619 pl = kmem_zalloc(sizeof (pcachelink_t), KM_SLEEP); 1620 mutex_init(&pl->pcl_lock, NULL, MUTEX_DEFAULT, NULL); 1621 1622 pl->pcl_parent_pc = parent; 1623 pl->pcl_child_next = parent->pc_children; 1624 parent->pc_children = pl; 1625 pl->pcl_refcnt++; 1626 1627 pl->pcl_child_pc = child; 1628 pl->pcl_parent_next = child->pc_parents; 1629 child->pc_parents = pl; 1630 pl->pcl_refcnt++; 1631 1632 pl->pcl_state = PCL_VALID; 1633 } 1634 1635 /* 1636 * Mark all child links in a pollcache as stale. Any invalid child links found 1637 * during iteration are purged. 1638 */ 1639 static void 1640 pcachelink_mark_stale(pollcache_t *pcp) 1641 { 1642 pcachelink_t *pl, **plpn; 1643 1644 ASSERT(MUTEX_HELD(&pcp->pc_lock)); 1645 1646 plpn = &pcp->pc_children; 1647 for (pl = pcp->pc_children; pl != NULL; pl = *plpn) { 1648 mutex_enter(&pl->pcl_lock); 1649 if (pl->pcl_state == PCL_INVALID) { 1650 /* 1651 * Remove any invalid links while we are going to the 1652 * trouble of walking the list. 1653 */ 1654 *plpn = pl->pcl_child_next; 1655 pl->pcl_parent_pc = NULL; 1656 pl->pcl_child_next = NULL; 1657 pcachelink_locked_rele(pl); 1658 } else { 1659 pl->pcl_state = PCL_STALE; 1660 plpn = &pl->pcl_child_next; 1661 mutex_exit(&pl->pcl_lock); 1662 } 1663 } 1664 } 1665 1666 /* 1667 * Purge all stale (or invalid) child links from a pollcache. 1668 */ 1669 static void 1670 pcachelink_purge_stale(pollcache_t *pcp) 1671 { 1672 pcachelink_t *pl, **plpn; 1673 1674 ASSERT(MUTEX_HELD(&pcp->pc_lock)); 1675 1676 plpn = &pcp->pc_children; 1677 for (pl = pcp->pc_children; pl != NULL; pl = *plpn) { 1678 mutex_enter(&pl->pcl_lock); 1679 switch (pl->pcl_state) { 1680 case PCL_STALE: 1681 pl->pcl_state = PCL_INVALID; 1682 /* FALLTHROUGH */ 1683 case PCL_INVALID: 1684 *plpn = pl->pcl_child_next; 1685 pl->pcl_parent_pc = NULL; 1686 pl->pcl_child_next = NULL; 1687 pcachelink_locked_rele(pl); 1688 break; 1689 default: 1690 plpn = &pl->pcl_child_next; 1691 mutex_exit(&pl->pcl_lock); 1692 } 1693 } 1694 } 1695 1696 /* 1697 * Purge all child and parent links from a pollcache, regardless of status. 1698 */ 1699 static void 1700 pcachelink_purge_all(pollcache_t *pcp) 1701 { 1702 pcachelink_t *pl, **plpn; 1703 1704 ASSERT(MUTEX_HELD(&pcp->pc_lock)); 1705 1706 plpn = &pcp->pc_parents; 1707 for (pl = pcp->pc_parents; pl != NULL; pl = *plpn) { 1708 mutex_enter(&pl->pcl_lock); 1709 pl->pcl_state = PCL_INVALID; 1710 *plpn = pl->pcl_parent_next; 1711 pl->pcl_child_pc = NULL; 1712 pl->pcl_parent_next = NULL; 1713 pcachelink_locked_rele(pl); 1714 } 1715 1716 plpn = &pcp->pc_children; 1717 for (pl = pcp->pc_children; pl != NULL; pl = *plpn) { 1718 mutex_enter(&pl->pcl_lock); 1719 pl->pcl_state = PCL_INVALID; 1720 *plpn = pl->pcl_child_next; 1721 pl->pcl_parent_pc = NULL; 1722 pl->pcl_child_next = NULL; 1723 pcachelink_locked_rele(pl); 1724 } 1725 1726 ASSERT(pcp->pc_parents == NULL); 1727 ASSERT(pcp->pc_children == NULL); 1728 } 1729