/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains all the functions required for interactions of
 * event sources with the event port file system.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/rctl.h>
#include <sys/atomic.h>
#include <sys/poll_impl.h>
#include <sys/port_impl.h>

/*
 * Maximum number of elements allowed to be passed in a single call of a
 * port function (port_sendn(), port_getn()).  We need to allocate kernel
 * memory for all of them at once, so we can't let it scale without limit.
 */
uint_t		port_max_list = PORT_MAX_LIST;
port_control_t	port_control;	/* Event port framework main structure */

/*
 * Block other threads from using a port.
 * We enter holding portq->portq_mutex but
 * we may drop and reacquire this lock.
 * Callers must deal with this fact.
 */
void
port_block(port_queue_t *portq)
{
	ASSERT(MUTEX_HELD(&portq->portq_mutex));

	while (portq->portq_flags & PORTQ_BLOCKED)
		cv_wait(&portq->portq_block_cv, &portq->portq_mutex);
	portq->portq_flags |= PORTQ_BLOCKED;
}

/*
 * Undo port_block(portq).
 */
void
port_unblock(port_queue_t *portq)
{
	ASSERT(MUTEX_HELD(&portq->portq_mutex));

	portq->portq_flags &= ~PORTQ_BLOCKED;
	cv_signal(&portq->portq_block_cv);
}
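
/*
 * A minimal illustrative sketch (not part of the original file) of the
 * caller pattern assumed by port_block()/port_unblock(): because
 * port_block() may drop and reacquire portq_mutex, any queue state read
 * before the call must be re-checked afterwards (compare
 * port_remove_done_event() below).  The function name
 * example_blocked_section() is hypothetical; only the port_*() calls
 * and the mutex routines are real.
 */
static void
example_blocked_section(port_queue_t *portq)
{
	mutex_enter(&portq->portq_mutex);
	port_block(portq);	/* may drop and reacquire portq_mutex */
	/*
	 * Other threads are now blocked from the port, but any state
	 * sampled before port_block() may be stale and must be
	 * re-evaluated here while portq_mutex is held.
	 */
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
}
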
/*
 * Called from pollwakeup(PORT_SOURCE_FD source) to determine
 * if the port's fd needs to be notified of poll events. If yes,
 * we mark the port indicating that pollwakeup() is referring
 * to it so that the port_t does not disappear.  pollwakeup()
 * calls port_pollwkdone() after notifying.  In port_pollwkdone(),
 * we clear the hold on the port_t (clear PORTQ_POLLWK_PEND).
 */
int
port_pollwkup(port_t *pp)
{
	int		events = 0;
	port_queue_t	*portq;
	portq = &pp->port_queue;
	mutex_enter(&portq->portq_mutex);

	/*
	 * Normally, we should not have a situation where PORTQ_POLLIN
	 * and PORTQ_POLLWK_PEND are set at the same time, but it is
	 * possible. So, in pollwakeup() we ensure that no new fd's get
	 * added to the pollhead between the time it notifies poll events
	 * and calls port_pollwkdone(), where we clear the
	 * PORTQ_POLLWK_PEND flag.
	 */
	if (portq->portq_flags & PORTQ_POLLIN &&
	    !(portq->portq_flags & PORTQ_POLLWK_PEND)) {
		portq->portq_flags &= ~PORTQ_POLLIN;
		portq->portq_flags |= PORTQ_POLLWK_PEND;
		events = POLLIN;
	}
	mutex_exit(&portq->portq_mutex);
	return (events);
}

void
port_pollwkdone(port_t *pp)
{
	port_queue_t	*portq;
	portq = &pp->port_queue;
	ASSERT(portq->portq_flags & PORTQ_POLLWK_PEND);
	mutex_enter(&portq->portq_mutex);
	portq->portq_flags &= ~PORTQ_POLLWK_PEND;
	cv_signal(&pp->port_cv);
	mutex_exit(&portq->portq_mutex);
}

/*
 * The port_send_event() function is used by all event sources to submit
 * triggered events to a port. All the data required for the event management
 * is already stored in the port_kevent_t structure.
 * The event port internal data is stored in the port_kevent_t structure
 * at allocation time (see port_alloc_event()). The data related to
 * the event itself and to the event source management is stored in the
 * port_kevent_t structure between allocation time and submit time
 * (see port_init_event()).
 *
 * This function is often called from interrupt level.
 */
void
port_send_event(port_kevent_t *pkevp)
{
	port_queue_t	*portq;

	portq = &pkevp->portkev_port->port_queue;
	mutex_enter(&portq->portq_mutex);

	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		/* Event already in the port queue */
		if (pkevp->portkev_source == PORT_SOURCE_FD) {
			mutex_exit(&pkevp->portkev_lock);
		}
		mutex_exit(&portq->portq_mutex);
		return;
	}

	/* put event in the port queue */
	list_insert_tail(&portq->portq_list, pkevp);
	portq->portq_nent++;

	/*
	 * Remove the PORTQ_WAIT_EVENTS flag to indicate
	 * that new events are available.
	 */
	portq->portq_flags &= ~PORTQ_WAIT_EVENTS;
	pkevp->portkev_flags |= PORT_KEV_DONEQ;		/* event enqueued */

	if (pkevp->portkev_source == PORT_SOURCE_FD) {
		mutex_exit(&pkevp->portkev_lock);
	}

	/* Check if thread is in port_close() waiting for outstanding events */
	if (portq->portq_flags & PORTQ_CLOSE) {
		/* Check if all outstanding events are already in port queue */
		if (pkevp->portkev_port->port_curr <= portq->portq_nent)
			cv_signal(&portq->portq_closecv);
	}

	if (portq->portq_getn == 0) {
		/*
		 * No thread retrieving events -> check if enough events are
		 * available to satisfy waiting threads.
		 */
		if (portq->portq_thread &&
		    (portq->portq_nent >= portq->portq_nget))
			cv_signal(&portq->portq_thread->portget_cv);
	}

	/*
	 * If some thread is polling the port's fd, then notify it.
	 * For the PORT_SOURCE_FD source we must not call pollwakeup() here,
	 * as that would result in a recursive call (the PORT_SOURCE_FD source
	 * is driven from pollwakeup()). In that case pollwakeup() itself
	 * notifies the port if it is being polled.
	 */
	if (pkevp->portkev_source != PORT_SOURCE_FD &&
	    portq->portq_flags & PORTQ_POLLIN) {
		portq->portq_flags &= ~PORTQ_POLLIN;
		mutex_exit(&portq->portq_mutex);
		pollwakeup(&pkevp->portkev_port->port_pollhd, POLLIN);
	} else {
		mutex_exit(&portq->portq_mutex);
	}
}
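
/*
 * A minimal sketch (not part of the original file) of how an event source
 * typically drives the event lifecycle described above and below: reserve
 * a slot early with port_alloc_event(), wire it up with port_init_event(),
 * and submit the fired event with port_send_event().  The function
 * example_submit() and its parameters are hypothetical; only the port_*()
 * calls and the portkev_events member are real.
 */
static int
example_submit(int port, int source, uintptr_t object, void *user,
    int (*cb)(void *, int *, pid_t, int, void *), void *sysarg, int events)
{
	port_kevent_t	*pkevp;
	int		error;

	/* reserve the event slot as early as possible; this may fail */
	error = port_alloc_event(port, PORT_ALLOC_DEFAULT, source, &pkevp);
	if (error)
		return (error);

	/* initialize the "wired" elements of the event structure */
	port_init_event(pkevp, object, user, cb, sysarg);

	/* source-specific payload delivered to the application later */
	pkevp->portkev_events = events;

	/* submit the triggered event; may also happen at interrupt level */
	port_send_event(pkevp);
	return (0);
}
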
/*
 * The port_alloc_event() function has to be used by all event sources
 * to request a slot for event notification.
 * The slot reservation could be denied because of lack of resources.
 * For that reason the event source should allocate an event slot as early
 * as possible and be prepared to get an error code instead of the
 * port event pointer.
 * All current event sources allocate an event slot during a system call
 * entry. They return an error code to the application if an event slot
 * could not be reserved.
 * It is also recommended to associate the event source with the port
 * before any other port function is used.
 * The port argument is a file descriptor obtained by the application as
 * a return value of port_create().
 * Possible values of flags are:
 * PORT_ALLOC_DEFAULT
 *	This is the standard type of port events. port_get(n) will free this
 *	type of event structure as soon as the events are delivered to the
 *	application.
 * PORT_ALLOC_PRIVATE
 *	This type of event is reserved for the private use of the event
 *	source. The port_get(n) function will deliver events of this type
 *	to the application but it will not free the event structure itself.
 *	The event source must free this structure using port_free_event().
 * PORT_ALLOC_CACHED
 *	This type of event is used when the event source maintains its own
 *	cache.
 *	The port_get(n) function will deliver events of this type to the
 *	application but it will not free the event structure itself.
 *	The event source must free this structure using port_free_event().
 */
int
port_alloc_event(int port, int flags, int source, port_kevent_t **pkevpp)
{
	port_t		*pp;
	file_t		*fp;
	port_kevent_t	*pkevp;

	if ((fp = getf(port)) == NULL)
		return (EBADF);

	if (fp->f_vnode->v_type != VPORT) {
		releasef(port);
		return (EBADFD);
	}

	pkevp = kmem_cache_alloc(port_control.pc_cache, KM_NOSLEEP);
	if (pkevp == NULL) {
		releasef(port);
		return (ENOMEM);
	}

	/*
	 * port_max_events is controlled by the resource control
	 * process.port-max-events
	 */
	pp = VTOEP(fp->f_vnode);
	mutex_enter(&pp->port_queue.portq_mutex);
	if (pp->port_curr >= pp->port_max_events) {
		mutex_exit(&pp->port_queue.portq_mutex);
		kmem_cache_free(port_control.pc_cache, pkevp);
		releasef(port);
		return (EAGAIN);
	}
	pp->port_curr++;
	mutex_exit(&pp->port_queue.portq_mutex);

	bzero(pkevp, sizeof (port_kevent_t));
	mutex_init(&pkevp->portkev_lock, NULL, MUTEX_DEFAULT, NULL);
	pkevp->portkev_source = source;
	pkevp->portkev_flags = flags;
	pkevp->portkev_pid = curproc->p_pid;
	pkevp->portkev_port = pp;
	*pkevpp = pkevp;
	releasef(port);
	return (0);
}

/*
 * This function is faster than the standard port_alloc_event() and
 * can be used when the event source has already allocated an event from
 * a port.
 */
int
port_dup_event(port_kevent_t *pkevp, port_kevent_t **pkevdupp, int flags)
{
	int	error;

	error = port_alloc_event_local(pkevp->portkev_port,
	    pkevp->portkev_source, flags, pkevdupp);
	if (error == 0)
		(*pkevdupp)->portkev_pid = pkevp->portkev_pid;
	return (error);
}

/*
 * port_alloc_event_local() is reserved for internal use only.
 * It does the same job as port_alloc_event() but takes the event port
 * pointer as its first argument.
 * The check of the validity of the port file descriptor is skipped here.
 */
int
port_alloc_event_local(port_t *pp, int source, int flags,
    port_kevent_t **pkevpp)
{
	port_kevent_t	*pkevp;

	pkevp = kmem_cache_alloc(port_control.pc_cache, KM_NOSLEEP);
	if (pkevp == NULL)
		return (ENOMEM);

	mutex_enter(&pp->port_queue.portq_mutex);
	if (pp->port_curr >= pp->port_max_events) {
		mutex_exit(&pp->port_queue.portq_mutex);
		kmem_cache_free(port_control.pc_cache, pkevp);
		return (EAGAIN);
	}
	pp->port_curr++;
	mutex_exit(&pp->port_queue.portq_mutex);

	bzero(pkevp, sizeof (port_kevent_t));
	mutex_init(&pkevp->portkev_lock, NULL, MUTEX_DEFAULT, NULL);
	pkevp->portkev_flags = flags;
	pkevp->portkev_port = pp;
	pkevp->portkev_source = source;
	pkevp->portkev_pid = curproc->p_pid;
	*pkevpp = pkevp;
	return (0);
}

/*
 * port_alloc_event_block() has the same functionality as port_alloc_event(),
 * except that
 * - it blocks if not enough event slots are available and
 * - it blocks if not enough memory is available.
 * Currently port_dispatch() is using this function to increase the
 * reliability of event delivery for library event sources.
 */
int
port_alloc_event_block(port_t *pp, int source, int flags,
    port_kevent_t **pkevpp)
{
	port_kevent_t *pkevp =
	    kmem_cache_alloc(port_control.pc_cache, KM_SLEEP);

	mutex_enter(&pp->port_queue.portq_mutex);
	while (pp->port_curr >= pp->port_max_events) {
		if (!cv_wait_sig(&pp->port_cv, &pp->port_queue.portq_mutex)) {
			/* signal detected */
			mutex_exit(&pp->port_queue.portq_mutex);
			kmem_cache_free(port_control.pc_cache, pkevp);
			return (EINTR);
		}
	}
	pp->port_curr++;
	mutex_exit(&pp->port_queue.portq_mutex);

	bzero(pkevp, sizeof (port_kevent_t));
	mutex_init(&pkevp->portkev_lock, NULL, MUTEX_DEFAULT, NULL);
	pkevp->portkev_flags = flags;
	pkevp->portkev_port = pp;
	pkevp->portkev_source = source;
	pkevp->portkev_pid = curproc->p_pid;
	*pkevpp = pkevp;
	return (0);
}

/*
 * Take an event out of the port queue
 */
static void
port_remove_event_doneq(port_kevent_t *pkevp, port_queue_t *portq)
{
	ASSERT(MUTEX_HELD(&portq->portq_mutex));
	list_remove(&portq->portq_list, pkevp);
	portq->portq_nent--;
	pkevp->portkev_flags &= ~PORT_KEV_DONEQ;
}

/*
 * The port_remove_done_event() function takes a fired event out of the
 * port queue.
 * Currently this function is required to cancel a fired event because
 * the application is delivering new association data (see
 * port_associate_fd()).
 */
void
port_remove_done_event(port_kevent_t *pkevp)
{
	port_queue_t	*portq;

	portq = &pkevp->portkev_port->port_queue;
	mutex_enter(&portq->portq_mutex);
	/* wait for port_get() or port_getn() */
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		/* event still in port queue */
		if (portq->portq_getn) {
			/*
			 * There could still be fired events in the temporary
			 * queue; push those events back to the port queue and
			 * remove the requested event afterwards.
			 */
			port_push_eventq(portq);
		}
		/* now remove event from the port queue */
		port_remove_event_doneq(pkevp, portq);
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
}

/*
 * Return port event back to the kmem_cache.
 * If the event is currently in the port queue the event itself will only
 * be marked as invalid.
 * The port_get(n) function will not deliver such events to the application
 * and will return them back to the kmem_cache.
 */
void
port_free_event(port_kevent_t *pkevp)
{
	port_queue_t	*portq;
	port_t		*pp;

	pp = pkevp->portkev_port;
	if (pp == NULL)
		return;
	if (pkevp->portkev_flags & PORT_ALLOC_PRIVATE) {
		port_free_event_local(pkevp, 0);
		return;
	}

	portq = &pp->port_queue;
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		pkevp->portkev_flags |= PORT_KEV_FREE;
		pkevp->portkev_callback = NULL;
		port_unblock(portq);
		mutex_exit(&portq->portq_mutex);
		return;
	}
	port_unblock(portq);

	if (pkevp->portkev_flags & PORT_KEV_CACHED) {
		mutex_exit(&portq->portq_mutex);
		return;
	}

	if (--pp->port_curr < pp->port_max_events)
		cv_signal(&pp->port_cv);
	if (portq->portq_flags & PORTQ_CLOSE) {
		/*
		 * Another thread is closing the event port.
		 * That thread will sleep until all allocated event
		 * structures have been returned to the event port framework.
		 * The portq_mutex is used to synchronize the status
		 * of the allocated event structures (port_curr).
		 */
		if (pp->port_curr <= portq->portq_nent)
			cv_signal(&portq->portq_closecv);
	}
	mutex_exit(&portq->portq_mutex);
	port_free_event_local(pkevp, 1);
}

/*
 * This event port internal function is used by port_free_event() and
 * other port internal functions to return event structures back to the
 * kmem_cache.
 */
void
port_free_event_local(port_kevent_t *pkevp, int counter)
{
	port_t *pp = pkevp->portkev_port;
	port_queue_t *portq = &pp->port_queue;
	int wakeup;

	pkevp->portkev_callback = NULL;
	pkevp->portkev_flags = 0;
	pkevp->portkev_port = NULL;
	mutex_destroy(&pkevp->portkev_lock);
	kmem_cache_free(port_control.pc_cache, pkevp);

	mutex_enter(&portq->portq_mutex);
	if (counter == 0) {
		if (--pp->port_curr < pp->port_max_events)
			cv_signal(&pp->port_cv);
	}
	wakeup = (portq->portq_flags & PORTQ_POLLOUT);
	portq->portq_flags &= ~PORTQ_POLLOUT;
	mutex_exit(&portq->portq_mutex);

	/* Submit a POLLOUT event if requested */
	if (wakeup)
		pollwakeup(&pp->port_pollhd, POLLOUT);
}

/*
 * port_init_event(port_event_t *pev, uintptr_t object, void *user,
 *	int (*port_callback)(void *, int *, pid_t, int, void *), void *sysarg);
 * This function initializes most of the "wired" elements of the port
 * event structure. It is normally used just after the allocation
 * of the port event structure.
 * pkevp	: pointer to the port event structure
 * object	: object associated with this event structure
 * user		: user defined pointer delivered with the association function
 * port_callback:
 *		Address of the callback function which will be called
 *		- just before the event is delivered to the application.
 *		  The callback function is called in user context and can be
 *		  used for copyouts, e.g.
 *		- on close() or dissociation of the event. The sub-system
 *		  must immediately remove every existing association of
 *		  some object with this event.
 * sysarg	: event source proprietary data
 */
void
port_init_event(port_kevent_t *pkevp, uintptr_t object, void *user,
    int (*port_callback)(void *, int *, pid_t, int, void *),
    void *sysarg)
{
	pkevp->portkev_object = object;
	pkevp->portkev_user = user;
	pkevp->portkev_callback = port_callback;
	pkevp->portkev_arg = sysarg;
}

/*
 * This routine removes a portfd_t from the fd cache's hash table.
 */
void
port_pcache_remove_fd(port_fdcache_t *pcp, portfd_t *pfd)
{
	polldat_t	*lpdp;
	polldat_t	*cpdp;
	portfd_t	**bucket;
	polldat_t	*pdp = PFTOD(pfd);

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	bucket = PORT_FD_BUCKET(pcp, pdp->pd_fd);
	cpdp = PFTOD(*bucket);
	if (pdp == cpdp) {
		*bucket = PDTOF(pdp->pd_hashnext);
		if (--pcp->pc_fdcount == 0) {
			/*
			 * signal the thread which may have blocked in
			 * port_close_sourcefd() on lastclose waiting
			 * for pc_fdcount to drop to 0.
			 */
			cv_signal(&pcp->pc_lclosecv);
		}
		kmem_free(pfd, sizeof (portfd_t));
		return;
	}

	while (cpdp != NULL) {
		lpdp = cpdp;
		cpdp = cpdp->pd_hashnext;
		if (cpdp == pdp) {
			/* polldat struct found */
			lpdp->pd_hashnext = pdp->pd_hashnext;
			if (--pcp->pc_fdcount == 0) {
				/*
				 * signal the thread which may have blocked in
				 * port_close_sourcefd() on lastclose waiting
				 * for pc_fdcount to drop to 0.
				 */
				cv_signal(&pcp->pc_lclosecv);
			}
			break;
		}
	}
	ASSERT(cpdp != NULL);
	kmem_free(pfd, sizeof (portfd_t));
}

/*
 * The port_push_eventq() function is used to move all remaining events
 * from the temporary queue used in port_get(n)() to the standard port
 * queue.
 */
void
port_push_eventq(port_queue_t *portq)
{
	/*
	 * Append the temporary portq_get_list to the port queue. On return
	 * the temporary portq_get_list is empty.
	 */
	list_move_tail(&portq->portq_list, &portq->portq_get_list);
	portq->portq_nent += portq->portq_tnent;
	portq->portq_tnent = 0;
}

/*
 * The port_remove_fd_object() function frees all resources associated with
 * the delivered portfd_t structure. Returns 1 if the port_kevent was found
 * and removed from the port queue.
 */
int
port_remove_fd_object(portfd_t *pfd, port_t *pp, port_fdcache_t *pcp)
{
	port_queue_t	*portq;
	polldat_t	*pdp = PFTOD(pfd);
	port_kevent_t	*pkevp;
	int		error;
	int		removed = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pdp->pd_php != NULL) {
		pollhead_delete(pdp->pd_php, pdp);
		pdp->pd_php = NULL;
	}
	pkevp = pdp->pd_portev;
	portq = &pp->port_queue;
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		if (portq->portq_getn && portq->portq_tnent) {
			/*
			 * move events from the temporary "get" queue
			 * back to the port queue
			 */
			port_push_eventq(portq);
		}
		/* cleanup merged port queue */
		port_remove_event_doneq(pkevp, portq);
		removed = 1;
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
	if (pkevp->portkev_callback) {
		(void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
		    &error, pkevp->portkev_pid, PORT_CALLBACK_DISSOCIATE,
		    pkevp);
	}
	port_free_event_local(pkevp, 0);

	/* remove polldat struct */
	port_pcache_remove_fd(pcp, pfd);
	return (removed);
}

/*
 * The port_close_pfd() function dissociates a file descriptor from a port
 * and removes all allocated resources.
 * close(2) detects in the uf_entry_t structure that the fd is associated
 * with a port (at least one port).
 * The fd can be associated with several ports.
 */
void
port_close_pfd(portfd_t *pfd)
{
	port_t		*pp;
	port_fdcache_t	*pcp;

	/*
	 * the portfd_t passed in should be for this proc.
	 */
	ASSERT(curproc->p_pid == PFTOD(pfd)->pd_portev->portkev_pid);
	pp = PFTOD(pfd)->pd_portev->portkev_port;
	pcp = pp->port_queue.portq_pcp;
	mutex_enter(&pcp->pc_lock);
	(void) port_remove_fd_object(pfd, pp, pcp);
	mutex_exit(&pcp->pc_lock);
}

/*
 * The port_associate_ksource() function associates an event source with a
 * port.
 * On port_close() all associated sources are requested to free all local
 * resources associated with the event port.
 * The association of a source with a port can only be done once. Further
 * calls of this function will only increment the reference counter.
 * The allocated port_source_t structure is removed from the port as soon as
 * the reference counter becomes 0.
 */
/* ARGSUSED */
int
port_associate_ksource(int port, int source, port_source_t **portsrc,
    void (*port_src_close)(void *, int, pid_t, int), void *arg,
    int (*port_src_associate)(port_kevent_t *, int, int, uintptr_t, void *))
{
	port_t		*pp;
	file_t		*fp;
	port_source_t	**ps;
	port_source_t	*pse;

	if ((fp = getf(port)) == NULL)
		return (EBADF);

	if (fp->f_vnode->v_type != VPORT) {
		releasef(port);
		return (EBADFD);
	}
	pp = VTOEP(fp->f_vnode);

	mutex_enter(&pp->port_queue.portq_source_mutex);
	ps = &pp->port_queue.portq_scache[PORT_SHASH(source)];
	for (pse = *ps; pse != NULL; pse = pse->portsrc_next) {
		if (pse->portsrc_source == source)
			break;
	}

	if (pse == NULL) {
		/* Create association of the event source with the port */
		pse = kmem_zalloc(sizeof (port_source_t), KM_NOSLEEP);
		if (pse == NULL) {
			mutex_exit(&pp->port_queue.portq_source_mutex);
			releasef(port);
			return (ENOMEM);
		}
		pse->portsrc_source = source;
		pse->portsrc_close = port_src_close;
		pse->portsrc_closearg = arg;
		pse->portsrc_cnt = 1;
		if (*ps)
			pse->portsrc_next = (*ps)->portsrc_next;
		*ps = pse;
	} else {
		/* entry already present; the source only takes a reference */
		pse->portsrc_cnt++;
	}
	mutex_exit(&pp->port_queue.portq_source_mutex);
	releasef(port);
	if (portsrc)
		*portsrc = pse;
	return (0);
}

/*
 * The port_dissociate_ksource() function dissociates an event source from
 * a port.
 */
int
port_dissociate_ksource(int port, int source, port_source_t *ps)
{
	port_t		*pp;
	file_t		*fp;
	port_source_t	**psh;

	if (ps == NULL)
		return (EINVAL);

	if ((fp = getf(port)) == NULL)
		return (EBADF);

	if (fp->f_vnode->v_type != VPORT) {
		releasef(port);
		return (EBADFD);
	}
	pp = VTOEP(fp->f_vnode);

	mutex_enter(&pp->port_queue.portq_source_mutex);
	if (--ps->portsrc_cnt == 0) {
		/* last association removed -> free source structure */
		if (ps->portsrc_prev == NULL) {
			/* first entry */
			psh = &pp->port_queue.portq_scache[PORT_SHASH(source)];
			*psh = ps->portsrc_next;
			if (ps->portsrc_next)
				ps->portsrc_next->portsrc_prev = NULL;
		} else {
			ps->portsrc_prev->portsrc_next = ps->portsrc_next;
			if (ps->portsrc_next)
				ps->portsrc_next->portsrc_prev =
				    ps->portsrc_prev;
		}
		kmem_free(ps, sizeof (port_source_t));
	}
	mutex_exit(&pp->port_queue.portq_source_mutex);
	releasef(port);
	return (0);
}
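
/*
 * A minimal sketch (not part of the original file) of how a kernel event
 * source could register itself with a port through port_associate_ksource()
 * and later drop the reference with port_dissociate_ksource().  The names
 * example_src_close() and example_register_source(), and the source id the
 * caller passes in, are hypothetical; only the port_*_ksource() calls and
 * their signatures are taken from this file.
 */
/* ARGSUSED */
static void
example_src_close(void *arg, int port, pid_t pid, int lastclose)
{
	/* release per-port resources held by the event source */
}

static int
example_register_source(int port, int source, port_source_t **psp)
{
	int	error;

	/* the first call creates the association, later calls add a ref */
	error = port_associate_ksource(port, source, psp,
	    example_src_close, NULL, NULL);
	if (error)
		return (error);

	/* ... allocate and submit events for this source ... */

	/* drop the reference; the last one frees the port_source_t */
	return (port_dissociate_ksource(port, source, *psp));
}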