/*-
 * Copyright (c) 2002 Alfred Perlstein <alfred@FreeBSD.org>
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_posix.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/condvar.h>
#include <sys/sem.h>
#include <sys/uio.h>
#include <sys/syscall.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <posix4/posix4.h>
#include <posix4/semaphore.h>
#include <posix4/_semaphore.h>

static int sem_count_proc(struct proc *p);
static struct ksem *sem_lookup_byname(const char *name);
static int sem_create(struct thread *td, const char *name,
    struct ksem **ksret, mode_t mode, unsigned int value);
static void sem_free(struct ksem *ksnew);
static int sem_perm(struct thread *td, struct ksem *ks);
static void sem_enter(struct proc *p, struct ksem *ks);
static int sem_leave(struct proc *p, struct ksem *ks);
static void sem_exithook(void *arg, struct proc *p);
static void sem_forkhook(void *arg, struct proc *p1, struct proc *p2,
    int flags);
static int sem_hasopen(struct thread *td, struct ksem *ks);

static int kern_sem_close(struct thread *td, semid_t id);
static int kern_sem_post(struct thread *td, semid_t id);
static int kern_sem_wait(struct thread *td, semid_t id, int tryflag,
    struct timespec *abstime);
static int kern_sem_init(struct thread *td, int dir, unsigned int value,
    semid_t *idp);
static int kern_sem_open(struct thread *td, int dir, const char *name,
    int oflag, mode_t mode, unsigned int value, semid_t *idp);
static int kern_sem_unlink(struct thread *td, const char *name);

#ifndef SEM_MAX
#define	SEM_MAX	30
#endif

#define	SEM_MAX_NAMELEN	14

#define	SEM_TO_ID(x)	((intptr_t)(x))
#define	ID_TO_SEM(x)	id_to_sem(x)

struct kuser {
	pid_t	ku_pid;
	LIST_ENTRY(kuser) ku_next;
};

struct ksem {
	LIST_ENTRY(ksem) ks_entry;	/* global list entry */
	int	ks_onlist;		/* boolean if on a list (ks_entry) */
	char	*ks_name;		/* if named, this is the name */
	int	ks_ref;			/* number of references */
	mode_t	ks_mode;		/* protection bits */
	uid_t	ks_uid;			/* creator uid */
	gid_t	ks_gid;			/* creator gid */
	unsigned int ks_value;		/* current value */
	struct cv ks_cv;		/* waiters sleep here */
	int	ks_waiters;		/* number of waiters */
	LIST_HEAD(, kuser) ks_users;	/* pids using this sem */
};

/*
 * Available semaphores go here; this includes semaphores created by
 * sem_init and any created via sem_open that have not yet been unlinked.
 */
LIST_HEAD(, ksem) ksem_head = LIST_HEAD_INITIALIZER(&ksem_head);
/*
 * Semaphores still in use but that have been sem_unlink()'d go here.
 */
LIST_HEAD(, ksem) ksem_deadhead = LIST_HEAD_INITIALIZER(&ksem_deadhead);

static struct mtx sem_lock;
static MALLOC_DEFINE(M_SEM, "sems", "semaphore data");

static int nsems = 0;
SYSCTL_DECL(_p1003_1b);
SYSCTL_INT(_p1003_1b, OID_AUTO, nsems, CTLFLAG_RD, &nsems, 0, "");

static eventhandler_tag sem_exit_tag, sem_exec_tag, sem_fork_tag;

#ifdef SEM_DEBUG
#define	DP(x)	printf x
#else
#define	DP(x)
#endif

static __inline
void
sem_ref(struct ksem *ks)
{

	mtx_assert(&sem_lock, MA_OWNED);
	ks->ks_ref++;
	DP(("sem_ref: ks = %p, ref = %d\n", ks, ks->ks_ref));
}

static __inline
void
sem_rel(struct ksem *ks)
{

	mtx_assert(&sem_lock, MA_OWNED);
	DP(("sem_rel: ks = %p, ref = %d\n", ks, ks->ks_ref - 1));
	if (--ks->ks_ref == 0)
		sem_free(ks);
}

static __inline struct ksem *id_to_sem(semid_t id);

static __inline
struct ksem *
id_to_sem(id)
	semid_t id;
{
	struct ksem *ks;

	mtx_assert(&sem_lock, MA_OWNED);
	DP(("id_to_sem: id = %0x,%p\n", id, (struct ksem *)id));
	LIST_FOREACH(ks, &ksem_head, ks_entry) {
		DP(("id_to_sem: ks = %p\n", ks));
		if (ks == (struct ksem *)id)
			return (ks);
	}
	return (NULL);
}

static struct ksem *
sem_lookup_byname(name)
	const char *name;
{
	struct ksem *ks;

	mtx_assert(&sem_lock, MA_OWNED);
	LIST_FOREACH(ks, &ksem_head, ks_entry)
		if (ks->ks_name != NULL && strcmp(ks->ks_name, name) == 0)
			return (ks);
	return (NULL);
}

static int
sem_create(td, name, ksret, mode, value)
	struct thread *td;
	const char *name;
	struct ksem **ksret;
	mode_t mode;
	unsigned int value;
{
	struct ksem *ret;
	struct proc *p;
	struct ucred *uc;
	size_t len;
	int error;

	DP(("sem_create\n"));
	p = td->td_proc;
	uc = td->td_ucred;
	if (value > SEM_VALUE_MAX)
		return (EINVAL);
	ret = malloc(sizeof(*ret), M_SEM, M_WAITOK | M_ZERO);
	if (name != NULL) {
		len = strlen(name);
		if (len > SEM_MAX_NAMELEN) {
			free(ret, M_SEM);
			return (ENAMETOOLONG);
		}
		/* Name must start with a '/' but contain no other '/'. */
		if (*name != '/' || len < 2 || index(name + 1, '/') != NULL) {
			free(ret, M_SEM);
			return (EINVAL);
		}
		ret->ks_name = malloc(len + 1, M_SEM, M_WAITOK);
		strcpy(ret->ks_name, name);
	} else {
		ret->ks_name = NULL;
	}
	ret->ks_mode = mode;
	ret->ks_value = value;
	ret->ks_ref = 1;
	ret->ks_waiters = 0;
	ret->ks_uid = uc->cr_uid;
	ret->ks_gid = uc->cr_gid;
	ret->ks_onlist = 0;
	cv_init(&ret->ks_cv, "sem");
	LIST_INIT(&ret->ks_users);
	if (name != NULL)
		sem_enter(td->td_proc, ret);
	*ksret = ret;
	mtx_lock(&sem_lock);
	if (nsems >= p31b_getcfg(CTL_P1003_1B_SEM_NSEMS_MAX)) {
		sem_leave(td->td_proc, ret);
		sem_free(ret);
		error = ENFILE;
	} else {
		nsems++;
		error = 0;
	}
	mtx_unlock(&sem_lock);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_init_args {
	unsigned int value;
	semid_t *idp;
};
int ksem_init(struct thread *td, struct ksem_init_args *uap);
#endif
int
ksem_init(td, uap)
	struct thread *td;
	struct ksem_init_args *uap;
{
	int error;

	error = kern_sem_init(td, UIO_USERSPACE, uap->value, uap->idp);
	return (error);
}

static int
kern_sem_init(td, dir, value, idp)
	struct thread *td;
	int dir;
	unsigned int value;
	semid_t *idp;
{
	struct ksem *ks;
	semid_t id;
	int error;

	error = sem_create(td, NULL, &ks, S_IRWXU | S_IRWXG, value);
	if (error)
		return (error);
	id = SEM_TO_ID(ks);
	if (dir == UIO_USERSPACE) {
		error = copyout(&id, idp, sizeof(id));
		if (error) {
			mtx_lock(&sem_lock);
			sem_rel(ks);
			mtx_unlock(&sem_lock);
			return (error);
		}
	} else {
		*idp = id;
	}
	mtx_lock(&sem_lock);
	LIST_INSERT_HEAD(&ksem_head, ks, ks_entry);
	ks->ks_onlist = 1;
	mtx_unlock(&sem_lock);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_open_args {
	char *name;
	int oflag;
	mode_t mode;
	unsigned int value;
	semid_t *idp;
};
int ksem_open(struct thread *td, struct ksem_open_args *uap);
#endif
int
ksem_open(td, uap)
	struct thread *td;
	struct ksem_open_args *uap;
{
	char name[SEM_MAX_NAMELEN + 1];
	size_t done;
	int error;

	error = copyinstr(uap->name, name, SEM_MAX_NAMELEN + 1, &done);
	if (error)
		return (error);
	DP((">>> sem_open start\n"));
	error = kern_sem_open(td, UIO_USERSPACE,
	    name, uap->oflag, uap->mode, uap->value, uap->idp);
	DP(("<<< sem_open end\n"));
	return (error);
}

static int
kern_sem_open(td, dir, name, oflag, mode, value, idp)
	struct thread *td;
	int dir;
	const char *name;
	int oflag;
	mode_t mode;
	unsigned int value;
	semid_t *idp;
{
	struct ksem *ksnew, *ks;
	int error;
	semid_t id;

	ksnew = NULL;
	mtx_lock(&sem_lock);
	ks = sem_lookup_byname(name);
	/*
	 * If we found it but O_EXCL is set, error.
	 */
	if (ks != NULL && (oflag & O_EXCL) != 0) {
		mtx_unlock(&sem_lock);
		return (EEXIST);
	}
	/*
	 * If we didn't find it...
	 */
	if (ks == NULL) {
		/*
		 * didn't ask for creation? error.
		 */
		if ((oflag & O_CREAT) == 0) {
			mtx_unlock(&sem_lock);
			return (ENOENT);
		}
		/*
		 * We may block during creation, so drop the lock.
		 */
		mtx_unlock(&sem_lock);
		error = sem_create(td, name, &ksnew, mode, value);
		if (error != 0)
			return (error);
		id = SEM_TO_ID(ksnew);
		if (dir == UIO_USERSPACE) {
			DP(("about to copyout! %d to %p\n", id, idp));
			error = copyout(&id, idp, sizeof(id));
			if (error) {
				mtx_lock(&sem_lock);
				sem_leave(td->td_proc, ksnew);
				sem_rel(ksnew);
				mtx_unlock(&sem_lock);
				return (error);
			}
		} else {
			DP(("about to set! %d to %p\n", id, idp));
			*idp = id;
		}
		/*
		 * We need to make sure we haven't lost a race while
		 * allocating during creation.
		 */
		mtx_lock(&sem_lock);
		ks = sem_lookup_byname(name);
		if (ks != NULL) {
			/* We lost the race, so drop our reference. */
			sem_leave(td->td_proc, ksnew);
			sem_rel(ksnew);
			/* We lost, and with O_EXCL we cannot afford to lose. */
			if ((oflag & O_EXCL) != 0) {
				mtx_unlock(&sem_lock);
				return (EEXIST);
			}
		} else {
			DP(("sem_create: about to add to list...\n"));
			LIST_INSERT_HEAD(&ksem_head, ksnew, ks_entry);
			DP(("sem_create: setting list bit...\n"));
			ksnew->ks_onlist = 1;
			DP(("sem_create: done, about to unlock...\n"));
		}
		mtx_unlock(&sem_lock);
	} else {
		/*
		 * If we aren't the creator, then enforce permissions.
		 */
		error = sem_perm(td, ks);
		if (!error)
			sem_ref(ks);
		mtx_unlock(&sem_lock);
		if (error)
			return (error);
		id = SEM_TO_ID(ks);
		if (dir == UIO_USERSPACE) {
			error = copyout(&id, idp, sizeof(id));
			if (error) {
				mtx_lock(&sem_lock);
				sem_rel(ks);
				mtx_unlock(&sem_lock);
				return (error);
			}
		} else {
			*idp = id;
		}
		sem_enter(td->td_proc, ks);
		mtx_lock(&sem_lock);
		sem_rel(ks);
		mtx_unlock(&sem_lock);
	}
	return (error);
}

static int
sem_perm(td, ks)
	struct thread *td;
	struct ksem *ks;
{
	struct ucred *uc;

	uc = td->td_ucred;
	DP(("sem_perm: uc(%d,%d) ks(%d,%d,%o)\n",
	    uc->cr_uid, uc->cr_gid,
	    ks->ks_uid, ks->ks_gid, ks->ks_mode));
	if ((uc->cr_uid == ks->ks_uid && (ks->ks_mode & S_IWUSR) != 0) ||
	    (uc->cr_gid == ks->ks_gid && (ks->ks_mode & S_IWGRP) != 0) ||
	    (ks->ks_mode & S_IWOTH) != 0 || suser(td) == 0)
		return (0);
	return (EPERM);
}

static void
sem_free(struct ksem *ks)
{

	nsems--;
	if (ks->ks_onlist)
		LIST_REMOVE(ks, ks_entry);
	if (ks->ks_name != NULL)
		free(ks->ks_name, M_SEM);
	cv_destroy(&ks->ks_cv);
	free(ks, M_SEM);
}

static __inline struct kuser *sem_getuser(struct proc *p, struct ksem *ks);

static __inline struct kuser *
sem_getuser(p, ks)
	struct proc *p;
	struct ksem *ks;
{
	struct kuser *k;

	LIST_FOREACH(k, &ks->ks_users, ku_next)
		if (k->ku_pid == p->p_pid)
			return (k);
	return (NULL);
}

static int
sem_hasopen(td, ks)
	struct thread *td;
	struct ksem *ks;
{

	return ((ks->ks_name == NULL && sem_perm(td, ks) == 0)
	    || sem_getuser(td->td_proc, ks) != NULL);
}

static int
sem_leave(p, ks)
	struct proc *p;
	struct ksem *ks;
{
	struct kuser *k;

	DP(("sem_leave: ks = %p\n", ks));
	k = sem_getuser(p, ks);
	DP(("sem_leave: ks = %p, k = %p\n", ks, k));
	if (k != NULL) {
		LIST_REMOVE(k, ku_next);
		sem_rel(ks);
		DP(("sem_leave: about to free k\n"));
		free(k, M_SEM);
		DP(("sem_leave: returning\n"));
		return (0);
	}
	return (EINVAL);
}

static void
sem_enter(p, ks)
	struct proc *p;
	struct ksem *ks;
{
	struct kuser *ku, *k;

	ku = malloc(sizeof(*ku), M_SEM, M_WAITOK);
	ku->ku_pid = p->p_pid;
	mtx_lock(&sem_lock);
	k = sem_getuser(p, ks);
	if (k != NULL) {
		mtx_unlock(&sem_lock);
		free(ku, M_SEM);
		return;
	}
	LIST_INSERT_HEAD(&ks->ks_users, ku, ku_next);
	sem_ref(ks);
	mtx_unlock(&sem_lock);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_unlink_args {
	char *name;
};
int ksem_unlink(struct thread *td, struct ksem_unlink_args *uap);
#endif

int
ksem_unlink(td, uap)
	struct thread *td;
	struct ksem_unlink_args *uap;
{
	char name[SEM_MAX_NAMELEN + 1];
	size_t done;
	int error;

	error = copyinstr(uap->name, name, SEM_MAX_NAMELEN + 1, &done);
	return (error ? error :
	    kern_sem_unlink(td, name));
}

static int
kern_sem_unlink(td, name)
	struct thread *td;
	const char *name;
{
	struct ksem *ks;
	int error;

	mtx_lock(&sem_lock);
	ks = sem_lookup_byname(name);
	if (ks == NULL)
		error = ENOENT;
	else
		error = sem_perm(td, ks);
	DP(("sem_unlink: '%s' ks = %p, error = %d\n", name, ks, error));
	if (error == 0) {
		LIST_REMOVE(ks, ks_entry);
		LIST_INSERT_HEAD(&ksem_deadhead, ks, ks_entry);
		sem_rel(ks);
	}
	mtx_unlock(&sem_lock);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_close_args {
	semid_t id;
};
int ksem_close(struct thread *td, struct ksem_close_args *uap);
#endif

int
ksem_close(struct thread *td, struct ksem_close_args *uap)
{

	return (kern_sem_close(td, uap->id));
}

static int
kern_sem_close(td, id)
	struct thread *td;
	semid_t id;
{
	struct ksem *ks;
	int error;

	error = EINVAL;
	mtx_lock(&sem_lock);
	ks = ID_TO_SEM(id);
	/* This is not a valid operation for unnamed sems. */
	if (ks != NULL && ks->ks_name != NULL)
		error = sem_leave(td->td_proc, ks);
	mtx_unlock(&sem_lock);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_post_args {
	semid_t id;
};
int ksem_post(struct thread *td, struct ksem_post_args *uap);
#endif
int
ksem_post(td, uap)
	struct thread *td;
	struct ksem_post_args *uap;
{

	return (kern_sem_post(td, uap->id));
}

static int
kern_sem_post(td, id)
	struct thread *td;
	semid_t id;
{
	struct ksem *ks;
	int error;

	mtx_lock(&sem_lock);
	ks = ID_TO_SEM(id);
	if (ks == NULL || !sem_hasopen(td, ks)) {
		error = EINVAL;
		goto err;
	}
	if (ks->ks_value == SEM_VALUE_MAX) {
		error = EOVERFLOW;
		goto err;
	}
	++ks->ks_value;
	if (ks->ks_waiters > 0)
		cv_signal(&ks->ks_cv);
	error = 0;
err:
	mtx_unlock(&sem_lock);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_wait_args {
	semid_t id;
};
int ksem_wait(struct thread *td, struct ksem_wait_args *uap);
#endif

int
ksem_wait(td, uap)
	struct thread *td;
	struct ksem_wait_args *uap;
{

	return (kern_sem_wait(td, uap->id, 0, NULL));
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_timedwait_args {
	semid_t id;
	struct timespec *abstime;
};
int ksem_timedwait(struct thread *td, struct ksem_timedwait_args *uap);
#endif
int
ksem_timedwait(td, uap)
	struct thread *td;
	struct ksem_timedwait_args *uap;
{
	struct timespec abstime;
	struct timespec *ts;
	int error;

	/* We allow a null timespec (wait forever). */
	if (uap->abstime == NULL)
		ts = NULL;
	else {
		error = copyin(uap->abstime, &abstime, sizeof(abstime));
		if (error != 0)
			return (error);
		if (abstime.tv_nsec >= 1000000000 || abstime.tv_nsec < 0)
			return (EINVAL);
		ts = &abstime;
	}
	return (kern_sem_wait(td, uap->id, 0, ts));
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_trywait_args {
	semid_t id;
};
int ksem_trywait(struct thread *td, struct ksem_trywait_args *uap);
#endif
int
ksem_trywait(td, uap)
	struct thread *td;
	struct ksem_trywait_args *uap;
{

	return (kern_sem_wait(td, uap->id, 1, NULL));
}

static int
kern_sem_wait(td, id, tryflag, abstime)
	struct thread *td;
	semid_t id;
	int tryflag;
	struct timespec *abstime;
{
	struct timespec ts1, ts2;
	struct timeval tv;
	struct ksem *ks;
	int error;

	DP((">>> kern_sem_wait entered!\n"));
	mtx_lock(&sem_lock);
	ks = ID_TO_SEM(id);
	if (ks == NULL) {
		DP(("kern_sem_wait ks == NULL\n"));
		error = EINVAL;
		goto err;
	}
	sem_ref(ks);
	if (!sem_hasopen(td, ks)) {
		DP(("kern_sem_wait hasopen failed\n"));
		error = EINVAL;
		goto err;
	}
	DP(("kern_sem_wait value = %d, tryflag %d\n", ks->ks_value, tryflag));
	if (ks->ks_value == 0) {
		ks->ks_waiters++;
		if (tryflag != 0)
			error = EAGAIN;
		else if (abstime == NULL)
			error = cv_wait_sig(&ks->ks_cv, &sem_lock);
		else {
			for (;;) {
				ts1 = *abstime;
				getnanotime(&ts2);
				timespecsub(&ts1, &ts2);
				TIMESPEC_TO_TIMEVAL(&tv, &ts1);
				if (tv.tv_sec < 0) {
					error = ETIMEDOUT;
					break;
				}
				error = cv_timedwait_sig(&ks->ks_cv,
				    &sem_lock, tvtohz(&tv));
				if (error != EWOULDBLOCK)
					break;
			}
		}
		ks->ks_waiters--;
		if (error)
			goto err;
	}
	ks->ks_value--;
	error = 0;
err:
	if (ks != NULL)
		sem_rel(ks);
	mtx_unlock(&sem_lock);
	DP(("<<< kern_sem_wait leaving, error = %d\n", error));
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_getvalue_args {
	semid_t id;
	int *val;
};
int ksem_getvalue(struct thread *td, struct ksem_getvalue_args *uap);
#endif
int
ksem_getvalue(td, uap)
	struct thread *td;
	struct ksem_getvalue_args *uap;
{
	struct ksem *ks;
	int error, val;

	mtx_lock(&sem_lock);
	ks = ID_TO_SEM(uap->id);
	if (ks == NULL || !sem_hasopen(td, ks)) {
		mtx_unlock(&sem_lock);
		return (EINVAL);
	}
	val = ks->ks_value;
	mtx_unlock(&sem_lock);
	error = copyout(&val, uap->val, sizeof(val));
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ksem_destroy_args {
	semid_t id;
};
int ksem_destroy(struct thread *td, struct ksem_destroy_args *uap);
#endif
int
ksem_destroy(td, uap)
	struct thread *td;
	struct ksem_destroy_args *uap;
{
	struct ksem *ks;
	int error;

	mtx_lock(&sem_lock);
	ks = ID_TO_SEM(uap->id);
	if (ks == NULL || !sem_hasopen(td, ks) ||
	    ks->ks_name != NULL) {
		error = EINVAL;
		goto err;
	}
	if (ks->ks_waiters != 0) {
		error = EBUSY;
		goto err;
	}
	sem_rel(ks);
	error = 0;
err:
	mtx_unlock(&sem_lock);
	return (error);
}

/*
 * Count the number of kusers associated with a proc, so as to guess at how
 * many to allocate when forking.
 */
static int
sem_count_proc(p)
	struct proc *p;
{
	struct ksem *ks;
	struct kuser *ku;
	int count;

	mtx_assert(&sem_lock, MA_OWNED);

	count = 0;
	LIST_FOREACH(ks, &ksem_head, ks_entry) {
		LIST_FOREACH(ku, &ks->ks_users, ku_next) {
			if (ku->ku_pid == p->p_pid)
				count++;
		}
	}
	LIST_FOREACH(ks, &ksem_deadhead, ks_entry) {
		LIST_FOREACH(ku, &ks->ks_users, ku_next) {
			if (ku->ku_pid == p->p_pid)
				count++;
		}
	}
	return (count);
}

/*
 * When a process forks, the child process must gain a reference to each open
 * semaphore in the parent process, whether it is unlinked or not.  This
 * requires allocating a kuser structure for each semaphore reference in the
 * new process.  Because the set of semaphores in the parent can change while
 * the fork is in progress, we have to handle races -- first we attempt to
 * allocate enough storage to acquire references to each of the semaphores,
 * then we enter the semaphores and release the temporary references.
 */
static void
sem_forkhook(arg, p1, p2, flags)
	void *arg;
	struct proc *p1;
	struct proc *p2;
	int flags;
{
	struct ksem *ks, **sem_array;
	int count, i, new_count;
	struct kuser *ku;

	mtx_lock(&sem_lock);
	count = sem_count_proc(p1);
race_lost:
	mtx_assert(&sem_lock, MA_OWNED);
	mtx_unlock(&sem_lock);
	sem_array = malloc(sizeof(struct ksem *) * count, M_TEMP, M_WAITOK);
	mtx_lock(&sem_lock);
	new_count = sem_count_proc(p1);
	if (count < new_count) {
		/* Lost race, repeat and allocate more storage. */
		free(sem_array, M_TEMP);
		count = new_count;
		goto race_lost;
	}
	/*
	 * Given an array capable of storing an adequate number of semaphore
	 * references, now walk the list of semaphores and acquire a new
	 * reference for any semaphore opened by p1.
	 */
	count = new_count;
	i = 0;
	LIST_FOREACH(ks, &ksem_head, ks_entry) {
		LIST_FOREACH(ku, &ks->ks_users, ku_next) {
			if (ku->ku_pid == p1->p_pid) {
				sem_ref(ks);
				sem_array[i] = ks;
				i++;
				break;
			}
		}
	}
	LIST_FOREACH(ks, &ksem_deadhead, ks_entry) {
		LIST_FOREACH(ku, &ks->ks_users, ku_next) {
			if (ku->ku_pid == p1->p_pid) {
				sem_ref(ks);
				sem_array[i] = ks;
				i++;
				break;
			}
		}
	}
	mtx_unlock(&sem_lock);
	KASSERT(i == count, ("sem_forkhook: i != count (%d, %d)", i, count));
	/*
	 * Now cause p2 to enter each of the referenced semaphores, then
	 * release our temporary reference.  This is pretty inefficient.
	 * Finally, free our temporary array.
	 */
	for (i = 0; i < count; i++) {
		sem_enter(p2, sem_array[i]);
		mtx_lock(&sem_lock);
		sem_rel(sem_array[i]);
		mtx_unlock(&sem_lock);
	}
	free(sem_array, M_TEMP);
}

static void
sem_exithook(arg, p)
	void *arg;
	struct proc *p;
{
	struct ksem *ks, *ksnext;

	mtx_lock(&sem_lock);
	ks = LIST_FIRST(&ksem_head);
	while (ks != NULL) {
		ksnext = LIST_NEXT(ks, ks_entry);
		sem_leave(p, ks);
		ks = ksnext;
	}
	ks = LIST_FIRST(&ksem_deadhead);
	while (ks != NULL) {
		ksnext = LIST_NEXT(ks, ks_entry);
		sem_leave(p, ks);
		ks = ksnext;
	}
	mtx_unlock(&sem_lock);
}

static int
sem_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		mtx_init(&sem_lock, "sem", "semaphore", MTX_DEF);
		p31b_setcfg(CTL_P1003_1B_SEM_NSEMS_MAX, SEM_MAX);
		p31b_setcfg(CTL_P1003_1B_SEM_VALUE_MAX, SEM_VALUE_MAX);
		sem_exit_tag = EVENTHANDLER_REGISTER(process_exit,
		    sem_exithook, NULL, EVENTHANDLER_PRI_ANY);
		sem_exec_tag = EVENTHANDLER_REGISTER(process_exec,
		    sem_exithook, NULL, EVENTHANDLER_PRI_ANY);
		sem_fork_tag = EVENTHANDLER_REGISTER(process_fork,
		    sem_forkhook, NULL, EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		if (nsems != 0) {
			error = EOPNOTSUPP;
			break;
		}
		EVENTHANDLER_DEREGISTER(process_exit, sem_exit_tag);
		EVENTHANDLER_DEREGISTER(process_exec, sem_exec_tag);
		EVENTHANDLER_DEREGISTER(process_fork, sem_fork_tag);
		mtx_destroy(&sem_lock);
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sem_mod = {
	"sem",
	&sem_modload,
	NULL
};

SYSCALL_MODULE_HELPER(ksem_init);
SYSCALL_MODULE_HELPER(ksem_open);
SYSCALL_MODULE_HELPER(ksem_unlink);
SYSCALL_MODULE_HELPER(ksem_close);
SYSCALL_MODULE_HELPER(ksem_post);
SYSCALL_MODULE_HELPER(ksem_wait);
SYSCALL_MODULE_HELPER(ksem_timedwait);
SYSCALL_MODULE_HELPER(ksem_trywait);
SYSCALL_MODULE_HELPER(ksem_getvalue);
SYSCALL_MODULE_HELPER(ksem_destroy);

DECLARE_MODULE(sem, sem_mod, SI_SUB_SYSV_SEM, SI_ORDER_FIRST);
MODULE_VERSION(sem, 1);
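
/*
 * Usage sketch (illustrative only, not part of this file's interface): how
 * a userland caller would be expected to drive the ksem_*() system calls
 * implemented above.  Argument order follows the ksem_*_args structs; it is
 * assumed here that the raw ksem_*() stubs and semid_t are visible to the
 * program, which in practice is normally only true of libc's sem_*()
 * wrappers.
 *
 *	semid_t id;
 *
 *	if (ksem_open("/mysem", O_CREAT, 0600, 1, &id) != 0)
 *		err(1, "ksem_open");
 *	if (ksem_wait(id) != 0)
 *		err(1, "ksem_wait");
 *	...critical section; the semaphore value is now 0...
 *	if (ksem_post(id) != 0)
 *		err(1, "ksem_post");
 *	ksem_close(id);
 *	ksem_unlink("/mysem");
 *
 * Names must begin with '/' and contain no further '/' (see sem_create());
 * the semaphore itself persists until its last reference is released.
 */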