/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <vm/vm_domain.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

static SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0,
    "thread allocation");

static int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "kern.threads.max_threads_per_proc hit count");

#ifdef COMPAT_FREEBSD32

static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
	int error;

	if (SV_CURPROC_FLAG(SV_LP64))
		error = suword(addr, lwpid);
	else
		error = suword32(addr, lwpid);
	return (error);
}

#else
#define	suword_lwpid	suword
#endif

/*
 * System call interface.
 */

struct thr_create_initthr_args {
	ucontext_t ctx;
	long *tid;
};

static int
thr_create_initthr(struct thread *td, void *thunk)
{
	struct thr_create_initthr_args *args;

	/* Copy out the child tid. */
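	/*
	 * The store goes through suword_lwpid() so that, under
	 * COMPAT_FREEBSD32, a 32-bit process gets a 32-bit store while a
	 * 64-bit process gets a full long; a fault on the user-supplied
	 * pointer is reported as EFAULT below.
	 */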
	args = thunk;
	if (args->tid != NULL && suword_lwpid(args->tid, td->td_tid))
		return (EFAULT);

	return (set_mcontext(td, &args->ctx.uc_mcontext));
}

int
sys_thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
	struct thr_create_initthr_args args;
	int error;

	if ((error = copyin(uap->ctx, &args.ctx, sizeof(args.ctx))))
		return (error);
	args.tid = uap->id;
	return (thread_create(td, NULL, thr_create_initthr, &args));
}

int
sys_thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
	struct thr_param param;
	int error;

	if (uap->param_size < 0 || uap->param_size > sizeof(param))
		return (EINVAL);
	bzero(&param, sizeof(param));
	if ((error = copyin(uap->param, &param, uap->param_size)))
		return (error);
	return (kern_thr_new(td, &param));
}

static int
thr_new_initthr(struct thread *td, void *thunk)
{
	stack_t stack;
	struct thr_param *param;

	/*
	 * Here we copy out the tid to two places, one for the child and
	 * one for the parent: pthread can create a detached thread, so
	 * if the parent wants to access the child tid safely it has to
	 * provide its own storage, because the child thread may exit
	 * quickly and its memory be freed before the parent can access it.
	 */
	param = thunk;
	if ((param->child_tid != NULL &&
	    suword_lwpid(param->child_tid, td->td_tid)) ||
	    (param->parent_tid != NULL &&
	    suword_lwpid(param->parent_tid, td->td_tid)))
		return (EFAULT);

	/* Set up our machine context. */
	stack.ss_sp = param->stack_base;
	stack.ss_size = param->stack_size;
	/* Set upcall address to user thread entry function. */
	cpu_set_upcall(td, param->start_func, param->arg, &stack);
	/* Set up the user TLS address and TLS pointer register. */
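	/*
	 * cpu_set_user_tls() points the machine-dependent TLS register
	 * (e.g. the %gs base on i386, the %fs base on amd64) at
	 * param->tls_base, so the new thread's TLS is live before it
	 * first returns to user mode.
	 */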
	return (cpu_set_user_tls(td, param->tls_base));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
	struct rtprio rtp, *rtpp;
	int error;

	rtpp = NULL;
	if (param->rtp != 0) {
		error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
		if (error)
			return (error);
		rtpp = &rtp;
	}
	return (thread_create(td, rtpp, thr_new_initthr, param));
}

int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	if (rtp != NULL) {
		switch (rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set the scheduler policy. */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our new td. */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_copy_thread(newtd, td);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_rb_list = newtd->td_rbp_list = newtd->td_rb_inact = 0;
	thread_cow_get(newtd, td);

	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* Let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_flag2 & P2_LWP_EVENTS)
		newtd->td_dbgflags |= TDB_BORN;

	/*
	 * Copy the existing thread VM policy into the new thread.
	 */
	vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
	    &td->td_vm_dom_policy);

	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* Ignore timesharing class. */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}

int
sys_thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	int error;

	error = suword_lwpid(uap->id, (unsigned)td->td_tid);
	if (error == -1)
		return (EFAULT);
	return (0);
}

int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{

	umtx_thread_exit(td);

	/* Signal userland that it can free the stack. */
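	/*
	 * The *state word doubles as an umtx: the exiting thread stores 1
	 * and then wakes any waiters (e.g. a joiner in pthread_join())
	 * sleeping on that address, telling userland the stack is no
	 * longer in use and may be unmapped or recycled.
	 */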
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	return (kern_thr_exit(td));
}

int
kern_thr_exit(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * If all of the threads in a process call this routine to
	 * exit (e.g. all threads call pthread_exit()), exactly one
	 * thread should return to the caller to terminate the process
	 * instead of the thread.
	 *
	 * Checking p_numthreads alone is not sufficient since threads
	 * might be committed to terminating while the PROC_LOCK is
	 * dropped, either in ptracestop() or while removing this thread
	 * from the tidhash.  Instead, the p_pendingexits field holds
	 * the count of threads in either of those states and a thread
	 * is considered the "last" thread if all of the other threads
	 * in a process are already terminating.
	 */
	PROC_LOCK(p);
	if (p->p_numthreads == p->p_pendingexits + 1) {
		/*
		 * Ignore attempts to shut down the last thread in the
		 * proc.  This will actually call _exit(2) in the
		 * usermode trampoline when it returns.
		 */
		PROC_UNLOCK(p);
		return (0);
	}

	p->p_pendingexits++;
	td->td_dbgflags |= TDB_EXIT;
	if (p->p_flag & P_TRACED && p->p_flag2 & P2_LWP_EVENTS)
		ptracestop(td, SIGTRAP);
	PROC_UNLOCK(p);
	tidhash_remove(td);
	PROC_LOCK(p);
	p->p_pendingexits--;

	/*
	 * The check above should prevent all other threads from this
	 * process from exiting while the PROC_LOCK is dropped, so
	 * there must be at least one thread other than the current
	 * thread.
	 */
	KASSERT(p->p_numthreads > 1, ("too few threads"));
	racct_sub(p, RACCT_NTHR, 1);
	tdsigcleanup(td);
	PROC_SLOCK(p);
	thread_stopped(p);
	thread_exit();
	/* NOTREACHED */
}

int
sys_thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	p = td->td_proc;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
			PROC_UNLOCK(p);
		}
	} else {
		error = 0;
		ttd = tdfind((lwpid_t)uap->id, p->p_pid);
		if (ttd == NULL)
			return (ESRCH);
		if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdksignal(ttd, uap->sig, &ksi);
		PROC_UNLOCK(ttd->td_proc);
	}
	return (error);
}

int
sys_thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->sig);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if ((p = pfind(uap->pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, uap->sig);
		if (error) {
			PROC_UNLOCK(p);
			return (error);
		}
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
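			/*
			 * As in thr_kill(), a signal of 0 only probes for
			 * the existence of another thread; any nonzero
			 * value must be a valid signal number.
			 */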
			error = EINVAL;
		} else {
			error = ESRCH;
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
		}
		PROC_UNLOCK(p);
	} else {
		ttd = tdfind((lwpid_t)uap->id, uap->pid);
		if (ttd == NULL)
			return (ESRCH);
		p = ttd->td_proc;
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, uap->sig);
		if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdksignal(ttd, uap->sig, &ksi);
		PROC_UNLOCK(p);
	}
	return (error);
}

int
sys_thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
	struct timespec ts, *tsp;
	int error;

	tsp = NULL;
	if (uap->timeout != NULL) {
		error = umtx_copyin_timeout(uap->timeout, &ts);
		if (error != 0)
			return (error);
		tsp = &ts;
	}

	return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
	struct proc *p = td->td_proc;
	struct timeval tv;
	int error = 0;
	int timo = 0;

	if (td->td_pflags & TDP_WAKEUP) {
		td->td_pflags &= ~TDP_WAKEUP;
		return (0);
	}

	if (tsp != NULL) {
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			error = EWOULDBLOCK;
		else {
			TIMESPEC_TO_TIMEVAL(&tv, tsp);
			timo = tvtohz(&tv);
		}
	}

	PROC_LOCK(p);
	if (error == 0 && (td->td_flags & TDF_THRWAKEUP) == 0)
		error = msleep((void *)td, &p->p_mtx, PCATCH, "lthr", timo);

	if (td->td_flags & TDF_THRWAKEUP) {
		thread_lock(td);
		td->td_flags &= ~TDF_THRWAKEUP;
		thread_unlock(td);
		PROC_UNLOCK(p);
		return (0);
	}
	PROC_UNLOCK(p);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	else if (error == ERESTART) {
		if (timo != 0)
			error = EINTR;
	}
	return (error);
}

int
sys_thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
	struct proc *p;
	struct thread *ttd;

	if (uap->id == td->td_tid) {
		td->td_pflags |= TDP_WAKEUP;
		return (0);
	}

	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	thread_lock(ttd);
	ttd->td_flags |= TDF_THRWAKEUP;
	thread_unlock(ttd);
	wakeup((void *)ttd);
	PROC_UNLOCK(p);
	return (0);
}

int
sys_thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
	struct proc *p;
	char name[MAXCOMLEN + 1];
	struct thread *ttd;
	int error;

	error = 0;
	name[0] = '\0';
	if (uap->name != NULL) {
		error = copyinstr(uap->name, name, sizeof(name), NULL);
		if (error)
			return (error);
	}
	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	strcpy(ttd->td_name, name);
#ifdef KTR
	sched_clear_tdname(ttd);
#endif
	PROC_UNLOCK(p);
	return (error);
}

int
kern_thr_alloc(struct proc *p, int pages, struct thread **ntd)
{

	/* This has a race condition, but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	*ntd = thread_alloc(pages);
	if (*ntd == NULL)
		return (ENOMEM);

	return (0);
}
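
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * userland call to thr_new(2), roughly what libthr's thread-creation path
 * does.  The stack size and helper names here are arbitrary assumptions;
 * portable code should use pthread_create(3) instead.
 *
 *	#include <sys/thr.h>
 *	#include <stdlib.h>
 *
 *	static long child_tid, parent_tid;
 *
 *	static void
 *	entry(void *arg)
 *	{
 *		// ... thread body; must end by calling thr_exit(2) ...
 *	}
 *
 *	void
 *	spawn(void)
 *	{
 *		struct thr_param p = {
 *			.start_func = entry,
 *			.arg = NULL,
 *			.stack_base = malloc(1 << 20),
 *			.stack_size = 1 << 20,
 *			.child_tid = &child_tid,
 *			.parent_tid = &parent_tid,
 *		};
 *		(void)thr_new(&p, sizeof(p));
 *	}
 */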