/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

static SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0,
    "thread allocation");

static int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "kern.threads.max_threads_per_proc hit count");

#ifdef COMPAT_FREEBSD32

static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
	int error;

	if (SV_CURPROC_FLAG(SV_LP64))
		error = suword(addr, lwpid);
	else
		error = suword32(addr, lwpid);
	return (error);
}

#else
#define suword_lwpid	suword
#endif

static int create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp);

/*
 * System call interface.
 */
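
/*
 * For reference: userland (libthr) normally enters this interface through
 * thr_new(2), passing a struct thr_param; thr_create() immediately below is
 * the older, ucontext-based entry point.  A hedged, illustrative sketch of
 * the caller side only -- names such as "entry", "arg", "stk" and "tls" are
 * invented here, and real libthr setup is more involved:
 *
 *	struct thr_param p;
 *	long child_tid, parent_tid;
 *
 *	bzero(&p, sizeof(p));
 *	p.start_func = entry;		// void (*entry)(void *)
 *	p.arg = arg;
 *	p.stack_base = stk;		// stack mapped by the caller
 *	p.stack_size = stksz;
 *	p.tls_base = tls;
 *	p.child_tid = &child_tid;	// kernel writes the new tid here
 *	p.parent_tid = &parent_tid;	// and here, for the creating thread
 *	(void)thr_new(&p, sizeof(p));
 */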
int
sys_thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
	ucontext_t ctx;
	int error;

	if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
		return (error);

	error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
	    NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
	return (error);
}

int
sys_thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
	struct thr_param param;
	int error;

	if (uap->param_size < 0 || uap->param_size > sizeof(param))
		return (EINVAL);
	bzero(&param, sizeof(param));
	if ((error = copyin(uap->param, &param, uap->param_size)))
		return (error);
	return (kern_thr_new(td, &param));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
	struct rtprio rtp, *rtpp;
	int error;

	rtpp = NULL;
	if (param->rtp != 0) {
		error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
		if (error)
			return (error);
		rtpp = &rtp;
	}
	error = create_thread(td, NULL, param->start_func, param->arg,
	    param->stack_base, param->stack_size, param->tls_base,
	    param->child_tid, param->parent_tid, param->flags,
	    rtpp);
	return (error);
}

static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_set_upcall(newtd, td);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * Here we copy the tid out to two places, one for the child and
	 * one for the parent, because pthread can create a detached
	 * thread: if the parent wants to safely access the child tid, it
	 * has to provide its own storage, because the child thread may
	 * exit quickly and the memory be freed before the parent thread
	 * can access it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		error = EFAULT;
		goto fail;
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	thread_cow_get(newtd, td);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_cow_free(newtd);
			thread_free(newtd);
			goto fail;
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Set up the user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_cow_free(newtd);
			thread_free(newtd);
			goto fail;
		}
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}

int
sys_thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	int error;

	error = suword_lwpid(uap->id, (unsigned)td->td_tid);
	if (error == -1)
		return (EFAULT);
	return (0);
}

int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	return (kern_thr_exit(td));
}

int
kern_thr_exit(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;

	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);

	if (p->p_numthreads != 1) {
		racct_sub(p, RACCT_NTHR, 1);
		LIST_REMOVE(td, td_hash);
		rw_wunlock(&tidhash_lock);
		tdsigcleanup(td);
		umtx_thread_exit(td);
		PROC_SLOCK(p);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * Ignore attempts to shut down the last thread in the proc.  This
	 * will actually call _exit(2) in the usermode trampoline when
	 * it returns.
	 */
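	/*
	 * Roughly, the userland side of that last-thread case looks like
	 * the hedged sketch below (the exact libthr code differs):
	 *
	 *	thr_exit(&state);	// returns only for the last thread
	 *	exit(0);		// the trampoline then ends the process
	 */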
	PROC_UNLOCK(p);
	rw_wunlock(&tidhash_lock);
	return (0);
}

int
sys_thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	p = td->td_proc;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
			PROC_UNLOCK(p);
		}
	} else {
		error = 0;
		ttd = tdfind((lwpid_t)uap->id, p->p_pid);
		if (ttd == NULL)
			return (ESRCH);
		if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdksignal(ttd, uap->sig, &ksi);
		PROC_UNLOCK(ttd->td_proc);
	}
	return (error);
}

int
sys_thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->sig);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if ((p = pfind(uap->pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, uap->sig);
		if (error) {
			PROC_UNLOCK(p);
			return (error);
		}
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
		}
		PROC_UNLOCK(p);
	} else {
		ttd = tdfind((lwpid_t)uap->id, uap->pid);
		if (ttd == NULL)
			return (ESRCH);
		p = ttd->td_proc;
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, uap->sig);
		if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdksignal(ttd, uap->sig, &ksi);
		PROC_UNLOCK(p);
	}
	return (error);
}

int
sys_thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
	struct timespec ts, *tsp;
	int error;

	tsp = NULL;
	if (uap->timeout != NULL) {
		error = umtx_copyin_timeout(uap->timeout, &ts);
		if (error != 0)
			return (error);
		tsp = &ts;
	}

	return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
	struct proc *p = td->td_proc;
	struct timeval tv;
	int error = 0;
	int timo = 0;

	if (td->td_pflags & TDP_WAKEUP) {
		td->td_pflags &= ~TDP_WAKEUP;
		return (0);
	}

	if (tsp != NULL) {
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			error = EWOULDBLOCK;
		else {
			TIMESPEC_TO_TIMEVAL(&tv, tsp);
			timo = tvtohz(&tv);
		}
	}

	PROC_LOCK(p);
	if (error == 0 && (td->td_flags & TDF_THRWAKEUP) == 0)
		error = msleep((void *)td, &p->p_mtx,
		    PCATCH, "lthr", timo);

	if (td->td_flags & TDF_THRWAKEUP) {
		thread_lock(td);
		td->td_flags &= ~TDF_THRWAKEUP;
		thread_unlock(td);
		PROC_UNLOCK(p);
		return (0);
	}
	PROC_UNLOCK(p);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	else if (error == ERESTART) {
		if (timo != 0)
			error = EINTR;
	}
	return (error);
}
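
/*
 * For reference, thr_suspend(2) and thr_wake(2) pair up in userland roughly
 * as in the hedged sketch below; libthr's actual suspension protocol is more
 * involved and "a_tid" is an invented name:
 *
 *	// Thread A: sleep for at most one second unless woken earlier.
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	if (thr_suspend(&ts) == -1)
 *		;	// errno is ETIMEDOUT or EINTR
 *
 *	// Thread B: wake thread A by its thread id.
 *	thr_wake(a_tid);
 */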
int
sys_thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
	struct proc *p;
	struct thread *ttd;

	if (uap->id == td->td_tid) {
		td->td_pflags |= TDP_WAKEUP;
		return (0);
	}

	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	thread_lock(ttd);
	ttd->td_flags |= TDF_THRWAKEUP;
	thread_unlock(ttd);
	wakeup((void *)ttd);
	PROC_UNLOCK(p);
	return (0);
}

int
sys_thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
	struct proc *p;
	char name[MAXCOMLEN + 1];
	struct thread *ttd;
	int error;

	error = 0;
	name[0] = '\0';
	if (uap->name != NULL) {
		error = copyinstr(uap->name, name, sizeof(name),
		    NULL);
		if (error)
			return (error);
	}
	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	strcpy(ttd->td_name, name);
#ifdef KTR
	sched_clear_tdname(ttd);
#endif
	PROC_UNLOCK(p);
	return (error);
}

int
kern_thr_alloc(struct proc *p, int pages, struct thread **ntd)
{

	/* This check has a race condition, but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	*ntd = thread_alloc(pages);
	if (*ntd == NULL)
		return (ENOMEM);

	return (0);
}