/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <vm/vm_domain.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

static SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0,
    "thread allocation");

static int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "kern.threads.max_threads_per_proc hit count");

#ifdef COMPAT_FREEBSD32

static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
        int error;

        if (SV_CURPROC_FLAG(SV_LP64))
                error = suword(addr, lwpid);
        else
                error = suword32(addr, lwpid);
        return (error);
}

#else
#define suword_lwpid    suword
#endif

static int create_thread(struct thread *td, mcontext_t *ctx,
    void (*start_func)(void *), void *arg,
    char *stack_base, size_t stack_size,
    char *tls_base,
    long *child_tid, long *parent_tid,
    int flags, struct rtprio *rtp);

/*
 * System call interface.
 */
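/*
 * thr_create(2): legacy entry point that builds the new thread from a full
 * ucontext_t supplied by the caller; thr_new(2) below is the newer interface
 * used by libthr.
 */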
int
sys_thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
        ucontext_t ctx;
        int error;

        if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
                return (error);

        error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
            NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
        return (error);
}

int
sys_thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
        struct thr_param param;
        int error;

        if (uap->param_size < 0 || uap->param_size > sizeof(param))
                return (EINVAL);
        bzero(&param, sizeof(param));
        if ((error = copyin(uap->param, &param, uap->param_size)))
                return (error);
        return (kern_thr_new(td, &param));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
        struct rtprio rtp, *rtpp;
        int error;

        rtpp = NULL;
        if (param->rtp != 0) {
                error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
                if (error)
                        return (error);
                rtpp = &rtp;
        }
        error = create_thread(td, NULL, param->start_func, param->arg,
            param->stack_base, param->stack_size, param->tls_base,
            param->child_tid, param->parent_tid, param->flags,
            rtpp);
        return (error);
}
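/*
 * Common back end for thr_create(2) and thr_new(2): allocate a new thread
 * in the current process, copy its tid out to the parent- and child-visible
 * locations, install the requested user context (or entry point, stack and
 * TLS), inherit the caller's scheduling and VM domain policy, link the
 * thread into the process and make it runnable.
 */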
static int
create_thread(struct thread *td, mcontext_t *ctx,
    void (*start_func)(void *), void *arg,
    char *stack_base, size_t stack_size,
    char *tls_base,
    long *child_tid, long *parent_tid,
    int flags, struct rtprio *rtp)
{
        stack_t stack;
        struct thread *newtd;
        struct proc *p;
        int error;

        p = td->td_proc;

        if (rtp != NULL) {
                switch (rtp->type) {
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_FIFO:
                        /* Only root can set scheduler policy */
                        if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
                                return (EPERM);
                        if (rtp->prio > RTP_PRIO_MAX)
                                return (EINVAL);
                        break;
                case RTP_PRIO_NORMAL:
                        rtp->prio = 0;
                        break;
                default:
                        return (EINVAL);
                }
        }

#ifdef RACCT
        if (racct_enable) {
                PROC_LOCK(p);
                error = racct_add(p, RACCT_NTHR, 1);
                PROC_UNLOCK(p);
                if (error != 0)
                        return (EPROCLIM);
        }
#endif

        /* Initialize our td */
        error = kern_thr_alloc(p, 0, &newtd);
        if (error)
                goto fail;

        cpu_set_upcall(newtd, td);

        /*
         * Try the copyout as soon as we allocate the td so we don't
         * have to tear things down in a failure case below.  The tid
         * is copied out to two places, one for the child and one for
         * the parent, because pthread can create a detached thread:
         * if the parent wants to access the child's tid safely it must
         * provide its own storage, since the child may exit quickly
         * and have its memory freed before the parent can look at it.
         */
        if ((child_tid != NULL &&
            suword_lwpid(child_tid, newtd->td_tid)) ||
            (parent_tid != NULL &&
            suword_lwpid(parent_tid, newtd->td_tid))) {
                thread_free(newtd);
                error = EFAULT;
                goto fail;
        }

        bzero(&newtd->td_startzero,
            __rangeof(struct thread, td_startzero, td_endzero));
        bcopy(&td->td_startcopy, &newtd->td_startcopy,
            __rangeof(struct thread, td_startcopy, td_endcopy));
        newtd->td_proc = td->td_proc;
        thread_cow_get(newtd, td);

        if (ctx != NULL) { /* old way to set user context */
                error = set_mcontext(newtd, ctx);
                if (error != 0) {
                        thread_cow_free(newtd);
                        thread_free(newtd);
                        goto fail;
                }
        } else {
                /* Set up our machine context. */
                stack.ss_sp = stack_base;
                stack.ss_size = stack_size;
                /* Set upcall address to user thread entry function. */
                cpu_set_upcall_kse(newtd, start_func, arg, &stack);
                /* Setup user TLS address and TLS pointer register. */
                error = cpu_set_user_tls(newtd, tls_base);
                if (error != 0) {
                        thread_cow_free(newtd);
                        thread_free(newtd);
                        goto fail;
                }
        }

        PROC_LOCK(p);
        p->p_flag |= P_HADTHREADS;
        thread_link(newtd, p);
        bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
        thread_lock(td);
        /* let the scheduler know about these things. */
        sched_fork_thread(td, newtd);
        thread_unlock(td);
        if (P_SHOULDSTOP(p))
                newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;

        /*
         * Copy the existing thread VM policy into the new thread.
         */
        vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
            &td->td_vm_dom_policy);

        PROC_UNLOCK(p);

        tidhash_add(newtd);

        thread_lock(newtd);
        if (rtp != NULL) {
                if (!(td->td_pri_class == PRI_TIMESHARE &&
                    rtp->type == RTP_PRIO_NORMAL)) {
                        rtp_to_pri(rtp, newtd);
                        sched_prio(newtd, newtd->td_user_pri);
                } /* ignore timesharing class */
        }
        TD_SET_CAN_RUN(newtd);
        sched_add(newtd, SRQ_BORING);
        thread_unlock(newtd);

        return (0);

fail:
#ifdef RACCT
        if (racct_enable) {
                PROC_LOCK(p);
                racct_sub(p, RACCT_NTHR, 1);
                PROC_UNLOCK(p);
        }
#endif
        return (error);
}

int
sys_thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
        int error;

        error = suword_lwpid(uap->id, (unsigned)td->td_tid);
        if (error == -1)
                return (EFAULT);
        return (0);
}

int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{

        /* Signal userland that it can free the stack. */
        if ((void *)uap->state != NULL) {
                suword_lwpid(uap->state, 1);
                kern_umtx_wake(td, uap->state, INT_MAX, 0);
        }

        return (kern_thr_exit(td));
}

int
kern_thr_exit(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;

        rw_wlock(&tidhash_lock);
        PROC_LOCK(p);

        if (p->p_numthreads != 1) {
                racct_sub(p, RACCT_NTHR, 1);
                LIST_REMOVE(td, td_hash);
                rw_wunlock(&tidhash_lock);
                tdsigcleanup(td);
                umtx_thread_exit(td);
                PROC_SLOCK(p);
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }

        /*
         * Ignore attempts to shut down last thread in the proc.  This
         * will actually call _exit(2) in the usermode trampoline when
         * it returns.
         */
        PROC_UNLOCK(p);
        rw_wunlock(&tidhash_lock);
        return (0);
}

/*
 * Deliver a signal to a single thread in the current process, or to every
 * other thread in the process when id is -1.  A signal number of zero only
 * checks that the target thread exists.
 */
int
sys_thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
        ksiginfo_t ksi;
        struct thread *ttd;
        struct proc *p;
        int error;

        p = td->td_proc;
        ksiginfo_init(&ksi);
        ksi.ksi_signo = uap->sig;
        ksi.ksi_code = SI_LWP;
        ksi.ksi_pid = p->p_pid;
        ksi.ksi_uid = td->td_ucred->cr_ruid;
        if (uap->id == -1) {
                if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
                        error = EINVAL;
                } else {
                        error = ESRCH;
                        PROC_LOCK(p);
                        FOREACH_THREAD_IN_PROC(p, ttd) {
                                if (ttd != td) {
                                        error = 0;
                                        if (uap->sig == 0)
                                                break;
                                        tdksignal(ttd, uap->sig, &ksi);
                                }
                        }
                        PROC_UNLOCK(p);
                }
        } else {
                error = 0;
                ttd = tdfind((lwpid_t)uap->id, p->p_pid);
                if (ttd == NULL)
                        return (ESRCH);
                if (uap->sig == 0)
                        ;
                else if (!_SIG_VALID(uap->sig))
                        error = EINVAL;
                else
                        tdksignal(ttd, uap->sig, &ksi);
                PROC_UNLOCK(ttd->td_proc);
        }
        return (error);
}

int
sys_thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
        ksiginfo_t ksi;
        struct thread *ttd;
        struct proc *p;
        int error;

        AUDIT_ARG_SIGNUM(uap->sig);

        ksiginfo_init(&ksi);
        ksi.ksi_signo = uap->sig;
        ksi.ksi_code = SI_LWP;
        ksi.ksi_pid = td->td_proc->p_pid;
        ksi.ksi_uid = td->td_ucred->cr_ruid;
        if (uap->id == -1) {
                if ((p = pfind(uap->pid)) == NULL)
                        return (ESRCH);
                AUDIT_ARG_PROCESS(p);
                error = p_cansignal(td, p, uap->sig);
                if (error) {
                        PROC_UNLOCK(p);
                        return (error);
                }
                if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
                        error = EINVAL;
                } else {
                        error = ESRCH;
                        FOREACH_THREAD_IN_PROC(p, ttd) {
                                if (ttd != td) {
                                        error = 0;
                                        if (uap->sig == 0)
                                                break;
                                        tdksignal(ttd, uap->sig, &ksi);
                                }
                        }
                }
                PROC_UNLOCK(p);
        } else {
                ttd = tdfind((lwpid_t)uap->id, uap->pid);
                if (ttd == NULL)
                        return (ESRCH);
                p = ttd->td_proc;
                AUDIT_ARG_PROCESS(p);
                error = p_cansignal(td, p, uap->sig);
                if (uap->sig == 0)
                        ;
                else if (!_SIG_VALID(uap->sig))
                        error = EINVAL;
                else
                        tdksignal(ttd, uap->sig, &ksi);
                PROC_UNLOCK(p);
        }
        return (error);
}

int
sys_thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
        struct timespec ts, *tsp;
        int error;

        tsp = NULL;
        if (uap->timeout != NULL) {
                error = umtx_copyin_timeout(uap->timeout, &ts);
                if (error != 0)
                        return (error);
                tsp = &ts;
        }

        return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
        struct proc *p = td->td_proc;
        struct timeval tv;
        int error = 0;
        int timo = 0;

        if (td->td_pflags & TDP_WAKEUP) {
                td->td_pflags &= ~TDP_WAKEUP;
                return (0);
        }

        if (tsp != NULL) {
                if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
                        error = EWOULDBLOCK;
                else {
                        TIMESPEC_TO_TIMEVAL(&tv, tsp);
                        timo = tvtohz(&tv);
                }
        }

        PROC_LOCK(p);
        if (error == 0 && (td->td_flags & TDF_THRWAKEUP) == 0)
                error = msleep((void *)td, &p->p_mtx,
                    PCATCH, "lthr", timo);

        if (td->td_flags & TDF_THRWAKEUP) {
                thread_lock(td);
                td->td_flags &= ~TDF_THRWAKEUP;
                thread_unlock(td);
                PROC_UNLOCK(p);
                return (0);
        }
        PROC_UNLOCK(p);
        if (error == EWOULDBLOCK)
                error = ETIMEDOUT;
        else if (error == ERESTART) {
                if (timo != 0)
                        error = EINTR;
        }
        return (error);
}
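
/*
 * Wake a thread sleeping in thr_suspend(2).  Waking yourself sets
 * TDP_WAKEUP so that the next thr_suspend() call returns immediately;
 * waking another thread sets TDF_THRWAKEUP on it and wakes it up.
 */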
int
sys_thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
        struct proc *p;
        struct thread *ttd;

        if (uap->id == td->td_tid) {
                td->td_pflags |= TDP_WAKEUP;
                return (0);
        }

        p = td->td_proc;
        ttd = tdfind((lwpid_t)uap->id, p->p_pid);
        if (ttd == NULL)
                return (ESRCH);
        thread_lock(ttd);
        ttd->td_flags |= TDF_THRWAKEUP;
        thread_unlock(ttd);
        wakeup((void *)ttd);
        PROC_UNLOCK(p);
        return (0);
}

int
sys_thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
        struct proc *p;
        char name[MAXCOMLEN + 1];
        struct thread *ttd;
        int error;

        error = 0;
        name[0] = '\0';
        if (uap->name != NULL) {
                error = copyinstr(uap->name, name, sizeof(name),
                    NULL);
                if (error)
                        return (error);
        }
        p = td->td_proc;
        ttd = tdfind((lwpid_t)uap->id, p->p_pid);
        if (ttd == NULL)
                return (ESRCH);
        strcpy(ttd->td_name, name);
#ifdef KTR
        sched_clear_tdname(ttd);
#endif
        PROC_UNLOCK(p);
        return (error);
}

int
kern_thr_alloc(struct proc *p, int pages, struct thread **ntd)
{

        /* Have race condition but it is cheap. */
        if (p->p_numthreads >= max_threads_per_proc) {
                ++max_threads_hits;
                return (EPROCLIM);
        }

        *ntd = thread_alloc(pages);
        if (*ntd == NULL)
                return (ENOMEM);

        return (0);
}
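
#if 0
/*
 * Illustrative userland sketch, not part of this file and not compiled:
 * how a caller (normally libthr) might drive thr_new(2) as implemented by
 * kern_thr_new() above.  The entry function, helper name and stack size
 * used here are hypothetical; the struct thr_param fields match the ones
 * consumed by create_thread().  Both child_tid and parent_tid are passed
 * so the parent keeps its own copy of the tid even if the (possibly
 * detached) child exits and frees its memory first.
 */
#include <sys/types.h>
#include <sys/thr.h>

#include <stdlib.h>
#include <string.h>

static long child_tid, parent_tid;

static void
thread_entry(void *arg)
{

        /* ... thread body ... */
        thr_exit(NULL);
}

static int
start_one_thread(void)
{
        struct thr_param param;
        char *stack;
        size_t stacksz = 1024 * 1024;   /* arbitrary stack size for the sketch */

        if ((stack = malloc(stacksz)) == NULL)
                return (-1);
        memset(&param, 0, sizeof(param));
        param.start_func = thread_entry;
        param.arg = NULL;
        param.stack_base = stack;
        param.stack_size = stacksz;
        param.tls_base = NULL;          /* a real caller points this at its TCB */
        param.child_tid = &child_tid;   /* written by the kernel before the child runs */
        param.parent_tid = &parent_tid;
        param.rtp = NULL;               /* inherit the caller's scheduling class */
        return (thr_new(&param, sizeof(param)));
}
#endif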