/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

static int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

#ifdef COMPAT_FREEBSD32

static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
	int error;

	if (SV_CURPROC_FLAG(SV_LP64))
		error = suword(addr, lwpid);
	else
		error = suword32(addr, lwpid);
	return (error);
}

#else
#define	suword_lwpid	suword
#endif

static int create_thread(struct thread *td, mcontext_t *ctx,
			 void (*start_func)(void *), void *arg,
			 char *stack_base, size_t stack_size,
			 char *tls_base,
			 long *child_tid, long *parent_tid,
			 int flags, struct rtprio *rtp);
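/*
 * For reference, a userland threading library (e.g. libthr) drives the
 * thr_new() system call below with a struct thr_param describing the new
 * thread.  A minimal usage sketch -- userland code, not part of this file,
 * showing only the fields that kern_thr_new() consumes:
 *
 *	struct thr_param p;
 *	long child_id, parent_copy;
 *
 *	bzero(&p, sizeof(p));
 *	p.start_func = entry;		-- entry point of the new thread
 *	p.arg = entry_arg;		-- argument passed to start_func
 *	p.stack_base = stk;		-- base of the new user stack
 *	p.stack_size = stksz;
 *	p.tls_base = tls;		-- initial TLS base address
 *	p.child_tid = &child_id;	-- kernel writes the new tid here
 *	p.parent_tid = &parent_copy;	-- and here, for the creating thread
 *	p.rtp = NULL;			-- optional struct rtprio
 *	error = thr_new(&p, sizeof(p));
 */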
/*
 * System call interface.
 */
int
thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
	ucontext_t ctx;
	int error;

	if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
		return (error);

	error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
	    NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
	return (error);
}

int
thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
	struct thr_param param;
	int error;

	if (uap->param_size < 0 || uap->param_size > sizeof(param))
		return (EINVAL);
	bzero(&param, sizeof(param));
	if ((error = copyin(uap->param, &param, uap->param_size)))
		return (error);
	return (kern_thr_new(td, &param));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
	struct rtprio rtp, *rtpp;
	int error;

	rtpp = NULL;
	if (param->rtp != 0) {
		error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
		if (error)
			return (error);
		rtpp = &rtp;
	}
	error = create_thread(td, NULL, param->start_func, param->arg,
	    param->stack_base, param->stack_size, param->tls_base,
	    param->child_tid, param->parent_tid, param->flags,
	    rtpp);
	return (error);
}

static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* There is a race condition here, but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

	/* Initialize our td */
	newtd = thread_alloc(0);
	if (newtd == NULL)
		return (ENOMEM);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * Here we copy out the tid to two places, one for the child and
	 * one for the parent, because pthread can create a detached
	 * thread; if the parent wants to access the child's tid safely,
	 * it has to provide its own storage, since the child may exit
	 * quickly and its memory be freed before the parent can read it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		return (EFAULT);
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	cpu_set_upcall(newtd, td);

	if (ctx != NULL) {	/* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	} else {
		/* Set up our machine context. */
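		/*
		 * What follows hands the new thread its initial user-mode
		 * state: cpu_set_upcall_kse() arranges for the thread's
		 * first return to user mode to begin executing
		 * start_func(arg) on the caller-supplied stack, and
		 * cpu_set_user_tls() loads tls_base into the machine's
		 * TLS pointer register.
		 */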
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Setup user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	newtd->td_sigmask = td->td_sigmask;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);
}

int
thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	int error;

	error = suword_lwpid(uap->id, (unsigned)td->td_tid);
	if (error == -1)
		return (EFAULT);
	return (0);
}

int
thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
	struct proc *p;

	p = td->td_proc;

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);
	/*
	 * Shutting down last thread in the proc.  This will actually
	 * call exit() in the trampoline when it returns.
	 */
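	/*
	 * Only the last thread takes that path: it falls through the test
	 * below and returns to userland.  Any other thread detaches itself
	 * here and thread_exit() does not return.
	 */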
	if (p->p_numthreads != 1) {
		LIST_REMOVE(td, td_hash);
		rw_wunlock(&tidhash_lock);
		tdsigcleanup(td);
		PROC_SLOCK(p);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	PROC_UNLOCK(p);
	rw_wunlock(&tidhash_lock);
	return (0);
}

int
thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	p = td->td_proc;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
			PROC_UNLOCK(p);
		}
	} else {
		error = 0;
		ttd = tdfind((lwpid_t)uap->id, p->p_pid);
		if (ttd == NULL)
			return (ESRCH);
		if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdksignal(ttd, uap->sig, &ksi);
		PROC_UNLOCK(ttd->td_proc);
	}
	return (error);
}

int
thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->sig);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if ((p = pfind(uap->pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, uap->sig);
		if (error) {
			PROC_UNLOCK(p);
			return (error);
		}
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
		}
		PROC_UNLOCK(p);
	} else {
		ttd = tdfind((lwpid_t)uap->id, uap->pid);
		if (ttd == NULL)
			return (ESRCH);
		p = ttd->td_proc;
		AUDIT_ARG_PROCESS(p);
		/* Do not deliver the signal if the permission check fails. */
		error = p_cansignal(td, p, uap->sig);
		if (error == 0) {
			if (uap->sig == 0)
				;
			else if (!_SIG_VALID(uap->sig))
				error = EINVAL;
			else
				tdksignal(ttd, uap->sig, &ksi);
		}
		PROC_UNLOCK(p);
	}
	return (error);
}

int
thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
	struct timespec ts, *tsp;
	int error;

	tsp = NULL;
	if (uap->timeout != NULL) {
		error = copyin((const void *)uap->timeout, (void *)&ts,
		    sizeof(struct timespec));
		if (error != 0)
			return (error);
		tsp = &ts;
	}

	return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
	struct proc *p = td->td_proc;
	struct timeval tv;
	int error = 0;
	int timo = 0;

	if (td->td_pflags & TDP_WAKEUP) {
		td->td_pflags &= ~TDP_WAKEUP;
		return (0);
	}

	if (tsp != NULL) {
		if (tsp->tv_nsec < 0 || tsp->tv_nsec > 1000000000)
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			error = EWOULDBLOCK;
		else {
			TIMESPEC_TO_TIMEVAL(&tv, tsp);
			timo = tvtohz(&tv);
		}
	}

	PROC_LOCK(p);
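	/*
	 * Sleep on our own thread pointer until thr_wake() sets
	 * TDF_THRWAKEUP and wakeup()s us, until the timeout expires, or
	 * until a signal arrives (PCATCH).  A zero-length timeout was
	 * converted to EWOULDBLOCK above, which skips the sleep and is
	 * reported as ETIMEDOUT below.
	 */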
	if (error == 0 && (td->td_flags & TDF_THRWAKEUP) == 0)
		error = msleep((void *)td, &p->p_mtx,
		    PCATCH, "lthr", timo);

	if (td->td_flags & TDF_THRWAKEUP) {
		thread_lock(td);
		td->td_flags &= ~TDF_THRWAKEUP;
		thread_unlock(td);
		PROC_UNLOCK(p);
		return (0);
	}
	PROC_UNLOCK(p);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	else if (error == ERESTART) {
		if (timo != 0)
			error = EINTR;
	}
	return (error);
}

int
thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
	struct proc *p;
	struct thread *ttd;

	if (uap->id == td->td_tid) {
		td->td_pflags |= TDP_WAKEUP;
		return (0);
	}

	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	thread_lock(ttd);
	ttd->td_flags |= TDF_THRWAKEUP;
	thread_unlock(ttd);
	wakeup((void *)ttd);
	PROC_UNLOCK(p);
	return (0);
}

int
thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
	struct proc *p;
	char name[MAXCOMLEN + 1];
	struct thread *ttd;
	int error;

	error = 0;
	name[0] = '\0';
	if (uap->name != NULL) {
		error = copyinstr(uap->name, name, sizeof(name),
		    NULL);
		if (error)
			return (error);
	}
	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	strcpy(ttd->td_name, name);
	PROC_UNLOCK(p);
	return (error);
}