/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_compat.h" 31 #include "opt_posix.h" 32 #include <sys/param.h> 33 #include <sys/kernel.h> 34 #include <sys/lock.h> 35 #include <sys/mutex.h> 36 #include <sys/proc.h> 37 #include <sys/resourcevar.h> 38 #include <sys/sched.h> 39 #include <sys/sysctl.h> 40 #include <sys/smp.h> 41 #include <sys/sysent.h> 42 #include <sys/systm.h> 43 #include <sys/sysproto.h> 44 #include <sys/signalvar.h> 45 #include <sys/ucontext.h> 46 #include <sys/thr.h> 47 #include <sys/rtprio.h> 48 #include <posix4/sched.h> 49 #include <posix4/posix4.h> 50 #include <sys/umtx.h> 51 #include <sys/limits.h> 52 53 #include <machine/frame.h> 54 55 #ifdef COMPAT_IA32 56 57 extern struct sysentvec ia32_freebsd_sysvec; 58 59 static inline int 60 suword_lwpid(void *addr, lwpid_t lwpid) 61 { 62 int error; 63 64 if (curproc->p_sysent != &ia32_freebsd_sysvec) 65 error = suword(addr, lwpid); 66 else 67 error = suword32(addr, lwpid); 68 return (error); 69 } 70 71 #else 72 #define suword_lwpid suword 73 #endif 74 75 extern int max_threads_per_proc; 76 77 static int create_thread(struct thread *td, mcontext_t *ctx, 78 void (*start_func)(void *), void *arg, 79 char *stack_base, size_t stack_size, 80 char *tls_base, 81 long *child_tid, long *parent_tid, 82 int flags, struct rtprio *rtp); 83 84 /* 85 * System call interface. 
86 */ 87 int 88 thr_create(struct thread *td, struct thr_create_args *uap) 89 /* ucontext_t *ctx, long *id, int flags */ 90 { 91 ucontext_t ctx; 92 int error; 93 94 if ((error = copyin(uap->ctx, &ctx, sizeof(ctx)))) 95 return (error); 96 97 error = create_thread(td, &ctx.uc_mcontext, NULL, NULL, 98 NULL, 0, NULL, uap->id, NULL, uap->flags, NULL); 99 return (error); 100 } 101 102 int 103 thr_new(struct thread *td, struct thr_new_args *uap) 104 /* struct thr_param * */ 105 { 106 struct thr_param param; 107 int error; 108 109 if (uap->param_size < 0 || uap->param_size > sizeof(param)) 110 return (EINVAL); 111 bzero(¶m, sizeof(param)); 112 if ((error = copyin(uap->param, ¶m, uap->param_size))) 113 return (error); 114 return (kern_thr_new(td, ¶m)); 115 } 116 117 int 118 kern_thr_new(struct thread *td, struct thr_param *param) 119 { 120 struct rtprio rtp, *rtpp; 121 int error; 122 123 rtpp = NULL; 124 if (param->rtp != 0) { 125 error = copyin(param->rtp, &rtp, sizeof(struct rtprio)); 126 rtpp = &rtp; 127 } 128 error = create_thread(td, NULL, param->start_func, param->arg, 129 param->stack_base, param->stack_size, param->tls_base, 130 param->child_tid, param->parent_tid, param->flags, 131 rtpp); 132 return (error); 133 } 134 135 static int 136 create_thread(struct thread *td, mcontext_t *ctx, 137 void (*start_func)(void *), void *arg, 138 char *stack_base, size_t stack_size, 139 char *tls_base, 140 long *child_tid, long *parent_tid, 141 int flags, struct rtprio *rtp) 142 { 143 stack_t stack; 144 struct thread *newtd; 145 struct ksegrp *kg, *newkg; 146 struct proc *p; 147 long id; 148 int error; 149 150 error = 0; 151 p = td->td_proc; 152 kg = td->td_ksegrp; 153 154 /* Have race condition but it is cheap. 
*/ 155 if (p->p_numthreads >= max_threads_per_proc) 156 return (EPROCLIM); 157 158 if (rtp != NULL) { 159 switch(rtp->type) { 160 case RTP_PRIO_REALTIME: 161 case RTP_PRIO_FIFO: 162 /* Only root can set scheduler policy */ 163 if (suser(td) != 0) 164 return (EPERM); 165 if (rtp->prio > RTP_PRIO_MAX) 166 return (EINVAL); 167 break; 168 case RTP_PRIO_NORMAL: 169 rtp->prio = 0; 170 break; 171 default: 172 return (EINVAL); 173 } 174 } 175 176 /* Initialize our td and new ksegrp.. */ 177 newtd = thread_alloc(); 178 179 /* 180 * Try the copyout as soon as we allocate the td so we don't 181 * have to tear things down in a failure case below. 182 * Here we copy out tid to two places, one for child and one 183 * for parent, because pthread can create a detached thread, 184 * if parent wants to safely access child tid, it has to provide 185 * its storage, because child thread may exit quickly and 186 * memory is freed before parent thread can access it. 187 */ 188 id = newtd->td_tid; 189 if ((child_tid != NULL && 190 suword_lwpid(child_tid, newtd->td_tid)) || 191 (parent_tid != NULL && 192 suword_lwpid(parent_tid, newtd->td_tid))) { 193 thread_free(newtd); 194 return (EFAULT); 195 } 196 197 bzero(&newtd->td_startzero, 198 __rangeof(struct thread, td_startzero, td_endzero)); 199 bcopy(&td->td_startcopy, &newtd->td_startcopy, 200 __rangeof(struct thread, td_startcopy, td_endcopy)); 201 newtd->td_proc = td->td_proc; 202 newtd->td_ucred = crhold(td->td_ucred); 203 204 cpu_set_upcall(newtd, td); 205 206 if (ctx != NULL) { /* old way to set user context */ 207 error = set_mcontext(newtd, ctx); 208 if (error != 0) { 209 thread_free(newtd); 210 crfree(td->td_ucred); 211 return (error); 212 } 213 } else { 214 /* Set up our machine context. */ 215 stack.ss_sp = stack_base; 216 stack.ss_size = stack_size; 217 /* Set upcall address to user thread entry function. */ 218 cpu_set_upcall_kse(newtd, start_func, arg, &stack); 219 /* Setup user TLS address and TLS pointer register. 
*/ 220 error = cpu_set_user_tls(newtd, tls_base); 221 if (error != 0) { 222 thread_free(newtd); 223 crfree(td->td_ucred); 224 return (error); 225 } 226 } 227 228 newkg = ksegrp_alloc(); 229 bzero(&newkg->kg_startzero, 230 __rangeof(struct ksegrp, kg_startzero, kg_endzero)); 231 bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, 232 __rangeof(struct ksegrp, kg_startcopy, kg_endcopy)); 233 sched_init_concurrency(newkg); 234 PROC_LOCK(td->td_proc); 235 td->td_proc->p_flag |= P_HADTHREADS; 236 newtd->td_sigmask = td->td_sigmask; 237 mtx_lock_spin(&sched_lock); 238 ksegrp_link(newkg, p); 239 thread_link(newtd, newkg); 240 PROC_UNLOCK(p); 241 242 /* let the scheduler know about these things. */ 243 sched_fork_ksegrp(td, newkg); 244 sched_fork_thread(td, newtd); 245 if (rtp != NULL) { 246 if (!(kg->kg_pri_class == PRI_TIMESHARE && 247 rtp->type == RTP_PRIO_NORMAL)) { 248 rtp_to_pri(rtp, newkg); 249 sched_prio(newtd, newkg->kg_user_pri); 250 } /* ignore timesharing class */ 251 } 252 TD_SET_CAN_RUN(newtd); 253 /* if ((flags & THR_SUSPENDED) == 0) */ 254 setrunqueue(newtd, SRQ_BORING); 255 mtx_unlock_spin(&sched_lock); 256 257 return (error); 258 } 259 260 int 261 thr_self(struct thread *td, struct thr_self_args *uap) 262 /* long *id */ 263 { 264 int error; 265 266 error = suword_lwpid(uap->id, (unsigned)td->td_tid); 267 if (error == -1) 268 return (EFAULT); 269 return (0); 270 } 271 272 int 273 thr_exit(struct thread *td, struct thr_exit_args *uap) 274 /* long *state */ 275 { 276 struct proc *p; 277 278 p = td->td_proc; 279 280 /* Signal userland that it can free the stack. */ 281 if ((void *)uap->state != NULL) { 282 suword_lwpid(uap->state, 1); 283 kern_umtx_wake(td, uap->state, INT_MAX); 284 } 285 286 PROC_LOCK(p); 287 sigqueue_flush(&td->td_sigqueue); 288 mtx_lock_spin(&sched_lock); 289 290 /* 291 * Shutting down last thread in the proc. This will actually 292 * call exit() in the trampoline when it returns. 
293 */ 294 if (p->p_numthreads != 1) { 295 thread_stopped(p); 296 thread_exit(); 297 /* NOTREACHED */ 298 } 299 mtx_unlock_spin(&sched_lock); 300 PROC_UNLOCK(p); 301 return (0); 302 } 303 304 int 305 thr_kill(struct thread *td, struct thr_kill_args *uap) 306 /* long id, int sig */ 307 { 308 struct thread *ttd; 309 struct proc *p; 310 int error; 311 312 p = td->td_proc; 313 error = 0; 314 PROC_LOCK(p); 315 if (uap->id == -1) { 316 if (uap->sig != 0 && !_SIG_VALID(uap->sig)) { 317 error = EINVAL; 318 } else { 319 error = ESRCH; 320 FOREACH_THREAD_IN_PROC(p, ttd) { 321 if (ttd != td) { 322 error = 0; 323 if (uap->sig == 0) 324 break; 325 tdsignal(p, ttd, uap->sig, NULL); 326 } 327 } 328 } 329 } else { 330 if (uap->id != td->td_tid) 331 ttd = thread_find(p, uap->id); 332 else 333 ttd = td; 334 if (ttd == NULL) 335 error = ESRCH; 336 else if (uap->sig == 0) 337 ; 338 else if (!_SIG_VALID(uap->sig)) 339 error = EINVAL; 340 else 341 tdsignal(p, ttd, uap->sig, NULL); 342 } 343 PROC_UNLOCK(p); 344 return (error); 345 } 346 347 int 348 thr_suspend(struct thread *td, struct thr_suspend_args *uap) 349 /* const struct timespec *timeout */ 350 { 351 struct timespec ts, *tsp; 352 int error; 353 354 error = 0; 355 tsp = NULL; 356 if (uap->timeout != NULL) { 357 error = copyin((const void *)uap->timeout, (void *)&ts, 358 sizeof(struct timespec)); 359 if (error != 0) 360 return (error); 361 tsp = &ts; 362 } 363 364 return (kern_thr_suspend(td, tsp)); 365 } 366 367 int 368 kern_thr_suspend(struct thread *td, struct timespec *tsp) 369 { 370 struct timeval tv; 371 int error = 0, hz = 0; 372 373 if (tsp != NULL) { 374 if (tsp->tv_nsec < 0 || tsp->tv_nsec > 1000000000) 375 return (EINVAL); 376 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) 377 return (ETIMEDOUT); 378 TIMESPEC_TO_TIMEVAL(&tv, tsp); 379 hz = tvtohz(&tv); 380 } 381 PROC_LOCK(td->td_proc); 382 if ((td->td_flags & TDF_THRWAKEUP) == 0) 383 error = msleep((void *)td, &td->td_proc->p_mtx, PCATCH, "lthr", 384 hz); 385 if 
(td->td_flags & TDF_THRWAKEUP) { 386 mtx_lock_spin(&sched_lock); 387 td->td_flags &= ~TDF_THRWAKEUP; 388 mtx_unlock_spin(&sched_lock); 389 PROC_UNLOCK(td->td_proc); 390 return (0); 391 } 392 PROC_UNLOCK(td->td_proc); 393 if (error == EWOULDBLOCK) 394 error = ETIMEDOUT; 395 else if (error == ERESTART) { 396 if (hz != 0) 397 error = EINTR; 398 } 399 return (error); 400 } 401 402 int 403 thr_wake(struct thread *td, struct thr_wake_args *uap) 404 /* long id */ 405 { 406 struct proc *p; 407 struct thread *ttd; 408 409 p = td->td_proc; 410 PROC_LOCK(p); 411 ttd = thread_find(p, uap->id); 412 if (ttd == NULL) { 413 PROC_UNLOCK(p); 414 return (ESRCH); 415 } 416 mtx_lock_spin(&sched_lock); 417 ttd->td_flags |= TDF_THRWAKEUP; 418 mtx_unlock_spin(&sched_lock); 419 wakeup((void *)ttd); 420 PROC_UNLOCK(p); 421 return (0); 422 } 423 424 int 425 thr_set_name(struct thread *td, struct thr_set_name_args *uap) 426 { 427 struct proc *p = td->td_proc; 428 char name[MAXCOMLEN + 1]; 429 struct thread *ttd; 430 int error; 431 432 error = 0; 433 name[0] = '\0'; 434 if (uap->name != NULL) { 435 error = copyinstr(uap->name, name, sizeof(name), 436 NULL); 437 if (error) 438 return (error); 439 } 440 PROC_LOCK(p); 441 if (uap->id == td->td_tid) 442 ttd = td; 443 else 444 ttd = thread_find(p, uap->id); 445 if (ttd != NULL) 446 strcpy(ttd->td_name, name); 447 else 448 error = ESRCH; 449 PROC_UNLOCK(p); 450 return (error); 451 } 452