/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <machine/frame.h>

extern int max_threads_per_proc;
extern int max_groups_per_proc;

SYSCTL_DECL(_kern_threads);
static int thr_scope = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_scope, CTLFLAG_RW,
	&thr_scope, 0, "sys or proc scope scheduling");

static int thr_concurrency = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW,
	&thr_concurrency, 0, "a concurrency value if not default");

static int create_thread(struct thread *td, mcontext_t *ctx,
			void (*start_func)(void *), void *arg,
			char *stack_base, size_t stack_size,
			char *tls_base,
			long *child_tid, long *parent_tid,
			int flags);
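/*
 * Illustrative sketch, not part of this file: a userland threading
 * library such as libthr is expected to reach create_thread() below
 * through thr_new(2) with a filled-in struct thr_param.  The variable
 * names here are hypothetical; the field names match the uses of
 * struct thr_param in thr_new() below:
 *
 *	struct thr_param param;
 *	long child_id, parent_copy;
 *
 *	bzero(&param, sizeof(param));
 *	param.start_func = entry;		// thread entry function
 *	param.arg = entry_arg;
 *	param.stack_base = stack;		// caller-allocated stack
 *	param.stack_size = stack_size;
 *	param.tls_base = tls;
 *	param.child_tid = &child_id;		// tid storage for child
 *	param.parent_tid = &parent_copy;	// tid storage for parent
 *	param.flags = 0;
 *	thr_new(&param, sizeof(param));
 */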
/*
 * System call interface.
 */
int
thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
	ucontext_t ctx;
	int error;

	if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
		return (error);

	error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
		NULL, 0, NULL, uap->id, NULL, uap->flags);
	return (error);
}

int
thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
	struct thr_param param;
	int error;

	if (uap->param_size < sizeof(param))
		return (EINVAL);
	if ((error = copyin(uap->param, &param, sizeof(param))))
		return (error);
	error = create_thread(td, NULL, param.start_func, param.arg,
		param.stack_base, param.stack_size, param.tls_base,
		param.child_tid, param.parent_tid, param.flags);
	return (error);
}

static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags)
{
	stack_t stack;
	struct thread *newtd;
	struct ksegrp *kg, *newkg;
	struct proc *p;
	long id;
	int error, scope_sys, linkkg;

	error = 0;
	p = td->td_proc;
	kg = td->td_ksegrp;

	/* This check races with thread creation, but it is cheap. */
	if ((p->p_numksegrps >= max_groups_per_proc) ||
	    (p->p_numthreads >= max_threads_per_proc)) {
		return (EPROCLIM);
	}

	/* Check PTHREAD_SCOPE_SYSTEM. */
	scope_sys = (flags & THR_SYSTEM_SCOPE) != 0;

	/*
	 * The sysctl overrides the user's flag; 1 forces process
	 * scope, 2 forces system scope.
	 */
	if (thr_scope == 1)
		scope_sys = 0;
	else if (thr_scope == 2)
		scope_sys = 1;

	/* Allocate the new thread; initialization follows the copyout. */
	newtd = thread_alloc();

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * The tid is copied out to two places, one for the child and
	 * one for the parent, because pthreads can create a detached
	 * thread: if the parent wants to access the child's tid safely,
	 * it must provide its own storage, since the child may exit
	 * and have its memory freed before the parent can read it.
	 */
	id = newtd->td_tid;
	if ((child_tid != NULL &&
	    (error = copyout(&id, child_tid, sizeof(long)))) ||
	    (parent_tid != NULL &&
	    (error = copyout(&id, parent_tid, sizeof(long))))) {
		thread_free(newtd);
		return (error);
	}
	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	cpu_set_upcall(newtd, td);

	if (ctx != NULL) {		/* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Set up user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			return (error);
		}
	}

	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* Treat the initial thread as if it has PTHREAD_SCOPE_PROCESS. */
		p->p_procscopegrp = kg;
		mtx_lock_spin(&sched_lock);
		sched_set_concurrency(kg,
		    thr_concurrency ? thr_concurrency : (2*mp_ncpus));
		mtx_unlock_spin(&sched_lock);
	}

	linkkg = 0;
	if (scope_sys) {
		linkkg = 1;
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero,
		    __rangeof(struct ksegrp, kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
		sched_init_concurrency(newkg);
		PROC_LOCK(td->td_proc);
	} else {
		/*
		 * Try to create a KSE group which will be shared
		 * by all PTHREAD_SCOPE_PROCESS threads.
		 */
retry:
		PROC_LOCK(td->td_proc);
		if ((newkg = p->p_procscopegrp) == NULL) {
			PROC_UNLOCK(p);
			newkg = ksegrp_alloc();
			bzero(&newkg->kg_startzero,
			    __rangeof(struct ksegrp, kg_startzero, kg_endzero));
			bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
			    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
			PROC_LOCK(p);
			if (p->p_procscopegrp == NULL) {
				p->p_procscopegrp = newkg;
				sched_init_concurrency(newkg);
				sched_set_concurrency(newkg,
				    thr_concurrency ? thr_concurrency : (2*mp_ncpus));
				linkkg = 1;
			} else {
				PROC_UNLOCK(p);
				ksegrp_free(newkg);
				goto retry;
			}
		}
	}

	td->td_proc->p_flag |= P_HADTHREADS;
	newtd->td_sigmask = td->td_sigmask;
	mtx_lock_spin(&sched_lock);
	if (linkkg)
		ksegrp_link(newkg, p);
	thread_link(newtd, newkg);
	PROC_UNLOCK(p);

	/* Let the scheduler know about these things. */
	if (linkkg)
		sched_fork_ksegrp(td, newkg);
	sched_fork_thread(td, newtd);
	TD_SET_CAN_RUN(newtd);
	/* if ((flags & THR_SUSPENDED) == 0) */
		setrunqueue(newtd, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	return (error);
}
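/*
 * Illustrative sketch, not part of this file: the scope decision in
 * create_thread() above is driven by THR_SYSTEM_SCOPE in the flags
 * argument.  A userland library would presumably translate the POSIX
 * contention scope into that flag along these lines (attr is a
 * hypothetical attribute structure):
 *
 *	param.flags = (attr->scope == PTHREAD_SCOPE_SYSTEM) ?
 *	    THR_SYSTEM_SCOPE : 0;
 *
 * With system scope each new thread gets its own ksegrp; with process
 * scope all threads share the single p_procscopegrp created above.
 */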
int
thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	long id;
	int error;

	id = td->td_tid;
	if ((error = copyout(&id, uap->id, sizeof(long))))
		return (error);

	return (0);
}

int
thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
	struct proc *p;

	p = td->td_proc;

	/* Signal userland that it can free the stack. */
	if (uap->state != NULL) {
		suword((void *)uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX);
	}

	PROC_LOCK(p);
	sigqueue_flush(&td->td_sigqueue);
	mtx_lock_spin(&sched_lock);

	/*
	 * If this is not the last thread in the process, exit just
	 * this thread.  Shutting down the last thread falls through
	 * and actually calls exit() in the trampoline when it returns.
	 */
	if (p->p_numthreads != 1) {
		thread_exit();
		/* NOTREACHED */
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (0);
}
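/*
 * Illustrative sketch, not part of this file: the state word written
 * by thr_exit() above, together with the kern_umtx_wake() call, lets a
 * joining thread sleep until the exiting thread is gone.  Roughly (the
 * wait is sketched as pseudo-code from the userland side; the exact
 * umtx call form may differ):
 *
 *	long state = 0;		// shared with the exiting thread
 *	...
 *	while (state == 0)
 *		wait on &state via umtx;   // woken by kern_umtx_wake()
 *	// now it is safe to unmap the exited thread's stack
 */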
int
thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
	struct thread *ttd;
	struct proc *p;
	int error;

	p = td->td_proc;
	error = 0;
	PROC_LOCK(p);
	ttd = thread_find(p, uap->id);
	if (ttd == NULL) {
		error = ESRCH;
		goto out;
	}
	if (uap->sig == 0)
		goto out;
	if (!_SIG_VALID(uap->sig)) {
		error = EINVAL;
		goto out;
	}
	tdsignal(p, ttd, uap->sig, NULL);
out:
	PROC_UNLOCK(p);
	return (error);
}

int
thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
	struct timespec ts;
	struct timeval tv;
	int error;
	int hz;

	hz = 0;
	error = 0;
	if (uap->timeout != NULL) {
		error = copyin((const void *)uap->timeout, (void *)&ts,
		    sizeof(struct timespec));
		if (error != 0)
			return (error);
		/* tv_nsec must be within [0, one second). */
		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);
		if (ts.tv_sec == 0 && ts.tv_nsec == 0)
			return (ETIMEDOUT);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		hz = tvtohz(&tv);
	}
	PROC_LOCK(td->td_proc);
	if ((td->td_flags & TDF_THRWAKEUP) == 0)
		error = msleep((void *)td, &td->td_proc->p_mtx,
		    td->td_priority | PCATCH, "lthr", hz);
	if (td->td_flags & TDF_THRWAKEUP) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_THRWAKEUP;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(td->td_proc);
		return (0);
	}
	PROC_UNLOCK(td->td_proc);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	else if (error == ERESTART) {
		if (hz != 0)
			error = EINTR;
	}
	return (error);
}

int
thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
	struct proc *p;
	struct thread *ttd;

	p = td->td_proc;
	PROC_LOCK(p);
	ttd = thread_find(p, uap->id);
	if (ttd == NULL) {
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	mtx_lock_spin(&sched_lock);
	ttd->td_flags |= TDF_THRWAKEUP;
	mtx_unlock_spin(&sched_lock);
	wakeup((void *)ttd);
	PROC_UNLOCK(p);
	return (0);
}
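/*
 * Illustrative sketch, not part of this file: thr_suspend() and
 * thr_wake() pair up as a simple park/unpark primitive.  Because
 * TDF_THRWAKEUP persists until thr_suspend() consumes it, a wake that
 * arrives before the matching suspend is not lost.  Hypothetical
 * userland usage:
 *
 *	// Thread A: park for at most one second.
 *	struct timespec ts = { 1, 0 };
 *	thr_suspend(&ts);	// 0 if woken; fails with ETIMEDOUT
 *				// (via errno) on timeout
 *
 *	// Thread B: unpark A using the tid obtained at creation.
 *	thr_wake(tid_of_a);
 */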