/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>

#include <machine/frame.h>

extern int max_threads_per_proc;
extern int max_groups_per_proc;

SYSCTL_DECL(_kern_threads);
static int thr_scope_sys = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_scope_sys, CTLFLAG_RW,
    &thr_scope_sys, 0, "sys or proc scope scheduling");

static int thr_concurrency = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW,
    &thr_concurrency, 0, "concurrency value if not the default");

/*
 * System call interface.
 */
int
thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
        struct thread *newtd;
        ucontext_t ctx;
        long id;
        int error;
        struct ksegrp *kg, *newkg;
        struct proc *p;
        int scope_sys;

        p = td->td_proc;
        kg = td->td_ksegrp;
        if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
                return (error);

        /* This check is racy, but the race is cheap to tolerate. */
        if ((p->p_numksegrps >= max_groups_per_proc) ||
            (p->p_numthreads >= max_threads_per_proc)) {
                return (EPROCLIM);
        }

        scope_sys = thr_scope_sys;
        /* Allocate our new thread, and a new ksegrp if system scope. */
        newtd = thread_alloc();
        if (scope_sys)
                newkg = ksegrp_alloc();
        else
                newkg = kg;
        /*
         * Try the copyout as soon as we allocate the td so we don't have to
         * tear things down in a failure case below.
         */
        id = newtd->td_tid;
        if ((error = copyout(&id, uap->id, sizeof(long)))) {
                if (scope_sys)
                        ksegrp_free(newkg);
                thread_free(newtd);
                return (error);
        }

        bzero(&newtd->td_startzero,
            __rangeof(struct thread, td_startzero, td_endzero));
        bcopy(&td->td_startcopy, &newtd->td_startcopy,
            __rangeof(struct thread, td_startcopy, td_endcopy));

        if (scope_sys) {
                bzero(&newkg->kg_startzero,
                    __rangeof(struct ksegrp, kg_startzero, kg_endzero));
                bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
                    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
        }

        newtd->td_proc = td->td_proc;
        newtd->td_ucred = crhold(td->td_ucred);

        /* Set up our machine context. */
        cpu_set_upcall(newtd, td);
        error = set_mcontext(newtd, &ctx.uc_mcontext);
        if (error != 0) {
                if (scope_sys)
                        ksegrp_free(newkg);
                thread_free(newtd);
                crfree(td->td_ucred);
                goto out;
        }

        /* Link the thread and kse into the ksegrp and make it runnable. */
        PROC_LOCK(td->td_proc);
        if (scope_sys) {
                sched_init_concurrency(newkg);
        } else {
                if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                        sched_set_concurrency(kg,
                            thr_concurrency ? thr_concurrency : (2*mp_ncpus));
                }
        }

        td->td_proc->p_flag |= P_HADTHREADS;
        newtd->td_sigmask = td->td_sigmask;
        mtx_lock_spin(&sched_lock);
        if (scope_sys)
                ksegrp_link(newkg, p);
        thread_link(newtd, newkg);
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);

        /* Let the scheduler know about these things. */
        mtx_lock_spin(&sched_lock);
        if (scope_sys)
                sched_fork_ksegrp(td, newkg);
        sched_fork_thread(td, newtd);

        TD_SET_CAN_RUN(newtd);
        if ((uap->flags & THR_SUSPENDED) == 0)
                setrunqueue(newtd, SRQ_BORING);

        mtx_unlock_spin(&sched_lock);

out:
        return (error);
}

/*
 * Return the calling thread's ID to userland.
 */
int
thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
        long id;
        int error;

        id = td->td_tid;
        if ((error = copyout(&id, uap->id, sizeof(long))))
                return (error);

        return (0);
}

/*
 * Terminate the calling thread.
 */
int
thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
        struct proc *p;

        p = td->td_proc;

        /* Signal userland that it can free the stack. */
        if ((void *)uap->state != NULL)
                suword((void *)uap->state, 1);

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);

        /*
         * If this is not the last thread in the proc, exit just this
         * thread.  The last thread instead falls through and returns;
         * exit() is then actually called in the trampoline when the
         * thread returns to userland.
         */
        if (p->p_numthreads != 1) {
                thread_exit();
                /* NOTREACHED */
        }
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);
        return (0);
}

/*
 * Send a signal to the thread with the given ID within this process.
 */
int
thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
        struct thread *ttd;
        struct proc *p;
        int error;

        p = td->td_proc;
        error = 0;
        PROC_LOCK(p);
        FOREACH_THREAD_IN_PROC(p, ttd) {
                if (ttd->td_tid == uap->id)
                        break;
        }
        if (ttd == NULL) {
                error = ESRCH;
                goto out;
        }
        if (uap->sig == 0)
                goto out;
        if (!_SIG_VALID(uap->sig)) {
                error = EINVAL;
                goto out;
        }
        tdsignal(ttd, uap->sig, SIGTARGET_TD);
out:
        PROC_UNLOCK(p);
        return (error);
}

/*
 * Sleep until woken by thr_wake() or until the optional timeout expires.
 */
int
thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
        struct timespec ts;
        struct timeval tv;
        int error;
        int timo;

        timo = 0;
        error = 0;
        if (uap->timeout != NULL) {
                error = copyin((const void *)uap->timeout, (void *)&ts,
                    sizeof(struct timespec));
                if (error != 0)
                        return (error);
                if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
                        return (EINVAL);
                if (ts.tv_sec == 0 && ts.tv_nsec == 0)
                        return (ETIMEDOUT);
                TIMESPEC_TO_TIMEVAL(&tv, &ts);
                timo = tvtohz(&tv);
        }
        PROC_LOCK(td->td_proc);
        if ((td->td_flags & TDF_THRWAKEUP) == 0)
                error = msleep((void *)td, &td->td_proc->p_mtx,
                    td->td_priority | PCATCH, "lthr", timo);
        if (td->td_flags & TDF_THRWAKEUP) {
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_THRWAKEUP;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(td->td_proc);
                return (0);
        }
        PROC_UNLOCK(td->td_proc);
        if (error == EWOULDBLOCK)
                error = ETIMEDOUT;
        else if (error == ERESTART) {
                if (timo != 0)
                        error = EINTR;
        }
        return (error);
}

/*
 * Wake a thread sleeping in thr_suspend().
 */
int
thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
        struct thread *ttd;

        PROC_LOCK(td->td_proc);
        FOREACH_THREAD_IN_PROC(td->td_proc, ttd) {
                if (ttd->td_tid == uap->id)
                        break;
        }
        if (ttd == NULL) {
                PROC_UNLOCK(td->td_proc);
                return (ESRCH);
        }
        mtx_lock_spin(&sched_lock);
        ttd->td_flags |= TDF_THRWAKEUP;
        mtx_unlock_spin(&sched_lock);
        wakeup((void *)ttd);
        PROC_UNLOCK(td->td_proc);
        return (0);
}
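
/*
 * Illustrative userland sketch: one plausible way to drive thr_create(),
 * thr_self() and thr_exit() directly, assuming the userland prototypes
 * exposed by <sys/thr.h>.  EXAMPLE_STACK_SIZE, example_entry() and
 * example_start_thread() are hypothetical names; a real threading library
 * such as libthr does considerably more setup and also reclaims the stack
 * after the exiting thread stores through the thr_exit() state pointer.
 * The new thread's machine context is built with getcontext()/makecontext()
 * since thr_create() consumes ctx.uc_mcontext via set_mcontext() above.
 *
 *	#include <sys/thr.h>
 *	#include <stdlib.h>
 *	#include <ucontext.h>
 *
 *	#define	EXAMPLE_STACK_SIZE	(64 * 1024)
 *
 *	static void
 *	example_entry(void)
 *	{
 *		long self;
 *
 *		thr_self(&self);
 *		thr_exit(NULL);
 *	}
 *
 *	static long
 *	example_start_thread(void)
 *	{
 *		ucontext_t uc;
 *		long tid;
 *
 *		if (getcontext(&uc) != 0)
 *			return (-1);
 *		uc.uc_stack.ss_sp = malloc(EXAMPLE_STACK_SIZE);
 *		if (uc.uc_stack.ss_sp == NULL)
 *			return (-1);
 *		uc.uc_stack.ss_size = EXAMPLE_STACK_SIZE;
 *		makecontext(&uc, example_entry, 0);
 *		if (thr_create(&uc, &tid, 0) != 0)
 *			return (-1);
 *		return (tid);
 *	}
 */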