/*
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>

#include <machine/frame.h>

/*
 * Back end support functions.
 */

void
thr_exit1(void)
{
        struct ksegrp *kg;
        struct thread *td;
        struct kse *ke;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        kg = td->td_ksegrp;
        ke = td->td_kse;

        mtx_assert(&sched_lock, MA_OWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

        /*
         * Shutting down the last thread in the proc.  This will actually
         * call exit() in the trampoline when it returns.
         */
        if (p->p_numthreads == 1) {
                PROC_UNLOCK(p);
                return;
        }

        /*
         * XXX Undelivered process-wide signals should be reposted to the
         * proc.
         */

        /* Clean up cpu resources. */
        cpu_thread_exit(td);

        /* XXX make thread_unlink() */
        TAILQ_REMOVE(&p->p_threads, td, td_plist);
        p->p_numthreads--;
        TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
        kg->kg_numthreads--;

        ke->ke_state = KES_UNQUEUED;
        ke->ke_thread = NULL;
        kse_unlink(ke);
        sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), ke);

        /*
         * If we were stopped while waiting for all threads to exit and this
         * is the last thread, wake up the exiting thread.
         */
        if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE)
                if (p->p_numthreads == 1)
                        thread_unsuspend_one(p->p_singlethread);

        PROC_UNLOCK(p);
        td->td_kse = NULL;
        td->td_state = TDS_INACTIVE;
#if 0
        td->td_proc = NULL;
#endif
        td->td_ksegrp = NULL;
        td->td_last_kse = NULL;
        sched_exit_thread(TAILQ_NEXT(td, td_kglist), td);
        thread_stash(td);

#if !defined(__alpha__) && !defined(__powerpc__)
        cpu_throw(td, choosethread());
#else
        cpu_throw();
#endif
}

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

/*
 * System call interface.
 */
int
thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, thr_id_t *id, int flags */
{
        struct kse *ke0;
        struct thread *td0;
        ucontext_t ctx;
        int error;

        if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
                return (error);

        /* Initialize our td. */
        td0 = thread_alloc();

        /*
         * Try the copyout as soon as we allocate the td so we don't have to
         * tear things down in a failure case below.
         */
        if ((error = copyout(&td0, uap->id, sizeof(thr_id_t)))) {
                thread_free(td0);
                return (error);
        }

        bzero(&td0->td_startzero,
            (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
        bcopy(&td->td_startcopy, &td0->td_startcopy,
            (unsigned)RANGEOF(struct thread, td_startcopy, td_endcopy));

        td0->td_proc = td->td_proc;
        PROC_LOCK(td->td_proc);
        td0->td_sigmask = td->td_sigmask;
        PROC_UNLOCK(td->td_proc);
        td0->td_ucred = crhold(td->td_ucred);

        /* Initialize our kse structure. */
        ke0 = kse_alloc();
        bzero(&ke0->ke_startzero,
            RANGEOF(struct kse, ke_startzero, ke_endzero));

        /* Set up our machine context. */
        cpu_set_upcall(td0, td);
        error = set_mcontext(td0, &ctx.uc_mcontext);
        if (error != 0) {
                kse_free(ke0);
                thread_free(td0);
                goto out;
        }

        /* Link the thread and kse into the ksegrp and make it runnable. */
        mtx_lock_spin(&sched_lock);

        thread_link(td0, td->td_ksegrp);
        kse_link(ke0, td->td_ksegrp);

        /* Bind this thread and kse together. */
        td0->td_kse = ke0;
        ke0->ke_thread = td0;

        sched_fork_kse(td->td_kse, ke0);
        sched_fork_thread(td, td0);

        TD_SET_CAN_RUN(td0);
        if ((uap->flags & THR_SUSPENDED) == 0)
                setrunqueue(td0);

        mtx_unlock_spin(&sched_lock);

out:
        return (error);
}

int
thr_self(struct thread *td, struct thr_self_args *uap)
    /* thr_id_t *id */
{
        int error;

        if ((error = copyout(&td, uap->id, sizeof(thr_id_t))))
                return (error);

        return (0);
}

int
thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* NULL */
{
        struct proc *p;

        p = td->td_proc;

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);

        /*
         * This unlocks proc and doesn't return unless this is the last
         * thread.
         */
        thr_exit1();
        mtx_unlock_spin(&sched_lock);

        return (0);
}

int
thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* thr_id_t id, int sig */
{
        struct thread *ttd;
        struct proc *p;
        int error;

        p = td->td_proc;
        error = 0;

        PROC_LOCK(p);

        FOREACH_THREAD_IN_PROC(p, ttd)
                if (ttd == uap->id)
                        break;

        if (ttd == NULL) {
                error = ESRCH;
                goto out;
        }

        if (uap->sig == 0)
                goto out;

        if (!_SIG_VALID(uap->sig)) {
                error = EINVAL;
                goto out;
        }

        /*
         * We need a way to force this to go into this thread's siglist.
         * Until then blocked signals will go to the proc.
         */
        tdsignal(ttd, uap->sig);
out:
        PROC_UNLOCK(p);

        return (error);
}
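
/*
 * Illustrative sketch, not part of the kernel source proper: roughly how
 * userland code might drive the thr_create()/thr_self()/thr_exit() syscalls
 * above, assuming the userland prototypes in <sys/thr.h> mirror the argument
 * comments on the handlers (thr_create(ucontext_t *, thr_id_t *, int),
 * thr_self(thr_id_t *), thr_exit(void)).  The names entry, start_thread,
 * THR_STACK_SIZE and newtid are hypothetical, and error handling is omitted;
 * a real threading library does considerably more bookkeeping.
 *
 *	#include <sys/thr.h>
 *	#include <stdlib.h>
 *	#include <ucontext.h>
 *
 *	static thr_id_t newtid;
 *
 *	static void
 *	entry(void)
 *	{
 *		thr_id_t self;
 *
 *		thr_self(&self);
 *		... thread body; must finish with thr_exit(), which
 *		    does not return ...
 *		thr_exit();
 *	}
 *
 *	void
 *	start_thread(void)
 *	{
 *		ucontext_t ctx;
 *
 *		getcontext(&ctx);
 *		ctx.uc_stack.ss_sp = malloc(THR_STACK_SIZE);
 *		ctx.uc_stack.ss_size = THR_STACK_SIZE;
 *		makecontext(&ctx, entry, 0);
 *		thr_create(&ctx, &newtid, 0);
 *	}
 *
 * thr_create() copies the ucontext in, allocates a thread and KSE, installs
 * the machine context with set_mcontext(), and, unless THR_SUSPENDED is
 * passed in flags, places the new thread on the run queue; thr_self() in the
 * new thread then returns the same id that thr_create() wrote to newtid.
 */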