/*-
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>

/*
 * Start a kernel process. This is called after a fork() call in
 * mi_startup() in the file kern/init_main.c.
 *
 * This function is used to start "internal" daemons and intended
 * to be called from SYSINIT().
 */
void
kproc_start(const void *udata)
{
        const struct kproc_desc *kp = udata;
        int error;

        error = kproc_create((void (*)(void *))kp->func, NULL,
            kp->global_procpp, 0, 0, "%s", kp->arg0);
        if (error)
                panic("kproc_start: %s: error %d", kp->arg0, error);
}
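
/*
 * Illustrative sketch, not part of the original file and left uncompiled:
 * callers normally do not invoke kproc_start() directly, but hand it a
 * kproc_desc via SYSINIT().  The names examplekproc, example_kproc_main and
 * example_kp are hypothetical, and <sys/kernel.h> is assumed for SYSINIT().
 */
#if 0
static struct proc *examplekproc;

static void
example_kproc_main(void)
{
        /* The daemon's main loop would live here. */
}

static struct kproc_desc example_kp = {
        "examplekproc",         /* arg0: process name shown by ps(1) */
        example_kproc_main,     /* func: entry point of the new process */
        &examplekproc           /* global_procpp: where to store the proc */
};
SYSINIT(examplekproc, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start,
    &example_kp);
#endif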

/*
 * Create a kernel process/thread/whatever. It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newpp is the return value pointing to the new process's struct proc.
 * flags are flags to fork1 (in unistd.h)
 * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
 */
int
kproc_create(void (*func)(void *), void *arg,
    struct proc **newpp, int flags, int pages, const char *fmt, ...)
{
        int error;
        va_list ap;
        struct thread *td;
        struct proc *p2;

        if (!proc0.p_stats)
                panic("kproc_create called too soon");

        error = fork1(&thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags,
            pages, &p2);
        if (error)
                return (error);

        /* save a global descriptor, if desired */
        if (newpp != NULL)
                *newpp = p2;

        /* this is a non-swapped system process */
        PROC_LOCK(p2);
        td = FIRST_THREAD_IN_PROC(p2);
        p2->p_flag |= P_SYSTEM | P_KTHREAD;
        td->td_pflags |= TDP_KTHREAD;
        mtx_lock(&p2->p_sigacts->ps_mtx);
        p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
        mtx_unlock(&p2->p_sigacts->ps_mtx);
        PROC_UNLOCK(p2);

        /* set up arg0 for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
        va_end(ap);
        /* set up the thread name as well, for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
        va_end(ap);

        /* call the process's main()... */
        cpu_set_fork_handler(td, func, arg);
        TD_SET_CAN_RUN(td);

        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED)) {
                thread_lock(td);
                sched_add(td, SRQ_BORING);
                thread_unlock(td);
        }

        return (0);
}

void
kproc_exit(int ecode)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;

        /*
         * Reparent curthread from proc0 to init so that the zombie
         * is harvested.
         */
        sx_xlock(&proctree_lock);
        PROC_LOCK(p);
        proc_reparent(p, initproc);
        PROC_UNLOCK(p);
        sx_xunlock(&proctree_lock);

        /*
         * Wakeup anyone waiting for us to exit.
         */
        wakeup(p);

        /* Buh-bye! */
        exit1(td, W_EXITCODE(ecode, 0));
}

/*
 * Advise a kernel process to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kproc_suspend(struct proc *p, int timo)
{
        /*
         * Make sure this is indeed a system process and we can safely
         * use the p_siglist field.
         */
        PROC_LOCK(p);
        if ((p->p_flag & P_KTHREAD) == 0) {
                PROC_UNLOCK(p);
                return (EINVAL);
        }
        SIGADDSET(p->p_siglist, SIGSTOP);
        wakeup(p);
        return (msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp",
            timo));
}

int
kproc_resume(struct proc *p)
{
        /*
         * Make sure this is indeed a system process and we can safely
         * use the p_siglist field.
         */
        PROC_LOCK(p);
        if ((p->p_flag & P_KTHREAD) == 0) {
                PROC_UNLOCK(p);
                return (EINVAL);
        }
        SIGDELSET(p->p_siglist, SIGSTOP);
        PROC_UNLOCK(p);
        wakeup(&p->p_siglist);
        return (0);
}

void
kproc_suspend_check(struct proc *p)
{
        PROC_LOCK(p);
        while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
                wakeup(&p->p_siglist);
                msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
        }
        PROC_UNLOCK(p);
}
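
/*
 * Illustrative sketch, not part of the original file and left uncompiled:
 * a kernel process that wants to honour kproc_suspend()/kproc_resume()
 * polls kproc_suspend_check() from its main loop, roughly as below.  The
 * names example_kproc_main and example_wait_chan are hypothetical.
 */
#if 0
static int example_wait_chan;

static void
example_kproc_main(void)
{
        for (;;) {
                /* Park here while another thread has asked us to suspend. */
                kproc_suspend_check(curproc);

                /* ... do one unit of work ... */

                /* Sleep until woken, or for at most a second. */
                tsleep(&example_wait_chan, PPAUSE, "xmpl", hz);
        }
        /* kproc_exit(0) would be called if the loop could terminate. */
}
#endif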

/*
 * Start a kernel thread.
 *
 * This function is used to start "internal" daemons and intended
 * to be called from SYSINIT().
 */
void
kthread_start(const void *udata)
{
        const struct kthread_desc *kp = udata;
        int error;

        error = kthread_add((void (*)(void *))kp->func, NULL,
            NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
        if (error)
                panic("kthread_start: %s: error %d", kp->arg0, error);
}

/*
 * Create a kernel thread. It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newtdp is the return value pointing to the thread's struct thread.
 * ** XXX fix this --> flags are flags to fork1 (in unistd.h)
 * fmt and following will be *printf'd into (*newtd)->td_name (for ps, etc.).
 */
int
kthread_add(void (*func)(void *), void *arg, struct proc *p,
    struct thread **newtdp, int flags, int pages, const char *fmt, ...)
{
        va_list ap;
        struct thread *newtd, *oldtd;

        if (!proc0.p_stats)
                panic("kthread_add called too soon");

        /* If no process supplied, put it on proc0 */
        if (p == NULL) {
                p = &proc0;
                oldtd = &thread0;
        } else {
                oldtd = FIRST_THREAD_IN_PROC(p);
        }

        /* Initialize our new td */
        newtd = thread_alloc(pages);
        if (newtd == NULL)
                return (ENOMEM);

        bzero(&newtd->td_startzero,
            __rangeof(struct thread, td_startzero, td_endzero));
        /* XXX check if we should zero. */
        bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
            __rangeof(struct thread, td_startcopy, td_endcopy));

        /* set up arg0 for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
        va_end(ap);

        newtd->td_proc = p;     /* needed for cpu_set_upcall */

        /* XXX optimise this probably? */
        /* On x86 (and probably the others too) it is way too full of junk */
        /* Needs a better name */
        cpu_set_upcall(newtd, oldtd);
        /* put the designated function(arg) as the resume context */
        cpu_set_fork_handler(newtd, func, arg);

        newtd->td_pflags |= TDP_KTHREAD;
        newtd->td_ucred = crhold(p->p_ucred);

        /* this code almost the same as create_thread() in kern_thr.c */
        PROC_LOCK(p);
        p->p_flag |= P_HADTHREADS;
        newtd->td_sigmask = oldtd->td_sigmask; /* XXX dubious */
        thread_link(newtd, p);
        thread_lock(oldtd);
        /* let the scheduler know about these things. */
        sched_fork_thread(oldtd, newtd);
        TD_SET_CAN_RUN(newtd);
        thread_unlock(oldtd);
        PROC_UNLOCK(p);

        tidhash_add(newtd);

        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED)) {
                thread_lock(newtd);
                sched_add(newtd, SRQ_BORING);
                thread_unlock(newtd);
        }
        if (newtdp)
                *newtdp = newtd;
        return (0);
}

void
kthread_exit(void)
{
        struct proc *p;

        p = curthread->td_proc;

        /* A module may be waiting for us to exit. */
        wakeup(curthread);
        rw_wlock(&tidhash_lock);
        PROC_LOCK(p);
        if (p->p_numthreads == 1) {
                PROC_UNLOCK(p);
                rw_wunlock(&tidhash_lock);
                kproc_exit(0);

                /* NOTREACHED. */
        }
        LIST_REMOVE(curthread, td_hash);
        rw_wunlock(&tidhash_lock);
        PROC_SLOCK(p);
        thread_exit();
}
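
/*
 * Illustrative sketch, not part of the original file and left uncompiled:
 * a driver adding a worker thread to an existing kernel process with
 * kthread_add(), the worker calling kthread_exit() when it is done.  The
 * names example_softc, example_proc, example_worker and
 * example_start_worker are hypothetical.
 */
#if 0
static struct proc *example_proc;

struct example_softc {
        struct thread   *worker_td;
        int             shutting_down;
};

static void
example_worker(void *arg)
{
        struct example_softc *sc = arg;

        while (!sc->shutting_down) {
                /* ... service the hardware or a work queue ... */
        }
        kthread_exit();
}

static int
example_start_worker(struct example_softc *sc)
{
        /* Passing a NULL proc would attach the thread to proc0 instead. */
        return (kthread_add(example_worker, sc, example_proc,
            &sc->worker_td, 0, 0, "example worker"));
}
#endif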

/*
 * Advise a kernel thread to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kthread_suspend(struct thread *td, int timo)
{
        struct proc *p;

        p = td->td_proc;

        /*
         * td_pflags should not be read by any thread other than
         * curthread, but as long as this flag is invariant during the
         * thread's lifetime, it is OK to check its state.
         */
        if ((td->td_pflags & TDP_KTHREAD) == 0)
                return (EINVAL);

        /*
         * The caller of the primitive should have already checked that the
         * thread is up and running, thus not being blocked by other
         * conditions.
         */
        PROC_LOCK(p);
        thread_lock(td);
        td->td_flags |= TDF_KTH_SUSP;
        thread_unlock(td);
        return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
            timo));
}

/*
 * Resume a thread previously put asleep with kthread_suspend().
 */
int
kthread_resume(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;

        /*
         * td_pflags should not be read by any thread other than
         * curthread, but as long as this flag is invariant during the
         * thread's lifetime, it is OK to check its state.
         */
        if ((td->td_pflags & TDP_KTHREAD) == 0)
                return (EINVAL);

        PROC_LOCK(p);
        thread_lock(td);
        td->td_flags &= ~TDF_KTH_SUSP;
        thread_unlock(td);
        wakeup(&td->td_flags);
        PROC_UNLOCK(p);
        return (0);
}

/*
 * Used by the thread to poll as to whether it should yield/sleep
 * and notify the caller that it has happened.
 */
void
kthread_suspend_check(void)
{
        struct proc *p;
        struct thread *td;

        td = curthread;
        p = td->td_proc;

        if ((td->td_pflags & TDP_KTHREAD) == 0)
                panic("%s: curthread is not a valid kthread", __func__);

        /*
         * As long as the double-lock protection is used when accessing the
         * TDF_KTH_SUSP flag, synchronizing the read operation via proc mutex
         * is fine.
         */
        PROC_LOCK(p);
        while (td->td_flags & TDF_KTH_SUSP) {
                wakeup(&td->td_flags);
                msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
        }
        PROC_UNLOCK(p);
}

int
kproc_kthread_add(void (*func)(void *), void *arg,
    struct proc **procptr, struct thread **tdptr,
    int flags, int pages, const char *procname, const char *fmt, ...)
{
        int error;
        va_list ap;
        char buf[100];
        struct thread *td;

        if (*procptr == NULL) {
                error = kproc_create(func, arg,
                    procptr, flags, pages, "%s", procname);
                if (error)
                        return (error);
                td = FIRST_THREAD_IN_PROC(*procptr);
                if (tdptr)
                        *tdptr = td;
                va_start(ap, fmt);
                vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
                va_end(ap);
                return (0);
        }
        va_start(ap, fmt);
        vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        error = kthread_add(func, arg, *procptr,
            tdptr, flags, pages, "%s", buf);
        return (error);
}
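
/*
 * Illustrative sketch, not part of the original file and left uncompiled:
 * kproc_kthread_add() is convenient when several workers should be grouped
 * under one process; the first call creates the process, later calls only
 * add threads to it.  Each worker polls kthread_suspend_check() so that
 * kthread_suspend()/kthread_resume() can be used on it.  The names
 * example_taskproc, example_worker and example_start_workers are
 * hypothetical.
 */
#if 0
static struct proc *example_taskproc;

static void
example_worker(void *arg)
{
        for (;;) {
                /* Park here while kthread_suspend() has been requested. */
                kthread_suspend_check();

                /* ... do work, then rest for at most a second ... */
                pause("xmplw", hz);
        }
        /* NOTREACHED; kthread_exit() would be used on shutdown. */
}

static void
example_start_workers(void)
{
        struct thread *td;
        int error, i;

        for (i = 0; i < 4; i++) {
                error = kproc_kthread_add(example_worker, NULL,
                    &example_taskproc, &td, 0, 0, "example", "worker %d", i);
                if (error)
                        printf("example: cannot start worker %d: %d\n",
                            i, error);
        }
}
#endif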