/*-
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>

/*
 * Start a kernel process.  This is called after a fork() call in
 * mi_startup() in the file kern/init_main.c.
 *
 * This function is used to start "internal" daemons and is intended
 * to be called from SYSINIT().
 */
void
kproc_start(const void *udata)
{
        const struct kproc_desc *kp = udata;
        int error;

        error = kproc_create((void (*)(void *))kp->func, NULL,
            kp->global_procpp, 0, 0, "%s", kp->arg0);
        if (error)
                panic("kproc_start: %s: error %d", kp->arg0, error);
}
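
/*
 * Example usage (illustrative sketch only; "mydaemon", mydaemon_main() and
 * mydaemon_proc are hypothetical names, not part of this file).  A subsystem
 * normally starts its kernel process by registering a kproc_desc with
 * SYSINIT(), which arranges for kproc_start() to run during boot:
 *
 *	static struct proc *mydaemon_proc;
 *
 *	static void
 *	mydaemon_main(void)
 *	{
 *		for (;;) {
 *			... do one round of work, then sleep ...
 *		}
 *	}
 *
 *	static struct kproc_desc mydaemon_kp = {
 *		"mydaemon",
 *		mydaemon_main,
 *		&mydaemon_proc
 *	};
 *	SYSINIT(mydaemon, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, kproc_start,
 *	    &mydaemon_kp);
 */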

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newpp is the return value pointing to the new process's struct proc.
 * flags are flags to fork1 (in unistd.h)
 * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
 */
int
kproc_create(void (*func)(void *), void *arg,
    struct proc **newpp, int flags, int pages, const char *fmt, ...)
{
        int error;
        va_list ap;
        struct thread *td;
        struct proc *p2;

        if (!proc0.p_stats)
                panic("kproc_create called too soon");

        error = fork1(&thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags,
            pages, &p2);
        if (error)
                return (error);

        /* save a global descriptor, if desired */
        if (newpp != NULL)
                *newpp = p2;

        /* this is a non-swapped system process */
        PROC_LOCK(p2);
        td = FIRST_THREAD_IN_PROC(p2);
        p2->p_flag |= P_SYSTEM | P_KTHREAD;
        td->td_pflags |= TDP_KTHREAD;
        mtx_lock(&p2->p_sigacts->ps_mtx);
        p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
        mtx_unlock(&p2->p_sigacts->ps_mtx);
        PROC_UNLOCK(p2);

        /* set up arg0 for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
        va_end(ap);
        /* set up the thread name the same way, for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
        va_end(ap);

        /* call the process's main()... */
        cpu_set_fork_handler(td, func, arg);
        TD_SET_CAN_RUN(td);

        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED)) {
                thread_lock(td);
                sched_add(td, SRQ_BORING);
                thread_unlock(td);
        }

        return (0);
}

void
kproc_exit(int ecode)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;

        /*
         * Reparent the process from proc0 to init so that the zombie
         * is harvested.
         */
        sx_xlock(&proctree_lock);
        PROC_LOCK(p);
        proc_reparent(p, initproc);
        PROC_UNLOCK(p);
        sx_xunlock(&proctree_lock);

        /*
         * Wakeup anyone waiting for us to exit.
         */
        wakeup(p);

        /* Buh-bye! */
        exit1(td, W_EXITCODE(ecode, 0));
}

/*
 * Advise a kernel process to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kproc_suspend(struct proc *p, int timo)
{
        /*
         * Make sure this is indeed a system process and we can safely
         * use the p_siglist field.
         */
        PROC_LOCK(p);
        if ((p->p_flag & P_KTHREAD) == 0) {
                PROC_UNLOCK(p);
                return (EINVAL);
        }
        SIGADDSET(p->p_siglist, SIGSTOP);
        wakeup(p);
        return (msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp",
            timo));
}

int
kproc_resume(struct proc *p)
{
        /*
         * Make sure this is indeed a system process and we can safely
         * use the p_siglist field.
         */
        PROC_LOCK(p);
        if ((p->p_flag & P_KTHREAD) == 0) {
                PROC_UNLOCK(p);
                return (EINVAL);
        }
        SIGDELSET(p->p_siglist, SIGSTOP);
        PROC_UNLOCK(p);
        wakeup(&p->p_siglist);
        return (0);
}

void
kproc_suspend_check(struct proc *p)
{
        PROC_LOCK(p);
        while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
                wakeup(&p->p_siglist);
                msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
        }
        PROC_UNLOCK(p);
}
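
/*
 * Example of the cooperative suspend protocol, continuing the hypothetical
 * mydaemon sketch above (names are illustrative only).  The kernel process
 * polls kproc_suspend_check() in its main loop, while some other thread
 * drives it with kproc_suspend() and kproc_resume():
 *
 *	static void
 *	mydaemon_main(void)
 *	{
 *		for (;;) {
 *			kproc_suspend_check(mydaemon_proc);
 *			... do one round of work, then sleep ...
 *		}
 *	}
 *
 *	... elsewhere, e.g. at shutdown or module unload ...
 *	error = kproc_suspend(mydaemon_proc, 5 * hz);	wait up to 5 seconds
 *	...
 *	kproc_resume(mydaemon_proc);			let it run again
 */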

/*
 * Start a kernel thread.
 *
 * This function is used to start "internal" daemons and is intended
 * to be called from SYSINIT().
 */
void
kthread_start(const void *udata)
{
        const struct kthread_desc *kp = udata;
        int error;

        error = kthread_add((void (*)(void *))kp->func, NULL,
            NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
        if (error)
                panic("kthread_start: %s: error %d", kp->arg0, error);
}

/*
 * Create a kernel thread.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newtdp is the return value pointing to the thread's struct thread.
 * flags: only RFSTOPPED is honored; it delays putting the new thread
 *   on the run queue.
 * fmt and following will be *printf'd into (*newtdp)->td_name (for ps, etc.).
 */
int
kthread_add(void (*func)(void *), void *arg, struct proc *p,
    struct thread **newtdp, int flags, int pages, const char *fmt, ...)
{
        va_list ap;
        struct thread *newtd, *oldtd;

        if (!proc0.p_stats)
                panic("kthread_add called too soon");

        /* If no process supplied, put it on proc0 */
        if (p == NULL) {
                p = &proc0;
                oldtd = &thread0;
        } else {
                oldtd = FIRST_THREAD_IN_PROC(p);
        }

        /* Initialize our new td */
        newtd = thread_alloc(pages);
        if (newtd == NULL)
                return (ENOMEM);

        bzero(&newtd->td_startzero,
            __rangeof(struct thread, td_startzero, td_endzero));
        /* XXX check if we should zero. */
        bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
            __rangeof(struct thread, td_startcopy, td_endcopy));

        /* set up arg0 for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
        va_end(ap);

        newtd->td_proc = p;     /* needed for cpu_set_upcall */

        /* XXX optimise this probably? */
        /* On x86 (and probably the others too) it is way too full of junk */
        /* Needs a better name */
        cpu_set_upcall(newtd, oldtd);
        /* put the designated function(arg) as the resume context */
        cpu_set_fork_handler(newtd, func, arg);

        newtd->td_pflags |= TDP_KTHREAD;
        newtd->td_ucred = crhold(p->p_ucred);

        /* this code almost the same as create_thread() in kern_thr.c */
        PROC_LOCK(p);
        p->p_flag |= P_HADTHREADS;
        newtd->td_sigmask = oldtd->td_sigmask;  /* XXX dubious */
        thread_link(newtd, p);
        thread_lock(oldtd);
        /* let the scheduler know about these things. */
        sched_fork_thread(oldtd, newtd);
        TD_SET_CAN_RUN(newtd);
        thread_unlock(oldtd);
        PROC_UNLOCK(p);

        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED)) {
                thread_lock(newtd);
                sched_add(newtd, SRQ_BORING);
                thread_unlock(newtd);
        }
        if (newtdp)
                *newtdp = newtd;
        return (0);
}

void
kthread_exit(void)
{
        struct proc *p;

        /* A module may be waiting for us to exit. */
        wakeup(curthread);

        /*
         * We could rely on thread_exit() to call exit1(), but there is
         * extra work that needs to be done first.
         */
        if (curthread->td_proc->p_numthreads == 1)
                kproc_exit(0);  /* never returns */

        p = curthread->td_proc;
        PROC_LOCK(p);
        PROC_SLOCK(p);
        thread_exit();
}
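
/*
 * Example (illustrative sketch only; myworker(), myworker_td and the
 * mydaemon_proc process from the sketches above are hypothetical).  An
 * additional thread can be hung off an existing kernel process; the thread
 * function does not return to its caller and typically finishes by calling
 * kthread_exit():
 *
 *	static struct thread *myworker_td;
 *	static int unit0 = 0;
 *
 *	static void
 *	myworker(void *arg)
 *	{
 *		int unit = *(int *)arg;
 *
 *		for (;;) {
 *			... service queue number 'unit' ...
 *			if (... shutting down ...)
 *				kthread_exit();
 *		}
 *	}
 *
 *	error = kthread_add(myworker, &unit0, mydaemon_proc, &myworker_td,
 *	    0, 0, "myworker%d", unit0);
 */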

/*
 * Advise a kernel thread to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kthread_suspend(struct thread *td, int timo)
{
        struct proc *p;

        p = td->td_proc;

        /*
         * td_pflags is normally private to curthread and should not be
         * read by other threads, but since TDP_KTHREAD is invariant for
         * the thread's lifetime, it is safe to check it here.
         */
        if ((td->td_pflags & TDP_KTHREAD) == 0)
                return (EINVAL);

        /*
         * The caller of this primitive should have already checked that the
         * thread is up and running, thus not being blocked by other
         * conditions.
         */
        PROC_LOCK(p);
        thread_lock(td);
        td->td_flags |= TDF_KTH_SUSP;
        thread_unlock(td);
        return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
            timo));
}

/*
 * Resume a thread previously put asleep with kthread_suspend().
 */
int
kthread_resume(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;

        /*
         * td_pflags is normally private to curthread and should not be
         * read by other threads, but since TDP_KTHREAD is invariant for
         * the thread's lifetime, it is safe to check it here.
         */
        if ((td->td_pflags & TDP_KTHREAD) == 0)
                return (EINVAL);

        PROC_LOCK(p);
        thread_lock(td);
        td->td_flags &= ~TDF_KTH_SUSP;
        thread_unlock(td);
        wakeup(&td->td_flags);
        PROC_UNLOCK(p);
        return (0);
}

/*
 * Used by the thread to poll whether it should yield/sleep and to notify
 * the caller that it has happened.
 */
void
kthread_suspend_check(void)
{
        struct proc *p;
        struct thread *td;

        td = curthread;
        p = td->td_proc;

        if ((td->td_pflags & TDP_KTHREAD) == 0)
                panic("%s: curthread is not a valid kthread", __func__);

        /*
         * TDF_KTH_SUSP is set and cleared with both the thread lock and
         * the proc mutex held, so reading it here with only the proc
         * mutex held is safe.
         */
        PROC_LOCK(p);
        while (td->td_flags & TDF_KTH_SUSP) {
                wakeup(&td->td_flags);
                msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
        }
        PROC_UNLOCK(p);
}

int
kproc_kthread_add(void (*func)(void *), void *arg,
    struct proc **procptr, struct thread **tdptr,
    int flags, int pages, char *procname, const char *fmt, ...)
{
        int error;
        va_list ap;
        char buf[100];
        struct thread *td;

        if (*procptr == NULL) {
                error = kproc_create(func, arg,
                    procptr, flags, pages, "%s", procname);
                if (error)
                        return (error);
                td = FIRST_THREAD_IN_PROC(*procptr);
                if (tdptr)
                        *tdptr = td;
                va_start(ap, fmt);
                vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
                va_end(ap);
                return (0);
        }
        va_start(ap, fmt);
        vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        error = kthread_add(func, arg, *procptr,
            tdptr, flags, pages, "%s", buf);
        return (error);
}
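
/*
 * Example (illustrative sketch only; all names below are hypothetical).
 * kproc_kthread_add() creates the process on first use and adds further
 * threads to it afterwards, so a set of per-unit workers can share a
 * single kernel process:
 *
 *	static struct proc *mypool_proc;
 *
 *	static void
 *	mypool_worker(void *arg)
 *	{
 *		... per-unit work loop, finished with kthread_exit() ...
 *	}
 *
 *	... for each unit, with 'sc' a per-unit softc pointer ...
 *	error = kproc_kthread_add(mypool_worker, sc, &mypool_proc, NULL,
 *	    0, 0, "mypool", "mypool_%d", unit);
 */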