/*-
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/umtx.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>

/*
 * Start a kernel process. This is called after a fork() call in
 * mi_startup() in the file kern/init_main.c.
 *
 * This function is used to start "internal" daemons and is intended
 * to be called from SYSINIT().
 */
void
kproc_start(const void *udata)
{
        const struct kproc_desc *kp = udata;
        int error;

        error = kproc_create((void (*)(void *))kp->func, NULL,
            kp->global_procpp, 0, 0, "%s", kp->arg0);
        if (error)
                panic("kproc_start: %s: error %d", kp->arg0, error);
}
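
/*
 * Illustrative sketch only (not compiled): a subsystem would typically
 * describe its daemon with a kproc_desc and register it through SYSINIT(),
 * which invokes kproc_start() above during boot.  The names example_main,
 * example_kp and exampleproc are hypothetical.
 */
#if 0
static struct proc *exampleproc;

static void
example_main(void)
{

        for (;;) {
                /* Do one round of work, then sleep for a second. */
                pause("exmain", hz);
        }
}

static struct kproc_desc example_kp = {
        "example",              /* arg0: process name shown by ps(1) */
        example_main,           /* func: the daemon's main loop */
        &exampleproc            /* global_procpp: where to store the proc */
};
SYSINIT(example_kproc, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start,
    &example_kp);
#endif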

/*
 * Create a kernel process/thread/whatever. It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newpp is the return value pointing to the thread's struct proc.
 * flags are flags to fork1 (in unistd.h)
 * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
 */
int
kproc_create(void (*func)(void *), void *arg,
    struct proc **newpp, int flags, int pages, const char *fmt, ...)
{
        struct fork_req fr;
        int error;
        va_list ap;
        struct thread *td;
        struct proc *p2;

        if (!proc0.p_stats)
                panic("kproc_create called too soon");

        bzero(&fr, sizeof(fr));
        fr.fr_flags = RFMEM | RFFDG | RFPROC | RFSTOPPED | flags;
        fr.fr_pages = pages;
        fr.fr_procp = &p2;
        error = fork1(&thread0, &fr);
        if (error)
                return error;

        /* save a global descriptor, if desired */
        if (newpp != NULL)
                *newpp = p2;

        /* this is a non-swapped system process */
        PROC_LOCK(p2);
        td = FIRST_THREAD_IN_PROC(p2);
        p2->p_flag |= P_SYSTEM | P_KPROC;
        td->td_pflags |= TDP_KTHREAD;
        mtx_lock(&p2->p_sigacts->ps_mtx);
        p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
        mtx_unlock(&p2->p_sigacts->ps_mtx);
        PROC_UNLOCK(p2);

        /* set up arg0 for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
        va_end(ap);
        /* set up the thread name for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
        va_end(ap);
#ifdef KTR
        sched_clear_tdname(td);
#endif

        /* call the process's main()... */
        cpu_fork_kthread_handler(td, func, arg);

        /* Avoid inheriting affinity from a random parent. */
        cpuset_setthread(td->td_tid, cpuset_root);
        thread_lock(td);
        TD_SET_CAN_RUN(td);
        sched_prio(td, PVM);
        sched_user_prio(td, PUSER);

        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED))
                sched_add(td, SRQ_BORING);
        thread_unlock(td);

        return 0;
}

void
kproc_exit(int ecode)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;

        /*
         * Reparent curthread from proc0 to init so that the zombie
         * is harvested.
         */
        sx_xlock(&proctree_lock);
        PROC_LOCK(p);
        proc_reparent(p, initproc);
        PROC_UNLOCK(p);
        sx_xunlock(&proctree_lock);

        /*
         * Wakeup anyone waiting for us to exit.
         */
        wakeup(p);

        /* Buh-bye! */
        exit1(td, ecode, 0);
}

/*
 * Advise a kernel process to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kproc_suspend(struct proc *p, int timo)
{
        /*
         * Make sure this is indeed a system process and we can safely
         * use the p_siglist field.
         */
        PROC_LOCK(p);
        if ((p->p_flag & P_KPROC) == 0) {
                PROC_UNLOCK(p);
                return (EINVAL);
        }
        SIGADDSET(p->p_siglist, SIGSTOP);
        wakeup(p);
        return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp", timo);
}

int
kproc_resume(struct proc *p)
{
        /*
         * Make sure this is indeed a system process and we can safely
         * use the p_siglist field.
         */
        PROC_LOCK(p);
        if ((p->p_flag & P_KPROC) == 0) {
                PROC_UNLOCK(p);
                return (EINVAL);
        }
        SIGDELSET(p->p_siglist, SIGSTOP);
        PROC_UNLOCK(p);
        wakeup(&p->p_siglist);
        return (0);
}

void
kproc_suspend_check(struct proc *p)
{
        PROC_LOCK(p);
        while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
                wakeup(&p->p_siglist);
                msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
        }
        PROC_UNLOCK(p);
}
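
/*
 * Illustrative sketch (not compiled) of the voluntary suspension protocol
 * above: the daemon polls kproc_suspend_check() in its main loop, while
 * some other thread parks it with kproc_suspend() and releases it again
 * with kproc_resume().  The example_* names are hypothetical.
 */
#if 0
static void
example_main(void)
{

        for (;;) {
                /* Park here whenever a suspension has been requested. */
                kproc_suspend_check(curproc);
                /* ... do one unit of work ... */
                pause("exmain", hz);
        }
}

static int
example_quiesce(struct proc *p)
{

        /* Ask the daemon to stop at its next check; wait up to 10 s. */
        return (kproc_suspend(p, 10 * hz));
}

static void
example_unquiesce(struct proc *p)
{

        kproc_resume(p);
}
#endif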

/*
 * Start a kernel thread.
 *
 * This function is used to start "internal" daemons and is intended
 * to be called from SYSINIT().
 */
void
kthread_start(const void *udata)
{
        const struct kthread_desc *kp = udata;
        int error;

        error = kthread_add((void (*)(void *))kp->func, NULL,
            NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
        if (error)
                panic("kthread_start: %s: error %d", kp->arg0, error);
}

/*
 * Create a kernel thread. It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newtdp is the return value pointing to the thread's struct thread.
 * flags are flags to fork1 (in unistd.h); only RFSTOPPED is honored here.
 * fmt and following will be *printf'd into (*newtdp)->td_name (for ps, etc.).
 */
int
kthread_add(void (*func)(void *), void *arg, struct proc *p,
    struct thread **newtdp, int flags, int pages, const char *fmt, ...)
{
        va_list ap;
        struct thread *newtd, *oldtd;

        if (!proc0.p_stats)
                panic("kthread_add called too soon");

        /* If no process supplied, put it on proc0 */
        if (p == NULL)
                p = &proc0;

        /* Initialize our new td */
        newtd = thread_alloc(pages);
        if (newtd == NULL)
                return (ENOMEM);

        PROC_LOCK(p);
        oldtd = FIRST_THREAD_IN_PROC(p);

        bzero(&newtd->td_startzero,
            __rangeof(struct thread, td_startzero, td_endzero));
        bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
            __rangeof(struct thread, td_startcopy, td_endcopy));

        /* set up arg0 for 'ps', et al */
        va_start(ap, fmt);
        vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
        va_end(ap);

        newtd->td_proc = p;     /* needed for cpu_copy_thread */
        /* might be further optimized for kthread */
        cpu_copy_thread(newtd, oldtd);
        /* put the designated function(arg) as the resume context */
        cpu_fork_kthread_handler(newtd, func, arg);

        newtd->td_pflags |= TDP_KTHREAD;
        thread_cow_get_proc(newtd, p);

        /* this code almost the same as create_thread() in kern_thr.c */
        p->p_flag |= P_HADTHREADS;
        thread_link(newtd, p);
        thread_lock(oldtd);
        /* let the scheduler know about these things. */
        sched_fork_thread(oldtd, newtd);
        TD_SET_CAN_RUN(newtd);
        thread_unlock(oldtd);
        PROC_UNLOCK(p);

        tidhash_add(newtd);

        /* Avoid inheriting affinity from a random parent. */
        cpuset_setthread(newtd->td_tid, cpuset_root);

        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED)) {
                thread_lock(newtd);
                sched_add(newtd, SRQ_BORING);
                thread_unlock(newtd);
        }
        if (newtdp)
                *newtdp = newtd;
        return 0;
}

void
kthread_exit(void)
{
        struct proc *p;
        struct thread *td;

        td = curthread;
        p = td->td_proc;

        /* A module may be waiting for us to exit. */
        wakeup(td);

        /*
         * The last exiting thread in a kernel process must tear down
         * the whole process.
         */
        rw_wlock(&tidhash_lock);
        PROC_LOCK(p);
        if (p->p_numthreads == 1) {
                PROC_UNLOCK(p);
                rw_wunlock(&tidhash_lock);
                kproc_exit(0);
        }
        LIST_REMOVE(td, td_hash);
        rw_wunlock(&tidhash_lock);
        umtx_thread_exit(td);
        tdsigcleanup(td);
        PROC_SLOCK(p);
        thread_exit();
}
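
/*
 * Illustrative sketch (not compiled) of attaching a worker thread to an
 * existing kernel process with kthread_add() and letting it terminate
 * itself through kthread_exit().  Passing p == NULL would attach the
 * thread to proc0 instead.  The example_softc structure and example_*
 * names are hypothetical.
 */
#if 0
struct example_softc {
        int     stopping;
};

static void
example_worker(void *arg)
{
        struct example_softc *sc = arg;

        while (!sc->stopping) {
                /* ... service the device ... */
                pause("exwork", hz / 10);
        }
        wakeup(sc);             /* tell the stopper we are gone */
        kthread_exit();
}

static int
example_start_worker(struct example_softc *sc, struct proc *p)
{

        /* Pass a struct thread ** instead of NULL to keep the new thread. */
        return (kthread_add(example_worker, sc, p, NULL, 0, 0,
            "example worker"));
}
#endif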

/*
 * Advise a kernel thread to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kthread_suspend(struct thread *td, int timo)
{
        struct proc *p;

        p = td->td_proc;

        /*
         * td_pflags should not be read by any thread other than
         * curthread, but as long as this flag is invariant during the
         * thread's lifetime, it is OK to check its state.
         */
        if ((td->td_pflags & TDP_KTHREAD) == 0)
                return (EINVAL);

        /*
         * The caller of the primitive should have already checked that the
         * thread is up and running, thus not being blocked by other
         * conditions.
         */
        PROC_LOCK(p);
        thread_lock(td);
        td->td_flags |= TDF_KTH_SUSP;
        thread_unlock(td);
        return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
            timo));
}

/*
 * Resume a thread previously put asleep with kthread_suspend().
 */
int
kthread_resume(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;

        /*
         * td_pflags should not be read by any thread other than
         * curthread, but as long as this flag is invariant during the
         * thread's lifetime, it is OK to check its state.
         */
        if ((td->td_pflags & TDP_KTHREAD) == 0)
                return (EINVAL);

        PROC_LOCK(p);
        thread_lock(td);
        td->td_flags &= ~TDF_KTH_SUSP;
        thread_unlock(td);
        wakeup(&td->td_flags);
        PROC_UNLOCK(p);
        return (0);
}

/*
 * Used by the thread to poll as to whether it should yield/sleep
 * and notify the caller that it has happened.
 */
void
kthread_suspend_check(void)
{
        struct proc *p;
        struct thread *td;

        td = curthread;
        p = td->td_proc;

        if ((td->td_pflags & TDP_KTHREAD) == 0)
                panic("%s: curthread is not a valid kthread", __func__);

        /*
         * As long as the double-lock protection is used when accessing the
         * TDF_KTH_SUSP flag, synchronizing the read operation via proc mutex
         * is fine.
         */
        PROC_LOCK(p);
        while (td->td_flags & TDF_KTH_SUSP) {
                wakeup(&td->td_flags);
                msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
        }
        PROC_UNLOCK(p);
}

int
kproc_kthread_add(void (*func)(void *), void *arg,
    struct proc **procptr, struct thread **tdptr,
    int flags, int pages, const char *procname, const char *fmt, ...)
{
        int error;
        va_list ap;
        char buf[100];
        struct thread *td;

        if (*procptr == NULL) {
                error = kproc_create(func, arg,
                    procptr, flags, pages, "%s", procname);
                if (error)
                        return (error);
                td = FIRST_THREAD_IN_PROC(*procptr);
                if (tdptr)
                        *tdptr = td;
                va_start(ap, fmt);
                vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
                va_end(ap);
#ifdef KTR
                sched_clear_tdname(td);
#endif
                return (0);
        }
        va_start(ap, fmt);
        vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        error = kthread_add(func, arg, *procptr,
            tdptr, flags, pages, "%s", buf);
        return (error);
}
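
/*
 * Illustrative sketch (not compiled) of kproc_kthread_add(): the first
 * call creates the "example" process along with its first thread, and
 * each later call only adds another thread to that same process, so a
 * loop like this ends up with one process containing one thread per
 * worker.  The example_* names are hypothetical.
 */
#if 0
static struct proc *exampleproc;

static void
example_worker(void *arg)
{
        int id = (int)(intptr_t)arg;

        for (;;) {
                /* ... worker 'id' does its share of the work ... */
                pause("exwork", hz);
        }
}

static void
example_start(int nworkers)
{
        int error, i;

        for (i = 0; i < nworkers; i++) {
                error = kproc_kthread_add(example_worker,
                    (void *)(intptr_t)i, &exampleproc, NULL, 0, 0,
                    "example", "example_%d", i);
                if (error != 0)
                        panic("example_start: kproc_kthread_add: %d", error);
        }
}
#endif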