/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/sx.h>
#include <sys/umtxvar.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>
#include <sys/tslog.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>

/*
 * Start a kernel process.  This is called after a fork() call in
 * mi_startup() in the file kern/init_main.c.
 *
 * This function is used to start "internal" daemons and intended
 * to be called from SYSINIT().
 */
void
kproc_start(const void *udata)
{
	const struct kproc_desc *kp = udata;
	int error;

	error = kproc_create((void (*)(void *))kp->func, NULL,
	    kp->global_procpp, 0, 0, "%s", kp->arg0);
	if (error)
		panic("kproc_start: %s: error %d", kp->arg0, error);
}
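/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * subsystem normally pairs kproc_start() with a statically initialized
 * kproc_desc and a SYSINIT() entry so that its daemon is spawned during
 * boot.  The "exampled" names below are made-up placeholders.
 *
 *	static struct proc *exampled_proc;
 *	static void exampled_main(void);
 *
 *	static struct kproc_desc exampled_kp = {
 *		.arg0 = "exampled",
 *		.func = exampled_main,
 *		.global_procpp = &exampled_proc,
 *	};
 *	SYSINIT(exampled, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, kproc_start,
 *	    &exampled_kp);
 */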
/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newpp is the return value pointing to the thread's struct proc.
 * flags are flags to fork1 (in unistd.h)
 * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
 */
int
kproc_create(void (*func)(void *), void *arg,
    struct proc **newpp, int flags, int pages, const char *fmt, ...)
{
	struct fork_req fr;
	int error;
	va_list ap;
	struct thread *td;
	struct proc *p2;

	if (!proc0.p_stats)
		panic("kproc_create called too soon");

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFMEM | RFFDG | RFPROC | RFSTOPPED | flags;
	fr.fr_flags2 = FR2_KPROC;
	fr.fr_pages = pages;
	fr.fr_procp = &p2;
	error = fork1(&thread0, &fr);
	if (error)
		return error;

	/* save a global descriptor, if desired */
	if (newpp != NULL)
		*newpp = p2;

	/* set up arg0 for 'ps', et al */
	va_start(ap, fmt);
	vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
	va_end(ap);
	td = FIRST_THREAD_IN_PROC(p2);
	va_start(ap, fmt);
	vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
	va_end(ap);
#ifdef KTR
	sched_clear_tdname(td);
#endif
	TSTHREAD(td, td->td_name);
#ifdef HWPMC_HOOKS
	if (PMC_SYSTEM_SAMPLING_ACTIVE()) {
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_PROC_CREATE_LOG, p2);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_CREATE_LOG, NULL);
	}
#endif

	/* call the processes' main()... */
	cpu_fork_kthread_handler(td, func, arg);

	/* Avoid inheriting affinity from a random parent. */
	cpuset_kernthread(td);
	thread_lock(td);
	TD_SET_CAN_RUN(td);
	sched_prio(td, PVM);
	sched_user_prio(td, PUSER);

	/* Delay putting it on the run queue until now. */
	if (!(flags & RFSTOPPED))
		sched_add(td, SRQ_BORING);
	else
		thread_unlock(td);

	return 0;
}

void
kproc_exit(int ecode)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;

	/*
	 * Reparent curthread from proc0 to init so that the zombie
	 * is harvested.
	 */
	sx_xlock(&proctree_lock);
	PROC_LOCK(p);
	proc_reparent(p, initproc, true);
	PROC_UNLOCK(p);
	sx_xunlock(&proctree_lock);

	/*
	 * Wakeup anyone waiting for us to exit.
	 */
	wakeup(p);

	/* Buh-bye! */
	exit1(td, ecode, 0);
}

/*
 * Advise a kernel process to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kproc_suspend(struct proc *p, int timo)
{
	/*
	 * Make sure this is indeed a system process and we can safely
	 * use the p_siglist field.
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_KPROC) == 0) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	SIGADDSET(p->p_siglist, SIGSTOP);
	wakeup(p);
	return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp", timo);
}

int
kproc_resume(struct proc *p)
{
	/*
	 * Make sure this is indeed a system process and we can safely
	 * use the p_siglist field.
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_KPROC) == 0) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	SIGDELSET(p->p_siglist, SIGSTOP);
	PROC_UNLOCK(p);
	wakeup(&p->p_siglist);
	return (0);
}

void
kproc_suspend_check(struct proc *p)
{
	PROC_LOCK(p);
	while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
		wakeup(&p->p_siglist);
		msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
	}
	PROC_UNLOCK(p);
}
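/*
 * Illustrative sketch (hypothetical code, not part of this file): the
 * suspend primitives above are cooperative.  A kernel process polls
 * kproc_suspend_check() from its main loop, while some other thread asks
 * it to stop or continue with kproc_suspend()/kproc_resume().  The
 * "exampled" names are made up.
 *
 *	static void
 *	exampled_main(void)
 *	{
 *		for (;;) {
 *			kproc_suspend_check(curproc);
 *			... do one unit of work, then sleep ...
 *		}
 *	}
 *
 * Controller side, e.g. from a shutdown or ioctl path:
 *
 *	error = kproc_suspend(exampled_proc, hz);	(wait up to 1 second)
 *	...
 *	kproc_resume(exampled_proc);
 */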
/*
 * Start a kernel thread.
 *
 * This function is used to start "internal" daemons and intended
 * to be called from SYSINIT().
 */

void
kthread_start(const void *udata)
{
	const struct kthread_desc *kp = udata;
	int error;

	error = kthread_add((void (*)(void *))kp->func, NULL,
	    NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
	if (error)
		panic("kthread_start: %s: error %d", kp->arg0, error);
}

/*
 * Create a kernel thread.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newtdp is the return value pointing to the thread's struct thread.
 * ** XXX fix this --> flags are flags to fork1 (in unistd.h)
 * fmt and following will be *printf'd into (*newtd)->td_name (for ps, etc.).
 */
int
kthread_add(void (*func)(void *), void *arg, struct proc *p,
    struct thread **newtdp, int flags, int pages, const char *fmt, ...)
{
	va_list ap;
	struct thread *newtd, *oldtd;

	if (!proc0.p_stats)
		panic("kthread_add called too soon");

	/* If no process supplied, put it on proc0 */
	if (p == NULL)
		p = &proc0;

	/* Initialize our new td */
	newtd = thread_alloc(pages);
	if (newtd == NULL)
		return (ENOMEM);

	PROC_LOCK(p);
	oldtd = FIRST_THREAD_IN_PROC(p);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	/* set up arg0 for 'ps', et al */
	va_start(ap, fmt);
	vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
	va_end(ap);

	TSTHREAD(newtd, newtd->td_name);

	newtd->td_proc = p;	/* needed for cpu_copy_thread */
	newtd->td_pflags |= TDP_KTHREAD;

	/* might be further optimized for kthread */
	cpu_copy_thread(newtd, oldtd);

	/* put the designated function(arg) as the resume context */
	cpu_fork_kthread_handler(newtd, func, arg);

	thread_cow_get_proc(newtd, p);

	/* This code is similar to thread_create() in kern_thr.c. */
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	thread_lock(oldtd);
	/* let the scheduler know about these things. */
	sched_fork_thread(oldtd, newtd);
	TD_SET_CAN_RUN(newtd);
	thread_unlock(oldtd);
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	/* Avoid inheriting affinity from a random parent. */
	cpuset_kernthread(newtd);
#ifdef HWPMC_HOOKS
	if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(newtd, PMC_FN_THR_CREATE_LOG, NULL);
#endif
	/* Delay putting it on the run queue until now. */
	if (!(flags & RFSTOPPED)) {
		thread_lock(newtd);
		sched_add(newtd, SRQ_BORING);
	}
	if (newtdp)
		*newtdp = newtd;
	return 0;
}
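/*
 * Illustrative sketch (hypothetical code, not part of this file): a typical
 * caller hands kthread_add() a worker function and keeps the returned
 * struct thread pointer.  Passing NULL for the process attaches the thread
 * to proc0; the worker runs until it calls kthread_exit().  The "example"
 * names are made up.
 *
 *	static struct thread *example_td;
 *
 *	static void
 *	example_worker(void *arg)
 *	{
 *		for (;;) {
 *			kthread_suspend_check();
 *			... do work, sleep, or break out of the loop ...
 *		}
 *		kthread_exit();
 *	}
 *
 *	error = kthread_add(example_worker, NULL, NULL, &example_td,
 *	    0, 0, "example worker");
 */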
void
kthread_exit(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;

#ifdef HWPMC_HOOKS
	if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	/* A module may be waiting for us to exit. */
	wakeup(td);

	/*
	 * The last exiting thread in a kernel process must tear down
	 * the whole process.
	 */
	PROC_LOCK(p);
	if (p->p_numthreads == 1) {
		PROC_UNLOCK(p);
		kproc_exit(0);
	}

	if (p->p_sysent->sv_ontdexit != NULL)
		p->p_sysent->sv_ontdexit(td);

	tidhash_remove(td);
	umtx_thread_exit(td);
	tdsigcleanup(td);
	PROC_SLOCK(p);
	thread_exit();
}

/*
 * Advise a kernel thread to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
kthread_suspend(struct thread *td, int timo)
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * td_pflags should not be read by any thread other than
	 * curthread, but as long as this flag is invariant during the
	 * thread's lifetime, it is OK to check its state.
	 */
	if ((td->td_pflags & TDP_KTHREAD) == 0)
		return (EINVAL);

	/*
	 * The caller of the primitive should have already checked that the
	 * thread is up and running, thus not being blocked by other
	 * conditions.
	 */
	PROC_LOCK(p);
	thread_lock(td);
	td->td_flags |= TDF_KTH_SUSP;
	thread_unlock(td);
	return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
	    timo));
}

/*
 * Resume a thread previously put asleep with kthread_suspend().
 */
int
kthread_resume(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * td_pflags should not be read by any thread other than
	 * curthread, but as long as this flag is invariant during the
	 * thread's lifetime, it is OK to check its state.
	 */
	if ((td->td_pflags & TDP_KTHREAD) == 0)
		return (EINVAL);

	PROC_LOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_KTH_SUSP;
	thread_unlock(td);
	wakeup(&td->td_flags);
	PROC_UNLOCK(p);
	return (0);
}

/*
 * Used by the thread to poll as to whether it should yield/sleep
 * and notify the caller that it has happened.
 */
void
kthread_suspend_check(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;

	if ((td->td_pflags & TDP_KTHREAD) == 0)
		panic("%s: curthread is not a valid kthread", __func__);

	/*
	 * Setting the TDF_KTH_SUSP flag is protected by process lock.
	 *
	 * Do an unlocked read first to avoid serializing with all other
	 * threads in the common case of not suspending.
	 */
	if ((td->td_flags & TDF_KTH_SUSP) == 0)
		return;
	PROC_LOCK(p);
	while ((td->td_flags & TDF_KTH_SUSP) != 0) {
		wakeup(&td->td_flags);
		msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
	}
	PROC_UNLOCK(p);
}

int
kproc_kthread_add(void (*func)(void *), void *arg,
    struct proc **procptr, struct thread **tdptr,
    int flags, int pages, const char *procname, const char *fmt, ...)
{
	int error;
	va_list ap;
	char buf[100];
	struct thread *td;

	if (*procptr == NULL) {
		error = kproc_create(func, arg,
		    procptr, flags, pages, "%s", procname);
		if (error)
			return (error);
		td = FIRST_THREAD_IN_PROC(*procptr);
		if (tdptr)
			*tdptr = td;
		va_start(ap, fmt);
		vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
		va_end(ap);
#ifdef KTR
		sched_clear_tdname(td);
#endif
		return (0);
	}
	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	error = kthread_add(func, arg, *procptr,
	    tdptr, flags, pages, "%s", buf);
	return (error);
}
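/*
 * Illustrative sketch (hypothetical code, not part of this file):
 * kproc_kthread_add() is convenient when several workers should share one
 * kernel process.  The first call creates the process (because *procptr is
 * still NULL) and names its initial thread; later calls just add more
 * threads to the same process.  The "example" names are made up.
 *
 *	static struct proc *exampled_proc;
 *	static struct thread *example_td0, *example_td1;
 *
 *	error = kproc_kthread_add(example_worker, arg0, &exampled_proc,
 *	    &example_td0, 0, 0, "exampled", "worker0");
 *	...
 *	error = kproc_kthread_add(example_worker, arg1, &exampled_proc,
 *	    &example_td1, 0, 0, "exampled", "worker1");
 */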