/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * Asserts below verify the stability of struct thread and struct proc
 * layout, as exposed by KBI to modules.  On head, the KBI is allowed
 * to drift; a change to the structures must be accompanied by a
 * matching update of the asserts.
 *
 * On the stable branches, after the KBI freeze, these conditions must
 * not be violated.  Typically new fields are added at the end of the
 * structures.
 */
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xfc,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x478,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x540,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x98,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x2f0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x338,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x278,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x28c,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x318,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);
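
/*
 * The probe defined above is visible from DTrace as proc:::lwp-exit;
 * SDT translates the double underscore in the probe name into a dash.
 * A minimal sketch of a consumer, assuming a DTrace-capable kernel:
 *
 *	# dtrace -n 'proc:::lwp-exit { printf("tid %d in pid %d", tid, pid); }'
 */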

/*
 * Thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

#define TID_BUFFER_SIZE	1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static lwpid_t
tid_alloc(void)
{
	lwpid_t tid;

	tid = alloc_unr(tid_unrhdr);
	if (tid != -1)
		return (tid);
	mtx_lock(&tid_lock);
	if (tid_head == tid_tail) {
		mtx_unlock(&tid_lock);
		return (-1);
	}
	tid = tid_buffer[tid_head];
	tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	return (tid);
}

static void
tid_free(lwpid_t tid)
{
	lwpid_t tmp_tid = -1;

	mtx_lock(&tid_lock);
	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
		tmp_tid = tid_buffer[tid_head];
		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
	}
	tid_buffer[tid_tail] = tid;
	tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
	mtx_unlock(&tid_lock);
	if (tmp_tid != -1)
		free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_lastcpu = td->td_oncpu = NOCPU;

	td->td_tid = tid_alloc();

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	td->td_lend_user_pri = PRI_MAX;
	EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);

	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
	tid_free(td->td_tid);
}
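
/*
 * To summarize the zone callbacks: thread_ctor()/thread_dtor() above
 * run on every uma_zalloc()/uma_zfree() of a thread, while
 * thread_init()/thread_fini() below run only when the backing item
 * itself is created or torn down.  A rough sketch of the lifecycle of
 * one item from thread_zone:
 *
 *	uma_zalloc(thread_zone, M_WAITOK)
 *		-> thread_init()	(first use of this item only)
 *		-> thread_ctor()	(every allocation)
 *	uma_zfree(thread_zone, td)
 *		-> thread_dtor()	(every free)
 *		-> thread_fini()	(item teardown; effectively never
 *					 here, as the zone is created with
 *					 UMA_ZONE_NOFREE)
 */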

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_rlqe = NULL;
	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
	umtx_thread_init(td);
	epoch_thread_init(td);
	td->td_kstack = 0;
	td->td_sel = NULL;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
	rlqentry_free(td->td_rlqe);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	epoch_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads etc.
 * Called from:
 *	{arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

	/*
	 * pid_max cannot be greater than PID_MAX; one extra number is
	 * left for thread0.
	 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    32 - 1, UMA_ZONE_NOFREE);
	tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
	rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			thread_cow_free(td_first);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
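
/*
 * Taken together, the three functions above implement a deferred-free
 * protocol: an exiting thread cannot free its own stack while still
 * running on it, so it is parked on the zombie list (directly via
 * thread_zombie(), or via the per-CPU deadthread hand-off picked up
 * by the next thread to run on that CPU, which calls thread_stash()),
 * and a later thread pays the debt.  In outline:
 *
 *	exiting thread:	thread_exit() -> PCPU deadthread ->
 *			    thread_stash() -> zombie list
 *	any thread:	thread_alloc() / thread_wait() ->
 *			    thread_reap() -> thread_free()
 */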

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

	KASSERT(td->td_kstack == 0,
	    ("thread_alloc_stack called on a thread with kstack"));
	if (!vm_thread_new(td, pages))
		return (0);
	cpu_thread_alloc(td);
	return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	lock_profile_thread_exit(td);
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	callout_drain(&td->td_slpcallout);
	uma_zfree(thread_zone, td);
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	newtd->td_ucred = crhold(p->p_ucred);
	newtd->td_limit = lim_hold(p->p_limit);
	newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

	newtd->td_ucred = crhold(td->td_ucred);
	newtd->td_limit = lim_hold(td->td_limit);
	newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

	if (td->td_ucred != NULL)
		crfree(td->td_ucred);
	if (td->td_limit != NULL)
		lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
	struct proc *p;
	struct ucred *oldcred;
	struct plimit *oldlimit;

	p = td->td_proc;
	oldcred = NULL;
	oldlimit = NULL;
	PROC_LOCK(p);
	if (td->td_ucred != p->p_ucred) {
		oldcred = td->td_ucred;
		td->td_ucred = crhold(p->p_ucred);
	}
	if (td->td_limit != p->p_limit) {
		oldlimit = td->td_limit;
		td->td_limit = lim_hold(p->p_limit);
	}
	td->td_cowgen = p->p_cowgen;
	PROC_UNLOCK(p);
	if (oldcred != NULL)
		crfree(oldcred);
	if (oldlimit != NULL)
		lim_free(oldlimit);
}
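
/*
 * A minimal sketch of the intended consumer of the COW machinery
 * above: on the way back to user mode, a thread compares its cached
 * generation with the process and refreshes its references only when
 * they have gone stale, so the common path avoids taking the process
 * lock entirely:
 *
 *	if (__predict_false(td->td_cowgen != p->p_cowgen))
 *		thread_cow_update(td);
 */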

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	SDT_PROBE0(proc, , , lwp__exit);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

	/*
	 * Drop FPU and debug register state storage, or any other
	 * architecture specific resources that would not be present
	 * in a newly created process.
	 */
	cpu_thread_exit(td);

	/*
	 * The last thread is left attached to the process so that the
	 * whole bundle gets recycled.  Skip all of this if we never had
	 * threads.  exit1() clears all sign of other threads when it
	 * goes to single threading, so the last thread always takes
	 * the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread.  P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread, p, false);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting... but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
#ifdef HWPMC_HOOKS
	/*
	 * If this thread is part of a process that is being tracked by
	 * hwpmc(4), inform the module of the thread's impending exit.
	 */
	if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
	} else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
	PROC_UNLOCK(p);
	PROC_STATLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	VM_CNT_INC(v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	ruxagg(p, td);
	rucollect(&p->p_ru, &td->td_ru);
	PROC_STATUNLOCK(p);

	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}
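
/*
 * A worked example of the bookkeeping above: if the per-CPU switchtime
 * was 1000 cpu_ticks() units at the last context switch and cpu_ticks()
 * now returns 1500, the exiting thread is charged 500 units, added
 * both to td_runtime (lifetime total) and to td_incruntime (pending
 * rusage, folded into the process totals by ruxagg()).
 */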

/*
 * Do any thread specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
	KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	lock_profile_thread_exit(td);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	thread_cow_free(td);
	callout_drain(&td->td_slpcallout);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
	SLIST_INIT(&td->td_epochs);
#endif
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
	MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
	int remaining;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
		remaining = p->p_numthreads - p->p_suspcount;
	else
		panic("calc_remaining: wrong mode %d", mode);
	return (remaining);
}

static int
remain_for_mode(int mode)
{

	return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td2, MA_OWNED);

	wakeup_swapper = 0;
	switch (mode) {
	case SINGLE_EXIT:
		if (TD_IS_SUSPENDED(td2))
			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, EINTR);
		break;
	case SINGLE_BOUNDARY:
	case SINGLE_NO_EXIT:
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
			wakeup_swapper |= sleepq_abort(td2, ERESTART);
		break;
	case SINGLE_ALLPROC:
		/*
		 * ALLPROC suspend tries to avoid spurious EINTR for
		 * threads sleeping interruptibly, by suspending the
		 * thread directly, similarly to sig_suspend_threads().
		 * Since such sleep is not performed at the user
		 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
		 * is used to avoid immediate un-suspend.
		 */
		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
		    TDF_ALLPROCSUSP)) == 0)
			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
			if ((td2->td_flags & TDF_SBDRY) == 0) {
				thread_suspend_one(td2);
				td2->td_flags |= TDF_ALLPROCSUSP;
			} else {
				wakeup_swapper |= sleepq_abort(td2, ERESTART);
			}
		}
		break;
	}
	return (wakeup_swapper);
}
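
/*
 * A worked example for the helpers above: consider a process with
 * p_numthreads == 4, p_suspcount == 1 and p_boundary_count == 1.
 * calc_remaining() then reports the threads still standing in the
 * way of the request:
 *
 *	SINGLE_EXIT			-> 4 (every thread must exit)
 *	SINGLE_BOUNDARY			-> 3 (not yet at the boundary)
 *	SINGLE_NO_EXIT / SINGLE_ALLPROC	-> 3 (not yet suspended)
 *
 * and remain_for_mode() gives the target: 1 for the modes driven by
 * a thread inside the process (the requestor itself keeps running),
 * 0 for SINGLE_ALLPROC, where the requestor is in another process.
 */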

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may
 * even copy out their return values and data before suspending.  They
 * may however be accelerated in reaching the user boundary as we will
 * wake up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
	struct thread *td;
	struct thread *td2;
	int remaining, wakeup_swapper;

	td = curthread;
	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	/*
	 * If allowing non-ALLPROC singlethreading for non-curproc
	 * callers, calc_remaining() and remain_for_mode() should be
	 * adjusted to also account for td->td_proc != p.  For now
	 * this is not implemented because it is not used.
	 */
	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
	    (mode != SINGLE_ALLPROC && td->td_proc == p),
	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);

	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	if (mode == SINGLE_ALLPROC)
		p->p_flag |= P_TOTAL_STOP;
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	remaining = calc_remaining(p, mode);
	while (remaining != remain_for_mode(mode)) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
			} else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
#endif
			}
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		remaining = calc_remaining(p, mode);

		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if (remaining == remain_for_mode(mode))
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td, p);
		remaining = calc_remaining(p, mode);
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * Convert the process to an unthreaded process.  The
		 * SINGLE_EXIT is called by exit1() or execve(), in
		 * both cases other threads must be retired.
		 */
		KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

		/*
		 * Wait for any remaining threads to exit cpu_throw().
		 */
		while (p->p_exitthreads != 0) {
			PROC_SUNLOCK(p);
			PROC_UNLOCK(p);
			sched_relinquish(td);
			PROC_LOCK(p);
			PROC_SLOCK(p);
		}
	} else if (mode == SINGLE_BOUNDARY) {
		/*
		 * Wait until all suspended threads are removed from
		 * the processors.  The thread_suspend_check()
		 * increments p_boundary_count while it is still
		 * running, which makes it possible for the execve()
		 * to destroy vmspace while our other threads are
		 * still using the address space.
		 *
		 * We lock the thread, which is only allowed to
		 * succeed after context switch code finished using
		 * the address space.
		 */
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
			    ("td %p not on boundary", td2));
			KASSERT(TD_IS_SUSPENDED(td2),
			    ("td %p is not suspended", td2));
			thread_unlock(td2);
		}
	}
	PROC_SUNLOCK(p);
	return (0);
}
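
/*
 * A sketch of the canonical pairing, modelled on the execve() caller:
 * the process lock is held across the whole window and
 * thread_single_end() undoes the matching mode.
 *
 *	PROC_LOCK(p);
 *	if (thread_single(p, SINGLE_BOUNDARY)) {
 *		PROC_UNLOCK(p);
 *		return (ERESTART);	(lost the race; caller backs out)
 *	}
 *	... operate on an effectively single-threaded process ...
 *	thread_single_end(p, SINGLE_BOUNDARY);
 *	PROC_UNLOCK(p);
 */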

bool
thread_suspend_check_needed(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
	    (td->td_dbgflags & TDB_SUSPEND) != 0));
}
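
/*
 * The predicate above is the loop condition for thread_suspend_check()
 * below.  A sketch of a typical return-to-user-mode caller (the real
 * one is the AST handler, gated by TDF_NEEDSUSPCHK):
 *
 *	PROC_LOCK(p);
 *	thread_suspend_check(0);	(may suspend here, or never
 *					 return under P_SINGLE_EXIT)
 *	PROC_UNLOCK(p);
 */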

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (thread_suspend_check_needed()) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * It is safe to access p->p_singlethread unlocked
			 * because it can only be set to our address by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests if they are deferred.
		 */
		if ((td->td_flags & TDF_SBDRY) != 0) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
			    (TDF_SEINTR | TDF_SERESTART),
			    ("both TDF_SEINTR and TDF_SERESTART"));
			return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			PROC_UNLOCK(p);

			/*
			 * Allow Linux emulation layer to do some work
			 * before thread suicide.
			 */
			if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
				(p->p_sysent->sv_thread_detach)(td);
			umtx_thread_exit(td);
			kern_thr_exit(td);
			panic("stopped thread did not exit");
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper = thread_unsuspend_one(
				    p->p_singlethread, p, false);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		thread_unlock(td);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	if (p == td->td_proc) {
		thread_stopped(p);
		p->p_suspcount++;
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	td->td_flags &= ~TDF_ALLPROCSUSP;
	if (td->td_proc == p) {
		PROC_SLOCK_ASSERT(p, MA_OWNED);
		p->p_suspcount--;
		if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
			td->td_flags &= ~TDF_BOUNDARY;
			p->p_boundary_count--;
		}
	}
	return (setrunnable(td));
}
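
/*
 * Note the division of labour above: thread_suspend_one() only marks
 * an already-inhibited thread as suspended and leaves the context
 * switch to its caller, while thread_suspend_switch() is the
 * self-suspend path, staging the same state change so that the proc
 * lock is never dropped while a thread lock is held, and then
 * performing the switch itself.  thread_unsuspend_one() is the common
 * inverse for both.
 */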

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    true);
			}
			thread_unlock(td);
		}
	} else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
	    p->p_numthreads == p->p_suspcount) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded;
		 * let it continue.
		 */
		if (p->p_singlethread->td_proc == p) {
			thread_lock(p->p_singlethread);
			wakeup_swapper = thread_unsuspend_one(
			    p->p_singlethread, p, false);
			thread_unlock(p->p_singlethread);
		}
	}
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
	struct thread *td;
	int wakeup_swapper;

	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
	    ("invalid mode %d", mode));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
	    ("mode %d does not match P_TOTAL_STOP", mode));
	KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
	    ("thread_single_end from other thread %p %p",
	    curthread, p->p_singlethread));
	KASSERT(mode != SINGLE_BOUNDARY ||
	    (p->p_flag & P_SINGLE_BOUNDARY) != 0,
	    ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
	    P_TOTAL_STOP);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td, p,
				    mode == SINGLE_BOUNDARY);
			}
			thread_unlock(td);
		}
	}
	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
	    ("inconsistent boundary count %d", p->p_boundary_count));
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}
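
/*
 * thread_find() above does a linear walk of the (typically short)
 * per-process thread list and requires the proc lock to be held
 * already; tdfind() below hashes the tid globally instead and is the
 * natural choice when only a tid is in hand, e.g.:
 *
 *	td = tdfind(tid, p->p_pid);	(returns with td->td_proc locked,
 *					 or NULL if not found)
 */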

/*
 * Locate a thread by number; return with proc lock held.
 */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH	16
	struct thread *td;
	int run = 0;

	rw_rlock(&tidhash_lock);
	LIST_FOREACH(td, TIDHASH(tid), td_hash) {
		if (td->td_tid == tid) {
			if (pid != -1 && td->td_proc->p_pid != pid) {
				td = NULL;
				break;
			}
			PROC_LOCK(td->td_proc);
			if (td->td_proc->p_state == PRS_NEW) {
				PROC_UNLOCK(td->td_proc);
				td = NULL;
				break;
			}
			if (run > RUN_THRESH) {
				if (rw_try_upgrade(&tidhash_lock)) {
					LIST_REMOVE(td, td_hash);
					LIST_INSERT_HEAD(TIDHASH(td->td_tid),
					    td, td_hash);
					rw_wunlock(&tidhash_lock);
					return (td);
				}
			}
			break;
		}
		run++;
	}
	rw_runlock(&tidhash_lock);
	return (td);
}

void
tidhash_add(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
	rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
	rw_wlock(&tidhash_lock);
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
}