/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/rangelock.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * The asserts below verify the stability of the struct thread and
 * struct proc layouts, as exposed by the KBI to modules.  On head, the
 * KBI is allowed to drift; changes to the structures must be
 * accompanied by matching assert updates.
 *
 * On the stable branches after the KBI freeze, the conditions must not
 * be violated.  Typically new fields are moved to the end of the
 * structures.
 */
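/*
 * One way to check these offsets against a built kernel is to query the
 * debug symbols (an illustrative sketch; the exact invocation depends
 * on the toolchain and kernel configuration):
 *
 *	$ gdb -batch -ex 'print/x &((struct thread *)0)->td_frame' \
 *	    kernel.debug
 *
 * The result should match the constant asserted below for the build
 * architecture; a mismatch means a field was inserted mid-structure.
 */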
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xfc,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x478,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x690,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0xbc,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x3c8,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c0,
    "struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x98,
    "struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
    "struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x2f0,
    "struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x338,
    "struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
    "struct proc KBI p_flag");
_Static_assert(offsetof(struct proc, p_pid) == 0x74,
    "struct proc KBI p_pid");
_Static_assert(offsetof(struct proc, p_filemon) == 0x278,
    "struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x28c,
    "struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x318,
    "struct proc KBI p_emuldata");
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, , , lwp__exit);
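/*
 * The probe above is visible to DTrace as proc:::lwp-exit (the double
 * underscore in the probe name maps to a dash).  A minimal usage
 * sketch, assuming a kernel with DTrace support:
 *
 *	# dtrace -n 'proc:::lwp-exit { printf("%s tid %d",
 *	    execname, curthread->td_tid); }'
 */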
/*
 * Thread related storage.
 */
static uma_zone_t thread_zone;

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
    bool boundary);

/*
 * Freed tids are parked in tid_buffer and only handed back to the
 * unr(9) allocator once the ring fills up, which delays the reuse of
 * recently freed thread IDs.
 */
#define TID_BUFFER_SIZE 1024

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
static lwpid_t tid_buffer[TID_BUFFER_SIZE];
static int tid_head, tid_tail;
static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");

struct tidhashhead *tidhashtbl;
u_long tidhash;
struct rwlock tidhash_lock;

EVENTHANDLER_LIST_DEFINE(thread_ctor);
EVENTHANDLER_LIST_DEFINE(thread_dtor);
EVENTHANDLER_LIST_DEFINE(thread_init);
EVENTHANDLER_LIST_DEFINE(thread_fini);

static lwpid_t
tid_alloc(void)
{
        lwpid_t tid;

        tid = alloc_unr(tid_unrhdr);
        if (tid != -1)
                return (tid);
        mtx_lock(&tid_lock);
        if (tid_head == tid_tail) {
                mtx_unlock(&tid_lock);
                return (-1);
        }
        tid = tid_buffer[tid_head];
        tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
        mtx_unlock(&tid_lock);
        return (tid);
}

static void
tid_free(lwpid_t tid)
{
        lwpid_t tmp_tid = -1;

        mtx_lock(&tid_lock);
        if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
                tmp_tid = tid_buffer[tid_head];
                tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
        }
        tid_buffer[tid_tail] = tid;
        tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE;
        mtx_unlock(&tid_lock);
        if (tmp_tid != -1)
                free_unr(tid_unrhdr, tmp_tid);
}

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_lastcpu = td->td_oncpu = NOCPU;

        td->td_tid = tid_alloc();

        /*
         * Note that td_critnest begins life as 1 because the thread is not
         * running and is thereby implicitly waiting to be on the receiving
         * end of a context switch.
         */
        td->td_critnest = 1;
        td->td_lend_user_pri = PRI_MAX;
        EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
#ifdef AUDIT
        audit_thread_alloc(td);
#endif
        umtx_thread_alloc(td);
        return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
#ifdef AUDIT
        audit_thread_free(td);
#endif
        /* Free all OSD associated with this thread. */
        osd_thread_exit(td);
        td_softdep_cleanup(td);
        MPASS(td->td_su == NULL);

        EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
        tid_free(td->td_tid);
}
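/*
 * In uma(9), the ctor/dtor pair above runs on every allocation and
 * free of an item, while the init/fini pair below runs only when an
 * item is imported into or released from the zone.  This is why
 * type-stable state such as the sleep queue and turnstile is set up in
 * thread_init() and survives thread_free()/thread_alloc() cycles.
 */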
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
        struct thread *td;

        td = (struct thread *)mem;

        td->td_sleepqueue = sleepq_alloc();
        td->td_turnstile = turnstile_alloc();
        td->td_rlqe = NULL;
        EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
        umtx_thread_init(td);
        td->td_kstack = 0;
        td->td_sel = NULL;
        return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
        rlqentry_free(td->td_rlqe);
        turnstile_free(td->td_turnstile);
        sleepq_free(td->td_sleepqueue);
        umtx_thread_fini(td);
        seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads etc.
 * Called from:
 *	{arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
        TAILQ_INIT(&p->p_threads);           /* all threads in proc */
        proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

        sigqueue_init(&p->p_sigqueue, p);
        p->p_ksi = ksiginfo_alloc(1);
        if (p->p_ksi != NULL) {
                /* XXX p_ksi may be null if ksiginfo zone is not ready */
                p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
        }
        LIST_INIT(&p->p_mqnotifier);
        p->p_numthreads = 0;
        thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

        mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);

        /*
         * pid_max cannot be greater than PID_MAX.
         * Leave one number for thread0.
         */
        tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            32 - 1, UMA_ZONE_NOFREE);
        tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
        rw_init(&tidhash_lock, "tidhash");
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
        mtx_lock_spin(&zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
        mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
        atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
        thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;

        /*
         * Don't even bother to lock if none at this instant;
         * we really don't care about the next instant.
         */
        if (!TAILQ_EMPTY(&zombie_threads)) {
                mtx_lock_spin(&zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                mtx_unlock_spin(&zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_slpq);
                        thread_cow_free(td_first);
                        thread_free(td_first);
                        td_first = td_next;
                }
        }
}
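/*
 * A typical thread lifecycle through this file, as a sketch:
 * thread_alloc() (possibly reaping zombies first), thread_link() into
 * a process, run, then thread_exit(), which parks the struct on the
 * zombie list via thread_zombie()/thread_stash(); a later
 * thread_reap() finally calls thread_free().  A thread cannot free
 * itself because it is still executing on its own kernel stack.
 */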
/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(int pages)
{
        struct thread *td;

        thread_reap();          /* check if any zombies to get */

        td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
        KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
        if (!vm_thread_new(td, pages)) {
                uma_zfree(thread_zone, td);
                return (NULL);
        }
        cpu_thread_alloc(td);
        return (td);
}

int
thread_alloc_stack(struct thread *td, int pages)
{

        KASSERT(td->td_kstack == 0,
            ("thread_alloc_stack called on a thread with kstack"));
        if (!vm_thread_new(td, pages))
                return (0);
        cpu_thread_alloc(td);
        return (1);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        lock_profile_thread_exit(td);
        if (td->td_cpuset)
                cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_free(td);
        if (td->td_kstack != 0)
                vm_thread_dispose(td);
        callout_drain(&td->td_slpcallout);
        uma_zfree(thread_zone, td);
}

void
thread_cow_get_proc(struct thread *newtd, struct proc *p)
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        newtd->td_ucred = crhold(p->p_ucred);
        newtd->td_limit = lim_hold(p->p_limit);
        newtd->td_cowgen = p->p_cowgen;
}

void
thread_cow_get(struct thread *newtd, struct thread *td)
{

        newtd->td_ucred = crhold(td->td_ucred);
        newtd->td_limit = lim_hold(td->td_limit);
        newtd->td_cowgen = td->td_cowgen;
}

void
thread_cow_free(struct thread *td)
{

        if (td->td_ucred != NULL)
                crfree(td->td_ucred);
        if (td->td_limit != NULL)
                lim_free(td->td_limit);
}

void
thread_cow_update(struct thread *td)
{
        struct proc *p;
        struct ucred *oldcred;
        struct plimit *oldlimit;

        p = td->td_proc;
        oldcred = NULL;
        oldlimit = NULL;
        PROC_LOCK(p);
        if (td->td_ucred != p->p_ucred) {
                oldcred = td->td_ucred;
                td->td_ucred = crhold(p->p_ucred);
        }
        if (td->td_limit != p->p_limit) {
                oldlimit = td->td_limit;
                td->td_limit = lim_hold(p->p_limit);
        }
        td->td_cowgen = p->p_cowgen;
        PROC_UNLOCK(p);
        if (oldcred != NULL)
                crfree(oldcred);
        if (oldlimit != NULL)
                lim_free(oldlimit);
}
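/*
 * The thread_cow_*() functions above implement copy-on-write snapshots
 * of per-process data that threads read frequently (credentials and
 * resource limits): each thread holds its own reference, and p_cowgen
 * is bumped whenever the process-wide copy changes, so a thread only
 * needs to take the proc lock in thread_cow_update() when its cached
 * generation is stale.
 */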
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        uint64_t runtime, new_switchtime;
        struct thread *td;
        struct thread *td2;
        struct proc *p;
        int wakeup_swapper;

        td = curthread;
        p = td->td_proc;

        PROC_SLOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&Giant, MA_NOTOWNED);

        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
            (long)p->p_pid, td->td_name);
        SDT_PROBE0(proc, , , lwp__exit);
        KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

        /*
         * Drop FPU and debug register state storage, and any other
         * architecture-specific resources that would not be found on a
         * new, untouched process.
         */
        cpu_thread_exit(td);

        /*
         * The last thread is left attached to the process so that the
         * whole bundle gets recycled.  Skip all this stuff if we never
         * had threads.  EXIT clears all signs of other threads when it
         * goes to single threading, so the last thread always takes
         * the short path.
         */
        if (p->p_flag & P_HADTHREADS) {
                if (p->p_numthreads > 1) {
                        atomic_add_int(&td->td_proc->p_exitthreads, 1);
                        thread_unlink(td);
                        td2 = FIRST_THREAD_IN_PROC(p);
                        sched_exit_thread(td2, td);

                        /*
                         * The test below is NOT true if we are the
                         * sole exiting thread.  P_STOPPED_SINGLE is unset
                         * in exit1() after it is the only survivor.
                         */
                        if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                                if (p->p_numthreads == p->p_suspcount) {
                                        thread_lock(p->p_singlethread);
                                        wakeup_swapper = thread_unsuspend_one(
                                            p->p_singlethread, p, false);
                                        thread_unlock(p->p_singlethread);
                                        if (wakeup_swapper)
                                                kick_proc0();
                                }
                        }

                        PCPU_SET(deadthread, td);
                } else {
                        /*
                         * The last thread is exiting... but not through exit().
                         */
                        panic("thread_exit: Last thread exiting on its own");
                }
        }
#ifdef HWPMC_HOOKS
        /*
         * If this thread is part of a process that is being tracked by
         * hwpmc(4), inform the module of the thread's impending exit.
         */
        if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
                PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
                PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
        } else if (PMC_SYSTEM_SAMPLING_ACTIVE())
                PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
#endif
        PROC_UNLOCK(p);
        PROC_STATLOCK(p);
        thread_lock(td);
        PROC_SUNLOCK(p);

        /* Do the same timestamp bookkeeping that mi_switch() would do. */
        new_switchtime = cpu_ticks();
        runtime = new_switchtime - PCPU_GET(switchtime);
        td->td_runtime += runtime;
        td->td_incruntime += runtime;
        PCPU_SET(switchtime, new_switchtime);
        PCPU_SET(switchticks, ticks);
        VM_CNT_INC(v_swtch);

        /* Save our resource usage in our process. */
        td->td_ru.ru_nvcsw++;
        ruxagg(p, td);
        rucollect(&p->p_ru, &td->td_ru);
        PROC_STATUNLOCK(p);

        td->td_state = TDS_INACTIVE;
#ifdef WITNESS
        witness_thread_exit(td);
#endif
        CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
        sched_throw(td);
        panic("I'm a teapot!");
        /* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
        struct thread *td;

        mtx_assert(&Giant, MA_NOTOWNED);
        KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
        KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
        td = FIRST_THREAD_IN_PROC(p);
        /* Lock the last thread so we spin until it exits cpu_throw(). */
        thread_lock(td);
        thread_unlock(td);
        lock_profile_thread_exit(td);
        cpuset_rel(td->td_cpuset);
        td->td_cpuset = NULL;
        cpu_thread_clean(td);
        thread_cow_free(td);
        callout_drain(&td->td_slpcallout);
        thread_reap();  /* check for zombie threads etc. */
}
/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

        /*
         * XXX This can't be enabled because it's called for proc0 before
         * its lock has been created.
         * PROC_LOCK_ASSERT(p, MA_OWNED);
         */
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_flags = TDF_INMEM;

        LIST_INIT(&td->td_contested);
        LIST_INIT(&td->td_lprof[0]);
        LIST_INIT(&td->td_lprof[1]);
#ifdef EPOCH_TRACE
        SLIST_INIT(&td->td_epochs);
#endif
        sigqueue_init(&td->td_sigqueue, p);
        callout_init(&td->td_slpcallout, 1);
        TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
        p->p_numthreads++;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
        struct proc *p = td->td_proc;

        PROC_LOCK_ASSERT(p, MA_OWNED);
#ifdef EPOCH_TRACE
        MPASS(SLIST_EMPTY(&td->td_epochs));
#endif

        TAILQ_REMOVE(&p->p_threads, td, td_plist);
        p->p_numthreads--;
        /* could clear a few other things here */
        /* Must NOT clear links to proc! */
}

static int
calc_remaining(struct proc *p, int mode)
{
        int remaining;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        if (mode == SINGLE_EXIT)
                remaining = p->p_numthreads;
        else if (mode == SINGLE_BOUNDARY)
                remaining = p->p_numthreads - p->p_boundary_count;
        else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
                remaining = p->p_numthreads - p->p_suspcount;
        else
                panic("calc_remaining: wrong mode %d", mode);
        return (remaining);
}

static int
remain_for_mode(int mode)
{

        return (mode == SINGLE_ALLPROC ? 0 : 1);
}

static int
weed_inhib(int mode, struct thread *td2, struct proc *p)
{
        int wakeup_swapper;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td2, MA_OWNED);

        wakeup_swapper = 0;
        switch (mode) {
        case SINGLE_EXIT:
                if (TD_IS_SUSPENDED(td2))
                        wakeup_swapper |= thread_unsuspend_one(td2, p, true);
                if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
                        wakeup_swapper |= sleepq_abort(td2, EINTR);
                break;
        case SINGLE_BOUNDARY:
        case SINGLE_NO_EXIT:
                if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
                        wakeup_swapper |= thread_unsuspend_one(td2, p, false);
                if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
                        wakeup_swapper |= sleepq_abort(td2, ERESTART);
                break;
        case SINGLE_ALLPROC:
                /*
                 * ALLPROC suspend tries to avoid spurious EINTR for
                 * threads sleeping interruptibly, by suspending the
                 * thread directly, similarly to sig_suspend_threads().
                 * Since such sleep is not performed at the user
                 * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
                 * is used to avoid immediate un-suspend.
                 */
                if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
                    TDF_ALLPROCSUSP)) == 0)
                        wakeup_swapper |= thread_unsuspend_one(td2, p, false);
                if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
                        if ((td2->td_flags & TDF_SBDRY) == 0) {
                                thread_suspend_one(td2);
                                td2->td_flags |= TDF_ALLPROCSUSP;
                        } else {
                                wakeup_swapper |= sleepq_abort(td2, ERESTART);
                        }
                }
                break;
        }
        return (wakeup_swapper);
}
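/*
 * The single-threading modes, summarized (this summary follows the
 * logic of calc_remaining() and weed_inhib() above):
 *
 * SINGLE_EXIT      all other threads must exit; interruptible sleeps
 *                  are aborted with EINTR.
 * SINGLE_BOUNDARY  other threads park at the user boundary
 *                  (TDF_BOUNDARY) and are not forced to exit.
 * SINGLE_NO_EXIT   other threads suspend wherever it is safe; none
 *                  are forced to exit.
 * SINGLE_ALLPROC   suspend the threads of another process (total
 *                  stop), so the target count is 0 instead of 1.
 */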
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(struct proc *p, int mode)
{
        struct thread *td;
        struct thread *td2;
        int remaining, wakeup_swapper;

        td = curthread;
        KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
            mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
            ("invalid mode %d", mode));
        /*
         * If allowing non-ALLPROC singlethreading for non-curproc
         * callers, calc_remaining() and remain_for_mode() should be
         * adjusted to also account for td->td_proc != p.  For now
         * this is not implemented because it is not used.
         */
        KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
            (mode != SINGLE_ALLPROC && td->td_proc == p),
            ("mode %d proc %p curproc %p", mode, p, td->td_proc));
        mtx_assert(&Giant, MA_NOTOWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);

        if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
                return (0);

        /* Is someone already single threading? */
        if (p->p_singlethread != NULL && p->p_singlethread != td)
                return (1);

        if (mode == SINGLE_EXIT) {
                p->p_flag |= P_SINGLE_EXIT;
                p->p_flag &= ~P_SINGLE_BOUNDARY;
        } else {
                p->p_flag &= ~P_SINGLE_EXIT;
                if (mode == SINGLE_BOUNDARY)
                        p->p_flag |= P_SINGLE_BOUNDARY;
                else
                        p->p_flag &= ~P_SINGLE_BOUNDARY;
        }
        if (mode == SINGLE_ALLPROC)
                p->p_flag |= P_TOTAL_STOP;
        p->p_flag |= P_STOPPED_SINGLE;
        PROC_SLOCK(p);
        p->p_singlethread = td;
        remaining = calc_remaining(p, mode);
        while (remaining != remain_for_mode(mode)) {
                if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
                        goto stopme;
                wakeup_swapper = 0;
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        thread_lock(td2);
                        td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
                        if (TD_IS_INHIBITED(td2)) {
                                wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
                        } else if (TD_IS_RUNNING(td2) && td != td2) {
                                forward_signal(td2);
#endif
                        }
                        thread_unlock(td2);
                }
                if (wakeup_swapper)
                        kick_proc0();
                remaining = calc_remaining(p, mode);

                /*
                 * Maybe we suspended some threads... was it enough?
                 */
                if (remaining == remain_for_mode(mode))
                        break;

stopme:
                /*
                 * Wake us up when everyone else has suspended.
                 * In the meantime we suspend as well.
                 */
                thread_suspend_switch(td, p);
                remaining = calc_remaining(p, mode);
        }
        if (mode == SINGLE_EXIT) {
                /*
                 * Convert the process to an unthreaded process.  The
                 * SINGLE_EXIT is called by exit1() or execve(), in
                 * both cases other threads must be retired.
                 */
                KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
                p->p_singlethread = NULL;
                p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);

                /*
                 * Wait for any remaining threads to exit cpu_throw().
                 */
                while (p->p_exitthreads != 0) {
                        PROC_SUNLOCK(p);
                        PROC_UNLOCK(p);
                        sched_relinquish(td);
                        PROC_LOCK(p);
                        PROC_SLOCK(p);
                }
        } else if (mode == SINGLE_BOUNDARY) {
                /*
                 * Wait until all suspended threads are removed from
                 * the processors.
                 * The thread_suspend_check() increments
                 * p_boundary_count while it is still running, which
                 * makes it possible for the execve() to destroy the
                 * vmspace while our other threads are still using the
                 * address space.
                 *
                 * We lock the thread, which is only allowed to
                 * succeed after context switch code finished using
                 * the address space.
                 */
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        thread_lock(td2);
                        KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
                            ("td %p not on boundary", td2));
                        KASSERT(TD_IS_SUSPENDED(td2),
                            ("td %p is not suspended", td2));
                        thread_unlock(td2);
                }
        }
        PROC_SUNLOCK(p);
        return (0);
}

bool
thread_suspend_check_needed(void)
{
        struct proc *p;
        struct thread *td;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
            (td->td_dbgflags & TDB_SUSPEND) != 0));
}

/*
 * Called from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
        struct thread *td;
        struct proc *p;
        int wakeup_swapper;

        td = curthread;
        p = td->td_proc;
        mtx_assert(&Giant, MA_NOTOWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        while (thread_suspend_check_needed()) {
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        KASSERT(p->p_singlethread != NULL,
                            ("singlethread not set"));
                        /*
                         * The only suspension in action is a
                         * single-threading.  Single threader need not stop.
                         * It is safe to access p->p_singlethread unlocked
                         * because it can only be set to our address by us.
                         */
                        if (p->p_singlethread == td)
                                return (0);     /* Exempt from stopping. */
                }
                if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
                        return (EINTR);

                /* Should we goto user boundary if we didn't come from there? */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
                    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
                        return (ERESTART);

                /*
                 * Ignore suspend requests if they are deferred.
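                 * (TDF_SBDRY is set while stops are deferred, e.g.
                 * via sigdeferstop(9) around sleeps that hold
                 * resources; depending on TDF_SEINTR or TDF_SERESTART
                 * the sleep is then failed with EINTR or ERESTART
                 * instead of suspending here.)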
                 */
                if ((td->td_flags & TDF_SBDRY) != 0) {
                        KASSERT(return_instead,
                            ("TDF_SBDRY set for unsafe thread_suspend_check"));
                        KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
                            (TDF_SEINTR | TDF_SERESTART),
                            ("both TDF_SEINTR and TDF_SERESTART"));
                        return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
                }

                /*
                 * If the process is waiting for us to exit,
                 * this thread should just suicide.
                 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
                 */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                        PROC_UNLOCK(p);

                        /*
                         * Allow Linux emulation layer to do some work
                         * before thread suicide.
                         */
                        if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
                                (p->p_sysent->sv_thread_detach)(td);
                        umtx_thread_exit(td);
                        kern_thr_exit(td);
                        panic("stopped thread did not exit");
                }

                PROC_SLOCK(p);
                thread_stopped(p);
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount + 1) {
                                thread_lock(p->p_singlethread);
                                wakeup_swapper = thread_unsuspend_one(
                                    p->p_singlethread, p, false);
                                thread_unlock(p->p_singlethread);
                                if (wakeup_swapper)
                                        kick_proc0();
                        }
                }
                PROC_UNLOCK(p);
                thread_lock(td);
                /*
                 * When a thread suspends, it just
                 * gets taken off all queues.
                 */
                thread_suspend_one(td);
                if (return_instead == 0) {
                        p->p_boundary_count++;
                        td->td_flags |= TDF_BOUNDARY;
                }
                PROC_SUNLOCK(p);
                mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
                thread_unlock(td);
                PROC_LOCK(p);
        }
        return (0);
}

void
thread_suspend_switch(struct thread *td, struct proc *p)
{

        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        /*
         * We implement thread_suspend_one in stages here to avoid
         * dropping the proc lock while the thread lock is owned.
         */
        if (p == td->td_proc) {
                thread_stopped(p);
                p->p_suspcount++;
        }
        PROC_UNLOCK(p);
        thread_lock(td);
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
        PROC_SUNLOCK(p);
        DROP_GIANT();
        mi_switch(SW_VOL | SWT_SUSPEND, NULL);
        thread_unlock(td);
        PICKUP_GIANT();
        PROC_LOCK(p);
        PROC_SLOCK(p);
}

void
thread_suspend_one(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        p->p_suspcount++;
        td->td_flags &= ~TDF_NEEDSUSPCHK;
        TD_SET_SUSPENDED(td);
        sched_sleep(td, 0);
}

static int
thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
{

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
        TD_CLR_SUSPENDED(td);
        td->td_flags &= ~TDF_ALLPROCSUSP;
        if (td->td_proc == p) {
                PROC_SLOCK_ASSERT(p, MA_OWNED);
                p->p_suspcount--;
                if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
                        td->td_flags &= ~TDF_BOUNDARY;
                        p->p_boundary_count--;
                }
        }
        return (setrunnable(td));
}
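/*
 * thread_unsuspend_one() returns the result of setrunnable(), which is
 * nonzero when the swapper must be woken to bring the thread's stack
 * back in.  Callers accumulate these results into a wakeup_swapper
 * flag and call kick_proc0() only after the relevant thread locks have
 * been dropped, which is why that pattern recurs throughout this file.
 */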
/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;
        int wakeup_swapper;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);
        wakeup_swapper = 0;
        if (!P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                wakeup_swapper |= thread_unsuspend_one(td, p,
                                    true);
                        }
                        thread_unlock(td);
                }
        } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
            p->p_numthreads == p->p_suspcount) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request.  Now we've downgraded to single-threaded,
                 * let it continue.
                 */
                if (p->p_singlethread->td_proc == p) {
                        thread_lock(p->p_singlethread);
                        wakeup_swapper = thread_unsuspend_one(
                            p->p_singlethread, p, false);
                        thread_unlock(p->p_singlethread);
                }
        }
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * End the single threading mode.
 */
void
thread_single_end(struct proc *p, int mode)
{
        struct thread *td;
        int wakeup_swapper;

        KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
            mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
            ("invalid mode %d", mode));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
            (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
            ("mode %d does not match P_TOTAL_STOP", mode));
        KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
            ("thread_single_end from other thread %p %p",
            curthread, p->p_singlethread));
        KASSERT(mode != SINGLE_BOUNDARY ||
            (p->p_flag & P_SINGLE_BOUNDARY) != 0,
            ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
        p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
            P_TOTAL_STOP);
        PROC_SLOCK(p);
        p->p_singlethread = NULL;
        wakeup_swapper = 0;
        /*
         * If there are other threads they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process.  The single threader must be allowed
         * to continue however as this is a bad place to stop.
         */
        if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        if (TD_IS_SUSPENDED(td)) {
                                wakeup_swapper |= thread_unsuspend_one(td, p,
                                    mode == SINGLE_BOUNDARY);
                        }
                        thread_unlock(td);
                }
        }
        KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
            ("inconsistent boundary count %d", p->p_boundary_count));
        PROC_SUNLOCK(p);
        if (wakeup_swapper)
                kick_proc0();
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_tid == tid)
                        break;
        }
        return (td);
}
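/*
 * Unlike thread_find() above, tdfind() below needs no process pointer;
 * it consults the global tid hash.  A usage sketch (illustrative;
 * do_something() is a placeholder):
 *
 *	td = tdfind(tid, curproc->p_pid);
 *	if (td == NULL)
 *		return (ESRCH);
 *	error = do_something(td);
 *	PROC_UNLOCK(td->td_proc);
 */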
/*
 * Locate a thread by number; return with proc lock held.
 */
struct thread *
tdfind(lwpid_t tid, pid_t pid)
{
#define RUN_THRESH      16
        struct thread *td;
        int run = 0;

        rw_rlock(&tidhash_lock);
        LIST_FOREACH(td, TIDHASH(tid), td_hash) {
                if (td->td_tid == tid) {
                        if (pid != -1 && td->td_proc->p_pid != pid) {
                                td = NULL;
                                break;
                        }
                        PROC_LOCK(td->td_proc);
                        if (td->td_proc->p_state == PRS_NEW) {
                                PROC_UNLOCK(td->td_proc);
                                td = NULL;
                                break;
                        }
                        /*
                         * Entries found deep in the hash chain are
                         * moved to the front to speed up future
                         * lookups.
                         */
                        if (run > RUN_THRESH) {
                                if (rw_try_upgrade(&tidhash_lock)) {
                                        LIST_REMOVE(td, td_hash);
                                        LIST_INSERT_HEAD(TIDHASH(td->td_tid),
                                            td, td_hash);
                                        rw_wunlock(&tidhash_lock);
                                        return (td);
                                }
                        }
                        break;
                }
                run++;
        }
        rw_runlock(&tidhash_lock);
        return (td);
}

void
tidhash_add(struct thread *td)
{
        rw_wlock(&tidhash_lock);
        LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
        rw_wunlock(&tidhash_lock);
}

void
tidhash_remove(struct thread *td)
{
        rw_wlock(&tidhash_lock);
        LIST_REMOVE(td, td_hash);
        rw_wunlock(&tidhash_lock);
}