1 /* 2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice(s), this list of conditions and the following disclaimer as 10 * the first lines of this file unmodified other than the possible 11 * addition of one or more copyright notices. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice(s), this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26 * DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/lock.h> 35 #include <sys/malloc.h> 36 #include <sys/mutex.h> 37 #include <sys/proc.h> 38 #include <sys/smp.h> 39 #include <sys/sysctl.h> 40 #include <sys/sysproto.h> 41 #include <sys/filedesc.h> 42 #include <sys/sched.h> 43 #include <sys/signalvar.h> 44 #include <sys/sx.h> 45 #include <sys/tty.h> 46 #include <sys/user.h> 47 #include <sys/jail.h> 48 #include <sys/kse.h> 49 #include <sys/ktr.h> 50 #include <sys/ucontext.h> 51 52 #include <vm/vm.h> 53 #include <vm/vm_object.h> 54 #include <vm/pmap.h> 55 #include <vm/uma.h> 56 #include <vm/vm_map.h> 57 58 #include <machine/frame.h> 59 60 /* 61 * KSEGRP related storage. 
62 */ 63 static uma_zone_t ksegrp_zone; 64 static uma_zone_t kse_zone; 65 static uma_zone_t thread_zone; 66 static uma_zone_t upcall_zone; 67 68 /* DEBUG ONLY */ 69 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); 70 static int thread_debug = 0; 71 SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW, 72 &thread_debug, 0, "thread debug"); 73 74 static int max_threads_per_proc = 30; 75 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW, 76 &max_threads_per_proc, 0, "Limit on threads per proc"); 77 78 static int max_groups_per_proc = 5; 79 SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW, 80 &max_groups_per_proc, 0, "Limit on thread groups per proc"); 81 82 static int max_threads_hits; 83 SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD, 84 &max_threads_hits, 0, ""); 85 86 static int virtual_cpu; 87 88 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) 89 90 TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); 91 TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses); 92 TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps); 93 TAILQ_HEAD(, kse_upcall) zombie_upcalls = 94 TAILQ_HEAD_INITIALIZER(zombie_upcalls); 95 struct mtx kse_zombie_lock; 96 MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN); 97 98 static void kse_purge(struct proc *p, struct thread *td); 99 static void kse_purge_group(struct thread *td); 100 static int thread_update_usr_ticks(struct thread *td, int user); 101 static void thread_alloc_spare(struct thread *td, struct thread *spare); 102 103 static int 104 sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS) 105 { 106 int error, new_val; 107 int def_val; 108 109 #ifdef SMP 110 def_val = mp_ncpus; 111 #else 112 def_val = 1; 113 #endif 114 if (virtual_cpu == 0) 115 new_val = def_val; 116 else 117 new_val = virtual_cpu; 118 error = sysctl_handle_int(oidp, &new_val, 0, req); 119 if (error != 0 || req->newptr == NULL) 120 return (error); 121 if (new_val < 0) 122 return (EINVAL); 123 virtual_cpu = new_val; 124 return (0); 125 } 126 127 /* DEBUG ONLY */ 128 SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW, 129 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I", 130 "debug virtual cpus"); 131 132 /* 133 * Prepare a thread for use. 134 */ 135 static void 136 thread_ctor(void *mem, int size, void *arg) 137 { 138 struct thread *td; 139 140 td = (struct thread *)mem; 141 td->td_state = TDS_INACTIVE; 142 } 143 144 /* 145 * Reclaim a thread after use. 146 */ 147 static void 148 thread_dtor(void *mem, int size, void *arg) 149 { 150 struct thread *td; 151 152 td = (struct thread *)mem; 153 154 #ifdef INVARIANTS 155 /* Verify that this thread is in a safe state to free. */ 156 switch (td->td_state) { 157 case TDS_INHIBITED: 158 case TDS_RUNNING: 159 case TDS_CAN_RUN: 160 case TDS_RUNQ: 161 /* 162 * We must never unlink a thread that is in one of 163 * these states, because it is currently active. 164 */ 165 panic("bad state for thread unlinking"); 166 /* NOTREACHED */ 167 case TDS_INACTIVE: 168 break; 169 default: 170 panic("bad thread state"); 171 /* NOTREACHED */ 172 } 173 #endif 174 } 175 176 /* 177 * Initialize type-stable parts of a thread (when newly created). 
178 */ 179 static void 180 thread_init(void *mem, int size) 181 { 182 struct thread *td; 183 184 td = (struct thread *)mem; 185 mtx_lock(&Giant); 186 pmap_new_thread(td, 0); 187 mtx_unlock(&Giant); 188 cpu_thread_setup(td); 189 td->td_sched = (struct td_sched *)&td[1]; 190 } 191 192 /* 193 * Tear down type-stable parts of a thread (just before being discarded). 194 */ 195 static void 196 thread_fini(void *mem, int size) 197 { 198 struct thread *td; 199 200 td = (struct thread *)mem; 201 pmap_dispose_thread(td); 202 } 203 204 /* 205 * Initialize type-stable parts of a kse (when newly created). 206 */ 207 static void 208 kse_init(void *mem, int size) 209 { 210 struct kse *ke; 211 212 ke = (struct kse *)mem; 213 ke->ke_sched = (struct ke_sched *)&ke[1]; 214 } 215 216 /* 217 * Initialize type-stable parts of a ksegrp (when newly created). 218 */ 219 static void 220 ksegrp_init(void *mem, int size) 221 { 222 struct ksegrp *kg; 223 224 kg = (struct ksegrp *)mem; 225 kg->kg_sched = (struct kg_sched *)&kg[1]; 226 } 227 228 /* 229 * KSE is linked into kse group. 230 */ 231 void 232 kse_link(struct kse *ke, struct ksegrp *kg) 233 { 234 struct proc *p = kg->kg_proc; 235 236 TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist); 237 kg->kg_kses++; 238 ke->ke_state = KES_UNQUEUED; 239 ke->ke_proc = p; 240 ke->ke_ksegrp = kg; 241 ke->ke_thread = NULL; 242 ke->ke_oncpu = NOCPU; 243 ke->ke_flags = 0; 244 } 245 246 void 247 kse_unlink(struct kse *ke) 248 { 249 struct ksegrp *kg; 250 251 mtx_assert(&sched_lock, MA_OWNED); 252 kg = ke->ke_ksegrp; 253 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); 254 if (ke->ke_state == KES_IDLE) { 255 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); 256 kg->kg_idle_kses--; 257 } 258 if (--kg->kg_kses == 0) 259 ksegrp_unlink(kg); 260 /* 261 * Aggregate stats from the KSE 262 */ 263 kse_stash(ke); 264 } 265 266 void 267 ksegrp_link(struct ksegrp *kg, struct proc *p) 268 { 269 270 TAILQ_INIT(&kg->kg_threads); 271 TAILQ_INIT(&kg->kg_runq); /* links with td_runq */ 272 TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */ 273 TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */ 274 TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */ 275 TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */ 276 kg->kg_proc = p; 277 /* 278 * the following counters are in the -zero- section 279 * and may not need clearing 280 */ 281 kg->kg_numthreads = 0; 282 kg->kg_runnable = 0; 283 kg->kg_kses = 0; 284 kg->kg_runq_kses = 0; /* XXXKSE change name */ 285 kg->kg_idle_kses = 0; 286 kg->kg_numupcalls = 0; 287 /* link it in now that it's consistent */ 288 p->p_numksegrps++; 289 TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp); 290 } 291 292 void 293 ksegrp_unlink(struct ksegrp *kg) 294 { 295 struct proc *p; 296 297 mtx_assert(&sched_lock, MA_OWNED); 298 KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads")); 299 KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses")); 300 KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls")); 301 302 p = kg->kg_proc; 303 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); 304 p->p_numksegrps--; 305 /* 306 * Aggregate stats from the KSE 307 */ 308 ksegrp_stash(kg); 309 } 310 311 struct kse_upcall * 312 upcall_alloc(void) 313 { 314 struct kse_upcall *ku; 315 316 ku = uma_zalloc(upcall_zone, M_WAITOK); 317 bzero(ku, sizeof(*ku)); 318 return (ku); 319 } 320 321 void 322 upcall_free(struct kse_upcall *ku) 323 { 324 325 uma_zfree(upcall_zone, ku); 326 } 327 328 void 329 upcall_link(struct kse_upcall *ku, struct ksegrp *kg) 330 { 331 332 
mtx_assert(&sched_lock, MA_OWNED); 333 TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link); 334 ku->ku_ksegrp = kg; 335 kg->kg_numupcalls++; 336 } 337 338 void 339 upcall_unlink(struct kse_upcall *ku) 340 { 341 struct ksegrp *kg = ku->ku_ksegrp; 342 343 mtx_assert(&sched_lock, MA_OWNED); 344 KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__)); 345 TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link); 346 kg->kg_numupcalls--; 347 upcall_stash(ku); 348 } 349 350 void 351 upcall_remove(struct thread *td) 352 { 353 354 if (td->td_upcall) { 355 td->td_upcall->ku_owner = NULL; 356 upcall_unlink(td->td_upcall); 357 td->td_upcall = 0; 358 } 359 } 360 361 /* 362 * For a newly created process, 363 * link up all the structures and its initial threads etc. 364 */ 365 void 366 proc_linkup(struct proc *p, struct ksegrp *kg, 367 struct kse *ke, struct thread *td) 368 { 369 370 TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */ 371 TAILQ_INIT(&p->p_threads); /* all threads in proc */ 372 TAILQ_INIT(&p->p_suspended); /* Threads suspended */ 373 p->p_numksegrps = 0; 374 p->p_numthreads = 0; 375 376 ksegrp_link(kg, p); 377 kse_link(ke, kg); 378 thread_link(td, kg); 379 } 380 381 /* 382 struct kse_thr_interrupt_args { 383 struct kse_thr_mailbox * tmbx; 384 }; 385 */ 386 int 387 kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) 388 { 389 struct proc *p; 390 struct thread *td2; 391 392 p = td->td_proc; 393 if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL)) 394 return (EINVAL); 395 mtx_lock_spin(&sched_lock); 396 FOREACH_THREAD_IN_PROC(p, td2) { 397 if (td2->td_mailbox == uap->tmbx) { 398 td2->td_flags |= TDF_INTERRUPT; 399 if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) { 400 if (td2->td_flags & TDF_CVWAITQ) 401 cv_abort(td2); 402 else 403 abortsleep(td2); 404 } 405 mtx_unlock_spin(&sched_lock); 406 return (0); 407 } 408 } 409 mtx_unlock_spin(&sched_lock); 410 return (ESRCH); 411 } 412 413 /* 414 struct kse_exit_args { 415 register_t dummy; 416 }; 417 */ 418 int 419 kse_exit(struct thread *td, struct kse_exit_args *uap) 420 { 421 struct proc *p; 422 struct ksegrp *kg; 423 struct kse *ke; 424 425 p = td->td_proc; 426 /* 427 * Only UTS can call the syscall and current group 428 * should be a threaded group. 429 */ 430 if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0)) 431 return (EINVAL); 432 KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__)); 433 434 kg = td->td_ksegrp; 435 /* Serialize removing upcall */ 436 PROC_LOCK(p); 437 mtx_lock_spin(&sched_lock); 438 if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) { 439 mtx_unlock_spin(&sched_lock); 440 PROC_UNLOCK(p); 441 return (EDEADLK); 442 } 443 ke = td->td_kse; 444 upcall_remove(td); 445 if (p->p_numthreads == 1) { 446 kse_purge(p, td); 447 p->p_flag &= ~P_THREADED; 448 mtx_unlock_spin(&sched_lock); 449 PROC_UNLOCK(p); 450 } else { 451 if (kg->kg_numthreads == 1) { /* Shutdown a group */ 452 kse_purge_group(td); 453 ke->ke_flags |= KEF_EXIT; 454 } 455 thread_stopped(p); 456 thread_exit(); 457 /* NOTREACHED */ 458 } 459 return (0); 460 } 461 462 /* 463 * Either becomes an upcall or waits for an awakening event and 464 * then becomes an upcall. Only error cases return. 
465 */ 466 /* 467 struct kse_release_args { 468 struct timespec *timeout; 469 }; 470 */ 471 int 472 kse_release(struct thread *td, struct kse_release_args *uap) 473 { 474 struct proc *p; 475 struct ksegrp *kg; 476 struct timespec ts, ts2, ts3, timeout; 477 struct timeval tv; 478 int error; 479 480 p = td->td_proc; 481 kg = td->td_ksegrp; 482 /* 483 * Only UTS can call the syscall and current group 484 * should be a threaded group. 485 */ 486 if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0)) 487 return (EINVAL); 488 KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__)); 489 if (uap->timeout != NULL) { 490 if ((error = copyin(uap->timeout, &timeout, sizeof(timeout)))) 491 return (error); 492 getnanouptime(&ts); 493 timespecadd(&ts, &timeout); 494 TIMESPEC_TO_TIMEVAL(&tv, &timeout); 495 } 496 mtx_lock_spin(&sched_lock); 497 /* Change OURSELF to become an upcall. */ 498 td->td_flags = TDF_UPCALLING; 499 #if 0 /* XXX This shouldn't be necessary */ 500 if (p->p_sflag & PS_NEEDSIGCHK) 501 td->td_flags |= TDF_ASTPENDING; 502 #endif 503 mtx_unlock_spin(&sched_lock); 504 PROC_LOCK(p); 505 while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 && 506 (kg->kg_completed == NULL)) { 507 kg->kg_upsleeps++; 508 error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, 509 "kse_rel", (uap->timeout ? tvtohz(&tv) : 0)); 510 kg->kg_upsleeps--; 511 PROC_UNLOCK(p); 512 if (uap->timeout == NULL || error != EWOULDBLOCK) 513 return (0); 514 getnanouptime(&ts2); 515 if (timespeccmp(&ts2, &ts, >=)) 516 return (0); 517 ts3 = ts; 518 timespecsub(&ts3, &ts2); 519 TIMESPEC_TO_TIMEVAL(&tv, &ts3); 520 PROC_LOCK(p); 521 } 522 PROC_UNLOCK(p); 523 return (0); 524 } 525 526 /* struct kse_wakeup_args { 527 struct kse_mailbox *mbx; 528 }; */ 529 int 530 kse_wakeup(struct thread *td, struct kse_wakeup_args *uap) 531 { 532 struct proc *p; 533 struct ksegrp *kg; 534 struct kse_upcall *ku; 535 struct thread *td2; 536 537 p = td->td_proc; 538 td2 = NULL; 539 ku = NULL; 540 /* KSE-enabled processes only, please. */ 541 if (!(p->p_flag & P_THREADED)) 542 return (EINVAL); 543 PROC_LOCK(p); 544 mtx_lock_spin(&sched_lock); 545 if (uap->mbx) { 546 FOREACH_KSEGRP_IN_PROC(p, kg) { 547 FOREACH_UPCALL_IN_GROUP(kg, ku) { 548 if (ku->ku_mailbox == uap->mbx) 549 break; 550 } 551 if (ku) 552 break; 553 } 554 } else { 555 kg = td->td_ksegrp; 556 if (kg->kg_upsleeps) { 557 wakeup_one(&kg->kg_completed); 558 mtx_unlock_spin(&sched_lock); 559 PROC_UNLOCK(p); 560 return (0); 561 } 562 ku = TAILQ_FIRST(&kg->kg_upcalls); 563 } 564 if (ku) { 565 if ((td2 = ku->ku_owner) == NULL) { 566 panic("%s: no owner", __func__); 567 } else if (TD_ON_SLEEPQ(td2) && 568 (td2->td_wchan == &kg->kg_completed)) { 569 abortsleep(td2); 570 } else { 571 ku->ku_flags |= KUF_DOUPCALL; 572 } 573 mtx_unlock_spin(&sched_lock); 574 PROC_UNLOCK(p); 575 return (0); 576 } 577 mtx_unlock_spin(&sched_lock); 578 PROC_UNLOCK(p); 579 return (ESRCH); 580 } 581 582 /* 583 * No new KSEG: first call: use current KSE, don't schedule an upcall 584 * All other situations, do allocate max new KSEs and schedule an upcall. 
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always have a cpu counter. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (thread_debug && virtual_cpu != 0)
		ncpus = virtual_cpu;

	/* Easier to just set it than to test and set */
	PROC_LOCK(p);
	p->p_flag |= P_THREADED;
	PROC_UNLOCK(p);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* There is a race condition here, but it is cheap. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize the KSE group, optimized for MP.
		 * Create as many KSEs as there are physical cpus; this
		 * increases concurrency even if userland is not MP safe
		 * and can only run on a single CPU (true for early versions
		 * of libpthread).
		 * In an ideal world, every physical cpu should execute a
		 * thread.  If there are enough KSEs, threads in the kernel
		 * can be executed in parallel on different cpus at full
		 * speed; concurrency in the kernel shouldn't be restricted
		 * by the number of upcalls userland provides.
		 * Adding more upcall structures only increases concurrency
		 * in userland.
665 * Highest performance configuration is: 666 * N kses = N upcalls = N phyiscal cpus 667 */ 668 while (newkg->kg_kses < ncpus) { 669 newke = kse_alloc(); 670 bzero(&newke->ke_startzero, RANGEOF(struct kse, 671 ke_startzero, ke_endzero)); 672 #if 0 673 mtx_lock_spin(&sched_lock); 674 bcopy(&ke->ke_startcopy, &newke->ke_startcopy, 675 RANGEOF(struct kse, ke_startcopy, ke_endcopy)); 676 mtx_unlock_spin(&sched_lock); 677 #endif 678 mtx_lock_spin(&sched_lock); 679 kse_link(newke, newkg); 680 /* Add engine */ 681 kse_reassign(newke); 682 mtx_unlock_spin(&sched_lock); 683 } 684 } 685 newku = upcall_alloc(); 686 newku->ku_mailbox = uap->mbx; 687 newku->ku_func = mbx.km_func; 688 bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); 689 690 /* For the first call this may not have been set */ 691 if (td->td_standin == NULL) 692 thread_alloc_spare(td, NULL); 693 694 mtx_lock_spin(&sched_lock); 695 if (newkg->kg_numupcalls >= ncpus) { 696 mtx_unlock_spin(&sched_lock); 697 upcall_free(newku); 698 return (EPROCLIM); 699 } 700 upcall_link(newku, newkg); 701 if (mbx.km_quantum) 702 newkg->kg_upquantum = max(1, mbx.km_quantum/tick); 703 704 /* 705 * Each upcall structure has an owner thread, find which 706 * one owns it. 707 */ 708 if (uap->newgroup) { 709 /* 710 * Because new ksegrp hasn't thread, 711 * create an initial upcall thread to own it. 712 */ 713 thread_schedule_upcall(td, newku); 714 } else { 715 /* 716 * If current thread hasn't an upcall structure, 717 * just assign the upcall to it. 718 */ 719 if (td->td_upcall == NULL) { 720 newku->ku_owner = td; 721 td->td_upcall = newku; 722 } else { 723 /* 724 * Create a new upcall thread to own it. 725 */ 726 thread_schedule_upcall(td, newku); 727 } 728 } 729 mtx_unlock_spin(&sched_lock); 730 return (0); 731 } 732 733 /* 734 * Fill a ucontext_t with a thread's context information. 735 * 736 * This is an analogue to getcontext(3). 737 */ 738 void 739 thread_getcontext(struct thread *td, ucontext_t *uc) 740 { 741 742 /* 743 * XXX this is declared in a MD include file, i386/include/ucontext.h but 744 * is used in MI code. 745 */ 746 #ifdef __i386__ 747 get_mcontext(td, &uc->uc_mcontext); 748 #endif 749 uc->uc_sigmask = td->td_sigmask; 750 } 751 752 /* 753 * Set a thread's context from a ucontext_t. 754 * 755 * This is an analogue to setcontext(3). 756 */ 757 int 758 thread_setcontext(struct thread *td, ucontext_t *uc) 759 { 760 int ret; 761 762 /* 763 * XXX this is declared in a MD include file, i386/include/ucontext.h but 764 * is used in MI code. 765 */ 766 #ifdef __i386__ 767 ret = set_mcontext(td, &uc->uc_mcontext); 768 #else 769 ret = ENOSYS; 770 #endif 771 if (ret == 0) { 772 SIG_CANTMASK(uc->uc_sigmask); 773 PROC_LOCK(td->td_proc); 774 td->td_sigmask = uc->uc_sigmask; 775 PROC_UNLOCK(td->td_proc); 776 } 777 return (ret); 778 } 779 780 /* 781 * Initialize global thread allocation resources. 782 */ 783 void 784 threadinit(void) 785 { 786 787 #ifndef __ia64__ 788 thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), 789 thread_ctor, thread_dtor, thread_init, thread_fini, 790 UMA_ALIGN_CACHE, 0); 791 #else 792 /* 793 * XXX the ia64 kstack allocator is really lame and is at the mercy 794 * of contigmallloc(). This hackery is to pre-construct a whole 795 * pile of thread structures with associated kernel stacks early 796 * in the system startup while contigmalloc() still works. Once we 797 * have them, keep them. Sigh. 
	 */
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	uma_prealloc(thread_zone, 512);		/* XXX arbitrary */
#endif
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
937 */ 938 struct thread * 939 thread_alloc(void) 940 { 941 thread_reap(); /* check if any zombies to get */ 942 return (uma_zalloc(thread_zone, M_WAITOK)); 943 } 944 945 /* 946 * Deallocate a ksegrp. 947 */ 948 void 949 ksegrp_free(struct ksegrp *td) 950 { 951 uma_zfree(ksegrp_zone, td); 952 } 953 954 /* 955 * Deallocate a kse. 956 */ 957 void 958 kse_free(struct kse *td) 959 { 960 uma_zfree(kse_zone, td); 961 } 962 963 /* 964 * Deallocate a thread. 965 */ 966 void 967 thread_free(struct thread *td) 968 { 969 970 cpu_thread_clean(td); 971 uma_zfree(thread_zone, td); 972 } 973 974 /* 975 * Store the thread context in the UTS's mailbox. 976 * then add the mailbox at the head of a list we are building in user space. 977 * The list is anchored in the ksegrp structure. 978 */ 979 int 980 thread_export_context(struct thread *td) 981 { 982 struct proc *p; 983 struct ksegrp *kg; 984 uintptr_t mbx; 985 void *addr; 986 int error,temp; 987 ucontext_t uc; 988 989 p = td->td_proc; 990 kg = td->td_ksegrp; 991 992 /* Export the user/machine context. */ 993 addr = (void *)(&td->td_mailbox->tm_context); 994 error = copyin(addr, &uc, sizeof(ucontext_t)); 995 if (error) 996 goto bad; 997 998 thread_getcontext(td, &uc); 999 error = copyout(&uc, addr, sizeof(ucontext_t)); 1000 if (error) 1001 goto bad; 1002 1003 /* Exports clock ticks in kernel mode */ 1004 addr = (caddr_t)(&td->td_mailbox->tm_sticks); 1005 temp = fuword(addr) + td->td_usticks; 1006 if (suword(addr, temp)) 1007 goto bad; 1008 1009 /* Get address in latest mbox of list pointer */ 1010 addr = (void *)(&td->td_mailbox->tm_next); 1011 /* 1012 * Put the saved address of the previous first 1013 * entry into this one 1014 */ 1015 for (;;) { 1016 mbx = (uintptr_t)kg->kg_completed; 1017 if (suword(addr, mbx)) { 1018 error = EFAULT; 1019 goto bad; 1020 } 1021 PROC_LOCK(p); 1022 if (mbx == (uintptr_t)kg->kg_completed) { 1023 kg->kg_completed = td->td_mailbox; 1024 /* 1025 * The thread context may be taken away by 1026 * other upcall threads when we unlock 1027 * process lock. it's no longer valid to 1028 * use it again in any other places. 1029 */ 1030 td->td_mailbox = NULL; 1031 PROC_UNLOCK(p); 1032 break; 1033 } 1034 PROC_UNLOCK(p); 1035 } 1036 td->td_usticks = 0; 1037 return (0); 1038 1039 bad: 1040 PROC_LOCK(p); 1041 psignal(p, SIGSEGV); 1042 PROC_UNLOCK(p); 1043 /* The mailbox is bad, don't use it */ 1044 td->td_mailbox = NULL; 1045 td->td_usticks = 0; 1046 return (error); 1047 } 1048 1049 /* 1050 * Take the list of completed mailboxes for this KSEGRP and put them on this 1051 * upcall's mailbox as it's the next one going up. 
1052 */ 1053 static int 1054 thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) 1055 { 1056 struct proc *p = kg->kg_proc; 1057 void *addr; 1058 uintptr_t mbx; 1059 1060 addr = (void *)(&ku->ku_mailbox->km_completed); 1061 for (;;) { 1062 mbx = (uintptr_t)kg->kg_completed; 1063 if (suword(addr, mbx)) { 1064 PROC_LOCK(p); 1065 psignal(p, SIGSEGV); 1066 PROC_UNLOCK(p); 1067 return (EFAULT); 1068 } 1069 PROC_LOCK(p); 1070 if (mbx == (uintptr_t)kg->kg_completed) { 1071 kg->kg_completed = NULL; 1072 PROC_UNLOCK(p); 1073 break; 1074 } 1075 PROC_UNLOCK(p); 1076 } 1077 return (0); 1078 } 1079 1080 /* 1081 * This function should be called at statclock interrupt time 1082 */ 1083 int 1084 thread_statclock(int user) 1085 { 1086 struct thread *td = curthread; 1087 1088 if (td->td_ksegrp->kg_numupcalls == 0) 1089 return (-1); 1090 if (user) { 1091 /* Current always do via ast() */ 1092 mtx_lock_spin(&sched_lock); 1093 td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING); 1094 mtx_unlock_spin(&sched_lock); 1095 td->td_uuticks++; 1096 } else { 1097 if (td->td_mailbox != NULL) 1098 td->td_usticks++; 1099 else { 1100 /* XXXKSE 1101 * We will call thread_user_enter() for every 1102 * kernel entry in future, so if the thread mailbox 1103 * is NULL, it must be a UTS kernel, don't account 1104 * clock ticks for it. 1105 */ 1106 } 1107 } 1108 return (0); 1109 } 1110 1111 /* 1112 * Export state clock ticks for userland 1113 */ 1114 static int 1115 thread_update_usr_ticks(struct thread *td, int user) 1116 { 1117 struct proc *p = td->td_proc; 1118 struct kse_thr_mailbox *tmbx; 1119 struct kse_upcall *ku; 1120 struct ksegrp *kg; 1121 caddr_t addr; 1122 uint uticks; 1123 1124 if ((ku = td->td_upcall) == NULL) 1125 return (-1); 1126 1127 tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); 1128 if ((tmbx == NULL) || (tmbx == (void *)-1)) 1129 return (-1); 1130 if (user) { 1131 uticks = td->td_uuticks; 1132 td->td_uuticks = 0; 1133 addr = (caddr_t)&tmbx->tm_uticks; 1134 } else { 1135 uticks = td->td_usticks; 1136 td->td_usticks = 0; 1137 addr = (caddr_t)&tmbx->tm_sticks; 1138 } 1139 if (uticks) { 1140 if (suword(addr, uticks+fuword(addr))) { 1141 PROC_LOCK(p); 1142 psignal(p, SIGSEGV); 1143 PROC_UNLOCK(p); 1144 return (-2); 1145 } 1146 } 1147 kg = td->td_ksegrp; 1148 if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) { 1149 mtx_lock_spin(&sched_lock); 1150 td->td_upcall->ku_flags |= KUF_DOUPCALL; 1151 mtx_unlock_spin(&sched_lock); 1152 } 1153 return (0); 1154 } 1155 1156 /* 1157 * Discard the current thread and exit from its context. 1158 * 1159 * Because we can't free a thread while we're operating under its context, 1160 * push the current thread into our CPU's deadthread holder. This means 1161 * we needn't worry about someone else grabbing our context before we 1162 * do a cpu_throw(). 
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		/*
		 * Unlink this thread from its proc and the kseg.
		 * In keeping with the other structs we probably should
		 * have a thread_unlink() that does some of this but it
		 * would only be called from here (I think) so it would
		 * be a waste. (might be useful for proc_fini() as well.)
		 */
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		p->p_numthreads--;
		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
		kg->kg_numthreads--;
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and the owner thread exits only when the process is
		 * exiting, an upcall to userland is no longer needed and
		 * deleting the upcall structure is safe here.
		 * So when all threads in a group have exited, all upcalls
		 * in the group are automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse	= NULL;
		td->td_state	= TDS_INACTIVE;
		td->td_proc	= NULL;
		td->td_ksegrp	= NULL;
		td->td_last_kse	= NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if defined(__i386__) || defined(__sparc64__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc.
*/ 1282 } 1283 1284 /* 1285 * Link a thread to a process. 1286 * set up anything that needs to be initialized for it to 1287 * be used by the process. 1288 * 1289 * Note that we do not link to the proc's ucred here. 1290 * The thread is linked as if running but no KSE assigned. 1291 */ 1292 void 1293 thread_link(struct thread *td, struct ksegrp *kg) 1294 { 1295 struct proc *p; 1296 1297 p = kg->kg_proc; 1298 td->td_state = TDS_INACTIVE; 1299 td->td_proc = p; 1300 td->td_ksegrp = kg; 1301 td->td_last_kse = NULL; 1302 td->td_flags = 0; 1303 td->td_kse = NULL; 1304 1305 LIST_INIT(&td->td_contested); 1306 callout_init(&td->td_slpcallout, 1); 1307 TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist); 1308 TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist); 1309 p->p_numthreads++; 1310 kg->kg_numthreads++; 1311 } 1312 1313 /* 1314 * Purge a ksegrp resource. When a ksegrp is preparing to 1315 * exit, it calls this function. 1316 */ 1317 void 1318 kse_purge_group(struct thread *td) 1319 { 1320 struct ksegrp *kg; 1321 struct kse *ke; 1322 1323 kg = td->td_ksegrp; 1324 KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__)); 1325 while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { 1326 KASSERT(ke->ke_state == KES_IDLE, 1327 ("%s: wrong idle KSE state", __func__)); 1328 kse_unlink(ke); 1329 } 1330 KASSERT((kg->kg_kses == 1), 1331 ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses)); 1332 KASSERT((kg->kg_numupcalls == 0), 1333 ("%s: ksegrp still has %d upcall datas", 1334 __func__, kg->kg_numupcalls)); 1335 } 1336 1337 /* 1338 * Purge a process's KSE resource. When a process is preparing to 1339 * exit, it calls kse_purge to release any extra KSE resources in 1340 * the process. 1341 */ 1342 void 1343 kse_purge(struct proc *p, struct thread *td) 1344 { 1345 struct ksegrp *kg; 1346 struct kse *ke; 1347 1348 KASSERT(p->p_numthreads == 1, ("bad thread number")); 1349 mtx_lock_spin(&sched_lock); 1350 while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) { 1351 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); 1352 p->p_numksegrps--; 1353 /* 1354 * There is no ownership for KSE, after all threads 1355 * in the group exited, it is possible that some KSEs 1356 * were left in idle queue, gc them now. 1357 */ 1358 while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { 1359 KASSERT(ke->ke_state == KES_IDLE, 1360 ("%s: wrong idle KSE state", __func__)); 1361 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); 1362 kg->kg_idle_kses--; 1363 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); 1364 kg->kg_kses--; 1365 kse_stash(ke); 1366 } 1367 KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) || 1368 ((kg->kg_kses == 1) && (kg == td->td_ksegrp)), 1369 ("ksegrp has wrong kg_kses: %d", kg->kg_kses)); 1370 KASSERT((kg->kg_numupcalls == 0), 1371 ("%s: ksegrp still has %d upcall datas", 1372 __func__, kg->kg_numupcalls)); 1373 1374 if (kg != td->td_ksegrp) 1375 ksegrp_stash(kg); 1376 } 1377 TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp); 1378 p->p_numksegrps++; 1379 mtx_unlock_spin(&sched_lock); 1380 } 1381 1382 /* 1383 * This function is intended to be used to initialize a spare thread 1384 * for upcall. Initialize thread's large data area outside sched_lock 1385 * for thread_schedule_upcall(). 
1386 */ 1387 void 1388 thread_alloc_spare(struct thread *td, struct thread *spare) 1389 { 1390 if (td->td_standin) 1391 return; 1392 if (spare == NULL) 1393 spare = thread_alloc(); 1394 td->td_standin = spare; 1395 bzero(&spare->td_startzero, 1396 (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); 1397 spare->td_proc = td->td_proc; 1398 /* Setup PCB and fork address */ 1399 cpu_set_upcall(spare, td->td_pcb); 1400 /* 1401 * XXXKSE do we really need this? (default values for the 1402 * frame). 1403 */ 1404 bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe)); 1405 spare->td_ucred = crhold(td->td_ucred); 1406 } 1407 1408 /* 1409 * Create a thread and schedule it for upcall on the KSE given. 1410 * Use our thread's standin so that we don't have to allocate one. 1411 */ 1412 struct thread * 1413 thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) 1414 { 1415 struct thread *td2; 1416 1417 mtx_assert(&sched_lock, MA_OWNED); 1418 1419 /* 1420 * Schedule an upcall thread on specified kse_upcall, 1421 * the kse_upcall must be free. 1422 * td must have a spare thread. 1423 */ 1424 KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__)); 1425 if ((td2 = td->td_standin) != NULL) { 1426 td->td_standin = NULL; 1427 } else { 1428 panic("no reserve thread when scheduling an upcall"); 1429 return (NULL); 1430 } 1431 CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)", 1432 td2, td->td_proc->p_pid, td->td_proc->p_comm); 1433 bcopy(&td->td_startcopy, &td2->td_startcopy, 1434 (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); 1435 thread_link(td2, ku->ku_ksegrp); 1436 /* Let the new thread become owner of the upcall */ 1437 ku->ku_owner = td2; 1438 td2->td_upcall = ku; 1439 td2->td_flags = TDF_UPCALLING; 1440 #if 0 /* XXX This shouldn't be necessary */ 1441 if (td->td_proc->p_sflag & PS_NEEDSIGCHK) 1442 td2->td_flags |= TDF_ASTPENDING; 1443 #endif 1444 td2->td_kse = NULL; 1445 td2->td_state = TDS_CAN_RUN; 1446 td2->td_inhibitors = 0; 1447 setrunqueue(td2); 1448 return (td2); /* bogus.. should be a void function */ 1449 } 1450 1451 void 1452 thread_signal_add(struct thread *td, int sig) 1453 { 1454 struct kse_upcall *ku; 1455 struct proc *p; 1456 sigset_t ss; 1457 int error; 1458 1459 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); 1460 td = curthread; 1461 ku = td->td_upcall; 1462 p = td->td_proc; 1463 1464 PROC_UNLOCK(p); 1465 error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t)); 1466 if (error) 1467 goto error; 1468 1469 SIGADDSET(ss, sig); 1470 1471 error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t)); 1472 if (error) 1473 goto error; 1474 1475 PROC_LOCK(p); 1476 return; 1477 error: 1478 PROC_LOCK(p); 1479 sigexit(td, SIGILL); 1480 } 1481 1482 1483 /* 1484 * Schedule an upcall to notify a KSE process recieved signals. 1485 * 1486 */ 1487 void 1488 thread_signal_upcall(struct thread *td) 1489 { 1490 mtx_lock_spin(&sched_lock); 1491 td->td_flags |= TDF_UPCALLING; 1492 mtx_unlock_spin(&sched_lock); 1493 1494 return; 1495 } 1496 1497 void 1498 thread_switchout(struct thread *td) 1499 { 1500 struct kse_upcall *ku; 1501 1502 mtx_assert(&sched_lock, MA_OWNED); 1503 1504 /* 1505 * If the outgoing thread is in threaded group and has never 1506 * scheduled an upcall, decide whether this is a short 1507 * or long term event and thus whether or not to schedule 1508 * an upcall. 1509 * If it is a short term event, just suspend it in 1510 * a way that takes its KSE with it. 
1511 * Select the events for which we want to schedule upcalls. 1512 * For now it's just sleep. 1513 * XXXKSE eventually almost any inhibition could do. 1514 */ 1515 if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) { 1516 /* 1517 * Release ownership of upcall, and schedule an upcall 1518 * thread, this new upcall thread becomes the owner of 1519 * the upcall structure. 1520 */ 1521 ku = td->td_upcall; 1522 ku->ku_owner = NULL; 1523 td->td_upcall = NULL; 1524 td->td_flags &= ~TDF_CAN_UNBIND; 1525 thread_schedule_upcall(td, ku); 1526 } 1527 } 1528 1529 /* 1530 * Setup done on the thread when it enters the kernel. 1531 * XXXKSE Presently only for syscalls but eventually all kernel entries. 1532 */ 1533 void 1534 thread_user_enter(struct proc *p, struct thread *td) 1535 { 1536 struct ksegrp *kg; 1537 struct kse_upcall *ku; 1538 1539 kg = td->td_ksegrp; 1540 /* 1541 * First check that we shouldn't just abort. 1542 * But check if we are the single thread first! 1543 * XXX p_singlethread not locked, but should be safe. 1544 */ 1545 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { 1546 PROC_LOCK(p); 1547 mtx_lock_spin(&sched_lock); 1548 thread_stopped(p); 1549 thread_exit(); 1550 /* NOTREACHED */ 1551 } 1552 1553 /* 1554 * If we are doing a syscall in a KSE environment, 1555 * note where our mailbox is. There is always the 1556 * possibility that we could do this lazily (in kse_reassign()), 1557 * but for now do it every time. 1558 */ 1559 kg = td->td_ksegrp; 1560 if (kg->kg_numupcalls) { 1561 ku = td->td_upcall; 1562 KASSERT(ku, ("%s: no upcall owned", __func__)); 1563 KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__)); 1564 td->td_mailbox = 1565 (void *)fuword((void *)&ku->ku_mailbox->km_curthread); 1566 if ((td->td_mailbox == NULL) || 1567 (td->td_mailbox == (void *)-1)) { 1568 /* Don't schedule upcall when blocked */ 1569 td->td_mailbox = NULL; 1570 mtx_lock_spin(&sched_lock); 1571 td->td_flags &= ~TDF_CAN_UNBIND; 1572 mtx_unlock_spin(&sched_lock); 1573 } else { 1574 if (td->td_standin == NULL) 1575 thread_alloc_spare(td, NULL); 1576 mtx_lock_spin(&sched_lock); 1577 td->td_flags |= TDF_CAN_UNBIND; 1578 mtx_unlock_spin(&sched_lock); 1579 } 1580 } 1581 } 1582 1583 /* 1584 * The extra work we go through if we are a threaded process when we 1585 * return to userland. 1586 * 1587 * If we are a KSE process and returning to user mode, check for 1588 * extra work to do before we return (e.g. for more syscalls 1589 * to complete first). If we were in a critical section, we should 1590 * just return to let it finish. Same if we were in the UTS (in 1591 * which case the mailbox's context's busy indicator will be set). 1592 * The only traps we suport will have set the mailbox. 1593 * We will clear it here. 1594 */ 1595 int 1596 thread_userret(struct thread *td, struct trapframe *frame) 1597 { 1598 int error = 0, upcalls; 1599 struct kse_upcall *ku; 1600 struct ksegrp *kg, *kg2; 1601 struct proc *p; 1602 struct timespec ts; 1603 1604 p = td->td_proc; 1605 kg = td->td_ksegrp; 1606 1607 1608 /* Nothing to do with non-threaded group/process */ 1609 if (td->td_ksegrp->kg_numupcalls == 0) 1610 return (0); 1611 1612 /* 1613 * Stat clock interrupt hit in userland, it 1614 * is returning from interrupt, charge thread's 1615 * userland time for UTS. 
1616 */ 1617 if (td->td_flags & TDF_USTATCLOCK) { 1618 thread_update_usr_ticks(td, 1); 1619 mtx_lock_spin(&sched_lock); 1620 td->td_flags &= ~TDF_USTATCLOCK; 1621 mtx_unlock_spin(&sched_lock); 1622 if (kg->kg_completed || 1623 (td->td_upcall->ku_flags & KUF_DOUPCALL)) 1624 thread_user_enter(p, td); 1625 } 1626 1627 /* 1628 * Optimisation: 1629 * This thread has not started any upcall. 1630 * If there is no work to report other than ourself, 1631 * then it can return direct to userland. 1632 */ 1633 if (TD_CAN_UNBIND(td)) { 1634 mtx_lock_spin(&sched_lock); 1635 td->td_flags &= ~TDF_CAN_UNBIND; 1636 ku = td->td_upcall; 1637 if ((td->td_flags & TDF_NEEDSIGCHK) == 0 && 1638 (kg->kg_completed == NULL) && 1639 (ku->ku_flags & KUF_DOUPCALL) == 0 && 1640 (kg->kg_upquantum && ticks >= kg->kg_nextupcall)) { 1641 mtx_unlock_spin(&sched_lock); 1642 thread_update_usr_ticks(td, 0); 1643 nanotime(&ts); 1644 error = copyout(&ts, 1645 (caddr_t)&ku->ku_mailbox->km_timeofday, 1646 sizeof(ts)); 1647 td->td_mailbox = 0; 1648 if (error) 1649 goto out; 1650 return (0); 1651 } 1652 mtx_unlock_spin(&sched_lock); 1653 error = thread_export_context(td); 1654 if (error) { 1655 /* 1656 * Failing to do the KSE operation just defaults 1657 * back to synchonous operation, so just return from 1658 * the syscall. 1659 */ 1660 return (0); 1661 } 1662 /* 1663 * There is something to report, and we own an upcall 1664 * strucuture, we can go to userland. 1665 * Turn ourself into an upcall thread. 1666 */ 1667 mtx_lock_spin(&sched_lock); 1668 td->td_flags |= TDF_UPCALLING; 1669 mtx_unlock_spin(&sched_lock); 1670 } else if (td->td_mailbox) { 1671 error = thread_export_context(td); 1672 /* possibly upcall with error? */ 1673 PROC_LOCK(p); 1674 /* 1675 * There are upcall threads waiting for 1676 * work to do, wake one of them up. 1677 * XXXKSE Maybe wake all of them up. 1678 */ 1679 if (!error && kg->kg_upsleeps) 1680 wakeup_one(&kg->kg_completed); 1681 mtx_lock_spin(&sched_lock); 1682 thread_stopped(p); 1683 thread_exit(); 1684 /* NOTREACHED */ 1685 } 1686 1687 KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind")); 1688 1689 if (p->p_numthreads > max_threads_per_proc) { 1690 max_threads_hits++; 1691 PROC_LOCK(p); 1692 while (p->p_numthreads > max_threads_per_proc) { 1693 if (P_SHOULDSTOP(p)) 1694 break; 1695 upcalls = 0; 1696 mtx_lock_spin(&sched_lock); 1697 FOREACH_KSEGRP_IN_PROC(p, kg2) { 1698 if (kg2->kg_numupcalls == 0) 1699 upcalls++; 1700 else 1701 upcalls += kg2->kg_numupcalls; 1702 } 1703 mtx_unlock_spin(&sched_lock); 1704 if (upcalls >= max_threads_per_proc) 1705 break; 1706 p->p_maxthrwaits++; 1707 msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH, 1708 "maxthreads", NULL); 1709 p->p_maxthrwaits--; 1710 } 1711 PROC_UNLOCK(p); 1712 } 1713 1714 if (td->td_flags & TDF_UPCALLING) { 1715 kg->kg_nextupcall = ticks+kg->kg_upquantum; 1716 ku = td->td_upcall; 1717 /* 1718 * There is no more work to do and we are going to ride 1719 * this thread up to userland as an upcall. 1720 * Do the last parts of the setup needed for the upcall. 1721 */ 1722 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", 1723 td, td->td_proc->p_pid, td->td_proc->p_comm); 1724 1725 /* 1726 * Set user context to the UTS. 1727 * Will use Giant in cpu_thread_clean() because it uses 1728 * kmem_free(kernel_map, ...) 
1729 */ 1730 cpu_set_upcall_kse(td, ku); 1731 mtx_lock_spin(&sched_lock); 1732 td->td_flags &= ~TDF_UPCALLING; 1733 if (ku->ku_flags & KUF_DOUPCALL) 1734 ku->ku_flags &= ~KUF_DOUPCALL; 1735 mtx_unlock_spin(&sched_lock); 1736 1737 /* 1738 * Unhook the list of completed threads. 1739 * anything that completes after this gets to 1740 * come in next time. 1741 * Put the list of completed thread mailboxes on 1742 * this KSE's mailbox. 1743 */ 1744 error = thread_link_mboxes(kg, ku); 1745 if (error) 1746 goto out; 1747 1748 /* 1749 * Set state and clear the thread mailbox pointer. 1750 * From now on we are just a bound outgoing process. 1751 * **Problem** userret is often called several times. 1752 * it would be nice if this all happenned only on the first 1753 * time through. (the scan for extra work etc.) 1754 */ 1755 error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0); 1756 if (error) 1757 goto out; 1758 1759 /* Export current system time */ 1760 nanotime(&ts); 1761 error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday, 1762 sizeof(ts)); 1763 } 1764 1765 out: 1766 if (error) { 1767 /* 1768 * Things are going to be so screwed we should just kill 1769 * the process. 1770 * how do we do that? 1771 */ 1772 PROC_LOCK(td->td_proc); 1773 psignal(td->td_proc, SIGSEGV); 1774 PROC_UNLOCK(td->td_proc); 1775 } else { 1776 /* 1777 * Optimisation: 1778 * Ensure that we have a spare thread available, 1779 * for when we re-enter the kernel. 1780 */ 1781 if (td->td_standin == NULL) 1782 thread_alloc_spare(td, NULL); 1783 } 1784 1785 /* 1786 * Clear thread mailbox first, then clear system tick count. 1787 * The order is important because thread_statclock() use 1788 * mailbox pointer to see if it is an userland thread or 1789 * an UTS kernel thread. 1790 */ 1791 td->td_mailbox = NULL; 1792 td->td_usticks = 0; 1793 return (error); /* go sync */ 1794 } 1795 1796 /* 1797 * Enforce single-threading. 1798 * 1799 * Returns 1 if the caller must abort (another thread is waiting to 1800 * exit the process or similar). Process is locked! 1801 * Returns 0 when you are successfully the only thread running. 1802 * A process has successfully single threaded in the suspend mode when 1803 * There are no threads in user mode. Threads in the kernel must be 1804 * allowed to continue until they get to the user boundary. They may even 1805 * copy out their return values and data before suspending. They may however be 1806 * accellerated in reaching the user boundary as we will wake up 1807 * any sleeping threads that are interruptable. (PCATCH). 1808 */ 1809 int 1810 thread_single(int force_exit) 1811 { 1812 struct thread *td; 1813 struct thread *td2; 1814 struct proc *p; 1815 1816 td = curthread; 1817 p = td->td_proc; 1818 mtx_assert(&Giant, MA_OWNED); 1819 PROC_LOCK_ASSERT(p, MA_OWNED); 1820 KASSERT((td != NULL), ("curthread is NULL")); 1821 1822 if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1) 1823 return (0); 1824 1825 /* Is someone already single threading? */ 1826 if (p->p_singlethread) 1827 return (1); 1828 1829 if (force_exit == SINGLE_EXIT) { 1830 p->p_flag |= P_SINGLE_EXIT; 1831 } else 1832 p->p_flag &= ~P_SINGLE_EXIT; 1833 p->p_flag |= P_STOPPED_SINGLE; 1834 p->p_singlethread = td; 1835 /* XXXKSE Which lock protects the below values? 
 */
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1) {
			mtx_unlock_spin(&sched_lock);
			break;
		}

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		/* XXX If you recursed this is broken. */
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		PROC_LOCK(p);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall) {
			mtx_lock_spin(&sched_lock);
			upcall_remove(td);
			mtx_unlock_spin(&sched_lock);
		}
		kse_purge(p, td);
	}
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0 | return_instead != 0
 *---------------+---------------------+---------------------
 *       0       | returns 0           |   returns 0 or 1
 *               | when ST ends        |   immediately
 *---------------+---------------------+---------------------
 *       1       | thread exits        |   returns 1
 *               |                     |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
1933 */ 1934 int 1935 thread_suspend_check(int return_instead) 1936 { 1937 struct thread *td; 1938 struct proc *p; 1939 struct ksegrp *kg; 1940 1941 td = curthread; 1942 p = td->td_proc; 1943 kg = td->td_ksegrp; 1944 PROC_LOCK_ASSERT(p, MA_OWNED); 1945 while (P_SHOULDSTOP(p)) { 1946 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 1947 KASSERT(p->p_singlethread != NULL, 1948 ("singlethread not set")); 1949 /* 1950 * The only suspension in action is a 1951 * single-threading. Single threader need not stop. 1952 * XXX Should be safe to access unlocked 1953 * as it can only be set to be true by us. 1954 */ 1955 if (p->p_singlethread == td) 1956 return (0); /* Exempt from stopping. */ 1957 } 1958 if (return_instead) 1959 return (1); 1960 1961 mtx_lock_spin(&sched_lock); 1962 thread_stopped(p); 1963 /* 1964 * If the process is waiting for us to exit, 1965 * this thread should just suicide. 1966 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. 1967 */ 1968 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { 1969 while (mtx_owned(&Giant)) 1970 mtx_unlock(&Giant); 1971 if (p->p_flag & P_THREADED) 1972 thread_exit(); 1973 else 1974 thr_exit1(); 1975 } 1976 1977 mtx_assert(&Giant, MA_NOTOWNED); 1978 /* 1979 * When a thread suspends, it just 1980 * moves to the processes's suspend queue 1981 * and stays there. 1982 */ 1983 thread_suspend_one(td); 1984 PROC_UNLOCK(p); 1985 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 1986 if (p->p_numthreads == p->p_suspcount) { 1987 thread_unsuspend_one(p->p_singlethread); 1988 } 1989 } 1990 p->p_stats->p_ru.ru_nivcsw++; 1991 mi_switch(); 1992 mtx_unlock_spin(&sched_lock); 1993 PROC_LOCK(p); 1994 } 1995 return (0); 1996 } 1997 1998 void 1999 thread_suspend_one(struct thread *td) 2000 { 2001 struct proc *p = td->td_proc; 2002 2003 mtx_assert(&sched_lock, MA_OWNED); 2004 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 2005 p->p_suspcount++; 2006 TD_SET_SUSPENDED(td); 2007 TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq); 2008 /* 2009 * Hack: If we are suspending but are on the sleep queue 2010 * then we are in msleep or the cv equivalent. We 2011 * want to look like we have two Inhibitors. 2012 * May already be set.. doesn't matter. 2013 */ 2014 if (TD_ON_SLEEPQ(td)) 2015 TD_SET_SLEEPING(td); 2016 } 2017 2018 void 2019 thread_unsuspend_one(struct thread *td) 2020 { 2021 struct proc *p = td->td_proc; 2022 2023 mtx_assert(&sched_lock, MA_OWNED); 2024 TAILQ_REMOVE(&p->p_suspended, td, td_runq); 2025 TD_CLR_SUSPENDED(td); 2026 p->p_suspcount--; 2027 setrunnable(td); 2028 } 2029 2030 /* 2031 * Allow all threads blocked by single threading to continue running. 2032 */ 2033 void 2034 thread_unsuspend(struct proc *p) 2035 { 2036 struct thread *td; 2037 2038 mtx_assert(&sched_lock, MA_OWNED); 2039 PROC_LOCK_ASSERT(p, MA_OWNED); 2040 if (!P_SHOULDSTOP(p)) { 2041 while (( td = TAILQ_FIRST(&p->p_suspended))) { 2042 thread_unsuspend_one(td); 2043 } 2044 } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) && 2045 (p->p_numthreads == p->p_suspcount)) { 2046 /* 2047 * Stopping everything also did the job for the single 2048 * threading request. Now we've downgraded to single-threaded, 2049 * let it continue. 
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue, however, as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		mtx_lock_spin(&sched_lock);
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
		mtx_unlock_spin(&sched_lock);
	}
}
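
#if 0
/*
 * Illustrative sketch, not part of the original file: the RANGEOF()
 * plus bzero()/bcopy() idiom used throughout this file clears or copies
 * the members that lie between a pair of "start"/"end" marker fields in
 * a structure (e.g. kg_startzero/kg_endzero, td_startcopy/td_endcopy).
 * The struct and field names below are hypothetical and exist only to
 * show the technique in isolation, as a userland-compilable example.
 */
#include <stddef.h>
#include <strings.h>

struct example {
	int	e_refcnt;	/* untouched by either section */
	/* Members from e_startzero up to (not including) e_endzero are zeroed. */
	int	e_startzero;
	int	e_ticks;
	int	e_flags;
	int	e_endzero;
	/* Members from e_startcopy up to (not including) e_endcopy are inherited. */
	int	e_startcopy;
	int	e_prio;
	int	e_endcopy;
};

#define	EX_RANGEOF(type, start, end) \
	(offsetof(type, end) - offsetof(type, start))

static void
example_init(struct example *dst, const struct example *src)
{
	/* Clear the "zero" section of the new structure. */
	bzero(&dst->e_startzero,
	    EX_RANGEOF(struct example, e_startzero, e_endzero));
	/* Copy the "copy" section from the template structure. */
	bcopy(&src->e_startcopy, &dst->e_startcopy,
	    EX_RANGEOF(struct example, e_startcopy, e_endcopy));
}
#endif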