/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 150;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 50;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse *ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its KSE group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_proc = p;
	ke->ke_ksegrp = kg;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_runq_kses = 0;	/* XXXKSE change name */
	kg->kg_idle_kses = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = 0;
	}
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	kg = td->td_ksegrp;
	count = 0;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((kg->kg_numupcalls - count) == 1 &&
	    (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	ke = td->td_kse;
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_THREADED;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) {	/* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct timespec ts, ts2, ts3, timeout;
	struct timeval tv;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		getnanouptime(&ts);
		timespecadd(&ts, &timeout);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	mtx_lock_spin(&sched_lock);
	/* Change OURSELF to become an upcall. */
	td->td_flags = TDF_UPCALLING;
#if 0	/* XXX This shouldn't be necessary */
	if (p->p_sflag & PS_NEEDSIGCHK)
		td->td_flags |= TDF_ASTPENDING;
#endif
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
	       (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
		    "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
		kg->kg_upsleeps--;
		PROC_UNLOCK(p);
		if (uap->timeout == NULL || error != EWOULDBLOCK)
			return (0);
		getnanouptime(&ts2);
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		PROC_LOCK(p);
	}
	PROC_UNLOCK(p);
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_THREADED))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		           (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall.
 * All other situations, do allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always keep a cpu counter. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (thread_debug && virtual_cpu != 0)
		ncpus = virtual_cpu;

	/* Easier to just set it than to test and set */
	PROC_LOCK(p);
	p->p_flag |= P_THREADED;
	PROC_UNLOCK(p);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* Have race condition but it is cheap */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize the KSE group, optimized for MP.
		 * Create as many KSEs as there are physical cpus; this
		 * increases concurrency in the kernel even if userland
		 * is not MP safe and can only run on a single CPU
		 * (true for early versions of libpthread).
		 * In an ideal world, every physical cpu should execute
		 * a thread.  If there are enough KSEs, threads in the
		 * kernel can be executed in parallel on different cpus
		 * at full speed; concurrency in the kernel shouldn't be
		 * restricted by the number of upcalls userland provides.
		 * Adding more upcall structures only increases
		 * concurrency in userland.
		 * The highest-performance configuration is:
		 * N kses = N upcalls = N physical cpus
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			    ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		upcall_free(newku);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet,
		 * create an initial upcall thread to own it.
		 */
		thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't have an upcall structure,
		 * just assign the upcall to it.
723 */ 724 if (td->td_upcall == NULL) { 725 newku->ku_owner = td; 726 td->td_upcall = newku; 727 } else { 728 /* 729 * Create a new upcall thread to own it. 730 */ 731 thread_schedule_upcall(td, newku); 732 } 733 } 734 mtx_unlock_spin(&sched_lock); 735 return (0); 736 } 737 738 /* 739 * Initialize global thread allocation resources. 740 */ 741 void 742 threadinit(void) 743 { 744 745 thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), 746 thread_ctor, thread_dtor, thread_init, thread_fini, 747 UMA_ALIGN_CACHE, 0); 748 ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(), 749 NULL, NULL, ksegrp_init, NULL, 750 UMA_ALIGN_CACHE, 0); 751 kse_zone = uma_zcreate("KSE", sched_sizeof_kse(), 752 NULL, NULL, kse_init, NULL, 753 UMA_ALIGN_CACHE, 0); 754 upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall), 755 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 756 } 757 758 /* 759 * Stash an embarasingly extra thread into the zombie thread queue. 760 */ 761 void 762 thread_stash(struct thread *td) 763 { 764 mtx_lock_spin(&kse_zombie_lock); 765 TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq); 766 mtx_unlock_spin(&kse_zombie_lock); 767 } 768 769 /* 770 * Stash an embarasingly extra kse into the zombie kse queue. 771 */ 772 void 773 kse_stash(struct kse *ke) 774 { 775 mtx_lock_spin(&kse_zombie_lock); 776 TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq); 777 mtx_unlock_spin(&kse_zombie_lock); 778 } 779 780 /* 781 * Stash an embarasingly extra upcall into the zombie upcall queue. 782 */ 783 784 void 785 upcall_stash(struct kse_upcall *ku) 786 { 787 mtx_lock_spin(&kse_zombie_lock); 788 TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link); 789 mtx_unlock_spin(&kse_zombie_lock); 790 } 791 792 /* 793 * Stash an embarasingly extra ksegrp into the zombie ksegrp queue. 794 */ 795 void 796 ksegrp_stash(struct ksegrp *kg) 797 { 798 mtx_lock_spin(&kse_zombie_lock); 799 TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp); 800 mtx_unlock_spin(&kse_zombie_lock); 801 } 802 803 /* 804 * Reap zombie kse resource. 805 */ 806 void 807 thread_reap(void) 808 { 809 struct thread *td_first, *td_next; 810 struct kse *ke_first, *ke_next; 811 struct ksegrp *kg_first, * kg_next; 812 struct kse_upcall *ku_first, *ku_next; 813 814 /* 815 * Don't even bother to lock if none at this instant, 816 * we really don't care about the next instant.. 
817 */ 818 if ((!TAILQ_EMPTY(&zombie_threads)) 819 || (!TAILQ_EMPTY(&zombie_kses)) 820 || (!TAILQ_EMPTY(&zombie_ksegrps)) 821 || (!TAILQ_EMPTY(&zombie_upcalls))) { 822 mtx_lock_spin(&kse_zombie_lock); 823 td_first = TAILQ_FIRST(&zombie_threads); 824 ke_first = TAILQ_FIRST(&zombie_kses); 825 kg_first = TAILQ_FIRST(&zombie_ksegrps); 826 ku_first = TAILQ_FIRST(&zombie_upcalls); 827 if (td_first) 828 TAILQ_INIT(&zombie_threads); 829 if (ke_first) 830 TAILQ_INIT(&zombie_kses); 831 if (kg_first) 832 TAILQ_INIT(&zombie_ksegrps); 833 if (ku_first) 834 TAILQ_INIT(&zombie_upcalls); 835 mtx_unlock_spin(&kse_zombie_lock); 836 while (td_first) { 837 td_next = TAILQ_NEXT(td_first, td_runq); 838 if (td_first->td_ucred) 839 crfree(td_first->td_ucred); 840 thread_free(td_first); 841 td_first = td_next; 842 } 843 while (ke_first) { 844 ke_next = TAILQ_NEXT(ke_first, ke_procq); 845 kse_free(ke_first); 846 ke_first = ke_next; 847 } 848 while (kg_first) { 849 kg_next = TAILQ_NEXT(kg_first, kg_ksegrp); 850 ksegrp_free(kg_first); 851 kg_first = kg_next; 852 } 853 while (ku_first) { 854 ku_next = TAILQ_NEXT(ku_first, ku_link); 855 upcall_free(ku_first); 856 ku_first = ku_next; 857 } 858 } 859 } 860 861 /* 862 * Allocate a ksegrp. 863 */ 864 struct ksegrp * 865 ksegrp_alloc(void) 866 { 867 return (uma_zalloc(ksegrp_zone, M_WAITOK)); 868 } 869 870 /* 871 * Allocate a kse. 872 */ 873 struct kse * 874 kse_alloc(void) 875 { 876 return (uma_zalloc(kse_zone, M_WAITOK)); 877 } 878 879 /* 880 * Allocate a thread. 881 */ 882 struct thread * 883 thread_alloc(void) 884 { 885 thread_reap(); /* check if any zombies to get */ 886 return (uma_zalloc(thread_zone, M_WAITOK)); 887 } 888 889 /* 890 * Deallocate a ksegrp. 891 */ 892 void 893 ksegrp_free(struct ksegrp *td) 894 { 895 uma_zfree(ksegrp_zone, td); 896 } 897 898 /* 899 * Deallocate a kse. 900 */ 901 void 902 kse_free(struct kse *td) 903 { 904 uma_zfree(kse_zone, td); 905 } 906 907 /* 908 * Deallocate a thread. 909 */ 910 void 911 thread_free(struct thread *td) 912 { 913 914 cpu_thread_clean(td); 915 uma_zfree(thread_zone, td); 916 } 917 918 /* 919 * Store the thread context in the UTS's mailbox. 920 * then add the mailbox at the head of a list we are building in user space. 921 * The list is anchored in the ksegrp structure. 922 */ 923 int 924 thread_export_context(struct thread *td) 925 { 926 struct proc *p; 927 struct ksegrp *kg; 928 uintptr_t mbx; 929 void *addr; 930 int error,temp; 931 mcontext_t mc; 932 933 p = td->td_proc; 934 kg = td->td_ksegrp; 935 936 /* Export the user/machine context. */ 937 get_mcontext(td, &mc, 0); 938 addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext); 939 error = copyout(&mc, addr, sizeof(mcontext_t)); 940 if (error) 941 goto bad; 942 943 /* Exports clock ticks in kernel mode */ 944 addr = (caddr_t)(&td->td_mailbox->tm_sticks); 945 temp = fuword(addr) + td->td_usticks; 946 if (suword(addr, temp)) { 947 error = EFAULT; 948 goto bad; 949 } 950 951 /* Get address in latest mbox of list pointer */ 952 addr = (void *)(&td->td_mailbox->tm_next); 953 /* 954 * Put the saved address of the previous first 955 * entry into this one 956 */ 957 for (;;) { 958 mbx = (uintptr_t)kg->kg_completed; 959 if (suword(addr, mbx)) { 960 error = EFAULT; 961 goto bad; 962 } 963 PROC_LOCK(p); 964 if (mbx == (uintptr_t)kg->kg_completed) { 965 kg->kg_completed = td->td_mailbox; 966 /* 967 * The thread context may be taken away by 968 * other upcall threads when we unlock 969 * process lock. it's no longer valid to 970 * use it again in any other places. 
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock.  It's no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (td->td_ksegrp->kg_numupcalls == 0)
		return (-1);
	if (user) {
		/* Currently, this is always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in the future, so if the thread
			 * mailbox is NULL, it must be a UTS kernel thread;
			 * don't account clock ticks for it.
			 */
		}
	}
	return (0);
}

/*
 * Export stat clock ticks to userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	struct ksegrp *kg;
	caddr_t addr;
	uint uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	if (user) {
		uticks = td->td_uuticks;
		td->td_uuticks = 0;
		addr = (caddr_t)&tmbx->tm_uticks;
	} else {
		uticks = td->td_usticks;
		td->td_usticks = 0;
		addr = (caddr_t)&tmbx->tm_sticks;
	}
	if (uticks) {
		if (suword(addr, uticks+fuword(addr))) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	kg = td->td_ksegrp;
	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
		mtx_lock_spin(&sched_lock);
		td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		thread_unlink(td);
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * the owner thread exits only when the process is in the
		 * exiting state, so an upcall to userland is no longer
		 * needed and deleting the upcall structure is safe here.
		 * So when all threads in a group have exited, all upcalls
		 * in the group should be automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
#if 0
		td->td_proc = NULL;
#endif
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if !defined(__alpha__) && !defined(__powerpc__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp's resources. When a ksegrp is preparing to
 * exit, it calls this function.
 */
static void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall datas",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
static void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership for KSE; after all threads
		 * in the group have exited, it is possible that some
		 * KSEs were left on the idle queue, gc them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			    ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		    ("%s: ksegrp still has %d upcall datas",
		    __func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on specified kse_upcall,
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* inherit blocked thread's context */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = TDF_UPCALLING;
#if 0	/* XXX This shouldn't be necessary */
	if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
		td2->td_flags |= TDF_ASTPENDING;
#endif
	td2->td_kse = NULL;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	setrunqueue(td2);
	return (td2);	/* bogus.. should be a void function */
}

void
thread_signal_add(struct thread *td, int sig)
{
	struct kse_upcall *ku;
	struct proc *p;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	td = curthread;
	ku = td->td_upcall;
	p = td->td_proc;

	PROC_UNLOCK(p);
	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	if (error)
		goto error;

	SIGADDSET(ss, sig);

	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
	if (error)
		goto error;

	PROC_LOCK(p);
	return;
error:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
}

/*
 * Schedule an upcall to notify a KSE process that it received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);

	return;
}

void
thread_switchout(struct thread *td)
{
	struct kse_upcall *ku;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in a threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep.
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
		/*
		 * Release ownership of the upcall and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_flags &= ~TDF_CAN_UNBIND;
		thread_schedule_upcall(td, ku);
	}
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;

	kg = td->td_ksegrp;

	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	PROC_UNLOCK(p);

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (kg->kg_numupcalls) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
		ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags);
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((tmbx == NULL) || (tmbx == (void *)-1)) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			mtx_lock_spin(&sched_lock);
			if (ku->ku_mflags & KMF_NOUPCALL)
				td->td_flags &= ~TDF_CAN_UNBIND;
			else
				td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		}
	}
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error = 0, upcalls, uts_crit;
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Nothing to do with non-threaded group/process */
	if (td->td_ksegrp->kg_numupcalls == 0)
		return (0);

	/*
	 * A stat clock interrupt hit in userland; we are returning
	 * from the interrupt, so charge the thread's userland time
	 * to the UTS.
	 */
	if (td->td_flags & TDF_USTATCLOCK) {
		thread_update_usr_ticks(td, 1);
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_USTATCLOCK;
		mtx_unlock_spin(&sched_lock);
		if (kg->kg_completed ||
		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
			thread_user_enter(p, td);
	}

	uts_crit = (td->td_mailbox == NULL);
	ku = td->td_upcall;
	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (kg->kg_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
			mtx_unlock_spin(&sched_lock);
			thread_update_usr_ticks(td, 0);
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = 0;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		mtx_unlock_spin(&sched_lock);
		error = thread_export_context(td);
		if (error) {
			/*
			 * Failing to do the KSE operation just defaults
			 * back to synchronous operation, so just return from
			 * the syscall.
			 */
			goto out;
		}
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_UPCALLING;
		mtx_unlock_spin(&sched_lock);
	} else if (td->td_mailbox && (ku == NULL)) {
		error = thread_export_context(td);
		/* possibly upcall with error? */
		PROC_LOCK(p);
		/*
		 * There are upcall threads waiting for
		 * work to do, wake one of them up.
		 * XXXKSE Maybe wake all of them up.
		 */
		if (!error && kg->kg_upsleeps)
			wakeup_one(&kg->kg_completed);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		while (p->p_numthreads > max_threads_per_proc) {
			if (P_SHOULDSTOP(p))
				break;
			upcalls = 0;
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			if (upcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			p->p_maxthrwaits++;
			msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", NULL);
			p->p_maxthrwaits--;
			mtx_lock_spin(&sched_lock);
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}

	if (td->td_flags & TDF_UPCALLING) {
		uts_crit = 0;
		kg->kg_nextupcall = ticks+kg->kg_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL)
			ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);

		/*
		 * Set user context to the UTS
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku);
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(kg, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGSEGV);
		PROC_UNLOCK(td->td_proc);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td, NULL);
	}

	ku->ku_mflags = 0;
	/*
	 * Clear the thread mailbox first, then clear the system tick count.
	 * The order is important because thread_statclock() uses the
	 * mailbox pointer to see if it is a userland thread or
	 * a UTS kernel thread.
	 */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			if (p->p_flag & P_THREADED)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}
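
/*
 * Illustrative sketch (kept as a comment; not part of this kernel code):
 * how a userland UTS might drive the kse_* syscalls implemented above.
 * Only the mailbox fields this file actually touches are shown (km_func,
 * km_stack, km_quantum, km_curthread, km_completed); the full layout is
 * defined in <sys/kse.h>.  The names uts_upcall and uts_stack are
 * hypothetical and error handling is omitted.
 *
 *	static struct kse_mailbox kmbx;
 *	static char uts_stack[65536];
 *
 *	void
 *	uts_upcall(struct kse_mailbox *km)
 *	{
 *		... walk km->km_completed, pick a runnable user thread and
 *		... switch to it; when there is nothing left to run, call
 *		kse_release(NULL);
 *	}
 *
 *	kmbx.km_func = uts_upcall;
 *	kmbx.km_stack.ss_sp = uts_stack;
 *	kmbx.km_stack.ss_size = sizeof(uts_stack);
 *	kse_create(&kmbx, 0);	... one upcall in the current ksegrp
 */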