/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
    &thread_debug, 0, "thread debug");

static int max_threads_per_proc = 150;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 50;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
    &max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse *ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_proc = p;
	ke->ke_ksegrp = kg;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE.
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_runq_kses = 0; /* XXXKSE change name */
	kg->kg_idle_kses = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE group.
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = 0;
	}
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;

	p = td->td_proc;
	if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	kg = td->td_ksegrp;
	/* Serialize removing upcall */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ke = td->td_kse;
	upcall_remove(td);
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_THREADED;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) { /* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct timespec ts, ts2, ts3, timeout;
	struct timeval tv;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		getnanouptime(&ts);
		timespecadd(&ts, &timeout);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	mtx_lock_spin(&sched_lock);
	/* Change OURSELF to become an upcall. */
	td->td_flags = TDF_UPCALLING;
#if 0	/* XXX This shouldn't be necessary */
	if (p->p_sflag & PS_NEEDSIGCHK)
		td->td_flags |= TDF_ASTPENDING;
#endif
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
	    (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
		    "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
		kg->kg_upsleeps--;
		PROC_UNLOCK(p);
		if (uap->timeout == NULL || error != EWOULDBLOCK)
			return (0);
		getnanouptime(&ts2);
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		PROC_LOCK(p);
	}
	PROC_UNLOCK(p);
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_THREADED))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		    (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall.
 * All other situations, allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always have a cpu counter. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (thread_debug && virtual_cpu != 0)
		ncpus = virtual_cpu;

	/* Easier to just set it than to test and set it. */
	PROC_LOCK(p);
	p->p_flag |= P_THREADED;
	PROC_UNLOCK(p);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* Have race condition but it is cheap. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize the KSE group, optimized for MP.
		 * Create as many KSEs as there are physical cpus; this
		 * increases concurrency even if userland is not MP safe
		 * and can only run on a single CPU (true for early
		 * versions of libpthread).
		 * In an ideal world, every physical cpu should execute
		 * a thread.  If there are enough KSEs, threads in the
		 * kernel can be executed in parallel on different cpus
		 * at full speed; concurrency in the kernel shouldn't be
		 * restricted by the number of upcalls userland provides.
		 * Adding more upcall structures only increases concurrency
		 * in userland.
		 * The highest performance configuration is:
		 * N kses = N upcalls = N physical cpus
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			    ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set. */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		upcall_free(newku);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet,
		 * create an initial upcall thread to own it.
		 */
		thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't have an upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

	get_mcontext(td, &uc->uc_mcontext, 0);
	PROC_LOCK(td->td_proc);
	uc->uc_sigmask = td->td_sigmask;
	PROC_UNLOCK(td->td_proc);
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
	int ret;

	ret = set_mcontext(td, &uc->uc_mcontext);
	if (ret == 0) {
		SIG_CANTMASK(uc->uc_sigmask);
		PROC_LOCK(td->td_proc);
		td->td_sigmask = uc->uc_sigmask;
		PROC_UNLOCK(td->td_proc);
	}
	return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */

void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *td)
{
	uma_zfree(ksegrp_zone, td);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *td)
{
	uma_zfree(kse_zone, td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox to the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error, temp;
	ucontext_t uc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	addr = (void *)(&td->td_mailbox->tm_context);
	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error)
		goto bad;

	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	if (error)
		goto bad;

	/* Export clock ticks spent in kernel mode. */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp))
		goto bad;

	/* Get address in latest mbox of list pointer. */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock; it's no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it. */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (td->td_ksegrp->kg_numupcalls == 0)
		return (-1);
	if (user) {
		/* Currently always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in the future, so if the thread
			 * mailbox is NULL, it must be a UTS kernel thread;
			 * don't account clock ticks for it.
			 */
		}
	}
	return (0);
}

/*
 * Export statclock ticks to userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	struct ksegrp *kg;
	caddr_t addr;
	uint uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	if (user) {
		uticks = td->td_uuticks;
		td->td_uuticks = 0;
		addr = (caddr_t)&tmbx->tm_uticks;
	} else {
		uticks = td->td_usticks;
		td->td_usticks = 0;
		addr = (caddr_t)&tmbx->tm_sticks;
	}
	if (uticks) {
		if (suword(addr, uticks+fuword(addr))) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	kg = td->td_ksegrp;
	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
		mtx_lock_spin(&sched_lock);
		td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		thread_unlink(td);
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and the owner thread exits only when the process is
		 * in the exiting state, an upcall to userland is no
		 * longer needed and deleting the upcall structure is
		 * safe here.
		 * So when all threads in a group have exited, all
		 * upcalls in the group should be automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
#if 0
		td->td_proc = NULL;
#endif
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if !defined(__alpha__) && !defined(__powerpc__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait();
 * called with Giant held, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp's resources. When a ksegrp is preparing to
 * exit, it calls this function.
 */
static void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall datas",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
static void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership for KSEs; after all threads
		 * in the group have exited, it is possible that some
		 * KSEs were left on the idle queue, so gc them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			    ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		    ("%s: ksegrp still has %d upcall datas",
		    __func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on the specified kse_upcall;
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* inherit blocked thread's context */
	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
	cpu_set_upcall(td2, td->td_pcb);
	/* Let the new thread become owner of the upcall. */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = TDF_UPCALLING;
#if 0	/* XXX This shouldn't be necessary */
	if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
		td2->td_flags |= TDF_ASTPENDING;
#endif
	td2->td_kse = NULL;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	setrunqueue(td2);
	return (td2);	/* bogus.. should be a void function */
}

void
thread_signal_add(struct thread *td, int sig)
{
	struct kse_upcall *ku;
	struct proc *p;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	td = curthread;
	ku = td->td_upcall;
	p = td->td_proc;

	PROC_UNLOCK(p);
	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	if (error)
		goto error;

	SIGADDSET(ss, sig);

	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
	if (error)
		goto error;

	PROC_LOCK(p);
	return;
error:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
}


/*
 * Schedule an upcall to notify a KSE process that it received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);

	return;
}

void
thread_switchout(struct thread *td)
{
	struct kse_upcall *ku;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in a threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep.
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
		/*
		 * Release ownership of upcall, and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_flags &= ~TDF_CAN_UNBIND;
		thread_schedule_upcall(td, ku);
	}
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;

	kg = td->td_ksegrp;

	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	PROC_UNLOCK(p);

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (kg->kg_numupcalls) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
		ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags);
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((tmbx == NULL) || (tmbx == (void *)-1)) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			mtx_lock_spin(&sched_lock);
			if (ku->ku_mflags & KMF_NOUPCALL)
				td->td_flags &= ~TDF_CAN_UNBIND;
			else
				td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		}
	}
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error = 0, upcalls, uts_crit;
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Nothing to do with a non-threaded group/process. */
	if (td->td_ksegrp->kg_numupcalls == 0)
		return (0);

	/*
	 * A stat clock interrupt hit in userland and we are
	 * returning from the interrupt, so charge the thread's
	 * userland time to the UTS.
	 */
	if (td->td_flags & TDF_USTATCLOCK) {
		thread_update_usr_ticks(td, 1);
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_USTATCLOCK;
		mtx_unlock_spin(&sched_lock);
		if (kg->kg_completed ||
		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
			thread_user_enter(p, td);
	}

	uts_crit = (td->td_mailbox == NULL);
	ku = td->td_upcall;
	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (kg->kg_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
			mtx_unlock_spin(&sched_lock);
			thread_update_usr_ticks(td, 0);
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = 0;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		mtx_unlock_spin(&sched_lock);
		error = thread_export_context(td);
		if (error) {
			/*
			 * Failing to do the KSE operation just defaults
			 * back to synchronous operation, so just return from
			 * the syscall.
			 */
			goto out;
		}
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_UPCALLING;
		mtx_unlock_spin(&sched_lock);
	} else if (td->td_mailbox && (ku == NULL)) {
		error = thread_export_context(td);
		/* possibly upcall with error? */
		PROC_LOCK(p);
		/*
		 * There are upcall threads waiting for
		 * work to do, wake one of them up.
		 * XXXKSE Maybe wake all of them up.
		 */
		if (!error && kg->kg_upsleeps)
			wakeup_one(&kg->kg_completed);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		while (p->p_numthreads > max_threads_per_proc) {
			if (P_SHOULDSTOP(p))
				break;
			upcalls = 0;
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			if (upcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			p->p_maxthrwaits++;
			msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", NULL);
			p->p_maxthrwaits--;
			mtx_lock_spin(&sched_lock);
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}

	if (td->td_flags & TDF_UPCALLING) {
		uts_crit = 0;
		kg->kg_nextupcall = ticks+kg->kg_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL)
			ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);

		/*
		 * Set user context to the UTS.
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku);
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(kg, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGSEGV);
		PROC_UNLOCK(td->td_proc);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td, NULL);
	}

	ku->ku_mflags = 0;
	/*
	 * Clear the thread mailbox first, then clear the system tick count.
	 * The order is important because thread_statclock() uses the
	 * mailbox pointer to see if it is a userland thread or
	 * a UTS kernel thread.
	 */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * Maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			if (p->p_flag & P_THREADED)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}