/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
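 * The UMA zones below provide type-stable storage for the thread, KSE,
 * ksegrp, and upcall structures allocated and recycled in this file.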
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 150;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 50;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
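 * Unlike the ctor/dtor above, which run on every uma_zalloc()/uma_zfree(),
 * this init (and the fini below) runs only when an item is first created
 * or finally released by the zone, so the kernel stack set up here is
 * reused across allocations.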
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	vm_thread_new(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	vm_thread_dispose(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse *ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its ksegrp.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_proc = p;
	ke->ke_ksegrp = kg;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE.
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_runq_kses = 0;	/* XXXKSE change name */
	kg->kg_idle_kses = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE.
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

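	/* The ksegrp's upcall list and counter are protected by sched_lock. */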
	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_SA) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	kg = td->td_ksegrp;
	count = 0;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((kg->kg_numupcalls - count) == 1 &&
	    (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	ke = td->td_kse;
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_SA;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) { /* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall.  Only error cases return.
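 * The caller may supply a timeout; without one, the msleep() below waits
 * until a completed thread shows up or a KUF_DOUPCALL request arrives.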
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_flags & TDF_SA)
		td->td_pflags |= TDP_UPCALLING;
	PROC_LOCK(p);
	if ((ku->ku_flags & KUF_DOUPCALL) == 0 && (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
		    "kserel", (uap->timeout ? tvtohz(&tv) : 0));
		kg->kg_upsleeps--;
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		mtx_lock_spin(&sched_lock);
		ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	PROC_UNLOCK(p);
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_SA))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		    (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEGRP, first call: use the current KSE and don't schedule an
 * upcall.  In all other situations, allocate the maximum number of new
 * KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad, why doesn't the kernel always have a cpu counter!? */
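	/* Concurrency defaults to the cpu count, or the virtual_cpu debug override below. */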
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	if (!(mbx.km_flags & KMF_BOUND))
		sa = TDF_SA;
	else
		ncpus = 1;
	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA;
	}
	PROC_UNLOCK(p);
	if (!sa && !uap->newgroup && !first)
		return (EINVAL);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* Have race condition but it is cheap. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
			return (EINVAL);
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize the KSE group.
		 *
		 * For a multiplexed group, create as many KSEs as there are
		 * physical cpus.  This increases concurrency in the kernel
		 * even if userland is not MP safe and can only run on a
		 * single CPU.  Ideally, every physical cpu should execute a
		 * thread.  If there are enough KSEs, threads in the kernel
		 * can be executed in parallel on different cpus at full
		 * speed; concurrency in the kernel shouldn't be restricted
		 * by the number of upcalls userland provides.  Adding more
		 * upcall structures only increases concurrency in userland.
		 *
		 * For a bound thread group, because there is only one thread
		 * in the group, we only create one KSE for the group.  A
		 * thread in this kind of group will never schedule an
		 * upcall when blocked; this is intended to simulate
		 * pthread's system scope threads.
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			    ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set. */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		upcall_free(newku);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
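	 * Three cases follow: a new group gets a fresh upcall thread,
	 * a caller that has no upcall of its own adopts this one, and
	 * otherwise a new upcall thread is created to own it.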
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet,
		 * create an initial upcall thread to own it.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't have an upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	if (!sa) {
		newtd->td_mailbox = mbx.km_curthread;
		newtd->td_flags &= ~TDF_SA;
		if (newtd != td) {
			mtx_unlock_spin(&sched_lock);
			cpu_set_upcall_kse(newtd, newku);
			mtx_lock_spin(&sched_lock);
		}
	} else {
		newtd->td_flags |= TDF_SA;
	}
	if (newtd != td)
		setrunqueue(newtd);
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
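	 * (At worst an insertion racing with this unlocked check is
	 * missed and gets reaped on a later call.)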
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error = 0, temp;
	mcontext_t mc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	/* Export clock ticks spent in kernel mode. */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp)) {
		error = EFAULT;
		goto bad;
	}

	/* Get the address in the latest mbox of the list pointer. */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock.  It's no longer valid to
			 * use it again in any other places.
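			 * (The suword()-then-recheck loop retries until
			 * the mailbox's tm_next matches the kg_completed
			 * head it was linked behind.)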
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it. */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
		return (0);
	if (user) {
		/* Currently always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in the future, so if the thread
			 * mailbox is NULL, it must be a UTS kernel thread;
			 * don't account clock ticks for it.
			 */
		}
	}
	return (0);
}

/*
 * Export state clock ticks for userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	struct ksegrp *kg;
	caddr_t addr;
	uint uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	if (user) {
		uticks = td->td_uuticks;
		td->td_uuticks = 0;
		addr = (caddr_t)&tmbx->tm_uticks;
	} else {
		uticks = td->td_usticks;
		td->td_usticks = 0;
		addr = (caddr_t)&tmbx->tm_sticks;
	}
	if (uticks) {
		if (suword(addr, uticks+fuword(addr))) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	kg = td->td_ksegrp;
	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
		mtx_lock_spin(&sched_lock);
		td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
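 * Any stashed standin goes to the zombie queues for thread_reap(); the
 * dying thread itself is parked in the per-CPU deadthread slot and is
 * cleaned up later from another context.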
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		thread_unlink(td);
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread.  P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and the owner thread exits only when the process is
		 * exiting, upcalls to userland are no longer needed,
		 * so deleting the upcall structure is safe here.
		 * When all the threads in a group have exited, all the
		 * upcalls in the group should thus be freed automatically.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
#if 0
		td->td_proc = NULL;
#endif
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if !defined(__alpha__) && !defined(__powerpc__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
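 * The caller is responsible for taking a ucred reference and for
 * attaching a KSE or scheduler state afterwards.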
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp's resources.  When a ksegrp is preparing to
 * exit, it calls this function.
 */
static void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall datas",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources.  When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
static void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership for KSEs; after all the threads
		 * in the group have exited, it is possible that some KSEs
		 * were left on the idle queue, so gc them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			    ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		    ("%s: ksegrp still has %d upcall datas",
		    __func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall.  Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().
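 * The spare inherits the creator's proc pointer and a ucred reference;
 * its zeroed region starts out clear.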
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on the specified kse_upcall;
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* inherit blocked thread's context */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall. */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = TDF_SA;
	td2->td_pflags = TDP_UPCALLING;
	td2->td_kse = NULL;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	return (td2);	/* bogus.. should be a void function */
}

void
thread_signal_add(struct thread *td, int sig)
{
	struct kse_upcall *ku;
	struct proc *p;
	sigset_t ss;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&p->p_sigacts->ps_mtx, MA_OWNED);
	td = curthread;
	ku = td->td_upcall;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	if (error)
		goto error;

	SIGADDSET(ss, sig);

	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
	if (error)
		goto error;

	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	return;
error:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
}

/*
 * Schedule an upcall to notify a KSE process that it received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
	td->td_pflags |= TDP_UPCALLING;

	return;
}

void
thread_switchout(struct thread *td)
{
	struct kse_upcall *ku;
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in a threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep.
	 * XXXKSE eventually almost any inhibition could do.
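	 * (Only voluntary sleeps are caught here; see the TD_ON_SLEEPQ()
	 * test below.)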
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
		/*
		 * Release ownership of the upcall, and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_flags &= ~TDF_CAN_UNBIND;
		td2 = thread_schedule_upcall(td, ku);
		setrunqueue(td2);
	}
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;

	kg = td->td_ksegrp;

	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 */
	if (p->p_flag & P_SINGLE_EXIT) {
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is.  There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (td->td_flags & TDF_SA) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
		ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags);
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((tmbx == NULL) || (tmbx == (void *)-1)) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			mtx_lock_spin(&sched_lock);
			if (ku->ku_mflags & KMF_NOUPCALL)
				td->td_flags &= ~TDF_CAN_UNBIND;
			else
				td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		}
	}
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish.  Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error = 0, upcalls, uts_crit;
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ku = td->td_upcall;

	/* Nothing to do with a bound thread. */
	if (!(td->td_flags & TDF_SA))
		return (0);

	/*
	 * The stat clock interrupt hit in userland; we are returning
	 * from the interrupt, so charge the thread's userland time
	 * to the UTS.
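	 * (TDF_USTATCLOCK was set by thread_statclock() when the tick
	 * arrived while the thread was in user mode.)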
	 */
	if (td->td_flags & TDF_USTATCLOCK) {
		thread_update_usr_ticks(td, 1);
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_USTATCLOCK;
		mtx_unlock_spin(&sched_lock);
		if (kg->kg_completed ||
		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
			thread_user_enter(p, td);
	}

	uts_crit = (td->td_mailbox == NULL);
	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (kg->kg_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
			mtx_unlock_spin(&sched_lock);
			thread_update_usr_ticks(td, 0);
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = NULL;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		mtx_unlock_spin(&sched_lock);
		error = thread_export_context(td);
		if (error) {
			/*
			 * Failing to do the KSE operation just defaults
			 * back to synchronous operation, so just return from
			 * the syscall.
			 */
			goto out;
		}
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		td->td_pflags |= TDP_UPCALLING;
	} else if (td->td_mailbox && (ku == NULL)) {
		error = thread_export_context(td);
		/* possibly upcall with error? */
		PROC_LOCK(p);
		/*
		 * There are upcall threads waiting for
		 * work to do, wake one of them up.
		 * XXXKSE Maybe wake all of them up.
		 */
		if (!error && kg->kg_upsleeps)
			wakeup_one(&kg->kg_completed);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(ku != NULL, ("upcall is NULL\n"));
	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_maxthrwaits++;
		while (p->p_numthreads > max_threads_per_proc) {
			upcalls = 0;
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			if (upcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", NULL)) {
				mtx_lock_spin(&sched_lock);
				break;
			} else {
				mtx_lock_spin(&sched_lock);
			}
		}
		p->p_maxthrwaits--;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_UPCALLING) {
		uts_crit = 0;
		kg->kg_nextupcall = ticks+kg->kg_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
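		 * That means pointing the user context at the UTS entry
		 * and handing it the completed-thread mailbox list, both
		 * done below.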
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		td->td_pflags &= ~TDP_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL) {
			mtx_lock_spin(&sched_lock);
			ku->ku_flags &= ~KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * Set user context to the UTS.
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku);
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(kg, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGSEGV);
		PROC_UNLOCK(td->td_proc);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td, NULL);
	}

	ku->ku_mflags = 0;
	/*
	 * Clear the thread mailbox first, then clear the system tick count.
	 * The order is important because thread_statclock() uses the
	 * mailbox pointer to see if it is a userland thread or
	 * a UTS kernel thread.
	 */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptable (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * Maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptable thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading.  Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			if (p->p_flag & P_SA)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent.  We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}