Lines Matching defs:t
398 thread_affinity_set(kthread_id_t t, int cpu_id)
403 ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
411 * the NCA code doesn't acquire it. The following assert
423 thread_lock(t);
424 if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
426 (void *)cp, (void *)t->t_bound_cpu);
428 t->t_affinitycnt++;
429 t->t_bound_cpu = cp;
434 if (cp != t->t_cpu || t != curthread) {
435 force_thread_migrate(t); /* drops thread lock */
437 thread_unlock(t);
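
The fragments above are from thread_affinity_set(): under thread_lock() the per-thread affinity count is bumped, t_bound_cpu is recorded, and force_thread_migrate() is called if the thread is not already running on the target CPU. Below is a minimal user-space sketch of that refcounted-binding pattern, not the kernel code; fake_thread_t, migrate(), and every field name are assumptions for illustration only.

/*
 * User-space analog of the refcounted affinity pattern above
 * (illustrative only; names and types are assumptions, not kernel API).
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define CPU_NONE        (-1)

typedef struct fake_thread {
        pthread_mutex_t lock;           /* stands in for thread_lock(t) */
        int             affinitycnt;    /* nested affinity requests */
        int             bound_cpu;      /* CPU_NONE when unbound */
        int             cur_cpu;        /* CPU the thread last ran on */
} fake_thread_t;

static void
migrate(fake_thread_t *t)
{
        /* Stub for force_thread_migrate(): pretend to move the thread. */
        t->cur_cpu = t->bound_cpu;
}

static void
affinity_set(fake_thread_t *t, int cpu)
{
        (void) pthread_mutex_lock(&t->lock);
        /* A nested caller must ask for the same CPU as the first one. */
        assert(t->affinitycnt == 0 || t->bound_cpu == cpu);
        t->affinitycnt++;
        t->bound_cpu = cpu;
        if (t->cur_cpu != cpu)
                migrate(t);
        (void) pthread_mutex_unlock(&t->lock);
}

int
main(void)
{
        fake_thread_t t = { PTHREAD_MUTEX_INITIALIZER, 0, CPU_NONE, 3 };

        affinity_set(&t, 1);
        affinity_set(&t, 1);    /* nesting is allowed, same CPU only */
        (void) printf("cnt=%d bound=%d cur=%d\n",
            t.affinitycnt, t.bound_cpu, t.cur_cpu);
        return (0);
}
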
459 thread_affinity_clear(kthread_id_t t)
463 thread_lock(t);
464 if (--t->t_affinitycnt == 0) {
465 if ((binding = t->t_bind_cpu) == PBIND_NONE) {
469 disp_adjust_unbound_pri(t);
470 t->t_bound_cpu = NULL;
471 if (t->t_cpu->cpu_part != t->t_cpupart) {
472 force_thread_migrate(t);
476 t->t_bound_cpu = cpu[binding];
480 if (t->t_cpu != t->t_bound_cpu) {
481 force_thread_migrate(t);
486 thread_unlock(t);
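
The thread_affinity_clear() lines show the release side: when the count drops to zero the thread either reverts to an explicit processor binding (t_bind_cpu) or becomes unbound, migrating if its current CPU is no longer legal for it. A standalone sketch of that decrement-and-restore logic follows; all names and fields are assumptions.

/*
 * Companion sketch for the thread_affinity_clear() fragments: the last
 * affinity reference either restores an explicit binding or leaves the
 * thread unbound.  Illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

#define BIND_NONE       (-1)

typedef struct fake_thread {
        pthread_mutex_t lock;
        int             affinitycnt;    /* affinity references outstanding */
        int             bound_cpu;      /* current hard binding, or BIND_NONE */
        int             bind_cpu;       /* user-requested binding to restore */
        int             cur_cpu;
} fake_thread_t;

static void
affinity_clear(fake_thread_t *t)
{
        (void) pthread_mutex_lock(&t->lock);
        if (--t->affinitycnt == 0) {
                if (t->bind_cpu == BIND_NONE) {
                        t->bound_cpu = BIND_NONE;       /* fully unbound */
                } else {
                        t->bound_cpu = t->bind_cpu;     /* restore binding */
                        if (t->cur_cpu != t->bound_cpu)
                                t->cur_cpu = t->bound_cpu;      /* "migrate" */
                }
        }
        (void) pthread_mutex_unlock(&t->lock);
}

int
main(void)
{
        fake_thread_t t = { PTHREAD_MUTEX_INITIALIZER, 1, 2, 4, 2 };

        affinity_clear(&t);     /* count hits zero: binding to CPU 4 restored */
        (void) printf("cnt=%d bound=%d cur=%d\n",
            t.affinitycnt, t.bound_cpu, t.cur_cpu);
        return (0);
}
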
532 kthread_id_t t = curthread;
545 * in thread_allowmigrate they can't have changed). Migration
548 if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
562 if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
566 (void *)t->t_weakbound_cpu);
570 * At this point we have preemption disabled and we don't yet hold
591 if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
592 --t->t_nomigrate;
610 * we don't hold cpu_lock we may not see a recent store to that,
632 if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
633 t->t_bound_cpu == cp) {
635 * Don't be tempted to store to t_weakbound_cpu only on
642 t->t_nomigrate++;
643 t->t_weakbound_cpu = cp;
677 kthread_id_t t = curthread;
679 ASSERT(t->t_weakbound_cpu == CPU ||
680 (t->t_nomigrate < 0 && t->t_preempt > 0) ||
681 CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
684 if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
688 if (t->t_nomigrate < 0) {
695 ++t->t_nomigrate;
697 } else if (--t->t_nomigrate == 0) {
703 * weak binding drops). We don't acquire thread_lock
715 if (t->t_bound_cpu &&
716 t->t_weakbound_cpu != t->t_bound_cpu)
718 t->t_weakbound_cpu = NULL;
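
The thread_nomigrate()/thread_allowmigrate() fragments implement weak binding: a positive t_nomigrate count pins the thread to t_weakbound_cpu, a negative count only records nesting when a weak-binding barrier (weakbindingbarrier) refused the request, and the release path unwinds whichever case applies. The sketch below models only that counter protocol; barrier_up and the struct are assumptions, and the real code additionally relies on disabled preemption and the interrupt-context checks omitted here.

#include <stdio.h>

typedef struct fake_thread {
        int     nomigrate;      /* >0 weakly bound, <0 barrier declined, 0 none */
        int     weakbound_cpu;  /* valid only while nomigrate > 0 */
} fake_thread_t;

static int barrier_up;          /* stands in for weakbindingbarrier */

static void
no_migrate(fake_thread_t *t, int cur_cpu)
{
        if (t->nomigrate < 0 || (barrier_up && t->nomigrate == 0)) {
                t->nomigrate--;                 /* barrier refused: track nesting only */
        } else {
                t->nomigrate++;
                t->weakbound_cpu = cur_cpu;     /* weakly bind to the current CPU */
        }
}

static void
allow_migrate(fake_thread_t *t)
{
        if (t->nomigrate < 0)
                t->nomigrate++;                 /* unwind a declined nesting level */
        else if (--t->nomigrate == 0)
                t->weakbound_cpu = -1;          /* last reference: drop the binding */
}

int
main(void)
{
        fake_thread_t t = { 0, -1 };

        no_migrate(&t, 2);
        no_migrate(&t, 2);      /* nested weak bindings, same CPU */
        allow_migrate(&t);
        allow_migrate(&t);
        (void) printf("nomigrate=%d weakbound=%d\n", t.nomigrate, t.weakbound_cpu);
        return (0);
}
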
780 * cpu_pause_info.cp_go is set, we don't want to spl
789 * an intr doesn't come in, wake up a thread, and call
845 kthread_id_t t;
852 t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
854 thread_lock(t);
855 t->t_bound_cpu = cp;
856 t->t_disp_queue = cp->cpu_disp;
857 t->t_affinitycnt = 1;
858 t->t_preempt = 1;
859 thread_unlock(t);
860 cp->cpu_pause_thread = t;
868 CALLB_CPR_INIT_SAFE(t, "cpu_pause");
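
The cpu_pause fragments create one helper thread per CPU, hard-bound and with preemption disabled, which checks in and then spins until cpu_pause_info.cp_go is set. A pthread rendezvous in the same spirit is sketched below; the counter names, NWORKERS, and the busy-wait are assumptions, not the kernel mechanism itself.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS        4

static atomic_int       paused;         /* workers that have checked in */
static atomic_int       go;             /* like cp_go: release the workers */

static void *
pause_worker(void *arg)
{
        (void) arg;
        atomic_fetch_add(&paused, 1);           /* announce we are parked */
        while (atomic_load(&go) == 0)
                ;                               /* spin, as cpu_pause() does */
        return (NULL);
}

int
main(void)
{
        pthread_t tid[NWORKERS];
        int i;

        for (i = 0; i < NWORKERS; i++)
                (void) pthread_create(&tid[i], NULL, pause_worker, NULL);

        while (atomic_load(&paused) < NWORKERS)
                ;       /* wait until every worker is spinning */

        (void) printf("all %d workers paused; critical update goes here\n",
            NWORKERS);

        atomic_store(&go, 1);                   /* let them resume */
        for (i = 0; i < NWORKERS; i++)
                (void) pthread_join(tid[i], NULL);
        return (0);
}
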
877 kthread_id_t t;
884 if ((t = cp->cpu_pause_thread) == NULL) {
888 thread_lock(t);
889 t->t_cpu = CPU; /* disp gets upset if last cpu is quiesced. */
890 t->t_bound_cpu = NULL; /* Must un-bind; cpu may not be running. */
891 t->t_pri = v.v_nglobpris - 1;
894 THREAD_TRANSITION(t);
895 setbackdq(t);
896 thread_unlock_nopreempt(t);
899 * If we don't wait for the thread to actually die, it may try to
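
These teardown lines unbind the pause thread, let it run, and then, as the comment warns, must wait for it to actually die before its resources go away. The user-space equivalent of that rule is joining the thread before freeing its state; worker_arg_t and exit_flag below are illustrative assumptions.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct worker_arg {
        atomic_int      exit_flag;      /* set by the owner to request exit */
        int             cpu;            /* whatever per-worker state we keep */
} worker_arg_t;

static void *
worker(void *arg)
{
        worker_arg_t *wa = arg;

        while (atomic_load(&wa->exit_flag) == 0)
                ;               /* pretend to do per-CPU work */
        return (NULL);
}

int
main(void)
{
        worker_arg_t *wa = calloc(1, sizeof (*wa));
        pthread_t tid;

        if (wa == NULL)
                return (1);
        wa->cpu = 0;
        (void) pthread_create(&tid, NULL, worker, wa);

        atomic_store(&wa->exit_flag, 1);        /* ask the worker to go away */
        (void) pthread_join(tid, NULL);         /* wait for it to actually die */
        free(wa);                               /* only now is this safe */
        (void) printf("worker reaped\n");
        return (0);
}
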
936 kthread_id_t t;
955 t = cp->cpu_pause_thread;
956 thread_lock(t);
963 t->t_pri = v.v_nglobpris - 1;
964 THREAD_TRANSITION(t);
965 setbackdq(t);
966 thread_unlock_nopreempt(t);
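
These lines re-queue each pause thread at the highest global priority (v.v_nglobpris - 1) so it is dispatched promptly. A rough user-space counterpart is pthread_setschedparam() with the maximum SCHED_FIFO priority; that normally requires privileges, so the sketch only reports failure rather than insisting on success, and the spin flag exists just to keep the thread alive while its priority changes.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static atomic_int done;

static void *
worker(void *arg)
{
        (void) arg;
        while (atomic_load(&done) == 0)
                ;               /* stay alive while the priority is changed */
        return (NULL);
}

int
main(void)
{
        pthread_t tid;
        struct sched_param sp;
        int err;

        (void) pthread_create(&tid, NULL, worker, NULL);

        (void) memset(&sp, 0, sizeof (sp));
        sp.sched_priority = sched_get_priority_max(SCHED_FIFO);
        err = pthread_setschedparam(tid, SCHED_FIFO, &sp);
        if (err != 0)
                (void) fprintf(stderr, "setschedparam: %s\n", strerror(err));

        atomic_store(&done, 1);
        (void) pthread_join(tid, NULL);
        return (0);
}
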
1008 * This is so that it won't be necessary to rechoose a CPU
1028 * lock to make sure we don't prevent the pause
1254 kthread_t *t;
1281 * Don't offline last online CPU in partition
1293 * We shouldn't be bound to this CPU ourselves.
1310 * Take the CPU out of interrupt participation so we won't find
1312 * shut off interrupts on the CPU, don't quiesce it, but don't
1368 * the next clock tick. This is OK since it isn't
1444 t = p->p_tlist;
1446 if (t == NULL)
1452 ASSERT(t->t_lpl != NULL);
1462 if (t->t_lpl == cpu_lpl)
1463 lgrp_move_thread(t,
1464 lgrp_choose(t,
1465 t->t_cpupart), 0);
1466 else if (t->t_lpl->lpl_lgrpid ==
1470 ASSERT(t->t_lpl->lpl_ncpu > 0);
1475 if (t->t_cpu == cp && t->t_bound_cpu != cp)
1476 t->t_cpu = disp_lowpri_cpu(ncp,
1477 t->t_lpl, t->t_pri, NULL);
1478 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1479 t->t_weakbound_cpu == cp);
1481 t = t->t_forw;
1482 } while (t != p->p_tlist);
1485 * Didn't find any threads in the same lgroup as this
1500 t = curthread;
1502 ASSERT(t != NULL && t->t_lpl != NULL);
1509 if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1510 lgrp_move_thread(t,
1511 lgrp_choose(t, t->t_cpupart), 1);
1513 ASSERT(t->t_lpl->lpl_ncpu > 0);
1519 if (t->t_cpu == cp && t->t_bound_cpu != cp) {
1520 t->t_cpu = disp_lowpri_cpu(ncp,
1521 t->t_lpl, t->t_pri, NULL);
1523 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1524 t->t_weakbound_cpu == cp);
1525 t = t->t_next;
1527 } while (t != curthread);
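
The cpu_offline() fragments walk circular thread lists (t_forw within a process, t_next across all threads) and re-home any thread whose t_cpu still points at the departing CPU unless it is bound there. Below is a self-contained walk over a circular ring, with pick_new_cpu() standing in for disp_lowpri_cpu(); every name here is an assumption, and the lgroup re-homing in the original is omitted.

#include <stdio.h>

#define NTHREADS        3

typedef struct fake_thread {
        struct fake_thread      *t_forw;        /* circular list link */
        int                     t_cpu;          /* CPU the thread last ran on */
        int                     t_bound_cpu;    /* -1 when not hard-bound */
} fake_thread_t;

static int
pick_new_cpu(int departing)
{
        /* Trivial stand-in for disp_lowpri_cpu(): anything but 'departing'. */
        return (departing == 0 ? 1 : 0);
}

int
main(void)
{
        fake_thread_t th[NTHREADS] = {
                { &th[1], 2, -1 },      /* ran on CPU 2, unbound: must move */
                { &th[2], 2,  2 },      /* hard-bound to CPU 2: left alone */
                { &th[0], 0, -1 },      /* already elsewhere */
        };
        fake_thread_t *head = &th[0], *t = head;
        int departing = 2;

        do {
                if (t->t_cpu == departing && t->t_bound_cpu != departing)
                        t->t_cpu = pick_new_cpu(departing);
                t = t->t_forw;
        } while (t != head);

        t = head;
        do {
                (void) printf("t_cpu=%d t_bound_cpu=%d\n",
                    t->t_cpu, t->t_bound_cpu);
                t = t->t_forw;
        } while (t != head);
        return (0);
}
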
1744 * cpu_lock to insure that it isn't modified. However,
1745 * certain users can't or won't do that. To allow this
1748 * to insure that the list isn't modified underneath
1790 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
1854 * has been updated so that we don't waste time
2653 /* can't grab cpu_lock */
2665 * on the right queue, but since this isn't
2666 * a performance-critical operation it doesn't
2886 register kthread_id_t t, tlist, tnext;
2901 if ((t = curthread) != NULL) {
2906 tnext = t->t_next;
2907 if (t->t_bound_cpu == cp) {
2912 * our "tlist". We "know" we don't have to
2916 t->t_next->t_prev = t->t_prev;
2917 t->t_prev->t_next = t->t_next;
2918 t->t_next = tlist;
2919 tlist = t;
2920 ASSERT(t->t_cid == syscid);
2922 cv_broadcast(&t->t_joincv);
2927 t->t_lwp = NULL;
2932 t->t_state = TS_FREE;
2933 t->t_prev = NULL; /* Just in case */
2936 } while ((t = tnext) != curthread);
2941 for (t = tlist; t != NULL; t = tnext) {
2942 tnext = t->t_next;
2943 thread_free(t);
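
These last fragments unlink every exited thread bound to the departing CPU from the circular doubly linked thread list onto a private "tlist", then free them in a second pass once the list lock would have been dropped. A heap-based sketch of that unlink-then-reap pattern follows; node_t and its fields are assumptions, and the head node stands in for curthread, which is never reaped.

#include <stdio.h>
#include <stdlib.h>

typedef struct node {
        struct node     *t_next;
        struct node     *t_prev;
        int             t_bound_cpu;
} node_t;

int
main(void)
{
        node_t *head = NULL, *tlist = NULL, *t, *tnext;
        int cpus[] = { 0, 1, 2, 1 };
        int i, departing = 1;

        /* Build a small circular doubly linked list. */
        for (i = 0; i < 4; i++) {
                t = calloc(1, sizeof (*t));
                if (t == NULL)
                        return (1);
                t->t_bound_cpu = cpus[i];
                if (head == NULL) {
                        head = t->t_next = t->t_prev = t;
                } else {
                        t->t_next = head;
                        t->t_prev = head->t_prev;
                        head->t_prev->t_next = t;
                        head->t_prev = t;
                }
        }

        /* Pass 1: unlink matching nodes onto tlist. */
        t = head->t_next;
        while (t != head) {
                tnext = t->t_next;
                if (t->t_bound_cpu == departing) {
                        t->t_next->t_prev = t->t_prev;
                        t->t_prev->t_next = t->t_next;
                        t->t_next = tlist;      /* reuse the link for tlist */
                        tlist = t;
                }
                t = tnext;
        }

        /* Pass 2: free them after the "lock" is dropped. */
        for (t = tlist; t != NULL; t = tnext) {
                tnext = t->t_next;
                free(t);
        }

        /* Whatever survived is still on the ring. */
        t = head;
        do {
                (void) printf("kept node bound to CPU %d\n", t->t_bound_cpu);
                t = t->t_next;
        } while (t != head);

        /* Tear down the remaining ring. */
        t = head->t_next;
        while (t != head) {
                tnext = t->t_next;
                free(t);
                t = tnext;
        }
        free(head);
        return (0);
}
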
2972 * Make sure the frequency doesn't change while a snapshot is