Lines Matching refs:td

125 #define	THREAD_CAN_SCHED(td, cpu)	\  argument
126 CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
141 static void sched_priority(struct thread *td, u_char prio);
143 static void maybe_resched(struct thread *td);
144 static void updatepri(struct thread *td);
145 static void resetpriority(struct thread *td);
146 static void resetpriority_thread(struct thread *td);
148 static int sched_pickcpu(struct thread *td);
311 maybe_resched(struct thread *td) in maybe_resched() argument
314 THREAD_LOCK_ASSERT(td, MA_OWNED); in maybe_resched()
315 if (td->td_priority < curthread->td_priority) in maybe_resched()
326 maybe_preempt(struct thread *td) in maybe_preempt() argument
355 THREAD_LOCK_ASSERT(td, MA_OWNED); in maybe_preempt()
356 KASSERT((td->td_inhibitors == 0), in maybe_preempt()
358 pri = td->td_priority; in maybe_preempt()
472 struct thread *td; in schedcpu() local
484 FOREACH_THREAD_IN_PROC(p, td) { in schedcpu()
486 ts = td_get_sched(td); in schedcpu()
487 thread_lock(td); in schedcpu()
497 if (TD_ON_RUNQ(td)) { in schedcpu()
499 td->td_flags &= ~TDF_DIDRUN; in schedcpu()
500 } else if (TD_IS_RUNNING(td)) { in schedcpu()
503 } else if (td->td_flags & TDF_DIDRUN) { in schedcpu()
505 td->td_flags &= ~TDF_DIDRUN; in schedcpu()
547 updatepri(td); in schedcpu()
553 thread_unlock(td); in schedcpu()
557 resetpriority(td); in schedcpu()
558 resetpriority_thread(td); in schedcpu()
559 thread_unlock(td); in schedcpu()
585 updatepri(struct thread *td) in updatepri() argument
591 ts = td_get_sched(td); in updatepri()
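
Aside: the schedcpu() and updatepri() hits above are the 4BSD CPU-usage estimator, which periodically decays each thread's ts_estcpu and, on wakeup, re-applies the decay for the time spent asleep. A minimal userland sketch of that filter, assuming the textbook decay factor 2*load / (2*load + 1) and using floating point for clarity (the kernel works in fixed point):

/*
 * Illustrative userland sketch (not kernel code) of the CPU-usage filter
 * behind schedcpu()/updatepri(): decay once per second while running,
 * and re-apply the decay per second slept before the thread runs again.
 */
#include <stdio.h>

static double
decay_cpu(double loadav, double estcpu)
{
    return (2.0 * loadav * estcpu) / (2.0 * loadav + 1.0);
}

int
main(void)
{
    double estcpu = 100.0, loadav = 1.5;
    int sec;

    /* one pass of the periodic decay */
    estcpu = decay_cpu(loadav, estcpu);
    printf("after 1s running: %.2f\n", estcpu);

    /* re-apply the decay for each of 5 seconds spent asleep */
    for (sec = 0; sec < 5; sec++)
        estcpu = decay_cpu(loadav, estcpu);
    printf("after 5s asleep:  %.2f\n", estcpu);
    return (0);
}
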
610 resetpriority(struct thread *td) in resetpriority() argument
614 if (td->td_pri_class != PRI_TIMESHARE) in resetpriority()
617 td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT + in resetpriority()
618 NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN); in resetpriority()
621 sched_user_prio(td, newpriority); in resetpriority()
629 resetpriority_thread(struct thread *td) in resetpriority_thread() argument
633 if (td->td_priority < PRI_MIN_TIMESHARE || in resetpriority_thread()
634 td->td_priority > PRI_MAX_TIMESHARE) in resetpriority_thread()
638 maybe_resched(td); in resetpriority_thread()
640 sched_prio(td, td->td_user_pri); in resetpriority_thread()
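
Aside: resetpriority() above recomputes a timeshare thread's user priority from its CPU-usage estimate and nice value, and resetpriority_thread() applies the result only when the thread is inside the timeshare priority range. A self-contained sketch of that arithmetic; the constants and clamping bounds below are illustrative assumptions, not the kernel's definitions:

#include <stdio.h>

#define PRIO_MIN                (-20)   /* most favourable nice value */
#define INVERSE_ESTCPU_WEIGHT   8       /* assumed weight */
#define NICE_WEIGHT             1       /* assumed weight */
#define PRI_MIN_TIMESHARE       88      /* assumed timeshare range */
#define PRI_MAX_TIMESHARE       223

/* Priority worsens (grows numerically) with CPU usage and with nice. */
static int
timeshare_prio(int estcpu, int nice)
{
    int newpriority;

    newpriority = PRI_MIN_TIMESHARE +
        estcpu / INVERSE_ESTCPU_WEIGHT +
        NICE_WEIGHT * (nice - PRIO_MIN);
    if (newpriority < PRI_MIN_TIMESHARE)
        newpriority = PRI_MIN_TIMESHARE;
    if (newpriority > PRI_MAX_TIMESHARE)
        newpriority = PRI_MAX_TIMESHARE;
    return (newpriority);
}

int
main(void)
{
    printf("idle, nice 0:  %d\n", timeshare_prio(0, 0));
    printf("busy, nice 0:  %d\n", timeshare_prio(400, 0));
    printf("busy, nice 20: %d\n", timeshare_prio(400, 20));
    return (0);
}
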
732 sched_clock_tick(struct thread *td) in sched_clock_tick() argument
737 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_clock_tick()
738 ts = td_get_sched(td); in sched_clock_tick()
743 resetpriority(td); in sched_clock_tick()
744 resetpriority_thread(td); in sched_clock_tick()
751 if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) { in sched_clock_tick()
758 if (PRI_BASE(td->td_pri_class) == PRI_ITHD) { in sched_clock_tick()
760 td->td_owepreempt = 1; in sched_clock_tick()
761 if (td->td_base_pri + RQ_PPQ < PRI_MAX_ITHD) { in sched_clock_tick()
763 sched_prio(td, td->td_base_pri + RQ_PPQ); in sched_clock_tick()
766 td->td_flags |= TDF_SLICEEND; in sched_clock_tick()
767 ast_sched_locked(td, TDA_SCHED); in sched_clock_tick()
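
Aside: sched_clock_tick() above charges the tick to the running thread and, when its slice runs out, either flags an interrupt thread to yield (td_owepreempt, possibly demoting it one run-queue step) or marks an ordinary thread with TDF_SLICEEND plus an AST so it reschedules at the next safe point. A toy sketch of the slice bookkeeping, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

struct fake_thread {
    int  slice;      /* ticks left in the current slice */
    bool slice_end;  /* stand-in for TDF_SLICEEND + AST request */
};

static void
clock_tick(struct fake_thread *t, int default_slice)
{
    if (--t->slice <= 0) {
        t->slice = default_slice;   /* refill for the next round */
        t->slice_end = true;        /* yield at the next safe point */
    }
}

int
main(void)
{
    struct fake_thread t = { 3, false };
    int tick;

    for (tick = 1; tick <= 4; tick++) {
        clock_tick(&t, 3);
        printf("tick %d: slice=%d reschedule=%s\n",
            tick, t.slice, t.slice_end ? "yes" : "no");
    }
    return (0);
}
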
777 sched_clock(struct thread *td, int cnt) in sched_clock() argument
781 sched_clock_tick(td); in sched_clock()
788 sched_exit(struct proc *p, struct thread *td) in sched_exit() argument
791 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit", in sched_exit()
792 "prio:%d", td->td_priority); in sched_exit()
795 sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); in sched_exit()
799 sched_exit_thread(struct thread *td, struct thread *child) in sched_exit_thread() argument
804 thread_lock(td); in sched_exit_thread()
805 td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu + in sched_exit_thread()
807 thread_unlock(td); in sched_exit_thread()
815 sched_fork(struct thread *td, struct thread *childtd) in sched_fork() argument
817 sched_fork_thread(td, childtd); in sched_fork()
821 sched_fork_thread(struct thread *td, struct thread *childtd) in sched_fork_thread() argument
828 childtd->td_cpuset = cpuset_ref(td->td_cpuset); in sched_fork_thread()
829 childtd->td_domain.dr_policy = td->td_cpuset->cs_domain; in sched_fork_thread()
833 tsc = td_get_sched(td); in sched_fork_thread()
842 struct thread *td; in sched_nice() local
846 FOREACH_THREAD_IN_PROC(p, td) { in sched_nice()
847 thread_lock(td); in sched_nice()
848 resetpriority(td); in sched_nice()
849 resetpriority_thread(td); in sched_nice()
850 thread_unlock(td); in sched_nice()
855 sched_class(struct thread *td, int class) in sched_class() argument
857 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_class()
858 td->td_pri_class = class; in sched_class()
865 sched_priority(struct thread *td, u_char prio) in sched_priority() argument
868 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change", in sched_priority()
869 "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, in sched_priority()
871 SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio); in sched_priority()
872 if (td != curthread && prio > td->td_priority) { in sched_priority()
874 "lend prio", "prio:%d", td->td_priority, "new prio:%d", in sched_priority()
875 prio, KTR_ATTR_LINKED, sched_tdname(td)); in sched_priority()
876 SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio, in sched_priority()
879 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_priority()
880 if (td->td_priority == prio) in sched_priority()
882 td->td_priority = prio; in sched_priority()
883 if (TD_ON_RUNQ(td) && td->td_rqindex != RQ_PRI_TO_QUEUE_IDX(prio)) { in sched_priority()
884 sched_rem(td); in sched_priority()
885 sched_add(td, SRQ_BORING | SRQ_HOLDTD); in sched_priority()
894 sched_lend_prio(struct thread *td, u_char prio) in sched_lend_prio() argument
897 td->td_flags |= TDF_BORROWING; in sched_lend_prio()
898 sched_priority(td, prio); in sched_lend_prio()
910 sched_unlend_prio(struct thread *td, u_char prio) in sched_unlend_prio() argument
914 if (td->td_base_pri >= PRI_MIN_TIMESHARE && in sched_unlend_prio()
915 td->td_base_pri <= PRI_MAX_TIMESHARE) in sched_unlend_prio()
916 base_pri = td->td_user_pri; in sched_unlend_prio()
918 base_pri = td->td_base_pri; in sched_unlend_prio()
920 td->td_flags &= ~TDF_BORROWING; in sched_unlend_prio()
921 sched_prio(td, base_pri); in sched_unlend_prio()
923 sched_lend_prio(td, prio); in sched_unlend_prio()
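
Aside: sched_lend_prio() and sched_unlend_prio() above implement priority lending, e.g. for turnstile priority propagation: a lent priority overrides the thread's own until it is handed back, at which point the thread falls back to its base priority unless a stronger loan is still outstanding. A small sketch of that decision; lower numbers are stronger priorities, as in the kernel, and the type and field names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct fake_thread {
    int  prio;       /* effective priority */
    int  base_prio;  /* priority the thread owns on its own */
    bool borrowing;
};

static void
lend_prio(struct fake_thread *t, int prio)
{
    t->borrowing = true;
    t->prio = prio;
}

static void
unlend_prio(struct fake_thread *t, int prio)
{
    t->borrowing = false;
    if (prio >= t->base_prio)
        t->prio = t->base_prio;  /* nothing stronger still lent */
    else
        lend_prio(t, prio);      /* a stronger loan is outstanding */
}

int
main(void)
{
    struct fake_thread t = { 120, 120, false };

    lend_prio(&t, 80);       /* e.g. priority propagated from a turnstile */
    printf("lent:   prio=%d borrowing=%d\n", t.prio, t.borrowing);
    unlend_prio(&t, 200);    /* loan returned, nothing stronger pending */
    printf("unlent: prio=%d borrowing=%d\n", t.prio, t.borrowing);
    return (0);
}
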
927 sched_prio(struct thread *td, u_char prio) in sched_prio() argument
932 td->td_base_pri = prio; in sched_prio()
938 if (td->td_flags & TDF_BORROWING && td->td_priority < prio) in sched_prio()
942 oldprio = td->td_priority; in sched_prio()
943 sched_priority(td, prio); in sched_prio()
949 if (TD_ON_LOCK(td) && oldprio != prio) in sched_prio()
950 turnstile_adjust(td, oldprio); in sched_prio()
954 sched_ithread_prio(struct thread *td, u_char prio) in sched_ithread_prio() argument
956 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_ithread_prio()
957 MPASS(td->td_pri_class == PRI_ITHD); in sched_ithread_prio()
958 td->td_base_ithread_pri = prio; in sched_ithread_prio()
959 sched_prio(td, prio); in sched_ithread_prio()
963 sched_user_prio(struct thread *td, u_char prio) in sched_user_prio() argument
966 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_user_prio()
967 td->td_base_user_pri = prio; in sched_user_prio()
968 if (td->td_lend_user_pri <= prio) in sched_user_prio()
970 td->td_user_pri = prio; in sched_user_prio()
974 sched_lend_user_prio(struct thread *td, u_char prio) in sched_lend_user_prio() argument
977 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_lend_user_prio()
978 td->td_lend_user_pri = prio; in sched_lend_user_prio()
979 td->td_user_pri = min(prio, td->td_base_user_pri); in sched_lend_user_prio()
980 if (td->td_priority > td->td_user_pri) in sched_lend_user_prio()
981 sched_prio(td, td->td_user_pri); in sched_lend_user_prio()
982 else if (td->td_priority != td->td_user_pri) in sched_lend_user_prio()
983 ast_sched_locked(td, TDA_SCHED); in sched_lend_user_prio()
990 sched_lend_user_prio_cond(struct thread *td, u_char prio) in sched_lend_user_prio_cond() argument
993 if (td->td_lend_user_pri == prio) in sched_lend_user_prio_cond()
996 thread_lock(td); in sched_lend_user_prio_cond()
997 sched_lend_user_prio(td, prio); in sched_lend_user_prio_cond()
998 thread_unlock(td); in sched_lend_user_prio_cond()
1002 sched_sleep(struct thread *td, int pri) in sched_sleep() argument
1005 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_sleep()
1006 td->td_slptick = ticks; in sched_sleep()
1007 td_get_sched(td)->ts_slptime = 0; in sched_sleep()
1008 if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) in sched_sleep()
1009 sched_prio(td, pri); in sched_sleep()
1013 sched_switch(struct thread *td, int flags) in sched_switch() argument
1021 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_switch()
1023 td->td_lastcpu = td->td_oncpu; in sched_switch()
1024 preempted = (td->td_flags & TDF_SLICEEND) == 0 && in sched_switch()
1026 td->td_flags &= ~TDF_SLICEEND; in sched_switch()
1027 ast_unsched_locked(td, TDA_SCHED); in sched_switch()
1028 td->td_owepreempt = 0; in sched_switch()
1029 td->td_oncpu = NOCPU; in sched_switch()
1037 if (td->td_flags & TDF_IDLETD) { in sched_switch()
1038 TD_SET_CAN_RUN(td); in sched_switch()
1043 if (TD_IS_RUNNING(td)) { in sched_switch()
1045 sched_add(td, SRQ_HOLDTD | SRQ_OURSELF | SRQ_YIELDING | in sched_switch()
1055 if (td->td_lock != &sched_lock) { in sched_switch()
1057 tmtx = thread_lock_block(td); in sched_switch()
1061 if ((td->td_flags & TDF_NOLOAD) == 0) in sched_switch()
1068 if (TD_IS_IDLETHREAD(td)) in sched_switch()
1069 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle", in sched_switch()
1070 "prio:%d", td->td_priority); in sched_switch()
1072 KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td), in sched_switch()
1073 "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg, in sched_switch()
1074 "lockname:\"%s\"", td->td_lockname); in sched_switch()
1077 if (td != newtd) { in sched_switch()
1079 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) in sched_switch()
1080 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); in sched_switch()
1084 HWT_CALL_HOOK(td, HWT_SWITCH_OUT, NULL); in sched_switch()
1102 cpu_switch(td, newtd, tmtx); in sched_switch()
1124 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) in sched_switch()
1125 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); in sched_switch()
1128 td->td_lock = &sched_lock; in sched_switch()
1132 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", in sched_switch()
1133 "prio:%d", td->td_priority); in sched_switch()
1136 if (td->td_flags & TDF_IDLETD) in sched_switch()
1139 sched_lock.mtx_lock = (uintptr_t)td; in sched_switch()
1140 td->td_oncpu = PCPU_GET(cpuid); in sched_switch()
1146 sched_wakeup(struct thread *td, int srqflags) in sched_wakeup() argument
1150 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_wakeup()
1151 ts = td_get_sched(td); in sched_wakeup()
1153 updatepri(td); in sched_wakeup()
1154 resetpriority(td); in sched_wakeup()
1156 td->td_slptick = 0; in sched_wakeup()
1164 if (PRI_BASE(td->td_pri_class) == PRI_ITHD && in sched_wakeup()
1165 td->td_base_pri != td->td_base_ithread_pri) in sched_wakeup()
1166 sched_prio(td, td->td_base_ithread_pri); in sched_wakeup()
1168 sched_add(td, srqflags); in sched_wakeup()
1296 sched_pickcpu(struct thread *td) in sched_pickcpu() argument
1302 if (td->td_lastcpu != NOCPU && THREAD_CAN_SCHED(td, td->td_lastcpu)) in sched_pickcpu()
1303 best = td->td_lastcpu; in sched_pickcpu()
1307 if (!THREAD_CAN_SCHED(td, cpu)) in sched_pickcpu()
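
Aside: sched_pickcpu() above selects a run queue for a thread being added: keep td_lastcpu if the affinity mask still allows it, otherwise scan the allowed CPUs for a better candidate. A userland sketch of that shape, where the least-loaded rule, the plain bitmask, and the load array are assumptions standing in for the kernel's selection logic, cpuset, and per-CPU run queues:

#include <stdio.h>

#define NCPU  4
#define NOCPU (-1)

static int
pick_cpu(int lastcpu, unsigned allowed, const int runq_len[NCPU])
{
    int best = NOCPU, cpu;

    if (lastcpu != NOCPU && (allowed & (1u << lastcpu)) != 0)
        best = lastcpu;
    for (cpu = 0; cpu < NCPU; cpu++) {
        if ((allowed & (1u << cpu)) == 0)
            continue;
        if (best == NOCPU || runq_len[cpu] < runq_len[best])
            best = cpu;
    }
    return (best);
}

int
main(void)
{
    int load[NCPU] = { 3, 0, 2, 1 };

    /* last ran on CPU 2, but only CPUs 1 and 3 are allowed (mask 0x0a) */
    printf("picked cpu %d\n", pick_cpu(2, 0x0a, load));
    return (0);
}
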
1322 sched_add(struct thread *td, int flags) in sched_add() argument
1331 ts = td_get_sched(td); in sched_add()
1332 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_add()
1333 KASSERT((td->td_inhibitors == 0), in sched_add()
1335 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), in sched_add()
1337 KASSERT(td->td_flags & TDF_INMEM, in sched_add()
1340 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", in sched_add()
1341 "prio:%d", td->td_priority, KTR_ATTR_LINKED, in sched_add()
1344 KTR_ATTR_LINKED, sched_tdname(td)); in sched_add()
1345 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, in sched_add()
1352 if (td->td_lock != &sched_lock) { in sched_add()
1355 td->td_lock = &sched_lock; in sched_add()
1357 thread_lock_set(td, &sched_lock); in sched_add()
1359 TD_SET_RUNQ(td); in sched_add()
1370 if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND || in sched_add()
1372 if (td->td_pinned != 0) in sched_add()
1373 cpu = td->td_lastcpu; in sched_add()
1374 else if (td->td_flags & TDF_BOUND) { in sched_add()
1381 cpu = sched_pickcpu(td); in sched_add()
1385 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, in sched_add()
1390 td); in sched_add()
1395 if ((td->td_flags & TDF_NOLOAD) == 0) in sched_add()
1397 runq_add(ts->ts_runq, td, flags); in sched_add()
1403 kick_other_cpu(td->td_priority, cpu); in sched_add()
1417 if (!maybe_preempt(td)) in sched_add()
1418 maybe_resched(td); in sched_add()
1422 thread_unlock(td); in sched_add()
1428 ts = td_get_sched(td);
1429 THREAD_LOCK_ASSERT(td, MA_OWNED);
1430 KASSERT((td->td_inhibitors == 0),
1432 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1434 KASSERT(td->td_flags & TDF_INMEM,
1436 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1437 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1440 KTR_ATTR_LINKED, sched_tdname(td));
1441 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1448 if (td->td_lock != &sched_lock) {
1451 td->td_lock = &sched_lock;
1453 thread_lock_set(td, &sched_lock);
1455 TD_SET_RUNQ(td);
1456 CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1459 if ((td->td_flags & TDF_NOLOAD) == 0)
1461 runq_add(ts->ts_runq, td, flags);
1462 if (!maybe_preempt(td))
1463 maybe_resched(td);
1465 thread_unlock(td);
1470 sched_rem(struct thread *td) in sched_rem() argument
1474 ts = td_get_sched(td); in sched_rem()
1475 KASSERT(td->td_flags & TDF_INMEM, in sched_rem()
1477 KASSERT(TD_ON_RUNQ(td), in sched_rem()
1480 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem", in sched_rem()
1481 "prio:%d", td->td_priority, KTR_ATTR_LINKED, in sched_rem()
1483 SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL); in sched_rem()
1485 if ((td->td_flags & TDF_NOLOAD) == 0) in sched_rem()
1491 runq_remove(ts->ts_runq, td); in sched_rem()
1492 TD_SET_CAN_RUN(td); in sched_rem()
1502 struct thread *td; in sched_choose() local
1510 td = runq_choose_fuzz(&runq, runq_fuzz); in sched_choose()
1513 if (td == NULL || in sched_choose()
1515 tdcpu->td_priority < td->td_priority)) { in sched_choose()
1518 td = tdcpu; in sched_choose()
1521 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td); in sched_choose()
1526 td = runq_choose(&runq); in sched_choose()
1529 if (td) { in sched_choose()
1531 if (td == tdcpu) in sched_choose()
1534 runq_remove(rq, td); in sched_choose()
1535 td->td_flags |= TDF_DIDRUN; in sched_choose()
1537 KASSERT(td->td_flags & TDF_INMEM, in sched_choose()
1539 return (td); in sched_choose()
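
Aside: in the SMP case, sched_choose() above compares the head of the global run queue with the head of this CPU's queue and runs whichever has the stronger (numerically lower) priority, keeping the global pick on a tie. A stand-in sketch of that comparison:

#include <stddef.h>
#include <stdio.h>

struct fake_thread {
    const char *name;
    int         prio;  /* lower is stronger */
};

static struct fake_thread *
choose(struct fake_thread *global, struct fake_thread *percpu)
{
    if (global == NULL ||
        (percpu != NULL && percpu->prio < global->prio))
        return (percpu);
    return (global);
}

int
main(void)
{
    struct fake_thread g = { "global", 120 }, p = { "percpu", 100 };

    printf("chose %s\n", choose(&g, &p)->name);   /* percpu is stronger */
    printf("chose %s\n", choose(&g, NULL)->name); /* only the global head */
    return (0);
}
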
1545 sched_preempt(struct thread *td) in sched_preempt() argument
1549 SDT_PROBE2(sched, , , surrender, td, td->td_proc); in sched_preempt()
1550 if (td->td_critnest > 1) { in sched_preempt()
1551 td->td_owepreempt = 1; in sched_preempt()
1553 thread_lock(td); in sched_preempt()
1555 flags |= TD_IS_IDLETHREAD(td) ? SWT_REMOTEWAKEIDLE : in sched_preempt()
1562 sched_userret_slowpath(struct thread *td) in sched_userret_slowpath() argument
1565 thread_lock(td); in sched_userret_slowpath()
1566 td->td_priority = td->td_user_pri; in sched_userret_slowpath()
1567 td->td_base_pri = td->td_user_pri; in sched_userret_slowpath()
1568 thread_unlock(td); in sched_userret_slowpath()
1572 sched_bind(struct thread *td, int cpu) in sched_bind() argument
1575 struct td_sched *ts = td_get_sched(td); in sched_bind()
1578 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); in sched_bind()
1579 KASSERT(td == curthread, ("sched_bind: can only bind curthread")); in sched_bind()
1581 td->td_flags |= TDF_BOUND; in sched_bind()
1588 thread_lock(td); in sched_bind()
1593 sched_unbind(struct thread* td) in sched_unbind() argument
1595 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_unbind()
1596 KASSERT(td == curthread, ("sched_unbind: can only bind curthread")); in sched_unbind()
1597 td->td_flags &= ~TDF_BOUND; in sched_unbind()
1601 sched_is_bound(struct thread *td) in sched_is_bound() argument
1603 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_is_bound()
1604 return (td->td_flags & TDF_BOUND); in sched_is_bound()
1608 sched_relinquish(struct thread *td) in sched_relinquish() argument
1610 thread_lock(td); in sched_relinquish()
1633 sched_pctcpu(struct thread *td) in sched_pctcpu() argument
1637 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_pctcpu()
1638 ts = td_get_sched(td); in sched_pctcpu()
1648 sched_pctcpu_delta(struct thread *td) in sched_pctcpu_delta() argument
1654 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_pctcpu_delta()
1655 ts = td_get_sched(td); in sched_pctcpu_delta()
1677 sched_estcpu(struct thread *td) in sched_estcpu() argument
1680 return (td_get_sched(td)->ts_estcpu); in sched_estcpu()
1707 sched_throw_tail(struct thread *td) in sched_throw_tail() argument
1717 if (td) in sched_throw_tail()
1718 HWT_CALL_HOOK(td, HWT_SWITCH_OUT, NULL); in sched_throw_tail()
1722 cpu_throw(td, newtd); /* doesn't return */ in sched_throw_tail()
1753 sched_throw(struct thread *td) in sched_throw() argument
1756 MPASS(td != NULL); in sched_throw()
1757 MPASS(td->td_lock == &sched_lock); in sched_throw()
1760 td->td_lastcpu = td->td_oncpu; in sched_throw()
1761 td->td_oncpu = NOCPU; in sched_throw()
1763 sched_throw_tail(td); in sched_throw()
1767 sched_fork_exit(struct thread *td) in sched_fork_exit() argument
1774 td->td_oncpu = PCPU_GET(cpuid); in sched_fork_exit()
1775 sched_lock.mtx_lock = (uintptr_t)td; in sched_fork_exit()
1778 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); in sched_fork_exit()
1780 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", in sched_fork_exit()
1781 "prio:%d", td->td_priority); in sched_fork_exit()
1786 sched_tdname(struct thread *td) in sched_tdname() argument
1791 ts = td_get_sched(td); in sched_tdname()
1794 "%s tid %d", td->td_name, td->td_tid); in sched_tdname()
1797 return (td->td_name); in sched_tdname()
1803 sched_clear_tdname(struct thread *td) in sched_clear_tdname() argument
1807 ts = td_get_sched(td); in sched_clear_tdname()
1813 sched_affinity(struct thread *td) in sched_affinity() argument
1819 THREAD_LOCK_ASSERT(td, MA_OWNED); in sched_affinity()
1825 ts = td_get_sched(td); in sched_affinity()
1828 if (!THREAD_CAN_SCHED(td, cpu)) { in sched_affinity()
1841 if (td->td_pinned != 0 || td->td_flags & TDF_BOUND) in sched_affinity()
1844 switch (TD_GET_STATE(td)) { in sched_affinity()
1851 THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu)) in sched_affinity()
1855 sched_rem(td); in sched_affinity()
1856 sched_add(td, SRQ_HOLDTD | SRQ_BORING); in sched_affinity()
1863 if (THREAD_CAN_SCHED(td, td->td_oncpu)) in sched_affinity()
1866 ast_sched_locked(td, TDA_SCHED); in sched_affinity()
1867 if (td != curthread) in sched_affinity()