Lines Matching defs:td

85 #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
117 #define THREAD_CAN_SCHED(td, cpu) \
118 CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
133 static void sched_priority(struct thread *td, u_char prio);
135 static void maybe_resched(struct thread *td);
136 static void updatepri(struct thread *td);
137 static void resetpriority(struct thread *td);
138 static void resetpriority_thread(struct thread *td);
140 static int sched_pickcpu(struct thread *td);
303 maybe_resched(struct thread *td)
306 THREAD_LOCK_ASSERT(td, MA_OWNED);
307 if (td->td_priority < curthread->td_priority)
318 maybe_preempt(struct thread *td)
347 THREAD_LOCK_ASSERT(td, MA_OWNED);
348 KASSERT((td->td_inhibitors == 0),
350 pri = td->td_priority;
464 struct thread *td;
476 FOREACH_THREAD_IN_PROC(p, td) {
478 ts = td_get_sched(td);
479 thread_lock(td);
489 if (TD_ON_RUNQ(td)) {
491 td->td_flags &= ~TDF_DIDRUN;
492 } else if (TD_IS_RUNNING(td)) {
495 } else if (td->td_flags & TDF_DIDRUN) {
497 td->td_flags &= ~TDF_DIDRUN;
539 updatepri(td);
545 thread_unlock(td);
549 resetpriority(td);
550 resetpriority_thread(td);
551 thread_unlock(td);
577 updatepri(struct thread *td)
583 ts = td_get_sched(td);
602 resetpriority(struct thread *td)
606 if (td->td_pri_class != PRI_TIMESHARE)
609 td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
610 NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
613 sched_user_prio(td, newpriority);
621 resetpriority_thread(struct thread *td)
625 if (td->td_priority < PRI_MIN_TIMESHARE ||
626 td->td_priority > PRI_MAX_TIMESHARE)
630 maybe_resched(td);
632 sched_prio(td, td->td_user_pri);
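
The resetpriority()/resetpriority_thread() entries above (source lines 602-632) carry the core timeshare calculation in what looks like FreeBSD's 4BSD scheduler (sched_4bsd.c): a user priority built from the estimated CPU usage plus a nice-weighted offset, then clamped to the timeshare range. The standalone C sketch below models just that arithmetic; the PUSER base and every constant value here are illustrative assumptions, not the kernel's definitions.

/*
 * Standalone model of the timeshare priority recomputation at source
 * lines 602-632.  All constants are assumed values for illustration.
 */
#include <stdio.h>

#define PRI_MIN_TIMESHARE      88      /* assumed timeshare range */
#define PRI_MAX_TIMESHARE      223
#define PUSER                  PRI_MIN_TIMESHARE
#define INVERSE_ESTCPU_WEIGHT  8       /* assumed estcpu divisor */
#define NICE_WEIGHT            1       /* assumed nice multiplier */
#define PRIO_MIN               (-20)

/* Map an estimated-CPU count and a nice value to a timeshare priority. */
static unsigned int
timeshare_priority(unsigned int estcpu, int nice)
{
        unsigned int newpriority;

        newpriority = PUSER + estcpu / INVERSE_ESTCPU_WEIGHT +
            NICE_WEIGHT * (nice - PRIO_MIN);
        if (newpriority < PRI_MIN_TIMESHARE)
                newpriority = PRI_MIN_TIMESHARE;
        if (newpriority > PRI_MAX_TIMESHARE)
                newpriority = PRI_MAX_TIMESHARE;
        return (newpriority);
}

int
main(void)
{
        /* A CPU-bound nice-0 thread vs. a mostly idle nice-20 thread. */
        printf("busy nice 0: %u, idle nice 20: %u\n",
            timeshare_priority(400, 0), timeshare_priority(0, 20));
        return (0);
}

resetpriority_thread() (lines 621-632) then skips the update when the current priority already lies outside the timeshare band, presumably because the thread is running boosted, and otherwise applies the recomputed user priority through sched_prio().
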
723 sched_clock_tick(struct thread *td)
728 THREAD_LOCK_ASSERT(td, MA_OWNED);
729 ts = td_get_sched(td);
734 resetpriority(td);
735 resetpriority_thread(td);
742 if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
749 if (PRI_BASE(td->td_pri_class) == PRI_ITHD) {
751 td->td_owepreempt = 1;
752 if (td->td_base_pri + RQ_PPQ < PRI_MAX_ITHD) {
754 sched_prio(td, td->td_base_pri + RQ_PPQ);
757 td->td_flags |= TDF_SLICEEND;
758 ast_sched_locked(td, TDA_SCHED);
768 sched_clock(struct thread *td, int cnt)
772 sched_clock_tick(td);
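
sched_clock_tick() and sched_clock() (source lines 723-772) handle per-tick accounting: the CPU-estimate and priority bookkeeping at lines 729-735, and the time-slice countdown at lines 742-758, where an interrupt-class thread that used its whole quantum is demoted one run-queue step and marked for preemption, while any other thread just gets a slice-end flag and a scheduler AST. A reduced standalone model of that slice-expiry branch follows; the my_* struct, boolean flag fields, and constant values are stand-ins rather than kernel types.

/*
 * Reduced model of the slice-expiry branch at source lines 742-758.
 */
struct my_tick_thread {
        int slice;              /* ticks left in the current quantum */
        int is_idle;            /* idle threads never lose their slice */
        int is_ithread;         /* interrupt-thread scheduling class */
        int base_pri;           /* lower number = higher priority */
        int pri;                /* current (possibly demoted) priority */
        int owepreempt;         /* preempt as soon as it is safe */
        int sliceend;           /* ask for a reschedule at the next AST */
};

#define MY_RQ_PPQ       4       /* assumed priorities per run queue */
#define MY_PRI_MAX_ITHD 47      /* assumed bottom of the ithread band */
#define MY_SLICE        10      /* assumed default slice, in ticks */

static void
my_slice_tick(struct my_tick_thread *td)
{
        if (td->is_idle || --td->slice > 0)
                return;
        td->slice = MY_SLICE;
        if (td->is_ithread) {
                /* A looping ithread is demoted one queue and preempted. */
                td->owepreempt = 1;
                if (td->base_pri + MY_RQ_PPQ < MY_PRI_MAX_ITHD)
                        td->pri = td->base_pri + MY_RQ_PPQ;
        } else {
                /* Ordinary threads round-robin at the next AST. */
                td->sliceend = 1;
        }
}
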
779 sched_exit(struct proc *p, struct thread *td)
782 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
783 "prio:%d", td->td_priority);
786 sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
790 sched_exit_thread(struct thread *td, struct thread *child)
795 thread_lock(td);
796 td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu +
798 thread_unlock(td);
806 sched_fork(struct thread *td, struct thread *childtd)
808 sched_fork_thread(td, childtd);
812 sched_fork_thread(struct thread *td, struct thread *childtd)
819 childtd->td_cpuset = cpuset_ref(td->td_cpuset);
820 childtd->td_domain.dr_policy = td->td_cpuset->cs_domain;
824 tsc = td_get_sched(td);
833 struct thread *td;
837 FOREACH_THREAD_IN_PROC(p, td) {
838 thread_lock(td);
839 resetpriority(td);
840 resetpriority_thread(td);
841 thread_unlock(td);
846 sched_class(struct thread *td, int class)
848 THREAD_LOCK_ASSERT(td, MA_OWNED);
849 td->td_pri_class = class;
856 sched_priority(struct thread *td, u_char prio)
859 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
860 "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
862 SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
863 if (td != curthread && prio > td->td_priority) {
865 "lend prio", "prio:%d", td->td_priority, "new prio:%d",
866 prio, KTR_ATTR_LINKED, sched_tdname(td));
867 SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
870 THREAD_LOCK_ASSERT(td, MA_OWNED);
871 if (td->td_priority == prio)
873 td->td_priority = prio;
874 if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
875 sched_rem(td);
876 sched_add(td, SRQ_BORING | SRQ_HOLDTD);
885 sched_lend_prio(struct thread *td, u_char prio)
888 td->td_flags |= TDF_BORROWING;
889 sched_priority(td, prio);
901 sched_unlend_prio(struct thread *td, u_char prio)
905 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
906 td->td_base_pri <= PRI_MAX_TIMESHARE)
907 base_pri = td->td_user_pri;
909 base_pri = td->td_base_pri;
911 td->td_flags &= ~TDF_BORROWING;
912 sched_prio(td, base_pri);
914 sched_lend_prio(td, prio);
918 sched_prio(struct thread *td, u_char prio)
923 td->td_base_pri = prio;
929 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
933 oldprio = td->td_priority;
934 sched_priority(td, prio);
940 if (TD_ON_LOCK(td) && oldprio != prio)
941 turnstile_adjust(td, oldprio);
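
Together, sched_lend_prio(), sched_unlend_prio(), and sched_prio() (source lines 885-941) implement priority propagation: lending marks the thread as borrowing and runs it at the donated priority, a plain sched_prio() refuses to lower a borrowing thread, and unlending either restores the thread's own priority and clears the flag or re-lends when a better donation remains. The compact model below keeps that state machine but drops the timeshare/user-priority special case at lines 905-907 and the turnstile_adjust() call at line 941; lower numbers mean higher priority, and all names are assumed.

/*
 * Model of the borrow/lend priority logic at source lines 885-941.
 */
struct my_prio_thread {
        int pri;                /* effective priority */
        int base_pri;           /* priority the thread owns outright */
        int borrowing;          /* currently running on a lent priority */
};

static void
my_lend_prio(struct my_prio_thread *td, int prio)
{
        td->borrowing = 1;
        td->pri = prio;         /* run at the donated priority */
}

static void
my_set_prio(struct my_prio_thread *td, int prio)
{
        td->base_pri = prio;
        /* Never lower the effective priority of a borrowing thread. */
        if (td->borrowing && td->pri < prio)
                return;
        td->pri = prio;
}

static void
my_unlend_prio(struct my_prio_thread *td, int prio)
{
        /* 'prio' is the best priority still being donated, if any. */
        if (prio >= td->base_pri) {
                td->borrowing = 0;
                my_set_prio(td, td->base_pri);
        } else
                my_lend_prio(td, prio);
}
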
945 sched_ithread_prio(struct thread *td, u_char prio)
947 THREAD_LOCK_ASSERT(td, MA_OWNED);
948 MPASS(td->td_pri_class == PRI_ITHD);
949 td->td_base_ithread_pri = prio;
950 sched_prio(td, prio);
954 sched_user_prio(struct thread *td, u_char prio)
957 THREAD_LOCK_ASSERT(td, MA_OWNED);
958 td->td_base_user_pri = prio;
959 if (td->td_lend_user_pri <= prio)
961 td->td_user_pri = prio;
965 sched_lend_user_prio(struct thread *td, u_char prio)
968 THREAD_LOCK_ASSERT(td, MA_OWNED);
969 td->td_lend_user_pri = prio;
970 td->td_user_pri = min(prio, td->td_base_user_pri);
971 if (td->td_priority > td->td_user_pri)
972 sched_prio(td, td->td_user_pri);
973 else if (td->td_priority != td->td_user_pri)
974 ast_sched_locked(td, TDA_SCHED);
981 sched_lend_user_prio_cond(struct thread *td, u_char prio)
984 if (td->td_lend_user_pri == prio)
987 thread_lock(td);
988 sched_lend_user_prio(td, prio);
989 thread_unlock(td);
993 sched_sleep(struct thread *td, int pri)
996 THREAD_LOCK_ASSERT(td, MA_OWNED);
997 td->td_slptick = ticks;
998 td_get_sched(td)->ts_slptime = 0;
999 if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
1000 sched_prio(td, pri);
1004 sched_switch(struct thread *td, int flags)
1012 THREAD_LOCK_ASSERT(td, MA_OWNED);
1014 td->td_lastcpu = td->td_oncpu;
1015 preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
1017 td->td_flags &= ~TDF_SLICEEND;
1018 ast_unsched_locked(td, TDA_SCHED);
1019 td->td_owepreempt = 0;
1020 td->td_oncpu = NOCPU;
1028 if (td->td_flags & TDF_IDLETD) {
1029 TD_SET_CAN_RUN(td);
1034 if (TD_IS_RUNNING(td)) {
1036 sched_add(td, SRQ_HOLDTD | SRQ_OURSELF | SRQ_YIELDING |
1046 if (td->td_lock != &sched_lock) {
1048 tmtx = thread_lock_block(td);
1052 if ((td->td_flags & TDF_NOLOAD) == 0)
1059 if (TD_IS_IDLETHREAD(td))
1060 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
1061 "prio:%d", td->td_priority);
1063 KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
1064 "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
1065 "lockname:\"%s\"", td->td_lockname);
1068 if (td != newtd) {
1070 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1071 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1088 cpu_switch(td, newtd, tmtx);
1110 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1111 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1114 td->td_lock = &sched_lock;
1118 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
1119 "prio:%d", td->td_priority);
1122 if (td->td_flags & TDF_IDLETD)
1125 sched_lock.mtx_lock = (uintptr_t)td;
1126 td->td_oncpu = PCPU_GET(cpuid);
1132 sched_wakeup(struct thread *td, int srqflags)
1136 THREAD_LOCK_ASSERT(td, MA_OWNED);
1137 ts = td_get_sched(td);
1139 updatepri(td);
1140 resetpriority(td);
1142 td->td_slptick = 0;
1150 if (PRI_BASE(td->td_pri_class) == PRI_ITHD &&
1151 td->td_base_pri != td->td_base_ithread_pri)
1152 sched_prio(td, td->td_base_ithread_pri);
1154 sched_add(td, srqflags);
1282 sched_pickcpu(struct thread *td)
1288 if (td->td_lastcpu != NOCPU && THREAD_CAN_SCHED(td, td->td_lastcpu))
1289 best = td->td_lastcpu;
1293 if (!THREAD_CAN_SCHED(td, cpu))
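
sched_pickcpu() (source lines 1282-1293) seeds its choice with the thread's last CPU when THREAD_CAN_SCHED() still allows it, and filters every other candidate through the same mask. The sketch below models that with a plain 64-bit mask (so at most 64 CPUs) in place of the kernel cpuset; the per-CPU run-queue-length tie-break is an assumption about the part of the function not shown in this listing.

#include <stdint.h>

#define MY_NOCPU (-1)

/*
 * Model of the CPU selection at source lines 1282-1293.  A uint64_t
 * bitmask stands in for td_cpuset->cs_mask; rq_len is an assumed
 * per-CPU run-queue length used only as a tie-break.
 */
static int
my_pickcpu(int lastcpu, uint64_t allowed, const int *rq_len, int ncpus)
{
        int best, cpu;

        /* Keep whatever cache affinity is left, if the mask allows it. */
        best = (lastcpu != MY_NOCPU && (allowed & (1ULL << lastcpu)) != 0) ?
            lastcpu : MY_NOCPU;
        for (cpu = 0; cpu < ncpus; cpu++) {
                if ((allowed & (1ULL << cpu)) == 0)
                        continue;
                if (best == MY_NOCPU || rq_len[cpu] < rq_len[best])
                        best = cpu;
        }
        return (best);          /* MY_NOCPU only if the mask was empty */
}
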
1308 sched_add(struct thread *td, int flags)
1317 ts = td_get_sched(td);
1318 THREAD_LOCK_ASSERT(td, MA_OWNED);
1319 KASSERT((td->td_inhibitors == 0),
1321 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1323 KASSERT(td->td_flags & TDF_INMEM,
1326 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1327 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1330 KTR_ATTR_LINKED, sched_tdname(td));
1331 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1338 if (td->td_lock != &sched_lock) {
1341 td->td_lock = &sched_lock;
1343 thread_lock_set(td, &sched_lock);
1345 TD_SET_RUNQ(td);
1356 if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
1358 if (td->td_pinned != 0)
1359 cpu = td->td_lastcpu;
1360 else if (td->td_flags & TDF_BOUND) {
1367 cpu = sched_pickcpu(td);
1371 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
1375 "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
1376 td);
1381 if ((td->td_flags & TDF_NOLOAD) == 0)
1383 runq_add(ts->ts_runq, td, flags);
1389 kick_other_cpu(td->td_priority, cpu);
1403 if (!maybe_preempt(td))
1404 maybe_resched(td);
1408 thread_unlock(td);
1414 ts = td_get_sched(td);
1415 THREAD_LOCK_ASSERT(td, MA_OWNED);
1416 KASSERT((td->td_inhibitors == 0),
1418 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1420 KASSERT(td->td_flags & TDF_INMEM,
1422 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1423 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1426 KTR_ATTR_LINKED, sched_tdname(td));
1427 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1434 if (td->td_lock != &sched_lock) {
1437 td->td_lock = &sched_lock;
1439 thread_lock_set(td, &sched_lock);
1441 TD_SET_RUNQ(td);
1442 CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1445 if ((td->td_flags & TDF_NOLOAD) == 0)
1447 runq_add(ts->ts_runq, td, flags);
1448 if (!maybe_preempt(td))
1449 maybe_resched(td);
1451 thread_unlock(td);
1456 sched_rem(struct thread *td)
1460 ts = td_get_sched(td);
1461 KASSERT(td->td_flags & TDF_INMEM,
1463 KASSERT(TD_ON_RUNQ(td),
1466 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
1467 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1469 SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
1471 if ((td->td_flags & TDF_NOLOAD) == 0)
1477 runq_remove(ts->ts_runq, td);
1478 TD_SET_CAN_RUN(td);
1488 struct thread *td;
1496 td = runq_choose_fuzz(&runq, runq_fuzz);
1499 if (td == NULL ||
1501 tdcpu->td_priority < td->td_priority)) {
1502 CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1504 td = tdcpu;
1507 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
1512 td = runq_choose(&runq);
1515 if (td) {
1517 if (td == tdcpu)
1520 runq_remove(rq, td);
1521 td->td_flags |= TDF_DIDRUN;
1523 KASSERT(td->td_flags & TDF_INMEM,
1525 return (td);
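
The sched_choose() entries (source lines 1488-1525) pick between the head of the global run queue and the head of the current CPU's queue, taking the local candidate only when it has a strictly better (numerically lower) priority and marking the winner with TDF_DIDRUN at line 1521. A minimal model of that comparison is below; NULL stands for an empty queue, the dequeue step is left out, and the idle-thread fallback is an assumption about the lines not shown here.

#include <stddef.h>

/* Minimal stand-in for a runnable thread; lower pri wins. */
struct my_rq_thread {
        int pri;
        int did_run;
};

/*
 * Model of the global-vs-per-CPU choice at source lines 1496-1525.
 * 'global' and 'percpu' are the best candidates from each queue
 * (NULL if empty); 'idle' is this CPU's idle thread.
 */
static struct my_rq_thread *
my_choose(struct my_rq_thread *global, struct my_rq_thread *percpu,
    struct my_rq_thread *idle)
{
        struct my_rq_thread *td;

        td = global;
        /* Take the local candidate only when it strictly beats the global one. */
        if (td == NULL || (percpu != NULL && percpu->pri < td->pri))
                td = percpu;
        if (td == NULL)
                return (idle);
        td->did_run = 1;        /* mirrors TDF_DIDRUN at line 1521 */
        return (td);
}
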
1531 sched_preempt(struct thread *td)
1535 SDT_PROBE2(sched, , , surrender, td, td->td_proc);
1536 if (td->td_critnest > 1) {
1537 td->td_owepreempt = 1;
1539 thread_lock(td);
1541 flags |= TD_IS_IDLETHREAD(td) ? SWT_REMOTEWAKEIDLE :
1548 sched_userret_slowpath(struct thread *td)
1551 thread_lock(td);
1552 td->td_priority = td->td_user_pri;
1553 td->td_base_pri = td->td_user_pri;
1554 thread_unlock(td);
1558 sched_bind(struct thread *td, int cpu)
1561 struct td_sched *ts = td_get_sched(td);
1564 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
1565 KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
1567 td->td_flags |= TDF_BOUND;
1574 thread_lock(td);
1579 sched_unbind(struct thread* td)
1581 THREAD_LOCK_ASSERT(td, MA_OWNED);
1582 KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
1583 td->td_flags &= ~TDF_BOUND;
1587 sched_is_bound(struct thread *td)
1589 THREAD_LOCK_ASSERT(td, MA_OWNED);
1590 return (td->td_flags & TDF_BOUND);
1594 sched_relinquish(struct thread *td)
1596 thread_lock(td);
1619 sched_pctcpu(struct thread *td)
1623 THREAD_LOCK_ASSERT(td, MA_OWNED);
1624 ts = td_get_sched(td);
1634 sched_pctcpu_delta(struct thread *td)
1640 THREAD_LOCK_ASSERT(td, MA_OWNED);
1641 ts = td_get_sched(td);
1663 sched_estcpu(struct thread *td)
1666 return (td_get_sched(td)->ts_estcpu);
1693 sched_throw_tail(struct thread *td)
1698 cpu_throw(td, choosethread()); /* doesn't return */
1729 sched_throw(struct thread *td)
1732 MPASS(td != NULL);
1733 MPASS(td->td_lock == &sched_lock);
1736 td->td_lastcpu = td->td_oncpu;
1737 td->td_oncpu = NOCPU;
1739 sched_throw_tail(td);
1743 sched_fork_exit(struct thread *td)
1750 td->td_oncpu = PCPU_GET(cpuid);
1751 sched_lock.mtx_lock = (uintptr_t)td;
1754 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1756 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
1757 "prio:%d", td->td_priority);
1762 sched_tdname(struct thread *td)
1767 ts = td_get_sched(td);
1770 "%s tid %d", td->td_name, td->td_tid);
1773 return (td->td_name);
1779 sched_clear_tdname(struct thread *td)
1783 ts = td_get_sched(td);
1789 sched_affinity(struct thread *td)
1795 THREAD_LOCK_ASSERT(td, MA_OWNED);
1801 ts = td_get_sched(td);
1804 if (!THREAD_CAN_SCHED(td, cpu)) {
1817 if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
1820 switch (TD_GET_STATE(td)) {
1827 THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
1831 sched_rem(td);
1832 sched_add(td, SRQ_HOLDTD | SRQ_BORING);
1839 if (THREAD_CAN_SCHED(td, td->td_oncpu))
1842 ast_sched_locked(td, TDA_SCHED);
1843 if (td != curthread)
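
The closing sched_affinity() entries (source lines 1789-1843) re-check a thread against a changed CPU mask: pinned or bound threads are left alone, a thread waiting on a now-disallowed per-CPU run queue is removed and re-added so normal placement can move it, and a thread running on a disallowed CPU is flagged for a reschedule, with extra handling at line 1843 when the target is not the current thread. The standalone model below simplifies all of that to a plain bitmask, a reduced state enum, and a requeue callback standing in for the sched_rem()/sched_add() pair at lines 1831-1832.

#include <stdint.h>

enum my_td_state { MY_SLEEPING, MY_ON_RUNQ, MY_RUNNING };

struct my_aff_thread {
        enum my_td_state state;
        int pinned;             /* temporarily pinned to its current CPU */
        int bound;              /* bound via something like sched_bind() */
        int runq_cpu;           /* per-CPU queue it waits on, if MY_ON_RUNQ */
        int oncpu;              /* CPU it executes on, if MY_RUNNING */
        int want_resched;       /* AST-style "switch soon" request */
};

/* Model of the affinity re-check at source lines 1789-1843. */
static void
my_affinity_changed(struct my_aff_thread *td, uint64_t allowed,
    void (*requeue)(struct my_aff_thread *))
{
        /* Pinned and bound threads keep their CPU until that constraint drops. */
        if (td->pinned || td->bound)
                return;
        switch (td->state) {
        case MY_ON_RUNQ:
                /* Waiting on a queue the mask no longer allows: move it. */
                if ((allowed & (1ULL << td->runq_cpu)) == 0)
                        requeue(td);
                break;
        case MY_RUNNING:
                /* Running somewhere it may no longer run: force a switch. */
                if ((allowed & (1ULL << td->oncpu)) == 0)
                        td->want_resched = 1;
                break;
        default:
                /* Other states are placed when added back to a run queue. */
                break;
        }
}
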