Lines Matching defs:tdq
227 * tdq - per processor runqs and statistics. A mutex synchronizes access to
232 * (f) flag, set with the tdq lock held, cleared on local CPU
235 * (t) all accesses are protected by the tdq mutex
236 * (ts) stores are serialized by the tdq mutex, loads may be lockless
238 struct tdq {
272 #define TDQ_LOAD(tdq) atomic_load_int(&(tdq)->tdq_load)
273 #define TDQ_TRANSFERABLE(tdq) atomic_load_int(&(tdq)->tdq_transferable)
274 #define TDQ_SWITCHCNT(tdq) (atomic_load_short(&(tdq)->tdq_switchcnt) + \
275 atomic_load_short(&(tdq)->tdq_oldswitchcnt))
276 #define TDQ_SWITCHCNT_INC(tdq) (atomic_store_short(&(tdq)->tdq_switchcnt, \
277 atomic_load_short(&(tdq)->tdq_switchcnt) + 1))
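
These fragments appear to come from FreeBSD's ULE scheduler (sched_ule.c); only lines mentioning tdq are listed, so comments and function bodies are fragmentary. The (ts) annotation above and the TDQ_LOAD()/TDQ_TRANSFERABLE()/TDQ_SWITCHCNT() wrappers (lines 272-277) name one convention worth calling out: these counters are modified only with the owning queue's spin mutex held, but other CPUs may sample them with a bare atomic load when a slightly stale answer is acceptable, e.g. while scanning CPUs for balancing and placement decisions (lines 714-715, 785-786). A minimal userland model of that convention, with invented names and C11 atomics standing in for the kernel primitives:

/*
 * Illustrative model only -- not kernel code.  The mutex plays the role of
 * the per-queue spin lock; "load" follows the (ts) rule: stores are done
 * with the lock held, loads may be performed locklessly by any thread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct queue_stats {
        pthread_mutex_t lock;
        atomic_int      load;
};

static void
stats_load_add(struct queue_stats *qs)  /* writer: lock held, like tdq_load_add() */
{
        pthread_mutex_lock(&qs->lock);
        atomic_store_explicit(&qs->load,
            atomic_load_explicit(&qs->load, memory_order_relaxed) + 1,
            memory_order_relaxed);
        pthread_mutex_unlock(&qs->lock);
}

static int
stats_load_peek(struct queue_stats *qs) /* reader: no lock, like TDQ_LOAD() */
{
        return (atomic_load_explicit(&qs->load, memory_order_relaxed));
}

int
main(void)
{
        static struct queue_stats qs = { .lock = PTHREAD_MUTEX_INITIALIZER };

        stats_load_add(&qs);
        printf("approximate load: %d\n", stats_load_peek(&qs));
        return (0);
}

Lockless readers must tolerate staleness; anything that needs an exact answer (the KASSERT in tdq_load_rem(), the run-queue manipulation itself) still takes the lock.
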
299 static struct tdq __read_mostly *balance_tdq;
301 DPCPU_DEFINE_STATIC(struct tdq, tdq);
304 #define TDQ_SELF() ((struct tdq *)PCPU_GET(sched))
305 #define TDQ_CPU(x) (DPCPU_ID_PTR((x), tdq))
308 static struct tdq tdq_cpu;
332 static struct thread *tdq_choose(struct tdq *);
333 static void tdq_setup(struct tdq *, int i);
334 static void tdq_load_add(struct tdq *, struct thread *);
335 static void tdq_load_rem(struct tdq *, struct thread *);
336 static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
337 static __inline void tdq_runq_rem(struct tdq *, struct thread *);
341 static int tdq_add(struct tdq *, struct thread *, int);
343 static int tdq_move(struct tdq *, struct tdq *);
344 static int tdq_idled(struct tdq *);
345 static void tdq_notify(struct tdq *, int lowpri);
346 static struct thread *tdq_steal(struct tdq *, int);
350 static bool sched_balance_pair(struct tdq *, struct tdq *);
351 static inline struct tdq *sched_setcpu(struct thread *, int, int);
417 struct tdq *tdq;
419 tdq = TDQ_CPU(cpu);
421 printf("tdq %d:\n", TDQ_ID(tdq));
422 printf("\tlock %p\n", TDQ_LOCKPTR(tdq));
423 printf("\tLock name: %s\n", tdq->tdq_name);
424 printf("\tload: %d\n", tdq->tdq_load);
425 printf("\tswitch cnt: %d\n", tdq->tdq_switchcnt);
426 printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
427 printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
428 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
429 printf("\tload transferable: %d\n", tdq->tdq_transferable);
430 printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
432 runq_print(&tdq->tdq_realtime);
434 runq_print(&tdq->tdq_timeshare);
436 runq_print(&tdq->tdq_idle);
478 tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
483 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
490 tdq->tdq_transferable++;
494 ts->ts_runq = &tdq->tdq_realtime;
496 ts->ts_runq = &tdq->tdq_timeshare;
505 pri = (pri + tdq->tdq_idx) % RQ_NQS;
511 if (tdq->tdq_ridx != tdq->tdq_idx &&
512 pri == tdq->tdq_ridx)
515 pri = tdq->tdq_ridx;
519 ts->ts_runq = &tdq->tdq_idle;
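
tdq_runq_add() (lines 478-519 above) picks one of three run queues: higher priorities go to tdq_realtime, batch timeshare priorities to the circular "calendar" queue tdq_timeshare, and idle priorities to tdq_idle. The timeshare case is the interesting one: the bucket is computed relative to the rotating head, pri = (pri + tdq_idx) % RQ_NQS (line 505), so lower priority lands farther ahead of the head, and inserts that would land on the still-draining tail slot are special-cased (lines 511-515). sched_clock() advances the head and tail each tick (lines 2598-2601 below). A toy, self-contained model of that circular calendar queue, with invented names:

/*
 * Toy model of a circular "calendar" run queue -- invented names, not the
 * kernel code.  Each bucket holds a count instead of a queue of threads.
 */
#include <stdio.h>

#define NQ 64                   /* number of buckets; RQ_NQS plays this role */

struct calendar {
        int count[NQ];          /* threads queued in each bucket */
        int idx;                /* head: bucket inserts are measured from */
        int ridx;               /* tail: oldest bucket still being drained */
};

/*
 * Queue a thread "distance" buckets ahead of the head (larger distance for
 * lower priority).  If that lands on the draining tail while the tail lags
 * the head, step back one bucket so the tail can empty out.
 */
static void
calendar_insert(struct calendar *c, int distance)
{
        int b;

        b = (c->idx + distance) % NQ;
        if (c->ridx != c->idx && b == c->ridx)
                b = (b + NQ - 1) % NQ;
        c->count[b]++;
}

/*
 * Per-tick maintenance: once the tail has caught the head, advance the
 * head, and let the tail follow as soon as its bucket is empty.
 */
static void
calendar_tick(struct calendar *c)
{
        if (c->idx == c->ridx) {
                c->idx = (c->idx + 1) % NQ;
                if (c->count[c->ridx] == 0)
                        c->ridx = c->idx;
        }
}

int
main(void)
{
        struct calendar c = { .idx = 0, .ridx = 0 };

        calendar_insert(&c, 10);
        calendar_tick(&c);
        printf("head=%d tail=%d buckets[10]=%d\n", c.idx, c.ridx, c.count[10]);
        return (0);
}

Selection always starts from tdq_ridx (runq_choose_from() at line 1465), so earlier, higher-priority buckets drain first and every queued thread is reached within at most one rotation.
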
529 tdq_runq_rem(struct tdq *tdq, struct thread *td)
534 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
539 tdq->tdq_transferable--;
542 if (ts->ts_runq == &tdq->tdq_timeshare) {
543 if (tdq->tdq_idx != tdq->tdq_ridx)
544 runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
556 tdq_load_add(struct tdq *tdq, struct thread *td)
559 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
562 tdq->tdq_load++;
564 tdq->tdq_sysload++;
565 KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
566 SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
574 tdq_load_rem(struct tdq *tdq, struct thread *td)
577 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
579 KASSERT(tdq->tdq_load != 0,
580 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
582 tdq->tdq_load--;
584 tdq->tdq_sysload--;
585 KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
586 SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
596 tdq_slice(struct tdq *tdq)
605 load = tdq->tdq_sysload - 1;
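
tdq_slice() (lines 596-605) computes how long a timeshare thread may run before sched_clock() forces a switch (line 2623 compares ts_slice against td_slice(), and sched_fork_thread() subtracts sched_slice_min from it at line 2447). Only one line of its body matches here, but the visible load = tdq->tdq_sysload - 1 is the key input: the busier the queue, the shorter the slice, with, as I read it, a floor around sched_slice_min. A hedged sketch of that shape -- the constants and the exact clamping are assumptions for illustration, not a copy of the kernel function:

/*
 * Hedged sketch of load-scaled time slices -- sched_slice and
 * sched_slice_min exist in this file, but this formula is an assumption.
 */
#include <stdio.h>

static int sched_slice = 10;            /* base slice, in ticks */
static int sched_slice_min = 1;         /* never hand out less than this */

static int
slice_for_load(int sysload)
{
        int load = sysload - 1;         /* exclude the thread asking */

        if (load <= 1)
                return (sched_slice);   /* lightly loaded: full slice */
        if (sched_slice / load < sched_slice_min)
                return (sched_slice_min);
        return (sched_slice / load);    /* divide the base slice by the load */
}

int
main(void)
{
        int l;

        for (l = 1; l <= 16; l *= 2)
                printf("load %2d -> slice %d\n", l, slice_for_load(l));
        return (0);
}

The point is simply that slices shrink as runnable load grows, so round-robin latency among timeshare threads stays roughly bounded.
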
618 tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
622 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
624 ctd = tdq->tdq_curthread;
625 td = tdq_choose(tdq);
627 tdq->tdq_lowpri = ctd->td_priority;
629 tdq->tdq_lowpri = td->td_priority;
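
tdq_lowpri caches the best (numerically lowest) priority present on a queue so remote CPUs can test it with a single lockless load (lines 731, 1363, 1442) instead of walking run queues. Maintaining such a cache follows the pattern visible here: a cheap update when something better arrives (lines 2742-2744, 1889-1890) and a full recomputation via tdq_setlowpri() only when the cached best may have gone away (lines 1891-1892, 2832-2833). A small standalone model of that cached-minimum bookkeeping, with invented names:

/*
 * Toy cached-minimum bookkeeping -- invented names, not kernel code.
 * count[] stands in for the run queues; lowpri is the cached best.
 */
#include <stdio.h>

#define NPRI 256                        /* priority space; lower is better */

struct prio_cache {
        int count[NPRI];
        int lowpri;                     /* NPRI means "nothing queued" */
};

static void
prio_add(struct prio_cache *pc, int pri)
{
        pc->count[pri]++;
        if (pri < pc->lowpri)           /* cheap path: strictly better arrival */
                pc->lowpri = pri;
}

static void
prio_remove(struct prio_cache *pc, int pri)
{
        int i;

        pc->count[pri]--;
        if (pri != pc->lowpri || pc->count[pri] > 0)
                return;                 /* cache still valid */
        for (i = pri; i < NPRI; i++)    /* rescan, as tdq_setlowpri() must */
                if (pc->count[i] > 0)
                        break;
        pc->lowpri = i;                 /* NPRI if the queue emptied */
}

int
main(void)
{
        struct prio_cache pc = { .lowpri = NPRI };

        prio_add(&pc, 120);
        prio_add(&pc, 80);
        prio_remove(&pc, 80);
        printf("cached lowpri: %d\n", pc.lowpri);       /* 120 */
        return (0);
}

The kernel version additionally folds in the running thread's priority (tdq_curthread at line 624); this model only tracks queued entries.
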
677 struct tdq *tdq;
714 tdq = TDQ_CPU(c);
715 l = TDQ_LOAD(tdq);
731 (atomic_load_char(&tdq->tdq_lowpri) <= s->cs_pri &&
759 struct tdq *tdq;
785 tdq = TDQ_CPU(c);
786 l = TDQ_LOAD(tdq);
793 if (l < s->cs_load || TDQ_TRANSFERABLE(tdq) < s->cs_trans ||
848 struct tdq *tdq;
864 tdq = TDQ_CPU(high);
865 if (TDQ_LOAD(tdq) == 1) {
870 TDQ_LOCK(tdq);
871 td = tdq->tdq_curthread;
872 if (td->td_lock == TDQ_LOCKPTR(tdq) &&
880 TDQ_UNLOCK(tdq);
885 if (TDQ_TRANSFERABLE(tdq) == 0)
887 low = sched_lowest(cg, &lmask, -1, TDQ_LOAD(tdq) - 1, high, 1);
895 if (sched_balance_pair(tdq, TDQ_CPU(low))) {
914 struct tdq *tdq;
918 tdq = TDQ_SELF();
919 TDQ_UNLOCK(tdq);
921 TDQ_LOCK(tdq);
928 tdq_lock_pair(struct tdq *one, struct tdq *two)
943 tdq_unlock_pair(struct tdq *one, struct tdq *two)
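
sched_balance_pair() (line 954) and tdq_move() (line 993) need both queues' spin locks at once, which is what tdq_lock_pair()/tdq_unlock_pair() (lines 928, 943) provide. Taking two locks of the same class safely requires a deterministic order; a common way to get one, and as far as I recall what this code does, is to order the two tdqs by address (or ID) so every CPU acquires them in the same sequence and an ABBA deadlock cannot form. A minimal pthread sketch of that discipline, with invented names:

/*
 * Address-ordered acquisition of a pair of locks -- a sketch of the
 * discipline, not the kernel's tdq_lock_pair() itself.
 */
#include <pthread.h>
#include <stdio.h>

struct cpu_queue {
        pthread_mutex_t lock;
        int load;
};

static void
queue_lock_pair(struct cpu_queue *one, struct cpu_queue *two)
{
        if (one < two) {
                pthread_mutex_lock(&one->lock);
                pthread_mutex_lock(&two->lock);
        } else {
                pthread_mutex_lock(&two->lock);
                pthread_mutex_lock(&one->lock);
        }
}

static void
queue_unlock_pair(struct cpu_queue *one, struct cpu_queue *two)
{
        /* Unlock order does not matter for correctness. */
        pthread_mutex_unlock(&one->lock);
        pthread_mutex_unlock(&two->lock);
}

int
main(void)
{
        static struct cpu_queue a = { PTHREAD_MUTEX_INITIALIZER, 3 };
        static struct cpu_queue b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        queue_lock_pair(&a, &b);        /* same order no matter who calls */
        b.load++, a.load--;             /* "migrate" one unit of load */
        queue_unlock_pair(&a, &b);
        printf("a=%d b=%d\n", a.load, b.load);
        return (0);
}

While both locks are held the balancer can move a transferable thread from the busy queue to the idle one; the switchcnt re-check at line 1106 then guards against the situation having changed while the locks were being taken.
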
954 sched_balance_pair(struct tdq *high, struct tdq *low)
993 tdq_move(struct tdq *from, struct tdq *to)
1019 * This tdq has idled. Try to steal a thread from another cpu and switch
1023 tdq_idled(struct tdq *tdq)
1026 struct tdq *steal;
1030 if (smp_started == 0 || steal_idle == 0 || tdq->tdq_cg == NULL)
1035 switchcnt = TDQ_SWITCHCNT(tdq);
1036 for (cg = tdq->tdq_cg, goup = 0; ; ) {
1042 if (TDQ_LOAD(tdq))
1087 TDQ_LOCK(tdq);
1088 if (tdq->tdq_load > 0) {
1093 TDQ_UNLOCK(tdq);
1106 switchcnt != TDQ_SWITCHCNT(tdq)) {
1107 tdq_unlock_pair(tdq, steal);
1113 if (tdq_move(steal, tdq) != -1)
1124 tdq_unlock_pair(tdq, steal);
1138 tdq_notify(struct tdq *tdq, int lowpri)
1142 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1143 KASSERT(tdq->tdq_lowpri <= lowpri,
1144 ("tdq_notify: lowpri %d > tdq_lowpri %d", lowpri, tdq->tdq_lowpri));
1146 if (tdq->tdq_owepreempt)
1153 if (!sched_shouldpreempt(tdq->tdq_lowpri, lowpri, 1))
1168 cpu = TDQ_ID(tdq);
1169 if (TD_IS_IDLETHREAD(tdq->tdq_curthread) &&
1170 (atomic_load_int(&tdq->tdq_cpu_idle) == 0 || cpu_idle_wakeup(cpu)))
1177 tdq->tdq_owepreempt = 1;
1261 tdq_steal(struct tdq *tdq, int cpu)
1265 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1266 if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
1268 if ((td = runq_steal_from(&tdq->tdq_timeshare,
1269 cpu, tdq->tdq_ridx)) != NULL)
1271 return (runq_steal(&tdq->tdq_idle, cpu));
1278 static inline struct tdq *
1282 struct tdq *tdq;
1286 tdq = TDQ_CPU(cpu);
1291 if (td->td_lock == TDQ_LOCKPTR(tdq)) {
1294 return (tdq);
1305 TDQ_LOCK(tdq);
1306 thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1308 return (tdq);
1323 struct tdq *tdq;
1344 tdq = TDQ_SELF();
1345 if (tdq->tdq_lowpri >= PRI_MIN_IDLE) {
1351 cg = tdq->tdq_cg;
1355 tdq = TDQ_CPU(ts->ts_cpu);
1356 cg = tdq->tdq_cg;
1363 atomic_load_char(&tdq->tdq_lowpri) >= PRI_MIN_IDLE &&
1440 tdq = TDQ_CPU(cpu);
1442 atomic_load_char(&tdq->tdq_lowpri) < PRI_MIN_IDLE &&
1443 TDQ_LOAD(TDQ_SELF()) <= TDQ_LOAD(tdq) + 1) {
1457 tdq_choose(struct tdq *tdq)
1461 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1462 td = runq_choose(&tdq->tdq_realtime);
1465 td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1472 td = runq_choose(&tdq->tdq_idle);
1487 tdq_setup(struct tdq *tdq, int id)
1492 runq_init(&tdq->tdq_realtime);
1493 runq_init(&tdq->tdq_timeshare);
1494 runq_init(&tdq->tdq_idle);
1495 tdq->tdq_id = id;
1496 snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
1497 "sched lock %d", (int)TDQ_ID(tdq));
1498 mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", MTX_SPIN);
1500 snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
1501 "CPU %d load", (int)TDQ_ID(tdq));
1509 struct tdq *tdq;
1514 tdq = DPCPU_ID_PTR(i, tdq);
1515 tdq_setup(tdq, i);
1516 tdq->tdq_cg = smp_topo_find(cpu_top, i);
1517 if (tdq->tdq_cg == NULL)
1521 PCPU_SET(sched, DPCPU_PTR(tdq));
1533 struct tdq *tdq;
1540 tdq = TDQ_SELF();
1543 TDQ_LOCK(tdq);
1544 thread0.td_lock = TDQ_LOCKPTR(tdq);
1545 tdq_load_add(tdq, &thread0);
1546 tdq->tdq_curthread = &thread0;
1547 tdq->tdq_lowpri = thread0.td_priority;
1548 TDQ_UNLOCK(tdq);
1798 PCPU_SET(sched, DPCPU_PTR(tdq));
1852 struct tdq *tdq;
1886 tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
1889 if (prio < tdq->tdq_lowpri)
1890 tdq->tdq_lowpri = prio;
1891 else if (tdq->tdq_lowpri == oldpri)
1892 tdq_setlowpri(tdq, td);
2020 * This tdq is about to idle. Try to steal a thread from another CPU before
2024 tdq_trysteal(struct tdq *tdq)
2027 struct tdq *steal;
2032 tdq->tdq_cg == NULL)
2038 TDQ_UNLOCK(tdq);
2039 for (i = 1, cg = tdq->tdq_cg, goup = 0; ; ) {
2045 if (TDQ_LOAD(tdq) > 0) {
2046 TDQ_LOCK(tdq);
2062 TDQ_LOCK(tdq);
2067 TDQ_LOCK(tdq);
2096 TDQ_LOCK(tdq);
2097 if (tdq->tdq_load > 0)
2115 if (tdq_move(steal, tdq) == -1) {
2131 sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
2133 struct tdq *tdn;
2146 tdq_load_rem(tdq, td);
2152 TDQ_UNLOCK(tdq);
2157 TDQ_LOCK(tdq);
2182 struct tdq *tdq;
2194 tdq = TDQ_SELF();
2210 atomic_store_char(&tdq->tdq_owepreempt, 0);
2212 TDQ_SWITCHCNT_INC(tdq);
2215 * Always block the thread lock so we can drop the tdq lock early.
2220 MPASS(mtx == TDQ_LOCKPTR(tdq));
2223 MPASS(mtx == TDQ_LOCKPTR(tdq));
2232 tdq_runq_add(tdq, td, srqflag);
2234 mtx = sched_switch_migrate(tdq, td, srqflag);
2237 if (mtx != TDQ_LOCKPTR(tdq)) {
2239 TDQ_LOCK(tdq);
2241 tdq_load_rem(tdq, td);
2243 if (tdq->tdq_load == 0)
2244 tdq_trysteal(tdq);
2263 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2264 MPASS(td == tdq->tdq_curthread);
2267 TDQ_UNLOCK(tdq);
2415 struct tdq *tdq;
2417 tdq = TDQ_SELF();
2426 child->td_lock = TDQ_LOCKPTR(tdq);
2447 ts2->ts_slice = tdq_slice(tdq) - sched_slice_min;
2508 struct tdq *tdq;
2514 tdq = TDQ_SELF();
2515 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2516 if (td->td_priority > tdq->tdq_lowpri) {
2527 tdq->tdq_owepreempt = 0;
2553 * sched_slice. For other threads it is tdq_slice(tdq).
2556 td_slice(struct thread *td, struct tdq *tdq)
2560 return (tdq_slice(tdq));
2570 struct tdq *tdq;
2574 tdq = TDQ_SELF();
2579 if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 &&
2591 tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
2592 tdq->tdq_switchcnt = tdq->tdq_load;
2598 if (tdq->tdq_idx == tdq->tdq_ridx) {
2599 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2600 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2601 tdq->tdq_ridx = tdq->tdq_idx;
2623 if (ts->ts_slice >= td_slice(td, tdq)) {
2658 struct tdq *tdq;
2663 tdq = TDQ_SELF();
2665 if (TDQ_LOAD(tdq) > 0)
2668 if (TDQ_LOAD(tdq) - 1 > 0)
2683 struct tdq *tdq;
2685 tdq = TDQ_SELF();
2686 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2687 td = tdq_choose(tdq);
2689 tdq_runq_rem(tdq, td);
2690 tdq->tdq_lowpri = td->td_priority;
2692 tdq->tdq_lowpri = PRI_MAX_IDLE;
2695 tdq->tdq_curthread = td;
2725 * thread to it. This is the internal function called when the tdq is
2729 tdq_add(struct tdq *tdq, struct thread *td, int flags)
2733 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2742 lowpri = tdq->tdq_lowpri;
2744 tdq->tdq_lowpri = td->td_priority;
2745 tdq_runq_add(tdq, td, flags);
2746 tdq_load_add(tdq, td);
2759 struct tdq *tdq;
2784 tdq = sched_setcpu(td, cpu, flags);
2785 lowpri = tdq_add(tdq, td, flags);
2787 tdq_notify(tdq, lowpri);
2791 tdq = TDQ_SELF();
2796 if (td->td_lock != TDQ_LOCKPTR(tdq)) {
2797 TDQ_LOCK(tdq);
2799 td->td_lock = TDQ_LOCKPTR(tdq);
2801 thread_lock_set(td, TDQ_LOCKPTR(tdq));
2803 (void)tdq_add(tdq, td, flags);
2819 struct tdq *tdq;
2824 tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
2825 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2826 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2829 tdq_runq_rem(tdq, td);
2830 tdq_load_rem(tdq, td);
2832 if (td->td_priority == tdq->tdq_lowpri)
2833 tdq_setlowpri(tdq, NULL);
2983 #define TDQ_IDLESPIN(tdq) \
2984 ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0)
2986 #define TDQ_IDLESPIN(tdq) 1
2996 struct tdq *tdq;
3002 tdq = TDQ_SELF();
3006 if (TDQ_LOAD(tdq)) {
3010 switchcnt = TDQ_SWITCHCNT(tdq);
3014 if (tdq_idled(tdq) == 0)
3017 switchcnt = TDQ_SWITCHCNT(tdq);
3028 if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) {
3030 if (TDQ_LOAD(tdq))
3037 switchcnt = TDQ_SWITCHCNT(tdq);
3038 if (TDQ_LOAD(tdq) != 0 || switchcnt != oldswitchcnt)
3042 atomic_store_int(&tdq->tdq_cpu_idle, 1);
3054 if (TDQ_LOAD(tdq) != 0) {
3055 atomic_store_int(&tdq->tdq_cpu_idle, 0);
3059 atomic_store_int(&tdq->tdq_cpu_idle, 0);
3065 switchcnt = TDQ_SWITCHCNT(tdq);
3068 TDQ_SWITCHCNT_INC(tdq);
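
The idle loop (lines 2996-3068) and the wakeup path tdq_notify() (lines 1138-1177) coordinate through tdq_cpu_idle and tdq_load without holding each other's locks: the idle CPU publishes tdq_cpu_idle = 1 and re-checks its load before really sleeping (lines 3042-3059), while the waking CPU adds the load first and only then looks at tdq_cpu_idle to decide whether an IPI is needed (lines 1169-1170). Before that, the loop also spins briefly when recent switch activity (TDQ_SWITCHCNT(), decayed each tick at lines 2591-2592) suggests work will appear soon. As long as each side's store is ordered before its subsequent load, at least one side sees the other's update and no wakeup is lost. A userland model of that flag-then-recheck handshake using seq_cst atomics (the kernel gets its ordering from its own primitives; this is an illustration, not the kernel code):

/*
 * Flag-then-recheck handshake -- an illustration with C11 seq_cst atomics,
 * not the kernel's implementation.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int load;         /* work available on this "CPU" */
static atomic_int cpu_idle;     /* set while the "CPU" intends to sleep */

/* Idle side: announce the intent to sleep, then look for work once more. */
static int
idle_should_sleep(void)
{
        atomic_store(&cpu_idle, 1);
        if (atomic_load(&load) != 0) {
                atomic_store(&cpu_idle, 0);     /* work slipped in: stay awake */
                return (0);
        }
        return (1);
}

/* Waking side: publish the work, then decide whether a kick is needed. */
static int
enqueue_needs_wakeup(void)
{
        atomic_fetch_add(&load, 1);
        return (atomic_load(&cpu_idle) != 0);   /* idle and may be sleeping */
}

int
main(void)
{
        printf("needs wakeup: %d\n", enqueue_needs_wakeup());
        printf("should sleep: %d\n", idle_should_sleep());
        return (0);
}

Sequentially consistent stores and loads give the store-load ordering both sides rely on; with weaker orderings each side could miss the other's update and the wakeup would be lost.
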
3075 * next. It returns with the tdq lock dropped in a spinlock section to
3080 sched_throw_grab(struct tdq *tdq)
3086 TDQ_UNLOCK(tdq);
3099 struct tdq *tdq;
3101 tdq = TDQ_SELF();
3104 THREAD_LOCKPTR_ASSERT(curthread, TDQ_LOCKPTR(tdq));
3106 TDQ_LOCK(tdq);
3112 newtd = sched_throw_grab(tdq);
3125 struct tdq *tdq;
3127 tdq = TDQ_SELF();
3131 THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
3133 tdq_load_rem(tdq, td);
3138 newtd = sched_throw_grab(tdq);
3141 cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
3151 struct tdq *tdq;
3161 tdq = TDQ_SELF();
3162 TDQ_LOCK(tdq);
3164 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));