Lines Matching defs:scx
295 rcu_assign_pointer(p->scx.sched, sch);
432 if (rq->scx.flags & SCX_RQ_IN_BALANCE)
453 * This allows kfuncs to safely operate on rq from any scx ops callback,
504 * and records them in current->scx.kf_tasks[] for the duration of the call. A
520 WARN_ON_ONCE(current->scx.kf_tasks[0]); \
521 current->scx.kf_tasks[0] = task; \
523 current->scx.kf_tasks[0] = NULL; \
529 WARN_ON_ONCE(current->scx.kf_tasks[0]); \
530 current->scx.kf_tasks[0] = task; \
532 current->scx.kf_tasks[0] = NULL; \
539 WARN_ON_ONCE(current->scx.kf_tasks[0]); \
540 current->scx.kf_tasks[0] = task0; \
541 current->scx.kf_tasks[1] = task1; \
543 current->scx.kf_tasks[0] = NULL; \
544 current->scx.kf_tasks[1] = NULL; \
552 if (unlikely((p != current->scx.kf_tasks[0] &&
553 p != current->scx.kf_tasks[1]))) {
591 list_node = &cur->scx.dsq_list.node;
609 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
641 p = container_of(cursor, struct task_struct, scx.dsq_list);
646 } while (p && unlikely(u32_before(cursor->priv, p->scx.dsq_seq)));
650 list_move_tail(&cursor->node, &p->scx.dsq_list.node);
652 list_move(&cursor->node, &p->scx.dsq_list.node);
684 if (unlikely(p->scx.dsq != dsq ||
685 u32_before(cursor->priv, p->scx.dsq_seq) ||
686 p->scx.holding_cpu >= 0))
716 return p->scx.flags & SCX_TASK_STATE_MASK;
733 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
755 p->scx.flags &= ~SCX_TASK_STATE_MASK;
756 p->scx.flags |= state;
927 return container_of(pos, struct task_struct, scx);
1080 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1137 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1166 irq_work_queue_on(&rq->scx.deferred_irq_work, cpu_of(rq));
1185 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1189 if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
1203 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1204 rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
1229 rq = container_of(dsq, struct rq, scx.local_dsq);
1243 guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
1246 list_move_tail(&drl->node, &rq->scx.deferred_reenq_locals);
1264 guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
1267 list_move_tail(&dru->node, &rq->scx.deferred_reenq_users);
1288 schedule_dsq_reenq(root, &rq->scx.local_dsq, reenq_flags, rq);
1296 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1314 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1324 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1326 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1347 if (curr->scx.slice != SCX_SLICE_INF) {
1348 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1349 if (!curr->scx.slice)
1360 container_of(node_a, struct task_struct, scx.dsq_priq);
1362 container_of(node_b, struct task_struct, scx.dsq_priq);
1364 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1385 p->scx.flags |= SCX_TASK_IMMED;
1388 if (p->scx.flags & SCX_TASK_IMMED) {
1389 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1394 rq->scx.nr_immed++;
1410 if (p->scx.flags & SCX_TASK_IMMED) {
1411 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1414 WARN_ON_ONCE(rq->scx.nr_immed <= 0))
1417 rq->scx.nr_immed--;
1423 p->scx.slice = READ_ONCE(sch->slice_dfl);
1439 return p->scx.sticky_cpu >= 0;
1449 if (!(p->scx.flags & SCX_TASK_IN_CUSTODY) || task_scx_migrating(p))
1455 p->scx.flags &= ~SCX_TASK_IN_CUSTODY;
1461 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1508 if (rq->scx.flags & SCX_RQ_IN_BALANCE)
1513 rq->curr->scx.slice = 0;
1524 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1525 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1526 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1567 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1568 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1574 rbp = rb_prev(&p->scx.dsq_priq);
1578 scx.dsq_priq);
1579 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1582 list_add(&p->scx.dsq_list.node, &dsq->list);
1593 list_add(&p->scx.dsq_list.node, &dsq->list);
1603 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1611 p->scx.dsq_seq = dsq->seq;
1614 p->scx.dsq = dsq;
1619 * and dequeue_task_scx() will RMW p->scx.flags. If we clear
1620 * ops_state first, both sides would modify p->scx.flags
1633 p->scx.flags |= SCX_TASK_IN_CUSTODY;
1643 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1649 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1651 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1652 rb_erase(&p->scx.dsq_priq, &dsq->priq);
1653 RB_CLEAR_NODE(&p->scx.dsq_priq);
1654 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1657 list_del_init(&p->scx.dsq_list.node);
1670 struct scx_dispatch_q *dsq = p->scx.dsq;
1671 bool is_local = dsq == &rq->scx.local_dsq;
1680 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1681 list_del_init(&p->scx.dsq_list.node);
1686 * @p->scx.holding_cpu may be set under the protection of
1689 if (p->scx.holding_cpu >= 0)
1690 p->scx.holding_cpu = -1;
1699 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1702 if (p->scx.holding_cpu < 0) {
1708 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1712 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1713 p->scx.holding_cpu = -1;
1715 p->scx.dsq = NULL;
1732 p->scx.dsq = NULL;
1742 return &rq->scx.local_dsq;
1750 return &cpu_rq(cpu)->scx.local_dsq;
1790 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1791 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1793 p->scx.ddsp_dsq_id = dsq_id;
1794 p->scx.ddsp_enq_flags = enq_flags;
1814 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1815 p->scx.ddsp_enq_flags = 0;
1823 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, task_cpu(p));
1828 p->scx.ddsp_enq_flags |= enq_flags;
1836 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1839 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1849 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1854 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1858 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1859 list_add_tail(&p->scx.dsq_list.node,
1860 &rq->scx.ddsp_deferred_locals);
1865 ddsp_enq_flags = p->scx.ddsp_enq_flags;
1880 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1891 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1903 p->scx.flags &= ~SCX_TASK_IMMED;
1918 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1939 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1941 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1942 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1951 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1958 p->scx.flags |= SCX_TASK_IN_CUSTODY;
1964 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
1971 dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, enq_flags);
1974 dsq = &rq->scx.local_dsq;
1997 return !list_empty(&p->scx.runnable_node);
2004 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2005 p->scx.runnable_at = jiffies;
2006 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2013 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2018 list_del_init(&p->scx.runnable_node);
2020 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2026 int sticky_cpu = p->scx.sticky_cpu;
2027 u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
2030 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2041 if (p->scx.flags & SCX_TASK_QUEUED) {
2047 p->scx.flags |= SCX_TASK_QUEUED;
2048 rq->scx.nr_running++;
2058 if (rq->scx.nr_running == 1)
2064 p->scx.sticky_cpu = -1;
2066 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2069 unlikely(cpu_of(rq) != p->scx.selected_cpu))
2082 opss = atomic_long_read_acquire(&p->scx.ops_state);
2095 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_IN_CUSTODY));
2096 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2115 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2148 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2176 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2178 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2180 p->scx.flags &= ~SCX_TASK_QUEUED;
2181 rq->scx.nr_running--;
2197 p->scx.slice = 0;
2231 if (rq->scx.nr_immed)
2240 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2246 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2249 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2251 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2254 p->scx.dsq = dst_dsq;
2277 p->scx.sticky_cpu = cpu_of(dst_rq);
2285 * We want to pass scx-specific enq_flags but activate_task() will
2287 * @rq->scx.extra_enq_flags instead.
2290 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2291 dst_rq->scx.extra_enq_flags = enq_flags;
2293 dst_rq->scx.extra_enq_flags = 0;
2374 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2382 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2401 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2403 p->scx.holding_cpu = cpu;
2409 return likely(p->scx.holding_cpu == cpu) &&
2457 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2573 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2606 p->scx.holding_cpu = raw_smp_processor_id();
2609 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2619 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2627 p->scx.holding_cpu = -1;
2628 dispatch_enqueue(sch, dst_rq, &dst_rq->scx.local_dsq, p,
2682 opss = atomic_long_read(&p->scx.ops_state);
2711 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2726 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2756 if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
2759 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
2762 rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
2830 rq->scx.sub_dispatch_prev = prev;
2832 rq->scx.sub_dispatch_prev = NULL;
2837 if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice) {
2838 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2841 if (rq->scx.local_dsq.nr)
2878 rq->scx.flags |= SCX_RQ_IN_BALANCE;
2879 rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
2882 unlikely(rq->scx.cpu_released)) {
2891 rq->scx.cpu_released = false;
2907 if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice &&
2909 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2915 if (rq->scx.local_dsq.nr)
2925 if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2927 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2931 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2945 if (unlikely(rq->scx.local_dsq.nr > 1 && rq->scx.nr_immed))
2948 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2956 if (p->scx.flags & SCX_TASK_QUEUED) {
2968 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
2977 if ((p->scx.slice == SCX_SLICE_INF) !=
2978 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2979 if (p->scx.slice == SCX_SLICE_INF)
2980 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2982 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
3034 if (!rq->scx.cpu_released) {
3043 rq->scx.cpu_released = true;
3053 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3058 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3061 if (p->scx.flags & SCX_TASK_QUEUED) {
3071 if (p->scx.slice && !scx_bypassing(sch, cpu_of(rq))) {
3072 if (p->scx.flags & SCX_TASK_IMMED) {
3073 p->scx.flags |= SCX_TASK_REENQ_PREEMPTED;
3075 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
3077 dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3119 for_each_cpu(cpu, rq->scx.cpus_to_sync) {
3125 smp_load_acquire(&cpu_rq(cpu)->scx.kick_sync) != ksyncs[cpu]) {
3126 cpumask_clear_cpu(cpu, rq->scx.cpus_to_sync);
3131 while (READ_ONCE(cpu_rq(cpu)->scx.kick_sync) == ksyncs[cpu]) {
3132 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3145 return list_first_entry_or_null(&rq->scx.local_dsq.list,
3146 struct task_struct, scx.dsq_list.node);
3157 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3171 if (unlikely(rq->scx.kick_sync_pending)) {
3172 rq->scx.kick_sync_pending = false;
3173 queue_balance_callback(rq, &rq->scx.kick_sync_bal_cb,
3188 keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3202 if (!p->scx.slice)
3209 if (unlikely(!p->scx.slice)) {
3268 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3273 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3294 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3325 this_rq()->scx.in_select_cpu = true;
3327 this_rq()->scx.in_select_cpu = false;
3328 p->scx.selected_cpu = cpu;
3340 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3344 p->scx.selected_cpu = cpu;
3420 rq->scx.flags |= SCX_RQ_ONLINE;
3425 rq->scx.flags &= ~SCX_RQ_ONLINE;
3440 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3442 unsigned long last_runnable = p->scx.runnable_at;
3515 curr->scx.slice = 0;
3521 if (!curr->scx.slice)
3551 p->scx.disallow = false;
3566 if (p->scx.disallow) {
3568 scx_error(sch, "non-root ops.init_task() set task->scx.disallow for %s[%d]",
3571 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
3610 WARN_ON_ONCE(p->scx.flags & SCX_TASK_IN_CUSTODY);
3621 p->scx.weight = sched_weight_to_cgroup(weight);
3627 SCX_CALL_OP_TASK(sch, set_weight, rq, p, p->scx.weight);
3654 WARN_ON_ONCE(p->scx.flags & SCX_TASK_IN_CUSTODY);
3715 if (p->scx.flags & SCX_TASK_SUB_INIT) {
3718 p->scx.flags &= ~SCX_TASK_SUB_INIT;
3725 void init_scx_entity(struct sched_ext_entity *scx)
3727 memset(scx, 0, sizeof(*scx));
3728 INIT_LIST_HEAD(&scx->dsq_list.node);
3729 RB_CLEAR_NODE(&scx->dsq_priq);
3730 scx->sticky_cpu = -1;
3731 scx->holding_cpu = -1;
3732 INIT_LIST_HEAD(&scx->runnable_node);
3733 scx->runnable_at = jiffies;
3734 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3735 scx->slice = SCX_SLICE_DFL;
3795 list_add_tail(&p->scx.tasks_node, &scx_tasks);
3856 list_del_init(&p->scx.tasks_node);
3894 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3896 SCX_CALL_OP_TASK(sch, set_weight, rq, p, p->scx.weight);
3945 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3965 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
3966 struct task_struct, scx.dsq_list.node))) {
3969 u64 dsq_id = p->scx.ddsp_dsq_id;
3970 u64 enq_flags = p->scx.ddsp_enq_flags;
3972 list_del_init(&p->scx.dsq_list.node);
4013 if ((p->scx.flags & SCX_TASK_IMMED) &&
4038 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
4041 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
4042 scx.dsq_list.node) {
4068 if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
4069 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4070 p->scx.flags |= reason;
4072 list_add_tail(&p->scx.dsq_list.node, &tasks);
4075 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
4076 list_del_init(&p->scx.dsq_list.node);
4080 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4089 u64 seq = ++rq->scx.deferred_reenq_locals_seq;
4098 scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
4100 list_first_entry_or_null(&rq->scx.deferred_reenq_locals,
4189 if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
4190 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4191 p->scx.flags |= reason;
4195 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4224 scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
4226 list_first_entry_or_null(&rq->scx.deferred_reenq_users,
4254 if (!list_empty(&rq->scx.deferred_reenq_locals))
4257 if (!list_empty(&rq->scx.deferred_reenq_users))
4278 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4289 tg->scx.weight = CGROUP_WEIGHT_DFL;
4290 tg->scx.bw_period_us = default_bw_period_us();
4291 tg->scx.bw_quota_us = RUNTIME_INF;
4292 tg->scx.idle = false;
4300 WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4305 { .weight = tg->scx.weight,
4306 .bw_period_us = tg->scx.bw_period_us,
4307 .bw_quota_us = tg->scx.bw_quota_us,
4308 .bw_burst_us = tg->scx.bw_burst_us };
4316 tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4318 tg->scx.flags |= SCX_TG_ONLINE;
4328 WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE));
4331 (tg->scx.flags & SCX_TG_INITED))
4333 tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4350 WARN_ON_ONCE(p->scx.cgrp_moving_from);
4367 p->scx.cgrp_moving_from = from;
4375 p->scx.cgrp_moving_from)
4377 p, p->scx.cgrp_moving_from, css->cgroup);
4378 p->scx.cgrp_moving_from = NULL;
4396 !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4398 p, p->scx.cgrp_moving_from,
4400 p->scx.cgrp_moving_from = NULL;
4414 p->scx.cgrp_moving_from)
4416 p, p->scx.cgrp_moving_from, css->cgroup);
4417 p->scx.cgrp_moving_from = NULL;
4429 tg->scx.weight != weight)
4432 tg->scx.weight = weight;
4448 tg->scx.idle = idle;
4462 (tg->scx.bw_period_us != period_us ||
4463 tg->scx.bw_quota_us != quota_us ||
4464 tg->scx.bw_burst_us != burst_us))
4468 tg->scx.bw_period_us = period_us;
4469 tg->scx.bw_quota_us = quota_us;
4470 tg->scx.bw_burst_us = burst_us;
4606 guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
4686 if (!(tg->scx.flags & SCX_TG_INITED))
4688 tg->scx.flags &= ~SCX_TG_INITED;
4709 .weight = tg->scx.weight,
4710 .bw_period_us = tg->scx.bw_period_us,
4711 .bw_quota_us = tg->scx.bw_quota_us,
4712 .bw_burst_us = tg->scx.bw_burst_us,
4715 if ((tg->scx.flags &
4720 tg->scx.flags |= SCX_TG_INITED;
4730 tg->scx.flags |= SCX_TG_INITED;
5115 n = container_of(&cursor, struct task_struct, scx.dsq_list);
5177 list_move_tail(&cursor.node, &n->scx.dsq_list.node);
5490 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
5491 scx.runnable_node) {
5953 * rq clocks from a previous scx scheduler.
5960 /* no task is on scx, turn off all the switches and flush in-progress calls */
6232 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
6243 if (p->scx.dsq)
6245 (unsigned long long)p->scx.dsq->id);
6251 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
6254 p->scx.flags & ~SCX_TASK_STATE_MASK,
6255 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
6258 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
6260 p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
6348 idle = list_empty(&rq->scx.runnable_list) &&
6365 cpu, rq->scx.nr_running, rq->scx.flags,
6366 rq->scx.cpu_released, rq->scx.ops_qseq,
6367 rq->scx.kick_sync);
6371 if (!cpumask_empty(rq->scx.cpus_to_kick))
6373 cpumask_pr_args(rq->scx.cpus_to_kick));
6374 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
6376 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
6377 if (!cpumask_empty(rq->scx.cpus_to_preempt))
6379 cpumask_pr_args(rq->scx.cpus_to_preempt));
6380 if (!cpumask_empty(rq->scx.cpus_to_wait))
6382 cpumask_pr_args(rq->scx.cpus_to_wait));
6383 if (!cpumask_empty(rq->scx.cpus_to_sync))
6385 cpumask_pr_args(rq->scx.cpus_to_sync));
6415 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
6843 rq->scx.local_dsq.sched = sch;
6844 rq->scx.cpuperf_target = SCX_CPUPERF_ONE;
7024 p->scx.slice = READ_ONCE(sch->slice_dfl);
7244 if (p->scx.flags & SCX_TASK_SUB_INIT)
7282 p->scx.flags |= SCX_TASK_SUB_INIT;
7299 if (!(p->scx.flags & SCX_TASK_SUB_INIT))
7319 p->scx.flags &= ~SCX_TASK_SUB_INIT;
7356 if (p->scx.flags & SCX_TASK_SUB_INIT) {
7358 p->scx.flags &= ~SCX_TASK_SUB_INIT;
7487 if ((off >= offsetof(struct task_struct, scx.slice) &&
7488 off + size <= offsetofend(struct task_struct, scx.slice)) ||
7489 (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
7490 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))) {
7491 pr_warn("sched_ext: Writing directly to p->scx.slice/dsq_vtime is deprecated, use scx_bpf_task_set_slice/dsq_vtime()");
7495 if (off >= offsetof(struct task_struct, scx.disallow) &&
7496 off + size <= offsetofend(struct task_struct, scx.disallow))
7809 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
7815 struct scx_rq *this_scx = &this_rq->scx;
7833 rq->curr->scx.slice = 0;
7840 ksyncs[cpu] = rq->scx.kick_sync;
7874 struct scx_rq *this_scx = &this_rq->scx;
7948 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
8014 BUG_ON(init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL, NULL));
8016 INIT_LIST_HEAD(&rq->scx.runnable_list);
8017 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
8019 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
8020 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
8021 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
8022 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
8023 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_sync, GFP_KERNEL, n));
8024 raw_spin_lock_init(&rq->scx.deferred_reenq_lock);
8025 INIT_LIST_HEAD(&rq->scx.deferred_reenq_locals);
8026 INIT_LIST_HEAD(&rq->scx.deferred_reenq_users);
8027 rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
8028 rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
8031 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
8110 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
8173 p->scx.slice = slice;
8175 p->scx.slice = p->scx.slice ?: 1;
8199 p->scx.slice = slice;
8201 p->scx.slice = p->scx.slice ?: 1;
8203 p->scx.dsq_vtime = vtime;
8354 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
8383 p->scx.dsq_vtime = kit->vtime;
8385 p->scx.slice = kit->slice;
8654 return scx_dispatch_sched(child, this_rq, this_rq->scx.sub_dispatch_prev,
8729 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
8819 p->scx.slice = slice;
8842 p->scx.dsq_vtime = vtime;
8884 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
8886 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
8889 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
8891 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
8894 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
8943 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
8949 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
9423 rq->scx.cpuperf_target = perf;
9613 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
9622 clock = READ_ONCE(rq->scx.clock);
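Several of the matches above (source lines 1360-1364) show scx_dsq_priq_less() ordering tasks on a DSQ priority queue by p->scx.dsq_vtime using time_before64(). Below is a minimal, self-contained userspace sketch (not from the kernel tree) of that wraparound-safe comparison; the vtime values and main() driver are illustrative assumptions, only the signed-difference trick mirrors what time_before64() does.

/*
 * Sketch of the wraparound-safe vtime ordering used by scx_dsq_priq_less()
 * (source line 1364 above).  time_before64(a, b) reduces to checking whether
 * the unsigned difference (a - b), reinterpreted as signed, is negative, which
 * stays correct even after the 64-bit vtime counter wraps.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Equivalent of time_before64(a, b): true if vtime @a is earlier than @b. */
static bool vtime_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;	/* vtime just before wraparound */
	uint64_t wrapped   = 10;		/* vtime just after wraparound */

	/* A naive "<" mis-orders the wrapped value; the signed diff does not. */
	printf("naive <:      %d\n", wrapped < near_wrap);          /* prints 1 */
	printf("vtime_before: %d\n", vtime_before(near_wrap, wrapped)); /* prints 1 */
	return 0;
}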