Lines Matching refs:scx

1258 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask, in scx_kf_allow()
1260 current->scx.kf_mask, mask); in scx_kf_allow()
1261 current->scx.kf_mask |= mask; in scx_kf_allow()
1268 current->scx.kf_mask &= ~mask; in scx_kf_disallow()
1338 current->scx.kf_tasks[0] = task; \
1340 current->scx.kf_tasks[0] = NULL; \
1347 current->scx.kf_tasks[0] = task; \
1349 current->scx.kf_tasks[0] = NULL; \
1357 current->scx.kf_tasks[0] = task0; \
1358 current->scx.kf_tasks[1] = task1; \
1360 current->scx.kf_tasks[0] = NULL; \
1361 current->scx.kf_tasks[1] = NULL; \
1368 if (unlikely(!(current->scx.kf_mask & mask))) { in scx_kf_allowed()
1370 mask, current->scx.kf_mask); in scx_kf_allowed()
1382 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) { in scx_kf_allowed()
1388 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) { in scx_kf_allowed()
1403 if (unlikely((p != current->scx.kf_tasks[0] && in scx_kf_allowed_on_arg_tasks()
1404 p != current->scx.kf_tasks[1]))) { in scx_kf_allowed_on_arg_tasks()
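
Editorial note: the scx_kf_allow()/scx_kf_disallow() and scx_kf_allowed() matches above show a per-task mask (current->scx.kf_mask) that records which kfunc classes the running callback may use; the WARN_ONCE() at line 1258 fires if the new mask, or any higher bit, is already set, so masks may only nest on top of strictly lower bits. Below is a minimal standalone sketch of that check, not kernel code: kf_mask, higher_bits() and the bit values are local assumptions for illustration.

/* standalone model of the nesting check visible at lines 1258-1268 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t kf_mask;			/* stands in for current->scx.kf_mask */

/* all bits strictly above the highest bit in @flags (local assumption,
 * not the kernel's higher_bits()) */
static uint32_t higher_bits(uint32_t flags)
{
	uint32_t highest = flags;

	while (highest & (highest - 1))		/* isolate the top set bit */
		highest &= highest - 1;
	return ~(highest | (highest - 1));
}

static void kf_allow(uint32_t mask)
{
	/* same condition as the WARN_ONCE() at line 1258: every bit already
	 * held must be strictly lower than @mask */
	assert(!((mask | higher_bits(mask)) & kf_mask));
	kf_mask |= mask;			/* line 1261 */
}

static void kf_disallow(uint32_t mask)
{
	kf_mask &= ~mask;			/* line 1268 */
}

int main(void)
{
	kf_allow(0x1);		/* outer scope */
	kf_allow(0x4);		/* nested scope with a higher bit: allowed */
	kf_disallow(0x4);
	kf_disallow(0x1);
	/* reversing the order, kf_allow(0x4) then kf_allow(0x1), trips the assert */
	printf("kf_mask=%#x\n", kf_mask);
	return 0;
}
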
1429 list_node = &cur->scx.dsq_list.node; in nldsq_next_task()
1447 return container_of(dsq_lnode, struct task_struct, scx.dsq_list); in nldsq_next_task()
1598 return container_of(pos, struct task_struct, scx); in scx_task_iter_next()
1740 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss); in wait_ops_state()
1819 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work); in deferred_irq_workfn()
1843 if (rq->scx.flags & SCX_RQ_IN_WAKEUP) in schedule_deferred()
1850 if (rq->scx.flags & SCX_RQ_IN_BALANCE) { in schedule_deferred()
1851 queue_balance_callback(rq, &rq->scx.deferred_bal_cb, in schedule_deferred()
1862 irq_work_queue(&rq->scx.deferred_irq_work); in schedule_deferred()
1888 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); in touch_core_sched()
1921 if (curr->scx.slice != SCX_SLICE_INF) { in update_curr_scx()
1922 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec); in update_curr_scx()
1923 if (!curr->scx.slice) in update_curr_scx()
1932 container_of(node_a, struct task_struct, scx.dsq_priq); in scx_dsq_priq_less()
1934 container_of(node_b, struct task_struct, scx.dsq_priq); in scx_dsq_priq_less()
1936 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime); in scx_dsq_priq_less()
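
Editorial note: the comparator at lines 1932-1936 orders priority-queue tasks by scx.dsq_vtime with time_before64() rather than a plain '<', which keeps the ordering correct if vtime ever wraps a u64. A minimal standalone sketch, with the comparison re-expressed locally as a signed 64-bit difference (the form the kernel's time_before64() boils down to) and made-up values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* wraparound-safe "a is earlier than b" */
static bool vtime_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 10;	/* enqueued just before wraparound */
	uint64_t wrapped = 5;			/* enqueued just after wraparound */

	/* plain '<' sorts the later task first, which is the wrong order */
	printf("plain  <: %d\n", wrapped < near_wrap);			/* 1 */
	/* the signed-difference form keeps the intended order */
	printf("vtime_before: %d\n", vtime_before(near_wrap, wrapped));	/* 1 */
	return 0;
}
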
1947 p->scx.slice = SCX_SLICE_DFL; in refill_task_slice_dfl()
1956 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); in dispatch_enqueue()
1957 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) || in dispatch_enqueue()
1958 !RB_EMPTY_NODE(&p->scx.dsq_priq)); in dispatch_enqueue()
1997 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; in dispatch_enqueue()
1998 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); in dispatch_enqueue()
2004 rbp = rb_prev(&p->scx.dsq_priq); in dispatch_enqueue()
2008 scx.dsq_priq); in dispatch_enqueue()
2009 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); in dispatch_enqueue()
2011 list_add(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
2020 list_add(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
2022 list_add_tail(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
2027 p->scx.dsq_seq = dsq->seq; in dispatch_enqueue()
2030 p->scx.dsq = dsq; in dispatch_enqueue()
2038 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; in dispatch_enqueue()
2039 p->scx.ddsp_enq_flags = 0; in dispatch_enqueue()
2046 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in dispatch_enqueue()
2049 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); in dispatch_enqueue()
2054 rq->curr->scx.slice = 0; in dispatch_enqueue()
2069 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node)); in task_unlink_from_dsq()
2071 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) { in task_unlink_from_dsq()
2072 rb_erase(&p->scx.dsq_priq, &dsq->priq); in task_unlink_from_dsq()
2073 RB_CLEAR_NODE(&p->scx.dsq_priq); in task_unlink_from_dsq()
2074 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; in task_unlink_from_dsq()
2077 list_del_init(&p->scx.dsq_list.node); in task_unlink_from_dsq()
2083 struct scx_dispatch_q *dsq = p->scx.dsq; in dispatch_dequeue()
2084 bool is_local = dsq == &rq->scx.local_dsq; in dispatch_dequeue()
2091 if (unlikely(!list_empty(&p->scx.dsq_list.node))) in dispatch_dequeue()
2092 list_del_init(&p->scx.dsq_list.node); in dispatch_dequeue()
2100 if (p->scx.holding_cpu >= 0) in dispatch_dequeue()
2101 p->scx.holding_cpu = -1; in dispatch_dequeue()
2113 if (p->scx.holding_cpu < 0) { in dispatch_dequeue()
2123 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node)); in dispatch_dequeue()
2124 p->scx.holding_cpu = -1; in dispatch_dequeue()
2126 p->scx.dsq = NULL; in dispatch_dequeue()
2139 return &rq->scx.local_dsq; in find_dsq_for_dispatch()
2147 return &cpu_rq(cpu)->scx.local_dsq; in find_dsq_for_dispatch()
2187 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID); in mark_direct_dispatch()
2188 WARN_ON_ONCE(p->scx.ddsp_enq_flags); in mark_direct_dispatch()
2190 p->scx.ddsp_dsq_id = dsq_id; in mark_direct_dispatch()
2191 p->scx.ddsp_enq_flags = enq_flags; in mark_direct_dispatch()
2199 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in direct_dispatch()
2203 p->scx.ddsp_enq_flags |= enq_flags; in direct_dispatch()
2211 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) { in direct_dispatch()
2214 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK; in direct_dispatch()
2224 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in direct_dispatch()
2229 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in direct_dispatch()
2233 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); in direct_dispatch()
2234 list_add_tail(&p->scx.dsq_list.node, in direct_dispatch()
2235 &rq->scx.ddsp_deferred_locals); in direct_dispatch()
2241 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); in direct_dispatch()
2253 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq))); in scx_rq_online()
2263 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); in do_enqueue_task()
2282 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) in do_enqueue_task()
2303 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT; in do_enqueue_task()
2305 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); in do_enqueue_task()
2306 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); in do_enqueue_task()
2315 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) in do_enqueue_task()
2322 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); in do_enqueue_task()
2338 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); in do_enqueue_task()
2349 return !list_empty(&p->scx.runnable_node); in task_runnable()
2356 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) { in set_task_runnable()
2357 p->scx.runnable_at = jiffies; in set_task_runnable()
2358 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT; in set_task_runnable()
2365 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list); in set_task_runnable()
2370 list_del_init(&p->scx.runnable_node); in clr_task_runnable()
2372 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; in clr_task_runnable()
2378 int sticky_cpu = p->scx.sticky_cpu; in enqueue_task_scx()
2381 rq->scx.flags |= SCX_RQ_IN_WAKEUP; in enqueue_task_scx()
2383 enq_flags |= rq->scx.extra_enq_flags; in enqueue_task_scx()
2386 p->scx.sticky_cpu = -1; in enqueue_task_scx()
2397 if (p->scx.flags & SCX_TASK_QUEUED) { in enqueue_task_scx()
2403 p->scx.flags |= SCX_TASK_QUEUED; in enqueue_task_scx()
2404 rq->scx.nr_running++; in enqueue_task_scx()
2415 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP; in enqueue_task_scx()
2418 unlikely(cpu_of(rq) != p->scx.selected_cpu)) in enqueue_task_scx()
2431 opss = atomic_long_read_acquire(&p->scx.ops_state); in ops_dequeue()
2447 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, in ops_dequeue()
2466 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); in ops_dequeue()
2475 if (!(p->scx.flags & SCX_TASK_QUEUED)) { in dequeue_task_scx()
2503 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; in dequeue_task_scx()
2505 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; in dequeue_task_scx()
2507 p->scx.flags &= ~SCX_TASK_QUEUED; in dequeue_task_scx()
2508 rq->scx.nr_running--; in dequeue_task_scx()
2523 p->scx.slice = 0; in yield_task_scx()
2542 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq; in move_local_task_to_local_dsq()
2548 WARN_ON_ONCE(p->scx.holding_cpu >= 0); in move_local_task_to_local_dsq()
2551 list_add(&p->scx.dsq_list.node, &dst_dsq->list); in move_local_task_to_local_dsq()
2553 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list); in move_local_task_to_local_dsq()
2556 p->scx.dsq = dst_dsq; in move_local_task_to_local_dsq()
2576 p->scx.sticky_cpu = cpu_of(dst_rq); in move_remote_task_to_local_dsq()
2587 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags); in move_remote_task_to_local_dsq()
2588 dst_rq->scx.extra_enq_flags = enq_flags; in move_remote_task_to_local_dsq()
2590 dst_rq->scx.extra_enq_flags = 0; in move_remote_task_to_local_dsq()
2699 WARN_ON_ONCE(p->scx.holding_cpu >= 0); in unlink_dsq_and_lock_src_rq()
2701 p->scx.holding_cpu = cpu; in unlink_dsq_and_lock_src_rq()
2707 return likely(p->scx.holding_cpu == cpu) && in unlink_dsq_and_lock_src_rq()
2754 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); in move_task_between_dsqs()
2787 p->scx.dsq = NULL; in move_task_between_dsqs()
2896 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); in dispatch_to_local_dsq()
2929 p->scx.holding_cpu = raw_smp_processor_id(); in dispatch_to_local_dsq()
2932 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in dispatch_to_local_dsq()
2942 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) && in dispatch_to_local_dsq()
2950 p->scx.holding_cpu = -1; in dispatch_to_local_dsq()
2951 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p, in dispatch_to_local_dsq()
3005 opss = atomic_long_read(&p->scx.ops_state); in finish_dispatch()
3028 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, in finish_dispatch()
3043 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); in finish_dispatch()
3074 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED; in balance_one()
3078 rq->scx.flags |= SCX_RQ_IN_BALANCE; in balance_one()
3079 rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP); in balance_one()
3082 unlikely(rq->scx.cpu_released)) { in balance_one()
3092 rq->scx.cpu_released = false; in balance_one()
3108 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) { in balance_one()
3109 rq->scx.flags |= SCX_RQ_BAL_KEEP; in balance_one()
3115 if (rq->scx.local_dsq.nr) in balance_one()
3142 if (prev_on_rq && prev->scx.slice) { in balance_one()
3143 rq->scx.flags |= SCX_RQ_BAL_KEEP; in balance_one()
3146 if (rq->scx.local_dsq.nr) in balance_one()
3173 rq->scx.flags |= SCX_RQ_BAL_KEEP; in balance_one()
3177 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; in balance_one()
3181 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; in balance_one()
3232 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals, in process_ddsp_deferred_locals()
3233 struct task_struct, scx.dsq_list.node))) { in process_ddsp_deferred_locals()
3237 list_del_init(&p->scx.dsq_list.node); in process_ddsp_deferred_locals()
3239 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in process_ddsp_deferred_locals()
3242 p->scx.ddsp_enq_flags); in process_ddsp_deferred_locals()
3250 if (p->scx.flags & SCX_TASK_QUEUED) { in set_next_task_scx()
3262 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED)) in set_next_task_scx()
3271 if ((p->scx.slice == SCX_SLICE_INF) != in set_next_task_scx()
3272 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { in set_next_task_scx()
3273 if (p->scx.slice == SCX_SLICE_INF) in set_next_task_scx()
3274 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; in set_next_task_scx()
3276 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; in set_next_task_scx()
3312 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); in switch_class()
3334 if (!rq->scx.cpu_released) { in switch_class()
3344 rq->scx.cpu_released = true; in switch_class()
3355 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED)) in put_prev_task_scx()
3358 if (p->scx.flags & SCX_TASK_QUEUED) { in put_prev_task_scx()
3367 if (p->scx.slice && !scx_rq_bypassing(rq)) { in put_prev_task_scx()
3368 dispatch_enqueue(sch, &rq->scx.local_dsq, p, in put_prev_task_scx()
3394 return list_first_entry_or_null(&rq->scx.local_dsq.list, in first_local_task()
3395 struct task_struct, scx.dsq_list.node); in first_local_task()
3402 bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; in pick_task_scx()
3420 if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) { in pick_task_scx()
3421 if (prev->scx.flags & SCX_TASK_QUEUED) { in pick_task_scx()
3444 if (!p->scx.slice) in pick_task_scx()
3454 if (unlikely(!p->scx.slice)) { in pick_task_scx()
3505 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at); in scx_prio_less()
3540 p->scx.selected_cpu = cpu; in select_task_rq_scx()
3552 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; in select_task_rq_scx()
3556 p->scx.selected_cpu = cpu; in select_task_rq_scx()
3630 rq->scx.flags |= SCX_RQ_ONLINE; in rq_online_scx()
3635 rq->scx.flags &= ~SCX_RQ_ONLINE; in rq_offline_scx()
3651 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { in check_rq_for_timeouts()
3652 unsigned long last_runnable = p->scx.runnable_at; in check_rq_for_timeouts()
3722 curr->scx.slice = 0; in task_tick_scx()
3728 if (!curr->scx.slice) in task_tick_scx()
3756 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT; in scx_get_task_state()
3786 p->scx.flags &= ~SCX_TASK_STATE_MASK; in scx_set_task_state()
3787 p->scx.flags |= state << SCX_TASK_STATE_SHIFT; in scx_set_task_state()
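
Editorial note: lines 3756 and 3786-3787 show the task state being packed into a sub-field of p->scx.flags with a mask and shift. A minimal standalone sketch of that pack/unpack pattern; the shift and field width below are illustrative assumptions, not the kernel's SCX_TASK_STATE_SHIFT/MASK values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TASK_STATE_SHIFT	8				/* assumed position */
#define TASK_STATE_MASK		(0x3u << TASK_STATE_SHIFT)	/* assumed 2-bit field */

static uint32_t get_state(uint32_t flags)
{
	return (flags & TASK_STATE_MASK) >> TASK_STATE_SHIFT;	/* cf. line 3756 */
}

static uint32_t set_state(uint32_t flags, uint32_t state)
{
	flags &= ~TASK_STATE_MASK;				/* cf. line 3786 */
	flags |= state << TASK_STATE_SHIFT;			/* cf. line 3787 */
	return flags;
}

int main(void)
{
	uint32_t flags = 0x1;			/* some unrelated flag bit */

	flags = set_state(flags, 2);
	assert(get_state(flags) == 2);
	assert(flags & 0x1);			/* other flag bits are preserved */
	printf("flags=%#x state=%u\n", flags, get_state(flags));
	return 0;
}
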
3795 p->scx.disallow = false; in scx_init_task()
3813 if (p->scx.disallow) { in scx_init_task()
3839 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; in scx_init_task()
3860 p->scx.weight = sched_weight_to_cgroup(weight); in scx_enable_task()
3868 p, p->scx.weight); in scx_enable_task()
3915 void init_scx_entity(struct sched_ext_entity *scx) in init_scx_entity() argument
3917 memset(scx, 0, sizeof(*scx)); in init_scx_entity()
3918 INIT_LIST_HEAD(&scx->dsq_list.node); in init_scx_entity()
3919 RB_CLEAR_NODE(&scx->dsq_priq); in init_scx_entity()
3920 scx->sticky_cpu = -1; in init_scx_entity()
3921 scx->holding_cpu = -1; in init_scx_entity()
3922 INIT_LIST_HEAD(&scx->runnable_node); in init_scx_entity()
3923 scx->runnable_at = jiffies; in init_scx_entity()
3924 scx->ddsp_dsq_id = SCX_DSQ_INVALID; in init_scx_entity()
3925 scx->slice = SCX_SLICE_DFL; in init_scx_entity()
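
Editorial note: the matches for init_scx_entity() (lines 3915-3925) cover essentially the whole function. Assuming only the braces fall outside the match filter, the body reconstructs as the sketch below; each field that later code tests against a sentinel (holding_cpu/sticky_cpu of -1, SCX_DSQ_INVALID, empty list and rbtree nodes) starts out in that sentinel state.

void init_scx_entity(struct sched_ext_entity *scx)
{
	memset(scx, 0, sizeof(*scx));
	INIT_LIST_HEAD(&scx->dsq_list.node);
	RB_CLEAR_NODE(&scx->dsq_priq);
	scx->sticky_cpu = -1;
	scx->holding_cpu = -1;
	INIT_LIST_HEAD(&scx->runnable_node);
	scx->runnable_at = jiffies;
	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
	scx->slice = SCX_SLICE_DFL;
}
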
3970 list_add_tail(&p->scx.tasks_node, &scx_tasks); in scx_post_fork()
3996 list_del_init(&p->scx.tasks_node); in sched_ext_free()
4020 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); in reweight_task_scx()
4023 p, p->scx.weight); in reweight_task_scx()
4058 if (scx_enabled() && READ_ONCE(p->scx.disallow) && in scx_check_setscheduler()
4081 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; in scx_can_stop_tick()
4092 tg->scx.weight = CGROUP_WEIGHT_DFL; in scx_tg_init()
4093 tg->scx.bw_period_us = default_bw_period_us(); in scx_tg_init()
4094 tg->scx.bw_quota_us = RUNTIME_INF; in scx_tg_init()
4102 WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED)); in scx_tg_online()
4109 { .weight = tg->scx.weight, in scx_tg_online()
4110 .bw_period_us = tg->scx.bw_period_us, in scx_tg_online()
4111 .bw_quota_us = tg->scx.bw_quota_us, in scx_tg_online()
4112 .bw_burst_us = tg->scx.bw_burst_us }; in scx_tg_online()
4120 tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED; in scx_tg_online()
4122 tg->scx.flags |= SCX_TG_ONLINE; in scx_tg_online()
4133 WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE)); in scx_tg_offline()
4138 (tg->scx.flags & SCX_TG_INITED)) in scx_tg_offline()
4141 tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); in scx_tg_offline()
4163 WARN_ON_ONCE(p->scx.cgrp_moving_from); in scx_cgroup_can_attach()
4181 p->scx.cgrp_moving_from = from; in scx_cgroup_can_attach()
4189 p->scx.cgrp_moving_from) in scx_cgroup_can_attach()
4191 p, p->scx.cgrp_moving_from, css->cgroup); in scx_cgroup_can_attach()
4192 p->scx.cgrp_moving_from = NULL; in scx_cgroup_can_attach()
4211 !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) in scx_cgroup_move_task()
4213 p, p->scx.cgrp_moving_from, in scx_cgroup_move_task()
4215 p->scx.cgrp_moving_from = NULL; in scx_cgroup_move_task()
4234 p->scx.cgrp_moving_from) in scx_cgroup_cancel_attach()
4236 p, p->scx.cgrp_moving_from, css->cgroup); in scx_cgroup_cancel_attach()
4237 p->scx.cgrp_moving_from = NULL; in scx_cgroup_cancel_attach()
4250 tg->scx.weight != weight) in scx_group_set_weight()
4254 tg->scx.weight = weight; in scx_group_set_weight()
4272 (tg->scx.bw_period_us != period_us || in scx_group_set_bandwidth()
4273 tg->scx.bw_quota_us != quota_us || in scx_group_set_bandwidth()
4274 tg->scx.bw_burst_us != burst_us)) in scx_group_set_bandwidth()
4278 tg->scx.bw_period_us = period_us; in scx_group_set_bandwidth()
4279 tg->scx.bw_quota_us = quota_us; in scx_group_set_bandwidth()
4280 tg->scx.bw_burst_us = burst_us; in scx_group_set_bandwidth()
4426 if (!(tg->scx.flags & SCX_TG_INITED)) in scx_cgroup_exit()
4428 tg->scx.flags &= ~SCX_TG_INITED; in scx_cgroup_exit()
4461 .weight = tg->scx.weight, in scx_cgroup_init()
4462 .bw_period_us = tg->scx.bw_period_us, in scx_cgroup_init()
4463 .bw_quota_us = tg->scx.bw_quota_us, in scx_cgroup_init()
4464 .bw_burst_us = tg->scx.bw_burst_us, in scx_cgroup_init()
4467 if ((tg->scx.flags & in scx_cgroup_init()
4472 tg->scx.flags |= SCX_TG_INITED; in scx_cgroup_init()
4487 tg->scx.flags |= SCX_TG_INITED; in scx_cgroup_init()
4844 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING); in scx_bypass()
4845 rq->scx.flags |= SCX_RQ_BYPASSING; in scx_bypass()
4847 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING)); in scx_bypass()
4848 rq->scx.flags &= ~SCX_RQ_BYPASSING; in scx_bypass()
4868 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, in scx_bypass()
4869 scx.runnable_node) { in scx_bypass()
5218 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); in scx_dump_task()
5221 if (p->scx.dsq) in scx_dump_task()
5223 (unsigned long long)p->scx.dsq->id); in scx_dump_task()
5228 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); in scx_dump_task()
5230 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, in scx_dump_task()
5231 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, in scx_dump_task()
5234 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf); in scx_dump_task()
5236 p->scx.dsq_vtime, p->scx.slice, p->scx.weight); in scx_dump_task()
5307 idle = list_empty(&rq->scx.runnable_list) && in scx_dump_state()
5324 cpu, rq->scx.nr_running, rq->scx.flags, in scx_dump_state()
5325 rq->scx.cpu_released, rq->scx.ops_qseq, in scx_dump_state()
5326 rq->scx.pnt_seq); in scx_dump_state()
5330 if (!cpumask_empty(rq->scx.cpus_to_kick)) in scx_dump_state()
5332 cpumask_pr_args(rq->scx.cpus_to_kick)); in scx_dump_state()
5333 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle)) in scx_dump_state()
5335 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle)); in scx_dump_state()
5336 if (!cpumask_empty(rq->scx.cpus_to_preempt)) in scx_dump_state()
5338 cpumask_pr_args(rq->scx.cpus_to_preempt)); in scx_dump_state()
5339 if (!cpumask_empty(rq->scx.cpus_to_wait)) in scx_dump_state()
5341 cpumask_pr_args(rq->scx.cpus_to_wait)); in scx_dump_state()
5371 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) in scx_dump_state()
5600 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE; in scx_enable()
5760 p->scx.slice = SCX_SLICE_DFL; in scx_enable()
5848 if (off >= offsetof(struct task_struct, scx.slice) && in bpf_scx_btf_struct_access()
5849 off + size <= offsetofend(struct task_struct, scx.slice)) in bpf_scx_btf_struct_access()
5851 if (off >= offsetof(struct task_struct, scx.dsq_vtime) && in bpf_scx_btf_struct_access()
5852 off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) in bpf_scx_btf_struct_access()
5854 if (off >= offsetof(struct task_struct, scx.disallow) && in bpf_scx_btf_struct_access()
5855 off + size <= offsetofend(struct task_struct, scx.disallow)) in bpf_scx_btf_struct_access()
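
Editorial note: the checks at lines 5848-5855 whitelist BPF writes by byte range: an access [off, off + size) is allowed only if it lies entirely inside scx.slice, scx.dsq_vtime, or scx.disallow. A minimal standalone sketch of that range test; struct demo_task and the field choice are made up, and offsetofend() is defined locally (the kernel provides it as a helper macro).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct demo_task {
	uint64_t slice;			/* writable in this sketch */
	uint64_t dsq_vtime;		/* writable */
	uint32_t flags;			/* not writable */
};

static bool write_allowed(size_t off, size_t size)
{
	if (off >= offsetof(struct demo_task, slice) &&
	    off + size <= offsetofend(struct demo_task, slice))
		return true;
	if (off >= offsetof(struct demo_task, dsq_vtime) &&
	    off + size <= offsetofend(struct demo_task, dsq_vtime))
		return true;
	return false;
}

int main(void)
{
	/* a full write to dsq_vtime sits inside the whitelisted range */
	printf("%d\n", write_allowed(offsetof(struct demo_task, dsq_vtime), 8));
	/* a write spilling past dsq_vtime into flags is rejected */
	printf("%d\n", write_allowed(offsetof(struct demo_task, dsq_vtime), 12));
	return 0;
}
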
6115 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE); in can_skip_idle_kick()
6121 struct scx_rq *this_scx = &this_rq->scx; in kick_one_cpu()
6134 rq->curr->scx.slice = 0; in kick_one_cpu()
6139 pseqs[cpu] = rq->scx.pnt_seq; in kick_one_cpu()
6171 struct scx_rq *this_scx = &this_rq->scx; in kick_cpus_irq_workfn()
6191 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq; in kick_cpus_irq_workfn()
6245 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at, in print_scx_info()
6307 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); in init_sched_ext_class()
6308 INIT_LIST_HEAD(&rq->scx.runnable_list); in init_sched_ext_class()
6309 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals); in init_sched_ext_class()
6311 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n)); in init_sched_ext_class()
6312 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n)); in init_sched_ext_class()
6313 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n)); in init_sched_ext_class()
6314 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n)); in init_sched_ext_class()
6315 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn); in init_sched_ext_class()
6316 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn); in init_sched_ext_class()
6319 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE; in init_sched_ext_class()
6370 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, in scx_dsq_insert_commit()
6420 p->scx.slice = slice; in scx_bpf_dsq_insert()
6422 p->scx.slice = p->scx.slice ?: 1; in scx_bpf_dsq_insert()
6455 p->scx.slice = slice; in scx_bpf_dsq_insert_vtime()
6457 p->scx.slice = p->scx.slice ?: 1; in scx_bpf_dsq_insert_vtime()
6459 p->scx.dsq_vtime = vtime; in scx_bpf_dsq_insert_vtime()
6498 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE; in scx_dsq_move()
6523 if (unlikely(p->scx.dsq != src_dsq || in scx_dsq_move()
6524 u32_before(kit->cursor.priv, p->scx.dsq_seq) || in scx_dsq_move()
6525 p->scx.holding_cpu >= 0) || in scx_dsq_move()
6540 p->scx.dsq_vtime = kit->vtime; in scx_dsq_move()
6542 p->scx.slice = kit->slice; in scx_dsq_move()
6779 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, in scx_bpf_reenqueue_local()
6780 scx.dsq_list.node) { in scx_bpf_reenqueue_local()
6800 list_add_tail(&p->scx.dsq_list.node, &tasks); in scx_bpf_reenqueue_local()
6803 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { in scx_bpf_reenqueue_local()
6804 list_del_init(&p->scx.dsq_list.node); in scx_bpf_reenqueue_local()
6932 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle); in scx_bpf_kick_cpu()
6934 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick); in scx_bpf_kick_cpu()
6937 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt); in scx_bpf_kick_cpu()
6939 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait); in scx_bpf_kick_cpu()
6942 irq_work_queue(&this_rq->scx.kick_cpus_irq_work); in scx_bpf_kick_cpu()
6969 ret = READ_ONCE(this_rq()->scx.local_dsq.nr); in scx_bpf_dsq_nr_queued()
6975 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr); in scx_bpf_dsq_nr_queued()
7077 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list); in bpf_iter_scx_dsq_next()
7086 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq))); in bpf_iter_scx_dsq_next()
7090 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node); in bpf_iter_scx_dsq_next()
7092 list_move(&kit->cursor.node, &p->scx.dsq_list.node); in bpf_iter_scx_dsq_next()
7345 rq->scx.cpuperf_target = perf; in scx_bpf_cpuperf_set()
7497 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) { in scx_bpf_now()
7506 clock = READ_ONCE(rq->scx.clock); in scx_bpf_now()
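
Editorial note: lines 7497 and 7506 show scx_bpf_now() loading rq->scx.flags with acquire semantics before reading rq->scx.clock, presumably paired with a release store on the side that publishes the clock and sets SCX_RQ_CLK_VALID. A minimal standalone C11 sketch of that acquire/release pairing; the writer side and all names here are assumptions for illustration.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CLK_VALID	0x1u

static _Atomic uint32_t flags;
static _Atomic uint64_t clock_ns;

/* writer: store the clock value, then mark it valid with release ordering */
static void publish_clock(uint64_t now)
{
	atomic_store_explicit(&clock_ns, now, memory_order_relaxed);
	atomic_fetch_or_explicit(&flags, CLK_VALID, memory_order_release);
}

/* reader: the acquire load of flags orders the clock read after the
 * writer's stores, so seeing CLK_VALID implies seeing the clock value */
static uint64_t read_clock_or(uint64_t fallback)
{
	if (atomic_load_explicit(&flags, memory_order_acquire) & CLK_VALID)
		return atomic_load_explicit(&clock_ns, memory_order_relaxed);
	return fallback;
}

int main(void)
{
	publish_clock(123456789);
	printf("%llu\n", (unsigned long long)read_clock_or(0));
	return 0;
}
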