Lines Matching refs:sch
198 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
199 static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
202 static __printf(4, 5) bool scx_exit(struct scx_sched *sch, in scx_exit() argument
210 ret = scx_vexit(sch, kind, exit_code, fmt, args); in scx_exit()
216 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args) argument
217 #define scx_verror(sch, fmt, args) scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args) argument
219 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op) argument
247 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch, in find_global_dsq() argument
250 return sch->global_dsqs[cpu_to_node(task_cpu(p))]; in find_global_dsq()
253 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id) in find_user_dsq() argument
255 return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params); in find_user_dsq()
310 #define SCX_CALL_OP(sch, mask, op, rq, args...) \ argument
316 (sch)->ops.op(args); \
319 (sch)->ops.op(args); \
325 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \ argument
327 __typeof__((sch)->ops.op(args)) __ret; \
333 __ret = (sch)->ops.op(args); \
336 __ret = (sch)->ops.op(args); \
354 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \ argument
358 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
362 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \ argument
364 __typeof__((sch)->ops.op(task, ##args)) __ret; \
367 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
372 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \ argument
374 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
378 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
385 static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask) in scx_kf_allowed() argument
388 scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x", in scx_kf_allowed()
402 scx_error(sch, "cpu_release kfunc called from a nested operation"); in scx_kf_allowed()
408 scx_error(sch, "dispatch kfunc called from a nested operation"); in scx_kf_allowed()
416 static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch, in scx_kf_allowed_on_arg_tasks() argument
420 if (!scx_kf_allowed(sch, mask)) in scx_kf_allowed_on_arg_tasks()
425 scx_error(sch, "called on a task not being operated on"); in scx_kf_allowed_on_arg_tasks()
687 #define scx_add_event(sch, name, cnt) do { \ argument
688 this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
700 #define __scx_add_event(sch, name, cnt) do { \ argument
701 __this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
726 static void scx_read_events(struct scx_sched *sch,
779 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where) in ops_cpu_valid() argument
784 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: ""); in ops_cpu_valid()
802 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err) in ops_sanitize_err() argument
807 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err); in ops_sanitize_err()
979 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p) in refill_task_slice_dfl() argument
982 __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1); in refill_task_slice_dfl()
1009 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq, in dispatch_enqueue() argument
1023 scx_error(sch, "attempting to dispatch to a destroyed dsq"); in dispatch_enqueue()
1026 dsq = find_global_dsq(sch, p); in dispatch_enqueue()
1040 scx_error(sch, "cannot use vtime ordering for built-in DSQs"); in dispatch_enqueue()
1054 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks", in dispatch_enqueue()
1079 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks", in dispatch_enqueue()
1215 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch, in find_dsq_for_dispatch() argument
1227 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict")) in find_dsq_for_dispatch()
1228 return find_global_dsq(sch, p); in find_dsq_for_dispatch()
1234 dsq = find_global_dsq(sch, p); in find_dsq_for_dispatch()
1236 dsq = find_user_dsq(sch, dsq_id); in find_dsq_for_dispatch()
1239 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]", in find_dsq_for_dispatch()
1241 return find_global_dsq(sch, p); in find_dsq_for_dispatch()
1247 static void mark_direct_dispatch(struct scx_sched *sch, in mark_direct_dispatch() argument
1262 scx_error(sch, "%s[%d] already direct-dispatched", in mark_direct_dispatch()
1265 scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]", in mark_direct_dispatch()
1278 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p, in direct_dispatch() argument
1283 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in direct_dispatch()
1324 dispatch_enqueue(sch, dsq, p, in direct_dispatch()
1343 struct scx_sched *sch = scx_root; in do_enqueue_task() local
1363 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); in do_enqueue_task()
1371 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) && in do_enqueue_task()
1373 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1); in do_enqueue_task()
1378 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) && in do_enqueue_task()
1380 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1); in do_enqueue_task()
1384 if (unlikely(!SCX_HAS_OP(sch, enqueue))) in do_enqueue_task()
1397 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags); in do_enqueue_task()
1411 direct_dispatch(sch, p, enq_flags); in do_enqueue_task()
1414 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); in do_enqueue_task()
1420 dsq = find_global_dsq(sch, p); in do_enqueue_task()
1433 refill_task_slice_dfl(sch, p); in do_enqueue_task()
1434 dispatch_enqueue(sch, dsq, p, enq_flags); in do_enqueue_task()
1467 struct scx_sched *sch = scx_root; in enqueue_task_scx() local
1497 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p)) in enqueue_task_scx()
1498 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags); in enqueue_task_scx()
1509 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1); in enqueue_task_scx()
1514 struct scx_sched *sch = scx_root; in ops_dequeue() local
1533 if (SCX_HAS_OP(sch, dequeue)) in ops_dequeue()
1534 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq, in ops_dequeue()
1563 struct scx_sched *sch = scx_root; in dequeue_task_scx() local
1584 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) { in dequeue_task_scx()
1586 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false); in dequeue_task_scx()
1589 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p)) in dequeue_task_scx()
1590 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags); in dequeue_task_scx()
1607 struct scx_sched *sch = scx_root; in yield_task_scx() local
1610 if (SCX_HAS_OP(sch, yield)) in yield_task_scx()
1611 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL); in yield_task_scx()
1618 struct scx_sched *sch = scx_root; in yield_to_task_scx() local
1621 if (SCX_HAS_OP(sch, yield)) in yield_to_task_scx()
1622 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, in yield_to_task_scx()
1704 static bool task_can_run_on_remote_rq(struct scx_sched *sch, in task_can_run_on_remote_rq() argument
1726 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d", in task_can_run_on_remote_rq()
1739 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]", in task_can_run_on_remote_rq()
1746 __scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1); in task_can_run_on_remote_rq()
1833 static struct rq *move_task_between_dsqs(struct scx_sched *sch, in move_task_between_dsqs() argument
1847 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { in move_task_between_dsqs()
1848 dst_dsq = find_global_dsq(sch, p); in move_task_between_dsqs()
1880 dispatch_enqueue(sch, dst_dsq, p, enq_flags); in move_task_between_dsqs()
1886 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq, in consume_dispatch_q() argument
1922 if (task_can_run_on_remote_rq(sch, p, rq, false)) { in consume_dispatch_q()
1933 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq) in consume_global_dsq() argument
1937 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]); in consume_global_dsq()
1955 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq, in dispatch_to_local_dsq() argument
1970 dispatch_enqueue(sch, dst_dsq, p, in dispatch_to_local_dsq()
1976 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { in dispatch_to_local_dsq()
1977 dispatch_enqueue(sch, find_global_dsq(sch, p), p, in dispatch_to_local_dsq()
2015 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p, in dispatch_to_local_dsq()
2055 static void finish_dispatch(struct scx_sched *sch, struct rq *rq, in finish_dispatch() argument
2109 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p); in finish_dispatch()
2112 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags); in finish_dispatch()
2114 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); in finish_dispatch()
2117 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq) in flush_dispatch_buf() argument
2125 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id, in flush_dispatch_buf()
2148 struct scx_sched *sch = scx_root; in balance_one() local
2158 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) && in balance_one()
2166 if (SCX_HAS_OP(sch, cpu_acquire)) in balance_one()
2167 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq, in balance_one()
2195 if (consume_global_dsq(sch, rq)) in balance_one()
2199 if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq)) in balance_one()
2205 if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq)) in balance_one()
2220 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq, in balance_one()
2223 flush_dispatch_buf(sch, rq); in balance_one()
2231 if (consume_global_dsq(sch, rq)) in balance_one()
2244 scx_kick_cpu(sch, cpu_of(rq), 0); in balance_one()
2255 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) { in balance_one()
2257 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1); in balance_one()
2283 struct scx_sched *sch = scx_root; in process_ddsp_deferred_locals() local
2288 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in process_ddsp_deferred_locals()
2290 dispatch_to_local_dsq(sch, rq, dsq, p, in process_ddsp_deferred_locals()
2297 struct scx_sched *sch = scx_root; in set_next_task_scx() local
2311 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED)) in set_next_task_scx()
2312 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p); in set_next_task_scx()
2353 struct scx_sched *sch = scx_root; in switch_class() local
2356 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT)) in switch_class()
2378 if (SCX_HAS_OP(sch, cpu_release)) { in switch_class()
2384 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq, in switch_class()
2394 struct scx_sched *sch = scx_root; in put_prev_task_scx() local
2402 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED)) in put_prev_task_scx()
2403 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true); in put_prev_task_scx()
2415 dispatch_enqueue(sch, &rq->scx.local_dsq, p, in put_prev_task_scx()
2427 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST)); in put_prev_task_scx()
2495 struct scx_sched *sch = rcu_dereference_sched(scx_root); in do_pick_task_scx() local
2497 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) { in do_pick_task_scx()
2500 sch->warned_zero_slice = true; in do_pick_task_scx()
2502 refill_task_slice_dfl(sch, p); in do_pick_task_scx()
2536 struct scx_sched *sch = scx_root; in scx_prio_less() local
2543 if (SCX_HAS_OP(sch, core_sched_before) && in scx_prio_less()
2545 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before, in scx_prio_less()
2556 struct scx_sched *sch = scx_root; in select_task_rq_scx() local
2573 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) { in select_task_rq_scx()
2581 cpu = SCX_CALL_OP_TASK_RET(sch, in select_task_rq_scx()
2587 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()")) in select_task_rq_scx()
2596 refill_task_slice_dfl(sch, p); in select_task_rq_scx()
2604 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); in select_task_rq_scx()
2617 struct scx_sched *sch = scx_root; in set_cpus_allowed_scx() local
2629 if (SCX_HAS_OP(sch, set_cpumask)) in set_cpus_allowed_scx()
2630 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL, in set_cpus_allowed_scx()
2636 struct scx_sched *sch = scx_root; in handle_hotplug() local
2646 if (unlikely(!sch)) in handle_hotplug()
2650 scx_idle_update_selcpu_topology(&sch->ops); in handle_hotplug()
2652 if (online && SCX_HAS_OP(sch, cpu_online)) in handle_hotplug()
2653 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu); in handle_hotplug()
2654 else if (!online && SCX_HAS_OP(sch, cpu_offline)) in handle_hotplug()
2655 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu); in handle_hotplug()
2657 scx_exit(sch, SCX_EXIT_UNREG_KERN, in handle_hotplug()
2686 struct scx_sched *sch; in check_rq_for_timeouts() local
2692 sch = rcu_dereference_bh(scx_root); in check_rq_for_timeouts()
2693 if (unlikely(!sch)) in check_rq_for_timeouts()
2703 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, in check_rq_for_timeouts()
2733 struct scx_sched *sch; in scx_tick() local
2739 sch = rcu_dereference_bh(scx_root); in scx_tick()
2740 if (unlikely(!sch)) in scx_tick()
2748 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, in scx_tick()
2758 struct scx_sched *sch = scx_root; in task_tick_scx() local
2769 } else if (SCX_HAS_OP(sch, tick)) { in task_tick_scx()
2770 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr); in task_tick_scx()
2837 struct scx_sched *sch = scx_root; in scx_init_task() local
2842 if (SCX_HAS_OP(sch, init_task)) { in scx_init_task()
2848 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL, in scx_init_task()
2851 ret = ops_sanitize_err(sch, "init_task", ret); in scx_init_task()
2879 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork", in scx_init_task()
2890 struct scx_sched *sch = scx_root; in scx_enable_task() local
2907 if (SCX_HAS_OP(sch, enable)) in scx_enable_task()
2908 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p); in scx_enable_task()
2911 if (SCX_HAS_OP(sch, set_weight)) in scx_enable_task()
2912 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, in scx_enable_task()
2918 struct scx_sched *sch = scx_root; in scx_disable_task() local
2924 if (SCX_HAS_OP(sch, disable)) in scx_disable_task()
2925 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p); in scx_disable_task()
2931 struct scx_sched *sch = scx_root; in scx_exit_task() local
2954 if (SCX_HAS_OP(sch, exit_task)) in scx_exit_task()
2955 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p), in scx_exit_task()
3061 struct scx_sched *sch = scx_root; in reweight_task_scx() local
3066 if (SCX_HAS_OP(sch, set_weight)) in reweight_task_scx()
3067 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, in reweight_task_scx()
3077 struct scx_sched *sch = scx_root; in switching_to_scx() local
3085 if (SCX_HAS_OP(sch, set_cpumask)) in switching_to_scx()
3086 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq, in switching_to_scx()
3145 struct scx_sched *sch = scx_root; in scx_tg_online() local
3151 if (SCX_HAS_OP(sch, cgroup_init)) { in scx_tg_online()
3158 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, in scx_tg_online()
3161 ret = ops_sanitize_err(sch, "cgroup_init", ret); in scx_tg_online()
3174 struct scx_sched *sch = scx_root; in scx_tg_offline() local
3178 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) && in scx_tg_offline()
3180 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, in scx_tg_offline()
3187 struct scx_sched *sch = scx_root; in scx_cgroup_can_attach() local
3209 if (SCX_HAS_OP(sch, cgroup_prep_move)) { in scx_cgroup_can_attach()
3210 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, in scx_cgroup_can_attach()
3224 if (SCX_HAS_OP(sch, cgroup_cancel_move) && in scx_cgroup_can_attach()
3226 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, in scx_cgroup_can_attach()
3231 return ops_sanitize_err(sch, "cgroup_prep_move", ret); in scx_cgroup_can_attach()
3236 struct scx_sched *sch = scx_root; in scx_cgroup_move_task() local
3245 if (SCX_HAS_OP(sch, cgroup_move) && in scx_cgroup_move_task()
3247 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL, in scx_cgroup_move_task()
3255 struct scx_sched *sch = scx_root; in scx_cgroup_cancel_attach() local
3263 if (SCX_HAS_OP(sch, cgroup_cancel_move) && in scx_cgroup_cancel_attach()
3265 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, in scx_cgroup_cancel_attach()
3273 struct scx_sched *sch = scx_root; in scx_group_set_weight() local
3277 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) && in scx_group_set_weight()
3279 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL, in scx_group_set_weight()
3289 struct scx_sched *sch = scx_root; in scx_group_set_idle() local
3293 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle)) in scx_group_set_idle()
3294 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_idle, NULL, in scx_group_set_idle()
3306 struct scx_sched *sch = scx_root; in scx_group_set_bandwidth() local
3310 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) && in scx_group_set_bandwidth()
3314 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL, in scx_group_set_bandwidth()
3412 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id) in destroy_dsq() argument
3419 dsq = find_user_dsq(sch, dsq_id); in destroy_dsq()
3426 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)", in destroy_dsq()
3431 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node, in destroy_dsq()
3452 static void scx_cgroup_exit(struct scx_sched *sch) in scx_cgroup_exit() argument
3469 if (!sch->ops.cgroup_exit) in scx_cgroup_exit()
3472 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, in scx_cgroup_exit()
3477 static int scx_cgroup_init(struct scx_sched *sch) in scx_cgroup_init() argument
3499 if (!sch->ops.cgroup_init) { in scx_cgroup_init()
3504 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL, in scx_cgroup_init()
3508 scx_error(sch, "ops.cgroup_init() failed (%d)", ret); in scx_cgroup_init()
3521 static void scx_cgroup_exit(struct scx_sched *sch) {} in scx_cgroup_exit() argument
3522 static int scx_cgroup_init(struct scx_sched *sch) { return 0; } in scx_cgroup_init() argument
3589 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work); in scx_sched_free_rcu_work() local
3594 irq_work_sync(&sch->error_irq_work); in scx_sched_free_rcu_work()
3595 kthread_destroy_worker(sch->helper); in scx_sched_free_rcu_work()
3597 free_percpu(sch->pcpu); in scx_sched_free_rcu_work()
3600 kfree(sch->global_dsqs[node]); in scx_sched_free_rcu_work()
3601 kfree(sch->global_dsqs); in scx_sched_free_rcu_work()
3603 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter); in scx_sched_free_rcu_work()
3608 destroy_dsq(sch, dsq->id); in scx_sched_free_rcu_work()
3614 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); in scx_sched_free_rcu_work()
3615 free_exit_info(sch->exit_info); in scx_sched_free_rcu_work()
3616 kfree(sch); in scx_sched_free_rcu_work()
3621 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); in scx_kobj_release() local
3623 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work); in scx_kobj_release()
3624 queue_rcu_work(system_unbound_wq, &sch->rcu_work); in scx_kobj_release()
3641 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); in scx_attr_events_show() local
3645 scx_read_events(sch, &events); in scx_attr_events_show()
3696 struct scx_sched *sch; in scx_allow_ttwu_queue() local
3701 sch = rcu_dereference_sched(scx_root); in scx_allow_ttwu_queue()
3702 if (unlikely(!sch)) in scx_allow_ttwu_queue()
3705 if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP) in scx_allow_ttwu_queue()
3727 struct scx_sched *sch; in handle_lockup() local
3733 sch = rcu_dereference(scx_root); in handle_lockup()
3734 if (unlikely(!sch)) in handle_lockup()
3741 ret = scx_verror(sch, fmt, args); in handle_lockup()
3807 static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq, in bypass_lb_cpu() argument
3858 if (!task_can_run_on_remote_rq(sch, p, donee_rq, false)) in bypass_lb_cpu()
3873 dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED); in bypass_lb_cpu()
3903 static void bypass_lb_node(struct scx_sched *sch, int node) in bypass_lb_node() argument
3955 nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask, in bypass_lb_node()
3985 struct scx_sched *sch; in scx_bypass_lb_timerfn() local
3989 sch = rcu_dereference_all(scx_root); in scx_bypass_lb_timerfn()
3990 if (unlikely(!sch) || !READ_ONCE(scx_bypass_depth)) in scx_bypass_lb_timerfn()
3994 bypass_lb_node(sch, node); in scx_bypass_lb_timerfn()
4038 struct scx_sched *sch; in scx_bypass() local
4043 sch = rcu_dereference_bh(scx_root); in scx_bypass()
4054 if (sch) in scx_bypass()
4055 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); in scx_bypass()
4069 if (sch) in scx_bypass()
4070 scx_add_event(sch, SCX_EV_BYPASS_DURATION, in scx_bypass()
4199 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); in scx_disable_workfn() local
4200 struct scx_exit_info *ei = sch->exit_info; in scx_disable_workfn()
4205 kind = atomic_read(&sch->exit_kind); in scx_disable_workfn()
4210 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE)) in scx_disable_workfn()
4226 sch->exit_info->msg); in scx_disable_workfn()
4248 scx_cgroup_exit(sch); in scx_disable_workfn()
4290 bitmap_zero(sch->has_op, SCX_OPI_END); in scx_disable_workfn()
4296 sch->ops.name, ei->reason); in scx_disable_workfn()
4299 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg); in scx_disable_workfn()
4305 sch->ops.name, ei->reason); in scx_disable_workfn()
4308 if (sch->ops.exit) in scx_disable_workfn()
4309 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei); in scx_disable_workfn()
4326 kobject_del(&sch->kobj); in scx_disable_workfn()
4345 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind) in scx_claim_exit() argument
4349 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind)) in scx_claim_exit()
4363 struct scx_sched *sch; in scx_disable() local
4369 sch = rcu_dereference(scx_root); in scx_disable()
4370 if (sch) { in scx_disable()
4371 scx_claim_exit(sch, kind); in scx_disable()
4372 kthread_queue_work(sch->helper, &sch->disable_work); in scx_disable()
4492 struct scx_sched *sch = scx_root; in scx_dump_task() local
4516 if (SCX_HAS_OP(sch, dump_task)) { in scx_dump_task()
4518 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p); in scx_dump_task()
4535 struct scx_sched *sch = scx_root; in scx_dump_state() local
4564 if (SCX_HAS_OP(sch, dump)) { in scx_dump_state()
4566 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx); in scx_dump_state()
4587 if (idle && !SCX_HAS_OP(sch, dump_cpu)) in scx_dump_state()
4621 if (SCX_HAS_OP(sch, dump_cpu)) { in scx_dump_state()
4623 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL, in scx_dump_state()
4658 scx_read_events(sch, &events); in scx_dump_state()
4678 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work); in scx_error_irq_workfn() local
4679 struct scx_exit_info *ei = sch->exit_info; in scx_error_irq_workfn()
4682 scx_dump_state(ei, sch->ops.exit_dump_len); in scx_error_irq_workfn()
4684 kthread_queue_work(sch->helper, &sch->disable_work); in scx_error_irq_workfn()
4687 static bool scx_vexit(struct scx_sched *sch, in scx_vexit() argument
4691 struct scx_exit_info *ei = sch->exit_info; in scx_vexit()
4693 if (!scx_claim_exit(sch, kind)) in scx_vexit()
4710 irq_work_queue(&sch->error_irq_work); in scx_vexit()
4743 struct scx_sched *sch; in scx_alloc_and_add_sched() local
4746 sch = kzalloc(sizeof(*sch), GFP_KERNEL); in scx_alloc_and_add_sched()
4747 if (!sch) in scx_alloc_and_add_sched()
4750 sch->exit_info = alloc_exit_info(ops->exit_dump_len); in scx_alloc_and_add_sched()
4751 if (!sch->exit_info) { in scx_alloc_and_add_sched()
4756 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params); in scx_alloc_and_add_sched()
4760 sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]), in scx_alloc_and_add_sched()
4762 if (!sch->global_dsqs) { in scx_alloc_and_add_sched()
4777 sch->global_dsqs[node] = dsq; in scx_alloc_and_add_sched()
4780 sch->pcpu = alloc_percpu(struct scx_sched_pcpu); in scx_alloc_and_add_sched()
4781 if (!sch->pcpu) { in scx_alloc_and_add_sched()
4786 sch->helper = kthread_run_worker(0, "sched_ext_helper"); in scx_alloc_and_add_sched()
4787 if (IS_ERR(sch->helper)) { in scx_alloc_and_add_sched()
4788 ret = PTR_ERR(sch->helper); in scx_alloc_and_add_sched()
4792 sched_set_fifo(sch->helper->task); in scx_alloc_and_add_sched()
4794 atomic_set(&sch->exit_kind, SCX_EXIT_NONE); in scx_alloc_and_add_sched()
4795 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn); in scx_alloc_and_add_sched()
4796 kthread_init_work(&sch->disable_work, scx_disable_workfn); in scx_alloc_and_add_sched()
4797 sch->ops = *ops; in scx_alloc_and_add_sched()
4798 ops->priv = sch; in scx_alloc_and_add_sched()
4800 sch->kobj.kset = scx_kset; in scx_alloc_and_add_sched()
4801 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root"); in scx_alloc_and_add_sched()
4805 return sch; in scx_alloc_and_add_sched()
4808 kthread_destroy_worker(sch->helper); in scx_alloc_and_add_sched()
4810 free_percpu(sch->pcpu); in scx_alloc_and_add_sched()
4813 kfree(sch->global_dsqs[node]); in scx_alloc_and_add_sched()
4814 kfree(sch->global_dsqs); in scx_alloc_and_add_sched()
4816 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); in scx_alloc_and_add_sched()
4818 free_exit_info(sch->exit_info); in scx_alloc_and_add_sched()
4820 kfree(sch); in scx_alloc_and_add_sched()
4824 static int check_hotplug_seq(struct scx_sched *sch, in check_hotplug_seq() argument
4837 scx_exit(sch, SCX_EXIT_UNREG_KERN, in check_hotplug_seq()
4848 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) in validate_ops() argument
4855 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); in validate_ops()
4865 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled"); in validate_ops()
4880 struct scx_sched *sch; in scx_enable() local
4903 sch = scx_alloc_and_add_sched(ops); in scx_enable()
4904 if (IS_ERR(sch)) { in scx_enable()
4905 ret = PTR_ERR(sch); in scx_enable()
4933 rcu_assign_pointer(scx_root, sch); in scx_enable()
4937 if (sch->ops.init) { in scx_enable()
4938 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL); in scx_enable()
4940 ret = ops_sanitize_err(sch, "init", ret); in scx_enable()
4942 scx_error(sch, "ops.init() failed (%d)", ret); in scx_enable()
4945 sch->exit_info->flags |= SCX_EFLAG_INITIALIZED; in scx_enable()
4950 set_bit(i, sch->has_op); in scx_enable()
4952 ret = check_hotplug_seq(sch, ops); in scx_enable()
4961 ret = validate_ops(sch, ops); in scx_enable()
4996 set_bit(i, sch->has_op); in scx_enable()
4998 if (sch->ops.cpu_acquire || sch->ops.cpu_release) in scx_enable()
4999 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT; in scx_enable()
5024 ret = scx_cgroup_init(sch); in scx_enable()
5044 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]", in scx_enable()
5094 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE); in scx_enable()
5102 sch->ops.name, scx_switched_all() ? "" : " (partial)"); in scx_enable()
5103 kobject_uevent(&sch->kobj, KOBJ_ADD); in scx_enable()
5131 scx_error(sch, "scx_enable() failed (%d)", ret); in scx_enable()
5132 kthread_flush_work(&sch->disable_work); in scx_enable()
5269 struct scx_sched *sch = ops->priv; in bpf_scx_unreg() local
5272 kthread_flush_work(&sch->disable_work); in bpf_scx_unreg()
5273 kobject_put(&sch->kobj); in bpf_scx_unreg()
5565 struct scx_sched *sch = scx_root; in print_scx_info() local
5581 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name, in print_scx_info()
5593 log_lvl, sch->ops.name, scx_enable_state_str[state], all, in print_scx_info()
5668 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p, in scx_dsq_insert_preamble() argument
5671 if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) in scx_dsq_insert_preamble()
5677 scx_error(sch, "called with NULL task"); in scx_dsq_insert_preamble()
5682 scx_error(sch, "invalid enq_flags 0x%llx", enq_flags); in scx_dsq_insert_preamble()
5689 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p, in scx_dsq_insert_commit() argument
5697 mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags); in scx_dsq_insert_commit()
5702 scx_error(sch, "dispatch buffer overflow"); in scx_dsq_insert_commit()
5758 struct scx_sched *sch; in scx_bpf_dsq_insert___v2() local
5761 sch = rcu_dereference(scx_root); in scx_bpf_dsq_insert___v2()
5762 if (unlikely(!sch)) in scx_bpf_dsq_insert___v2()
5765 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) in scx_bpf_dsq_insert___v2()
5773 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags); in scx_bpf_dsq_insert___v2()
5787 static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, in scx_dsq_insert_vtime() argument
5790 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) in scx_dsq_insert_vtime()
5800 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); in scx_dsq_insert_vtime()
5847 struct scx_sched *sch; in __scx_bpf_dsq_insert_vtime() local
5851 sch = rcu_dereference(scx_root); in __scx_bpf_dsq_insert_vtime()
5852 if (unlikely(!sch)) in __scx_bpf_dsq_insert_vtime()
5855 return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice, in __scx_bpf_dsq_insert_vtime()
5865 struct scx_sched *sch; in scx_bpf_dsq_insert_vtime() local
5869 sch = rcu_dereference(scx_root); in scx_bpf_dsq_insert_vtime()
5870 if (unlikely(!sch)) in scx_bpf_dsq_insert_vtime()
5873 scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags); in scx_bpf_dsq_insert_vtime()
5893 struct scx_sched *sch = scx_root; in scx_dsq_move() local
5901 !scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_dsq_move()
5947 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p); in scx_dsq_move()
5960 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq); in scx_dsq_move()
5986 struct scx_sched *sch; in scx_bpf_dispatch_nr_slots() local
5990 sch = rcu_dereference(scx_root); in scx_bpf_dispatch_nr_slots()
5991 if (unlikely(!sch)) in scx_bpf_dispatch_nr_slots()
5994 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_bpf_dispatch_nr_slots()
6009 struct scx_sched *sch; in scx_bpf_dispatch_cancel() local
6013 sch = rcu_dereference(scx_root); in scx_bpf_dispatch_cancel()
6014 if (unlikely(!sch)) in scx_bpf_dispatch_cancel()
6017 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_bpf_dispatch_cancel()
6023 scx_error(sch, "dispatch buffer underflow"); in scx_bpf_dispatch_cancel()
6044 struct scx_sched *sch; in scx_bpf_dsq_move_to_local() local
6048 sch = rcu_dereference(scx_root); in scx_bpf_dsq_move_to_local()
6049 if (unlikely(!sch)) in scx_bpf_dsq_move_to_local()
6052 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_bpf_dsq_move_to_local()
6055 flush_dispatch_buf(sch, dspc->rq); in scx_bpf_dsq_move_to_local()
6057 dsq = find_user_dsq(sch, dsq_id); in scx_bpf_dsq_move_to_local()
6059 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id); in scx_bpf_dsq_move_to_local()
6063 if (consume_dispatch_q(sch, dspc->rq, dsq)) { in scx_bpf_dsq_move_to_local()
6245 struct scx_sched *sch; in scx_bpf_reenqueue_local() local
6249 sch = rcu_dereference(scx_root); in scx_bpf_reenqueue_local()
6250 if (unlikely(!sch)) in scx_bpf_reenqueue_local()
6253 if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE)) in scx_bpf_reenqueue_local()
6286 struct scx_sched *sch; in scx_bpf_create_dsq() local
6304 sch = rcu_dereference(scx_root); in scx_bpf_create_dsq()
6305 if (sch) in scx_bpf_create_dsq()
6306 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node, in scx_bpf_create_dsq()
6362 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags) in scx_kick_cpu() argument
6367 if (!ops_cpu_valid(sch, cpu, NULL)) in scx_kick_cpu()
6391 scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); in scx_kick_cpu()
6427 struct scx_sched *sch; in scx_bpf_kick_cpu() local
6430 sch = rcu_dereference(scx_root); in scx_bpf_kick_cpu()
6431 if (likely(sch)) in scx_bpf_kick_cpu()
6432 scx_kick_cpu(sch, cpu, flags); in scx_bpf_kick_cpu()
6444 struct scx_sched *sch; in scx_bpf_dsq_nr_queued() local
6450 sch = rcu_dereference_sched(scx_root); in scx_bpf_dsq_nr_queued()
6451 if (unlikely(!sch)) { in scx_bpf_dsq_nr_queued()
6462 if (ops_cpu_valid(sch, cpu, NULL)) { in scx_bpf_dsq_nr_queued()
6467 dsq = find_user_dsq(sch, dsq_id); in scx_bpf_dsq_nr_queued()
6490 struct scx_sched *sch; in scx_bpf_destroy_dsq() local
6493 sch = rcu_dereference(scx_root); in scx_bpf_destroy_dsq()
6494 if (sch) in scx_bpf_destroy_dsq()
6495 destroy_dsq(sch, dsq_id); in scx_bpf_destroy_dsq()
6513 struct scx_sched *sch; in bpf_iter_scx_dsq_new() local
6528 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held()); in bpf_iter_scx_dsq_new()
6529 if (unlikely(!sch)) in bpf_iter_scx_dsq_new()
6535 kit->dsq = find_user_dsq(sch, dsq_id); in bpf_iter_scx_dsq_new()
6627 struct scx_sched *sch; in scx_bpf_dsq_peek() local
6630 sch = rcu_dereference(scx_root); in scx_bpf_dsq_peek()
6631 if (unlikely(!sch)) in scx_bpf_dsq_peek()
6635 scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id); in scx_bpf_dsq_peek()
6639 dsq = find_user_dsq(sch, dsq_id); in scx_bpf_dsq_peek()
6641 scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id); in scx_bpf_dsq_peek()
6650 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf, in __bstr_format() argument
6659 scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz); in __bstr_format()
6665 scx_error(sch, "failed to read data fields (%d)", ret); in __bstr_format()
6672 scx_error(sch, "format preparation failed (%d)", ret); in __bstr_format()
6680 scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz); in __bstr_format()
6687 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf, in bstr_format() argument
6690 return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line), in bstr_format()
6709 struct scx_sched *sch; in scx_bpf_exit_bstr() local
6713 sch = rcu_dereference_bh(scx_root); in scx_bpf_exit_bstr()
6714 if (likely(sch) && in scx_bpf_exit_bstr()
6715 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) in scx_bpf_exit_bstr()
6716 scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line); in scx_bpf_exit_bstr()
6732 struct scx_sched *sch; in scx_bpf_error_bstr() local
6736 sch = rcu_dereference_bh(scx_root); in scx_bpf_error_bstr()
6737 if (likely(sch) && in scx_bpf_error_bstr()
6738 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) in scx_bpf_error_bstr()
6739 scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line); in scx_bpf_error_bstr()
6758 struct scx_sched *sch; in scx_bpf_dump_bstr() local
6765 sch = rcu_dereference(scx_root); in scx_bpf_dump_bstr()
6766 if (unlikely(!sch)) in scx_bpf_dump_bstr()
6770 scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends"); in scx_bpf_dump_bstr()
6775 ret = __bstr_format(sch, buf->data, buf->line + dd->cursor, in scx_bpf_dump_bstr()
6829 struct scx_sched *sch; in scx_bpf_cpuperf_cap() local
6833 sch = rcu_dereference(scx_root); in scx_bpf_cpuperf_cap()
6834 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpuperf_cap()
6856 struct scx_sched *sch; in scx_bpf_cpuperf_cur() local
6860 sch = rcu_dereference(scx_root); in scx_bpf_cpuperf_cur()
6861 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpuperf_cur()
6883 struct scx_sched *sch; in scx_bpf_cpuperf_set() local
6887 sch = rcu_dereference(scx_root); in scx_bpf_cpuperf_set()
6888 if (unlikely(!sch)) in scx_bpf_cpuperf_set()
6892 scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu); in scx_bpf_cpuperf_set()
6896 if (ops_cpu_valid(sch, cpu, NULL)) { in scx_bpf_cpuperf_set()
6905 scx_error(sch, "Invalid target CPU %d", cpu); in scx_bpf_cpuperf_set()
7000 struct scx_sched *sch; in scx_bpf_cpu_rq() local
7004 sch = rcu_dereference(scx_root); in scx_bpf_cpu_rq()
7005 if (unlikely(!sch)) in scx_bpf_cpu_rq()
7008 if (!ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpu_rq()
7011 if (!sch->warned_deprecated_rq) { in scx_bpf_cpu_rq()
7015 sch->warned_deprecated_rq = true; in scx_bpf_cpu_rq()
7029 struct scx_sched *sch; in scx_bpf_locked_rq() local
7034 sch = rcu_dereference_sched(scx_root); in scx_bpf_locked_rq()
7035 if (unlikely(!sch)) in scx_bpf_locked_rq()
7040 scx_error(sch, "accessing rq without holding rq lock"); in scx_bpf_locked_rq()
7055 struct scx_sched *sch; in scx_bpf_cpu_curr() local
7059 sch = rcu_dereference(scx_root); in scx_bpf_cpu_curr()
7060 if (unlikely(!sch)) in scx_bpf_cpu_curr()
7063 if (!ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpu_curr()
7085 struct scx_sched *sch; in scx_bpf_task_cgroup() local
7089 sch = rcu_dereference(scx_root); in scx_bpf_task_cgroup()
7090 if (unlikely(!sch)) in scx_bpf_task_cgroup()
7093 if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p)) in scx_bpf_task_cgroup()
7166 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events) in scx_read_events() argument
7174 e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats; in scx_read_events()
7195 struct scx_sched *sch; in scx_bpf_events() local
7199 sch = rcu_dereference(scx_root); in scx_bpf_events()
7200 if (sch) in scx_bpf_events()
7201 scx_read_events(sch, &e_sys); in scx_bpf_events()
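Most of the scx_bpf_*() kfunc matches above repeat one guard pattern: dereference scx_root under RCU, bail out when no BPF scheduler is loaded, and only then call the sch-scoped helper (see, for example, the scx_bpf_kick_cpu() and scx_bpf_destroy_dsq() lines). The sketch below condenses that pattern; it is illustrative rather than verbatim kernel code, the function name is hypothetical, and it assumes the surrounding kernel sched_ext context (guard(rcu)() is the kernel's RCU scope helper).

/* Hypothetical wrapper showing the recurring scx_root guard seen above. */
static void example_kick_cpu(s32 cpu, u64 flags)
{
	struct scx_sched *sch;

	guard(rcu)();				/* hold an RCU read-side section around scx_root */
	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))			/* no BPF scheduler attached, nothing to do */
		return;

	scx_kick_cpu(sch, cpu, flags);		/* sch-scoped helper; it validates cpu via ops_cpu_valid() */
}

The other pattern that dominates this listing is the ops-callback gate: SCX_HAS_OP(sch, op) tests (sch)->has_op before an SCX_CALL_OP*() macro invokes (sch)->ops.op under the appropriate SCX_KF_* mask, as in the task_tick_scx() and set_next_task_scx() lines above.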