Lines Matching full:sch
150 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
151 static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
154 static __printf(4, 5) void scx_exit(struct scx_sched *sch, in scx_exit() argument
161 scx_vexit(sch, kind, exit_code, fmt, args); in scx_exit()
165 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args) argument
167 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op) argument
195 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch, in find_global_dsq() argument
198 return sch->global_dsqs[cpu_to_node(task_cpu(p))]; in find_global_dsq()
201 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id) in find_user_dsq() argument
203 return rhashtable_lookup_fast(&sch->dsq_hash, &dsq_id, dsq_hash_params); in find_user_dsq()
250 #define SCX_CALL_OP(sch, mask, op, rq, args...) \ argument
256 (sch)->ops.op(args); \
259 (sch)->ops.op(args); \
265 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \ argument
267 __typeof__((sch)->ops.op(args)) __ret; \
273 __ret = (sch)->ops.op(args); \
276 __ret = (sch)->ops.op(args); \
294 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \ argument
298 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
302 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \ argument
304 __typeof__((sch)->ops.op(task, ##args)) __ret; \
307 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
312 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \ argument
314 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
318 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
325 static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask) in scx_kf_allowed() argument
328 scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x", in scx_kf_allowed()
342 scx_error(sch, "cpu_release kfunc called from a nested operation"); in scx_kf_allowed()
348 scx_error(sch, "dispatch kfunc called from a nested operation"); in scx_kf_allowed()
356 static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch, in scx_kf_allowed_on_arg_tasks() argument
360 if (!scx_kf_allowed(sch, mask)) in scx_kf_allowed_on_arg_tasks()
365 scx_error(sch, "called on a task not being operated on"); in scx_kf_allowed_on_arg_tasks()
625 * @sch: scx_sched to account events for
631 #define scx_add_event(sch, name, cnt) do { \ argument
632 this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
638 * @sch: scx_sched to account events for
644 #define __scx_add_event(sch, name, cnt) do { \ argument
645 __this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
670 static void scx_read_events(struct scx_sched *sch,
715 * @sch: scx_sched to abort on error
723 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where) in ops_cpu_valid() argument
728 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: ""); in ops_cpu_valid()
735 * @sch: scx_sched to error out on error
746 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err) in ops_sanitize_err() argument
751 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err); in ops_sanitize_err()
903 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p) in refill_task_slice_dfl() argument
906 __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1); in refill_task_slice_dfl()
909 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq, in dispatch_enqueue() argument
921 scx_error(sch, "attempting to dispatch to a destroyed dsq"); in dispatch_enqueue()
924 dsq = find_global_dsq(sch, p); in dispatch_enqueue()
938 scx_error(sch, "cannot use vtime ordering for built-in DSQs"); in dispatch_enqueue()
952 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks", in dispatch_enqueue()
974 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks", in dispatch_enqueue()
1090 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch, in find_dsq_for_dispatch() argument
1102 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict")) in find_dsq_for_dispatch()
1103 return find_global_dsq(sch, p); in find_dsq_for_dispatch()
1109 dsq = find_global_dsq(sch, p); in find_dsq_for_dispatch()
1111 dsq = find_user_dsq(sch, dsq_id); in find_dsq_for_dispatch()
1114 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]", in find_dsq_for_dispatch()
1116 return find_global_dsq(sch, p); in find_dsq_for_dispatch()
1122 static void mark_direct_dispatch(struct scx_sched *sch, in mark_direct_dispatch() argument
1137 scx_error(sch, "%s[%d] already direct-dispatched", in mark_direct_dispatch()
1140 scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]", in mark_direct_dispatch()
1153 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p, in direct_dispatch() argument
1158 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in direct_dispatch()
1199 dispatch_enqueue(sch, dsq, p, in direct_dispatch()
1218 struct scx_sched *sch = scx_root; in do_enqueue_task() local
1237 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); in do_enqueue_task()
1245 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) && in do_enqueue_task()
1247 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1); in do_enqueue_task()
1252 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) && in do_enqueue_task()
1254 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1); in do_enqueue_task()
1258 if (unlikely(!SCX_HAS_OP(sch, enqueue))) in do_enqueue_task()
1271 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags); in do_enqueue_task()
1285 direct_dispatch(sch, p, enq_flags); in do_enqueue_task()
1295 refill_task_slice_dfl(sch, p); in do_enqueue_task()
1297 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); in do_enqueue_task()
1302 refill_task_slice_dfl(sch, p); in do_enqueue_task()
1303 dispatch_enqueue(sch, find_global_dsq(sch, p), p, enq_flags); in do_enqueue_task()
1336 struct scx_sched *sch = scx_root; in enqueue_task_scx() local
1366 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p)) in enqueue_task_scx()
1367 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags); in enqueue_task_scx()
1378 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1); in enqueue_task_scx()
1383 struct scx_sched *sch = scx_root; in ops_dequeue() local
1402 if (SCX_HAS_OP(sch, dequeue)) in ops_dequeue()
1403 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq, in ops_dequeue()
1432 struct scx_sched *sch = scx_root; in dequeue_task_scx() local
1453 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) { in dequeue_task_scx()
1455 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false); in dequeue_task_scx()
1458 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p)) in dequeue_task_scx()
1459 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags); in dequeue_task_scx()
1476 struct scx_sched *sch = scx_root; in yield_task_scx() local
1479 if (SCX_HAS_OP(sch, yield)) in yield_task_scx()
1480 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL); in yield_task_scx()
1487 struct scx_sched *sch = scx_root; in yield_to_task_scx() local
1490 if (SCX_HAS_OP(sch, yield)) in yield_to_task_scx()
1491 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, in yield_to_task_scx()
1571 static bool task_can_run_on_remote_rq(struct scx_sched *sch, in task_can_run_on_remote_rq() argument
1593 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d", in task_can_run_on_remote_rq()
1606 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]", in task_can_run_on_remote_rq()
1613 __scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1); in task_can_run_on_remote_rq()
1686 * @sch: scx_sched being operated on
1700 static struct rq *move_task_between_dsqs(struct scx_sched *sch, in move_task_between_dsqs() argument
1714 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { in move_task_between_dsqs()
1715 dst_dsq = find_global_dsq(sch, p); in move_task_between_dsqs()
1748 dispatch_enqueue(sch, dst_dsq, p, enq_flags); in move_task_between_dsqs()
1783 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq, in consume_dispatch_q() argument
1816 if (task_can_run_on_remote_rq(sch, p, rq, false)) { in consume_dispatch_q()
1827 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq) in consume_global_dsq() argument
1831 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]); in consume_global_dsq()
1836 * @sch: scx_sched being operated on
1849 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq, in dispatch_to_local_dsq() argument
1864 dispatch_enqueue(sch, dst_dsq, p, in dispatch_to_local_dsq()
1870 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { in dispatch_to_local_dsq()
1871 dispatch_enqueue(sch, find_global_dsq(sch, p), p, in dispatch_to_local_dsq()
1909 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p, in dispatch_to_local_dsq()
1949 static void finish_dispatch(struct scx_sched *sch, struct rq *rq, in finish_dispatch() argument
2003 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p); in finish_dispatch()
2006 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags); in finish_dispatch()
2008 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); in finish_dispatch()
2011 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq) in flush_dispatch_buf() argument
2019 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id, in flush_dispatch_buf()
2042 struct scx_sched *sch = scx_root; in balance_one() local
2052 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) && in balance_one()
2060 if (SCX_HAS_OP(sch, cpu_acquire)) in balance_one()
2061 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq, in balance_one()
2089 if (consume_global_dsq(sch, rq)) in balance_one()
2092 if (unlikely(!SCX_HAS_OP(sch, dispatch)) || in balance_one()
2108 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq, in balance_one()
2111 flush_dispatch_buf(sch, rq); in balance_one()
2119 if (consume_global_dsq(sch, rq)) in balance_one()
2132 scx_kick_cpu(sch, cpu_of(rq), 0); in balance_one()
2143 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) { in balance_one()
2145 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1); in balance_one()
2207 struct scx_sched *sch = scx_root; in process_ddsp_deferred_locals() local
2212 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in process_ddsp_deferred_locals()
2214 dispatch_to_local_dsq(sch, rq, dsq, p, in process_ddsp_deferred_locals()
2221 struct scx_sched *sch = scx_root; in set_next_task_scx() local
2235 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED)) in set_next_task_scx()
2236 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p); in set_next_task_scx()
2277 struct scx_sched *sch = scx_root; in switch_class() local
2286 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT)) in switch_class()
2308 if (SCX_HAS_OP(sch, cpu_release)) { in switch_class()
2314 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq, in switch_class()
2324 struct scx_sched *sch = scx_root; in put_prev_task_scx() local
2328 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED)) in put_prev_task_scx()
2329 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true); in put_prev_task_scx()
2341 dispatch_enqueue(sch, &rq->scx.local_dsq, p, in put_prev_task_scx()
2353 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST)); in put_prev_task_scx()
2429 struct scx_sched *sch = rcu_dereference_sched(scx_root); in pick_task_scx() local
2431 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) { in pick_task_scx()
2434 sch->warned_zero_slice = true; in pick_task_scx()
2436 refill_task_slice_dfl(sch, p); in pick_task_scx()
2465 struct scx_sched *sch = scx_root; in scx_prio_less() local
2472 if (SCX_HAS_OP(sch, core_sched_before) && in scx_prio_less()
2474 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before, in scx_prio_less()
2485 struct scx_sched *sch = scx_root; in select_task_rq_scx() local
2502 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) { in select_task_rq_scx()
2510 cpu = SCX_CALL_OP_TASK_RET(sch, in select_task_rq_scx()
2516 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()")) in select_task_rq_scx()
2525 refill_task_slice_dfl(sch, p); in select_task_rq_scx()
2533 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); in select_task_rq_scx()
2546 struct scx_sched *sch = scx_root; in set_cpus_allowed_scx() local
2558 if (SCX_HAS_OP(sch, set_cpumask)) in set_cpus_allowed_scx()
2559 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL, in set_cpus_allowed_scx()
2565 struct scx_sched *sch = scx_root; in handle_hotplug() local
2575 if (unlikely(!sch)) in handle_hotplug()
2579 scx_idle_update_selcpu_topology(&sch->ops); in handle_hotplug()
2581 if (online && SCX_HAS_OP(sch, cpu_online)) in handle_hotplug()
2582 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu); in handle_hotplug()
2583 else if (!online && SCX_HAS_OP(sch, cpu_offline)) in handle_hotplug()
2584 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu); in handle_hotplug()
2586 scx_exit(sch, SCX_EXIT_UNREG_KERN, in handle_hotplug()
2615 struct scx_sched *sch; in check_rq_for_timeouts() local
2621 sch = rcu_dereference_bh(scx_root); in check_rq_for_timeouts()
2622 if (unlikely(!sch)) in check_rq_for_timeouts()
2632 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, in check_rq_for_timeouts()
2662 struct scx_sched *sch; in scx_tick() local
2668 sch = rcu_dereference_bh(scx_root); in scx_tick()
2669 if (unlikely(!sch)) in scx_tick()
2677 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, in scx_tick()
2687 struct scx_sched *sch = scx_root; in task_tick_scx() local
2698 } else if (SCX_HAS_OP(sch, tick)) { in task_tick_scx()
2699 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr); in task_tick_scx()
2766 struct scx_sched *sch = scx_root; in scx_init_task() local
2771 if (SCX_HAS_OP(sch, init_task)) { in scx_init_task()
2777 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL, in scx_init_task()
2780 ret = ops_sanitize_err(sch, "init_task", ret); in scx_init_task()
2808 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork", in scx_init_task()
2819 struct scx_sched *sch = scx_root; in scx_enable_task() local
2836 if (SCX_HAS_OP(sch, enable)) in scx_enable_task()
2837 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p); in scx_enable_task()
2840 if (SCX_HAS_OP(sch, set_weight)) in scx_enable_task()
2841 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, in scx_enable_task()
2847 struct scx_sched *sch = scx_root; in scx_disable_task() local
2853 if (SCX_HAS_OP(sch, disable)) in scx_disable_task()
2854 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p); in scx_disable_task()
2860 struct scx_sched *sch = scx_root; in scx_exit_task() local
2883 if (SCX_HAS_OP(sch, exit_task)) in scx_exit_task()
2884 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p), in scx_exit_task()
2990 struct scx_sched *sch = scx_root; in reweight_task_scx() local
2995 if (SCX_HAS_OP(sch, set_weight)) in reweight_task_scx()
2996 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, in reweight_task_scx()
3006 struct scx_sched *sch = scx_root; in switching_to_scx() local
3014 if (SCX_HAS_OP(sch, set_cpumask)) in switching_to_scx()
3015 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq, in switching_to_scx()
3073 struct scx_sched *sch = scx_root; in scx_tg_online() local
3079 if (SCX_HAS_OP(sch, cgroup_init)) { in scx_tg_online()
3086 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, in scx_tg_online()
3089 ret = ops_sanitize_err(sch, "cgroup_init", ret); in scx_tg_online()
3102 struct scx_sched *sch = scx_root; in scx_tg_offline() local
3106 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) && in scx_tg_offline()
3108 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, in scx_tg_offline()
3115 struct scx_sched *sch = scx_root; in scx_cgroup_can_attach() local
3137 if (SCX_HAS_OP(sch, cgroup_prep_move)) { in scx_cgroup_can_attach()
3138 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, in scx_cgroup_can_attach()
3152 if (SCX_HAS_OP(sch, cgroup_cancel_move) && in scx_cgroup_can_attach()
3154 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, in scx_cgroup_can_attach()
3159 return ops_sanitize_err(sch, "cgroup_prep_move", ret); in scx_cgroup_can_attach()
3164 struct scx_sched *sch = scx_root; in scx_cgroup_move_task() local
3173 if (SCX_HAS_OP(sch, cgroup_move) && in scx_cgroup_move_task()
3175 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL, in scx_cgroup_move_task()
3183 struct scx_sched *sch = scx_root; in scx_cgroup_cancel_attach() local
3191 if (SCX_HAS_OP(sch, cgroup_cancel_move) && in scx_cgroup_cancel_attach()
3193 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, in scx_cgroup_cancel_attach()
3201 struct scx_sched *sch = scx_root; in scx_group_set_weight() local
3205 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) && in scx_group_set_weight()
3207 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL, in scx_group_set_weight()
3223 struct scx_sched *sch = scx_root; in scx_group_set_bandwidth() local
3227 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) && in scx_group_set_bandwidth()
3231 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL, in scx_group_set_bandwidth()
3328 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id) in destroy_dsq() argument
3335 dsq = find_user_dsq(sch, dsq_id); in destroy_dsq()
3342 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)", in destroy_dsq()
3347 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node, in destroy_dsq()
3368 static void scx_cgroup_exit(struct scx_sched *sch) in scx_cgroup_exit() argument
3385 if (!sch->ops.cgroup_exit) in scx_cgroup_exit()
3388 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, in scx_cgroup_exit()
3393 static int scx_cgroup_init(struct scx_sched *sch) in scx_cgroup_init() argument
3415 if (!sch->ops.cgroup_init) { in scx_cgroup_init()
3420 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL, in scx_cgroup_init()
3424 scx_error(sch, "ops.cgroup_init() failed (%d)", ret); in scx_cgroup_init()
3437 static void scx_cgroup_exit(struct scx_sched *sch) {} in scx_cgroup_exit() argument
3438 static int scx_cgroup_init(struct scx_sched *sch) { return 0; } in scx_cgroup_init() argument
3505 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work); in scx_sched_free_rcu_work() local
3510 irq_work_sync(&sch->error_irq_work); in scx_sched_free_rcu_work()
3511 kthread_stop(sch->helper->task); in scx_sched_free_rcu_work()
3513 free_percpu(sch->pcpu); in scx_sched_free_rcu_work()
3516 kfree(sch->global_dsqs[node]); in scx_sched_free_rcu_work()
3517 kfree(sch->global_dsqs); in scx_sched_free_rcu_work()
3519 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter); in scx_sched_free_rcu_work()
3524 destroy_dsq(sch, dsq->id); in scx_sched_free_rcu_work()
3530 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); in scx_sched_free_rcu_work()
3531 free_exit_info(sch->exit_info); in scx_sched_free_rcu_work()
3532 kfree(sch); in scx_sched_free_rcu_work()
3537 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); in scx_kobj_release() local
3539 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work); in scx_kobj_release()
3540 queue_rcu_work(system_unbound_wq, &sch->rcu_work); in scx_kobj_release()
3557 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); in scx_attr_events_show() local
3561 scx_read_events(sch, &events); in scx_attr_events_show()
3612 struct scx_sched *sch; in scx_allow_ttwu_queue() local
3617 sch = rcu_dereference_sched(scx_root); in scx_allow_ttwu_queue()
3618 if (unlikely(!sch)) in scx_allow_ttwu_queue()
3621 if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP) in scx_allow_ttwu_queue()
3640 struct scx_sched *sch; in scx_rcu_cpu_stall() local
3644 sch = rcu_dereference(scx_root); in scx_rcu_cpu_stall()
3645 if (unlikely(!sch)) { in scx_rcu_cpu_stall()
3659 scx_error(sch, "RCU CPU stall detected!"); in scx_rcu_cpu_stall()
3677 struct scx_sched *sch; in scx_softlockup() local
3681 sch = rcu_dereference(scx_root); in scx_softlockup()
3682 if (unlikely(!sch)) in scx_softlockup()
3706 scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s); in scx_softlockup()
3752 struct scx_sched *sch; in scx_bypass() local
3757 sch = rcu_dereference_bh(scx_root); in scx_bypass()
3765 if (sch) in scx_bypass()
3766 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); in scx_bypass()
3772 if (sch) in scx_bypass()
3773 scx_add_event(sch, SCX_EV_BYPASS_DURATION, in scx_bypass()
3914 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); in scx_disable_workfn() local
3915 struct scx_exit_info *ei = sch->exit_info; in scx_disable_workfn()
3920 kind = atomic_read(&sch->exit_kind); in scx_disable_workfn()
3925 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE)) in scx_disable_workfn()
3940 sch->exit_info->msg); in scx_disable_workfn()
3962 scx_cgroup_exit(sch); in scx_disable_workfn()
4007 bitmap_zero(sch->has_op, SCX_OPI_END); in scx_disable_workfn()
4013 sch->ops.name, ei->reason); in scx_disable_workfn()
4016 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg); in scx_disable_workfn()
4022 sch->ops.name, ei->reason); in scx_disable_workfn()
4025 if (sch->ops.exit) in scx_disable_workfn()
4026 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei); in scx_disable_workfn()
4043 kobject_del(&sch->kobj); in scx_disable_workfn()
4060 struct scx_sched *sch; in scx_disable() local
4066 sch = rcu_dereference(scx_root); in scx_disable()
4067 if (sch) { in scx_disable()
4068 atomic_try_cmpxchg(&sch->exit_kind, &none, kind); in scx_disable()
4069 kthread_queue_work(sch->helper, &sch->disable_work); in scx_disable()
4189 struct scx_sched *sch = scx_root; in scx_dump_task() local
4213 if (SCX_HAS_OP(sch, dump_task)) { in scx_dump_task()
4215 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p); in scx_dump_task()
4232 struct scx_sched *sch = scx_root; in scx_dump_state() local
4261 if (SCX_HAS_OP(sch, dump)) { in scx_dump_state()
4263 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx); in scx_dump_state()
4284 if (idle && !SCX_HAS_OP(sch, dump_cpu)) in scx_dump_state()
4318 if (SCX_HAS_OP(sch, dump_cpu)) { in scx_dump_state()
4320 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL, in scx_dump_state()
4355 scx_read_events(sch, &events); in scx_dump_state()
4375 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work); in scx_error_irq_workfn() local
4376 struct scx_exit_info *ei = sch->exit_info; in scx_error_irq_workfn()
4379 scx_dump_state(ei, sch->ops.exit_dump_len); in scx_error_irq_workfn()
4381 kthread_queue_work(sch->helper, &sch->disable_work); in scx_error_irq_workfn()
4384 static void scx_vexit(struct scx_sched *sch, in scx_vexit() argument
4388 struct scx_exit_info *ei = sch->exit_info; in scx_vexit()
4391 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind)) in scx_vexit()
4408 irq_work_queue(&sch->error_irq_work); in scx_vexit()
4440 struct scx_sched *sch; in scx_alloc_and_add_sched() local
4443 sch = kzalloc(sizeof(*sch), GFP_KERNEL); in scx_alloc_and_add_sched()
4444 if (!sch) in scx_alloc_and_add_sched()
4447 sch->exit_info = alloc_exit_info(ops->exit_dump_len); in scx_alloc_and_add_sched()
4448 if (!sch->exit_info) { in scx_alloc_and_add_sched()
4453 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params); in scx_alloc_and_add_sched()
4457 sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]), in scx_alloc_and_add_sched()
4459 if (!sch->global_dsqs) { in scx_alloc_and_add_sched()
4474 sch->global_dsqs[node] = dsq; in scx_alloc_and_add_sched()
4477 sch->pcpu = alloc_percpu(struct scx_sched_pcpu); in scx_alloc_and_add_sched()
4478 if (!sch->pcpu) in scx_alloc_and_add_sched()
4481 sch->helper = kthread_run_worker(0, "sched_ext_helper"); in scx_alloc_and_add_sched()
4482 if (!sch->helper) in scx_alloc_and_add_sched()
4484 sched_set_fifo(sch->helper->task); in scx_alloc_and_add_sched()
4486 atomic_set(&sch->exit_kind, SCX_EXIT_NONE); in scx_alloc_and_add_sched()
4487 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn); in scx_alloc_and_add_sched()
4488 kthread_init_work(&sch->disable_work, scx_disable_workfn); in scx_alloc_and_add_sched()
4489 sch->ops = *ops; in scx_alloc_and_add_sched()
4490 ops->priv = sch; in scx_alloc_and_add_sched()
4492 sch->kobj.kset = scx_kset; in scx_alloc_and_add_sched()
4493 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root"); in scx_alloc_and_add_sched()
4497 return sch; in scx_alloc_and_add_sched()
4500 kthread_stop(sch->helper->task); in scx_alloc_and_add_sched()
4502 free_percpu(sch->pcpu); in scx_alloc_and_add_sched()
4505 kfree(sch->global_dsqs[node]); in scx_alloc_and_add_sched()
4506 kfree(sch->global_dsqs); in scx_alloc_and_add_sched()
4508 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); in scx_alloc_and_add_sched()
4510 free_exit_info(sch->exit_info); in scx_alloc_and_add_sched()
4512 kfree(sch); in scx_alloc_and_add_sched()
4516 static void check_hotplug_seq(struct scx_sched *sch, in check_hotplug_seq() argument
4529 scx_exit(sch, SCX_EXIT_UNREG_KERN, in check_hotplug_seq()
4537 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) in validate_ops() argument
4544 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); in validate_ops()
4554 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled"); in validate_ops()
4566 struct scx_sched *sch; in scx_enable() local
4589 sch = scx_alloc_and_add_sched(ops); in scx_enable()
4590 if (IS_ERR(sch)) { in scx_enable()
4591 ret = PTR_ERR(sch); in scx_enable()
4617 rcu_assign_pointer(scx_root, sch); in scx_enable()
4621 if (sch->ops.init) { in scx_enable()
4622 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL); in scx_enable()
4624 ret = ops_sanitize_err(sch, "init", ret); in scx_enable()
4626 scx_error(sch, "ops.init() failed (%d)", ret); in scx_enable()
4629 sch->exit_info->flags |= SCX_EFLAG_INITIALIZED; in scx_enable()
4634 set_bit(i, sch->has_op); in scx_enable()
4636 check_hotplug_seq(sch, ops); in scx_enable()
4641 ret = validate_ops(sch, ops); in scx_enable()
4675 set_bit(i, sch->has_op); in scx_enable()
4677 if (sch->ops.cpu_acquire || sch->ops.cpu_release) in scx_enable()
4678 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT; in scx_enable()
4703 ret = scx_cgroup_init(sch); in scx_enable()
4723 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]", in scx_enable()
4779 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE); in scx_enable()
4787 sch->ops.name, scx_switched_all() ? "" : " (partial)"); in scx_enable()
4788 kobject_uevent(&sch->kobj, KOBJ_ADD); in scx_enable()
4814 * completion. sch's base reference will be put by bpf_scx_unreg(). in scx_enable()
4816 scx_error(sch, "scx_enable() failed (%d)", ret); in scx_enable()
4817 kthread_flush_work(&sch->disable_work); in scx_enable()
4954 struct scx_sched *sch = ops->priv; in bpf_scx_unreg() local
4957 kthread_flush_work(&sch->disable_work); in bpf_scx_unreg()
4958 kobject_put(&sch->kobj); in bpf_scx_unreg()
5238 struct scx_sched *sch = scx_root; in print_scx_info() local
5254 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name, in print_scx_info()
5266 log_lvl, sch->ops.name, scx_enable_state_str[state], all, in print_scx_info()
5340 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p, in scx_dsq_insert_preamble() argument
5343 if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) in scx_dsq_insert_preamble()
5349 scx_error(sch, "called with NULL task"); in scx_dsq_insert_preamble()
5354 scx_error(sch, "invalid enq_flags 0x%llx", enq_flags); in scx_dsq_insert_preamble()
5361 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p, in scx_dsq_insert_commit() argument
5369 mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags); in scx_dsq_insert_commit()
5374 scx_error(sch, "dispatch buffer overflow"); in scx_dsq_insert_commit()
5426 struct scx_sched *sch; in scx_bpf_dsq_insert() local
5429 sch = rcu_dereference(scx_root); in scx_bpf_dsq_insert()
5430 if (unlikely(!sch)) in scx_bpf_dsq_insert()
5433 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) in scx_bpf_dsq_insert()
5441 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags); in scx_bpf_dsq_insert()
5468 struct scx_sched *sch; in scx_bpf_dsq_insert_vtime() local
5471 sch = rcu_dereference(scx_root); in scx_bpf_dsq_insert_vtime()
5472 if (unlikely(!sch)) in scx_bpf_dsq_insert_vtime()
5475 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) in scx_bpf_dsq_insert_vtime()
5485 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); in scx_bpf_dsq_insert_vtime()
5503 struct scx_sched *sch = scx_root; in scx_dsq_move() local
5511 !scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_dsq_move()
5557 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p); in scx_dsq_move()
5570 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq); in scx_dsq_move()
5596 struct scx_sched *sch; in scx_bpf_dispatch_nr_slots() local
5600 sch = rcu_dereference(scx_root); in scx_bpf_dispatch_nr_slots()
5601 if (unlikely(!sch)) in scx_bpf_dispatch_nr_slots()
5604 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_bpf_dispatch_nr_slots()
5619 struct scx_sched *sch; in scx_bpf_dispatch_cancel() local
5623 sch = rcu_dereference(scx_root); in scx_bpf_dispatch_cancel()
5624 if (unlikely(!sch)) in scx_bpf_dispatch_cancel()
5627 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_bpf_dispatch_cancel()
5633 scx_error(sch, "dispatch buffer underflow"); in scx_bpf_dispatch_cancel()
5654 struct scx_sched *sch; in scx_bpf_dsq_move_to_local() local
5658 sch = rcu_dereference(scx_root); in scx_bpf_dsq_move_to_local()
5659 if (unlikely(!sch)) in scx_bpf_dsq_move_to_local()
5662 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) in scx_bpf_dsq_move_to_local()
5665 flush_dispatch_buf(sch, dspc->rq); in scx_bpf_dsq_move_to_local()
5667 dsq = find_user_dsq(sch, dsq_id); in scx_bpf_dsq_move_to_local()
5669 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id); in scx_bpf_dsq_move_to_local()
5673 if (consume_dispatch_q(sch, dspc->rq, dsq)) { in scx_bpf_dsq_move_to_local()
5809 struct scx_sched *sch; in scx_bpf_reenqueue_local() local
5816 sch = rcu_dereference(scx_root); in scx_bpf_reenqueue_local()
5817 if (unlikely(!sch)) in scx_bpf_reenqueue_local()
5820 if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE)) in scx_bpf_reenqueue_local()
5884 struct scx_sched *sch; in scx_bpf_create_dsq() local
5902 sch = rcu_dereference(scx_root); in scx_bpf_create_dsq()
5903 if (sch) in scx_bpf_create_dsq()
5904 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node, in scx_bpf_create_dsq()
5932 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags) in scx_kick_cpu() argument
5937 if (!ops_cpu_valid(sch, cpu, NULL)) in scx_kick_cpu()
5961 scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); in scx_kick_cpu()
5997 struct scx_sched *sch; in scx_bpf_kick_cpu() local
6000 sch = rcu_dereference(scx_root); in scx_bpf_kick_cpu()
6001 if (likely(sch)) in scx_bpf_kick_cpu()
6002 scx_kick_cpu(sch, cpu, flags); in scx_bpf_kick_cpu()
6014 struct scx_sched *sch; in scx_bpf_dsq_nr_queued() local
6020 sch = rcu_dereference_sched(scx_root); in scx_bpf_dsq_nr_queued()
6021 if (unlikely(!sch)) { in scx_bpf_dsq_nr_queued()
6032 if (ops_cpu_valid(sch, cpu, NULL)) { in scx_bpf_dsq_nr_queued()
6037 dsq = find_user_dsq(sch, dsq_id); in scx_bpf_dsq_nr_queued()
6060 struct scx_sched *sch; in scx_bpf_destroy_dsq() local
6063 sch = rcu_dereference(scx_root); in scx_bpf_destroy_dsq()
6064 if (sch) in scx_bpf_destroy_dsq()
6065 destroy_dsq(sch, dsq_id); in scx_bpf_destroy_dsq()
6083 struct scx_sched *sch; in bpf_iter_scx_dsq_new() local
6096 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held()); in bpf_iter_scx_dsq_new()
6097 if (unlikely(!sch)) in bpf_iter_scx_dsq_new()
6103 kit->dsq = find_user_dsq(sch, dsq_id); in bpf_iter_scx_dsq_new()
6185 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf, in __bstr_format() argument
6194 scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz); in __bstr_format()
6200 scx_error(sch, "failed to read data fields (%d)", ret); in __bstr_format()
6207 scx_error(sch, "format preparation failed (%d)", ret); in __bstr_format()
6215 scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz); in __bstr_format()
6222 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf, in bstr_format() argument
6225 return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line), in bstr_format()
6244 struct scx_sched *sch; in scx_bpf_exit_bstr() local
6248 sch = rcu_dereference_bh(scx_root); in scx_bpf_exit_bstr()
6249 if (likely(sch) && in scx_bpf_exit_bstr()
6250 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) in scx_bpf_exit_bstr()
6251 scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line); in scx_bpf_exit_bstr()
6267 struct scx_sched *sch; in scx_bpf_error_bstr() local
6271 sch = rcu_dereference_bh(scx_root); in scx_bpf_error_bstr()
6272 if (likely(sch) && in scx_bpf_error_bstr()
6273 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) in scx_bpf_error_bstr()
6274 scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line); in scx_bpf_error_bstr()
6293 struct scx_sched *sch; in scx_bpf_dump_bstr() local
6300 sch = rcu_dereference(scx_root); in scx_bpf_dump_bstr()
6301 if (unlikely(!sch)) in scx_bpf_dump_bstr()
6305 scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends"); in scx_bpf_dump_bstr()
6310 ret = __bstr_format(sch, buf->data, buf->line + dd->cursor, in scx_bpf_dump_bstr()
6346 struct scx_sched *sch; in scx_bpf_cpuperf_cap() local
6350 sch = rcu_dereference(scx_root); in scx_bpf_cpuperf_cap()
6351 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpuperf_cap()
6373 struct scx_sched *sch; in scx_bpf_cpuperf_cur() local
6377 sch = rcu_dereference(scx_root); in scx_bpf_cpuperf_cur()
6378 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpuperf_cur()
6400 struct scx_sched *sch; in scx_bpf_cpuperf_set() local
6404 	sch = rcu_dereference(scx_root); in scx_bpf_cpuperf_set()
6405 if (unlikely(!sch)) in scx_bpf_cpuperf_set()
6409 scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu); in scx_bpf_cpuperf_set()
6413 if (ops_cpu_valid(sch, cpu, NULL)) { in scx_bpf_cpuperf_set()
6422 scx_error(sch, "Invalid target CPU %d", cpu); in scx_bpf_cpuperf_set()
6517 struct scx_sched *sch; in scx_bpf_cpu_rq() local
6521 sch = rcu_dereference(scx_root); in scx_bpf_cpu_rq()
6522 if (unlikely(!sch)) in scx_bpf_cpu_rq()
6525 if (!ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpu_rq()
6528 if (!sch->warned_deprecated_rq) { in scx_bpf_cpu_rq()
6532 sch->warned_deprecated_rq = true; in scx_bpf_cpu_rq()
6546 struct scx_sched *sch; in scx_bpf_locked_rq() local
6551 sch = rcu_dereference_sched(scx_root); in scx_bpf_locked_rq()
6552 if (unlikely(!sch)) in scx_bpf_locked_rq()
6557 scx_error(sch, "accessing rq without holding rq lock"); in scx_bpf_locked_rq()
6572 struct scx_sched *sch; in scx_bpf_cpu_curr() local
6576 sch = rcu_dereference(scx_root); in scx_bpf_cpu_curr()
6577 if (unlikely(!sch)) in scx_bpf_cpu_curr()
6580 if (!ops_cpu_valid(sch, cpu, NULL)) in scx_bpf_cpu_curr()
6602 struct scx_sched *sch; in scx_bpf_task_cgroup() local
6606 sch = rcu_dereference(scx_root); in scx_bpf_task_cgroup()
6607 if (unlikely(!sch)) in scx_bpf_task_cgroup()
6610 if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p)) in scx_bpf_task_cgroup()
6683 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events) in scx_read_events() argument
6691 e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats; in scx_read_events()
6712 struct scx_sched *sch; in scx_bpf_events() local
6716 sch = rcu_dereference(scx_root); in scx_bpf_events()
6717 if (sch) in scx_bpf_events()
6718 scx_read_events(sch, &e_sys); in scx_bpf_events()
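
A minimal sketch of the BPF side that drives the kfuncs whose kernel implementations appear above (scx_bpf_create_dsq(), scx_bpf_dsq_insert(), scx_bpf_dsq_move_to_local()). It is modeled loosely on the upstream scx_simple example scheduler; the op names, the SHARED_DSQ id, and the reliance on the <scx/common.bpf.h> helper macros are assumptions about the build setup, not anything taken from the listing itself.

	#include <scx/common.bpf.h>

	char _license[] SEC("license") = "GPL";

	#define SHARED_DSQ 0	/* hypothetical user DSQ id */

	s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_init)
	{
		/* backed kernel-side by scx_bpf_create_dsq() in the listing */
		return scx_bpf_create_dsq(SHARED_DSQ, -1);
	}

	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
	{
		/* FIFO-enqueue into the shared DSQ with the default slice */
		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
	}

	void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
	{
		/* pull one task from the shared DSQ onto this CPU's local DSQ */
		scx_bpf_dsq_move_to_local(SHARED_DSQ);
	}

	SCX_OPS_DEFINE(sketch_ops,
		       .enqueue		= (void *)sketch_enqueue,
		       .dispatch	= (void *)sketch_dispatch,
		       .init		= (void *)sketch_init,
		       .name		= "sketch");

Registering such a struct_ops map is what reaches scx_enable() and scx_alloc_and_add_sched() above; unloading it flows through bpf_scx_unreg() and scx_disable_workfn().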