/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT

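/*
 * current->scx.kf_mask tracks which scx callback context, if any, the task is
 * executing in.  A zero mask means the caller is not inside any scx callback,
 * i.e. it is running in an unlocked context (e.g. a syscall-context BPF
 * program).
 */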
static inline bool scx_kf_allowed_if_unlocked(void)
{
	return !current->scx.kf_mask;
}

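/*
 * Test whether @rq is in bypass mode.  While bypassing, the BPF scheduler's
 * decisions are ignored and tasks are scheduled in a simple FIFO order.
 */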
static inline bool scx_rq_bypassing(struct rq *rq)
{
	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
}

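/* Enabled when the loaded BPF scheduler allows ttwu to use queued wakeups. */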
DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);

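/* The rq whose lock this CPU currently holds while running scx code, if any. */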
DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);

/*
 * Return the rq currently locked from an scx callback, or NULL if no rq is
 * locked.
 */
static inline struct rq *scx_locked_rq(void)
{
	return __this_cpu_read(scx_locked_rq_state);
}
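
/*
 * Usage sketch (hypothetical caller, not part of this header): a kfunc which
 * may be invoked both with and without an rq lock held can branch on the
 * return value, e.g.:
 *
 *	struct rq *rq = scx_locked_rq();
 *
 *	if (rq)
 *		do_work_on_locked_rq(rq);
 *	else
 *		do_work_unlocked();
 *
 * do_work_on_locked_rq() and do_work_unlocked() are illustrative names only.
 */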
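/*
 * Interfaces called from the core scheduler, implemented in
 * kernel/sched/ext.c.  In the fork path, scx_pre_fork(), scx_fork() and
 * scx_post_fork() are called in order; scx_cancel_fork() undoes scx_fork()
 * when a later fork step fails.
 */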
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(int policy);
bool scx_allow_ttwu_queue(const struct task_struct *p);
void init_sched_ext_class(void);

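/*
 * Return the performance target that the BPF scheduler has requested for
 * @cpu, or 0 if sched_ext is disabled.
 */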
static inline u32 scx_cpuperf_target(s32 cpu)
{
	if (scx_enabled())
		return cpu_rq(cpu)->scx.cpuperf_target;
	else
		return 0;
}
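
/*
 * Sketch of a hypothetical consumer (not from this header), assuming the
 * target is expressed on the [0, SCX_CPUPERF_ONE] scale:
 *
 *	freq = max_freq * scx_cpuperf_target(cpu) / SCX_CPUPERF_ONE;
 */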
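/* Return true if @p is currently scheduled by the ext sched class. */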
static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}

#ifdef CONFIG_SCHED_CORE
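/* Core-scheduling priority comparison between two sched_ext tasks. */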
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi);
#endif

#else /* CONFIG_SCHED_CLASS_EXT */

static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline void scx_rq_activate(struct rq *rq) {}
static inline void scx_rq_deactivate(struct rq *rq) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline bool scx_allow_ttwu_queue(const struct task_struct *p) { return true; }
static inline void init_sched_ext_class(void) {}

#endif /* CONFIG_SCHED_CLASS_EXT */

#ifdef CONFIG_SCHED_CLASS_EXT
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);

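/*
 * Called when @rq's CPU enters (@idle == true) or leaves the idle state.
 * @do_notify selects whether the BPF scheduler is notified.  No-op unless
 * sched_ext is enabled.
 */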
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle, do_notify);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
#endif

#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
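/*
 * cgroup hooks which keep the BPF scheduler informed of task_group creation,
 * task migration, and weight/idle/bandwidth changes.  Implemented in
 * kernel/sched/ext.c.
 */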
void scx_tg_init(struct task_group *tg);
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_cgroup_move_task(struct task_struct *p);
void scx_cgroup_finish_attach(void);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
void scx_group_set_bandwidth(struct task_group *tg, u64 period_us, u64 quota_us, u64 burst_us);
#else /* CONFIG_EXT_GROUP_SCHED */
static inline void scx_tg_init(struct task_group *tg) {}
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_cgroup_move_task(struct task_struct *p) {}
static inline void scx_cgroup_finish_attach(void) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
static inline void scx_group_set_bandwidth(struct task_group *tg, u64 period_us, u64 quota_us, u64 burst_us) {}
#endif /* CONFIG_EXT_GROUP_SCHED */
#endif /* CONFIG_CGROUP_SCHED */
