/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT

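/*
 * current->scx.kf_mask is non-zero while the task is inside an scx
 * operation whose kfunc callers are restricted, so a clear mask means no
 * such context is active and "unlocked" kfuncs may be called.
 */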
static inline bool scx_kf_allowed_if_unlocked(void)
{
	return !current->scx.kf_mask;
}

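/*
 * Static key, false by default; presumably flipped by ext.c when the
 * loaded BPF scheduler opts into queued wakeups (see
 * scx_allow_ttwu_queue()).
 */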
DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);

void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(int policy);
bool scx_allow_ttwu_queue(const struct task_struct *p);
void init_sched_ext_class(void);

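/*
 * Performance target of @cpu as last set by the BPF scheduler (via the
 * scx_bpf_cpuperf_set() kfunc as of this revision); reads as 0 while no
 * scx scheduler is enabled.
 */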
static inline u32 scx_cpuperf_target(s32 cpu)
{
	if (scx_enabled())
		return cpu_rq(cpu)->scx.cpuperf_target;
	else
		return 0;
}

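/*
 * True if @p is currently running under the ext sched_class. The
 * scx_enabled() static branch short-circuits the comparison while no BPF
 * scheduler is loaded.
 */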
static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}

#ifdef CONFIG_SCHED_CORE
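/*
 * Core-scheduling priority comparison, expected to defer to the BPF
 * scheduler's ops.core_sched_before() when implemented; @in_fi mirrors
 * the in-forced-idle flag passed down by prio_less() in core.c.
 */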
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi);
#endif

#else	/* CONFIG_SCHED_CLASS_EXT */

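/*
 * With sched_ext compiled out, stub everything so callers in the core
 * scheduler need no #ifdefs: forks always succeed, the tick can always
 * be stopped and setscheduler checks always pass.
 */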
static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline void scx_rq_activate(struct rq *rq) {}
static inline void scx_rq_deactivate(struct rq *rq) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline bool scx_allow_ttwu_queue(const struct task_struct *p) { return true; }
static inline void init_sched_ext_class(void) {}

#endif	/* CONFIG_SCHED_CLASS_EXT */

#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);

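/*
 * Propagate an idle state transition of @rq's CPU to the BPF scheduler;
 * scx_enabled() is a static branch, so this is nearly free while no
 * scheduler is loaded. @do_notify presumably gates whether the
 * scheduler's idle callback is actually invoked.
 */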
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle, do_notify);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
#endif

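/*
 * cgroup hooks: with CONFIG_EXT_GROUP_SCHED these wire task_group
 * lifetime, cross-cgroup task moves and the cpu.weight / cpu.idle
 * interface files into the BPF scheduler; without it they degrade to
 * no-ops.
 */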
#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
void scx_tg_init(struct task_group *tg);
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_cgroup_move_task(struct task_struct *p);
void scx_cgroup_finish_attach(void);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
#else	/* CONFIG_EXT_GROUP_SCHED */
static inline void scx_tg_init(struct task_group *tg) {}
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_cgroup_move_task(struct task_struct *p) {}
static inline void scx_cgroup_finish_attach(void) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
#endif	/* CONFIG_EXT_GROUP_SCHED */
#endif	/* CONFIG_CGROUP_SCHED */