/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT

void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(int policy);
bool scx_allow_ttwu_queue(const struct task_struct *p);
void init_sched_ext_class(void);

static inline u32 scx_cpuperf_target(s32 cpu)
{
	if (scx_enabled())
		return cpu_rq(cpu)->scx.cpuperf_target;
	else
		return 0;
}

static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}

#ifdef CONFIG_SCHED_CORE
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi);
#endif

#else	/* CONFIG_SCHED_CLASS_EXT */

static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline void scx_rq_activate(struct rq *rq) {}
static inline void scx_rq_deactivate(struct rq *rq) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline bool scx_allow_ttwu_queue(const struct task_struct *p) { return true; }
static inline void init_sched_ext_class(void) {}

#endif	/* CONFIG_SCHED_CLASS_EXT */

#ifdef CONFIG_SCHED_CLASS_EXT
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);

static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle, do_notify);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
#endif

#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
void scx_tg_init(struct task_group *tg);
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_cgroup_move_task(struct task_struct *p);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
void scx_group_set_bandwidth(struct task_group *tg, u64 period_us, u64 quota_us, u64 burst_us);
#else	/* CONFIG_EXT_GROUP_SCHED */
static inline void scx_tg_init(struct task_group *tg) {}
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_cgroup_move_task(struct task_struct *p) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
static inline void scx_group_set_bandwidth(struct task_group *tg, u64 period_us, u64 quota_us, u64 burst_us) {}
#endif	/* CONFIG_EXT_GROUP_SCHED */
#endif	/* CONFIG_CGROUP_SCHED */