/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT

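/*
 * sched_enq_and_set_ctx saves whether @p was queued and/or running so that
 * the task can be dequeued, have its scheduling properties changed, and then
 * be restored to the same state.  Illustrative usage sketch (the caller is
 * expected to hold @p's rq lock; the exact queue_flags depend on the call
 * site):
 *
 *	struct sched_enq_and_set_ctx ctx;
 *
 *	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 *	... update @p's scheduling properties, e.g. p->sched_class ...
 *	sched_enq_and_set_task(&ctx);
 */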
struct sched_enq_and_set_ctx {
	struct task_struct	*p;
	int			queue_flags;
	bool			queued;
	bool			running;
};

void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
			    struct sched_enq_and_set_ctx *ctx);
void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);

extern const struct sched_class ext_sched_class;

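/*
 * Static keys gating the ext path: __scx_ops_enabled is set while a BPF
 * scheduler is loaded, and __scx_switched_all once all eligible tasks run on
 * the ext class (see ext.c for where they are flipped).  The static branches
 * keep the checks below essentially free while sched_ext is not in use.
 */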
DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled);
DECLARE_STATIC_KEY_FALSE(__scx_switched_all);
#define scx_enabled()		static_branch_unlikely(&__scx_ops_enabled)
#define scx_switched_all()	static_branch_unlikely(&__scx_switched_all)

static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}

void init_scx_entity(struct sched_ext_entity *scx);
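/*
 * Hooks into the fork path.  Rough ordering as seen from fork (sketch only;
 * the authoritative call sites are in the core fork/sched code):
 *
 *	scx_pre_fork(p);
 *	ret = scx_fork(p);		... may fail ...
 *	if (ret)
 *		scx_cancel_fork(p);	... undo the partial setup ...
 *	else
 *		scx_post_fork(p);	... @p is fully forked ...
 */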
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool task_should_scx(struct task_struct *p);
void init_sched_ext_class(void);

static inline const struct sched_class *next_active_class(const struct sched_class *class)
{
	class++;
	if (scx_switched_all() && class == &fair_sched_class)
		class++;
	if (!scx_enabled() && class == &ext_sched_class)
		class++;
	return class;
}

#define for_active_class_range(class, _from, _to)				\
	for (class = (_from); class != (_to); class = next_active_class(class))

#define for_each_active_class(class)						\
	for_active_class_range(class, __sched_class_highest, __sched_class_lowest)
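/*
 * The active-class iterators skip classes which can't currently own any
 * task: fair is skipped once every task has been switched to ext, and ext is
 * skipped while no BPF scheduler is loaded.  Rough usage sketch (the pick
 * callback invocation is elided; see the core pick loop for the real thing):
 *
 *	const struct sched_class *class;
 *
 *	for_each_active_class(class) {
 *		... try to pick a task from @class, stop at the first hit ...
 *	}
 */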

/*
 * SCX requires a balance() call before every pick_next_task() call, including
 * when waking up from idle.
 */
#define for_balance_class_range(class, prev_class, end_class)			\
	for_active_class_range(class, (prev_class) > &ext_sched_class ?		\
			       &ext_sched_class : (prev_class), (end_class))
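/*
 * Simplified sketch of the balance pass that precedes picking (the actual
 * loop lives in the core scheduler):
 *
 *	for_balance_class_range(class, prev->sched_class, &idle_sched_class) {
 *		if (class->balance(rq, prev, rf))
 *			break;
 *	}
 *
 * Clamping the range start to &ext_sched_class makes sure ext's balance()
 * still runs when @prev was on a lower-priority class such as idle.
 */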

#else	/* CONFIG_SCHED_CLASS_EXT */

#define scx_enabled()		false
#define scx_switched_all()	false

static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}

#define for_each_active_class		for_each_class
#define for_balance_class_range		for_class_range

#endif	/* CONFIG_SCHED_CLASS_EXT */

#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
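/*
 * scx_update_idle() informs sched_ext of idle transitions on @rq's CPU so
 * that idle-CPU tracking (and, if implemented, the scheduler's update_idle
 * callback) stays in sync; the static-branch check keeps it a no-op while no
 * BPF scheduler is loaded.
 */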
void __scx_update_idle(struct rq *rq, bool idle);

static inline void scx_update_idle(struct rq *rq, bool idle)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle) {}
#endif