/* xref: /linux/kernel/sched/ext.h (revision 07814a9439a3b03d79a1001614b5bc1cab69bcec) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
4  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
5  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
6  */
7 #ifdef CONFIG_SCHED_CLASS_EXT
8 
/*
 * State snapshot taken by sched_deq_and_put_task() and replayed by
 * sched_enq_and_set_task() so that a task can be dequeued, have its
 * scheduling properties modified, and then be restored to its previous
 * queued/running state.
 */
struct sched_enq_and_set_ctx {
	struct task_struct	*p;		/* task being dequeued and re-enqueued */
	int			queue_flags;	/* queue flags to replay on re-enqueue */
	bool			queued;		/* task was on a runqueue at dequeue time */
	bool			running;	/* task was the rq's current at dequeue time */
};
15 
/*
 * Dequeue @p, saving its current state into @ctx; undo with
 * sched_enq_and_set_task() on the same @ctx.
 */
void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
			    struct sched_enq_and_set_ctx *ctx);
void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);

extern const struct sched_class ext_sched_class;

/*
 * Static keys gating sched_ext fast paths:
 * - __scx_ops_enabled: a sched_ext scheduler is currently enabled.
 * - __scx_switched_all: all eligible tasks have been switched to
 *   ext_sched_class (inferred from usage in next_active_class()).
 */
DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled);
DECLARE_STATIC_KEY_FALSE(__scx_switched_all);
#define scx_enabled()		static_branch_unlikely(&__scx_ops_enabled)
#define scx_switched_all()	static_branch_unlikely(&__scx_switched_all)
26 
27 static inline bool task_on_scx(const struct task_struct *p)
28 {
29 	return scx_enabled() && p->sched_class == &ext_sched_class;
30 }
31 
/* Periodic tick hook for sched_ext. */
void scx_tick(struct rq *rq);
/* Initialize the per-task sched_ext entity. */
void init_sched_ext_class(void); /* forward ref below; declared here */
41 
42 static inline const struct sched_class *next_active_class(const struct sched_class *class)
43 {
44 	class++;
45 	if (scx_switched_all() && class == &fair_sched_class)
46 		class++;
47 	if (!scx_enabled() && class == &ext_sched_class)
48 		class++;
49 	return class;
50 }
51 
/*
 * Iterate @class from @_from up to, but not including, @_to, skipping
 * classes that next_active_class() considers inactive.
 */
#define for_active_class_range(class, _from, _to)				\
	for (class = (_from); class != (_to); class = next_active_class(class))

/* Iterate over every currently-active scheduling class, highest first. */
#define for_each_active_class(class)						\
	for_active_class_range(class, __sched_class_highest, __sched_class_lowest)

/*
 * SCX requires a balance() call before every pick_next_task() call including
 * when waking up from idle.  Clamp the starting class to ext_sched_class so
 * that ext's balance() is reached even when @prev_class is a lower class.
 */
#define for_balance_class_range(class, prev_class, end_class)			\
	for_active_class_range(class, (prev_class) > &ext_sched_class ?		\
			       &ext_sched_class : (prev_class), (end_class))
65 
66 #else	/* CONFIG_SCHED_CLASS_EXT */
67 
/*
 * !CONFIG_SCHED_CLASS_EXT: sched_ext is compiled out.  All hooks become
 * no-ops and the active-class iterators fall back to the plain variants.
 */
#define scx_enabled()		false
#define scx_switched_all()	false

static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}

/* Without sched_ext every class is "active". */
#define for_each_active_class		for_each_class
#define for_balance_class_range		for_class_range
82 
83 #endif	/* CONFIG_SCHED_CLASS_EXT */
84 
85 #if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
86 void __scx_update_idle(struct rq *rq, bool idle);
87 
88 static inline void scx_update_idle(struct rq *rq, bool idle)
89 {
90 	if (scx_enabled())
91 		__scx_update_idle(rq, idle);
92 }
93 #else
94 static inline void scx_update_idle(struct rq *rq, bool idle) {}
95 #endif
96