xref: /linux/kernel/sched/ext.c (revision 664f0f6be37ce4ef80992cf2ed74761cd5bbe207)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11 
12 static DEFINE_RAW_SPINLOCK(scx_sched_lock);
13 
14 /*
15  * NOTE: sched_ext is in the process of growing multiple scheduler support and
16  * scx_root usage is in a transitional state. Naked dereferences are safe if the
17  * caller is one of the tasks attached to SCX and explicit RCU dereference is
18  * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
19  * are used as temporary markers to indicate that the dereferences need to be
20  * updated to point to the associated scheduler instances rather than scx_root.
21  */
22 struct scx_sched __rcu *scx_root;
23 
24 /*
25  * All scheds, writers must hold both scx_enable_mutex and scx_sched_lock.
26  * Readers can hold either or rcu_read_lock().
27  */
28 static LIST_HEAD(scx_sched_all);
29 
30 #ifdef CONFIG_EXT_SUB_SCHED
31 static const struct rhashtable_params scx_sched_hash_params = {
32 	.key_len		= sizeof_field(struct scx_sched, ops.sub_cgroup_id),
33 	.key_offset		= offsetof(struct scx_sched, ops.sub_cgroup_id),
34 	.head_offset		= offsetof(struct scx_sched, hash_node),
35 	.insecure_elasticity	= true,	/* inserted under scx_sched_lock */
36 };
37 
38 static struct rhashtable scx_sched_hash;
39 #endif
40 
41 /*
42  * During exit, a task may schedule after losing its PIDs. When disabling the
43  * BPF scheduler, we need to be able to iterate tasks in every state to
44  * guarantee system safety. Maintain a dedicated task list which contains every
45  * task between its fork and eventual free.
46  */
47 static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
48 static LIST_HEAD(scx_tasks);
49 
50 /* ops enable/disable */
51 static DEFINE_MUTEX(scx_enable_mutex);
52 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
53 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
54 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
55 static DEFINE_RAW_SPINLOCK(scx_bypass_lock);
56 static bool scx_init_task_enabled;
57 static bool scx_switching_all;
58 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
59 
60 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
61 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
62 
63 #ifdef CONFIG_EXT_SUB_SCHED
64 /*
65  * The sub sched being enabled. Used by scx_disable_and_exit_task() to exit
66  * tasks for the sub-sched being enabled. Use a global variable instead of a
67  * per-task field as all enables are serialized.
68  */
69 static struct scx_sched *scx_enabling_sub_sched;
70 #else
71 #define scx_enabling_sub_sched	(struct scx_sched *)NULL
72 #endif	/* CONFIG_EXT_SUB_SCHED */
73 
74 /*
75  * A monotonically increasing sequence number that is incremented every time a
76  * scheduler is enabled. This can be used to check if any custom sched_ext
77  * scheduler has ever been used in the system.
78  */
79 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
80 
81 /*
82  * Watchdog interval. All scx_sched's share a single watchdog timer and the
83  * interval is half of the shortest sch->watchdog_timeout.
84  */
85 static unsigned long scx_watchdog_interval;
86 
87 /*
88  * The last time the delayed work was run. This delayed work relies on
89  * ksoftirqd being able to run to service timer interrupts, so it's possible
90  * that this work itself could get wedged. To account for this, we check that
91  * it's not stalled in the timer tick, and trigger an error if it is.
92  */
93 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
94 
95 static struct delayed_work scx_watchdog_work;
96 
97 /*
98  * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence
99  * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu
100  * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated
101  * lazily when enabling and freed when disabling to avoid waste when sched_ext
102  * isn't active.
103  */
104 struct scx_kick_syncs {
105 	struct rcu_head		rcu;
106 	unsigned long		syncs[];
107 };
108 
109 static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs);
110 
111 /*
112  * Direct dispatch marker.
113  *
114  * Non-NULL values are used for direct dispatch from enqueue path. A valid
115  * pointer points to the task currently being enqueued. An ERR_PTR value is used
116  * to indicate that direct dispatch has already happened.
117  */
118 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
119 
120 static const struct rhashtable_params dsq_hash_params = {
121 	.key_len		= sizeof_field(struct scx_dispatch_q, id),
122 	.key_offset		= offsetof(struct scx_dispatch_q, id),
123 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
124 };
125 
126 static LLIST_HEAD(dsqs_to_free);
127 
128 /* string formatting from BPF */
129 struct scx_bstr_buf {
130 	u64			data[MAX_BPRINTF_VARARGS];
131 	char			line[SCX_EXIT_MSG_LEN];
132 };
133 
134 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
135 static struct scx_bstr_buf scx_exit_bstr_buf;
136 
137 /* ops debug dump */
138 static DEFINE_RAW_SPINLOCK(scx_dump_lock);
139 
140 struct scx_dump_data {
141 	s32			cpu;
142 	bool			first;
143 	s32			cursor;
144 	struct seq_buf		*s;
145 	const char		*prefix;
146 	struct scx_bstr_buf	buf;
147 };
148 
149 static struct scx_dump_data scx_dump_data = {
150 	.cpu			= -1,
151 };
152 
153 /* /sys/kernel/sched_ext interface */
154 static struct kset *scx_kset;
155 
156 /*
157  * Parameters that can be adjusted through /sys/module/sched_ext/parameters.
158  * There usually is no reason to modify these as normal scheduler operation
159  * shouldn't be affected by them. The knobs are primarily for debugging.
160  */
161 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
162 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;
163 
164 static int set_slice_us(const char *val, const struct kernel_param *kp)
165 {
166 	return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC);
167 }
168 
169 static const struct kernel_param_ops slice_us_param_ops = {
170 	.set = set_slice_us,
171 	.get = param_get_uint,
172 };
173 
174 static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp)
175 {
176 	return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC);
177 }
178 
179 static const struct kernel_param_ops bypass_lb_intv_us_param_ops = {
180 	.set = set_bypass_lb_intv_us,
181 	.get = param_get_uint,
182 };
183 
184 #undef MODULE_PARAM_PREFIX
185 #define MODULE_PARAM_PREFIX	"sched_ext."
186 
187 module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600);
188 MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)");
189 module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600);
190 MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)");
191 
192 #undef MODULE_PARAM_PREFIX
193 
194 #define CREATE_TRACE_POINTS
195 #include <trace/events/sched_ext.h>
196 
197 static void run_deferred(struct rq *rq);
198 static bool task_dead_and_done(struct task_struct *p);
199 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
200 static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind);
201 static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
202 		      s64 exit_code, const char *fmt, va_list args);
203 
204 static __printf(4, 5) bool scx_exit(struct scx_sched *sch,
205 				    enum scx_exit_kind kind, s64 exit_code,
206 				    const char *fmt, ...)
207 {
208 	va_list args;
209 	bool ret;
210 
211 	va_start(args, fmt);
212 	ret = scx_vexit(sch, kind, exit_code, fmt, args);
213 	va_end(args);
214 
215 	return ret;
216 }
217 
218 #define scx_error(sch, fmt, args...)	scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
219 #define scx_verror(sch, fmt, args)	scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args)
220 
221 #define SCX_HAS_OP(sch, op)	test_bit(SCX_OP_IDX(op), (sch)->has_op)
222 
223 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
224 {
225 	if (time_after(at, now))
226 		return jiffies_to_msecs(at - now);
227 	else
228 		return -(long)jiffies_to_msecs(now - at);
229 }
230 
231 static bool u32_before(u32 a, u32 b)
232 {
233 	return (s32)(a - b) < 0;
234 }
235 
236 #ifdef CONFIG_EXT_SUB_SCHED
237 /**
238  * scx_parent - Find the parent sched
239  * @sch: sched to find the parent of
240  *
241  * Returns the parent scheduler or %NULL if @sch is root.
242  */
243 static struct scx_sched *scx_parent(struct scx_sched *sch)
244 {
245 	if (sch->level)
246 		return sch->ancestors[sch->level - 1];
247 	else
248 		return NULL;
249 }
250 
251 /**
252  * scx_next_descendant_pre - find the next descendant for pre-order walk
253  * @pos: the current position (%NULL to initiate traversal)
254  * @root: sched whose descendants to walk
255  *
256  * To be used by scx_for_each_descendant_pre(). Find the next descendant to
257  * visit for pre-order traversal of @root's descendants. @root is included in
258  * the iteration and the first node to be visited.
259  */
260 static struct scx_sched *scx_next_descendant_pre(struct scx_sched *pos,
261 						 struct scx_sched *root)
262 {
263 	struct scx_sched *next;
264 
265 	lockdep_assert(lockdep_is_held(&scx_enable_mutex) ||
266 		       lockdep_is_held(&scx_sched_lock));
267 
268 	/* if first iteration, visit @root */
269 	if (!pos)
270 		return root;
271 
272 	/* visit the first child if exists */
273 	next = list_first_entry_or_null(&pos->children, struct scx_sched, sibling);
274 	if (next)
275 		return next;
276 
277 	/* no child, visit my or the closest ancestor's next sibling */
278 	while (pos != root) {
279 		if (!list_is_last(&pos->sibling, &scx_parent(pos)->children))
280 			return list_next_entry(pos, sibling);
281 		pos = scx_parent(pos);
282 	}
283 
284 	return NULL;
285 }
286 
287 static struct scx_sched *scx_find_sub_sched(u64 cgroup_id)
288 {
289 	return rhashtable_lookup(&scx_sched_hash, &cgroup_id,
290 				 scx_sched_hash_params);
291 }
292 
293 static void scx_set_task_sched(struct task_struct *p, struct scx_sched *sch)
294 {
295 	rcu_assign_pointer(p->scx.sched, sch);
296 }
297 #else	/* CONFIG_EXT_SUB_SCHED */
298 static struct scx_sched *scx_parent(struct scx_sched *sch) { return NULL; }
299 static struct scx_sched *scx_next_descendant_pre(struct scx_sched *pos, struct scx_sched *root) { return pos ? NULL : root; }
300 static struct scx_sched *scx_find_sub_sched(u64 cgroup_id) { return NULL; }
301 static void scx_set_task_sched(struct task_struct *p, struct scx_sched *sch) {}
302 #endif	/* CONFIG_EXT_SUB_SCHED */
303 
304 /**
305  * scx_is_descendant - Test whether sched is a descendant
306  * @sch: sched to test
307  * @ancestor: ancestor sched to test against
308  *
309  * Test whether @sch is a descendant of @ancestor.
310  */
311 static bool scx_is_descendant(struct scx_sched *sch, struct scx_sched *ancestor)
312 {
313 	if (sch->level < ancestor->level)
314 		return false;
315 	return sch->ancestors[ancestor->level] == ancestor;
316 }
317 
318 /**
319  * scx_for_each_descendant_pre - pre-order walk of a sched's descendants
320  * @pos: iteration cursor
321  * @root: sched to walk the descendants of
322  *
323  * Walk @root's descendants. @root is included in the iteration and the first
324  * node to be visited. Must be called with either scx_enable_mutex or
325  * scx_sched_lock held.
326  */
327 #define scx_for_each_descendant_pre(pos, root)					\
328 	for ((pos) = scx_next_descendant_pre(NULL, (root)); (pos);		\
329 	     (pos) = scx_next_descendant_pre((pos), (root)))
330 
331 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch, s32 cpu)
332 {
333 	return &sch->pnode[cpu_to_node(cpu)]->global_dsq;
334 }
335 
336 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
337 {
338 	return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params);
339 }
340 
341 static const struct sched_class *scx_setscheduler_class(struct task_struct *p)
342 {
343 	if (p->sched_class == &stop_sched_class)
344 		return &stop_sched_class;
345 
346 	return __setscheduler_class(p->policy, p->prio);
347 }
348 
349 static struct scx_dispatch_q *bypass_dsq(struct scx_sched *sch, s32 cpu)
350 {
351 	return &per_cpu_ptr(sch->pcpu, cpu)->bypass_dsq;
352 }
353 
354 static struct scx_dispatch_q *bypass_enq_target_dsq(struct scx_sched *sch, s32 cpu)
355 {
356 #ifdef CONFIG_EXT_SUB_SCHED
357 	/*
358 	 * If @sch is a sub-sched which is bypassing, its tasks should go into
359 	 * the bypass DSQs of the nearest ancestor which is not bypassing. The
360 	 * not-bypassing ancestor is responsible for scheduling all tasks from
361 	 * bypassing sub-trees. If all ancestors including root are bypassing,
362 	 * all tasks should go to the root's bypass DSQs.
363 	 *
364 	 * Whenever a sched starts bypassing, all runnable tasks in its subtree
365 	 * are re-enqueued after scx_bypassing() is turned on, guaranteeing that
366 	 * all tasks are transferred to the right DSQs.
367 	 */
368 	while (scx_parent(sch) && scx_bypassing(sch, cpu))
369 		sch = scx_parent(sch);
370 #endif	/* CONFIG_EXT_SUB_SCHED */
371 
372 	return bypass_dsq(sch, cpu);
373 }
374 
375 /**
376  * bypass_dsp_enabled - Check if bypass dispatch path is enabled
377  * @sch: scheduler to check
378  *
379  * When a descendant scheduler enters bypass mode, bypassed tasks are scheduled
380  * by the nearest non-bypassing ancestor, or the root scheduler if all ancestors
381  * are bypassing. In the former case, the ancestor is not itself bypassing but
382  * its bypass DSQs will be populated with bypassed tasks from descendants. Thus,
383  * the ancestor's bypass dispatch path must be active even though its own
384  * bypass_depth remains zero.
385  *
386  * This function checks bypass_dsp_enable_depth which is managed separately from
387  * bypass_depth to enable this decoupling. See enable_bypass_dsp() and
388  * disable_bypass_dsp().
389  */
390 static bool bypass_dsp_enabled(struct scx_sched *sch)
391 {
392 	return unlikely(atomic_read(&sch->bypass_dsp_enable_depth));
393 }
394 
395 /**
396  * rq_is_open - Is the rq available for immediate execution of an SCX task?
397  * @rq: rq to test
398  * @enq_flags: optional %SCX_ENQ_* of the task being enqueued
399  *
400  * Returns %true if @rq is currently open for executing an SCX task. After a
401  * %false return, @rq is guaranteed to invoke SCX dispatch path at least once
402  * before going to idle and not inserting a task into @rq's local DSQ after a
403  * %false return doesn't cause @rq to stall.
404  */
405 static bool rq_is_open(struct rq *rq, u64 enq_flags)
406 {
407 	lockdep_assert_rq_held(rq);
408 
409 	/*
410 	 * A higher-priority class task is either running or in the process of
411 	 * waking up on @rq.
412 	 */
413 	if (sched_class_above(rq->next_class, &ext_sched_class))
414 		return false;
415 
416 	/*
417 	 * @rq is either in transition to or in idle and there is no
418 	 * higher-priority class task waking up on it.
419 	 */
420 	if (sched_class_above(&ext_sched_class, rq->next_class))
421 		return true;
422 
423 	/*
424 	 * @rq is either picking, in transition to, or running an SCX task.
425 	 */
426 
427 	/*
428 	 * If we're in the dispatch path holding rq lock, $curr may or may not
429 	 * be ready depending on whether the on-going dispatch decides to extend
430 	 * $curr's slice. We say yes here and resolve it at the end of dispatch.
431 	 * See balance_one().
432 	 */
433 	if (rq->scx.flags & SCX_RQ_IN_BALANCE)
434 		return true;
435 
436 	/*
437 	 * %SCX_ENQ_PREEMPT clears $curr's slice if on SCX and kicks dispatch,
438 	 * so allow it to avoid spuriously triggering reenq on a combined
439 	 * PREEMPT|IMMED insertion.
440 	 */
441 	if (enq_flags & SCX_ENQ_PREEMPT)
442 		return true;
443 
444 	/*
445 	 * @rq is either in transition to or running an SCX task and can't go
446 	 * idle without another SCX dispatch cycle.
447 	 */
448 	return false;
449 }
450 
451 /*
452  * Track the rq currently locked.
453  *
454  * This allows kfuncs to safely operate on rq from any scx ops callback,
455  * knowing which rq is already locked.
456  */
457 DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
458 
459 static inline void update_locked_rq(struct rq *rq)
460 {
461 	/*
462 	 * Check whether @rq is actually locked. This can help expose bugs
463 	 * or incorrect assumptions about the context in which a kfunc or
464 	 * callback is executed.
465 	 */
466 	if (rq)
467 		lockdep_assert_rq_held(rq);
468 	__this_cpu_write(scx_locked_rq_state, rq);
469 }
470 
471 /*
472  * SCX ops can recurse via scx_bpf_sub_dispatch() - the inner call must not
473  * clobber the outer's scx_locked_rq_state. Save it on entry, restore on exit.
474  */
475 #define SCX_CALL_OP(sch, op, locked_rq, args...)				\
476 do {										\
477 	struct rq *__prev_locked_rq;						\
478 										\
479 	if (locked_rq) {							\
480 		__prev_locked_rq = scx_locked_rq();				\
481 		update_locked_rq(locked_rq);					\
482 	}									\
483 	(sch)->ops.op(args);							\
484 	if (locked_rq)								\
485 		update_locked_rq(__prev_locked_rq);				\
486 } while (0)
487 
488 #define SCX_CALL_OP_RET(sch, op, locked_rq, args...)				\
489 ({										\
490 	struct rq *__prev_locked_rq;						\
491 	__typeof__((sch)->ops.op(args)) __ret;					\
492 										\
493 	if (locked_rq) {							\
494 		__prev_locked_rq = scx_locked_rq();				\
495 		update_locked_rq(locked_rq);					\
496 	}									\
497 	__ret = (sch)->ops.op(args);						\
498 	if (locked_rq)								\
499 		update_locked_rq(__prev_locked_rq);				\
500 	__ret;									\
501 })
502 
503 /*
504  * SCX_CALL_OP_TASK*() invokes an SCX op that takes one or two task arguments
505  * and records them in current->scx.kf_tasks[] for the duration of the call. A
506  * kfunc invoked from inside such an op can then use
507  * scx_kf_arg_task_ok() to verify that its task argument is one of
508  * those subject tasks.
509  *
510  * Every SCX_CALL_OP_TASK*() call site invokes its op with @p's rq lock held -
511  * either via the @locked_rq argument here, or (for ops.select_cpu()) via @p's
512  * pi_lock held by try_to_wake_up() with rq tracking via scx_rq.in_select_cpu.
513  * So if kf_tasks[] is set, @p's scheduler-protected fields are stable.
514  *
515  * kf_tasks[] can not stack, so task-based SCX ops must not nest. The
516  * WARN_ON_ONCE() in each macro catches a re-entry of any of the three variants
517  * while a previous one is still in progress.
518  */
519 #define SCX_CALL_OP_TASK(sch, op, locked_rq, task, args...)			\
520 do {										\
521 	WARN_ON_ONCE(current->scx.kf_tasks[0]);					\
522 	current->scx.kf_tasks[0] = task;					\
523 	SCX_CALL_OP((sch), op, locked_rq, task, ##args);			\
524 	current->scx.kf_tasks[0] = NULL;					\
525 } while (0)
526 
527 #define SCX_CALL_OP_TASK_RET(sch, op, locked_rq, task, args...)			\
528 ({										\
529 	__typeof__((sch)->ops.op(task, ##args)) __ret;				\
530 	WARN_ON_ONCE(current->scx.kf_tasks[0]);					\
531 	current->scx.kf_tasks[0] = task;					\
532 	__ret = SCX_CALL_OP_RET((sch), op, locked_rq, task, ##args);		\
533 	current->scx.kf_tasks[0] = NULL;					\
534 	__ret;									\
535 })
536 
537 #define SCX_CALL_OP_2TASKS_RET(sch, op, locked_rq, task0, task1, args...)	\
538 ({										\
539 	__typeof__((sch)->ops.op(task0, task1, ##args)) __ret;			\
540 	WARN_ON_ONCE(current->scx.kf_tasks[0]);					\
541 	current->scx.kf_tasks[0] = task0;					\
542 	current->scx.kf_tasks[1] = task1;					\
543 	__ret = SCX_CALL_OP_RET((sch), op, locked_rq, task0, task1, ##args);	\
544 	current->scx.kf_tasks[0] = NULL;					\
545 	current->scx.kf_tasks[1] = NULL;					\
546 	__ret;									\
547 })
548 
549 /* see SCX_CALL_OP_TASK() */
550 static __always_inline bool scx_kf_arg_task_ok(struct scx_sched *sch,
551 							struct task_struct *p)
552 {
553 	if (unlikely((p != current->scx.kf_tasks[0] &&
554 		      p != current->scx.kf_tasks[1]))) {
555 		scx_error(sch, "called on a task not being operated on");
556 		return false;
557 	}
558 
559 	return true;
560 }
561 
562 enum scx_dsq_iter_flags {
563 	/* iterate in the reverse dispatch order */
564 	SCX_DSQ_ITER_REV		= 1U << 16,
565 
566 	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
567 	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
568 
569 	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
570 	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
571 					  __SCX_DSQ_ITER_HAS_SLICE |
572 					  __SCX_DSQ_ITER_HAS_VTIME,
573 };
574 
575 /**
576  * nldsq_next_task - Iterate to the next task in a non-local DSQ
577  * @dsq: non-local dsq being iterated
578  * @cur: current position, %NULL to start iteration
579  * @rev: walk backwards
580  *
581  * Returns %NULL when iteration is finished.
582  */
583 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
584 					   struct task_struct *cur, bool rev)
585 {
586 	struct list_head *list_node;
587 	struct scx_dsq_list_node *dsq_lnode;
588 
589 	lockdep_assert_held(&dsq->lock);
590 
591 	if (cur)
592 		list_node = &cur->scx.dsq_list.node;
593 	else
594 		list_node = &dsq->list;
595 
596 	/* find the next task, need to skip BPF iteration cursors */
597 	do {
598 		if (rev)
599 			list_node = list_node->prev;
600 		else
601 			list_node = list_node->next;
602 
603 		if (list_node == &dsq->list)
604 			return NULL;
605 
606 		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
607 					 node);
608 	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
609 
610 	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
611 }
612 
613 #define nldsq_for_each_task(p, dsq)						\
614 	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
615 	     (p) = nldsq_next_task((dsq), (p), false))
616 
617 /**
618  * nldsq_cursor_next_task - Iterate to the next task given a cursor in a non-local DSQ
619  * @cursor: scx_dsq_list_node initialized with INIT_DSQ_LIST_CURSOR()
620  * @dsq: non-local dsq being iterated
621  *
622  * Find the next task in a cursor based iteration. The caller must have
623  * initialized @cursor using INIT_DSQ_LIST_CURSOR() and can release the DSQ lock
624  * between the iteration steps.
625  *
626  * Only tasks which were queued before @cursor was initialized are visible. This
627  * bounds the iteration and guarantees that vtime never jumps in the other
628  * direction while iterating.
629  */
630 static struct task_struct *nldsq_cursor_next_task(struct scx_dsq_list_node *cursor,
631 						  struct scx_dispatch_q *dsq)
632 {
633 	bool rev = cursor->flags & SCX_DSQ_ITER_REV;
634 	struct task_struct *p;
635 
636 	lockdep_assert_held(&dsq->lock);
637 	BUG_ON(!(cursor->flags & SCX_DSQ_LNODE_ITER_CURSOR));
638 
639 	if (list_empty(&cursor->node))
640 		p = NULL;
641 	else
642 		p = container_of(cursor, struct task_struct, scx.dsq_list);
643 
644 	/* skip cursors and tasks that were queued after @cursor init */
645 	do {
646 		p = nldsq_next_task(dsq, p, rev);
647 	} while (p && unlikely(u32_before(cursor->priv, p->scx.dsq_seq)));
648 
649 	if (p) {
650 		if (rev)
651 			list_move_tail(&cursor->node, &p->scx.dsq_list.node);
652 		else
653 			list_move(&cursor->node, &p->scx.dsq_list.node);
654 	} else {
655 		list_del_init(&cursor->node);
656 	}
657 
658 	return p;
659 }
660 
661 /**
662  * nldsq_cursor_lost_task - Test whether someone else took the task since iteration
663  * @cursor: scx_dsq_list_node initialized with INIT_DSQ_LIST_CURSOR()
664  * @rq: rq @p was on
665  * @dsq: dsq @p was on
666  * @p: target task
667  *
668  * @p is a task returned by nldsq_cursor_next_task(). The locks may have been
669  * dropped and re-acquired inbetween. Verify that no one else took or is in the
670  * process of taking @p from @dsq.
671  *
672  * On %false return, the caller can assume full ownership of @p.
673  */
674 static bool nldsq_cursor_lost_task(struct scx_dsq_list_node *cursor,
675 				   struct rq *rq, struct scx_dispatch_q *dsq,
676 				   struct task_struct *p)
677 {
678 	lockdep_assert_rq_held(rq);
679 	lockdep_assert_held(&dsq->lock);
680 
681 	/*
682 	 * @p could have already left $src_dsq, got re-enqueud, or be in the
683 	 * process of being consumed by someone else.
684 	 */
685 	if (unlikely(p->scx.dsq != dsq ||
686 		     u32_before(cursor->priv, p->scx.dsq_seq) ||
687 		     p->scx.holding_cpu >= 0))
688 		return true;
689 
690 	/* if @p has stayed on @dsq, its rq couldn't have changed */
691 	if (WARN_ON_ONCE(rq != task_rq(p)))
692 		return true;
693 
694 	return false;
695 }
696 
697 /*
698  * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
699  * dispatch order. BPF-visible iterator is opaque and larger to allow future
700  * changes without breaking backward compatibility. Can be used with
701  * bpf_for_each(). See bpf_iter_scx_dsq_*().
702  */
703 struct bpf_iter_scx_dsq_kern {
704 	struct scx_dsq_list_node	cursor;
705 	struct scx_dispatch_q		*dsq;
706 	u64				slice;
707 	u64				vtime;
708 } __attribute__((aligned(8)));
709 
710 struct bpf_iter_scx_dsq {
711 	u64				__opaque[6];
712 } __attribute__((aligned(8)));
713 
714 
715 /*
716  * SCX task iterator.
717  */
718 struct scx_task_iter {
719 	struct sched_ext_entity		cursor;
720 	struct task_struct		*locked_task;
721 	struct rq			*rq;
722 	struct rq_flags			rf;
723 	u32				cnt;
724 	bool				list_locked;
725 #ifdef CONFIG_EXT_SUB_SCHED
726 	struct cgroup			*cgrp;
727 	struct cgroup_subsys_state	*css_pos;
728 	struct css_task_iter		css_iter;
729 #endif
730 };
731 
732 /**
733  * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
734  * @iter: iterator to init
735  * @cgrp: Optional root of cgroup subhierarchy to iterate
736  *
737  * Initialize @iter. Once initialized, @iter must eventually be stopped with
738  * scx_task_iter_stop().
739  *
740  * If @cgrp is %NULL, scx_tasks is used for iteration and this function returns
741  * with scx_tasks_lock held and @iter->cursor inserted into scx_tasks.
742  *
743  * If @cgrp is not %NULL, @cgrp and its descendants' tasks are walked using
744  * @iter->css_iter. The caller must be holding cgroup_lock() to prevent cgroup
745  * task migrations.
746  *
747  * The two modes of iterations are largely independent and it's likely that
748  * scx_tasks can be removed in favor of always using cgroup iteration if
749  * CONFIG_SCHED_CLASS_EXT depends on CONFIG_CGROUPS.
750  *
751  * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
752  * between this and the first next() call or between any two next() calls. If
753  * the locks are released between two next() calls, the caller is responsible
754  * for ensuring that the task being iterated remains accessible either through
755  * RCU read lock or obtaining a reference count.
756  *
757  * All tasks which existed when the iteration started are guaranteed to be
758  * visited as long as they are not dead.
759  */
760 static void scx_task_iter_start(struct scx_task_iter *iter, struct cgroup *cgrp)
761 {
762 	memset(iter, 0, sizeof(*iter));
763 
764 #ifdef CONFIG_EXT_SUB_SCHED
765 	if (cgrp) {
766 		lockdep_assert_held(&cgroup_mutex);
767 		iter->cgrp = cgrp;
768 		iter->css_pos = css_next_descendant_pre(NULL, &iter->cgrp->self);
769 		css_task_iter_start(iter->css_pos, 0, &iter->css_iter);
770 		return;
771 	}
772 #endif
773 	raw_spin_lock_irq(&scx_tasks_lock);
774 
775 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
776 	list_add(&iter->cursor.tasks_node, &scx_tasks);
777 	iter->list_locked = true;
778 }
779 
780 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
781 {
782 	if (iter->locked_task) {
783 		__balance_callbacks(iter->rq, &iter->rf);
784 		task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
785 		iter->locked_task = NULL;
786 	}
787 }
788 
789 /**
790  * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
791  * @iter: iterator to unlock
792  *
793  * If @iter is in the middle of a locked iteration, it may be locking the rq of
794  * the task currently being visited in addition to scx_tasks_lock. Unlock both.
795  * This function can be safely called anytime during an iteration. The next
796  * iterator operation will automatically restore the necessary locking.
797  */
798 static void scx_task_iter_unlock(struct scx_task_iter *iter)
799 {
800 	__scx_task_iter_rq_unlock(iter);
801 	if (iter->list_locked) {
802 		iter->list_locked = false;
803 		raw_spin_unlock_irq(&scx_tasks_lock);
804 	}
805 }
806 
807 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
808 {
809 	if (!iter->list_locked) {
810 		raw_spin_lock_irq(&scx_tasks_lock);
811 		iter->list_locked = true;
812 	}
813 }
814 
815 /**
816  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
817  * @iter: iterator to exit
818  *
819  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
820  * which is released on return. If the iterator holds a task's rq lock, that rq
821  * lock is also released. See scx_task_iter_start() for details.
822  */
823 static void scx_task_iter_stop(struct scx_task_iter *iter)
824 {
825 #ifdef CONFIG_EXT_SUB_SCHED
826 	if (iter->cgrp) {
827 		if (iter->css_pos)
828 			css_task_iter_end(&iter->css_iter);
829 		__scx_task_iter_rq_unlock(iter);
830 		return;
831 	}
832 #endif
833 	__scx_task_iter_maybe_relock(iter);
834 	list_del_init(&iter->cursor.tasks_node);
835 	scx_task_iter_unlock(iter);
836 }
837 
838 /**
839  * scx_task_iter_next - Next task
840  * @iter: iterator to walk
841  *
842  * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
843  * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
844  * by holding scx_tasks_lock for too long.
845  */
846 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
847 {
848 	struct list_head *cursor = &iter->cursor.tasks_node;
849 	struct sched_ext_entity *pos;
850 
851 	if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
852 		scx_task_iter_unlock(iter);
853 		cond_resched();
854 	}
855 
856 #ifdef CONFIG_EXT_SUB_SCHED
857 	if (iter->cgrp) {
858 		while (iter->css_pos) {
859 			struct task_struct *p;
860 
861 			p = css_task_iter_next(&iter->css_iter);
862 			if (p)
863 				return p;
864 
865 			css_task_iter_end(&iter->css_iter);
866 			iter->css_pos = css_next_descendant_pre(iter->css_pos,
867 								&iter->cgrp->self);
868 			if (iter->css_pos)
869 				css_task_iter_start(iter->css_pos, 0, &iter->css_iter);
870 		}
871 		return NULL;
872 	}
873 #endif
874 	__scx_task_iter_maybe_relock(iter);
875 
876 	list_for_each_entry(pos, cursor, tasks_node) {
877 		if (&pos->tasks_node == &scx_tasks)
878 			return NULL;
879 		if (!(pos->flags & SCX_TASK_CURSOR)) {
880 			list_move(cursor, &pos->tasks_node);
881 			return container_of(pos, struct task_struct, scx);
882 		}
883 	}
884 
885 	/* can't happen, should always terminate at scx_tasks above */
886 	BUG();
887 }
888 
889 /**
890  * scx_task_iter_next_locked - Next non-idle task with its rq locked
891  * @iter: iterator to walk
892  *
893  * Visit the non-idle task with its rq lock held. Allows callers to specify
894  * whether they would like to filter out dead tasks. See scx_task_iter_start()
895  * for details.
896  */
897 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
898 {
899 	struct task_struct *p;
900 
901 	__scx_task_iter_rq_unlock(iter);
902 
903 	while ((p = scx_task_iter_next(iter))) {
904 		/*
905 		 * scx_task_iter is used to prepare and move tasks into SCX
906 		 * while loading the BPF scheduler and vice-versa while
907 		 * unloading. The init_tasks ("swappers") should be excluded
908 		 * from the iteration because:
909 		 *
910 		 * - It's unsafe to use __setschduler_prio() on an init_task to
911 		 *   determine the sched_class to use as it won't preserve its
912 		 *   idle_sched_class.
913 		 *
914 		 * - ops.init/exit_task() can easily be confused if called with
915 		 *   init_tasks as they, e.g., share PID 0.
916 		 *
917 		 * As init_tasks are never scheduled through SCX, they can be
918 		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
919 		 * doesn't work here:
920 		 *
921 		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
922 		 *   yet been onlined.
923 		 *
924 		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
925 		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
926 		 *
927 		 * Test for idle_sched_class as only init_tasks are on it.
928 		 */
929 		if (p->sched_class != &idle_sched_class)
930 			break;
931 	}
932 	if (!p)
933 		return NULL;
934 
935 	iter->rq = task_rq_lock(p, &iter->rf);
936 	iter->locked_task = p;
937 
938 	return p;
939 }
940 
941 /**
942  * scx_add_event - Increase an event counter for 'name' by 'cnt'
943  * @sch: scx_sched to account events for
944  * @name: an event name defined in struct scx_event_stats
945  * @cnt: the number of the event occurred
946  *
947  * This can be used when preemption is not disabled.
948  */
949 #define scx_add_event(sch, name, cnt) do {					\
950 	this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
951 	trace_sched_ext_event(#name, (cnt));					\
952 } while(0)
953 
954 /**
955  * __scx_add_event - Increase an event counter for 'name' by 'cnt'
956  * @sch: scx_sched to account events for
957  * @name: an event name defined in struct scx_event_stats
958  * @cnt: the number of the event occurred
959  *
960  * This should be used only when preemption is disabled.
961  */
962 #define __scx_add_event(sch, name, cnt) do {					\
963 	__this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
964 	trace_sched_ext_event(#name, cnt);					\
965 } while(0)
966 
967 /**
968  * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
969  * @dst_e: destination event stats
970  * @src_e: source event stats
971  * @kind: a kind of event to be aggregated
972  */
973 #define scx_agg_event(dst_e, src_e, kind) do {					\
974 	(dst_e)->kind += READ_ONCE((src_e)->kind);				\
975 } while(0)
976 
977 /**
978  * scx_dump_event - Dump an event 'kind' in 'events' to 's'
979  * @s: output seq_buf
980  * @events: event stats
981  * @kind: a kind of event to dump
982  */
983 #define scx_dump_event(s, events, kind) do {					\
984 	dump_line(&(s), "%40s: %16lld", #kind, (events)->kind);			\
985 } while (0)
986 
987 
988 static void scx_read_events(struct scx_sched *sch,
989 			    struct scx_event_stats *events);
990 
991 static enum scx_enable_state scx_enable_state(void)
992 {
993 	return atomic_read(&scx_enable_state_var);
994 }
995 
996 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
997 {
998 	return atomic_xchg(&scx_enable_state_var, to);
999 }
1000 
1001 static bool scx_tryset_enable_state(enum scx_enable_state to,
1002 				    enum scx_enable_state from)
1003 {
1004 	int from_v = from;
1005 
1006 	return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
1007 }
1008 
1009 /**
1010  * wait_ops_state - Busy-wait the specified ops state to end
1011  * @p: target task
1012  * @opss: state to wait the end of
1013  *
1014  * Busy-wait for @p to transition out of @opss. This can only be used when the
1015  * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
1016  * has load_acquire semantics to ensure that the caller can see the updates made
1017  * in the enqueueing and dispatching paths.
1018  */
1019 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1020 {
1021 	do {
1022 		cpu_relax();
1023 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1024 }
1025 
1026 static inline bool __cpu_valid(s32 cpu)
1027 {
1028 	return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
1029 }
1030 
1031 /**
1032  * ops_cpu_valid - Verify a cpu number, to be used on ops input args
1033  * @sch: scx_sched to abort on error
1034  * @cpu: cpu number which came from a BPF ops
1035  * @where: extra information reported on error
1036  *
1037  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1038  * Verify that it is in range and one of the possible cpus. If invalid, trigger
1039  * an ops error.
1040  */
1041 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
1042 {
1043 	if (__cpu_valid(cpu)) {
1044 		return true;
1045 	} else {
1046 		scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
1047 		return false;
1048 	}
1049 }
1050 
1051 /**
1052  * ops_sanitize_err - Sanitize a -errno value
1053  * @sch: scx_sched to error out on error
1054  * @ops_name: operation to blame on failure
1055  * @err: -errno value to sanitize
1056  *
1057  * Verify @err is a valid -errno. If not, trigger scx_error() and return
1058  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1059  * cause misbehaviors. For an example, a large negative return from
1060  * ops.init_task() triggers an oops when passed up the call chain because the
1061  * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1062  * handled as a pointer.
1063  */
1064 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
1065 {
1066 	if (err < 0 && err >= -MAX_ERRNO)
1067 		return err;
1068 
1069 	scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
1070 	return -EPROTO;
1071 }
1072 
1073 static void deferred_bal_cb_workfn(struct rq *rq)
1074 {
1075 	run_deferred(rq);
1076 }
1077 
1078 static void deferred_irq_workfn(struct irq_work *irq_work)
1079 {
1080 	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1081 
1082 	raw_spin_rq_lock(rq);
1083 	run_deferred(rq);
1084 	raw_spin_rq_unlock(rq);
1085 }
1086 
1087 /**
1088  * schedule_deferred - Schedule execution of deferred actions on an rq
1089  * @rq: target rq
1090  *
1091  * Schedule execution of deferred actions on @rq. Deferred actions are executed
1092  * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks
1093  * to other rqs.
1094  */
1095 static void schedule_deferred(struct rq *rq)
1096 {
1097 	/*
1098 	 * This is the fallback when schedule_deferred_locked() can't use
1099 	 * the cheaper balance callback or wakeup hook paths (the target
1100 	 * CPU is not in balance or wakeup). Currently, this is primarily
1101 	 * hit by reenqueue operations targeting a remote CPU.
1102 	 *
1103 	 * Queue on the target CPU. The deferred work can run from any CPU
1104 	 * correctly - the _locked() path already processes remote rqs from
1105 	 * the calling CPU - but targeting the owning CPU allows IPI delivery
1106 	 * without waiting for the calling CPU to re-enable IRQs and is
1107 	 * cheaper as the reenqueue runs locally.
1108 	 */
1109 	irq_work_queue_on(&rq->scx.deferred_irq_work, cpu_of(rq));
1110 }
1111 
1112 /**
1113  * schedule_deferred_locked - Schedule execution of deferred actions on an rq
1114  * @rq: target rq
1115  *
1116  * Schedule execution of deferred actions on @rq. Equivalent to
1117  * schedule_deferred() but requires @rq to be locked and can be more efficient.
1118  */
1119 static void schedule_deferred_locked(struct rq *rq)
1120 {
1121 	lockdep_assert_rq_held(rq);
1122 
1123 	/*
1124 	 * If in the middle of waking up a task, task_woken_scx() will be called
1125 	 * afterwards which will then run the deferred actions, no need to
1126 	 * schedule anything.
1127 	 */
1128 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1129 		return;
1130 
1131 	/* Don't do anything if there already is a deferred operation. */
1132 	if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
1133 		return;
1134 
1135 	/*
1136 	 * If in balance, the balance callbacks will be called before rq lock is
1137 	 * released. Schedule one.
1138 	 *
1139 	 *
1140 	 * We can't directly insert the callback into the
1141 	 * rq's list: The call can drop its lock and make the pending balance
1142 	 * callback visible to unrelated code paths that call rq_pin_lock().
1143 	 *
1144 	 * Just let balance_one() know that it must do it itself.
1145 	 */
1146 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1147 		rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
1148 		return;
1149 	}
1150 
1151 	/*
1152 	 * No scheduler hooks available. Use the generic irq_work path. The
1153 	 * above WAKEUP and BALANCE paths should cover most of the cases and the
1154 	 * time to IRQ re-enable shouldn't be long.
1155 	 */
1156 	schedule_deferred(rq);
1157 }
1158 
1159 static void schedule_dsq_reenq(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1160 			       u64 reenq_flags, struct rq *locked_rq)
1161 {
1162 	struct rq *rq;
1163 
1164 	/*
1165 	 * Allowing reenqueues doesn't make sense while bypassing. This also
1166 	 * blocks from new reenqueues to be scheduled on dead scheds.
1167 	 */
1168 	if (unlikely(READ_ONCE(sch->bypass_depth)))
1169 		return;
1170 
1171 	if (dsq->id == SCX_DSQ_LOCAL) {
1172 		rq = container_of(dsq, struct rq, scx.local_dsq);
1173 
1174 		struct scx_sched_pcpu *sch_pcpu = per_cpu_ptr(sch->pcpu, cpu_of(rq));
1175 		struct scx_deferred_reenq_local *drl = &sch_pcpu->deferred_reenq_local;
1176 
1177 		/*
1178 		 * Pairs with smp_mb() in process_deferred_reenq_locals() and
1179 		 * guarantees that there is a reenq_local() afterwards.
1180 		 */
1181 		smp_mb();
1182 
1183 		if (list_empty(&drl->node) ||
1184 		    (READ_ONCE(drl->flags) & reenq_flags) != reenq_flags) {
1185 
1186 			guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
1187 
1188 			if (list_empty(&drl->node))
1189 				list_move_tail(&drl->node, &rq->scx.deferred_reenq_locals);
1190 			WRITE_ONCE(drl->flags, drl->flags | reenq_flags);
1191 		}
1192 	} else if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN)) {
1193 		rq = this_rq();
1194 
1195 		struct scx_dsq_pcpu *dsq_pcpu = per_cpu_ptr(dsq->pcpu, cpu_of(rq));
1196 		struct scx_deferred_reenq_user *dru = &dsq_pcpu->deferred_reenq_user;
1197 
1198 		/*
1199 		 * Pairs with smp_mb() in process_deferred_reenq_users() and
1200 		 * guarantees that there is a reenq_user() afterwards.
1201 		 */
1202 		smp_mb();
1203 
1204 		if (list_empty(&dru->node) ||
1205 		    (READ_ONCE(dru->flags) & reenq_flags) != reenq_flags) {
1206 
1207 			guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
1208 
1209 			if (list_empty(&dru->node))
1210 				list_move_tail(&dru->node, &rq->scx.deferred_reenq_users);
1211 			WRITE_ONCE(dru->flags, dru->flags | reenq_flags);
1212 		}
1213 	} else {
1214 		scx_error(sch, "DSQ 0x%llx not allowed for reenq", dsq->id);
1215 		return;
1216 	}
1217 
1218 	if (rq == locked_rq)
1219 		schedule_deferred_locked(rq);
1220 	else
1221 		schedule_deferred(rq);
1222 }
1223 
1224 static void schedule_reenq_local(struct rq *rq, u64 reenq_flags)
1225 {
1226 	struct scx_sched *root = rcu_dereference_sched(scx_root);
1227 
1228 	if (WARN_ON_ONCE(!root))
1229 		return;
1230 
1231 	schedule_dsq_reenq(root, &rq->scx.local_dsq, reenq_flags, rq);
1232 }
1233 
1234 /**
1235  * touch_core_sched - Update timestamp used for core-sched task ordering
1236  * @rq: rq to read clock from, must be locked
1237  * @p: task to update the timestamp for
1238  *
1239  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1240  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1241  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1242  * exhaustion).
1243  */
1244 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1245 {
1246 	lockdep_assert_rq_held(rq);
1247 
1248 #ifdef CONFIG_SCHED_CORE
1249 	/*
1250 	 * It's okay to update the timestamp spuriously. Use
1251 	 * sched_core_disabled() which is cheaper than enabled().
1252 	 *
1253 	 * As this is used to determine ordering between tasks of sibling CPUs,
1254 	 * it may be better to use per-core dispatch sequence instead.
1255 	 */
1256 	if (!sched_core_disabled())
1257 		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1258 #endif
1259 }
1260 
1261 /**
1262  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1263  * @rq: rq to read clock from, must be locked
1264  * @p: task being dispatched
1265  *
1266  * If the BPF scheduler implements custom core-sched ordering via
1267  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1268  * ordering within each local DSQ. This function is called from dispatch paths
1269  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1270  */
1271 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1272 {
1273 	lockdep_assert_rq_held(rq);
1274 
1275 #ifdef CONFIG_SCHED_CORE
1276 	if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
1277 		touch_core_sched(rq, p);
1278 #endif
1279 }
1280 
1281 static void update_curr_scx(struct rq *rq)
1282 {
1283 	struct task_struct *curr = rq->curr;
1284 	s64 delta_exec;
1285 
1286 	delta_exec = update_curr_common(rq);
1287 	if (unlikely(delta_exec <= 0))
1288 		return;
1289 
1290 	if (curr->scx.slice != SCX_SLICE_INF) {
1291 		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1292 		if (!curr->scx.slice)
1293 			touch_core_sched(rq, curr);
1294 	}
1295 
1296 	dl_server_update(&rq->ext_server, delta_exec);
1297 }
1298 
1299 static bool scx_dsq_priq_less(struct rb_node *node_a,
1300 			      const struct rb_node *node_b)
1301 {
1302 	const struct task_struct *a =
1303 		container_of(node_a, struct task_struct, scx.dsq_priq);
1304 	const struct task_struct *b =
1305 		container_of(node_b, struct task_struct, scx.dsq_priq);
1306 
1307 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1308 }
1309 
1310 static void dsq_inc_nr(struct scx_dispatch_q *dsq, struct task_struct *p, u64 enq_flags)
1311 {
1312 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1313 	WRITE_ONCE(dsq->nr, dsq->nr + 1);
1314 
1315 	/*
1316 	 * Once @p reaches a local DSQ, it can only leave it by being dispatched
1317 	 * to the CPU or dequeued. In both cases, the only way @p can go back to
1318 	 * the BPF sched is through enqueueing. If being inserted into a local
1319 	 * DSQ with IMMED, persist the state until the next enqueueing event in
1320 	 * do_enqueue_task() so that we can maintain IMMED protection through
1321 	 * e.g. SAVE/RESTORE cycles and slice extensions.
1322 	 */
1323 	if (enq_flags & SCX_ENQ_IMMED) {
1324 		if (unlikely(dsq->id != SCX_DSQ_LOCAL)) {
1325 			WARN_ON_ONCE(!(enq_flags & SCX_ENQ_GDSQ_FALLBACK));
1326 			return;
1327 		}
1328 		p->scx.flags |= SCX_TASK_IMMED;
1329 	}
1330 
1331 	if (p->scx.flags & SCX_TASK_IMMED) {
1332 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1333 
1334 		if (WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
1335 			return;
1336 
1337 		rq->scx.nr_immed++;
1338 
1339 		/*
1340 		 * If @rq already had other tasks or the current task is not
1341 		 * done yet, @p can't go on the CPU immediately. Re-enqueue.
1342 		 */
1343 		if (unlikely(dsq->nr > 1 || !rq_is_open(rq, enq_flags)))
1344 			schedule_reenq_local(rq, 0);
1345 	}
1346 }
1347 
1348 static void dsq_dec_nr(struct scx_dispatch_q *dsq, struct task_struct *p)
1349 {
1350 	/* see dsq_inc_nr() */
1351 	WRITE_ONCE(dsq->nr, dsq->nr - 1);
1352 
1353 	if (p->scx.flags & SCX_TASK_IMMED) {
1354 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1355 
1356 		if (WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL) ||
1357 		    WARN_ON_ONCE(rq->scx.nr_immed <= 0))
1358 			return;
1359 
1360 		rq->scx.nr_immed--;
1361 	}
1362 }
1363 
1364 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
1365 {
1366 	p->scx.slice = READ_ONCE(sch->slice_dfl);
1367 	__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
1368 }
1369 
1370 /*
1371  * Return true if @p is moving due to an internal SCX migration, false
1372  * otherwise.
1373  */
1374 static inline bool task_scx_migrating(struct task_struct *p)
1375 {
1376 	/*
1377 	 * We only need to check sticky_cpu: it is set to the destination
1378 	 * CPU in move_remote_task_to_local_dsq() before deactivate_task()
1379 	 * and cleared when the task is enqueued on the destination, so it
1380 	 * is only non-negative during an internal SCX migration.
1381 	 */
1382 	return p->scx.sticky_cpu >= 0;
1383 }
1384 
1385 /*
1386  * Call ops.dequeue() if the task is in BPF custody and not migrating.
1387  * Clears %SCX_TASK_IN_CUSTODY when the callback is invoked.
1388  */
1389 static void call_task_dequeue(struct scx_sched *sch, struct rq *rq,
1390 			      struct task_struct *p, u64 deq_flags)
1391 {
1392 	if (!(p->scx.flags & SCX_TASK_IN_CUSTODY) || task_scx_migrating(p))
1393 		return;
1394 
1395 	if (SCX_HAS_OP(sch, dequeue))
1396 		SCX_CALL_OP_TASK(sch, dequeue, rq, p, deq_flags);
1397 
1398 	p->scx.flags &= ~SCX_TASK_IN_CUSTODY;
1399 }
1400 
1401 static void local_dsq_post_enq(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1402 			       struct task_struct *p, u64 enq_flags)
1403 {
1404 	struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1405 
1406 	call_task_dequeue(sch, rq, p, 0);
1407 
1408 	/*
1409 	 * Note that @rq's lock may be dropped between this enqueue and @p
1410 	 * actually getting on CPU. This gives higher-class tasks (e.g. RT)
1411 	 * an opportunity to wake up on @rq and prevent @p from running.
1412 	 * Here are some concrete examples:
1413 	 *
1414 	 * Example 1:
1415 	 *
1416 	 * We dispatch two tasks from a single ops.dispatch():
1417 	 * - First, a local task to this CPU's local DSQ;
1418 	 * - Second, a local/remote task to a remote CPU's local DSQ.
1419 	 * We must drop the local rq lock in order to finish the second
1420 	 * dispatch. In that time, an RT task can wake up on the local rq.
1421 	 *
1422 	 * Example 2:
1423 	 *
1424 	 * We dispatch a local/remote task to a remote CPU's local DSQ.
1425 	 * We must drop the remote rq lock before the dispatched task can run,
1426 	 * which gives an RT task an opportunity to wake up on the remote rq.
1427 	 *
1428 	 * Both examples work the same if we replace dispatching with moving
1429 	 * the tasks from a user-created DSQ.
1430 	 *
1431 	 * We must detect these wakeups so that we can re-enqueue IMMED tasks
1432 	 * from @rq's local DSQ. scx_wakeup_preempt() serves exactly this
1433 	 * purpose, but for it to be invoked, we must ensure that we bump
1434 	 * @rq->next_class to &ext_sched_class if it's currently idle.
1435 	 *
1436 	 * wakeup_preempt() does the bumping, and since we only invoke it if
1437 	 * @rq->next_class is below &ext_sched_class, it will also
1438 	 * resched_curr(rq).
1439 	 */
1440 	if (sched_class_above(p->sched_class, rq->next_class))
1441 		wakeup_preempt(rq, p, 0);
1442 
1443 	/*
1444 	 * If @rq is in balance, the CPU is already vacant and looking for the
1445 	 * next task to run. No need to preempt or trigger resched after moving
1446 	 * @p into its local DSQ.
1447 	 * Note that the wakeup_preempt() above may have already triggered
1448 	 * a resched if @rq->next_class was idle. It's harmless, since
1449 	 * need_resched is cleared immediately after task pick.
1450 	 */
1451 	if (rq->scx.flags & SCX_RQ_IN_BALANCE)
1452 		return;
1453 
1454 	if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1455 	    rq->curr->sched_class == &ext_sched_class) {
1456 		rq->curr->scx.slice = 0;
1457 		resched_curr(rq);
1458 	}
1459 }
1460 
1461 static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq,
1462 			     struct scx_dispatch_q *dsq, struct task_struct *p,
1463 			     u64 enq_flags)
1464 {
1465 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
1466 
1467 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1468 	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1469 		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
1470 
1471 	if (!is_local) {
1472 		raw_spin_lock_nested(&dsq->lock,
1473 			(enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0);
1474 
1475 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1476 			scx_error(sch, "attempting to dispatch to a destroyed dsq");
1477 			/* fall back to the global dsq */
1478 			raw_spin_unlock(&dsq->lock);
1479 			dsq = find_global_dsq(sch, task_cpu(p));
1480 			raw_spin_lock(&dsq->lock);
1481 		}
1482 	}
1483 
1484 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1485 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1486 		/*
1487 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1488 		 * their FIFO queues. To avoid confusion and accidentally
1489 		 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1490 		 * disallow any internal DSQ from doing vtime ordering of
1491 		 * tasks.
1492 		 */
1493 		scx_error(sch, "cannot use vtime ordering for built-in DSQs");
1494 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1495 	}
1496 
1497 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1498 		struct rb_node *rbp;
1499 
1500 		/*
1501 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1502 		 * linked to both the rbtree and list on PRIQs, this can only be
1503 		 * tested easily when adding the first task.
1504 		 */
1505 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1506 			     nldsq_next_task(dsq, NULL, false)))
1507 			scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1508 				  dsq->id);
1509 
1510 		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1511 		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1512 
1513 		/*
1514 		 * Find the previous task and insert after it on the list so
1515 		 * that @dsq->list is vtime ordered.
1516 		 */
1517 		rbp = rb_prev(&p->scx.dsq_priq);
1518 		if (rbp) {
1519 			struct task_struct *prev =
1520 				container_of(rbp, struct task_struct,
1521 					     scx.dsq_priq);
1522 			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1523 			/* first task unchanged - no update needed */
1524 		} else {
1525 			list_add(&p->scx.dsq_list.node, &dsq->list);
1526 			/* not builtin and new task is at head - use fastpath */
1527 			rcu_assign_pointer(dsq->first_task, p);
1528 		}
1529 	} else {
1530 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1531 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1532 			scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1533 				  dsq->id);
1534 
1535 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) {
1536 			list_add(&p->scx.dsq_list.node, &dsq->list);
1537 			/* new task inserted at head - use fastpath */
1538 			if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1539 				rcu_assign_pointer(dsq->first_task, p);
1540 		} else {
1541 			/*
1542 			 * dsq->list can contain parked BPF iterator cursors, so
1543 			 * list_empty() here isn't a reliable proxy for "no real
1544 			 * task in the DSQ". Test dsq->first_task directly.
1545 			 */
1546 			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1547 			if (!dsq->first_task && !(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1548 				rcu_assign_pointer(dsq->first_task, p);
1549 		}
1550 	}
1551 
1552 	/* seq records the order tasks are queued, used by BPF DSQ iterator */
1553 	WRITE_ONCE(dsq->seq, dsq->seq + 1);
1554 	p->scx.dsq_seq = dsq->seq;
1555 
1556 	dsq_inc_nr(dsq, p, enq_flags);
1557 	p->scx.dsq = dsq;
1558 
1559 	/*
1560 	 * Update custody and call ops.dequeue() before clearing ops_state:
1561 	 * once ops_state is cleared, waiters in ops_dequeue() can proceed
1562 	 * and dequeue_task_scx() will RMW p->scx.flags. If we clear
1563 	 * ops_state first, both sides would modify p->scx.flags
1564 	 * concurrently in a non-atomic way.
1565 	 */
1566 	if (is_local) {
1567 		local_dsq_post_enq(sch, dsq, p, enq_flags);
1568 	} else {
1569 		/*
1570 		 * Task on global/bypass DSQ: leave custody, task on
1571 		 * non-terminal DSQ: enter custody.
1572 		 */
1573 		if (dsq->id == SCX_DSQ_GLOBAL || dsq->id == SCX_DSQ_BYPASS)
1574 			call_task_dequeue(sch, rq, p, 0);
1575 		else
1576 			p->scx.flags |= SCX_TASK_IN_CUSTODY;
1577 
1578 		raw_spin_unlock(&dsq->lock);
1579 	}
1580 
1581 	/*
1582 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1583 	 * match waiters' load_acquire.
1584 	 */
1585 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1586 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1587 }
1588 
1589 static void task_unlink_from_dsq(struct task_struct *p,
1590 				 struct scx_dispatch_q *dsq)
1591 {
1592 	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1593 
1594 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1595 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
1596 		RB_CLEAR_NODE(&p->scx.dsq_priq);
1597 		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1598 	}
1599 
1600 	list_del_init(&p->scx.dsq_list.node);
1601 	dsq_dec_nr(dsq, p);
1602 
1603 	if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) {
1604 		struct task_struct *first_task;
1605 
1606 		first_task = nldsq_next_task(dsq, NULL, false);
1607 		rcu_assign_pointer(dsq->first_task, first_task);
1608 	}
1609 }
1610 
1611 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1612 {
1613 	struct scx_dispatch_q *dsq = p->scx.dsq;
1614 	bool is_local = dsq == &rq->scx.local_dsq;
1615 
1616 	lockdep_assert_rq_held(rq);
1617 
1618 	if (!dsq) {
1619 		/*
1620 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1621 		 * Unlinking is all that's needed to cancel.
1622 		 */
1623 		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1624 			list_del_init(&p->scx.dsq_list.node);
1625 
1626 		/*
1627 		 * When dispatching directly from the BPF scheduler to a local
1628 		 * DSQ, the task isn't associated with any DSQ but
1629 		 * @p->scx.holding_cpu may be set under the protection of
1630 		 * %SCX_OPSS_DISPATCHING.
1631 		 */
1632 		if (p->scx.holding_cpu >= 0)
1633 			p->scx.holding_cpu = -1;
1634 
1635 		return;
1636 	}
1637 
1638 	if (!is_local)
1639 		raw_spin_lock(&dsq->lock);
1640 
1641 	/*
1642 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1643 	 * change underneath us.
1644 	*/
1645 	if (p->scx.holding_cpu < 0) {
1646 		/* @p must still be on @dsq, dequeue */
1647 		task_unlink_from_dsq(p, dsq);
1648 	} else {
1649 		/*
1650 		 * We're racing against dispatch_to_local_dsq() which already
1651 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1652 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1653 		 * the race.
1654 		 */
1655 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1656 		p->scx.holding_cpu = -1;
1657 	}
1658 	p->scx.dsq = NULL;
1659 
1660 	if (!is_local)
1661 		raw_spin_unlock(&dsq->lock);
1662 }
1663 
1664 /*
1665  * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq
1666  * and dsq are locked.
1667  */
1668 static void dispatch_dequeue_locked(struct task_struct *p,
1669 				    struct scx_dispatch_q *dsq)
1670 {
1671 	lockdep_assert_rq_held(task_rq(p));
1672 	lockdep_assert_held(&dsq->lock);
1673 
1674 	task_unlink_from_dsq(p, dsq);
1675 	p->scx.dsq = NULL;
1676 }
1677 
1678 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
1679 						    struct rq *rq, u64 dsq_id,
1680 						    s32 tcpu)
1681 {
1682 	struct scx_dispatch_q *dsq;
1683 
1684 	if (dsq_id == SCX_DSQ_LOCAL)
1685 		return &rq->scx.local_dsq;
1686 
1687 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1688 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1689 
1690 		if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1691 			return find_global_dsq(sch, tcpu);
1692 
1693 		return &cpu_rq(cpu)->scx.local_dsq;
1694 	}
1695 
1696 	if (dsq_id == SCX_DSQ_GLOBAL)
1697 		dsq = find_global_dsq(sch, tcpu);
1698 	else
1699 		dsq = find_user_dsq(sch, dsq_id);
1700 
1701 	if (unlikely(!dsq)) {
1702 		scx_error(sch, "non-existent DSQ 0x%llx", dsq_id);
1703 		return find_global_dsq(sch, tcpu);
1704 	}
1705 
1706 	return dsq;
1707 }
1708 
1709 static void mark_direct_dispatch(struct scx_sched *sch,
1710 				 struct task_struct *ddsp_task,
1711 				 struct task_struct *p, u64 dsq_id,
1712 				 u64 enq_flags)
1713 {
1714 	/*
1715 	 * Mark that dispatch already happened from ops.select_cpu() or
1716 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1717 	 * which can never match a valid task pointer.
1718 	 */
1719 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1720 
1721 	/* @p must match the task on the enqueue path */
1722 	if (unlikely(p != ddsp_task)) {
1723 		if (IS_ERR(ddsp_task))
1724 			scx_error(sch, "%s[%d] already direct-dispatched",
1725 				  p->comm, p->pid);
1726 		else
1727 			scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1728 				  ddsp_task->comm, ddsp_task->pid,
1729 				  p->comm, p->pid);
1730 		return;
1731 	}
1732 
1733 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1734 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1735 
1736 	p->scx.ddsp_dsq_id = dsq_id;
1737 	p->scx.ddsp_enq_flags = enq_flags;
1738 }
1739 
1740 /*
1741  * Clear @p direct dispatch state when leaving the scheduler.
1742  *
1743  * Direct dispatch state must be cleared in the following cases:
1744  *  - direct_dispatch(): cleared on the synchronous enqueue path, deferred
1745  *    dispatch keeps the state until consumed
1746  *  - process_ddsp_deferred_locals(): cleared after consuming deferred state,
1747  *  - do_enqueue_task(): cleared on enqueue fallbacks where the dispatch
1748  *    verdict is ignored (local/global/bypass)
1749  *  - dequeue_task_scx(): cleared after dispatch_dequeue(), covering deferred
1750  *    cancellation and holding_cpu races
1751  *  - scx_disable_task(): cleared for queued wakeup tasks, which are excluded by
1752  *    the scx_bypass() loop, so that stale state is not reused by a subsequent
1753  *    scheduler instance
1754  */
1755 static inline void clear_direct_dispatch(struct task_struct *p)
1756 {
1757 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1758 	p->scx.ddsp_enq_flags = 0;
1759 }
1760 
1761 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
1762 			    u64 enq_flags)
1763 {
1764 	struct rq *rq = task_rq(p);
1765 	struct scx_dispatch_q *dsq =
1766 		find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, task_cpu(p));
1767 	u64 ddsp_enq_flags;
1768 
1769 	touch_core_sched_dispatch(rq, p);
1770 
1771 	p->scx.ddsp_enq_flags |= enq_flags;
1772 
1773 	/*
1774 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
1775 	 * double lock a remote rq and enqueue to its local DSQ. For
1776 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1777 	 * the enqueue so that it's executed when @rq can be unlocked.
1778 	 */
1779 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1780 		unsigned long opss;
1781 
1782 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1783 
1784 		switch (opss & SCX_OPSS_STATE_MASK) {
1785 		case SCX_OPSS_NONE:
1786 			break;
1787 		case SCX_OPSS_QUEUEING:
1788 			/*
1789 			 * As @p was never passed to the BPF side, _release is
1790 			 * not strictly necessary. Still do it for consistency.
1791 			 */
1792 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1793 			break;
1794 		default:
1795 			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1796 				  p->comm, p->pid, opss);
1797 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1798 			break;
1799 		}
1800 
1801 		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1802 		list_add_tail(&p->scx.dsq_list.node,
1803 			      &rq->scx.ddsp_deferred_locals);
1804 		schedule_deferred_locked(rq);
1805 		return;
1806 	}
1807 
1808 	ddsp_enq_flags = p->scx.ddsp_enq_flags;
1809 	clear_direct_dispatch(p);
1810 
1811 	dispatch_enqueue(sch, rq, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1812 }
1813 
1814 static bool scx_rq_online(struct rq *rq)
1815 {
1816 	/*
1817 	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1818 	 * the online state as seen from the BPF scheduler. cpu_active() test
1819 	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1820 	 * stay set until the current scheduling operation is complete even if
1821 	 * we aren't locking @rq.
1822 	 */
1823 	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1824 }
1825 
1826 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1827 			    int sticky_cpu)
1828 {
1829 	struct scx_sched *sch = scx_task_sched(p);
1830 	struct task_struct **ddsp_taskp;
1831 	struct scx_dispatch_q *dsq;
1832 	unsigned long qseq;
1833 
1834 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1835 
1836 	/* internal movements - rq migration / RESTORE */
1837 	if (sticky_cpu == cpu_of(rq))
1838 		goto local_norefill;
1839 
1840 	/*
1841 	 * Clear persistent TASK_IMMED for fresh enqueues, see dsq_inc_nr().
1842 	 * Note that exiting and migration-disabled tasks that skip
1843 	 * ops.enqueue() below will lose IMMED protection unless
1844 	 * %SCX_OPS_ENQ_EXITING / %SCX_OPS_ENQ_MIGRATION_DISABLED are set.
1845 	 */
1846 	p->scx.flags &= ~SCX_TASK_IMMED;
1847 
1848 	/*
1849 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1850 	 * is offline and are just running the hotplug path. Don't bother the
1851 	 * BPF scheduler.
1852 	 */
1853 	if (!scx_rq_online(rq))
1854 		goto local;
1855 
1856 	if (scx_bypassing(sch, cpu_of(rq))) {
1857 		__scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
1858 		goto bypass;
1859 	}
1860 
1861 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1862 		goto direct;
1863 
1864 	/* see %SCX_OPS_ENQ_EXITING */
1865 	if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
1866 	    unlikely(p->flags & PF_EXITING)) {
1867 		__scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
1868 		goto local;
1869 	}
1870 
1871 	/* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
1872 	if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
1873 	    is_migration_disabled(p)) {
1874 		__scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
1875 		goto local;
1876 	}
1877 
1878 	if (unlikely(!SCX_HAS_OP(sch, enqueue)))
1879 		goto global;
1880 
1881 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
1882 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1883 
1884 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1885 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1886 
1887 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1888 	WARN_ON_ONCE(*ddsp_taskp);
1889 	*ddsp_taskp = p;
1890 
1891 	SCX_CALL_OP_TASK(sch, enqueue, rq, p, enq_flags);
1892 
1893 	*ddsp_taskp = NULL;
1894 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1895 		goto direct;
1896 
1897 	/*
1898 	 * Task is now in BPF scheduler's custody. Set %SCX_TASK_IN_CUSTODY
1899 	 * so ops.dequeue() is called when it leaves custody.
1900 	 */
1901 	p->scx.flags |= SCX_TASK_IN_CUSTODY;
1902 
1903 	/*
1904 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
1905 	 * dequeue may be waiting. The store_release matches their load_acquire.
1906 	 */
1907 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
1908 	return;
1909 
1910 direct:
1911 	direct_dispatch(sch, p, enq_flags);
1912 	return;
1913 local_norefill:
1914 	dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, enq_flags);
1915 	return;
1916 local:
1917 	dsq = &rq->scx.local_dsq;
1918 	goto enqueue;
1919 global:
1920 	dsq = find_global_dsq(sch, task_cpu(p));
1921 	goto enqueue;
1922 bypass:
1923 	dsq = bypass_enq_target_dsq(sch, task_cpu(p));
1924 	goto enqueue;
1925 
1926 enqueue:
1927 	/*
1928 	 * For task-ordering, slice refill must be treated as implying the end
1929 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
1930 	 * higher priority it becomes from scx_prio_less()'s POV.
1931 	 */
1932 	touch_core_sched(rq, p);
1933 	refill_task_slice_dfl(sch, p);
1934 	clear_direct_dispatch(p);
1935 	dispatch_enqueue(sch, rq, dsq, p, enq_flags);
1936 }
1937 
1938 static bool task_runnable(const struct task_struct *p)
1939 {
1940 	return !list_empty(&p->scx.runnable_node);
1941 }
1942 
1943 static void set_task_runnable(struct rq *rq, struct task_struct *p)
1944 {
1945 	lockdep_assert_rq_held(rq);
1946 
1947 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
1948 		p->scx.runnable_at = jiffies;
1949 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
1950 	}
1951 
1952 	/*
1953 	 * list_add_tail() must be used. scx_bypass() depends on tasks being
1954 	 * appended to the runnable_list.
1955 	 */
1956 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
1957 }
1958 
1959 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
1960 {
1961 	list_del_init(&p->scx.runnable_node);
1962 	if (reset_runnable_at)
1963 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
1964 }
1965 
1966 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags)
1967 {
1968 	struct scx_sched *sch = scx_task_sched(p);
1969 	int sticky_cpu = p->scx.sticky_cpu;
1970 	u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
1971 
1972 	if (enq_flags & ENQUEUE_WAKEUP)
1973 		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
1974 
1975 	/*
1976 	 * Restoring a running task will be immediately followed by
1977 	 * set_next_task_scx() which expects the task to not be on the BPF
1978 	 * scheduler as tasks can only start running through local DSQs. Force
1979 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
1980 	 */
1981 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
1982 		sticky_cpu = cpu_of(rq);
1983 
1984 	if (p->scx.flags & SCX_TASK_QUEUED) {
1985 		WARN_ON_ONCE(!task_runnable(p));
1986 		goto out;
1987 	}
1988 
1989 	set_task_runnable(rq, p);
1990 	p->scx.flags |= SCX_TASK_QUEUED;
1991 	rq->scx.nr_running++;
1992 	add_nr_running(rq, 1);
1993 
1994 	if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
1995 		SCX_CALL_OP_TASK(sch, runnable, rq, p, enq_flags);
1996 
1997 	if (enq_flags & SCX_ENQ_WAKEUP)
1998 		touch_core_sched(rq, p);
1999 
2000 	/* Start dl_server if this is the first task being enqueued */
2001 	if (rq->scx.nr_running == 1)
2002 		dl_server_start(&rq->ext_server);
2003 
2004 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2005 
2006 	if (sticky_cpu >= 0)
2007 		p->scx.sticky_cpu = -1;
2008 out:
2009 	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2010 
2011 	if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
2012 	    unlikely(cpu_of(rq) != p->scx.selected_cpu))
2013 		__scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
2014 }
2015 
2016 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
2017 {
2018 	struct scx_sched *sch = scx_task_sched(p);
2019 	unsigned long opss;
2020 
2021 	/* dequeue is always temporary, don't reset runnable_at */
2022 	clr_task_runnable(p, false);
2023 
2024 	/* acquire ensures that we see the preceding updates on QUEUED */
2025 	opss = atomic_long_read_acquire(&p->scx.ops_state);
2026 
2027 	switch (opss & SCX_OPSS_STATE_MASK) {
2028 	case SCX_OPSS_NONE:
2029 		break;
2030 	case SCX_OPSS_QUEUEING:
2031 		/*
2032 		 * QUEUEING is started and finished while holding @p's rq lock.
2033 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2034 		 */
2035 		BUG();
2036 	case SCX_OPSS_QUEUED:
2037 		/* A queued task must always be in BPF scheduler's custody */
2038 		WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_IN_CUSTODY));
2039 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2040 					    SCX_OPSS_NONE))
2041 			break;
2042 		fallthrough;
2043 	case SCX_OPSS_DISPATCHING:
2044 		/*
2045 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
2046 		 * wait for the transfer to complete so that @p doesn't get
2047 		 * added to its DSQ after dequeueing is complete.
2048 		 *
2049 		 * As we're waiting on DISPATCHING with the rq locked, the
2050 		 * dispatching side shouldn't try to lock the rq while
2051 		 * DISPATCHING is set. See dispatch_to_local_dsq().
2052 		 *
2053 		 * DISPATCHING shouldn't have qseq set and control can reach
2054 		 * here with NONE @opss from the above QUEUED case block.
2055 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2056 		 */
2057 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
2058 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2059 		break;
2060 	}
2061 
2062 	/*
2063 	 * Call ops.dequeue() if the task is still in BPF custody.
2064 	 *
2065 	 * The code that clears ops_state to %SCX_OPSS_NONE does not always
2066 	 * clear %SCX_TASK_IN_CUSTODY: in dispatch_to_local_dsq(), when
2067 	 * we're moving a task that was in %SCX_OPSS_DISPATCHING to a
2068 	 * remote CPU's local DSQ, we only set ops_state to %SCX_OPSS_NONE
2069 	 * so that a concurrent dequeue can proceed, but we clear
2070 	 * %SCX_TASK_IN_CUSTODY only when we later enqueue or move the
2071 	 * task. So we can see NONE + IN_CUSTODY here and we must handle
2072 	 * it. Similarly, after waiting on %SCX_OPSS_DISPATCHING we see
2073 	 * NONE but the task may still have %SCX_TASK_IN_CUSTODY set until
2074 	 * it is enqueued on the destination.
2075 	 */
2076 	call_task_dequeue(sch, rq, p, deq_flags);
2077 }
2078 
2079 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int core_deq_flags)
2080 {
2081 	struct scx_sched *sch = scx_task_sched(p);
2082 	u64 deq_flags = core_deq_flags;
2083 
2084 	/*
2085 	 * Set %SCX_DEQ_SCHED_CHANGE when the dequeue is due to a property
2086 	 * change (not sleep or core-sched pick).
2087 	 */
2088 	if (!(deq_flags & (DEQUEUE_SLEEP | SCX_DEQ_CORE_SCHED_EXEC)))
2089 		deq_flags |= SCX_DEQ_SCHED_CHANGE;
2090 
2091 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2092 		WARN_ON_ONCE(task_runnable(p));
2093 		return true;
2094 	}
2095 
2096 	ops_dequeue(rq, p, deq_flags);
2097 
2098 	/*
2099 	 * A currently running task which is going off @rq first gets dequeued
2100 	 * and then stops running. As we want running <-> stopping transitions
2101 	 * to be contained within runnable <-> quiescent transitions, trigger
2102 	 * ->stopping() early here instead of in put_prev_task_scx().
2103 	 *
2104 	 * @p may go through multiple stopping <-> running transitions between
2105 	 * here and put_prev_task_scx() if task attribute changes occur while
2106 	 * balance_one() leaves @rq unlocked. However, they don't contain any
2107 	 * information meaningful to the BPF scheduler and can be suppressed by
2108 	 * skipping the callbacks if the task is !QUEUED.
2109 	 */
2110 	if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
2111 		update_curr_scx(rq);
2112 		SCX_CALL_OP_TASK(sch, stopping, rq, p, false);
2113 	}
2114 
2115 	if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
2116 		SCX_CALL_OP_TASK(sch, quiescent, rq, p, deq_flags);
2117 
2118 	if (deq_flags & SCX_DEQ_SLEEP)
2119 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2120 	else
2121 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2122 
2123 	p->scx.flags &= ~SCX_TASK_QUEUED;
2124 	rq->scx.nr_running--;
2125 	sub_nr_running(rq, 1);
2126 
2127 	dispatch_dequeue(rq, p);
2128 	clear_direct_dispatch(p);
2129 	return true;
2130 }
2131 
2132 static void yield_task_scx(struct rq *rq)
2133 {
2134 	struct task_struct *p = rq->donor;
2135 	struct scx_sched *sch = scx_task_sched(p);
2136 
2137 	if (SCX_HAS_OP(sch, yield))
2138 		SCX_CALL_OP_2TASKS_RET(sch, yield, rq, p, NULL);
2139 	else
2140 		p->scx.slice = 0;
2141 }
2142 
2143 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2144 {
2145 	struct task_struct *from = rq->donor;
2146 	struct scx_sched *sch = scx_task_sched(from);
2147 
2148 	if (SCX_HAS_OP(sch, yield) && sch == scx_task_sched(to))
2149 		return SCX_CALL_OP_2TASKS_RET(sch, yield, rq, from, to);
2150 	else
2151 		return false;
2152 }
2153 
2154 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags)
2155 {
2156 	/*
2157 	 * Preemption between SCX tasks is implemented by resetting the victim
2158 	 * task's slice to 0 and triggering reschedule on the target CPU.
2159 	 * Nothing to do.
2160 	 */
2161 	if (p->sched_class == &ext_sched_class)
2162 		return;
2163 
2164 	/*
2165 	 * Getting preempted by a higher-priority class. Reenqueue IMMED tasks.
2166 	 * This captures all preemption cases including:
2167 	 *
2168 	 * - A SCX task is currently running.
2169 	 *
2170 	 * - @rq is waking from idle due to a SCX task waking to it.
2171 	 *
2172 	 * - A higher-priority wakes up while SCX dispatch is in progress.
2173 	 */
2174 	if (rq->scx.nr_immed)
2175 		schedule_reenq_local(rq, 0);
2176 }
2177 
2178 static void move_local_task_to_local_dsq(struct scx_sched *sch,
2179 					 struct task_struct *p, u64 enq_flags,
2180 					 struct scx_dispatch_q *src_dsq,
2181 					 struct rq *dst_rq)
2182 {
2183 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2184 
2185 	/* @dsq is locked and @p is on @dst_rq */
2186 	lockdep_assert_held(&src_dsq->lock);
2187 	lockdep_assert_rq_held(dst_rq);
2188 
2189 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2190 
2191 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2192 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2193 	else
2194 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2195 
2196 	dsq_inc_nr(dst_dsq, p, enq_flags);
2197 	p->scx.dsq = dst_dsq;
2198 
2199 	local_dsq_post_enq(sch, dst_dsq, p, enq_flags);
2200 }
2201 
2202 /**
2203  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2204  * @p: task to move
2205  * @enq_flags: %SCX_ENQ_*
2206  * @src_rq: rq to move the task from, locked on entry, released on return
2207  * @dst_rq: rq to move the task into, locked on return
2208  *
2209  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2210  */
2211 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2212 					  struct rq *src_rq, struct rq *dst_rq)
2213 {
2214 	lockdep_assert_rq_held(src_rq);
2215 
2216 	/*
2217 	 * Set sticky_cpu before deactivate_task() to properly mark the
2218 	 * beginning of an SCX-internal migration.
2219 	 */
2220 	p->scx.sticky_cpu = cpu_of(dst_rq);
2221 	deactivate_task(src_rq, p, 0);
2222 	set_task_cpu(p, cpu_of(dst_rq));
2223 
2224 	raw_spin_rq_unlock(src_rq);
2225 	raw_spin_rq_lock(dst_rq);
2226 
2227 	/*
2228 	 * We want to pass scx-specific enq_flags but activate_task() will
2229 	 * truncate the upper 32 bit. As we own @rq, we can pass them through
2230 	 * @rq->scx.extra_enq_flags instead.
2231 	 */
2232 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2233 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2234 	dst_rq->scx.extra_enq_flags = enq_flags;
2235 	activate_task(dst_rq, p, 0);
2236 	dst_rq->scx.extra_enq_flags = 0;
2237 }
2238 
2239 /*
2240  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2241  * differences:
2242  *
2243  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2244  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2245  *   this CPU?".
2246  *
2247  *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2248  *   must be allowed to finish on the CPU that it's currently on regardless of
2249  *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2250  *   BPF scheduler shouldn't attempt to migrate a task which has migration
2251  *   disabled.
2252  *
2253  * - The BPF scheduler is bypassed while the rq is offline and we can always say
2254  *   no to the BPF scheduler initiated migrations while offline.
2255  *
2256  * The caller must ensure that @p and @rq are on different CPUs.
2257  */
2258 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
2259 				      struct task_struct *p, struct rq *rq,
2260 				      bool enforce)
2261 {
2262 	s32 cpu = cpu_of(rq);
2263 
2264 	WARN_ON_ONCE(task_cpu(p) == cpu);
2265 
2266 	/*
2267 	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2268 	 * the pinned CPU in migrate_disable_switch() while @p is being switched
2269 	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2270 	 * updated and thus another CPU may see @p on a DSQ inbetween leading to
2271 	 * @p passing the below task_allowed_on_cpu() check while migration is
2272 	 * disabled.
2273 	 *
2274 	 * Test the migration disabled state first as the race window is narrow
2275 	 * and the BPF scheduler failing to check migration disabled state can
2276 	 * easily be masked if task_allowed_on_cpu() is done first.
2277 	 */
2278 	if (unlikely(is_migration_disabled(p))) {
2279 		if (enforce)
2280 			scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2281 				  p->comm, p->pid, task_cpu(p), cpu);
2282 		return false;
2283 	}
2284 
2285 	/*
2286 	 * We don't require the BPF scheduler to avoid dispatching to offline
2287 	 * CPUs mostly for convenience but also because CPUs can go offline
2288 	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2289 	 * picked CPU is outside the allowed mask.
2290 	 */
2291 	if (!task_allowed_on_cpu(p, cpu)) {
2292 		if (enforce)
2293 			scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2294 				  cpu, p->comm, p->pid);
2295 		return false;
2296 	}
2297 
2298 	if (!scx_rq_online(rq)) {
2299 		if (enforce)
2300 			__scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
2301 		return false;
2302 	}
2303 
2304 	return true;
2305 }
2306 
2307 /**
2308  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2309  * @p: target task
2310  * @dsq: locked DSQ @p is currently on
2311  * @src_rq: rq @p is currently on, stable with @dsq locked
2312  *
2313  * Called with @dsq locked but no rq's locked. We want to move @p to a different
2314  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2315  * required when transferring into a local DSQ. Even when transferring into a
2316  * non-local DSQ, it's better to use the same mechanism to protect against
2317  * dequeues and maintain the invariant that @p->scx.dsq can only change while
2318  * @src_rq is locked, which e.g. scx_dump_task() depends on.
2319  *
2320  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2321  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2322  * this may race with dequeue, which can't drop the rq lock or fail, do a little
2323  * dancing from our side.
2324  *
2325  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2326  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2327  * would be cleared to -1. While other cpus may have updated it to different
2328  * values afterwards, as this operation can't be preempted or recurse, the
2329  * holding_cpu can never become this CPU again before we're done. Thus, we can
2330  * tell whether we lost to dequeue by testing whether the holding_cpu still
2331  * points to this CPU. See dispatch_dequeue() for the counterpart.
2332  *
2333  * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2334  * still valid. %false if lost to dequeue.
2335  */
2336 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2337 				       struct scx_dispatch_q *dsq,
2338 				       struct rq *src_rq)
2339 {
2340 	s32 cpu = raw_smp_processor_id();
2341 
2342 	lockdep_assert_held(&dsq->lock);
2343 
2344 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2345 	task_unlink_from_dsq(p, dsq);
2346 	p->scx.holding_cpu = cpu;
2347 
2348 	raw_spin_unlock(&dsq->lock);
2349 	raw_spin_rq_lock(src_rq);
2350 
2351 	/* task_rq couldn't have changed if we're still the holding cpu */
2352 	return likely(p->scx.holding_cpu == cpu) &&
2353 		!WARN_ON_ONCE(src_rq != task_rq(p));
2354 }
2355 
2356 static bool consume_remote_task(struct rq *this_rq,
2357 				struct task_struct *p, u64 enq_flags,
2358 				struct scx_dispatch_q *dsq, struct rq *src_rq)
2359 {
2360 	raw_spin_rq_unlock(this_rq);
2361 
2362 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2363 		move_remote_task_to_local_dsq(p, enq_flags, src_rq, this_rq);
2364 		return true;
2365 	} else {
2366 		raw_spin_rq_unlock(src_rq);
2367 		raw_spin_rq_lock(this_rq);
2368 		return false;
2369 	}
2370 }
2371 
2372 /**
2373  * move_task_between_dsqs() - Move a task from one DSQ to another
2374  * @sch: scx_sched being operated on
2375  * @p: target task
2376  * @enq_flags: %SCX_ENQ_*
2377  * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2378  * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2379  *
2380  * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2381  * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2382  * will change. As @p's task_rq is locked, this function doesn't need to use the
2383  * holding_cpu mechanism.
2384  *
2385  * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2386  * return value, is locked.
2387  */
2388 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
2389 					 struct task_struct *p, u64 enq_flags,
2390 					 struct scx_dispatch_q *src_dsq,
2391 					 struct scx_dispatch_q *dst_dsq)
2392 {
2393 	struct rq *src_rq = task_rq(p), *dst_rq;
2394 
2395 	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2396 	lockdep_assert_held(&src_dsq->lock);
2397 	lockdep_assert_rq_held(src_rq);
2398 
2399 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2400 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2401 		if (src_rq != dst_rq &&
2402 		    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2403 			dst_dsq = find_global_dsq(sch, task_cpu(p));
2404 			dst_rq = src_rq;
2405 			enq_flags |= SCX_ENQ_GDSQ_FALLBACK;
2406 		}
2407 	} else {
2408 		/* no need to migrate if destination is a non-local DSQ */
2409 		dst_rq = src_rq;
2410 	}
2411 
2412 	/*
2413 	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
2414 	 * CPU, @p will be migrated.
2415 	 */
2416 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2417 		/* @p is going from a non-local DSQ to a local DSQ */
2418 		if (src_rq == dst_rq) {
2419 			task_unlink_from_dsq(p, src_dsq);
2420 			move_local_task_to_local_dsq(sch, p, enq_flags,
2421 						     src_dsq, dst_rq);
2422 			raw_spin_unlock(&src_dsq->lock);
2423 		} else {
2424 			raw_spin_unlock(&src_dsq->lock);
2425 			move_remote_task_to_local_dsq(p, enq_flags,
2426 						      src_rq, dst_rq);
2427 		}
2428 	} else {
2429 		/*
2430 		 * @p is going from a non-local DSQ to a non-local DSQ. As
2431 		 * $src_dsq is already locked, do an abbreviated dequeue.
2432 		 */
2433 		dispatch_dequeue_locked(p, src_dsq);
2434 		raw_spin_unlock(&src_dsq->lock);
2435 
2436 		dispatch_enqueue(sch, dst_rq, dst_dsq, p, enq_flags);
2437 	}
2438 
2439 	return dst_rq;
2440 }
2441 
2442 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
2443 			       struct scx_dispatch_q *dsq, u64 enq_flags)
2444 {
2445 	struct task_struct *p;
2446 retry:
2447 	/*
2448 	 * The caller can't expect to successfully consume a task if the task's
2449 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
2450 	 * @dsq->list without locking and skip if it seems empty.
2451 	 */
2452 	if (list_empty(&dsq->list))
2453 		return false;
2454 
2455 	raw_spin_lock(&dsq->lock);
2456 
2457 	nldsq_for_each_task(p, dsq) {
2458 		struct rq *task_rq = task_rq(p);
2459 
2460 		/*
2461 		 * This loop can lead to multiple lockup scenarios, e.g. the BPF
2462 		 * scheduler can put an enormous number of affinitized tasks into
2463 		 * a contended DSQ, or the outer retry loop can repeatedly race
2464 		 * against scx_bypass() dequeueing tasks from @dsq trying to put
2465 		 * the system into the bypass mode. This can easily live-lock the
2466 		 * machine. If aborting, exit from all non-bypass DSQs.
2467 		 */
2468 		if (unlikely(READ_ONCE(sch->aborting)) && dsq->id != SCX_DSQ_BYPASS)
2469 			break;
2470 
2471 		if (rq == task_rq) {
2472 			task_unlink_from_dsq(p, dsq);
2473 			move_local_task_to_local_dsq(sch, p, enq_flags, dsq, rq);
2474 			raw_spin_unlock(&dsq->lock);
2475 			return true;
2476 		}
2477 
2478 		if (task_can_run_on_remote_rq(sch, p, rq, false)) {
2479 			if (likely(consume_remote_task(rq, p, enq_flags, dsq, task_rq)))
2480 				return true;
2481 			goto retry;
2482 		}
2483 	}
2484 
2485 	raw_spin_unlock(&dsq->lock);
2486 	return false;
2487 }
2488 
2489 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
2490 {
2491 	int node = cpu_to_node(cpu_of(rq));
2492 
2493 	return consume_dispatch_q(sch, rq, &sch->pnode[node]->global_dsq, 0);
2494 }
2495 
2496 /**
2497  * dispatch_to_local_dsq - Dispatch a task to a local dsq
2498  * @sch: scx_sched being operated on
2499  * @rq: current rq which is locked
2500  * @dst_dsq: destination DSQ
2501  * @p: task to dispatch
2502  * @enq_flags: %SCX_ENQ_*
2503  *
2504  * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2505  * DSQ. This function performs all the synchronization dancing needed because
2506  * local DSQs are protected with rq locks.
2507  *
2508  * The caller must have exclusive ownership of @p (e.g. through
2509  * %SCX_OPSS_DISPATCHING).
2510  */
2511 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
2512 				  struct scx_dispatch_q *dst_dsq,
2513 				  struct task_struct *p, u64 enq_flags)
2514 {
2515 	struct rq *src_rq = task_rq(p);
2516 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2517 	struct rq *locked_rq = rq;
2518 
2519 	/*
2520 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
2521 	 * be dequeued, its task_rq and cpus_allowed are stable too.
2522 	 *
2523 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
2524 	 */
2525 	if (rq == src_rq && rq == dst_rq) {
2526 		dispatch_enqueue(sch, rq, dst_dsq, p,
2527 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
2528 		return;
2529 	}
2530 
2531 	if (src_rq != dst_rq &&
2532 	    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2533 		dispatch_enqueue(sch, rq, find_global_dsq(sch, task_cpu(p)), p,
2534 				 enq_flags | SCX_ENQ_CLEAR_OPSS | SCX_ENQ_GDSQ_FALLBACK);
2535 		return;
2536 	}
2537 
2538 	/*
2539 	 * @p is on a possibly remote @src_rq which we need to lock to move the
2540 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2541 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
2542 	 * DISPATCHING.
2543 	 *
2544 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2545 	 * we're moving from a DSQ and use the same mechanism - mark the task
2546 	 * under transfer with holding_cpu, release DISPATCHING and then follow
2547 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
2548 	 */
2549 	p->scx.holding_cpu = raw_smp_processor_id();
2550 
2551 	/* store_release ensures that dequeue sees the above */
2552 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2553 
2554 	/* switch to @src_rq lock */
2555 	if (locked_rq != src_rq) {
2556 		raw_spin_rq_unlock(locked_rq);
2557 		locked_rq = src_rq;
2558 		raw_spin_rq_lock(src_rq);
2559 	}
2560 
2561 	/* task_rq couldn't have changed if we're still the holding cpu */
2562 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2563 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
2564 		/*
2565 		 * If @p is staying on the same rq, there's no need to go
2566 		 * through the full deactivate/activate cycle. Optimize by
2567 		 * abbreviating move_remote_task_to_local_dsq().
2568 		 */
2569 		if (src_rq == dst_rq) {
2570 			p->scx.holding_cpu = -1;
2571 			dispatch_enqueue(sch, dst_rq, &dst_rq->scx.local_dsq, p,
2572 					 enq_flags);
2573 		} else {
2574 			move_remote_task_to_local_dsq(p, enq_flags,
2575 						      src_rq, dst_rq);
2576 			/* task has been moved to dst_rq, which is now locked */
2577 			locked_rq = dst_rq;
2578 		}
2579 
2580 		/* if the destination CPU is idle, wake it up */
2581 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2582 			resched_curr(dst_rq);
2583 	}
2584 
2585 	/* switch back to @rq lock */
2586 	if (locked_rq != rq) {
2587 		raw_spin_rq_unlock(locked_rq);
2588 		raw_spin_rq_lock(rq);
2589 	}
2590 }
2591 
2592 /**
2593  * finish_dispatch - Asynchronously finish dispatching a task
2594  * @rq: current rq which is locked
2595  * @p: task to finish dispatching
2596  * @qseq_at_dispatch: qseq when @p started getting dispatched
2597  * @dsq_id: destination DSQ ID
2598  * @enq_flags: %SCX_ENQ_*
2599  *
2600  * Dispatching to local DSQs may need to wait for queueing to complete or
2601  * require rq lock dancing. As we don't wanna do either while inside
2602  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2603  * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2604  * task and its qseq. Once ops.dispatch() returns, this function is called to
2605  * finish up.
2606  *
2607  * There is no guarantee that @p is still valid for dispatching or even that it
2608  * was valid in the first place. Make sure that the task is still owned by the
2609  * BPF scheduler and claim the ownership before dispatching.
2610  */
2611 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
2612 			    struct task_struct *p,
2613 			    unsigned long qseq_at_dispatch,
2614 			    u64 dsq_id, u64 enq_flags)
2615 {
2616 	struct scx_dispatch_q *dsq;
2617 	unsigned long opss;
2618 
2619 	touch_core_sched_dispatch(rq, p);
2620 retry:
2621 	/*
2622 	 * No need for _acquire here. @p is accessed only after a successful
2623 	 * try_cmpxchg to DISPATCHING.
2624 	 */
2625 	opss = atomic_long_read(&p->scx.ops_state);
2626 
2627 	switch (opss & SCX_OPSS_STATE_MASK) {
2628 	case SCX_OPSS_DISPATCHING:
2629 	case SCX_OPSS_NONE:
2630 		/* someone else already got to it */
2631 		return;
2632 	case SCX_OPSS_QUEUED:
2633 		/*
2634 		 * If qseq doesn't match, @p has gone through at least one
2635 		 * dispatch/dequeue and re-enqueue cycle between
2636 		 * scx_bpf_dsq_insert() and here and we have no claim on it.
2637 		 */
2638 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2639 			return;
2640 
2641 		/* see SCX_EV_INSERT_NOT_OWNED definition */
2642 		if (unlikely(!scx_task_on_sched(sch, p))) {
2643 			__scx_add_event(sch, SCX_EV_INSERT_NOT_OWNED, 1);
2644 			return;
2645 		}
2646 
2647 		/*
2648 		 * While we know @p is accessible, we don't yet have a claim on
2649 		 * it - the BPF scheduler is allowed to dispatch tasks
2650 		 * spuriously and there can be a racing dequeue attempt. Let's
2651 		 * claim @p by atomically transitioning it from QUEUED to
2652 		 * DISPATCHING.
2653 		 */
2654 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2655 						   SCX_OPSS_DISPATCHING)))
2656 			break;
2657 		goto retry;
2658 	case SCX_OPSS_QUEUEING:
2659 		/*
2660 		 * do_enqueue_task() is in the process of transferring the task
2661 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
2662 		 * holding any kernel or BPF resource that the enqueue path may
2663 		 * depend upon, it's safe to wait.
2664 		 */
2665 		wait_ops_state(p, opss);
2666 		goto retry;
2667 	}
2668 
2669 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2670 
2671 	dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, task_cpu(p));
2672 
2673 	if (dsq->id == SCX_DSQ_LOCAL)
2674 		dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
2675 	else
2676 		dispatch_enqueue(sch, rq, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2677 }
2678 
2679 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
2680 {
2681 	struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
2682 	u32 u;
2683 
2684 	for (u = 0; u < dspc->cursor; u++) {
2685 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2686 
2687 		finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
2688 				ent->enq_flags);
2689 	}
2690 
2691 	dspc->nr_tasks += dspc->cursor;
2692 	dspc->cursor = 0;
2693 }
2694 
2695 static inline void maybe_queue_balance_callback(struct rq *rq)
2696 {
2697 	lockdep_assert_rq_held(rq);
2698 
2699 	if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
2700 		return;
2701 
2702 	queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
2703 				deferred_bal_cb_workfn);
2704 
2705 	rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
2706 }
2707 
2708 /*
2709  * One user of this function is scx_bpf_dispatch() which can be called
2710  * recursively as sub-sched dispatches nest. Always inline to reduce stack usage
2711  * from the call frame.
2712  */
2713 static __always_inline bool
2714 scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
2715 		   struct task_struct *prev, bool nested)
2716 {
2717 	struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
2718 	int nr_loops = SCX_DSP_MAX_LOOPS;
2719 	s32 cpu = cpu_of(rq);
2720 	bool prev_on_sch = (prev->sched_class == &ext_sched_class) &&
2721 		scx_task_on_sched(sch, prev);
2722 
2723 	if (consume_global_dsq(sch, rq))
2724 		return true;
2725 
2726 	if (bypass_dsp_enabled(sch)) {
2727 		/* if @sch is bypassing, only the bypass DSQs are active */
2728 		if (scx_bypassing(sch, cpu))
2729 			return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0);
2730 
2731 #ifdef CONFIG_EXT_SUB_SCHED
2732 		/*
2733 		 * If @sch isn't bypassing but its children are, @sch is
2734 		 * responsible for making forward progress for both its own
2735 		 * tasks that aren't bypassing and the bypassing descendants'
2736 		 * tasks. The following implements a simple built-in behavior -
2737 		 * let each CPU try to run the bypass DSQ every Nth time.
2738 		 *
2739 		 * Later, if necessary, we can add an ops flag to suppress the
2740 		 * auto-consumption and a kfunc to consume the bypass DSQ and,
2741 		 * so that the BPF scheduler can fully control scheduling of
2742 		 * bypassed tasks.
2743 		 */
2744 		struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
2745 
2746 		if (!(pcpu->bypass_host_seq++ % SCX_BYPASS_HOST_NTH) &&
2747 		    consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0)) {
2748 			__scx_add_event(sch, SCX_EV_SUB_BYPASS_DISPATCH, 1);
2749 			return true;
2750 		}
2751 #endif	/* CONFIG_EXT_SUB_SCHED */
2752 	}
2753 
2754 	if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq))
2755 		return false;
2756 
2757 	dspc->rq = rq;
2758 
2759 	/*
2760 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2761 	 * the local DSQ might still end up empty after a successful
2762 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2763 	 * produced some tasks, retry. The BPF scheduler may depend on this
2764 	 * looping behavior to simplify its implementation.
2765 	 */
2766 	do {
2767 		dspc->nr_tasks = 0;
2768 
2769 		if (nested) {
2770 			SCX_CALL_OP(sch, dispatch, rq, cpu, prev_on_sch ? prev : NULL);
2771 		} else {
2772 			/* stash @prev so that nested invocations can access it */
2773 			rq->scx.sub_dispatch_prev = prev;
2774 			SCX_CALL_OP(sch, dispatch, rq, cpu, prev_on_sch ? prev : NULL);
2775 			rq->scx.sub_dispatch_prev = NULL;
2776 		}
2777 
2778 		flush_dispatch_buf(sch, rq);
2779 
2780 		if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice) {
2781 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2782 			return true;
2783 		}
2784 		if (rq->scx.local_dsq.nr)
2785 			return true;
2786 		if (consume_global_dsq(sch, rq))
2787 			return true;
2788 
2789 		/*
2790 		 * ops.dispatch() can trap us in this loop by repeatedly
2791 		 * dispatching ineligible tasks. Break out once in a while to
2792 		 * allow the watchdog to run. As IRQ can't be enabled in
2793 		 * balance(), we want to complete this scheduling cycle and then
2794 		 * start a new one. IOW, we want to call resched_curr() on the
2795 		 * next, most likely idle, task, not the current one. Use
2796 		 * __scx_bpf_kick_cpu() for deferred kicking.
2797 		 */
2798 		if (unlikely(!--nr_loops)) {
2799 			scx_kick_cpu(sch, cpu, 0);
2800 			break;
2801 		}
2802 	} while (dspc->nr_tasks);
2803 
2804 	/*
2805 	 * Prevent the CPU from going idle while bypassed descendants have tasks
2806 	 * queued. Without this fallback, bypassed tasks could stall if the host
2807 	 * scheduler's ops.dispatch() doesn't yield any tasks.
2808 	 */
2809 	if (bypass_dsp_enabled(sch))
2810 		return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0);
2811 
2812 	return false;
2813 }
2814 
2815 static int balance_one(struct rq *rq, struct task_struct *prev)
2816 {
2817 	struct scx_sched *sch = scx_root;
2818 	s32 cpu = cpu_of(rq);
2819 
2820 	lockdep_assert_rq_held(rq);
2821 	rq->scx.flags |= SCX_RQ_IN_BALANCE;
2822 	rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
2823 
2824 	if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
2825 	    unlikely(rq->scx.cpu_released)) {
2826 		/*
2827 		 * If the previous sched_class for the current CPU was not SCX,
2828 		 * notify the BPF scheduler that it again has control of the
2829 		 * core. This callback complements ->cpu_release(), which is
2830 		 * emitted in switch_class().
2831 		 */
2832 		if (SCX_HAS_OP(sch, cpu_acquire))
2833 			SCX_CALL_OP(sch, cpu_acquire, rq, cpu, NULL);
2834 		rq->scx.cpu_released = false;
2835 	}
2836 
2837 	if (prev->sched_class == &ext_sched_class) {
2838 		update_curr_scx(rq);
2839 
2840 		/*
2841 		 * If @prev is runnable & has slice left, it has priority and
2842 		 * fetching more just increases latency for the fetched tasks.
2843 		 * Tell pick_task_scx() to keep running @prev. If the BPF
2844 		 * scheduler wants to handle this explicitly, it should
2845 		 * implement ->cpu_release().
2846 		 *
2847 		 * See scx_disable_workfn() for the explanation on the bypassing
2848 		 * test.
2849 		 */
2850 		if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice &&
2851 		    !scx_bypassing(sch, cpu)) {
2852 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2853 			goto has_tasks;
2854 		}
2855 	}
2856 
2857 	/* if there already are tasks to run, nothing to do */
2858 	if (rq->scx.local_dsq.nr)
2859 		goto has_tasks;
2860 
2861 	if (scx_dispatch_sched(sch, rq, prev, false))
2862 		goto has_tasks;
2863 
2864 	/*
2865 	 * Didn't find another task to run. Keep running @prev unless
2866 	 * %SCX_OPS_ENQ_LAST is in effect.
2867 	 */
2868 	if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2869 	    (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_bypassing(sch, cpu))) {
2870 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
2871 		__scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
2872 		goto has_tasks;
2873 	}
2874 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2875 	return false;
2876 
2877 has_tasks:
2878 	/*
2879 	 * @rq may have extra IMMED tasks without reenq scheduled:
2880 	 *
2881 	 * - rq_is_open() can't reliably tell when and how slice is going to be
2882 	 *   modified for $curr and allows IMMED tasks to be queued while
2883 	 *   dispatch is in progress.
2884 	 *
2885 	 * - A non-IMMED HEAD task can get queued in front of an IMMED task
2886 	 *   between the IMMED queueing and the subsequent scheduling event.
2887 	 */
2888 	if (unlikely(rq->scx.local_dsq.nr > 1 && rq->scx.nr_immed))
2889 		schedule_reenq_local(rq, 0);
2890 
2891 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2892 	return true;
2893 }
2894 
2895 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2896 {
2897 	struct scx_sched *sch = scx_task_sched(p);
2898 
2899 	if (p->scx.flags & SCX_TASK_QUEUED) {
2900 		/*
2901 		 * Core-sched might decide to execute @p before it is
2902 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2903 		 */
2904 		ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
2905 		dispatch_dequeue(rq, p);
2906 	}
2907 
2908 	p->se.exec_start = rq_clock_task(rq);
2909 
2910 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2911 	if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
2912 		SCX_CALL_OP_TASK(sch, running, rq, p);
2913 
2914 	clr_task_runnable(p, true);
2915 
2916 	/*
2917 	 * @p is getting newly scheduled or got kicked after someone updated its
2918 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2919 	 */
2920 	if ((p->scx.slice == SCX_SLICE_INF) !=
2921 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2922 		if (p->scx.slice == SCX_SLICE_INF)
2923 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2924 		else
2925 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2926 
2927 		sched_update_tick_dependency(rq);
2928 
2929 		/*
2930 		 * For now, let's refresh the load_avgs just when transitioning
2931 		 * in and out of nohz. In the future, we might want to add a
2932 		 * mechanism which calls the following periodically on
2933 		 * tick-stopped CPUs.
2934 		 */
2935 		update_other_load_avgs(rq);
2936 	}
2937 }
2938 
2939 static enum scx_cpu_preempt_reason
2940 preempt_reason_from_class(const struct sched_class *class)
2941 {
2942 	if (class == &stop_sched_class)
2943 		return SCX_CPU_PREEMPT_STOP;
2944 	if (class == &dl_sched_class)
2945 		return SCX_CPU_PREEMPT_DL;
2946 	if (class == &rt_sched_class)
2947 		return SCX_CPU_PREEMPT_RT;
2948 	return SCX_CPU_PREEMPT_UNKNOWN;
2949 }
2950 
2951 static void switch_class(struct rq *rq, struct task_struct *next)
2952 {
2953 	struct scx_sched *sch = scx_root;
2954 	const struct sched_class *next_class = next->sched_class;
2955 
2956 	if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
2957 		return;
2958 
2959 	/*
2960 	 * The callback is conceptually meant to convey that the CPU is no
2961 	 * longer under the control of SCX. Therefore, don't invoke the callback
2962 	 * if the next class is below SCX (in which case the BPF scheduler has
2963 	 * actively decided not to schedule any tasks on the CPU).
2964 	 */
2965 	if (sched_class_above(&ext_sched_class, next_class))
2966 		return;
2967 
2968 	/*
2969 	 * At this point we know that SCX was preempted by a higher priority
2970 	 * sched_class, so invoke the ->cpu_release() callback if we have not
2971 	 * done so already. We only send the callback once between SCX being
2972 	 * preempted, and it regaining control of the CPU.
2973 	 *
2974 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2975 	 *  next time that balance_one() is invoked.
2976 	 */
2977 	if (!rq->scx.cpu_released) {
2978 		if (SCX_HAS_OP(sch, cpu_release)) {
2979 			struct scx_cpu_release_args args = {
2980 				.reason = preempt_reason_from_class(next_class),
2981 				.task = next,
2982 			};
2983 
2984 			SCX_CALL_OP(sch, cpu_release, rq, cpu_of(rq), &args);
2985 		}
2986 		rq->scx.cpu_released = true;
2987 	}
2988 }
2989 
2990 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
2991 			      struct task_struct *next)
2992 {
2993 	struct scx_sched *sch = scx_task_sched(p);
2994 
2995 	/* see kick_sync_wait_bal_cb() */
2996 	smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
2997 
2998 	update_curr_scx(rq);
2999 
3000 	/* see dequeue_task_scx() on why we skip when !QUEUED */
3001 	if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3002 		SCX_CALL_OP_TASK(sch, stopping, rq, p, true);
3003 
3004 	if (p->scx.flags & SCX_TASK_QUEUED) {
3005 		set_task_runnable(rq, p);
3006 
3007 		/*
3008 		 * If @p has slice left and is being put, @p is getting
3009 		 * preempted by a higher priority scheduler class or core-sched
3010 		 * forcing a different task. Leave it at the head of the local
3011 		 * DSQ unless it was an IMMED task. IMMED tasks should not
3012 		 * linger on a busy CPU, reenqueue them to the BPF scheduler.
3013 		 */
3014 		if (p->scx.slice && !scx_bypassing(sch, cpu_of(rq))) {
3015 			if (p->scx.flags & SCX_TASK_IMMED) {
3016 				p->scx.flags |= SCX_TASK_REENQ_PREEMPTED;
3017 				do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
3018 				p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
3019 			} else {
3020 				dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3021 			}
3022 			goto switch_class;
3023 		}
3024 
3025 		/*
3026 		 * If @p is runnable but we're about to enter a lower
3027 		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3028 		 * ops.enqueue() that @p is the only one available for this cpu,
3029 		 * which should trigger an explicit follow-up scheduling event.
3030 		 */
3031 		if (next && sched_class_above(&ext_sched_class, next->sched_class)) {
3032 			WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
3033 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3034 		} else {
3035 			do_enqueue_task(rq, p, 0, -1);
3036 		}
3037 	}
3038 
3039 switch_class:
3040 	if (next && next->sched_class != &ext_sched_class)
3041 		switch_class(rq, next);
3042 }
3043 
3044 static void kick_sync_wait_bal_cb(struct rq *rq)
3045 {
3046 	struct scx_kick_syncs __rcu *ks = __this_cpu_read(scx_kick_syncs);
3047 	unsigned long *ksyncs = rcu_dereference_sched(ks)->syncs;
3048 	bool waited;
3049 	s32 cpu;
3050 
3051 	/*
3052 	 * Drop rq lock and enable IRQs while waiting. IRQs must be enabled
3053 	 * — a target CPU may be waiting for us to process an IPI (e.g. TLB
3054 	 * flush) while we wait for its kick_sync to advance.
3055 	 *
3056 	 * Also, keep advancing our own kick_sync so that new kick_sync waits
3057 	 * targeting us, which can start after we drop the lock, cannot form
3058 	 * cyclic dependencies.
3059 	 */
3060 retry:
3061 	waited = false;
3062 	for_each_cpu(cpu, rq->scx.cpus_to_sync) {
3063 		/*
3064 		 * smp_load_acquire() pairs with smp_store_release() on
3065 		 * kick_sync updates on the target CPUs.
3066 		 */
3067 		if (cpu == cpu_of(rq) ||
3068 		    smp_load_acquire(&cpu_rq(cpu)->scx.kick_sync) != ksyncs[cpu]) {
3069 			cpumask_clear_cpu(cpu, rq->scx.cpus_to_sync);
3070 			continue;
3071 		}
3072 
3073 		raw_spin_rq_unlock_irq(rq);
3074 		while (READ_ONCE(cpu_rq(cpu)->scx.kick_sync) == ksyncs[cpu]) {
3075 			smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3076 			cpu_relax();
3077 		}
3078 		raw_spin_rq_lock_irq(rq);
3079 		waited = true;
3080 	}
3081 
3082 	if (waited)
3083 		goto retry;
3084 }
3085 
3086 static struct task_struct *first_local_task(struct rq *rq)
3087 {
3088 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
3089 					struct task_struct, scx.dsq_list.node);
3090 }
3091 
3092 static struct task_struct *
3093 do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
3094 {
3095 	struct task_struct *prev = rq->curr;
3096 	bool keep_prev;
3097 	struct task_struct *p;
3098 
3099 	/* see kick_sync_wait_bal_cb() */
3100 	smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3101 
3102 	rq_modified_begin(rq, &ext_sched_class);
3103 
3104 	rq_unpin_lock(rq, rf);
3105 	balance_one(rq, prev);
3106 	rq_repin_lock(rq, rf);
3107 	maybe_queue_balance_callback(rq);
3108 
3109 	/*
3110 	 * Defer to a balance callback which can drop rq lock and enable
3111 	 * IRQs. Waiting directly in the pick path would deadlock against
3112 	 * CPUs sending us IPIs (e.g. TLB flushes) while we wait for them.
3113 	 */
3114 	if (unlikely(rq->scx.kick_sync_pending)) {
3115 		rq->scx.kick_sync_pending = false;
3116 		queue_balance_callback(rq, &rq->scx.kick_sync_bal_cb,
3117 				       kick_sync_wait_bal_cb);
3118 	}
3119 
3120 	/*
3121 	 * If any higher-priority sched class enqueued a runnable task on
3122 	 * this rq during balance_one(), abort and return RETRY_TASK, so
3123 	 * that the scheduler loop can restart.
3124 	 *
3125 	 * If @force_scx is true, always try to pick a SCHED_EXT task,
3126 	 * regardless of any higher-priority sched classes activity.
3127 	 */
3128 	if (!force_scx && rq_modified_above(rq, &ext_sched_class))
3129 		return RETRY_TASK;
3130 
3131 	keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3132 	if (unlikely(keep_prev &&
3133 		     prev->sched_class != &ext_sched_class)) {
3134 		WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
3135 		keep_prev = false;
3136 	}
3137 
3138 	/*
3139 	 * If balance_one() is telling us to keep running @prev, replenish slice
3140 	 * if necessary and keep running @prev. Otherwise, pop the first one
3141 	 * from the local DSQ.
3142 	 */
3143 	if (keep_prev) {
3144 		p = prev;
3145 		if (!p->scx.slice)
3146 			refill_task_slice_dfl(scx_task_sched(p), p);
3147 	} else {
3148 		p = first_local_task(rq);
3149 		if (!p)
3150 			return NULL;
3151 
3152 		if (unlikely(!p->scx.slice)) {
3153 			struct scx_sched *sch = scx_task_sched(p);
3154 
3155 			if (!scx_bypassing(sch, cpu_of(rq)) &&
3156 			    !sch->warned_zero_slice) {
3157 				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3158 						p->comm, p->pid, __func__);
3159 				sch->warned_zero_slice = true;
3160 			}
3161 			refill_task_slice_dfl(sch, p);
3162 		}
3163 	}
3164 
3165 	return p;
3166 }
3167 
3168 static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
3169 {
3170 	return do_pick_task_scx(rq, rf, false);
3171 }
3172 
3173 /*
3174  * Select the next task to run from the ext scheduling class.
3175  *
3176  * Use do_pick_task_scx() directly with @force_scx enabled, since the
3177  * dl_server must always select a sched_ext task.
3178  */
3179 static struct task_struct *
3180 ext_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
3181 {
3182 	if (!scx_enabled())
3183 		return NULL;
3184 
3185 	return do_pick_task_scx(dl_se->rq, rf, true);
3186 }
3187 
3188 /*
3189  * Initialize the ext server deadline entity.
3190  */
3191 void ext_server_init(struct rq *rq)
3192 {
3193 	struct sched_dl_entity *dl_se = &rq->ext_server;
3194 
3195 	init_dl_entity(dl_se);
3196 
3197 	dl_server_init(dl_se, rq, ext_server_pick_task);
3198 }
3199 
3200 #ifdef CONFIG_SCHED_CORE
3201 /**
3202  * scx_prio_less - Task ordering for core-sched
3203  * @a: task A
3204  * @b: task B
3205  * @in_fi: in forced idle state
3206  *
3207  * Core-sched is implemented as an additional scheduling layer on top of the
3208  * usual sched_class'es and needs to find out the expected task ordering. For
3209  * SCX, core-sched calls this function to interrogate the task ordering.
3210  *
3211  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3212  * to implement the default task ordering. The older the timestamp, the higher
3213  * priority the task - the global FIFO ordering matching the default scheduling
3214  * behavior.
3215  *
3216  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3217  * implement FIFO ordering within each local DSQ. See pick_task_scx().
3218  */
3219 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3220 		   bool in_fi)
3221 {
3222 	struct scx_sched *sch_a = scx_task_sched(a);
3223 	struct scx_sched *sch_b = scx_task_sched(b);
3224 
3225 	/*
3226 	 * The const qualifiers are dropped from task_struct pointers when
3227 	 * calling ops.core_sched_before(). Accesses are controlled by the
3228 	 * verifier.
3229 	 */
3230 	if (sch_a == sch_b && SCX_HAS_OP(sch_a, core_sched_before) &&
3231 	    !scx_bypassing(sch_a, task_cpu(a)))
3232 		return SCX_CALL_OP_2TASKS_RET(sch_a, core_sched_before,
3233 					      task_rq(a),
3234 					      (struct task_struct *)a,
3235 					      (struct task_struct *)b);
3236 	else
3237 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3238 }
3239 #endif	/* CONFIG_SCHED_CORE */
3240 
3241 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3242 {
3243 	struct scx_sched *sch = scx_task_sched(p);
3244 	bool bypassing;
3245 
3246 	/*
3247 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3248 	 * can be a good migration opportunity with low cache and memory
3249 	 * footprint. Returning a CPU different than @prev_cpu triggers
3250 	 * immediate rq migration. However, for SCX, as the current rq
3251 	 * association doesn't dictate where the task is going to run, this
3252 	 * doesn't fit well. If necessary, we can later add a dedicated method
3253 	 * which can decide to preempt self to force it through the regular
3254 	 * scheduling path.
3255 	 */
3256 	if (unlikely(wake_flags & WF_EXEC))
3257 		return prev_cpu;
3258 
3259 	bypassing = scx_bypassing(sch, task_cpu(p));
3260 	if (likely(SCX_HAS_OP(sch, select_cpu)) && !bypassing) {
3261 		s32 cpu;
3262 		struct task_struct **ddsp_taskp;
3263 
3264 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3265 		WARN_ON_ONCE(*ddsp_taskp);
3266 		*ddsp_taskp = p;
3267 
3268 		this_rq()->scx.in_select_cpu = true;
3269 		cpu = SCX_CALL_OP_TASK_RET(sch, select_cpu, NULL, p, prev_cpu, wake_flags);
3270 		this_rq()->scx.in_select_cpu = false;
3271 		p->scx.selected_cpu = cpu;
3272 		*ddsp_taskp = NULL;
3273 		if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
3274 			return cpu;
3275 		else
3276 			return prev_cpu;
3277 	} else {
3278 		s32 cpu;
3279 
3280 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
3281 		if (cpu >= 0) {
3282 			refill_task_slice_dfl(sch, p);
3283 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3284 		} else {
3285 			cpu = prev_cpu;
3286 		}
3287 		p->scx.selected_cpu = cpu;
3288 
3289 		if (bypassing)
3290 			__scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
3291 		return cpu;
3292 	}
3293 }
3294 
3295 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3296 {
3297 	run_deferred(rq);
3298 }
3299 
3300 static void set_cpus_allowed_scx(struct task_struct *p,
3301 				 struct affinity_context *ac)
3302 {
3303 	struct scx_sched *sch = scx_task_sched(p);
3304 
3305 	set_cpus_allowed_common(p, ac);
3306 
3307 	if (task_dead_and_done(p))
3308 		return;
3309 
3310 	/*
3311 	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3312 	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3313 	 * scheduler the effective one.
3314 	 *
3315 	 * Fine-grained memory write control is enforced by BPF making the const
3316 	 * designation pointless. Cast it away when calling the operation.
3317 	 */
3318 	if (SCX_HAS_OP(sch, set_cpumask))
3319 		SCX_CALL_OP_TASK(sch, set_cpumask, task_rq(p), p, (struct cpumask *)p->cpus_ptr);
3320 }
3321 
3322 static void handle_hotplug(struct rq *rq, bool online)
3323 {
3324 	struct scx_sched *sch = scx_root;
3325 	s32 cpu = cpu_of(rq);
3326 
3327 	atomic_long_inc(&scx_hotplug_seq);
3328 
3329 	/*
3330 	 * scx_root updates are protected by cpus_read_lock() and will stay
3331 	 * stable here. Note that we can't depend on scx_enabled() test as the
3332 	 * hotplug ops need to be enabled before __scx_enabled is set.
3333 	 */
3334 	if (unlikely(!sch))
3335 		return;
3336 
3337 	if (scx_enabled())
3338 		scx_idle_update_selcpu_topology(&sch->ops);
3339 
3340 	if (online && SCX_HAS_OP(sch, cpu_online))
3341 		SCX_CALL_OP(sch, cpu_online, NULL, cpu);
3342 	else if (!online && SCX_HAS_OP(sch, cpu_offline))
3343 		SCX_CALL_OP(sch, cpu_offline, NULL, cpu);
3344 	else
3345 		scx_exit(sch, SCX_EXIT_UNREG_KERN,
3346 			 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3347 			 "cpu %d going %s, exiting scheduler", cpu,
3348 			 online ? "online" : "offline");
3349 }
3350 
3351 void scx_rq_activate(struct rq *rq)
3352 {
3353 	handle_hotplug(rq, true);
3354 }
3355 
3356 void scx_rq_deactivate(struct rq *rq)
3357 {
3358 	handle_hotplug(rq, false);
3359 }
3360 
3361 static void rq_online_scx(struct rq *rq)
3362 {
3363 	rq->scx.flags |= SCX_RQ_ONLINE;
3364 }
3365 
3366 static void rq_offline_scx(struct rq *rq)
3367 {
3368 	rq->scx.flags &= ~SCX_RQ_ONLINE;
3369 }
3370 
3371 static bool check_rq_for_timeouts(struct rq *rq)
3372 {
3373 	struct scx_sched *sch;
3374 	struct task_struct *p;
3375 	struct rq_flags rf;
3376 	bool timed_out = false;
3377 
3378 	rq_lock_irqsave(rq, &rf);
3379 	sch = rcu_dereference_bh(scx_root);
3380 	if (unlikely(!sch))
3381 		goto out_unlock;
3382 
3383 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3384 		struct scx_sched *sch = scx_task_sched(p);
3385 		unsigned long last_runnable = p->scx.runnable_at;
3386 
3387 		if (unlikely(time_after(jiffies,
3388 					last_runnable + READ_ONCE(sch->watchdog_timeout)))) {
3389 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3390 
3391 			scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3392 				 "%s[%d] failed to run for %u.%03us",
3393 				 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000);
3394 			timed_out = true;
3395 			break;
3396 		}
3397 	}
3398 out_unlock:
3399 	rq_unlock_irqrestore(rq, &rf);
3400 	return timed_out;
3401 }
3402 
3403 static void scx_watchdog_workfn(struct work_struct *work)
3404 {
3405 	unsigned long intv;
3406 	int cpu;
3407 
3408 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3409 
3410 	for_each_online_cpu(cpu) {
3411 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3412 			break;
3413 
3414 		cond_resched();
3415 	}
3416 
3417 	intv = READ_ONCE(scx_watchdog_interval);
3418 	if (intv < ULONG_MAX)
3419 		queue_delayed_work(system_dfl_wq, to_delayed_work(work), intv);
3420 }
3421 
3422 void scx_tick(struct rq *rq)
3423 {
3424 	struct scx_sched *root;
3425 	unsigned long last_check;
3426 
3427 	if (!scx_enabled())
3428 		return;
3429 
3430 	root = rcu_dereference_bh(scx_root);
3431 	if (unlikely(!root))
3432 		return;
3433 
3434 	last_check = READ_ONCE(scx_watchdog_timestamp);
3435 	if (unlikely(time_after(jiffies,
3436 				last_check + READ_ONCE(root->watchdog_timeout)))) {
3437 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3438 
3439 		scx_exit(root, SCX_EXIT_ERROR_STALL, 0,
3440 			 "watchdog failed to check in for %u.%03us",
3441 			 dur_ms / 1000, dur_ms % 1000);
3442 	}
3443 
3444 	update_other_load_avgs(rq);
3445 }
3446 
3447 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3448 {
3449 	struct scx_sched *sch = scx_task_sched(curr);
3450 
3451 	update_curr_scx(rq);
3452 
3453 	/*
3454 	 * While disabling, always resched and refresh core-sched timestamp as
3455 	 * we can't trust the slice management or ops.core_sched_before().
3456 	 */
3457 	if (scx_bypassing(sch, cpu_of(rq))) {
3458 		curr->scx.slice = 0;
3459 		touch_core_sched(rq, curr);
3460 	} else if (SCX_HAS_OP(sch, tick)) {
3461 		SCX_CALL_OP_TASK(sch, tick, rq, curr);
3462 	}
3463 
3464 	if (!curr->scx.slice)
3465 		resched_curr(rq);
3466 }
3467 
3468 #ifdef CONFIG_EXT_GROUP_SCHED
3469 static struct cgroup *tg_cgrp(struct task_group *tg)
3470 {
3471 	/*
3472 	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3473 	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3474 	 * root cgroup.
3475 	 */
3476 	if (tg && tg->css.cgroup)
3477 		return tg->css.cgroup;
3478 	else
3479 		return &cgrp_dfl_root.cgrp;
3480 }
3481 
3482 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
3483 
3484 #else	/* CONFIG_EXT_GROUP_SCHED */
3485 
3486 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3487 
3488 #endif	/* CONFIG_EXT_GROUP_SCHED */
3489 
3490 static u32 scx_get_task_state(const struct task_struct *p)
3491 {
3492 	return p->scx.flags & SCX_TASK_STATE_MASK;
3493 }
3494 
3495 static void scx_set_task_state(struct task_struct *p, u32 state)
3496 {
3497 	u32 prev_state = scx_get_task_state(p);
3498 	bool warn = false;
3499 
3500 	switch (state) {
3501 	case SCX_TASK_NONE:
3502 		break;
3503 	case SCX_TASK_INIT:
3504 		warn = prev_state != SCX_TASK_NONE;
3505 		break;
3506 	case SCX_TASK_READY:
3507 		warn = prev_state == SCX_TASK_NONE;
3508 		break;
3509 	case SCX_TASK_ENABLED:
3510 		warn = prev_state != SCX_TASK_READY;
3511 		break;
3512 	default:
3513 		WARN_ONCE(1, "sched_ext: Invalid task state %d -> %d for %s[%d]",
3514 			  prev_state, state, p->comm, p->pid);
3515 		return;
3516 	}
3517 
3518 	WARN_ONCE(warn, "sched_ext: Invalid task state transition 0x%x -> 0x%x for %s[%d]",
3519 		  prev_state, state, p->comm, p->pid);
3520 
3521 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
3522 	p->scx.flags |= state;
3523 }
3524 
3525 static int __scx_init_task(struct scx_sched *sch, struct task_struct *p, bool fork)
3526 {
3527 	int ret;
3528 
3529 	p->scx.disallow = false;
3530 
3531 	if (SCX_HAS_OP(sch, init_task)) {
3532 		struct scx_init_task_args args = {
3533 			SCX_INIT_TASK_ARGS_CGROUP(task_group(p))
3534 			.fork = fork,
3535 		};
3536 
3537 		ret = SCX_CALL_OP_RET(sch, init_task, NULL, p, &args);
3538 		if (unlikely(ret)) {
3539 			ret = ops_sanitize_err(sch, "init_task", ret);
3540 			return ret;
3541 		}
3542 	}
3543 
3544 	if (p->scx.disallow) {
3545 		if (unlikely(scx_parent(sch))) {
3546 			scx_error(sch, "non-root ops.init_task() set task->scx.disallow for %s[%d]",
3547 				  p->comm, p->pid);
3548 		} else if (unlikely(fork)) {
3549 			scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
3550 				  p->comm, p->pid);
3551 		} else {
3552 			struct rq *rq;
3553 			struct rq_flags rf;
3554 
3555 			rq = task_rq_lock(p, &rf);
3556 
3557 			/*
3558 			 * We're in the load path and @p->policy will be applied
3559 			 * right after. Reverting @p->policy here and rejecting
3560 			 * %SCHED_EXT transitions from scx_check_setscheduler()
3561 			 * guarantees that if ops.init_task() sets @p->disallow,
3562 			 * @p can never be in SCX.
3563 			 */
3564 			if (p->policy == SCHED_EXT) {
3565 				p->policy = SCHED_NORMAL;
3566 				atomic_long_inc(&scx_nr_rejected);
3567 			}
3568 
3569 			task_rq_unlock(rq, p, &rf);
3570 		}
3571 	}
3572 
3573 	return 0;
3574 }
3575 
3576 static int scx_init_task(struct scx_sched *sch, struct task_struct *p, bool fork)
3577 {
3578 	int ret;
3579 
3580 	ret = __scx_init_task(sch, p, fork);
3581 	if (!ret) {
3582 		/*
3583 		 * While @p's rq is not locked. @p is not visible to the rest of
3584 		 * SCX yet and it's safe to update the flags and state.
3585 		 */
3586 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3587 		scx_set_task_state(p, SCX_TASK_INIT);
3588 	}
3589 	return ret;
3590 }
3591 
3592 static void __scx_enable_task(struct scx_sched *sch, struct task_struct *p)
3593 {
3594 	struct rq *rq = task_rq(p);
3595 	u32 weight;
3596 
3597 	lockdep_assert_rq_held(rq);
3598 
3599 	/*
3600 	 * Verify the task is not in BPF scheduler's custody. If flag
3601 	 * transitions are consistent, the flag should always be clear
3602 	 * here.
3603 	 */
3604 	WARN_ON_ONCE(p->scx.flags & SCX_TASK_IN_CUSTODY);
3605 
3606 	/*
3607 	 * Set the weight before calling ops.enable() so that the scheduler
3608 	 * doesn't see a stale value if they inspect the task struct.
3609 	 */
3610 	if (task_has_idle_policy(p))
3611 		weight = WEIGHT_IDLEPRIO;
3612 	else
3613 		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3614 
3615 	p->scx.weight = sched_weight_to_cgroup(weight);
3616 
3617 	if (SCX_HAS_OP(sch, enable))
3618 		SCX_CALL_OP_TASK(sch, enable, rq, p);
3619 
3620 	if (SCX_HAS_OP(sch, set_weight))
3621 		SCX_CALL_OP_TASK(sch, set_weight, rq, p, p->scx.weight);
3622 }
3623 
3624 static void scx_enable_task(struct scx_sched *sch, struct task_struct *p)
3625 {
3626 	__scx_enable_task(sch, p);
3627 	scx_set_task_state(p, SCX_TASK_ENABLED);
3628 }
3629 
3630 static void scx_disable_task(struct scx_sched *sch, struct task_struct *p)
3631 {
3632 	struct rq *rq = task_rq(p);
3633 
3634 	lockdep_assert_rq_held(rq);
3635 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3636 
3637 	clear_direct_dispatch(p);
3638 
3639 	if (SCX_HAS_OP(sch, disable))
3640 		SCX_CALL_OP_TASK(sch, disable, rq, p);
3641 	scx_set_task_state(p, SCX_TASK_READY);
3642 
3643 	/*
3644 	 * Verify the task is not in BPF scheduler's custody. If flag
3645 	 * transitions are consistent, the flag should always be clear
3646 	 * here.
3647 	 */
3648 	WARN_ON_ONCE(p->scx.flags & SCX_TASK_IN_CUSTODY);
3649 }
3650 
3651 static void __scx_disable_and_exit_task(struct scx_sched *sch,
3652 					struct task_struct *p)
3653 {
3654 	struct scx_exit_task_args args = {
3655 		.cancelled = false,
3656 	};
3657 
3658 	lockdep_assert_held(&p->pi_lock);
3659 	lockdep_assert_rq_held(task_rq(p));
3660 
3661 	switch (scx_get_task_state(p)) {
3662 	case SCX_TASK_NONE:
3663 		return;
3664 	case SCX_TASK_INIT:
3665 		args.cancelled = true;
3666 		break;
3667 	case SCX_TASK_READY:
3668 		break;
3669 	case SCX_TASK_ENABLED:
3670 		scx_disable_task(sch, p);
3671 		break;
3672 	default:
3673 		WARN_ON_ONCE(true);
3674 		return;
3675 	}
3676 
3677 	if (SCX_HAS_OP(sch, exit_task))
3678 		SCX_CALL_OP_TASK(sch, exit_task, task_rq(p), p, &args);
3679 }
3680 
3681 /*
3682  * Undo a completed __scx_init_task(sch, p, false) when scx_enable_task() never
3683  * ran. The task state has not been transitioned, so this mirrors the
3684  * SCX_TASK_INIT branch in __scx_disable_and_exit_task().
3685  */
3686 static void scx_sub_init_cancel_task(struct scx_sched *sch, struct task_struct *p)
3687 {
3688 	struct scx_exit_task_args args = { .cancelled = true };
3689 
3690 	lockdep_assert_held(&p->pi_lock);
3691 	lockdep_assert_rq_held(task_rq(p));
3692 
3693 	if (SCX_HAS_OP(sch, exit_task))
3694 		SCX_CALL_OP_TASK(sch, exit_task, task_rq(p), p, &args);
3695 }
3696 
3697 static void scx_disable_and_exit_task(struct scx_sched *sch,
3698 				      struct task_struct *p)
3699 {
3700 	__scx_disable_and_exit_task(sch, p);
3701 
3702 	/*
3703 	 * If set, @p exited between __scx_init_task() and scx_enable_task() in
3704 	 * scx_sub_enable() and is initialized for both the associated sched and
3705 	 * its parent. Exit for the child too - scx_enable_task() never ran for
3706 	 * it, so undo only init_task.
3707 	 */
3708 	if (p->scx.flags & SCX_TASK_SUB_INIT) {
3709 		if (!WARN_ON_ONCE(!scx_enabling_sub_sched))
3710 			scx_sub_init_cancel_task(scx_enabling_sub_sched, p);
3711 		p->scx.flags &= ~SCX_TASK_SUB_INIT;
3712 	}
3713 
3714 	scx_set_task_sched(p, NULL);
3715 	scx_set_task_state(p, SCX_TASK_NONE);
3716 }
3717 
3718 void init_scx_entity(struct sched_ext_entity *scx)
3719 {
3720 	memset(scx, 0, sizeof(*scx));
3721 	INIT_LIST_HEAD(&scx->dsq_list.node);
3722 	RB_CLEAR_NODE(&scx->dsq_priq);
3723 	scx->sticky_cpu = -1;
3724 	scx->holding_cpu = -1;
3725 	INIT_LIST_HEAD(&scx->runnable_node);
3726 	scx->runnable_at = jiffies;
3727 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3728 	scx->slice = SCX_SLICE_DFL;
3729 }
3730 
3731 void scx_pre_fork(struct task_struct *p)
3732 {
3733 	/*
3734 	 * BPF scheduler enable/disable paths want to be able to iterate and
3735 	 * update all tasks which can become complex when racing forks. As
3736 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
3737 	 * exclude forks.
3738 	 */
3739 	percpu_down_read(&scx_fork_rwsem);
3740 }
3741 
3742 int scx_fork(struct task_struct *p, struct kernel_clone_args *kargs)
3743 {
3744 	s32 ret;
3745 
3746 	percpu_rwsem_assert_held(&scx_fork_rwsem);
3747 
3748 	if (scx_init_task_enabled) {
3749 #ifdef CONFIG_EXT_SUB_SCHED
3750 		struct scx_sched *sch = kargs->cset->dfl_cgrp->scx_sched;
3751 #else
3752 		struct scx_sched *sch = scx_root;
3753 #endif
3754 		ret = scx_init_task(sch, p, true);
3755 		if (!ret)
3756 			scx_set_task_sched(p, sch);
3757 		return ret;
3758 	}
3759 
3760 	return 0;
3761 }
3762 
3763 void scx_post_fork(struct task_struct *p)
3764 {
3765 	if (scx_init_task_enabled) {
3766 		scx_set_task_state(p, SCX_TASK_READY);
3767 
3768 		/*
3769 		 * Enable the task immediately if it's running on sched_ext.
3770 		 * Otherwise, it'll be enabled in switching_to_scx() if and
3771 		 * when it's ever configured to run with a SCHED_EXT policy.
3772 		 */
3773 		if (p->sched_class == &ext_sched_class) {
3774 			struct rq_flags rf;
3775 			struct rq *rq;
3776 
3777 			rq = task_rq_lock(p, &rf);
3778 			scx_enable_task(scx_task_sched(p), p);
3779 			task_rq_unlock(rq, p, &rf);
3780 		}
3781 	}
3782 
3783 	raw_spin_lock_irq(&scx_tasks_lock);
3784 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
3785 	raw_spin_unlock_irq(&scx_tasks_lock);
3786 
3787 	percpu_up_read(&scx_fork_rwsem);
3788 }
3789 
3790 void scx_cancel_fork(struct task_struct *p)
3791 {
3792 	if (scx_enabled()) {
3793 		struct rq *rq;
3794 		struct rq_flags rf;
3795 
3796 		rq = task_rq_lock(p, &rf);
3797 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3798 		scx_disable_and_exit_task(scx_task_sched(p), p);
3799 		task_rq_unlock(rq, p, &rf);
3800 	}
3801 
3802 	percpu_up_read(&scx_fork_rwsem);
3803 }
3804 
3805 /**
3806  * task_dead_and_done - Is a task dead and done running?
3807  * @p: target task
3808  *
3809  * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the
3810  * task no longer exists from SCX's POV. However, certain sched_class ops may be
3811  * invoked on these dead tasks leading to failures - e.g. sched_setscheduler()
3812  * may try to switch a task which finished sched_ext_dead() back into SCX
3813  * triggering invalid SCX task state transitions and worse.
3814  *
3815  * Once a task has finished the final switch, sched_ext_dead() is the only thing
3816  * that needs to happen on the task. Use this test to short-circuit sched_class
3817  * operations which may be called on dead tasks.
3818  */
3819 static bool task_dead_and_done(struct task_struct *p)
3820 {
3821 	struct rq *rq = task_rq(p);
3822 
3823 	lockdep_assert_rq_held(rq);
3824 
3825 	/*
3826 	 * In do_task_dead(), a dying task sets %TASK_DEAD with preemption
3827 	 * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p
3828 	 * won't ever run again.
3829 	 */
3830 	return unlikely(READ_ONCE(p->__state) == TASK_DEAD) &&
3831 		!task_on_cpu(rq, p);
3832 }
3833 
3834 void sched_ext_dead(struct task_struct *p)
3835 {
3836 	unsigned long flags;
3837 
3838 	/*
3839 	 * By the time control reaches here, @p has %TASK_DEAD set, switched out
3840 	 * for the last time and then dropped the rq lock - task_dead_and_done()
3841 	 * should be returning %true nullifying the straggling sched_class ops.
3842 	 * Remove from scx_tasks and exit @p.
3843 	 */
3844 	raw_spin_lock_irqsave(&scx_tasks_lock, flags);
3845 	list_del_init(&p->scx.tasks_node);
3846 	raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
3847 
3848 	/*
3849 	 * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
3850 	 * ENABLED transitions can't race us. Disable ops for @p.
3851 	 */
3852 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
3853 		struct rq_flags rf;
3854 		struct rq *rq;
3855 
3856 		rq = task_rq_lock(p, &rf);
3857 		scx_disable_and_exit_task(scx_task_sched(p), p);
3858 		task_rq_unlock(rq, p, &rf);
3859 	}
3860 }
3861 
3862 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3863 			      const struct load_weight *lw)
3864 {
3865 	struct scx_sched *sch = scx_task_sched(p);
3866 
3867 	lockdep_assert_rq_held(task_rq(p));
3868 
3869 	if (task_dead_and_done(p))
3870 		return;
3871 
3872 	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3873 	if (SCX_HAS_OP(sch, set_weight))
3874 		SCX_CALL_OP_TASK(sch, set_weight, rq, p, p->scx.weight);
3875 }
3876 
3877 static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio)
3878 {
3879 }
3880 
3881 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3882 {
3883 	struct scx_sched *sch = scx_task_sched(p);
3884 
3885 	if (task_dead_and_done(p))
3886 		return;
3887 
3888 	scx_enable_task(sch, p);
3889 
3890 	/*
3891 	 * set_cpus_allowed_scx() is not called while @p is associated with a
3892 	 * different scheduler class. Keep the BPF scheduler up-to-date.
3893 	 */
3894 	if (SCX_HAS_OP(sch, set_cpumask))
3895 		SCX_CALL_OP_TASK(sch, set_cpumask, rq, p, (struct cpumask *)p->cpus_ptr);
3896 }
3897 
3898 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3899 {
3900 	if (task_dead_and_done(p))
3901 		return;
3902 
3903 	scx_disable_task(scx_task_sched(p), p);
3904 }
3905 
3906 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3907 
3908 int scx_check_setscheduler(struct task_struct *p, int policy)
3909 {
3910 	lockdep_assert_rq_held(task_rq(p));
3911 
3912 	/* if disallow, reject transitioning into SCX */
3913 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3914 	    p->policy != policy && policy == SCHED_EXT)
3915 		return -EACCES;
3916 
3917 	return 0;
3918 }
3919 
3920 static void process_ddsp_deferred_locals(struct rq *rq)
3921 {
3922 	struct task_struct *p;
3923 
3924 	lockdep_assert_rq_held(rq);
3925 
3926 	/*
3927 	 * Now that @rq can be unlocked, execute the deferred enqueueing of
3928 	 * tasks directly dispatched to the local DSQs of other CPUs. See
3929 	 * direct_dispatch(). Keep popping from the head instead of using
3930 	 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
3931 	 * temporarily.
3932 	 */
3933 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
3934 				struct task_struct, scx.dsq_list.node))) {
3935 		struct scx_sched *sch = scx_task_sched(p);
3936 		struct scx_dispatch_q *dsq;
3937 		u64 dsq_id = p->scx.ddsp_dsq_id;
3938 		u64 enq_flags = p->scx.ddsp_enq_flags;
3939 
3940 		list_del_init(&p->scx.dsq_list.node);
3941 		clear_direct_dispatch(p);
3942 
3943 		dsq = find_dsq_for_dispatch(sch, rq, dsq_id, task_cpu(p));
3944 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
3945 			dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
3946 	}
3947 }
3948 
3949 /*
3950  * Determine whether @p should be reenqueued from a local DSQ.
3951  *
3952  * @reenq_flags is mutable and accumulates state across the DSQ walk:
3953  *
3954  * - %SCX_REENQ_TSR_NOT_FIRST: Set after the first task is visited. "First"
3955  *   tracks position in the DSQ list, not among IMMED tasks. A non-IMMED task at
3956  *   the head consumes the first slot.
3957  *
3958  * - %SCX_REENQ_TSR_RQ_OPEN: Set by reenq_local() before the walk if
3959  *   rq_is_open() is true.
3960  *
3961  * An IMMED task is kept (returns %false) only if it's the first task in the DSQ
3962  * AND the current task is done — i.e. it will execute immediately. All other
3963  * IMMED tasks are reenqueued. This means if a non-IMMED task sits at the head,
3964  * every IMMED task behind it gets reenqueued.
3965  *
3966  * Reenqueued tasks go through ops.enqueue() with %SCX_ENQ_REENQ |
3967  * %SCX_TASK_REENQ_IMMED. If the BPF scheduler dispatches back to the same local
3968  * DSQ with %SCX_ENQ_IMMED while the CPU is still unavailable, this triggers
3969  * another reenq cycle. Repetitions are bounded by %SCX_REENQ_LOCAL_MAX_REPEAT
3970  * in process_deferred_reenq_locals().
3971  */
3972 static bool local_task_should_reenq(struct task_struct *p, u64 *reenq_flags, u32 *reason)
3973 {
3974 	bool first;
3975 
3976 	first = !(*reenq_flags & SCX_REENQ_TSR_NOT_FIRST);
3977 	*reenq_flags |= SCX_REENQ_TSR_NOT_FIRST;
3978 
3979 	*reason = SCX_TASK_REENQ_KFUNC;
3980 
3981 	if ((p->scx.flags & SCX_TASK_IMMED) &&
3982 	    (!first || !(*reenq_flags & SCX_REENQ_TSR_RQ_OPEN))) {
3983 		__scx_add_event(scx_task_sched(p), SCX_EV_REENQ_IMMED, 1);
3984 		*reason = SCX_TASK_REENQ_IMMED;
3985 		return true;
3986 	}
3987 
3988 	return *reenq_flags & SCX_REENQ_ANY;
3989 }
3990 
3991 static u32 reenq_local(struct scx_sched *sch, struct rq *rq, u64 reenq_flags)
3992 {
3993 	LIST_HEAD(tasks);
3994 	u32 nr_enqueued = 0;
3995 	struct task_struct *p, *n;
3996 
3997 	lockdep_assert_rq_held(rq);
3998 
3999 	if (WARN_ON_ONCE(reenq_flags & __SCX_REENQ_TSR_MASK))
4000 		reenq_flags &= ~__SCX_REENQ_TSR_MASK;
4001 	if (rq_is_open(rq, 0))
4002 		reenq_flags |= SCX_REENQ_TSR_RQ_OPEN;
4003 
4004 	/*
4005 	 * The BPF scheduler may choose to dispatch tasks back to
4006 	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
4007 	 * first to avoid processing the same tasks repeatedly.
4008 	 */
4009 	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
4010 				 scx.dsq_list.node) {
4011 		struct scx_sched *task_sch = scx_task_sched(p);
4012 		u32 reason;
4013 
4014 		/*
4015 		 * If @p is being migrated, @p's current CPU may not agree with
4016 		 * its allowed CPUs and the migration_cpu_stop is about to
4017 		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
4018 		 *
4019 		 * While racing sched property changes may also dequeue and
4020 		 * re-enqueue a migrating task while its current CPU and allowed
4021 		 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
4022 		 * the current local DSQ for running tasks and thus are not
4023 		 * visible to the BPF scheduler.
4024 		 */
4025 		if (p->migration_pending)
4026 			continue;
4027 
4028 		if (!scx_is_descendant(task_sch, sch))
4029 			continue;
4030 
4031 		if (!local_task_should_reenq(p, &reenq_flags, &reason))
4032 			continue;
4033 
4034 		dispatch_dequeue(rq, p);
4035 
4036 		if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
4037 			p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4038 		p->scx.flags |= reason;
4039 
4040 		list_add_tail(&p->scx.dsq_list.node, &tasks);
4041 	}
4042 
4043 	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
4044 		list_del_init(&p->scx.dsq_list.node);
4045 
4046 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
4047 
4048 		p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4049 		nr_enqueued++;
4050 	}
4051 
4052 	return nr_enqueued;
4053 }
4054 
4055 static void process_deferred_reenq_locals(struct rq *rq)
4056 {
4057 	u64 seq = ++rq->scx.deferred_reenq_locals_seq;
4058 
4059 	lockdep_assert_rq_held(rq);
4060 
4061 	while (true) {
4062 		struct scx_sched *sch;
4063 		u64 reenq_flags;
4064 		bool skip = false;
4065 
4066 		scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
4067 			struct scx_deferred_reenq_local *drl =
4068 				list_first_entry_or_null(&rq->scx.deferred_reenq_locals,
4069 							 struct scx_deferred_reenq_local,
4070 							 node);
4071 			struct scx_sched_pcpu *sch_pcpu;
4072 
4073 			if (!drl)
4074 				return;
4075 
4076 			sch_pcpu = container_of(drl, struct scx_sched_pcpu,
4077 						deferred_reenq_local);
4078 			sch = sch_pcpu->sch;
4079 
4080 			reenq_flags = drl->flags;
4081 			WRITE_ONCE(drl->flags, 0);
4082 			list_del_init(&drl->node);
4083 
4084 			if (likely(drl->seq != seq)) {
4085 				drl->seq = seq;
4086 				drl->cnt = 0;
4087 			} else {
4088 				if (unlikely(++drl->cnt > SCX_REENQ_LOCAL_MAX_REPEAT)) {
4089 					scx_error(sch, "SCX_ENQ_REENQ on SCX_DSQ_LOCAL repeated %u times",
4090 						  drl->cnt);
4091 					skip = true;
4092 				}
4093 
4094 				__scx_add_event(sch, SCX_EV_REENQ_LOCAL_REPEAT, 1);
4095 			}
4096 		}
4097 
4098 		if (!skip) {
4099 			/* see schedule_dsq_reenq() */
4100 			smp_mb();
4101 
4102 			reenq_local(sch, rq, reenq_flags);
4103 		}
4104 	}
4105 }
4106 
4107 static bool user_task_should_reenq(struct task_struct *p, u64 reenq_flags, u32 *reason)
4108 {
4109 	*reason = SCX_TASK_REENQ_KFUNC;
4110 	return reenq_flags & SCX_REENQ_ANY;
4111 }
4112 
4113 static void reenq_user(struct rq *rq, struct scx_dispatch_q *dsq, u64 reenq_flags)
4114 {
4115 	struct rq *locked_rq = rq;
4116 	struct scx_sched *sch = dsq->sched;
4117 	struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, dsq, 0);
4118 	struct task_struct *p;
4119 	s32 nr_enqueued = 0;
4120 
4121 	lockdep_assert_rq_held(rq);
4122 
4123 	raw_spin_lock(&dsq->lock);
4124 
4125 	while (likely(!READ_ONCE(sch->bypass_depth))) {
4126 		struct rq *task_rq;
4127 		u32 reason;
4128 
4129 		p = nldsq_cursor_next_task(&cursor, dsq);
4130 		if (!p)
4131 			break;
4132 
4133 		if (!user_task_should_reenq(p, reenq_flags, &reason))
4134 			continue;
4135 
4136 		task_rq = task_rq(p);
4137 
4138 		if (locked_rq != task_rq) {
4139 			if (locked_rq)
4140 				raw_spin_rq_unlock(locked_rq);
4141 			if (unlikely(!raw_spin_rq_trylock(task_rq))) {
4142 				raw_spin_unlock(&dsq->lock);
4143 				raw_spin_rq_lock(task_rq);
4144 				raw_spin_lock(&dsq->lock);
4145 			}
4146 			locked_rq = task_rq;
4147 
4148 			/* did we lose @p while switching locks? */
4149 			if (nldsq_cursor_lost_task(&cursor, task_rq, dsq, p))
4150 				continue;
4151 		}
4152 
4153 		/* @p is on @dsq, its rq and @dsq are locked */
4154 		dispatch_dequeue_locked(p, dsq);
4155 		raw_spin_unlock(&dsq->lock);
4156 
4157 		if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
4158 			p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4159 		p->scx.flags |= reason;
4160 
4161 		do_enqueue_task(task_rq, p, SCX_ENQ_REENQ, -1);
4162 
4163 		p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4164 
4165 		if (!(++nr_enqueued % SCX_TASK_ITER_BATCH)) {
4166 			raw_spin_rq_unlock(locked_rq);
4167 			locked_rq = NULL;
4168 			cpu_relax();
4169 		}
4170 
4171 		raw_spin_lock(&dsq->lock);
4172 	}
4173 
4174 	list_del_init(&cursor.node);
4175 	raw_spin_unlock(&dsq->lock);
4176 
4177 	if (locked_rq != rq) {
4178 		if (locked_rq)
4179 			raw_spin_rq_unlock(locked_rq);
4180 		raw_spin_rq_lock(rq);
4181 	}
4182 }
4183 
4184 static void process_deferred_reenq_users(struct rq *rq)
4185 {
4186 	lockdep_assert_rq_held(rq);
4187 
4188 	while (true) {
4189 		struct scx_dispatch_q *dsq;
4190 		u64 reenq_flags;
4191 
4192 		scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
4193 			struct scx_deferred_reenq_user *dru =
4194 				list_first_entry_or_null(&rq->scx.deferred_reenq_users,
4195 							 struct scx_deferred_reenq_user,
4196 							 node);
4197 			struct scx_dsq_pcpu *dsq_pcpu;
4198 
4199 			if (!dru)
4200 				return;
4201 
4202 			dsq_pcpu = container_of(dru, struct scx_dsq_pcpu,
4203 						deferred_reenq_user);
4204 			dsq = dsq_pcpu->dsq;
4205 			reenq_flags = dru->flags;
4206 			WRITE_ONCE(dru->flags, 0);
4207 			list_del_init(&dru->node);
4208 		}
4209 
4210 		/* see schedule_dsq_reenq() */
4211 		smp_mb();
4212 
4213 		BUG_ON(dsq->id & SCX_DSQ_FLAG_BUILTIN);
4214 		reenq_user(rq, dsq, reenq_flags);
4215 	}
4216 }
4217 
4218 static void run_deferred(struct rq *rq)
4219 {
4220 	process_ddsp_deferred_locals(rq);
4221 
4222 	if (!list_empty(&rq->scx.deferred_reenq_locals))
4223 		process_deferred_reenq_locals(rq);
4224 
4225 	if (!list_empty(&rq->scx.deferred_reenq_users))
4226 		process_deferred_reenq_users(rq);
4227 }
4228 
4229 #ifdef CONFIG_NO_HZ_FULL
4230 bool scx_can_stop_tick(struct rq *rq)
4231 {
4232 	struct task_struct *p = rq->curr;
4233 	struct scx_sched *sch = scx_task_sched(p);
4234 
4235 	if (p->sched_class != &ext_sched_class)
4236 		return true;
4237 
4238 	if (scx_bypassing(sch, cpu_of(rq)))
4239 		return false;
4240 
4241 	/*
4242 	 * @rq can dispatch from different DSQs, so we can't tell whether it
4243 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
4244 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
4245 	 */
4246 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4247 }
4248 #endif
4249 
4250 #ifdef CONFIG_EXT_GROUP_SCHED
4251 
4252 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem);
4253 static bool scx_cgroup_enabled;
4254 
4255 void scx_tg_init(struct task_group *tg)
4256 {
4257 	tg->scx.weight = CGROUP_WEIGHT_DFL;
4258 	tg->scx.bw_period_us = default_bw_period_us();
4259 	tg->scx.bw_quota_us = RUNTIME_INF;
4260 	tg->scx.idle = false;
4261 }
4262 
4263 int scx_tg_online(struct task_group *tg)
4264 {
4265 	struct scx_sched *sch = scx_root;
4266 	int ret = 0;
4267 
4268 	WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4269 
4270 	if (scx_cgroup_enabled) {
4271 		if (SCX_HAS_OP(sch, cgroup_init)) {
4272 			struct scx_cgroup_init_args args =
4273 				{ .weight = tg->scx.weight,
4274 				  .bw_period_us = tg->scx.bw_period_us,
4275 				  .bw_quota_us = tg->scx.bw_quota_us,
4276 				  .bw_burst_us = tg->scx.bw_burst_us };
4277 
4278 			ret = SCX_CALL_OP_RET(sch, cgroup_init,
4279 					      NULL, tg->css.cgroup, &args);
4280 			if (ret)
4281 				ret = ops_sanitize_err(sch, "cgroup_init", ret);
4282 		}
4283 		if (ret == 0)
4284 			tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4285 	} else {
4286 		tg->scx.flags |= SCX_TG_ONLINE;
4287 	}
4288 
4289 	return ret;
4290 }
4291 
4292 void scx_tg_offline(struct task_group *tg)
4293 {
4294 	struct scx_sched *sch = scx_root;
4295 
4296 	WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE));
4297 
4298 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
4299 	    (tg->scx.flags & SCX_TG_INITED))
4300 		SCX_CALL_OP(sch, cgroup_exit, NULL, tg->css.cgroup);
4301 	tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4302 }
4303 
4304 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4305 {
4306 	struct scx_sched *sch = scx_root;
4307 	struct cgroup_subsys_state *css;
4308 	struct task_struct *p;
4309 	int ret;
4310 
4311 	if (!scx_cgroup_enabled)
4312 		return 0;
4313 
4314 	cgroup_taskset_for_each(p, css, tset) {
4315 		struct cgroup *from = tg_cgrp(task_group(p));
4316 		struct cgroup *to = tg_cgrp(css_tg(css));
4317 
4318 		WARN_ON_ONCE(p->scx.cgrp_moving_from);
4319 
4320 		/*
4321 		 * sched_move_task() omits identity migrations. Let's match the
4322 		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4323 		 * always match one-to-one.
4324 		 */
4325 		if (from == to)
4326 			continue;
4327 
4328 		if (SCX_HAS_OP(sch, cgroup_prep_move)) {
4329 			ret = SCX_CALL_OP_RET(sch, cgroup_prep_move, NULL,
4330 					      p, from, css->cgroup);
4331 			if (ret)
4332 				goto err;
4333 		}
4334 
4335 		p->scx.cgrp_moving_from = from;
4336 	}
4337 
4338 	return 0;
4339 
4340 err:
4341 	cgroup_taskset_for_each(p, css, tset) {
4342 		if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4343 		    p->scx.cgrp_moving_from)
4344 			SCX_CALL_OP(sch, cgroup_cancel_move, NULL,
4345 				    p, p->scx.cgrp_moving_from, css->cgroup);
4346 		p->scx.cgrp_moving_from = NULL;
4347 	}
4348 
4349 	return ops_sanitize_err(sch, "cgroup_prep_move", ret);
4350 }
4351 
4352 void scx_cgroup_move_task(struct task_struct *p)
4353 {
4354 	struct scx_sched *sch = scx_root;
4355 
4356 	if (!scx_cgroup_enabled)
4357 		return;
4358 
4359 	/*
4360 	 * @p must have ops.cgroup_prep_move() called on it and thus
4361 	 * cgrp_moving_from set.
4362 	 */
4363 	if (SCX_HAS_OP(sch, cgroup_move) &&
4364 	    !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4365 		SCX_CALL_OP_TASK(sch, cgroup_move, task_rq(p),
4366 				 p, p->scx.cgrp_moving_from,
4367 				 tg_cgrp(task_group(p)));
4368 	p->scx.cgrp_moving_from = NULL;
4369 }
4370 
4371 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4372 {
4373 	struct scx_sched *sch = scx_root;
4374 	struct cgroup_subsys_state *css;
4375 	struct task_struct *p;
4376 
4377 	if (!scx_cgroup_enabled)
4378 		return;
4379 
4380 	cgroup_taskset_for_each(p, css, tset) {
4381 		if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4382 		    p->scx.cgrp_moving_from)
4383 			SCX_CALL_OP(sch, cgroup_cancel_move, NULL,
4384 				    p, p->scx.cgrp_moving_from, css->cgroup);
4385 		p->scx.cgrp_moving_from = NULL;
4386 	}
4387 }
4388 
4389 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4390 {
4391 	struct scx_sched *sch;
4392 
4393 	percpu_down_read(&scx_cgroup_ops_rwsem);
4394 	sch = scx_root;
4395 
4396 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
4397 	    tg->scx.weight != weight)
4398 		SCX_CALL_OP(sch, cgroup_set_weight, NULL, tg_cgrp(tg), weight);
4399 
4400 	tg->scx.weight = weight;
4401 
4402 	percpu_up_read(&scx_cgroup_ops_rwsem);
4403 }
4404 
4405 void scx_group_set_idle(struct task_group *tg, bool idle)
4406 {
4407 	struct scx_sched *sch;
4408 
4409 	percpu_down_read(&scx_cgroup_ops_rwsem);
4410 	sch = scx_root;
4411 
4412 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle))
4413 		SCX_CALL_OP(sch, cgroup_set_idle, NULL, tg_cgrp(tg), idle);
4414 
4415 	/* Update the task group's idle state */
4416 	tg->scx.idle = idle;
4417 
4418 	percpu_up_read(&scx_cgroup_ops_rwsem);
4419 }
4420 
4421 void scx_group_set_bandwidth(struct task_group *tg,
4422 			     u64 period_us, u64 quota_us, u64 burst_us)
4423 {
4424 	struct scx_sched *sch;
4425 
4426 	percpu_down_read(&scx_cgroup_ops_rwsem);
4427 	sch = scx_root;
4428 
4429 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) &&
4430 	    (tg->scx.bw_period_us != period_us ||
4431 	     tg->scx.bw_quota_us != quota_us ||
4432 	     tg->scx.bw_burst_us != burst_us))
4433 		SCX_CALL_OP(sch, cgroup_set_bandwidth, NULL,
4434 			    tg_cgrp(tg), period_us, quota_us, burst_us);
4435 
4436 	tg->scx.bw_period_us = period_us;
4437 	tg->scx.bw_quota_us = quota_us;
4438 	tg->scx.bw_burst_us = burst_us;
4439 
4440 	percpu_up_read(&scx_cgroup_ops_rwsem);
4441 }
4442 #endif	/* CONFIG_EXT_GROUP_SCHED */
4443 
4444 #if defined(CONFIG_EXT_GROUP_SCHED) || defined(CONFIG_EXT_SUB_SCHED)
4445 static struct cgroup *root_cgroup(void)
4446 {
4447 	return &cgrp_dfl_root.cgrp;
4448 }
4449 
4450 static void scx_cgroup_lock(void)
4451 {
4452 #ifdef CONFIG_EXT_GROUP_SCHED
4453 	percpu_down_write(&scx_cgroup_ops_rwsem);
4454 #endif
4455 	cgroup_lock();
4456 }
4457 
4458 static void scx_cgroup_unlock(void)
4459 {
4460 	cgroup_unlock();
4461 #ifdef CONFIG_EXT_GROUP_SCHED
4462 	percpu_up_write(&scx_cgroup_ops_rwsem);
4463 #endif
4464 }
4465 #else	/* CONFIG_EXT_GROUP_SCHED || CONFIG_EXT_SUB_SCHED */
4466 static struct cgroup *root_cgroup(void) { return NULL; }
4467 static void scx_cgroup_lock(void) {}
4468 static void scx_cgroup_unlock(void) {}
4469 #endif	/* CONFIG_EXT_GROUP_SCHED || CONFIG_EXT_SUB_SCHED */
4470 
4471 #ifdef CONFIG_EXT_SUB_SCHED
4472 static struct cgroup *sch_cgroup(struct scx_sched *sch)
4473 {
4474 	return sch->cgrp;
4475 }
4476 
4477 /* for each descendant of @cgrp including self, set ->scx_sched to @sch */
4478 static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch)
4479 {
4480 	struct cgroup *pos;
4481 	struct cgroup_subsys_state *css;
4482 
4483 	cgroup_for_each_live_descendant_pre(pos, css, cgrp)
4484 		rcu_assign_pointer(pos->scx_sched, sch);
4485 }
4486 #else	/* CONFIG_EXT_SUB_SCHED */
4487 static struct cgroup *sch_cgroup(struct scx_sched *sch) { return NULL; }
4488 static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch) {}
4489 #endif	/* CONFIG_EXT_SUB_SCHED */
4490 
4491 /*
4492  * Omitted operations:
4493  *
4494  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4495  *
4496  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4497  *   their current sched_class. Call them directly from sched core instead.
4498  */
4499 DEFINE_SCHED_CLASS(ext) = {
4500 	.enqueue_task		= enqueue_task_scx,
4501 	.dequeue_task		= dequeue_task_scx,
4502 	.yield_task		= yield_task_scx,
4503 	.yield_to_task		= yield_to_task_scx,
4504 
4505 	.wakeup_preempt		= wakeup_preempt_scx,
4506 
4507 	.pick_task		= pick_task_scx,
4508 
4509 	.put_prev_task		= put_prev_task_scx,
4510 	.set_next_task		= set_next_task_scx,
4511 
4512 	.select_task_rq		= select_task_rq_scx,
4513 	.task_woken		= task_woken_scx,
4514 	.set_cpus_allowed	= set_cpus_allowed_scx,
4515 
4516 	.rq_online		= rq_online_scx,
4517 	.rq_offline		= rq_offline_scx,
4518 
4519 	.task_tick		= task_tick_scx,
4520 
4521 	.switching_to		= switching_to_scx,
4522 	.switched_from		= switched_from_scx,
4523 	.switched_to		= switched_to_scx,
4524 	.reweight_task		= reweight_task_scx,
4525 	.prio_changed		= prio_changed_scx,
4526 
4527 	.update_curr		= update_curr_scx,
4528 
4529 #ifdef CONFIG_UCLAMP_TASK
4530 	.uclamp_enabled		= 1,
4531 #endif
4532 };
4533 
4534 static s32 init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id,
4535 		    struct scx_sched *sch)
4536 {
4537 	s32 cpu;
4538 
4539 	memset(dsq, 0, sizeof(*dsq));
4540 
4541 	raw_spin_lock_init(&dsq->lock);
4542 	INIT_LIST_HEAD(&dsq->list);
4543 	dsq->id = dsq_id;
4544 	dsq->sched = sch;
4545 
4546 	dsq->pcpu = alloc_percpu(struct scx_dsq_pcpu);
4547 	if (!dsq->pcpu)
4548 		return -ENOMEM;
4549 
4550 	for_each_possible_cpu(cpu) {
4551 		struct scx_dsq_pcpu *pcpu = per_cpu_ptr(dsq->pcpu, cpu);
4552 
4553 		pcpu->dsq = dsq;
4554 		INIT_LIST_HEAD(&pcpu->deferred_reenq_user.node);
4555 	}
4556 
4557 	return 0;
4558 }
4559 
4560 static void exit_dsq(struct scx_dispatch_q *dsq)
4561 {
4562 	s32 cpu;
4563 
4564 	for_each_possible_cpu(cpu) {
4565 		struct scx_dsq_pcpu *pcpu = per_cpu_ptr(dsq->pcpu, cpu);
4566 		struct scx_deferred_reenq_user *dru = &pcpu->deferred_reenq_user;
4567 		struct rq *rq = cpu_rq(cpu);
4568 
4569 		/*
4570 		 * There must have been a RCU grace period since the last
4571 		 * insertion and @dsq should be off the deferred list by now.
4572 		 */
4573 		if (WARN_ON_ONCE(!list_empty(&dru->node))) {
4574 			guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
4575 			list_del_init(&dru->node);
4576 		}
4577 	}
4578 
4579 	free_percpu(dsq->pcpu);
4580 }
4581 
4582 static void free_dsq_rcufn(struct rcu_head *rcu)
4583 {
4584 	struct scx_dispatch_q *dsq = container_of(rcu, struct scx_dispatch_q, rcu);
4585 
4586 	exit_dsq(dsq);
4587 	kfree(dsq);
4588 }
4589 
4590 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4591 {
4592 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4593 	struct scx_dispatch_q *dsq, *tmp_dsq;
4594 
4595 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4596 		call_rcu(&dsq->rcu, free_dsq_rcufn);
4597 }
4598 
4599 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4600 
4601 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
4602 {
4603 	struct scx_dispatch_q *dsq;
4604 	unsigned long flags;
4605 
4606 	rcu_read_lock();
4607 
4608 	dsq = find_user_dsq(sch, dsq_id);
4609 	if (!dsq)
4610 		goto out_unlock_rcu;
4611 
4612 	raw_spin_lock_irqsave(&dsq->lock, flags);
4613 
4614 	if (dsq->nr) {
4615 		scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4616 			  dsq->id, dsq->nr);
4617 		goto out_unlock_dsq;
4618 	}
4619 
4620 	if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
4621 				   dsq_hash_params))
4622 		goto out_unlock_dsq;
4623 
4624 	/*
4625 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4626 	 * queueing more tasks. As this function can be called from anywhere,
4627 	 * freeing is bounced through an irq work to avoid nesting RCU
4628 	 * operations inside scheduler locks.
4629 	 */
4630 	dsq->id = SCX_DSQ_INVALID;
4631 	if (llist_add(&dsq->free_node, &dsqs_to_free))
4632 		irq_work_queue(&free_dsq_irq_work);
4633 
4634 out_unlock_dsq:
4635 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
4636 out_unlock_rcu:
4637 	rcu_read_unlock();
4638 }
4639 
4640 #ifdef CONFIG_EXT_GROUP_SCHED
4641 static void scx_cgroup_exit(struct scx_sched *sch)
4642 {
4643 	struct cgroup_subsys_state *css;
4644 
4645 	scx_cgroup_enabled = false;
4646 
4647 	/*
4648 	 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
4649 	 * cgroups and exit all the inited ones, all online cgroups are exited.
4650 	 */
4651 	css_for_each_descendant_post(css, &root_task_group.css) {
4652 		struct task_group *tg = css_tg(css);
4653 
4654 		if (!(tg->scx.flags & SCX_TG_INITED))
4655 			continue;
4656 		tg->scx.flags &= ~SCX_TG_INITED;
4657 
4658 		if (!sch->ops.cgroup_exit)
4659 			continue;
4660 
4661 		SCX_CALL_OP(sch, cgroup_exit, NULL, css->cgroup);
4662 	}
4663 }
4664 
4665 static int scx_cgroup_init(struct scx_sched *sch)
4666 {
4667 	struct cgroup_subsys_state *css;
4668 	int ret;
4669 
4670 	/*
4671 	 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
4672 	 * cgroups and init, all online cgroups are initialized.
4673 	 */
4674 	css_for_each_descendant_pre(css, &root_task_group.css) {
4675 		struct task_group *tg = css_tg(css);
4676 		struct scx_cgroup_init_args args = {
4677 			.weight = tg->scx.weight,
4678 			.bw_period_us = tg->scx.bw_period_us,
4679 			.bw_quota_us = tg->scx.bw_quota_us,
4680 			.bw_burst_us = tg->scx.bw_burst_us,
4681 		};
4682 
4683 		if ((tg->scx.flags &
4684 		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4685 			continue;
4686 
4687 		if (!sch->ops.cgroup_init) {
4688 			tg->scx.flags |= SCX_TG_INITED;
4689 			continue;
4690 		}
4691 
4692 		ret = SCX_CALL_OP_RET(sch, cgroup_init, NULL,
4693 				      css->cgroup, &args);
4694 		if (ret) {
4695 			scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
4696 			return ret;
4697 		}
4698 		tg->scx.flags |= SCX_TG_INITED;
4699 	}
4700 
4701 	WARN_ON_ONCE(scx_cgroup_enabled);
4702 	scx_cgroup_enabled = true;
4703 
4704 	return 0;
4705 }
4706 
4707 #else
4708 static void scx_cgroup_exit(struct scx_sched *sch) {}
4709 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
4710 #endif
4711 
4712 
4713 /********************************************************************************
4714  * Sysfs interface and ops enable/disable.
4715  */
4716 
4717 #define SCX_ATTR(_name)								\
4718 	static struct kobj_attribute scx_attr_##_name = {			\
4719 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
4720 		.show = scx_attr_##_name##_show,				\
4721 	}
4722 
4723 static ssize_t scx_attr_state_show(struct kobject *kobj,
4724 				   struct kobj_attribute *ka, char *buf)
4725 {
4726 	return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]);
4727 }
4728 SCX_ATTR(state);
4729 
4730 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4731 					struct kobj_attribute *ka, char *buf)
4732 {
4733 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4734 }
4735 SCX_ATTR(switch_all);
4736 
4737 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4738 					 struct kobj_attribute *ka, char *buf)
4739 {
4740 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4741 }
4742 SCX_ATTR(nr_rejected);
4743 
4744 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4745 					 struct kobj_attribute *ka, char *buf)
4746 {
4747 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4748 }
4749 SCX_ATTR(hotplug_seq);
4750 
4751 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4752 					struct kobj_attribute *ka, char *buf)
4753 {
4754 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4755 }
4756 SCX_ATTR(enable_seq);
4757 
4758 static struct attribute *scx_global_attrs[] = {
4759 	&scx_attr_state.attr,
4760 	&scx_attr_switch_all.attr,
4761 	&scx_attr_nr_rejected.attr,
4762 	&scx_attr_hotplug_seq.attr,
4763 	&scx_attr_enable_seq.attr,
4764 	NULL,
4765 };
4766 
4767 static const struct attribute_group scx_global_attr_group = {
4768 	.attrs = scx_global_attrs,
4769 };
4770 
4771 static void free_pnode(struct scx_sched_pnode *pnode);
4772 static void free_exit_info(struct scx_exit_info *ei);
4773 
4774 static void scx_sched_free_rcu_work(struct work_struct *work)
4775 {
4776 	struct rcu_work *rcu_work = to_rcu_work(work);
4777 	struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
4778 	struct rhashtable_iter rht_iter;
4779 	struct scx_dispatch_q *dsq;
4780 	int cpu, node;
4781 
4782 	irq_work_sync(&sch->disable_irq_work);
4783 	kthread_destroy_worker(sch->helper);
4784 	timer_shutdown_sync(&sch->bypass_lb_timer);
4785 	free_cpumask_var(sch->bypass_lb_donee_cpumask);
4786 	free_cpumask_var(sch->bypass_lb_resched_cpumask);
4787 
4788 #ifdef CONFIG_EXT_SUB_SCHED
4789 	kfree(sch->cgrp_path);
4790 	if (sch_cgroup(sch))
4791 		cgroup_put(sch_cgroup(sch));
4792 #endif	/* CONFIG_EXT_SUB_SCHED */
4793 
4794 	for_each_possible_cpu(cpu) {
4795 		struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
4796 
4797 		/*
4798 		 * $sch would have entered bypass mode before the RCU grace
4799 		 * period. As that blocks new deferrals, all
4800 		 * deferred_reenq_local_node's must be off-list by now.
4801 		 */
4802 		WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local.node));
4803 
4804 		exit_dsq(bypass_dsq(sch, cpu));
4805 	}
4806 
4807 	free_percpu(sch->pcpu);
4808 
4809 	for_each_node_state(node, N_POSSIBLE)
4810 		free_pnode(sch->pnode[node]);
4811 	kfree(sch->pnode);
4812 
4813 	rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
4814 	do {
4815 		rhashtable_walk_start(&rht_iter);
4816 
4817 		while (!IS_ERR_OR_NULL((dsq = rhashtable_walk_next(&rht_iter))))
4818 			destroy_dsq(sch, dsq->id);
4819 
4820 		rhashtable_walk_stop(&rht_iter);
4821 	} while (dsq == ERR_PTR(-EAGAIN));
4822 	rhashtable_walk_exit(&rht_iter);
4823 
4824 	rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4825 	free_exit_info(sch->exit_info);
4826 	kfree(sch);
4827 }
4828 
4829 static void scx_kobj_release(struct kobject *kobj)
4830 {
4831 	struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4832 
4833 	INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
4834 	queue_rcu_work(system_dfl_wq, &sch->rcu_work);
4835 }
4836 
4837 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4838 				 struct kobj_attribute *ka, char *buf)
4839 {
4840 	struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4841 
4842 	return sysfs_emit(buf, "%s\n", sch->ops.name);
4843 }
4844 SCX_ATTR(ops);
4845 
4846 #define scx_attr_event_show(buf, at, events, kind) ({				\
4847 	sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind);		\
4848 })
4849 
4850 static ssize_t scx_attr_events_show(struct kobject *kobj,
4851 				    struct kobj_attribute *ka, char *buf)
4852 {
4853 	struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4854 	struct scx_event_stats events;
4855 	int at = 0;
4856 
4857 	scx_read_events(sch, &events);
4858 	at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
4859 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4860 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
4861 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
4862 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4863 	at += scx_attr_event_show(buf, at, &events, SCX_EV_REENQ_IMMED);
4864 	at += scx_attr_event_show(buf, at, &events, SCX_EV_REENQ_LOCAL_REPEAT);
4865 	at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL);
4866 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
4867 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
4868 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
4869 	at += scx_attr_event_show(buf, at, &events, SCX_EV_INSERT_NOT_OWNED);
4870 	at += scx_attr_event_show(buf, at, &events, SCX_EV_SUB_BYPASS_DISPATCH);
4871 	return at;
4872 }
4873 SCX_ATTR(events);
4874 
4875 static struct attribute *scx_sched_attrs[] = {
4876 	&scx_attr_ops.attr,
4877 	&scx_attr_events.attr,
4878 	NULL,
4879 };
4880 ATTRIBUTE_GROUPS(scx_sched);
4881 
4882 static const struct kobj_type scx_ktype = {
4883 	.release = scx_kobj_release,
4884 	.sysfs_ops = &kobj_sysfs_ops,
4885 	.default_groups = scx_sched_groups,
4886 };
4887 
4888 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4889 {
4890 	const struct scx_sched *sch;
4891 
4892 	/*
4893 	 * scx_uevent() can be reached by both scx_sched kobjects (scx_ktype)
4894 	 * and sub-scheduler kset kobjects (kset_ktype) through the parent
4895 	 * chain walk. Filter out the latter to avoid invalid casts.
4896 	 */
4897 	if (kobj->ktype != &scx_ktype)
4898 		return 0;
4899 
4900 	sch = container_of(kobj, struct scx_sched, kobj);
4901 
4902 	return add_uevent_var(env, "SCXOPS=%s", sch->ops.name);
4903 }
4904 
4905 static const struct kset_uevent_ops scx_uevent_ops = {
4906 	.uevent = scx_uevent,
4907 };
4908 
4909 /*
4910  * Used by sched_fork() and __setscheduler_prio() to pick the matching
4911  * sched_class. dl/rt are already handled.
4912  */
4913 bool task_should_scx(int policy)
4914 {
4915 	if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
4916 		return false;
4917 	if (READ_ONCE(scx_switching_all))
4918 		return true;
4919 	return policy == SCHED_EXT;
4920 }
4921 
4922 bool scx_allow_ttwu_queue(const struct task_struct *p)
4923 {
4924 	struct scx_sched *sch;
4925 
4926 	if (!scx_enabled())
4927 		return true;
4928 
4929 	sch = scx_task_sched(p);
4930 	if (unlikely(!sch))
4931 		return true;
4932 
4933 	if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
4934 		return true;
4935 
4936 	if (unlikely(p->sched_class != &ext_sched_class))
4937 		return true;
4938 
4939 	return false;
4940 }
4941 
4942 /**
4943  * handle_lockup - sched_ext common lockup handler
4944  * @fmt: format string
4945  *
4946  * Called on system stall or lockup condition and initiates abort of sched_ext
4947  * if enabled, which may resolve the reported lockup.
4948  *
4949  * Returns %true if sched_ext is enabled and abort was initiated, which may
4950  * resolve the lockup. %false if sched_ext is not enabled or abort was already
4951  * initiated by someone else.
4952  */
4953 static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
4954 {
4955 	struct scx_sched *sch;
4956 	va_list args;
4957 	bool ret;
4958 
4959 	guard(rcu)();
4960 
4961 	sch = rcu_dereference(scx_root);
4962 	if (unlikely(!sch))
4963 		return false;
4964 
4965 	switch (scx_enable_state()) {
4966 	case SCX_ENABLING:
4967 	case SCX_ENABLED:
4968 		va_start(args, fmt);
4969 		ret = scx_verror(sch, fmt, args);
4970 		va_end(args);
4971 		return ret;
4972 	default:
4973 		return false;
4974 	}
4975 }
4976 
4977 /**
4978  * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler
4979  *
4980  * While there are various reasons why RCU CPU stalls can occur on a system
4981  * that may not be caused by the current BPF scheduler, try kicking out the
4982  * current scheduler in an attempt to recover the system to a good state before
4983  * issuing panics.
4984  *
4985  * Returns %true if sched_ext is enabled and abort was initiated, which may
4986  * resolve the reported RCU stall. %false if sched_ext is not enabled or someone
4987  * else already initiated abort.
4988  */
4989 bool scx_rcu_cpu_stall(void)
4990 {
4991 	return handle_lockup("RCU CPU stall detected!");
4992 }
4993 
4994 /**
4995  * scx_softlockup - sched_ext softlockup handler
4996  * @dur_s: number of seconds of CPU stuck due to soft lockup
4997  *
4998  * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4999  * live-lock the system by making many CPUs target the same DSQ to the point
5000  * where soft-lockup detection triggers. This function is called from
5001  * soft-lockup watchdog when the triggering point is close and tries to unjam
5002  * the system and aborting the BPF scheduler.
5003  */
5004 void scx_softlockup(u32 dur_s)
5005 {
5006 	if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s))
5007 		return;
5008 
5009 	printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n",
5010 			smp_processor_id(), dur_s);
5011 }
5012 
5013 /*
5014  * scx_hardlockup() runs from NMI and eventually calls scx_claim_exit(),
5015  * which takes scx_sched_lock. scx_sched_lock isn't NMI-safe and grabbing
5016  * it from NMI context can lead to deadlocks. Defer via irq_work; the
5017  * disable path runs off irq_work anyway.
5018  */
5019 static atomic_t scx_hardlockup_cpu = ATOMIC_INIT(-1);
5020 
5021 static void scx_hardlockup_irq_workfn(struct irq_work *work)
5022 {
5023 	int cpu = atomic_xchg(&scx_hardlockup_cpu, -1);
5024 
5025 	if (cpu >= 0 && handle_lockup("hard lockup - CPU %d", cpu))
5026 		printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
5027 				cpu);
5028 }
5029 
5030 static DEFINE_IRQ_WORK(scx_hardlockup_irq_work, scx_hardlockup_irq_workfn);
5031 
5032 /**
5033  * scx_hardlockup - sched_ext hardlockup handler
5034  *
5035  * A poorly behaving BPF scheduler can trigger hard lockup by e.g. putting
5036  * numerous affinitized tasks in a single queue and directing all CPUs at it.
5037  * Try kicking out the current scheduler in an attempt to recover the system to
5038  * a good state before taking more drastic actions.
5039  *
5040  * Queues an irq_work; the handle_lockup() call happens in IRQ context (see
5041  * scx_hardlockup_irq_workfn).
5042  *
5043  * Returns %true if sched_ext is enabled and the work was queued, %false
5044  * otherwise.
5045  */
5046 bool scx_hardlockup(int cpu)
5047 {
5048 	if (!rcu_access_pointer(scx_root))
5049 		return false;
5050 
5051 	atomic_cmpxchg(&scx_hardlockup_cpu, -1, cpu);
5052 	irq_work_queue(&scx_hardlockup_irq_work);
5053 	return true;
5054 }
5055 
5056 static u32 bypass_lb_cpu(struct scx_sched *sch, s32 donor,
5057 			 struct cpumask *donee_mask, struct cpumask *resched_mask,
5058 			 u32 nr_donor_target, u32 nr_donee_target)
5059 {
5060 	struct rq *donor_rq = cpu_rq(donor);
5061 	struct scx_dispatch_q *donor_dsq = bypass_dsq(sch, donor);
5062 	struct task_struct *p, *n;
5063 	struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, donor_dsq, 0);
5064 	s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target;
5065 	u32 nr_balanced = 0, min_delta_us;
5066 
5067 	/*
5068 	 * All we want to guarantee is reasonable forward progress. No reason to
5069 	 * fine tune. Assuming every task on @donor_dsq runs their full slice,
5070 	 * consider offloading iff the total queued duration is over the
5071 	 * threshold.
5072 	 */
5073 	min_delta_us = READ_ONCE(scx_bypass_lb_intv_us) / SCX_BYPASS_LB_MIN_DELTA_DIV;
5074 	if (delta < DIV_ROUND_UP(min_delta_us, READ_ONCE(scx_slice_bypass_us)))
5075 		return 0;
5076 
5077 	raw_spin_rq_lock_irq(donor_rq);
5078 	raw_spin_lock(&donor_dsq->lock);
5079 	list_add(&cursor.node, &donor_dsq->list);
5080 resume:
5081 	n = container_of(&cursor, struct task_struct, scx.dsq_list);
5082 	n = nldsq_next_task(donor_dsq, n, false);
5083 
5084 	while ((p = n)) {
5085 		struct scx_dispatch_q *donee_dsq;
5086 		int donee;
5087 
5088 		n = nldsq_next_task(donor_dsq, n, false);
5089 
5090 		if (donor_dsq->nr <= nr_donor_target)
5091 			break;
5092 
5093 		if (cpumask_empty(donee_mask))
5094 			break;
5095 
5096 		/*
5097 		 * If an earlier pass placed @p on @donor_dsq from a different
5098 		 * CPU and the donee hasn't consumed it yet, @p is still on the
5099 		 * previous CPU and task_rq(@p) != @donor_rq. @p can't be moved
5100 		 * without its rq locked. Skip.
5101 		 */
5102 		if (task_rq(p) != donor_rq)
5103 			continue;
5104 
5105 		donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr);
5106 		if (donee >= nr_cpu_ids)
5107 			continue;
5108 
5109 		donee_dsq = bypass_dsq(sch, donee);
5110 
5111 		/*
5112 		 * $p's rq is not locked but $p's DSQ lock protects its
5113 		 * scheduling properties making this test safe.
5114 		 */
5115 		if (!task_can_run_on_remote_rq(sch, p, cpu_rq(donee), false))
5116 			continue;
5117 
5118 		/*
5119 		 * Moving $p from one non-local DSQ to another. The source rq
5120 		 * and DSQ are already locked. Do an abbreviated dequeue and
5121 		 * then perform enqueue without unlocking $donor_dsq.
5122 		 *
5123 		 * We don't want to drop and reacquire the lock on each
5124 		 * iteration as @donor_dsq can be very long and potentially
5125 		 * highly contended. Donee DSQs are less likely to be contended.
5126 		 * The nested locking is safe as only this LB moves tasks
5127 		 * between bypass DSQs.
5128 		 */
5129 		dispatch_dequeue_locked(p, donor_dsq);
5130 		dispatch_enqueue(sch, cpu_rq(donee), donee_dsq, p, SCX_ENQ_NESTED);
5131 
5132 		/*
5133 		 * $donee might have been idle and need to be woken up. No need
5134 		 * to be clever. Kick every CPU that receives tasks.
5135 		 */
5136 		cpumask_set_cpu(donee, resched_mask);
5137 
5138 		if (READ_ONCE(donee_dsq->nr) >= nr_donee_target)
5139 			cpumask_clear_cpu(donee, donee_mask);
5140 
5141 		nr_balanced++;
5142 		if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) {
5143 			list_move_tail(&cursor.node, &n->scx.dsq_list.node);
5144 			raw_spin_unlock(&donor_dsq->lock);
5145 			raw_spin_rq_unlock_irq(donor_rq);
5146 			cpu_relax();
5147 			raw_spin_rq_lock_irq(donor_rq);
5148 			raw_spin_lock(&donor_dsq->lock);
5149 			goto resume;
5150 		}
5151 	}
5152 
5153 	list_del_init(&cursor.node);
5154 	raw_spin_unlock(&donor_dsq->lock);
5155 	raw_spin_rq_unlock_irq(donor_rq);
5156 
5157 	return nr_balanced;
5158 }
5159 
5160 static void bypass_lb_node(struct scx_sched *sch, int node)
5161 {
5162 	const struct cpumask *node_mask = cpumask_of_node(node);
5163 	struct cpumask *donee_mask = sch->bypass_lb_donee_cpumask;
5164 	struct cpumask *resched_mask = sch->bypass_lb_resched_cpumask;
5165 	u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0;
5166 	u32 nr_target, nr_donor_target;
5167 	u32 before_min = U32_MAX, before_max = 0;
5168 	u32 after_min = U32_MAX, after_max = 0;
5169 	int cpu;
5170 
5171 	/* count the target tasks and CPUs */
5172 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5173 		u32 nr = READ_ONCE(bypass_dsq(sch, cpu)->nr);
5174 
5175 		nr_tasks += nr;
5176 		nr_cpus++;
5177 
5178 		before_min = min(nr, before_min);
5179 		before_max = max(nr, before_max);
5180 	}
5181 
5182 	if (!nr_cpus)
5183 		return;
5184 
5185 	/*
5186 	 * We don't want CPUs to have more than $nr_donor_target tasks and
5187 	 * balancing to fill donee CPUs upto $nr_target. Once targets are
5188 	 * calculated, find the donee CPUs.
5189 	 */
5190 	nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus);
5191 	nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100);
5192 
5193 	cpumask_clear(donee_mask);
5194 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5195 		if (READ_ONCE(bypass_dsq(sch, cpu)->nr) < nr_target)
5196 			cpumask_set_cpu(cpu, donee_mask);
5197 	}
5198 
5199 	/* iterate !donee CPUs and see if they should be offloaded */
5200 	cpumask_clear(resched_mask);
5201 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5202 		if (cpumask_empty(donee_mask))
5203 			break;
5204 		if (cpumask_test_cpu(cpu, donee_mask))
5205 			continue;
5206 		if (READ_ONCE(bypass_dsq(sch, cpu)->nr) <= nr_donor_target)
5207 			continue;
5208 
5209 		nr_balanced += bypass_lb_cpu(sch, cpu, donee_mask, resched_mask,
5210 					     nr_donor_target, nr_target);
5211 	}
5212 
5213 	for_each_cpu(cpu, resched_mask)
5214 		resched_cpu(cpu);
5215 
5216 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5217 		u32 nr = READ_ONCE(bypass_dsq(sch, cpu)->nr);
5218 
5219 		after_min = min(nr, after_min);
5220 		after_max = max(nr, after_max);
5221 
5222 	}
5223 
5224 	trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced,
5225 				  before_min, before_max, after_min, after_max);
5226 }
5227 
5228 /*
5229  * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine
5230  * is over-saturated and the BPF scheduler skewed tasks into few CPUs, some
5231  * bypass DSQs can be overloaded. If there are enough tasks to saturate other
5232  * lightly loaded CPUs, such imbalance can lead to very high execution latency
5233  * on the overloaded CPUs and thus to hung tasks and RCU stalls. To avoid such
5234  * outcomes, a simple load balancing mechanism is implemented by the following
5235  * timer which runs periodically while bypass mode is in effect.
5236  */
5237 static void scx_bypass_lb_timerfn(struct timer_list *timer)
5238 {
5239 	struct scx_sched *sch = container_of(timer, struct scx_sched, bypass_lb_timer);
5240 	int node;
5241 	u32 intv_us;
5242 
5243 	if (!bypass_dsp_enabled(sch))
5244 		return;
5245 
5246 	for_each_node_with_cpus(node)
5247 		bypass_lb_node(sch, node);
5248 
5249 	intv_us = READ_ONCE(scx_bypass_lb_intv_us);
5250 	if (intv_us)
5251 		mod_timer(timer, jiffies + usecs_to_jiffies(intv_us));
5252 }
5253 
5254 static bool inc_bypass_depth(struct scx_sched *sch)
5255 {
5256 	lockdep_assert_held(&scx_bypass_lock);
5257 
5258 	WARN_ON_ONCE(sch->bypass_depth < 0);
5259 	WRITE_ONCE(sch->bypass_depth, sch->bypass_depth + 1);
5260 	if (sch->bypass_depth != 1)
5261 		return false;
5262 
5263 	WRITE_ONCE(sch->slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
5264 	sch->bypass_timestamp = ktime_get_ns();
5265 	scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
5266 	return true;
5267 }
5268 
5269 static bool dec_bypass_depth(struct scx_sched *sch)
5270 {
5271 	lockdep_assert_held(&scx_bypass_lock);
5272 
5273 	WARN_ON_ONCE(sch->bypass_depth < 1);
5274 	WRITE_ONCE(sch->bypass_depth, sch->bypass_depth - 1);
5275 	if (sch->bypass_depth != 0)
5276 		return false;
5277 
5278 	WRITE_ONCE(sch->slice_dfl, SCX_SLICE_DFL);
5279 	scx_add_event(sch, SCX_EV_BYPASS_DURATION,
5280 		      ktime_get_ns() - sch->bypass_timestamp);
5281 	return true;
5282 }
5283 
5284 static void enable_bypass_dsp(struct scx_sched *sch)
5285 {
5286 	struct scx_sched *host = scx_parent(sch) ?: sch;
5287 	u32 intv_us = READ_ONCE(scx_bypass_lb_intv_us);
5288 	s32 ret;
5289 
5290 	/*
5291 	 * @sch->bypass_depth transitioning from 0 to 1 triggers enabling.
5292 	 * Shouldn't stagger.
5293 	 */
5294 	if (WARN_ON_ONCE(test_and_set_bit(0, &sch->bypass_dsp_claim)))
5295 		return;
5296 
5297 	/*
5298 	 * When a sub-sched bypasses, its tasks are queued on the bypass DSQs of
5299 	 * the nearest non-bypassing ancestor or root. As enable_bypass_dsp() is
5300 	 * called iff @sch is not already bypassed due to an ancestor bypassing,
5301 	 * we can assume that the parent is not bypassing and thus will be the
5302 	 * host of the bypass DSQs.
5303 	 *
5304 	 * While the situation may change in the future, the following
5305 	 * guarantees that the nearest non-bypassing ancestor or root has bypass
5306 	 * dispatch enabled while a descendant is bypassing, which is all that's
5307 	 * required.
5308 	 *
5309 	 * bypass_dsp_enabled() test is used to determine whether to enter the
5310 	 * bypass dispatch handling path from both bypassing and hosting scheds.
5311 	 * Bump enable depth on both @sch and bypass dispatch host.
5312 	 */
5313 	ret = atomic_inc_return(&sch->bypass_dsp_enable_depth);
5314 	WARN_ON_ONCE(ret <= 0);
5315 
5316 	if (host != sch) {
5317 		ret = atomic_inc_return(&host->bypass_dsp_enable_depth);
5318 		WARN_ON_ONCE(ret <= 0);
5319 	}
5320 
5321 	/*
5322 	 * The LB timer will stop running if bypass dispatch is disabled. Start
5323 	 * after enabling bypass dispatch.
5324 	 */
5325 	if (intv_us && !timer_pending(&host->bypass_lb_timer))
5326 		mod_timer(&host->bypass_lb_timer,
5327 			  jiffies + usecs_to_jiffies(intv_us));
5328 }
5329 
5330 /* may be called without holding scx_bypass_lock */
5331 static void disable_bypass_dsp(struct scx_sched *sch)
5332 {
5333 	s32 ret;
5334 
5335 	if (!test_and_clear_bit(0, &sch->bypass_dsp_claim))
5336 		return;
5337 
5338 	ret = atomic_dec_return(&sch->bypass_dsp_enable_depth);
5339 	WARN_ON_ONCE(ret < 0);
5340 
5341 	if (scx_parent(sch)) {
5342 		ret = atomic_dec_return(&scx_parent(sch)->bypass_dsp_enable_depth);
5343 		WARN_ON_ONCE(ret < 0);
5344 	}
5345 }
5346 
5347 /**
5348  * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
5349  * @sch: sched to bypass
5350  * @bypass: true for bypass, false for unbypass
5351  *
5352  * Bypassing guarantees that all runnable tasks make forward progress without
5353  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
5354  * be held by tasks that the BPF scheduler is forgetting to run, which
5355  * unfortunately also excludes toggling the static branches.
5356  *
5357  * Let's work around by overriding a couple ops and modifying behaviors based on
5358  * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
5359  * to force global FIFO scheduling.
5360  *
5361  * - ops.select_cpu() is ignored and the default select_cpu() is used.
5362  *
5363  * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
5364  *   %SCX_OPS_ENQ_LAST is also ignored.
5365  *
5366  * - ops.dispatch() is ignored.
5367  *
5368  * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
5369  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
5370  *   the tail of the queue with core_sched_at touched.
5371  *
5372  * - pick_next_task() suppresses zero slice warning.
5373  *
5374  * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM
5375  *   operations.
5376  *
5377  * - scx_prio_less() reverts to the default core_sched_at order.
5378  */
5379 static void scx_bypass(struct scx_sched *sch, bool bypass)
5380 {
5381 	struct scx_sched *pos;
5382 	unsigned long flags;
5383 	int cpu;
5384 
5385 	raw_spin_lock_irqsave(&scx_bypass_lock, flags);
5386 
5387 	if (bypass) {
5388 		if (!inc_bypass_depth(sch))
5389 			goto unlock;
5390 
5391 		enable_bypass_dsp(sch);
5392 	} else {
5393 		if (!dec_bypass_depth(sch))
5394 			goto unlock;
5395 	}
5396 
5397 	/*
5398 	 * Bypass state is propagated to all descendants - an scx_sched bypasses
5399 	 * if itself or any of its ancestors are in bypass mode.
5400 	 */
5401 	raw_spin_lock(&scx_sched_lock);
5402 	scx_for_each_descendant_pre(pos, sch) {
5403 		if (pos == sch)
5404 			continue;
5405 		if (bypass)
5406 			inc_bypass_depth(pos);
5407 		else
5408 			dec_bypass_depth(pos);
5409 	}
5410 	raw_spin_unlock(&scx_sched_lock);
5411 
5412 	/*
5413 	 * No task property is changing. We just need to make sure all currently
5414 	 * queued tasks are re-queued according to the new scx_bypassing()
5415 	 * state. As an optimization, walk each rq's runnable_list instead of
5416 	 * the scx_tasks list.
5417 	 *
5418 	 * This function can't trust the scheduler and thus can't use
5419 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
5420 	 */
5421 	for_each_possible_cpu(cpu) {
5422 		struct rq *rq = cpu_rq(cpu);
5423 		struct task_struct *p, *n;
5424 
5425 		raw_spin_rq_lock(rq);
5426 		raw_spin_lock(&scx_sched_lock);
5427 
5428 		scx_for_each_descendant_pre(pos, sch) {
5429 			struct scx_sched_pcpu *pcpu = per_cpu_ptr(pos->pcpu, cpu);
5430 
5431 			if (pos->bypass_depth)
5432 				pcpu->flags |= SCX_SCHED_PCPU_BYPASSING;
5433 			else
5434 				pcpu->flags &= ~SCX_SCHED_PCPU_BYPASSING;
5435 		}
5436 
5437 		raw_spin_unlock(&scx_sched_lock);
5438 
5439 		/*
5440 		 * We need to guarantee that no tasks are on the BPF scheduler
5441 		 * while bypassing. Either we see enabled or the enable path
5442 		 * sees scx_bypassing() before moving tasks to SCX.
5443 		 */
5444 		if (!scx_enabled()) {
5445 			raw_spin_rq_unlock(rq);
5446 			continue;
5447 		}
5448 
5449 		/*
5450 		 * The use of list_for_each_entry_safe_reverse() is required
5451 		 * because each task is going to be removed from and added back
5452 		 * to the runnable_list during iteration. Because they're added
5453 		 * to the tail of the list, safe reverse iteration can still
5454 		 * visit all nodes.
5455 		 */
5456 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
5457 						 scx.runnable_node) {
5458 			if (!scx_is_descendant(scx_task_sched(p), sch))
5459 				continue;
5460 
5461 			/* cycling deq/enq is enough, see the function comment */
5462 			scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
5463 				/* nothing */ ;
5464 			}
5465 		}
5466 
5467 		/* resched to restore ticks and idle state */
5468 		if (cpu_online(cpu) || cpu == smp_processor_id())
5469 			resched_curr(rq);
5470 
5471 		raw_spin_rq_unlock(rq);
5472 	}
5473 
5474 	/* disarming must come after moving all tasks out of the bypass DSQs */
5475 	if (!bypass)
5476 		disable_bypass_dsp(sch);
5477 unlock:
5478 	raw_spin_unlock_irqrestore(&scx_bypass_lock, flags);
5479 }
5480 
5481 static void free_exit_info(struct scx_exit_info *ei)
5482 {
5483 	kvfree(ei->dump);
5484 	kfree(ei->msg);
5485 	kfree(ei->bt);
5486 	kfree(ei);
5487 }
5488 
5489 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
5490 {
5491 	struct scx_exit_info *ei;
5492 
5493 	ei = kzalloc_obj(*ei);
5494 	if (!ei)
5495 		return NULL;
5496 
5497 	ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN);
5498 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
5499 	ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
5500 
5501 	if (!ei->bt || !ei->msg || !ei->dump) {
5502 		free_exit_info(ei);
5503 		return NULL;
5504 	}
5505 
5506 	return ei;
5507 }
5508 
5509 static const char *scx_exit_reason(enum scx_exit_kind kind)
5510 {
5511 	switch (kind) {
5512 	case SCX_EXIT_UNREG:
5513 		return "unregistered from user space";
5514 	case SCX_EXIT_UNREG_BPF:
5515 		return "unregistered from BPF";
5516 	case SCX_EXIT_UNREG_KERN:
5517 		return "unregistered from the main kernel";
5518 	case SCX_EXIT_SYSRQ:
5519 		return "disabled by sysrq-S";
5520 	case SCX_EXIT_PARENT:
5521 		return "parent exiting";
5522 	case SCX_EXIT_ERROR:
5523 		return "runtime error";
5524 	case SCX_EXIT_ERROR_BPF:
5525 		return "scx_bpf_error";
5526 	case SCX_EXIT_ERROR_STALL:
5527 		return "runnable task stall";
5528 	default:
5529 		return "<UNKNOWN>";
5530 	}
5531 }
5532 
5533 static void free_kick_syncs(void)
5534 {
5535 	int cpu;
5536 
5537 	for_each_possible_cpu(cpu) {
5538 		struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
5539 		struct scx_kick_syncs *to_free;
5540 
5541 		to_free = rcu_replace_pointer(*ksyncs, NULL, true);
5542 		if (to_free)
5543 			kvfree_rcu(to_free, rcu);
5544 	}
5545 }
5546 
5547 static void refresh_watchdog(void)
5548 {
5549 	struct scx_sched *sch;
5550 	unsigned long intv = ULONG_MAX;
5551 
5552 	/* take the shortest timeout and use its half for watchdog interval */
5553 	rcu_read_lock();
5554 	list_for_each_entry_rcu(sch, &scx_sched_all, all)
5555 		intv = max(min(intv, sch->watchdog_timeout / 2), 1);
5556 	rcu_read_unlock();
5557 
5558 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5559 	WRITE_ONCE(scx_watchdog_interval, intv);
5560 
5561 	if (intv < ULONG_MAX)
5562 		mod_delayed_work(system_dfl_wq, &scx_watchdog_work, intv);
5563 	else
5564 		cancel_delayed_work_sync(&scx_watchdog_work);
5565 }
5566 
5567 static s32 scx_link_sched(struct scx_sched *sch)
5568 {
5569 	scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
5570 #ifdef CONFIG_EXT_SUB_SCHED
5571 		struct scx_sched *parent = scx_parent(sch);
5572 		s32 ret;
5573 
5574 		if (parent) {
5575 			/*
5576 			 * scx_claim_exit() propagates exit_kind transition to
5577 			 * its sub-scheds while holding scx_sched_lock - either
5578 			 * we can see the parent's non-NONE exit_kind or the
5579 			 * parent can shoot us down.
5580 			 */
5581 			if (atomic_read(&parent->exit_kind) != SCX_EXIT_NONE) {
5582 				scx_error(sch, "parent disabled");
5583 				return -ENOENT;
5584 			}
5585 
5586 			ret = rhashtable_lookup_insert_fast(&scx_sched_hash,
5587 					&sch->hash_node, scx_sched_hash_params);
5588 			if (ret) {
5589 				scx_error(sch, "failed to insert into scx_sched_hash (%d)", ret);
5590 				return ret;
5591 			}
5592 
5593 			list_add_tail(&sch->sibling, &parent->children);
5594 		}
5595 #endif	/* CONFIG_EXT_SUB_SCHED */
5596 
5597 		list_add_tail_rcu(&sch->all, &scx_sched_all);
5598 	}
5599 
5600 	refresh_watchdog();
5601 	return 0;
5602 }
5603 
5604 static void scx_unlink_sched(struct scx_sched *sch)
5605 {
5606 	scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
5607 #ifdef CONFIG_EXT_SUB_SCHED
5608 		if (scx_parent(sch)) {
5609 			rhashtable_remove_fast(&scx_sched_hash, &sch->hash_node,
5610 					       scx_sched_hash_params);
5611 			list_del_init(&sch->sibling);
5612 		}
5613 #endif	/* CONFIG_EXT_SUB_SCHED */
5614 		list_del_rcu(&sch->all);
5615 	}
5616 
5617 	refresh_watchdog();
5618 }
5619 
5620 /*
5621  * Called to disable future dumps and wait for in-progress one while disabling
5622  * @sch. Once @sch becomes empty during disable, there's no point in dumping it.
5623  * This prevents calling dump ops on a dead sch.
5624  */
5625 static void scx_disable_dump(struct scx_sched *sch)
5626 {
5627 	guard(raw_spinlock_irqsave)(&scx_dump_lock);
5628 	sch->dump_disabled = true;
5629 }
5630 
5631 #ifdef CONFIG_EXT_SUB_SCHED
5632 static DECLARE_WAIT_QUEUE_HEAD(scx_unlink_waitq);
5633 
5634 static void drain_descendants(struct scx_sched *sch)
5635 {
5636 	/*
5637 	 * Child scheds that finished the critical part of disabling will take
5638 	 * themselves off @sch->children. Wait for it to drain. As propagation
5639 	 * is recursive, empty @sch->children means that all proper descendant
5640 	 * scheds reached unlinking stage.
5641 	 */
5642 	wait_event(scx_unlink_waitq, list_empty(&sch->children));
5643 }
5644 
5645 static void scx_fail_parent(struct scx_sched *sch,
5646 			    struct task_struct *failed, s32 fail_code)
5647 {
5648 	struct scx_sched *parent = scx_parent(sch);
5649 	struct scx_task_iter sti;
5650 	struct task_struct *p;
5651 
5652 	scx_error(parent, "ops.init_task() failed (%d) for %s[%d] while disabling a sub-scheduler",
5653 		  fail_code, failed->comm, failed->pid);
5654 
5655 	/*
5656 	 * Once $parent is bypassed, it's safe to put SCX_TASK_NONE tasks into
5657 	 * it. This may cause downstream failures on the BPF side but $parent is
5658 	 * dying anyway.
5659 	 */
5660 	scx_bypass(parent, true);
5661 
5662 	scx_task_iter_start(&sti, sch->cgrp);
5663 	while ((p = scx_task_iter_next_locked(&sti))) {
5664 		if (scx_task_on_sched(parent, p))
5665 			continue;
5666 
5667 		scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
5668 			scx_disable_and_exit_task(sch, p);
5669 			rcu_assign_pointer(p->scx.sched, parent);
5670 		}
5671 	}
5672 	scx_task_iter_stop(&sti);
5673 }
5674 
5675 static void scx_sub_disable(struct scx_sched *sch)
5676 {
5677 	struct scx_sched *parent = scx_parent(sch);
5678 	struct scx_task_iter sti;
5679 	struct task_struct *p;
5680 	int ret;
5681 
5682 	/*
5683 	 * Guarantee forward progress and wait for descendants to be disabled.
5684 	 * To limit disruptions, $parent is not bypassed. Tasks are fully
5685 	 * prepped and then inserted back into $parent.
5686 	 */
5687 	scx_bypass(sch, true);
5688 	drain_descendants(sch);
5689 
5690 	/*
5691 	 * Here, every runnable task is guaranteed to make forward progress and
5692 	 * we can safely use blocking synchronization constructs. Actually
5693 	 * disable ops.
5694 	 */
5695 	mutex_lock(&scx_enable_mutex);
5696 	percpu_down_write(&scx_fork_rwsem);
5697 	scx_cgroup_lock();
5698 
5699 	set_cgroup_sched(sch_cgroup(sch), parent);
5700 
5701 	scx_task_iter_start(&sti, sch->cgrp);
5702 	while ((p = scx_task_iter_next_locked(&sti))) {
5703 		struct rq *rq;
5704 		struct rq_flags rf;
5705 
5706 		/* filter out duplicate visits */
5707 		if (scx_task_on_sched(parent, p))
5708 			continue;
5709 
5710 		/*
5711 		 * By the time control reaches here, all descendant schedulers
5712 		 * should already have been disabled.
5713 		 */
5714 		WARN_ON_ONCE(!scx_task_on_sched(sch, p));
5715 
5716 		/*
5717 		 * If $p is about to be freed, nothing prevents $sch from
5718 		 * unloading before $p reaches sched_ext_free(). Disable and
5719 		 * exit $p right away.
5720 		 */
5721 		if (!tryget_task_struct(p)) {
5722 			scx_disable_and_exit_task(sch, p);
5723 			continue;
5724 		}
5725 
5726 		scx_task_iter_unlock(&sti);
5727 
5728 		/*
5729 		 * $p is READY or ENABLED on @sch. Initialize for $parent,
5730 		 * disable and exit from @sch, and then switch over to $parent.
5731 		 *
5732 		 * If a task fails to initialize for $parent, the only available
5733 		 * action is disabling $parent too. While this allows disabling
5734 		 * of a child sched to cause the parent scheduler to fail, the
5735 		 * failure can only originate from ops.init_task() of the
5736 		 * parent. A child can't directly affect the parent through its
5737 		 * own failures.
5738 		 */
5739 		ret = __scx_init_task(parent, p, false);
5740 		if (ret) {
5741 			scx_fail_parent(sch, p, ret);
5742 			put_task_struct(p);
5743 			break;
5744 		}
5745 
5746 		rq = task_rq_lock(p, &rf);
5747 		scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
5748 			/*
5749 			 * $p is initialized for $parent and still attached to
5750 			 * @sch. Disable and exit for @sch, switch over to
5751 			 * $parent, override the state to READY to account for
5752 			 * $p having already been initialized, and then enable.
5753 			 */
5754 			scx_disable_and_exit_task(sch, p);
5755 			scx_set_task_state(p, SCX_TASK_INIT);
5756 			rcu_assign_pointer(p->scx.sched, parent);
5757 			scx_set_task_state(p, SCX_TASK_READY);
5758 			scx_enable_task(parent, p);
5759 		}
5760 		task_rq_unlock(rq, p, &rf);
5761 
5762 		put_task_struct(p);
5763 	}
5764 	scx_task_iter_stop(&sti);
5765 
5766 	scx_disable_dump(sch);
5767 
5768 	scx_cgroup_unlock();
5769 	percpu_up_write(&scx_fork_rwsem);
5770 
5771 	/*
5772 	 * All tasks are moved off of @sch but there may still be on-going
5773 	 * operations (e.g. ops.select_cpu()). Drain them by flushing RCU. Use
5774 	 * the expedited version as ancestors may be waiting in bypass mode.
5775 	 * Also, tell the parent that there is no need to keep running bypass
5776 	 * DSQs for us.
5777 	 */
5778 	synchronize_rcu_expedited();
5779 	disable_bypass_dsp(sch);
5780 
5781 	scx_unlink_sched(sch);
5782 
5783 	mutex_unlock(&scx_enable_mutex);
5784 
5785 	/*
5786 	 * @sch is now unlinked from the parent's children list. Notify and call
5787 	 * ops.sub_detach/exit(). Note that ops.sub_detach/exit() must be called
5788 	 * after unlinking and releasing all locks. See scx_claim_exit().
5789 	 */
5790 	wake_up_all(&scx_unlink_waitq);
5791 
5792 	if (parent->ops.sub_detach && sch->sub_attached) {
5793 		struct scx_sub_detach_args sub_detach_args = {
5794 			.ops = &sch->ops,
5795 			.cgroup_path = sch->cgrp_path,
5796 		};
5797 		SCX_CALL_OP(parent, sub_detach, NULL,
5798 			    &sub_detach_args);
5799 	}
5800 
5801 	if (sch->ops.exit)
5802 		SCX_CALL_OP(sch, exit, NULL, sch->exit_info);
5803 	if (sch->sub_kset)
5804 		kset_unregister(sch->sub_kset);
5805 	kobject_del(&sch->kobj);
5806 }
5807 #else	/* CONFIG_EXT_SUB_SCHED */
5808 static void drain_descendants(struct scx_sched *sch) { }
5809 static void scx_sub_disable(struct scx_sched *sch) { }
5810 #endif	/* CONFIG_EXT_SUB_SCHED */
5811 
5812 static void scx_root_disable(struct scx_sched *sch)
5813 {
5814 	struct scx_exit_info *ei = sch->exit_info;
5815 	struct scx_task_iter sti;
5816 	struct task_struct *p;
5817 	int cpu;
5818 
5819 	/* guarantee forward progress and wait for descendants to be disabled */
5820 	scx_bypass(sch, true);
5821 	drain_descendants(sch);
5822 
5823 	switch (scx_set_enable_state(SCX_DISABLING)) {
5824 	case SCX_DISABLING:
5825 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
5826 		break;
5827 	case SCX_DISABLED:
5828 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
5829 			sch->exit_info->msg);
5830 		WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
5831 		goto done;
5832 	default:
5833 		break;
5834 	}
5835 
5836 	/*
5837 	 * Here, every runnable task is guaranteed to make forward progress and
5838 	 * we can safely use blocking synchronization constructs. Actually
5839 	 * disable ops.
5840 	 */
5841 	mutex_lock(&scx_enable_mutex);
5842 
5843 	static_branch_disable(&__scx_switched_all);
5844 	WRITE_ONCE(scx_switching_all, false);
5845 
5846 	/*
5847 	 * Shut down cgroup support before tasks so that the cgroup attach path
5848 	 * doesn't race against scx_disable_and_exit_task().
5849 	 */
5850 	scx_cgroup_lock();
5851 	scx_cgroup_exit(sch);
5852 	scx_cgroup_unlock();
5853 
5854 	/*
5855 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
5856 	 * must be switched out and exited synchronously.
5857 	 */
5858 	percpu_down_write(&scx_fork_rwsem);
5859 
5860 	scx_init_task_enabled = false;
5861 
5862 	scx_task_iter_start(&sti, NULL);
5863 	while ((p = scx_task_iter_next_locked(&sti))) {
5864 		unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
5865 		const struct sched_class *old_class = p->sched_class;
5866 		const struct sched_class *new_class = scx_setscheduler_class(p);
5867 
5868 		update_rq_clock(task_rq(p));
5869 
5870 		if (old_class != new_class)
5871 			queue_flags |= DEQUEUE_CLASS;
5872 
5873 		scoped_guard (sched_change, p, queue_flags) {
5874 			p->sched_class = new_class;
5875 		}
5876 
5877 		scx_disable_and_exit_task(scx_task_sched(p), p);
5878 	}
5879 	scx_task_iter_stop(&sti);
5880 
5881 	scx_disable_dump(sch);
5882 
5883 	scx_cgroup_lock();
5884 	set_cgroup_sched(sch_cgroup(sch), NULL);
5885 	scx_cgroup_unlock();
5886 
5887 	percpu_up_write(&scx_fork_rwsem);
5888 
5889 	/*
5890 	 * Invalidate all the rq clocks to prevent getting outdated
5891 	 * rq clocks from a previous scx scheduler.
5892 	 */
5893 	for_each_possible_cpu(cpu) {
5894 		struct rq *rq = cpu_rq(cpu);
5895 		scx_rq_clock_invalidate(rq);
5896 	}
5897 
5898 	/* no task is on scx, turn off all the switches and flush in-progress calls */
5899 	static_branch_disable(&__scx_enabled);
5900 	bitmap_zero(sch->has_op, SCX_OPI_END);
5901 	scx_idle_disable();
5902 	synchronize_rcu();
5903 
5904 	if (ei->kind >= SCX_EXIT_ERROR) {
5905 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5906 		       sch->ops.name, ei->reason);
5907 
5908 		if (ei->msg[0] != '\0')
5909 			pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
5910 #ifdef CONFIG_STACKTRACE
5911 		stack_trace_print(ei->bt, ei->bt_len, 2);
5912 #endif
5913 	} else {
5914 		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5915 			sch->ops.name, ei->reason);
5916 	}
5917 
5918 	if (sch->ops.exit)
5919 		SCX_CALL_OP(sch, exit, NULL, ei);
5920 
5921 	scx_unlink_sched(sch);
5922 
5923 	/*
5924 	 * scx_root clearing must be inside cpus_read_lock(). See
5925 	 * handle_hotplug().
5926 	 */
5927 	cpus_read_lock();
5928 	RCU_INIT_POINTER(scx_root, NULL);
5929 	cpus_read_unlock();
5930 
5931 	/*
5932 	 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs
5933 	 * could observe an object of the same name still in the hierarchy when
5934 	 * the next scheduler is loaded.
5935 	 */
5936 #ifdef CONFIG_EXT_SUB_SCHED
5937 	if (sch->sub_kset)
5938 		kset_unregister(sch->sub_kset);
5939 #endif
5940 	kobject_del(&sch->kobj);
5941 
5942 	free_kick_syncs();
5943 
5944 	mutex_unlock(&scx_enable_mutex);
5945 
5946 	WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
5947 done:
5948 	scx_bypass(sch, false);
5949 }
5950 
5951 /*
5952  * Claim the exit on @sch. The caller must ensure that the helper kthread work
5953  * is kicked before the current task can be preempted. Once exit_kind is
5954  * claimed, scx_error() can no longer trigger, so if the current task gets
5955  * preempted and the BPF scheduler fails to schedule it back, the helper work
5956  * will never be kicked and the whole system can wedge.
5957  */
5958 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
5959 {
5960 	int none = SCX_EXIT_NONE;
5961 
5962 	lockdep_assert_preemption_disabled();
5963 
5964 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5965 		kind = SCX_EXIT_ERROR;
5966 
5967 	if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
5968 		return false;
5969 
5970 	/*
5971 	 * Some CPUs may be trapped in the dispatch paths. Set the aborting
5972 	 * flag to break potential live-lock scenarios, ensuring we can
5973 	 * successfully reach scx_bypass().
5974 	 */
5975 	WRITE_ONCE(sch->aborting, true);
5976 
5977 	/*
5978 	 * Propagate exits to descendants immediately. Each has a dedicated
5979 	 * helper kthread and can run in parallel. While most of disabling is
5980 	 * serialized, running them in separate threads allows parallelizing
5981 	 * ops.exit(), which can take arbitrarily long prolonging bypass mode.
5982 	 *
5983 	 * To guarantee forward progress, this propagation must be in-line so
5984 	 * that ->aborting is synchronously asserted for all sub-scheds. The
5985 	 * propagation is also the interlocking point against sub-sched
5986 	 * attachment. See scx_link_sched().
5987 	 *
5988 	 * This doesn't cause recursions as propagation only takes place for
5989 	 * non-propagation exits.
5990 	 */
5991 	if (kind != SCX_EXIT_PARENT) {
5992 		scoped_guard (raw_spinlock_irqsave, &scx_sched_lock) {
5993 			struct scx_sched *pos;
5994 			scx_for_each_descendant_pre(pos, sch)
5995 				scx_disable(pos, SCX_EXIT_PARENT);
5996 		}
5997 	}
5998 
5999 	return true;
6000 }
6001 
6002 static void scx_disable_workfn(struct kthread_work *work)
6003 {
6004 	struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
6005 	struct scx_exit_info *ei = sch->exit_info;
6006 	int kind;
6007 
6008 	kind = atomic_read(&sch->exit_kind);
6009 	while (true) {
6010 		if (kind == SCX_EXIT_DONE)	/* already disabled? */
6011 			return;
6012 		WARN_ON_ONCE(kind == SCX_EXIT_NONE);
6013 		if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
6014 			break;
6015 	}
6016 	ei->kind = kind;
6017 	ei->reason = scx_exit_reason(ei->kind);
6018 
6019 	if (scx_parent(sch))
6020 		scx_sub_disable(sch);
6021 	else
6022 		scx_root_disable(sch);
6023 }
6024 
6025 static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind)
6026 {
6027 	guard(preempt)();
6028 	if (scx_claim_exit(sch, kind))
6029 		irq_work_queue(&sch->disable_irq_work);
6030 }
6031 
6032 /**
6033  * scx_flush_disable_work - flush the disable work and wait for it to finish
6034  * @sch: the scheduler
6035  *
6036  * sch->disable_work might still not queued, causing kthread_flush_work()
6037  * as a noop. Syncing the irq_work first is required to guarantee the
6038  * kthread work has been queued before waiting for it.
6039  */
6040 static void scx_flush_disable_work(struct scx_sched *sch)
6041 {
6042 	int kind;
6043 
6044 	do {
6045 		irq_work_sync(&sch->disable_irq_work);
6046 		kthread_flush_work(&sch->disable_work);
6047 		kind = atomic_read(&sch->exit_kind);
6048 	} while (kind != SCX_EXIT_NONE && kind != SCX_EXIT_DONE);
6049 }
6050 
6051 static void dump_newline(struct seq_buf *s)
6052 {
6053 	trace_sched_ext_dump("");
6054 
6055 	/* @s may be zero sized and seq_buf triggers WARN if so */
6056 	if (s->size)
6057 		seq_buf_putc(s, '\n');
6058 }
6059 
6060 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
6061 {
6062 	va_list args;
6063 
6064 #ifdef CONFIG_TRACEPOINTS
6065 	if (trace_sched_ext_dump_enabled()) {
6066 		/* protected by scx_dump_lock */
6067 		static char line_buf[SCX_EXIT_MSG_LEN];
6068 
6069 		va_start(args, fmt);
6070 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
6071 		va_end(args);
6072 
6073 		trace_call__sched_ext_dump(line_buf);
6074 	}
6075 #endif
6076 	/* @s may be zero sized and seq_buf triggers WARN if so */
6077 	if (s->size) {
6078 		va_start(args, fmt);
6079 		seq_buf_vprintf(s, fmt, args);
6080 		va_end(args);
6081 
6082 		seq_buf_putc(s, '\n');
6083 	}
6084 }
6085 
6086 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
6087 			     const unsigned long *bt, unsigned int len)
6088 {
6089 	unsigned int i;
6090 
6091 	for (i = 0; i < len; i++)
6092 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
6093 }
6094 
6095 static void ops_dump_init(struct seq_buf *s, const char *prefix)
6096 {
6097 	struct scx_dump_data *dd = &scx_dump_data;
6098 
6099 	lockdep_assert_irqs_disabled();
6100 
6101 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
6102 	dd->first = true;
6103 	dd->cursor = 0;
6104 	dd->s = s;
6105 	dd->prefix = prefix;
6106 }
6107 
6108 static void ops_dump_flush(void)
6109 {
6110 	struct scx_dump_data *dd = &scx_dump_data;
6111 	char *line = dd->buf.line;
6112 
6113 	if (!dd->cursor)
6114 		return;
6115 
6116 	/*
6117 	 * There's something to flush and this is the first line. Insert a blank
6118 	 * line to distinguish ops dump.
6119 	 */
6120 	if (dd->first) {
6121 		dump_newline(dd->s);
6122 		dd->first = false;
6123 	}
6124 
6125 	/*
6126 	 * There may be multiple lines in $line. Scan and emit each line
6127 	 * separately.
6128 	 */
6129 	while (true) {
6130 		char *end = line;
6131 		char c;
6132 
6133 		while (*end != '\n' && *end != '\0')
6134 			end++;
6135 
6136 		/*
6137 		 * If $line overflowed, it may not have newline at the end.
6138 		 * Always emit with a newline.
6139 		 */
6140 		c = *end;
6141 		*end = '\0';
6142 		dump_line(dd->s, "%s%s", dd->prefix, line);
6143 		if (c == '\0')
6144 			break;
6145 
6146 		/* move to the next line */
6147 		end++;
6148 		if (*end == '\0')
6149 			break;
6150 		line = end;
6151 	}
6152 
6153 	dd->cursor = 0;
6154 }
6155 
6156 static void ops_dump_exit(void)
6157 {
6158 	ops_dump_flush();
6159 	scx_dump_data.cpu = -1;
6160 }
6161 
6162 static void scx_dump_task(struct scx_sched *sch, struct seq_buf *s, struct scx_dump_ctx *dctx,
6163 			  struct rq *rq, struct task_struct *p, char marker)
6164 {
6165 	static unsigned long bt[SCX_EXIT_BT_LEN];
6166 	struct scx_sched *task_sch = scx_task_sched(p);
6167 	const char *own_marker;
6168 	char sch_id_buf[32];
6169 	char dsq_id_buf[19] = "(n/a)";
6170 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
6171 	unsigned int bt_len = 0;
6172 
6173 	own_marker = task_sch == sch ? "*" : "";
6174 
6175 	if (task_sch->level == 0)
6176 		scnprintf(sch_id_buf, sizeof(sch_id_buf), "root");
6177 	else
6178 		scnprintf(sch_id_buf, sizeof(sch_id_buf), "sub%d-%llu",
6179 			  task_sch->level, task_sch->ops.sub_cgroup_id);
6180 
6181 	if (p->scx.dsq)
6182 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
6183 			  (unsigned long long)p->scx.dsq->id);
6184 
6185 	dump_newline(s);
6186 	dump_line(s, " %c%c %s[%d] %s%s %+ldms",
6187 		  marker, task_state_to_char(p), p->comm, p->pid,
6188 		  own_marker, sch_id_buf,
6189 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
6190 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
6191 		  scx_get_task_state(p) >> SCX_TASK_STATE_SHIFT,
6192 		  p->scx.flags & ~SCX_TASK_STATE_MASK,
6193 		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
6194 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
6195 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s",
6196 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
6197 	dump_line(s, "      dsq_vtime=%llu slice=%llu weight=%u",
6198 		  p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
6199 	dump_line(s, "      cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr),
6200 		  p->migration_disabled);
6201 
6202 	if (SCX_HAS_OP(sch, dump_task)) {
6203 		ops_dump_init(s, "    ");
6204 		SCX_CALL_OP(sch, dump_task, rq, dctx, p);
6205 		ops_dump_exit();
6206 	}
6207 
6208 #ifdef CONFIG_STACKTRACE
6209 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
6210 #endif
6211 	if (bt_len) {
6212 		dump_newline(s);
6213 		dump_stack_trace(s, "    ", bt, bt_len);
6214 	}
6215 }
6216 
6217 /*
6218  * Dump scheduler state. If @dump_all_tasks is true, dump all tasks regardless
6219  * of which scheduler they belong to. If false, only dump tasks owned by @sch.
6220  * For SysRq-D dumps, @dump_all_tasks=false since all schedulers are dumped
6221  * separately. For error dumps, @dump_all_tasks=true since only the failing
6222  * scheduler is dumped.
6223  */
6224 static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
6225 			   size_t dump_len, bool dump_all_tasks)
6226 {
6227 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
6228 	struct scx_dump_ctx dctx = {
6229 		.kind = ei->kind,
6230 		.exit_code = ei->exit_code,
6231 		.reason = ei->reason,
6232 		.at_ns = ktime_get_ns(),
6233 		.at_jiffies = jiffies,
6234 	};
6235 	struct seq_buf s;
6236 	struct scx_event_stats events;
6237 	char *buf;
6238 	int cpu;
6239 
6240 	guard(raw_spinlock_irqsave)(&scx_dump_lock);
6241 
6242 	if (sch->dump_disabled)
6243 		return;
6244 
6245 	seq_buf_init(&s, ei->dump, dump_len);
6246 
6247 #ifdef CONFIG_EXT_SUB_SCHED
6248 	if (sch->level == 0)
6249 		dump_line(&s, "%s: root", sch->ops.name);
6250 	else
6251 		dump_line(&s, "%s: sub%d-%llu %s",
6252 			  sch->ops.name, sch->level, sch->ops.sub_cgroup_id,
6253 			  sch->cgrp_path);
6254 #endif
6255 	if (ei->kind == SCX_EXIT_NONE) {
6256 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
6257 	} else {
6258 		dump_line(&s, "%s[%d] triggered exit kind %d:",
6259 			  current->comm, current->pid, ei->kind);
6260 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
6261 		dump_newline(&s);
6262 		dump_line(&s, "Backtrace:");
6263 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
6264 	}
6265 
6266 	if (SCX_HAS_OP(sch, dump)) {
6267 		ops_dump_init(&s, "");
6268 		SCX_CALL_OP(sch, dump, NULL, &dctx);
6269 		ops_dump_exit();
6270 	}
6271 
6272 	dump_newline(&s);
6273 	dump_line(&s, "CPU states");
6274 	dump_line(&s, "----------");
6275 
6276 	for_each_possible_cpu(cpu) {
6277 		struct rq *rq = cpu_rq(cpu);
6278 		struct rq_flags rf;
6279 		struct task_struct *p;
6280 		struct seq_buf ns;
6281 		size_t avail, used;
6282 		bool idle;
6283 
6284 		rq_lock_irqsave(rq, &rf);
6285 
6286 		idle = list_empty(&rq->scx.runnable_list) &&
6287 			rq->curr->sched_class == &idle_sched_class;
6288 
6289 		if (idle && !SCX_HAS_OP(sch, dump_cpu))
6290 			goto next;
6291 
6292 		/*
6293 		 * We don't yet know whether ops.dump_cpu() will produce output
6294 		 * and we may want to skip the default CPU dump if it doesn't.
6295 		 * Use a nested seq_buf to generate the standard dump so that we
6296 		 * can decide whether to commit later.
6297 		 */
6298 		avail = seq_buf_get_buf(&s, &buf);
6299 		seq_buf_init(&ns, buf, avail);
6300 
6301 		dump_newline(&ns);
6302 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu",
6303 			  cpu, rq->scx.nr_running, rq->scx.flags,
6304 			  rq->scx.cpu_released, rq->scx.ops_qseq,
6305 			  rq->scx.kick_sync);
6306 		dump_line(&ns, "          curr=%s[%d] class=%ps",
6307 			  rq->curr->comm, rq->curr->pid,
6308 			  rq->curr->sched_class);
6309 		if (!cpumask_empty(rq->scx.cpus_to_kick))
6310 			dump_line(&ns, "  cpus_to_kick   : %*pb",
6311 				  cpumask_pr_args(rq->scx.cpus_to_kick));
6312 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
6313 			dump_line(&ns, "  idle_to_kick   : %*pb",
6314 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
6315 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
6316 			dump_line(&ns, "  cpus_to_preempt: %*pb",
6317 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
6318 		if (!cpumask_empty(rq->scx.cpus_to_wait))
6319 			dump_line(&ns, "  cpus_to_wait   : %*pb",
6320 				  cpumask_pr_args(rq->scx.cpus_to_wait));
6321 		if (!cpumask_empty(rq->scx.cpus_to_sync))
6322 			dump_line(&ns, "  cpus_to_sync   : %*pb",
6323 				  cpumask_pr_args(rq->scx.cpus_to_sync));
6324 
6325 		used = seq_buf_used(&ns);
6326 		if (SCX_HAS_OP(sch, dump_cpu)) {
6327 			ops_dump_init(&ns, "  ");
6328 			SCX_CALL_OP(sch, dump_cpu, rq, &dctx, cpu, idle);
6329 			ops_dump_exit();
6330 		}
6331 
6332 		/*
6333 		 * If idle && nothing generated by ops.dump_cpu(), there's
6334 		 * nothing interesting. Skip.
6335 		 */
6336 		if (idle && used == seq_buf_used(&ns))
6337 			goto next;
6338 
6339 		/*
6340 		 * $s may already have overflowed when $ns was created. If so,
6341 		 * calling commit on it will trigger BUG.
6342 		 */
6343 		if (avail) {
6344 			seq_buf_commit(&s, seq_buf_used(&ns));
6345 			if (seq_buf_has_overflowed(&ns))
6346 				seq_buf_set_overflow(&s);
6347 		}
6348 
6349 		if (rq->curr->sched_class == &ext_sched_class &&
6350 		    (dump_all_tasks || scx_task_on_sched(sch, rq->curr)))
6351 			scx_dump_task(sch, &s, &dctx, rq, rq->curr, '*');
6352 
6353 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
6354 			if (dump_all_tasks || scx_task_on_sched(sch, p))
6355 				scx_dump_task(sch, &s, &dctx, rq, p, ' ');
6356 	next:
6357 		rq_unlock_irqrestore(rq, &rf);
6358 	}
6359 
6360 	dump_newline(&s);
6361 	dump_line(&s, "Event counters");
6362 	dump_line(&s, "--------------");
6363 
6364 	scx_read_events(sch, &events);
6365 	scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
6366 	scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
6367 	scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
6368 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
6369 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
6370 	scx_dump_event(s, &events, SCX_EV_REENQ_IMMED);
6371 	scx_dump_event(s, &events, SCX_EV_REENQ_LOCAL_REPEAT);
6372 	scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL);
6373 	scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
6374 	scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
6375 	scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
6376 	scx_dump_event(s, &events, SCX_EV_INSERT_NOT_OWNED);
6377 	scx_dump_event(s, &events, SCX_EV_SUB_BYPASS_DISPATCH);
6378 
6379 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
6380 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
6381 		       trunc_marker, sizeof(trunc_marker));
6382 }
6383 
6384 static void scx_disable_irq_workfn(struct irq_work *irq_work)
6385 {
6386 	struct scx_sched *sch = container_of(irq_work, struct scx_sched, disable_irq_work);
6387 	struct scx_exit_info *ei = sch->exit_info;
6388 
6389 	if (ei->kind >= SCX_EXIT_ERROR)
6390 		scx_dump_state(sch, ei, sch->ops.exit_dump_len, true);
6391 
6392 	kthread_queue_work(sch->helper, &sch->disable_work);
6393 }
6394 
6395 static bool scx_vexit(struct scx_sched *sch,
6396 		      enum scx_exit_kind kind, s64 exit_code,
6397 		      const char *fmt, va_list args)
6398 {
6399 	struct scx_exit_info *ei = sch->exit_info;
6400 
6401 	guard(preempt)();
6402 
6403 	if (!scx_claim_exit(sch, kind))
6404 		return false;
6405 
6406 	ei->exit_code = exit_code;
6407 #ifdef CONFIG_STACKTRACE
6408 	if (kind >= SCX_EXIT_ERROR)
6409 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
6410 #endif
6411 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
6412 
6413 	/*
6414 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
6415 	 * in scx_disable_workfn().
6416 	 */
6417 	ei->kind = kind;
6418 	ei->reason = scx_exit_reason(ei->kind);
6419 
6420 	irq_work_queue(&sch->disable_irq_work);
6421 	return true;
6422 }
6423 
6424 static int alloc_kick_syncs(void)
6425 {
6426 	int cpu;
6427 
6428 	/*
6429 	 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size
6430 	 * can exceed percpu allocator limits on large machines.
6431 	 */
6432 	for_each_possible_cpu(cpu) {
6433 		struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
6434 		struct scx_kick_syncs *new_ksyncs;
6435 
6436 		WARN_ON_ONCE(rcu_access_pointer(*ksyncs));
6437 
6438 		new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids),
6439 					   GFP_KERNEL, cpu_to_node(cpu));
6440 		if (!new_ksyncs) {
6441 			free_kick_syncs();
6442 			return -ENOMEM;
6443 		}
6444 
6445 		rcu_assign_pointer(*ksyncs, new_ksyncs);
6446 	}
6447 
6448 	return 0;
6449 }
6450 
6451 static void free_pnode(struct scx_sched_pnode *pnode)
6452 {
6453 	if (!pnode)
6454 		return;
6455 	exit_dsq(&pnode->global_dsq);
6456 	kfree(pnode);
6457 }
6458 
6459 static struct scx_sched_pnode *alloc_pnode(struct scx_sched *sch, int node)
6460 {
6461 	struct scx_sched_pnode *pnode;
6462 
6463 	pnode = kzalloc_node(sizeof(*pnode), GFP_KERNEL, node);
6464 	if (!pnode)
6465 		return NULL;
6466 
6467 	if (init_dsq(&pnode->global_dsq, SCX_DSQ_GLOBAL, sch)) {
6468 		kfree(pnode);
6469 		return NULL;
6470 	}
6471 
6472 	return pnode;
6473 }
6474 
6475 /*
6476  * Allocate and initialize a new scx_sched. @cgrp's reference is always
6477  * consumed whether the function succeeds or fails.
6478  */
6479 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
6480 						 struct cgroup *cgrp,
6481 						 struct scx_sched *parent)
6482 {
6483 	struct scx_sched *sch;
6484 	s32 level = parent ? parent->level + 1 : 0;
6485 	s32 node, cpu, ret, bypass_fail_cpu = nr_cpu_ids;
6486 
6487 	sch = kzalloc_flex(*sch, ancestors, level + 1);
6488 	if (!sch) {
6489 		ret = -ENOMEM;
6490 		goto err_put_cgrp;
6491 	}
6492 
6493 	sch->exit_info = alloc_exit_info(ops->exit_dump_len);
6494 	if (!sch->exit_info) {
6495 		ret = -ENOMEM;
6496 		goto err_free_sch;
6497 	}
6498 
6499 	ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
6500 	if (ret < 0)
6501 		goto err_free_ei;
6502 
6503 	sch->pnode = kzalloc_objs(sch->pnode[0], nr_node_ids);
6504 	if (!sch->pnode) {
6505 		ret = -ENOMEM;
6506 		goto err_free_hash;
6507 	}
6508 
6509 	for_each_node_state(node, N_POSSIBLE) {
6510 		sch->pnode[node] = alloc_pnode(sch, node);
6511 		if (!sch->pnode[node]) {
6512 			ret = -ENOMEM;
6513 			goto err_free_pnode;
6514 		}
6515 	}
6516 
6517 	sch->dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
6518 	sch->pcpu = __alloc_percpu(struct_size_t(struct scx_sched_pcpu,
6519 						 dsp_ctx.buf, sch->dsp_max_batch),
6520 				   __alignof__(struct scx_sched_pcpu));
6521 	if (!sch->pcpu) {
6522 		ret = -ENOMEM;
6523 		goto err_free_pnode;
6524 	}
6525 
6526 	for_each_possible_cpu(cpu) {
6527 		ret = init_dsq(bypass_dsq(sch, cpu), SCX_DSQ_BYPASS, sch);
6528 		if (ret) {
6529 			bypass_fail_cpu = cpu;
6530 			goto err_free_pcpu;
6531 		}
6532 	}
6533 
6534 	for_each_possible_cpu(cpu) {
6535 		struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
6536 
6537 		pcpu->sch = sch;
6538 		INIT_LIST_HEAD(&pcpu->deferred_reenq_local.node);
6539 	}
6540 
6541 	sch->helper = kthread_run_worker(0, "sched_ext_helper");
6542 	if (IS_ERR(sch->helper)) {
6543 		ret = PTR_ERR(sch->helper);
6544 		goto err_free_pcpu;
6545 	}
6546 
6547 	sched_set_fifo(sch->helper->task);
6548 
6549 	if (parent)
6550 		memcpy(sch->ancestors, parent->ancestors,
6551 		       level * sizeof(parent->ancestors[0]));
6552 	sch->ancestors[level] = sch;
6553 	sch->level = level;
6554 
6555 	if (ops->timeout_ms)
6556 		sch->watchdog_timeout = msecs_to_jiffies(ops->timeout_ms);
6557 	else
6558 		sch->watchdog_timeout = SCX_WATCHDOG_MAX_TIMEOUT;
6559 
6560 	sch->slice_dfl = SCX_SLICE_DFL;
6561 	atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
6562 	init_irq_work(&sch->disable_irq_work, scx_disable_irq_workfn);
6563 	kthread_init_work(&sch->disable_work, scx_disable_workfn);
6564 	timer_setup(&sch->bypass_lb_timer, scx_bypass_lb_timerfn, 0);
6565 
6566 	if (!alloc_cpumask_var(&sch->bypass_lb_donee_cpumask, GFP_KERNEL)) {
6567 		ret = -ENOMEM;
6568 		goto err_stop_helper;
6569 	}
6570 	if (!alloc_cpumask_var(&sch->bypass_lb_resched_cpumask, GFP_KERNEL)) {
6571 		ret = -ENOMEM;
6572 		goto err_free_lb_cpumask;
6573 	}
6574 	sch->ops = *ops;
6575 	rcu_assign_pointer(ops->priv, sch);
6576 
6577 	sch->kobj.kset = scx_kset;
6578 
6579 #ifdef CONFIG_EXT_SUB_SCHED
6580 	char *buf = kzalloc(PATH_MAX, GFP_KERNEL);
6581 	if (!buf) {
6582 		ret = -ENOMEM;
6583 		goto err_free_lb_resched;
6584 	}
6585 	cgroup_path(cgrp, buf, PATH_MAX);
6586 	sch->cgrp_path = kstrdup(buf, GFP_KERNEL);
6587 	kfree(buf);
6588 	if (!sch->cgrp_path) {
6589 		ret = -ENOMEM;
6590 		goto err_free_lb_resched;
6591 	}
6592 
6593 	sch->cgrp = cgrp;
6594 	INIT_LIST_HEAD(&sch->children);
6595 	INIT_LIST_HEAD(&sch->sibling);
6596 
6597 	if (parent)
6598 		ret = kobject_init_and_add(&sch->kobj, &scx_ktype,
6599 					   &parent->sub_kset->kobj,
6600 					   "sub-%llu", cgroup_id(cgrp));
6601 	else
6602 		ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
6603 
6604 	if (ret < 0) {
6605 		kobject_put(&sch->kobj);
6606 		return ERR_PTR(ret);
6607 	}
6608 
6609 	if (ops->sub_attach) {
6610 		sch->sub_kset = kset_create_and_add("sub", NULL, &sch->kobj);
6611 		if (!sch->sub_kset) {
6612 			kobject_put(&sch->kobj);
6613 			return ERR_PTR(-ENOMEM);
6614 		}
6615 	}
6616 #else	/* CONFIG_EXT_SUB_SCHED */
6617 	ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
6618 	if (ret < 0) {
6619 		kobject_put(&sch->kobj);
6620 		return ERR_PTR(ret);
6621 	}
6622 #endif	/* CONFIG_EXT_SUB_SCHED */
6623 	return sch;
6624 
6625 err_free_lb_resched:
6626 	free_cpumask_var(sch->bypass_lb_resched_cpumask);
6627 err_free_lb_cpumask:
6628 	free_cpumask_var(sch->bypass_lb_donee_cpumask);
6629 err_stop_helper:
6630 	kthread_destroy_worker(sch->helper);
6631 err_free_pcpu:
6632 	for_each_possible_cpu(cpu) {
6633 		if (cpu == bypass_fail_cpu)
6634 			break;
6635 		exit_dsq(bypass_dsq(sch, cpu));
6636 	}
6637 	free_percpu(sch->pcpu);
6638 err_free_pnode:
6639 	for_each_node_state(node, N_POSSIBLE)
6640 		free_pnode(sch->pnode[node]);
6641 	kfree(sch->pnode);
6642 err_free_hash:
6643 	rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
6644 err_free_ei:
6645 	free_exit_info(sch->exit_info);
6646 err_free_sch:
6647 	kfree(sch);
6648 err_put_cgrp:
6649 #ifdef CONFIG_EXT_SUB_SCHED
6650 	cgroup_put(cgrp);
6651 #endif
6652 	return ERR_PTR(ret);
6653 }
6654 
6655 static int check_hotplug_seq(struct scx_sched *sch,
6656 			      const struct sched_ext_ops *ops)
6657 {
6658 	unsigned long long global_hotplug_seq;
6659 
6660 	/*
6661 	 * If a hotplug event has occurred between when a scheduler was
6662 	 * initialized, and when we were able to attach, exit and notify user
6663 	 * space about it.
6664 	 */
6665 	if (ops->hotplug_seq) {
6666 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
6667 		if (ops->hotplug_seq != global_hotplug_seq) {
6668 			scx_exit(sch, SCX_EXIT_UNREG_KERN,
6669 				 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
6670 				 "expected hotplug seq %llu did not match actual %llu",
6671 				 ops->hotplug_seq, global_hotplug_seq);
6672 			return -EBUSY;
6673 		}
6674 	}
6675 
6676 	return 0;
6677 }
6678 
6679 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
6680 {
6681 	/*
6682 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
6683 	 * ops.enqueue() callback isn't implemented.
6684 	 */
6685 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
6686 		scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
6687 		return -EINVAL;
6688 	}
6689 
6690 	/*
6691 	 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
6692 	 * selection policy to be enabled.
6693 	 */
6694 	if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
6695 	    (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
6696 		scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
6697 		return -EINVAL;
6698 	}
6699 
6700 	if (ops->cpu_acquire || ops->cpu_release)
6701 		pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n");
6702 
6703 	return 0;
6704 }
6705 
6706 /*
6707  * scx_enable() is offloaded to a dedicated system-wide RT kthread to avoid
6708  * starvation. During the READY -> ENABLED task switching loop, the calling
6709  * thread's sched_class gets switched from fair to ext. As fair has higher
6710  * priority than ext, the calling thread can be indefinitely starved under
6711  * fair-class saturation, leading to a system hang.
6712  */
6713 struct scx_enable_cmd {
6714 	struct kthread_work	work;
6715 	struct sched_ext_ops	*ops;
6716 	int			ret;
6717 };
6718 
6719 static void scx_root_enable_workfn(struct kthread_work *work)
6720 {
6721 	struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
6722 	struct sched_ext_ops *ops = cmd->ops;
6723 	struct cgroup *cgrp = root_cgroup();
6724 	struct scx_sched *sch;
6725 	struct scx_task_iter sti;
6726 	struct task_struct *p;
6727 	int i, cpu, ret;
6728 
6729 	mutex_lock(&scx_enable_mutex);
6730 
6731 	if (scx_enable_state() != SCX_DISABLED) {
6732 		ret = -EBUSY;
6733 		goto err_unlock;
6734 	}
6735 
6736 	ret = alloc_kick_syncs();
6737 	if (ret)
6738 		goto err_unlock;
6739 
6740 #ifdef CONFIG_EXT_SUB_SCHED
6741 	cgroup_get(cgrp);
6742 #endif
6743 	sch = scx_alloc_and_add_sched(ops, cgrp, NULL);
6744 	if (IS_ERR(sch)) {
6745 		ret = PTR_ERR(sch);
6746 		goto err_free_ksyncs;
6747 	}
6748 
6749 	/*
6750 	 * Transition to ENABLING and clear exit info to arm the disable path.
6751 	 * Failure triggers full disabling from here on.
6752 	 */
6753 	WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
6754 	WARN_ON_ONCE(scx_root);
6755 
6756 	atomic_long_set(&scx_nr_rejected, 0);
6757 
6758 	for_each_possible_cpu(cpu) {
6759 		struct rq *rq = cpu_rq(cpu);
6760 
6761 		rq->scx.local_dsq.sched = sch;
6762 		rq->scx.cpuperf_target = SCX_CPUPERF_ONE;
6763 	}
6764 
6765 	/*
6766 	 * Keep CPUs stable during enable so that the BPF scheduler can track
6767 	 * online CPUs by watching ->on/offline_cpu() after ->init().
6768 	 */
6769 	cpus_read_lock();
6770 
6771 	/*
6772 	 * Make the scheduler instance visible. Must be inside cpus_read_lock().
6773 	 * See handle_hotplug().
6774 	 */
6775 	rcu_assign_pointer(scx_root, sch);
6776 
6777 	ret = scx_link_sched(sch);
6778 	if (ret) {
6779 		cpus_read_unlock();
6780 		goto err_disable;
6781 	}
6782 
6783 	scx_idle_enable(ops);
6784 
6785 	if (sch->ops.init) {
6786 		ret = SCX_CALL_OP_RET(sch, init, NULL);
6787 		if (ret) {
6788 			ret = ops_sanitize_err(sch, "init", ret);
6789 			cpus_read_unlock();
6790 			scx_error(sch, "ops.init() failed (%d)", ret);
6791 			goto err_disable;
6792 		}
6793 		sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
6794 	}
6795 
6796 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
6797 		if (((void (**)(void))ops)[i])
6798 			set_bit(i, sch->has_op);
6799 
6800 	ret = check_hotplug_seq(sch, ops);
6801 	if (ret) {
6802 		cpus_read_unlock();
6803 		goto err_disable;
6804 	}
6805 	scx_idle_update_selcpu_topology(ops);
6806 
6807 	cpus_read_unlock();
6808 
6809 	ret = validate_ops(sch, ops);
6810 	if (ret)
6811 		goto err_disable;
6812 
6813 	/*
6814 	 * Once __scx_enabled is set, %current can be switched to SCX anytime.
6815 	 * This can lead to stalls as some BPF schedulers (e.g. userspace
6816 	 * scheduling) may not function correctly before all tasks are switched.
6817 	 * Init in bypass mode to guarantee forward progress.
6818 	 */
6819 	scx_bypass(sch, true);
6820 
6821 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
6822 		if (((void (**)(void))ops)[i])
6823 			set_bit(i, sch->has_op);
6824 
6825 	if (sch->ops.cpu_acquire || sch->ops.cpu_release)
6826 		sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
6827 
6828 	/*
6829 	 * Lock out forks, cgroup on/offlining and moves before opening the
6830 	 * floodgate so that they don't wander into the operations prematurely.
6831 	 */
6832 	percpu_down_write(&scx_fork_rwsem);
6833 
6834 	WARN_ON_ONCE(scx_init_task_enabled);
6835 	scx_init_task_enabled = true;
6836 
6837 	/*
6838 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
6839 	 * preventing new tasks from being added. No need to exclude tasks
6840 	 * leaving as sched_ext_free() can handle both prepped and enabled
6841 	 * tasks. Prep all tasks first and then enable them with preemption
6842 	 * disabled.
6843 	 *
6844 	 * All cgroups should be initialized before scx_init_task() so that the
6845 	 * BPF scheduler can reliably track each task's cgroup membership from
6846 	 * scx_init_task(). Lock out cgroup on/offlining and task migrations
6847 	 * while tasks are being initialized so that scx_cgroup_can_attach()
6848 	 * never sees uninitialized tasks.
6849 	 */
6850 	scx_cgroup_lock();
6851 	set_cgroup_sched(sch_cgroup(sch), sch);
6852 	ret = scx_cgroup_init(sch);
6853 	if (ret)
6854 		goto err_disable_unlock_all;
6855 
6856 	scx_task_iter_start(&sti, NULL);
6857 	while ((p = scx_task_iter_next_locked(&sti))) {
6858 		/*
6859 		 * @p may already be dead, have lost all its usages counts and
6860 		 * be waiting for RCU grace period before being freed. @p can't
6861 		 * be initialized for SCX in such cases and should be ignored.
6862 		 */
6863 		if (!tryget_task_struct(p))
6864 			continue;
6865 
6866 		scx_task_iter_unlock(&sti);
6867 
6868 		ret = scx_init_task(sch, p, false);
6869 		if (ret) {
6870 			put_task_struct(p);
6871 			scx_task_iter_stop(&sti);
6872 			scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
6873 				  ret, p->comm, p->pid);
6874 			goto err_disable_unlock_all;
6875 		}
6876 
6877 		scx_set_task_sched(p, sch);
6878 		scx_set_task_state(p, SCX_TASK_READY);
6879 
6880 		put_task_struct(p);
6881 	}
6882 	scx_task_iter_stop(&sti);
6883 	scx_cgroup_unlock();
6884 	percpu_up_write(&scx_fork_rwsem);
6885 
6886 	/*
6887 	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
6888 	 * all eligible tasks.
6889 	 */
6890 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
6891 	static_branch_enable(&__scx_enabled);
6892 
6893 	/*
6894 	 * We're fully committed and can't fail. The task READY -> ENABLED
6895 	 * transitions here are synchronized against sched_ext_free() through
6896 	 * scx_tasks_lock.
6897 	 */
6898 	percpu_down_write(&scx_fork_rwsem);
6899 	scx_task_iter_start(&sti, NULL);
6900 	while ((p = scx_task_iter_next_locked(&sti))) {
6901 		unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
6902 		const struct sched_class *old_class = p->sched_class;
6903 		const struct sched_class *new_class = scx_setscheduler_class(p);
6904 
6905 		if (scx_get_task_state(p) != SCX_TASK_READY)
6906 			continue;
6907 
6908 		if (old_class != new_class)
6909 			queue_flags |= DEQUEUE_CLASS;
6910 
6911 		scoped_guard (sched_change, p, queue_flags) {
6912 			p->scx.slice = READ_ONCE(sch->slice_dfl);
6913 			p->sched_class = new_class;
6914 		}
6915 	}
6916 	scx_task_iter_stop(&sti);
6917 	percpu_up_write(&scx_fork_rwsem);
6918 
6919 	scx_bypass(sch, false);
6920 
6921 	if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
6922 		WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
6923 		goto err_disable;
6924 	}
6925 
6926 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
6927 		static_branch_enable(&__scx_switched_all);
6928 
6929 	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
6930 		sch->ops.name, scx_switched_all() ? "" : " (partial)");
6931 	kobject_uevent(&sch->kobj, KOBJ_ADD);
6932 	mutex_unlock(&scx_enable_mutex);
6933 
6934 	atomic_long_inc(&scx_enable_seq);
6935 
6936 	cmd->ret = 0;
6937 	return;
6938 
6939 err_free_ksyncs:
6940 	free_kick_syncs();
6941 err_unlock:
6942 	mutex_unlock(&scx_enable_mutex);
6943 	cmd->ret = ret;
6944 	return;
6945 
6946 err_disable_unlock_all:
6947 	scx_cgroup_unlock();
6948 	percpu_up_write(&scx_fork_rwsem);
6949 	/* we'll soon enter disable path, keep bypass on */
6950 err_disable:
6951 	mutex_unlock(&scx_enable_mutex);
6952 	/*
6953 	 * Returning an error code here would not pass all the error information
6954 	 * to userspace. Record errno using scx_error() for cases scx_error()
6955 	 * wasn't already invoked and exit indicating success so that the error
6956 	 * is notified through ops.exit() with all the details.
6957 	 *
6958 	 * Flush scx_disable_work to ensure that error is reported before init
6959 	 * completion. sch's base reference will be put by bpf_scx_unreg().
6960 	 */
6961 	scx_error(sch, "scx_root_enable() failed (%d)", ret);
6962 	scx_flush_disable_work(sch);
6963 	cmd->ret = 0;
6964 }
6965 
6966 #ifdef CONFIG_EXT_SUB_SCHED
6967 /* verify that a scheduler can be attached to @cgrp and return the parent */
6968 static struct scx_sched *find_parent_sched(struct cgroup *cgrp)
6969 {
6970 	struct scx_sched *parent = cgrp->scx_sched;
6971 	struct scx_sched *pos;
6972 
6973 	lockdep_assert_held(&scx_sched_lock);
6974 
6975 	/* can't attach twice to the same cgroup */
6976 	if (parent->cgrp == cgrp)
6977 		return ERR_PTR(-EBUSY);
6978 
6979 	/* does $parent allow sub-scheds? */
6980 	if (!parent->ops.sub_attach)
6981 		return ERR_PTR(-EOPNOTSUPP);
6982 
6983 	/* can't insert between $parent and its exiting children */
6984 	list_for_each_entry(pos, &parent->children, sibling)
6985 		if (cgroup_is_descendant(pos->cgrp, cgrp))
6986 			return ERR_PTR(-EBUSY);
6987 
6988 	return parent;
6989 }
6990 
6991 static bool assert_task_ready_or_enabled(struct task_struct *p)
6992 {
6993 	u32 state = scx_get_task_state(p);
6994 
6995 	switch (state) {
6996 	case SCX_TASK_READY:
6997 	case SCX_TASK_ENABLED:
6998 		return true;
6999 	default:
7000 		WARN_ONCE(true, "sched_ext: Invalid task state %d for %s[%d] during enabling sub sched",
7001 			  state, p->comm, p->pid);
7002 		return false;
7003 	}
7004 }
7005 
7006 static void scx_sub_enable_workfn(struct kthread_work *work)
7007 {
7008 	struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
7009 	struct sched_ext_ops *ops = cmd->ops;
7010 	struct cgroup *cgrp;
7011 	struct scx_sched *parent, *sch;
7012 	struct scx_task_iter sti;
7013 	struct task_struct *p;
7014 	s32 i, ret;
7015 
7016 	mutex_lock(&scx_enable_mutex);
7017 
7018 	if (!scx_enabled()) {
7019 		ret = -ENODEV;
7020 		goto out_unlock;
7021 	}
7022 
7023 	cgrp = cgroup_get_from_id(ops->sub_cgroup_id);
7024 	if (IS_ERR(cgrp)) {
7025 		ret = PTR_ERR(cgrp);
7026 		goto out_unlock;
7027 	}
7028 
7029 	raw_spin_lock_irq(&scx_sched_lock);
7030 	parent = find_parent_sched(cgrp);
7031 	if (IS_ERR(parent)) {
7032 		raw_spin_unlock_irq(&scx_sched_lock);
7033 		ret = PTR_ERR(parent);
7034 		goto out_put_cgrp;
7035 	}
7036 	kobject_get(&parent->kobj);
7037 	raw_spin_unlock_irq(&scx_sched_lock);
7038 
7039 	/* scx_alloc_and_add_sched() consumes @cgrp whether it succeeds or not */
7040 	sch = scx_alloc_and_add_sched(ops, cgrp, parent);
7041 	kobject_put(&parent->kobj);
7042 	if (IS_ERR(sch)) {
7043 		ret = PTR_ERR(sch);
7044 		goto out_unlock;
7045 	}
7046 
7047 	ret = scx_link_sched(sch);
7048 	if (ret)
7049 		goto err_disable;
7050 
7051 	if (sch->level >= SCX_SUB_MAX_DEPTH) {
7052 		scx_error(sch, "max nesting depth %d violated",
7053 			  SCX_SUB_MAX_DEPTH);
7054 		goto err_disable;
7055 	}
7056 
7057 	if (sch->ops.init) {
7058 		ret = SCX_CALL_OP_RET(sch, init, NULL);
7059 		if (ret) {
7060 			ret = ops_sanitize_err(sch, "init", ret);
7061 			scx_error(sch, "ops.init() failed (%d)", ret);
7062 			goto err_disable;
7063 		}
7064 		sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
7065 	}
7066 
7067 	if (validate_ops(sch, ops))
7068 		goto err_disable;
7069 
7070 	struct scx_sub_attach_args sub_attach_args = {
7071 		.ops = &sch->ops,
7072 		.cgroup_path = sch->cgrp_path,
7073 	};
7074 
7075 	ret = SCX_CALL_OP_RET(parent, sub_attach, NULL,
7076 			      &sub_attach_args);
7077 	if (ret) {
7078 		ret = ops_sanitize_err(sch, "sub_attach", ret);
7079 		scx_error(sch, "parent rejected (%d)", ret);
7080 		goto err_disable;
7081 	}
7082 	sch->sub_attached = true;
7083 
7084 	scx_bypass(sch, true);
7085 
7086 	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
7087 		if (((void (**)(void))ops)[i])
7088 			set_bit(i, sch->has_op);
7089 
7090 	percpu_down_write(&scx_fork_rwsem);
7091 	scx_cgroup_lock();
7092 
7093 	/*
7094 	 * Set cgroup->scx_sched's and check CSS_ONLINE. Either we see
7095 	 * !CSS_ONLINE or scx_cgroup_lifetime_notify() sees and shoots us down.
7096 	 */
7097 	set_cgroup_sched(sch_cgroup(sch), sch);
7098 	if (!(cgrp->self.flags & CSS_ONLINE)) {
7099 		scx_error(sch, "cgroup is not online");
7100 		goto err_unlock_and_disable;
7101 	}
7102 
7103 	/*
7104 	 * Initialize tasks for the new child $sch without exiting them for
7105 	 * $parent so that the tasks can always be reverted back to $parent
7106 	 * sched on child init failure.
7107 	 */
7108 	WARN_ON_ONCE(scx_enabling_sub_sched);
7109 	scx_enabling_sub_sched = sch;
7110 
7111 	scx_task_iter_start(&sti, sch->cgrp);
7112 	while ((p = scx_task_iter_next_locked(&sti))) {
7113 		struct rq *rq;
7114 		struct rq_flags rf;
7115 
7116 		/*
7117 		 * Task iteration may visit the same task twice when racing
7118 		 * against exiting. Use %SCX_TASK_SUB_INIT to mark tasks which
7119 		 * finished __scx_init_task() and skip if set.
7120 		 *
7121 		 * A task may exit and get freed between __scx_init_task()
7122 		 * completion and scx_enable_task(). In such cases,
7123 		 * scx_disable_and_exit_task() must exit the task for both the
7124 		 * parent and child scheds.
7125 		 */
7126 		if (p->scx.flags & SCX_TASK_SUB_INIT)
7127 			continue;
7128 
7129 		/* see scx_root_enable() */
7130 		if (!tryget_task_struct(p))
7131 			continue;
7132 
7133 		if (!assert_task_ready_or_enabled(p)) {
7134 			ret = -EINVAL;
7135 			goto abort;
7136 		}
7137 
7138 		scx_task_iter_unlock(&sti);
7139 
7140 		/*
7141 		 * As $p is still on $parent, it can't be transitioned to INIT.
7142 		 * Let's worry about task state later. Use __scx_init_task().
7143 		 */
7144 		ret = __scx_init_task(sch, p, false);
7145 		if (ret)
7146 			goto abort;
7147 
7148 		rq = task_rq_lock(p, &rf);
7149 		p->scx.flags |= SCX_TASK_SUB_INIT;
7150 		task_rq_unlock(rq, p, &rf);
7151 
7152 		put_task_struct(p);
7153 	}
7154 	scx_task_iter_stop(&sti);
7155 
7156 	/*
7157 	 * All tasks are prepped. Disable/exit tasks for $parent and enable for
7158 	 * the new @sch.
7159 	 */
7160 	scx_task_iter_start(&sti, sch->cgrp);
7161 	while ((p = scx_task_iter_next_locked(&sti))) {
7162 		/*
7163 		 * Use clearing of %SCX_TASK_SUB_INIT to detect and skip
7164 		 * duplicate iterations.
7165 		 */
7166 		if (!(p->scx.flags & SCX_TASK_SUB_INIT))
7167 			continue;
7168 
7169 		scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
7170 			/*
7171 			 * $p must be either READY or ENABLED. If ENABLED,
7172 			 * __scx_disabled_and_exit_task() first disables and
7173 			 * makes it READY. However, after exiting $p, it will
7174 			 * leave $p as READY.
7175 			 */
7176 			assert_task_ready_or_enabled(p);
7177 			__scx_disable_and_exit_task(parent, p);
7178 
7179 			/*
7180 			 * $p is now only initialized for @sch and READY, which
7181 			 * is what we want. Assign it to @sch and enable.
7182 			 */
7183 			rcu_assign_pointer(p->scx.sched, sch);
7184 			scx_enable_task(sch, p);
7185 
7186 			p->scx.flags &= ~SCX_TASK_SUB_INIT;
7187 		}
7188 	}
7189 	scx_task_iter_stop(&sti);
7190 
7191 	scx_enabling_sub_sched = NULL;
7192 
7193 	scx_cgroup_unlock();
7194 	percpu_up_write(&scx_fork_rwsem);
7195 
7196 	scx_bypass(sch, false);
7197 
7198 	pr_info("sched_ext: BPF sub-scheduler \"%s\" enabled\n", sch->ops.name);
7199 	kobject_uevent(&sch->kobj, KOBJ_ADD);
7200 	ret = 0;
7201 	goto out_unlock;
7202 
7203 out_put_cgrp:
7204 	cgroup_put(cgrp);
7205 out_unlock:
7206 	mutex_unlock(&scx_enable_mutex);
7207 	cmd->ret = ret;
7208 	return;
7209 
7210 abort:
7211 	put_task_struct(p);
7212 	scx_task_iter_stop(&sti);
7213 
7214 	/*
7215 	 * Undo __scx_init_task() for tasks we marked. scx_enable_task() never
7216 	 * ran for @sch on them, so calling scx_disable_task() here would invoke
7217 	 * ops.disable() without a matching ops.enable(). scx_enabling_sub_sched
7218 	 * must stay set until SUB_INIT is cleared from every marked task -
7219 	 * scx_disable_and_exit_task() reads it when a task exits concurrently.
7220 	 */
7221 	scx_task_iter_start(&sti, sch->cgrp);
7222 	while ((p = scx_task_iter_next_locked(&sti))) {
7223 		if (p->scx.flags & SCX_TASK_SUB_INIT) {
7224 			scx_sub_init_cancel_task(sch, p);
7225 			p->scx.flags &= ~SCX_TASK_SUB_INIT;
7226 		}
7227 	}
7228 	scx_task_iter_stop(&sti);
7229 	scx_enabling_sub_sched = NULL;
7230 err_unlock_and_disable:
7231 	/* we'll soon enter disable path, keep bypass on */
7232 	scx_cgroup_unlock();
7233 	percpu_up_write(&scx_fork_rwsem);
7234 err_disable:
7235 	mutex_unlock(&scx_enable_mutex);
7236 	scx_flush_disable_work(sch);
7237 	cmd->ret = 0;
7238 }
7239 
7240 static s32 scx_cgroup_lifetime_notify(struct notifier_block *nb,
7241 				      unsigned long action, void *data)
7242 {
7243 	struct cgroup *cgrp = data;
7244 	struct cgroup *parent = cgroup_parent(cgrp);
7245 
7246 	if (!cgroup_on_dfl(cgrp))
7247 		return NOTIFY_OK;
7248 
7249 	switch (action) {
7250 	case CGROUP_LIFETIME_ONLINE:
7251 		/* inherit ->scx_sched from $parent */
7252 		if (parent)
7253 			rcu_assign_pointer(cgrp->scx_sched, parent->scx_sched);
7254 		break;
7255 	case CGROUP_LIFETIME_OFFLINE:
7256 		/* if there is a sched attached, shoot it down */
7257 		if (cgrp->scx_sched && cgrp->scx_sched->cgrp == cgrp)
7258 			scx_exit(cgrp->scx_sched, SCX_EXIT_UNREG_KERN,
7259 				 SCX_ECODE_RSN_CGROUP_OFFLINE,
7260 				 "cgroup %llu going offline", cgroup_id(cgrp));
7261 		break;
7262 	}
7263 
7264 	return NOTIFY_OK;
7265 }
7266 
7267 static struct notifier_block scx_cgroup_lifetime_nb = {
7268 	.notifier_call = scx_cgroup_lifetime_notify,
7269 };
7270 
7271 static s32 __init scx_cgroup_lifetime_notifier_init(void)
7272 {
7273 	return blocking_notifier_chain_register(&cgroup_lifetime_notifier,
7274 						&scx_cgroup_lifetime_nb);
7275 }
7276 core_initcall(scx_cgroup_lifetime_notifier_init);
7277 #endif	/* CONFIG_EXT_SUB_SCHED */
7278 
7279 static s32 scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
7280 {
7281 	static struct kthread_worker *helper;
7282 	static DEFINE_MUTEX(helper_mutex);
7283 	struct scx_enable_cmd cmd;
7284 
7285 	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
7286 			   cpu_possible_mask)) {
7287 		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
7288 		return -EINVAL;
7289 	}
7290 
7291 	if (!READ_ONCE(helper)) {
7292 		mutex_lock(&helper_mutex);
7293 		if (!helper) {
7294 			struct kthread_worker *w =
7295 				kthread_run_worker(0, "scx_enable_helper");
7296 			if (IS_ERR_OR_NULL(w)) {
7297 				mutex_unlock(&helper_mutex);
7298 				return -ENOMEM;
7299 			}
7300 			sched_set_fifo(w->task);
7301 			WRITE_ONCE(helper, w);
7302 		}
7303 		mutex_unlock(&helper_mutex);
7304 	}
7305 
7306 #ifdef CONFIG_EXT_SUB_SCHED
7307 	if (ops->sub_cgroup_id > 1)
7308 		kthread_init_work(&cmd.work, scx_sub_enable_workfn);
7309 	else
7310 #endif	/* CONFIG_EXT_SUB_SCHED */
7311 		kthread_init_work(&cmd.work, scx_root_enable_workfn);
7312 	cmd.ops = ops;
7313 
7314 	kthread_queue_work(READ_ONCE(helper), &cmd.work);
7315 	kthread_flush_work(&cmd.work);
7316 	return cmd.ret;
7317 }
7318 
7319 
7320 /********************************************************************************
7321  * bpf_struct_ops plumbing.
7322  */
7323 #include <linux/bpf_verifier.h>
7324 #include <linux/bpf.h>
7325 #include <linux/btf.h>
7326 
7327 static const struct btf_type *task_struct_type;
7328 
7329 static bool bpf_scx_is_valid_access(int off, int size,
7330 				    enum bpf_access_type type,
7331 				    const struct bpf_prog *prog,
7332 				    struct bpf_insn_access_aux *info)
7333 {
7334 	if (type != BPF_READ)
7335 		return false;
7336 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
7337 		return false;
7338 	if (off % size != 0)
7339 		return false;
7340 
7341 	return btf_ctx_access(off, size, type, prog, info);
7342 }
7343 
7344 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
7345 				     const struct bpf_reg_state *reg, int off,
7346 				     int size)
7347 {
7348 	const struct btf_type *t;
7349 
7350 	t = btf_type_by_id(reg->btf, reg->btf_id);
7351 	if (t == task_struct_type) {
7352 		/*
7353 		 * COMPAT: Will be removed in v6.23.
7354 		 */
7355 		if ((off >= offsetof(struct task_struct, scx.slice) &&
7356 		     off + size <= offsetofend(struct task_struct, scx.slice)) ||
7357 		    (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
7358 		     off + size <= offsetofend(struct task_struct, scx.dsq_vtime))) {
7359 			pr_warn("sched_ext: Writing directly to p->scx.slice/dsq_vtime is deprecated, use scx_bpf_task_set_slice/dsq_vtime()");
7360 			return SCALAR_VALUE;
7361 		}
7362 
7363 		if (off >= offsetof(struct task_struct, scx.disallow) &&
7364 		    off + size <= offsetofend(struct task_struct, scx.disallow))
7365 			return SCALAR_VALUE;
7366 	}
7367 
7368 	return -EACCES;
7369 }
7370 
7371 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
7372 	.get_func_proto = bpf_base_func_proto,
7373 	.is_valid_access = bpf_scx_is_valid_access,
7374 	.btf_struct_access = bpf_scx_btf_struct_access,
7375 };
7376 
7377 static int bpf_scx_init_member(const struct btf_type *t,
7378 			       const struct btf_member *member,
7379 			       void *kdata, const void *udata)
7380 {
7381 	const struct sched_ext_ops *uops = udata;
7382 	struct sched_ext_ops *ops = kdata;
7383 	u32 moff = __btf_member_bit_offset(t, member) / 8;
7384 	int ret;
7385 
7386 	switch (moff) {
7387 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
7388 		if (*(u32 *)(udata + moff) > INT_MAX)
7389 			return -E2BIG;
7390 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
7391 		return 1;
7392 	case offsetof(struct sched_ext_ops, flags):
7393 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
7394 			return -EINVAL;
7395 		ops->flags = *(u64 *)(udata + moff);
7396 		return 1;
7397 	case offsetof(struct sched_ext_ops, name):
7398 		ret = bpf_obj_name_cpy(ops->name, uops->name,
7399 				       sizeof(ops->name));
7400 		if (ret < 0)
7401 			return ret;
7402 		if (ret == 0)
7403 			return -EINVAL;
7404 		return 1;
7405 	case offsetof(struct sched_ext_ops, timeout_ms):
7406 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
7407 		    SCX_WATCHDOG_MAX_TIMEOUT)
7408 			return -E2BIG;
7409 		ops->timeout_ms = *(u32 *)(udata + moff);
7410 		return 1;
7411 	case offsetof(struct sched_ext_ops, exit_dump_len):
7412 		ops->exit_dump_len =
7413 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
7414 		return 1;
7415 	case offsetof(struct sched_ext_ops, hotplug_seq):
7416 		ops->hotplug_seq = *(u64 *)(udata + moff);
7417 		return 1;
7418 #ifdef CONFIG_EXT_SUB_SCHED
7419 	case offsetof(struct sched_ext_ops, sub_cgroup_id):
7420 		ops->sub_cgroup_id = *(u64 *)(udata + moff);
7421 		return 1;
7422 #endif	/* CONFIG_EXT_SUB_SCHED */
7423 	}
7424 
7425 	return 0;
7426 }
7427 
7428 #ifdef CONFIG_EXT_SUB_SCHED
7429 static void scx_pstack_recursion_on_dispatch(struct bpf_prog *prog)
7430 {
7431 	struct scx_sched *sch;
7432 
7433 	guard(rcu)();
7434 	sch = scx_prog_sched(prog->aux);
7435 	if (unlikely(!sch))
7436 		return;
7437 
7438 	scx_error(sch, "dispatch recursion detected");
7439 }
7440 #endif	/* CONFIG_EXT_SUB_SCHED */
7441 
7442 static int bpf_scx_check_member(const struct btf_type *t,
7443 				const struct btf_member *member,
7444 				const struct bpf_prog *prog)
7445 {
7446 	u32 moff = __btf_member_bit_offset(t, member) / 8;
7447 
7448 	switch (moff) {
7449 	case offsetof(struct sched_ext_ops, init_task):
7450 #ifdef CONFIG_EXT_GROUP_SCHED
7451 	case offsetof(struct sched_ext_ops, cgroup_init):
7452 	case offsetof(struct sched_ext_ops, cgroup_exit):
7453 	case offsetof(struct sched_ext_ops, cgroup_prep_move):
7454 #endif
7455 	case offsetof(struct sched_ext_ops, cpu_online):
7456 	case offsetof(struct sched_ext_ops, cpu_offline):
7457 	case offsetof(struct sched_ext_ops, init):
7458 	case offsetof(struct sched_ext_ops, exit):
7459 	case offsetof(struct sched_ext_ops, sub_attach):
7460 	case offsetof(struct sched_ext_ops, sub_detach):
7461 		break;
7462 	default:
7463 		if (prog->sleepable)
7464 			return -EINVAL;
7465 	}
7466 
7467 #ifdef CONFIG_EXT_SUB_SCHED
7468 	/*
7469 	 * Enable private stack for operations that can nest along the
7470 	 * hierarchy.
7471 	 *
7472 	 * XXX - Ideally, we should only do this for scheds that allow
7473 	 * sub-scheds and sub-scheds themselves but I don't know how to access
7474 	 * struct_ops from here.
7475 	 */
7476 	switch (moff) {
7477 	case offsetof(struct sched_ext_ops, dispatch):
7478 		prog->aux->priv_stack_requested = true;
7479 		prog->aux->recursion_detected = scx_pstack_recursion_on_dispatch;
7480 	}
7481 #endif	/* CONFIG_EXT_SUB_SCHED */
7482 
7483 	return 0;
7484 }
7485 
7486 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
7487 {
7488 	return scx_enable(kdata, link);
7489 }
7490 
7491 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
7492 {
7493 	struct sched_ext_ops *ops = kdata;
7494 	struct scx_sched *sch = rcu_dereference_protected(ops->priv, true);
7495 
7496 	scx_disable(sch, SCX_EXIT_UNREG);
7497 	scx_flush_disable_work(sch);
7498 	RCU_INIT_POINTER(ops->priv, NULL);
7499 	kobject_put(&sch->kobj);
7500 }
7501 
7502 static int bpf_scx_init(struct btf *btf)
7503 {
7504 	task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
7505 
7506 	return 0;
7507 }
7508 
7509 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
7510 {
7511 	/*
7512 	 * sched_ext does not support updating the actively-loaded BPF
7513 	 * scheduler, as registering a BPF scheduler can always fail if the
7514 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
7515 	 * etc. Similarly, we can always race with unregistration happening
7516 	 * elsewhere, such as with sysrq.
7517 	 */
7518 	return -EOPNOTSUPP;
7519 }
7520 
7521 static int bpf_scx_validate(void *kdata)
7522 {
7523 	return 0;
7524 }
7525 
7526 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
7527 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
7528 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
7529 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
7530 static void sched_ext_ops__tick(struct task_struct *p) {}
7531 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
7532 static void sched_ext_ops__running(struct task_struct *p) {}
7533 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
7534 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
7535 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
7536 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
7537 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
7538 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
7539 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
7540 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
7541 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
7542 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
7543 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
7544 static void sched_ext_ops__enable(struct task_struct *p) {}
7545 static void sched_ext_ops__disable(struct task_struct *p) {}
7546 #ifdef CONFIG_EXT_GROUP_SCHED
7547 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
7548 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
7549 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
7550 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
7551 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
7552 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
7553 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {}
7554 static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {}
7555 #endif	/* CONFIG_EXT_GROUP_SCHED */
7556 static s32 sched_ext_ops__sub_attach(struct scx_sub_attach_args *args) { return -EINVAL; }
7557 static void sched_ext_ops__sub_detach(struct scx_sub_detach_args *args) {}
7558 static void sched_ext_ops__cpu_online(s32 cpu) {}
7559 static void sched_ext_ops__cpu_offline(s32 cpu) {}
7560 static s32 sched_ext_ops__init(void) { return -EINVAL; }
7561 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
7562 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
7563 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
7564 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
7565 
7566 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
7567 	.select_cpu		= sched_ext_ops__select_cpu,
7568 	.enqueue		= sched_ext_ops__enqueue,
7569 	.dequeue		= sched_ext_ops__dequeue,
7570 	.dispatch		= sched_ext_ops__dispatch,
7571 	.tick			= sched_ext_ops__tick,
7572 	.runnable		= sched_ext_ops__runnable,
7573 	.running		= sched_ext_ops__running,
7574 	.stopping		= sched_ext_ops__stopping,
7575 	.quiescent		= sched_ext_ops__quiescent,
7576 	.yield			= sched_ext_ops__yield,
7577 	.core_sched_before	= sched_ext_ops__core_sched_before,
7578 	.set_weight		= sched_ext_ops__set_weight,
7579 	.set_cpumask		= sched_ext_ops__set_cpumask,
7580 	.update_idle		= sched_ext_ops__update_idle,
7581 	.cpu_acquire		= sched_ext_ops__cpu_acquire,
7582 	.cpu_release		= sched_ext_ops__cpu_release,
7583 	.init_task		= sched_ext_ops__init_task,
7584 	.exit_task		= sched_ext_ops__exit_task,
7585 	.enable			= sched_ext_ops__enable,
7586 	.disable		= sched_ext_ops__disable,
7587 #ifdef CONFIG_EXT_GROUP_SCHED
7588 	.cgroup_init		= sched_ext_ops__cgroup_init,
7589 	.cgroup_exit		= sched_ext_ops__cgroup_exit,
7590 	.cgroup_prep_move	= sched_ext_ops__cgroup_prep_move,
7591 	.cgroup_move		= sched_ext_ops__cgroup_move,
7592 	.cgroup_cancel_move	= sched_ext_ops__cgroup_cancel_move,
7593 	.cgroup_set_weight	= sched_ext_ops__cgroup_set_weight,
7594 	.cgroup_set_bandwidth	= sched_ext_ops__cgroup_set_bandwidth,
7595 	.cgroup_set_idle	= sched_ext_ops__cgroup_set_idle,
7596 #endif
7597 	.sub_attach		= sched_ext_ops__sub_attach,
7598 	.sub_detach		= sched_ext_ops__sub_detach,
7599 	.cpu_online		= sched_ext_ops__cpu_online,
7600 	.cpu_offline		= sched_ext_ops__cpu_offline,
7601 	.init			= sched_ext_ops__init,
7602 	.exit			= sched_ext_ops__exit,
7603 	.dump			= sched_ext_ops__dump,
7604 	.dump_cpu		= sched_ext_ops__dump_cpu,
7605 	.dump_task		= sched_ext_ops__dump_task,
7606 };
7607 
7608 static struct bpf_struct_ops bpf_sched_ext_ops = {
7609 	.verifier_ops = &bpf_scx_verifier_ops,
7610 	.reg = bpf_scx_reg,
7611 	.unreg = bpf_scx_unreg,
7612 	.check_member = bpf_scx_check_member,
7613 	.init_member = bpf_scx_init_member,
7614 	.init = bpf_scx_init,
7615 	.update = bpf_scx_update,
7616 	.validate = bpf_scx_validate,
7617 	.name = "sched_ext_ops",
7618 	.owner = THIS_MODULE,
7619 	.cfi_stubs = &__bpf_ops_sched_ext_ops
7620 };
7621 
7622 
7623 /********************************************************************************
7624  * System integration and init.
7625  */
7626 
7627 static void sysrq_handle_sched_ext_reset(u8 key)
7628 {
7629 	struct scx_sched *sch;
7630 
7631 	rcu_read_lock();
7632 	sch = rcu_dereference(scx_root);
7633 	if (likely(sch))
7634 		scx_disable(sch, SCX_EXIT_SYSRQ);
7635 	else
7636 		pr_info("sched_ext: BPF schedulers not loaded\n");
7637 	rcu_read_unlock();
7638 }
7639 
7640 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
7641 	.handler	= sysrq_handle_sched_ext_reset,
7642 	.help_msg	= "reset-sched-ext(S)",
7643 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
7644 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
7645 };
7646 
7647 static void sysrq_handle_sched_ext_dump(u8 key)
7648 {
7649 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
7650 	struct scx_sched *sch;
7651 
7652 	list_for_each_entry_rcu(sch, &scx_sched_all, all)
7653 		scx_dump_state(sch, &ei, 0, false);
7654 }
7655 
7656 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
7657 	.handler	= sysrq_handle_sched_ext_dump,
7658 	.help_msg	= "dump-sched-ext(D)",
7659 	.action_msg	= "Trigger sched_ext debug dump",
7660 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
7661 };
7662 
7663 static bool can_skip_idle_kick(struct rq *rq)
7664 {
7665 	lockdep_assert_rq_held(rq);
7666 
7667 	/*
7668 	 * We can skip idle kicking if @rq is going to go through at least one
7669 	 * full SCX scheduling cycle before going idle. Just checking whether
7670 	 * curr is not idle is insufficient because we could be racing
7671 	 * balance_one() trying to pull the next task from a remote rq, which
7672 	 * may fail, and @rq may become idle afterwards.
7673 	 *
7674 	 * The race window is small and we don't and can't guarantee that @rq is
7675 	 * only kicked while idle anyway. Skip only when sure.
7676 	 */
7677 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
7678 }
7679 
7680 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
7681 {
7682 	struct rq *rq = cpu_rq(cpu);
7683 	struct scx_rq *this_scx = &this_rq->scx;
7684 	const struct sched_class *cur_class;
7685 	bool should_wait = false;
7686 	unsigned long flags;
7687 
7688 	raw_spin_rq_lock_irqsave(rq, flags);
7689 	cur_class = rq->curr->sched_class;
7690 
7691 	/*
7692 	 * During CPU hotplug, a CPU may depend on kicking itself to make
7693 	 * forward progress. Allow kicking self regardless of online state. If
7694 	 * @cpu is running a higher class task, we have no control over @cpu.
7695 	 * Skip kicking.
7696 	 */
7697 	if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) &&
7698 	    !sched_class_above(cur_class, &ext_sched_class)) {
7699 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
7700 			if (cur_class == &ext_sched_class)
7701 				rq->curr->scx.slice = 0;
7702 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
7703 		}
7704 
7705 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
7706 			if (cur_class == &ext_sched_class) {
7707 				cpumask_set_cpu(cpu, this_scx->cpus_to_sync);
7708 				ksyncs[cpu] = rq->scx.kick_sync;
7709 				should_wait = true;
7710 			}
7711 			cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
7712 		}
7713 
7714 		resched_curr(rq);
7715 	} else {
7716 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
7717 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
7718 	}
7719 
7720 	raw_spin_rq_unlock_irqrestore(rq, flags);
7721 
7722 	return should_wait;
7723 }
7724 
7725 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
7726 {
7727 	struct rq *rq = cpu_rq(cpu);
7728 	unsigned long flags;
7729 
7730 	raw_spin_rq_lock_irqsave(rq, flags);
7731 
7732 	if (!can_skip_idle_kick(rq) &&
7733 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
7734 		resched_curr(rq);
7735 
7736 	raw_spin_rq_unlock_irqrestore(rq, flags);
7737 }
7738 
7739 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
7740 {
7741 	struct rq *this_rq = this_rq();
7742 	struct scx_rq *this_scx = &this_rq->scx;
7743 	struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs);
7744 	bool should_wait = false;
7745 	unsigned long *ksyncs;
7746 	s32 cpu;
7747 
7748 	/* can race with free_kick_syncs() during scheduler disable */
7749 	if (unlikely(!ksyncs_pcpu))
7750 		return;
7751 
7752 	ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs;
7753 
7754 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
7755 		should_wait |= kick_one_cpu(cpu, this_rq, ksyncs);
7756 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
7757 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
7758 	}
7759 
7760 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
7761 		kick_one_cpu_if_idle(cpu, this_rq);
7762 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
7763 	}
7764 
7765 	/*
7766 	 * Can't wait in hardirq — kick_sync can't advance, deadlocking if
7767 	 * CPUs wait for each other. Defer to kick_sync_wait_bal_cb().
7768 	 */
7769 	if (should_wait) {
7770 		raw_spin_rq_lock(this_rq);
7771 		this_scx->kick_sync_pending = true;
7772 		resched_curr(this_rq);
7773 		raw_spin_rq_unlock(this_rq);
7774 	}
7775 }
7776 
7777 /**
7778  * print_scx_info - print out sched_ext scheduler state
7779  * @log_lvl: the log level to use when printing
7780  * @p: target task
7781  *
7782  * If a sched_ext scheduler is enabled, print the name and state of the
7783  * scheduler. If @p is on sched_ext, print further information about the task.
7784  *
7785  * This function can be safely called on any task as long as the task_struct
7786  * itself is accessible. While safe, this function isn't synchronized and may
7787  * print out mixups or garbages of limited length.
7788  */
7789 void print_scx_info(const char *log_lvl, struct task_struct *p)
7790 {
7791 	struct scx_sched *sch;
7792 	enum scx_enable_state state = scx_enable_state();
7793 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
7794 	char runnable_at_buf[22] = "?";
7795 	struct sched_class *class;
7796 	unsigned long runnable_at;
7797 
7798 	guard(rcu)();
7799 
7800 	sch = scx_task_sched_rcu(p);
7801 
7802 	if (!sch)
7803 		return;
7804 
7805 	/*
7806 	 * Carefully check if the task was running on sched_ext, and then
7807 	 * carefully copy the time it's been runnable, and its state.
7808 	 */
7809 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
7810 	    class != &ext_sched_class) {
7811 		printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
7812 		       scx_enable_state_str[state], all);
7813 		return;
7814 	}
7815 
7816 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
7817 				      sizeof(runnable_at)))
7818 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
7819 			  jiffies_delta_msecs(runnable_at, jiffies));
7820 
7821 	/* print everything onto one line to conserve console space */
7822 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
7823 	       log_lvl, sch->ops.name, scx_enable_state_str[state], all,
7824 	       runnable_at_buf);
7825 }
7826 
7827 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
7828 {
7829 	struct scx_sched *sch;
7830 
7831 	guard(rcu)();
7832 
7833 	sch = rcu_dereference(scx_root);
7834 	if (!sch)
7835 		return NOTIFY_OK;
7836 
7837 	/*
7838 	 * SCX schedulers often have userspace components which are sometimes
7839 	 * involved in critial scheduling paths. PM operations involve freezing
7840 	 * userspace which can lead to scheduling misbehaviors including stalls.
7841 	 * Let's bypass while PM operations are in progress.
7842 	 */
7843 	switch (event) {
7844 	case PM_HIBERNATION_PREPARE:
7845 	case PM_SUSPEND_PREPARE:
7846 	case PM_RESTORE_PREPARE:
7847 		scx_bypass(sch, true);
7848 		break;
7849 	case PM_POST_HIBERNATION:
7850 	case PM_POST_SUSPEND:
7851 	case PM_POST_RESTORE:
7852 		scx_bypass(sch, false);
7853 		break;
7854 	}
7855 
7856 	return NOTIFY_OK;
7857 }
7858 
7859 static struct notifier_block scx_pm_notifier = {
7860 	.notifier_call = scx_pm_handler,
7861 };
7862 
7863 void __init init_sched_ext_class(void)
7864 {
7865 	s32 cpu, v;
7866 
7867 	/*
7868 	 * The following is to prevent the compiler from optimizing out the enum
7869 	 * definitions so that BPF scheduler implementations can use them
7870 	 * through the generated vmlinux.h.
7871 	 */
7872 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
7873 		   SCX_TG_ONLINE);
7874 
7875 	scx_idle_init_masks();
7876 
7877 	for_each_possible_cpu(cpu) {
7878 		struct rq *rq = cpu_rq(cpu);
7879 		int  n = cpu_to_node(cpu);
7880 
7881 		/* local_dsq's sch will be set during scx_root_enable() */
7882 		BUG_ON(init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL, NULL));
7883 
7884 		INIT_LIST_HEAD(&rq->scx.runnable_list);
7885 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
7886 
7887 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
7888 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
7889 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
7890 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
7891 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_sync, GFP_KERNEL, n));
7892 		raw_spin_lock_init(&rq->scx.deferred_reenq_lock);
7893 		INIT_LIST_HEAD(&rq->scx.deferred_reenq_locals);
7894 		INIT_LIST_HEAD(&rq->scx.deferred_reenq_users);
7895 		rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
7896 		rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
7897 
7898 		if (cpu_online(cpu))
7899 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
7900 	}
7901 
7902 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
7903 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
7904 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
7905 
7906 #ifdef CONFIG_EXT_SUB_SCHED
7907 	BUG_ON(rhashtable_init(&scx_sched_hash, &scx_sched_hash_params));
7908 #endif	/* CONFIG_EXT_SUB_SCHED */
7909 }
7910 
7911 
7912 /********************************************************************************
7913  * Helpers that can be called from the BPF scheduler.
7914  */
7915 static bool scx_vet_enq_flags(struct scx_sched *sch, u64 dsq_id, u64 *enq_flags)
7916 {
7917 	bool is_local = dsq_id == SCX_DSQ_LOCAL ||
7918 		(dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON;
7919 
7920 	if (*enq_flags & SCX_ENQ_IMMED) {
7921 		if (unlikely(!is_local)) {
7922 			scx_error(sch, "SCX_ENQ_IMMED on a non-local DSQ 0x%llx", dsq_id);
7923 			return false;
7924 		}
7925 	} else if ((sch->ops.flags & SCX_OPS_ALWAYS_ENQ_IMMED) && is_local) {
7926 		*enq_flags |= SCX_ENQ_IMMED;
7927 	}
7928 
7929 	return true;
7930 }
7931 
7932 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p,
7933 				    u64 dsq_id, u64 *enq_flags)
7934 {
7935 	lockdep_assert_irqs_disabled();
7936 
7937 	if (unlikely(!p)) {
7938 		scx_error(sch, "called with NULL task");
7939 		return false;
7940 	}
7941 
7942 	if (unlikely(*enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
7943 		scx_error(sch, "invalid enq_flags 0x%llx", *enq_flags);
7944 		return false;
7945 	}
7946 
7947 	/* see SCX_EV_INSERT_NOT_OWNED definition */
7948 	if (unlikely(!scx_task_on_sched(sch, p))) {
7949 		__scx_add_event(sch, SCX_EV_INSERT_NOT_OWNED, 1);
7950 		return false;
7951 	}
7952 
7953 	if (!scx_vet_enq_flags(sch, dsq_id, enq_flags))
7954 		return false;
7955 
7956 	return true;
7957 }
7958 
7959 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
7960 				  u64 dsq_id, u64 enq_flags)
7961 {
7962 	struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
7963 	struct task_struct *ddsp_task;
7964 
7965 	ddsp_task = __this_cpu_read(direct_dispatch_task);
7966 	if (ddsp_task) {
7967 		mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags);
7968 		return;
7969 	}
7970 
7971 	if (unlikely(dspc->cursor >= sch->dsp_max_batch)) {
7972 		scx_error(sch, "dispatch buffer overflow");
7973 		return;
7974 	}
7975 
7976 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
7977 		.task = p,
7978 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
7979 		.dsq_id = dsq_id,
7980 		.enq_flags = enq_flags,
7981 	};
7982 }
7983 
7984 __bpf_kfunc_start_defs();
7985 
7986 /**
7987  * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
7988  * @p: task_struct to insert
7989  * @dsq_id: DSQ to insert into
7990  * @slice: duration @p can run for in nsecs, 0 to keep the current value
7991  * @enq_flags: SCX_ENQ_*
7992  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
7993  *
7994  * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
7995  * call this function spuriously. Can be called from ops.enqueue(),
7996  * ops.select_cpu(), and ops.dispatch().
7997  *
7998  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
7999  * and @p must match the task being enqueued.
8000  *
8001  * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p
8002  * will be directly inserted into the corresponding dispatch queue after
8003  * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
8004  * inserted into the local DSQ of the CPU returned by ops.select_cpu().
8005  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
8006  * task is inserted.
8007  *
8008  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
8009  * and this function can be called upto ops.dispatch_max_batch times to insert
8010  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
8011  * remaining slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the
8012  * counter.
8013  *
8014  * This function doesn't have any locking restrictions and may be called under
8015  * BPF locks (in the future when BPF introduces more flexible locking).
8016  *
8017  * @p is allowed to run for @slice. The scheduling path is triggered on slice
8018  * exhaustion. If zero, the current residual slice is maintained. If
8019  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
8020  * scx_bpf_kick_cpu() to trigger scheduling.
8021  *
8022  * Returns %true on successful insertion, %false on failure. On the root
8023  * scheduler, %false return triggers scheduler abort and the caller doesn't need
8024  * to check the return value.
8025  */
8026 __bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id,
8027 					 u64 slice, u64 enq_flags,
8028 					 const struct bpf_prog_aux *aux)
8029 {
8030 	struct scx_sched *sch;
8031 
8032 	guard(rcu)();
8033 	sch = scx_prog_sched(aux);
8034 	if (unlikely(!sch))
8035 		return false;
8036 
8037 	if (!scx_dsq_insert_preamble(sch, p, dsq_id, &enq_flags))
8038 		return false;
8039 
8040 	if (slice)
8041 		p->scx.slice = slice;
8042 	else
8043 		p->scx.slice = p->scx.slice ?: 1;
8044 
8045 	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags);
8046 
8047 	return true;
8048 }
8049 
8050 /*
8051  * COMPAT: Will be removed in v6.23 along with the ___v2 suffix.
8052  */
8053 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id,
8054 				    u64 slice, u64 enq_flags,
8055 				    const struct bpf_prog_aux *aux)
8056 {
8057 	scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags, aux);
8058 }
8059 
8060 static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p,
8061 				 u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags)
8062 {
8063 	if (!scx_dsq_insert_preamble(sch, p, dsq_id, &enq_flags))
8064 		return false;
8065 
8066 	if (slice)
8067 		p->scx.slice = slice;
8068 	else
8069 		p->scx.slice = p->scx.slice ?: 1;
8070 
8071 	p->scx.dsq_vtime = vtime;
8072 
8073 	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
8074 
8075 	return true;
8076 }
8077 
8078 struct scx_bpf_dsq_insert_vtime_args {
8079 	/* @p can't be packed together as KF_RCU is not transitive */
8080 	u64			dsq_id;
8081 	u64			slice;
8082 	u64			vtime;
8083 	u64			enq_flags;
8084 };
8085 
8086 /**
8087  * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion
8088  * @p: task_struct to insert
8089  * @args: struct containing the rest of the arguments
8090  *       @args->dsq_id: DSQ to insert into
8091  *       @args->slice: duration @p can run for in nsecs, 0 to keep the current value
8092  *       @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
8093  *       @args->enq_flags: SCX_ENQ_*
8094  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8095  *
8096  * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument
8097  * limit. BPF programs should use scx_bpf_dsq_insert_vtime() which is provided
8098  * as an inline wrapper in common.bpf.h.
8099  *
8100  * Insert @p into the vtime priority queue of the DSQ identified by
8101  * @args->dsq_id. Tasks queued into the priority queue are ordered by
8102  * @args->vtime. All other aspects are identical to scx_bpf_dsq_insert().
8103  *
8104  * @args->vtime ordering is according to time_before64() which considers
8105  * wrapping. A numerically larger vtime may indicate an earlier position in the
8106  * ordering and vice-versa.
8107  *
8108  * A DSQ can only be used as a FIFO or priority queue at any given time and this
8109  * function must not be called on a DSQ which already has one or more FIFO tasks
8110  * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
8111  * SCX_DSQ_GLOBAL) cannot be used as priority queues.
8112  *
8113  * Returns %true on successful insertion, %false on failure. On the root
8114  * scheduler, %false return triggers scheduler abort and the caller doesn't need
8115  * to check the return value.
8116  */
8117 __bpf_kfunc bool
8118 __scx_bpf_dsq_insert_vtime(struct task_struct *p,
8119 			   struct scx_bpf_dsq_insert_vtime_args *args,
8120 			   const struct bpf_prog_aux *aux)
8121 {
8122 	struct scx_sched *sch;
8123 
8124 	guard(rcu)();
8125 
8126 	sch = scx_prog_sched(aux);
8127 	if (unlikely(!sch))
8128 		return false;
8129 
8130 	return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice,
8131 				    args->vtime, args->enq_flags);
8132 }
8133 
8134 /*
8135  * COMPAT: Will be removed in v6.23.
8136  */
8137 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
8138 					  u64 slice, u64 vtime, u64 enq_flags)
8139 {
8140 	struct scx_sched *sch;
8141 
8142 	guard(rcu)();
8143 
8144 	sch = rcu_dereference(scx_root);
8145 	if (unlikely(!sch))
8146 		return;
8147 
8148 #ifdef CONFIG_EXT_SUB_SCHED
8149 	/*
8150 	 * Disallow if any sub-scheds are attached. There is no way to tell
8151 	 * which scheduler called us, just error out @p's scheduler.
8152 	 */
8153 	if (unlikely(!list_empty(&sch->children))) {
8154 		scx_error(scx_task_sched(p), "__scx_bpf_dsq_insert_vtime() must be used");
8155 		return;
8156 	}
8157 #endif
8158 
8159 	scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags);
8160 }
8161 
8162 __bpf_kfunc_end_defs();
8163 
8164 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
8165 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_IMPLICIT_ARGS | KF_RCU)
8166 BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_IMPLICIT_ARGS | KF_RCU)
8167 BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_IMPLICIT_ARGS | KF_RCU)
8168 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
8169 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
8170 
8171 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
8172 	.owner			= THIS_MODULE,
8173 	.set			= &scx_kfunc_ids_enqueue_dispatch,
8174 	.filter			= scx_kfunc_context_filter,
8175 };
8176 
8177 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
8178 			 struct task_struct *p, u64 dsq_id, u64 enq_flags)
8179 {
8180 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
8181 	struct scx_sched *sch;
8182 	struct rq *this_rq, *src_rq, *locked_rq;
8183 	bool dispatched = false;
8184 	bool in_balance;
8185 	unsigned long flags;
8186 
8187 	/*
8188 	 * The verifier considers an iterator slot initialized on any
8189 	 * KF_ITER_NEW return, so a BPF program may legally reach here after
8190 	 * bpf_iter_scx_dsq_new() failed and left @kit->dsq NULL.
8191 	 */
8192 	if (unlikely(!src_dsq))
8193 		return false;
8194 
8195 	sch = src_dsq->sched;
8196 
8197 	if (!scx_vet_enq_flags(sch, dsq_id, &enq_flags))
8198 		return false;
8199 
8200 	/*
8201 	 * If the BPF scheduler keeps calling this function repeatedly, it can
8202 	 * cause similar live-lock conditions as consume_dispatch_q().
8203 	 */
8204 	if (unlikely(READ_ONCE(sch->aborting)))
8205 		return false;
8206 
8207 	if (unlikely(!scx_task_on_sched(sch, p))) {
8208 		scx_error(sch, "scx_bpf_dsq_move[_vtime]() on %s[%d] but the task belongs to a different scheduler",
8209 			  p->comm, p->pid);
8210 		return false;
8211 	}
8212 
8213 	/*
8214 	 * Can be called from either ops.dispatch() locking this_rq() or any
8215 	 * context where no rq lock is held. If latter, lock @p's task_rq which
8216 	 * we'll likely need anyway.
8217 	 */
8218 	src_rq = task_rq(p);
8219 
8220 	local_irq_save(flags);
8221 	this_rq = this_rq();
8222 	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
8223 
8224 	if (in_balance) {
8225 		if (this_rq != src_rq) {
8226 			raw_spin_rq_unlock(this_rq);
8227 			raw_spin_rq_lock(src_rq);
8228 		}
8229 	} else {
8230 		raw_spin_rq_lock(src_rq);
8231 	}
8232 
8233 	locked_rq = src_rq;
8234 	raw_spin_lock(&src_dsq->lock);
8235 
8236 	/* did someone else get to it while we dropped the locks? */
8237 	if (nldsq_cursor_lost_task(&kit->cursor, src_rq, src_dsq, p)) {
8238 		raw_spin_unlock(&src_dsq->lock);
8239 		goto out;
8240 	}
8241 
8242 	/* @p is still on $src_dsq and stable, determine the destination */
8243 	dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, task_cpu(p));
8244 
8245 	/*
8246 	 * Apply vtime and slice updates before moving so that the new time is
8247 	 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
8248 	 * this is safe as we're locking it.
8249 	 */
8250 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
8251 		p->scx.dsq_vtime = kit->vtime;
8252 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
8253 		p->scx.slice = kit->slice;
8254 
8255 	/* execute move */
8256 	locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
8257 	dispatched = true;
8258 out:
8259 	if (in_balance) {
8260 		if (this_rq != locked_rq) {
8261 			raw_spin_rq_unlock(locked_rq);
8262 			raw_spin_rq_lock(this_rq);
8263 		}
8264 	} else {
8265 		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
8266 	}
8267 
8268 	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
8269 			       __SCX_DSQ_ITER_HAS_VTIME);
8270 	return dispatched;
8271 }
8272 
8273 __bpf_kfunc_start_defs();
8274 
8275 /**
8276  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
8277  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8278  *
8279  * Can only be called from ops.dispatch().
8280  */
8281 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(const struct bpf_prog_aux *aux)
8282 {
8283 	struct scx_sched *sch;
8284 
8285 	guard(rcu)();
8286 
8287 	sch = scx_prog_sched(aux);
8288 	if (unlikely(!sch))
8289 		return 0;
8290 
8291 	return sch->dsp_max_batch - __this_cpu_read(sch->pcpu->dsp_ctx.cursor);
8292 }
8293 
8294 /**
8295  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
8296  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8297  *
8298  * Cancel the latest dispatch. Can be called multiple times to cancel further
8299  * dispatches. Can only be called from ops.dispatch().
8300  */
8301 __bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux)
8302 {
8303 	struct scx_sched *sch;
8304 	struct scx_dsp_ctx *dspc;
8305 
8306 	guard(rcu)();
8307 
8308 	sch = scx_prog_sched(aux);
8309 	if (unlikely(!sch))
8310 		return;
8311 
8312 	dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
8313 
8314 	if (dspc->cursor > 0)
8315 		dspc->cursor--;
8316 	else
8317 		scx_error(sch, "dispatch buffer underflow");
8318 }
8319 
8320 /**
8321  * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
8322  * @dsq_id: DSQ to move task from. Must be a user-created DSQ
8323  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8324  * @enq_flags: %SCX_ENQ_*
8325  *
8326  * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
8327  * local DSQ for execution with @enq_flags applied. Can only be called from
8328  * ops.dispatch().
8329  *
8330  * Built-in DSQs (%SCX_DSQ_GLOBAL and %SCX_DSQ_LOCAL*) are not supported as
8331  * sources. Local DSQs support reenqueueing (a task can be picked up for
8332  * execution, dequeued for property changes, or reenqueued), but the BPF
8333  * scheduler cannot directly iterate or move tasks from them. %SCX_DSQ_GLOBAL
8334  * is similar but also doesn't support reenqueueing, as it maps to multiple
8335  * per-node DSQs making the scope difficult to define; this may change in the
8336  * future.
8337  *
8338  * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
8339  * before trying to move from the specified DSQ. It may also grab rq locks and
8340  * thus can't be called under any BPF locks.
8341  *
8342  * Returns %true if a task has been moved, %false if there isn't any task to
8343  * move.
8344  */
8345 __bpf_kfunc bool scx_bpf_dsq_move_to_local___v2(u64 dsq_id, u64 enq_flags,
8346 						const struct bpf_prog_aux *aux)
8347 {
8348 	struct scx_dispatch_q *dsq;
8349 	struct scx_sched *sch;
8350 	struct scx_dsp_ctx *dspc;
8351 
8352 	guard(rcu)();
8353 
8354 	sch = scx_prog_sched(aux);
8355 	if (unlikely(!sch))
8356 		return false;
8357 
8358 	if (!scx_vet_enq_flags(sch, SCX_DSQ_LOCAL, &enq_flags))
8359 		return false;
8360 
8361 	dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
8362 
8363 	flush_dispatch_buf(sch, dspc->rq);
8364 
8365 	dsq = find_user_dsq(sch, dsq_id);
8366 	if (unlikely(!dsq)) {
8367 		scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
8368 		return false;
8369 	}
8370 
8371 	if (consume_dispatch_q(sch, dspc->rq, dsq, enq_flags)) {
8372 		/*
8373 		 * A successfully consumed task can be dequeued before it starts
8374 		 * running while the CPU is trying to migrate other dispatched
8375 		 * tasks. Bump nr_tasks to tell balance_one() to retry on empty
8376 		 * local DSQ.
8377 		 */
8378 		dspc->nr_tasks++;
8379 		return true;
8380 	} else {
8381 		return false;
8382 	}
8383 }
8384 
8385 /*
8386  * COMPAT: ___v2 was introduced in v7.1. Remove this and ___v2 tag in the future.
8387  */
8388 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id, const struct bpf_prog_aux *aux)
8389 {
8390 	return scx_bpf_dsq_move_to_local___v2(dsq_id, 0, aux);
8391 }
8392 
8393 /**
8394  * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
8395  * @it__iter: DSQ iterator in progress
8396  * @slice: duration the moved task can run for in nsecs
8397  *
8398  * Override the slice of the next task that will be moved from @it__iter using
8399  * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
8400  * slice duration is kept.
8401  */
8402 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
8403 					    u64 slice)
8404 {
8405 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
8406 
8407 	kit->slice = slice;
8408 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
8409 }
8410 
8411 /**
8412  * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
8413  * @it__iter: DSQ iterator in progress
8414  * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
8415  *
8416  * Override the vtime of the next task that will be moved from @it__iter using
8417  * scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice
8418  * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
8419  * override is ignored and cleared.
8420  */
8421 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
8422 					    u64 vtime)
8423 {
8424 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
8425 
8426 	kit->vtime = vtime;
8427 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
8428 }
8429 
8430 /**
8431  * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
8432  * @it__iter: DSQ iterator in progress
8433  * @p: task to transfer
8434  * @dsq_id: DSQ to move @p to
8435  * @enq_flags: SCX_ENQ_*
8436  *
8437  * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
8438  * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
8439  * be the destination.
8440  *
8441  * For the transfer to be successful, @p must still be on the DSQ and have been
8442  * queued before the DSQ iteration started. This function doesn't care whether
8443  * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
8444  * been queued before the iteration started.
8445  *
8446  * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
8447  *
8448  * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
8449  * lock (e.g. BPF timers or SYSCALL programs).
8450  *
8451  * Returns %true if @p has been consumed, %false if @p had already been
8452  * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local
8453  * DSQ.
8454  */
8455 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
8456 				  struct task_struct *p, u64 dsq_id,
8457 				  u64 enq_flags)
8458 {
8459 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
8460 			    p, dsq_id, enq_flags);
8461 }
8462 
8463 /**
8464  * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
8465  * @it__iter: DSQ iterator in progress
8466  * @p: task to transfer
8467  * @dsq_id: DSQ to move @p to
8468  * @enq_flags: SCX_ENQ_*
8469  *
8470  * Transfer @p which is on the DSQ currently iterated by @it__iter to the
8471  * priority queue of the DSQ specified by @dsq_id. The destination must be a
8472  * user DSQ as only user DSQs support priority queue.
8473  *
8474  * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
8475  * and scx_bpf_dsq_move_set_vtime() to update.
8476  *
8477  * All other aspects are identical to scx_bpf_dsq_move(). See
8478  * scx_bpf_dsq_insert_vtime() for more information on @vtime.
8479  */
8480 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
8481 					struct task_struct *p, u64 dsq_id,
8482 					u64 enq_flags)
8483 {
8484 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
8485 			    p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
8486 }
8487 
8488 #ifdef CONFIG_EXT_SUB_SCHED
8489 /**
8490  * scx_bpf_sub_dispatch - Trigger dispatching on a child scheduler
8491  * @cgroup_id: cgroup ID of the child scheduler to dispatch
8492  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8493  *
8494  * Allows a parent scheduler to trigger dispatching on one of its direct
8495  * child schedulers. The child scheduler runs its dispatch operation to
8496  * move tasks from dispatch queues to the local runqueue.
8497  *
8498  * Returns: true on success, false if cgroup_id is invalid, not a direct
8499  * child, or caller lacks dispatch permission.
8500  */
8501 __bpf_kfunc bool scx_bpf_sub_dispatch(u64 cgroup_id, const struct bpf_prog_aux *aux)
8502 {
8503 	struct rq *this_rq = this_rq();
8504 	struct scx_sched *parent, *child;
8505 
8506 	guard(rcu)();
8507 	parent = scx_prog_sched(aux);
8508 	if (unlikely(!parent))
8509 		return false;
8510 
8511 	child = scx_find_sub_sched(cgroup_id);
8512 
8513 	if (unlikely(!child))
8514 		return false;
8515 
8516 	if (unlikely(scx_parent(child) != parent)) {
8517 		scx_error(parent, "trying to dispatch a distant sub-sched on cgroup %llu",
8518 			  cgroup_id);
8519 		return false;
8520 	}
8521 
8522 	return scx_dispatch_sched(child, this_rq, this_rq->scx.sub_dispatch_prev,
8523 				  true);
8524 }
8525 #endif	/* CONFIG_EXT_SUB_SCHED */
8526 
8527 __bpf_kfunc_end_defs();
8528 
8529 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
8530 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots, KF_IMPLICIT_ARGS)
8531 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel, KF_IMPLICIT_ARGS)
8532 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local, KF_IMPLICIT_ARGS)
8533 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local___v2, KF_IMPLICIT_ARGS)
8534 /* scx_bpf_dsq_move*() also in scx_kfunc_ids_unlocked: callable from unlocked contexts */
8535 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
8536 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
8537 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
8538 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
8539 #ifdef CONFIG_EXT_SUB_SCHED
8540 BTF_ID_FLAGS(func, scx_bpf_sub_dispatch, KF_IMPLICIT_ARGS)
8541 #endif
8542 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
8543 
8544 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
8545 	.owner			= THIS_MODULE,
8546 	.set			= &scx_kfunc_ids_dispatch,
8547 	.filter			= scx_kfunc_context_filter,
8548 };
8549 
8550 __bpf_kfunc_start_defs();
8551 
8552 /**
8553  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
8554  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8555  *
8556  * Iterate over all of the tasks currently enqueued on the local DSQ of the
8557  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
8558  * processed tasks. Can only be called from ops.cpu_release().
8559  */
8560 __bpf_kfunc u32 scx_bpf_reenqueue_local(const struct bpf_prog_aux *aux)
8561 {
8562 	struct scx_sched *sch;
8563 	struct rq *rq;
8564 
8565 	guard(rcu)();
8566 	sch = scx_prog_sched(aux);
8567 	if (unlikely(!sch))
8568 		return 0;
8569 
8570 	rq = cpu_rq(smp_processor_id());
8571 	lockdep_assert_rq_held(rq);
8572 
8573 	return reenq_local(sch, rq, SCX_REENQ_ANY);
8574 }
8575 
8576 __bpf_kfunc_end_defs();
8577 
8578 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
8579 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local, KF_IMPLICIT_ARGS)
8580 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
8581 
8582 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
8583 	.owner			= THIS_MODULE,
8584 	.set			= &scx_kfunc_ids_cpu_release,
8585 	.filter			= scx_kfunc_context_filter,
8586 };
8587 
8588 __bpf_kfunc_start_defs();
8589 
8590 /**
8591  * scx_bpf_create_dsq - Create a custom DSQ
8592  * @dsq_id: DSQ to create
8593  * @node: NUMA node to allocate from
8594  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8595  *
8596  * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
8597  * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
8598  */
8599 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node, const struct bpf_prog_aux *aux)
8600 {
8601 	struct scx_dispatch_q *dsq;
8602 	struct scx_sched *sch;
8603 	s32 ret;
8604 
8605 	if (unlikely(node >= (int)nr_node_ids ||
8606 		     (node < 0 && node != NUMA_NO_NODE)))
8607 		return -EINVAL;
8608 
8609 	if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
8610 		return -EINVAL;
8611 
8612 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
8613 	if (!dsq)
8614 		return -ENOMEM;
8615 
8616 	/*
8617 	 * init_dsq() must be called in GFP_KERNEL context. Init it with NULL
8618 	 * @sch and update afterwards.
8619 	 */
8620 	ret = init_dsq(dsq, dsq_id, NULL);
8621 	if (ret) {
8622 		kfree(dsq);
8623 		return ret;
8624 	}
8625 
8626 	rcu_read_lock();
8627 
8628 	sch = scx_prog_sched(aux);
8629 	if (sch) {
8630 		dsq->sched = sch;
8631 		ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
8632 						    dsq_hash_params);
8633 	} else {
8634 		ret = -ENODEV;
8635 	}
8636 
8637 	rcu_read_unlock();
8638 	if (ret) {
8639 		exit_dsq(dsq);
8640 		kfree(dsq);
8641 	}
8642 	return ret;
8643 }
8644 
8645 __bpf_kfunc_end_defs();
8646 
8647 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
8648 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_IMPLICIT_ARGS | KF_SLEEPABLE)
8649 /* also in scx_kfunc_ids_dispatch: also callable from ops.dispatch() */
8650 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
8651 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
8652 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
8653 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
8654 /* also in scx_kfunc_ids_select_cpu: also callable from ops.select_cpu()/ops.enqueue() */
8655 BTF_ID_FLAGS(func, __scx_bpf_select_cpu_and, KF_IMPLICIT_ARGS | KF_RCU)
8656 BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
8657 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_IMPLICIT_ARGS | KF_RCU)
8658 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
8659 
8660 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
8661 	.owner			= THIS_MODULE,
8662 	.set			= &scx_kfunc_ids_unlocked,
8663 	.filter			= scx_kfunc_context_filter,
8664 };
8665 
8666 __bpf_kfunc_start_defs();
8667 
8668 /**
8669  * scx_bpf_task_set_slice - Set task's time slice
8670  * @p: task of interest
8671  * @slice: time slice to set in nsecs
8672  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8673  *
8674  * Set @p's time slice to @slice. Returns %true on success, %false if the
8675  * calling scheduler doesn't have authority over @p.
8676  */
8677 __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice,
8678 					const struct bpf_prog_aux *aux)
8679 {
8680 	struct scx_sched *sch;
8681 
8682 	guard(rcu)();
8683 	sch = scx_prog_sched(aux);
8684 	if (unlikely(!sch || !scx_task_on_sched(sch, p)))
8685 		return false;
8686 
8687 	p->scx.slice = slice;
8688 	return true;
8689 }
8690 
8691 /**
8692  * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering
8693  * @p: task of interest
8694  * @vtime: virtual time to set
8695  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8696  *
8697  * Set @p's virtual time to @vtime. Returns %true on success, %false if the
8698  * calling scheduler doesn't have authority over @p.
8699  */
8700 __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime,
8701 					    const struct bpf_prog_aux *aux)
8702 {
8703 	struct scx_sched *sch;
8704 
8705 	guard(rcu)();
8706 	sch = scx_prog_sched(aux);
8707 	if (unlikely(!sch || !scx_task_on_sched(sch, p)))
8708 		return false;
8709 
8710 	p->scx.dsq_vtime = vtime;
8711 	return true;
8712 }
8713 
8714 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags)
8715 {
8716 	struct rq *this_rq;
8717 	unsigned long irq_flags;
8718 
8719 	if (!ops_cpu_valid(sch, cpu, NULL))
8720 		return;
8721 
8722 	local_irq_save(irq_flags);
8723 
8724 	this_rq = this_rq();
8725 
8726 	/*
8727 	 * While bypassing for PM ops, IRQ handling may not be online which can
8728 	 * lead to irq_work_queue() malfunction such as infinite busy wait for
8729 	 * IRQ status update. Suppress kicking.
8730 	 */
8731 	if (scx_bypassing(sch, cpu_of(this_rq)))
8732 		goto out;
8733 
8734 	/*
8735 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
8736 	 * rq locks. We can probably be smarter and avoid bouncing if called
8737 	 * from ops which don't hold a rq lock.
8738 	 */
8739 	if (flags & SCX_KICK_IDLE) {
8740 		struct rq *target_rq = cpu_rq(cpu);
8741 
8742 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
8743 			scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
8744 
8745 		if (raw_spin_rq_trylock(target_rq)) {
8746 			if (can_skip_idle_kick(target_rq)) {
8747 				raw_spin_rq_unlock(target_rq);
8748 				goto out;
8749 			}
8750 			raw_spin_rq_unlock(target_rq);
8751 		}
8752 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
8753 	} else {
8754 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
8755 
8756 		if (flags & SCX_KICK_PREEMPT)
8757 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
8758 		if (flags & SCX_KICK_WAIT)
8759 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
8760 	}
8761 
8762 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
8763 out:
8764 	local_irq_restore(irq_flags);
8765 }
8766 
8767 /**
8768  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
8769  * @cpu: cpu to kick
8770  * @flags: %SCX_KICK_* flags
8771  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8772  *
8773  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
8774  * trigger rescheduling on a busy CPU. This can be called from any online
8775  * scx_ops operation and the actual kicking is performed asynchronously through
8776  * an irq work.
8777  */
8778 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags, const struct bpf_prog_aux *aux)
8779 {
8780 	struct scx_sched *sch;
8781 
8782 	guard(rcu)();
8783 	sch = scx_prog_sched(aux);
8784 	if (likely(sch))
8785 		scx_kick_cpu(sch, cpu, flags);
8786 }
8787 
8788 /**
8789  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
8790  * @dsq_id: id of the DSQ
8791  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8792  *
8793  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
8794  * -%ENOENT is returned.
8795  */
8796 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id, const struct bpf_prog_aux *aux)
8797 {
8798 	struct scx_sched *sch;
8799 	struct scx_dispatch_q *dsq;
8800 	s32 ret;
8801 
8802 	preempt_disable();
8803 
8804 	sch = scx_prog_sched(aux);
8805 	if (unlikely(!sch)) {
8806 		ret = -ENODEV;
8807 		goto out;
8808 	}
8809 
8810 	if (dsq_id == SCX_DSQ_LOCAL) {
8811 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
8812 		goto out;
8813 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
8814 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
8815 
8816 		if (ops_cpu_valid(sch, cpu, NULL)) {
8817 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
8818 			goto out;
8819 		}
8820 	} else {
8821 		dsq = find_user_dsq(sch, dsq_id);
8822 		if (dsq) {
8823 			ret = READ_ONCE(dsq->nr);
8824 			goto out;
8825 		}
8826 	}
8827 	ret = -ENOENT;
8828 out:
8829 	preempt_enable();
8830 	return ret;
8831 }
8832 
8833 /**
8834  * scx_bpf_destroy_dsq - Destroy a custom DSQ
8835  * @dsq_id: DSQ to destroy
8836  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8837  *
8838  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
8839  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
8840  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
8841  * which doesn't exist. Can be called from any online scx_ops operations.
8842  */
8843 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id, const struct bpf_prog_aux *aux)
8844 {
8845 	struct scx_sched *sch;
8846 
8847 	guard(rcu)();
8848 	sch = scx_prog_sched(aux);
8849 	if (sch)
8850 		destroy_dsq(sch, dsq_id);
8851 }
8852 
8853 /**
8854  * bpf_iter_scx_dsq_new - Create a DSQ iterator
8855  * @it: iterator to initialize
8856  * @dsq_id: DSQ to iterate
8857  * @flags: %SCX_DSQ_ITER_*
8858  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8859  *
8860  * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
8861  * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
8862  * tasks which are already queued when this function is invoked.
8863  */
8864 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
8865 				     u64 flags, const struct bpf_prog_aux *aux)
8866 {
8867 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
8868 	struct scx_sched *sch;
8869 
8870 	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
8871 		     sizeof(struct bpf_iter_scx_dsq));
8872 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
8873 		     __alignof__(struct bpf_iter_scx_dsq));
8874 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
8875 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
8876 
8877 	/*
8878 	 * next() and destroy() will be called regardless of the return value.
8879 	 * Always clear $kit->dsq.
8880 	 */
8881 	kit->dsq = NULL;
8882 
8883 	sch = scx_prog_sched(aux);
8884 	if (unlikely(!sch))
8885 		return -ENODEV;
8886 
8887 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
8888 		return -EINVAL;
8889 
8890 	kit->dsq = find_user_dsq(sch, dsq_id);
8891 	if (!kit->dsq)
8892 		return -ENOENT;
8893 
8894 	kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, kit->dsq, flags);
8895 
8896 	return 0;
8897 }
8898 
8899 /**
8900  * bpf_iter_scx_dsq_next - Progress a DSQ iterator
8901  * @it: iterator to progress
8902  *
8903  * Return the next task. See bpf_iter_scx_dsq_new().
8904  */
8905 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
8906 {
8907 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
8908 
8909 	if (!kit->dsq)
8910 		return NULL;
8911 
8912 	guard(raw_spinlock_irqsave)(&kit->dsq->lock);
8913 
8914 	return nldsq_cursor_next_task(&kit->cursor, kit->dsq);
8915 }
8916 
8917 /**
8918  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
8919  * @it: iterator to destroy
8920  *
8921  * Undo scx_iter_scx_dsq_new().
8922  */
8923 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
8924 {
8925 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
8926 
8927 	if (!kit->dsq)
8928 		return;
8929 
8930 	if (!list_empty(&kit->cursor.node)) {
8931 		unsigned long flags;
8932 
8933 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
8934 		list_del_init(&kit->cursor.node);
8935 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
8936 	}
8937 	kit->dsq = NULL;
8938 }
8939 
8940 /**
8941  * scx_bpf_dsq_peek - Lockless peek at the first element.
8942  * @dsq_id: DSQ to examine.
8943  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8944  *
8945  * Read the first element in the DSQ. This is semantically equivalent to using
8946  * the DSQ iterator, but is lockfree. Of course, like any lockless operation,
8947  * this provides only a point-in-time snapshot, and the contents may change
8948  * by the time any subsequent locking operation reads the queue.
8949  *
8950  * Returns the pointer, or NULL indicates an empty queue OR internal error.
8951  */
8952 __bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id,
8953 						 const struct bpf_prog_aux *aux)
8954 {
8955 	struct scx_sched *sch;
8956 	struct scx_dispatch_q *dsq;
8957 
8958 	sch = scx_prog_sched(aux);
8959 	if (unlikely(!sch))
8960 		return NULL;
8961 
8962 	if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) {
8963 		scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id);
8964 		return NULL;
8965 	}
8966 
8967 	dsq = find_user_dsq(sch, dsq_id);
8968 	if (unlikely(!dsq)) {
8969 		scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id);
8970 		return NULL;
8971 	}
8972 
8973 	return rcu_dereference(dsq->first_task);
8974 }
8975 
8976 /**
8977  * scx_bpf_dsq_reenq - Re-enqueue tasks on a DSQ
8978  * @dsq_id: DSQ to re-enqueue
8979  * @reenq_flags: %SCX_RENQ_*
8980  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8981  *
8982  * Iterate over all of the tasks currently enqueued on the DSQ identified by
8983  * @dsq_id, and re-enqueue them in the BPF scheduler. The following DSQs are
8984  * supported:
8985  *
8986  * - Local DSQs (%SCX_DSQ_LOCAL or %SCX_DSQ_LOCAL_ON | $cpu)
8987  * - User DSQs
8988  *
8989  * Re-enqueues are performed asynchronously. Can be called from anywhere.
8990  */
8991 __bpf_kfunc void scx_bpf_dsq_reenq(u64 dsq_id, u64 reenq_flags,
8992 				   const struct bpf_prog_aux *aux)
8993 {
8994 	struct scx_sched *sch;
8995 	struct scx_dispatch_q *dsq;
8996 
8997 	guard(preempt)();
8998 
8999 	sch = scx_prog_sched(aux);
9000 	if (unlikely(!sch))
9001 		return;
9002 
9003 	if (unlikely(reenq_flags & ~__SCX_REENQ_USER_MASK)) {
9004 		scx_error(sch, "invalid SCX_REENQ flags 0x%llx", reenq_flags);
9005 		return;
9006 	}
9007 
9008 	/* not specifying any filter bits is the same as %SCX_REENQ_ANY */
9009 	if (!(reenq_flags & __SCX_REENQ_FILTER_MASK))
9010 		reenq_flags |= SCX_REENQ_ANY;
9011 
9012 	dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, smp_processor_id());
9013 	schedule_dsq_reenq(sch, dsq, reenq_flags, scx_locked_rq());
9014 }
9015 
9016 /**
9017  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
9018  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9019  *
9020  * Iterate over all of the tasks currently enqueued on the local DSQ of the
9021  * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from
9022  * anywhere.
9023  *
9024  * This is now a special case of scx_bpf_dsq_reenq() and may be removed in the
9025  * future.
9026  */
9027 __bpf_kfunc void scx_bpf_reenqueue_local___v2(const struct bpf_prog_aux *aux)
9028 {
9029 	scx_bpf_dsq_reenq(SCX_DSQ_LOCAL, 0, aux);
9030 }
9031 
9032 __bpf_kfunc_end_defs();
9033 
9034 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf,
9035 			 size_t line_size, char *fmt, unsigned long long *data,
9036 			 u32 data__sz)
9037 {
9038 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
9039 	s32 ret;
9040 
9041 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
9042 	    (data__sz && !data)) {
9043 		scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz);
9044 		return -EINVAL;
9045 	}
9046 
9047 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
9048 	if (ret < 0) {
9049 		scx_error(sch, "failed to read data fields (%d)", ret);
9050 		return ret;
9051 	}
9052 
9053 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
9054 				  &bprintf_data);
9055 	if (ret < 0) {
9056 		scx_error(sch, "format preparation failed (%d)", ret);
9057 		return ret;
9058 	}
9059 
9060 	ret = bstr_printf(line_buf, line_size, fmt,
9061 			  bprintf_data.bin_args);
9062 	bpf_bprintf_cleanup(&bprintf_data);
9063 	if (ret < 0) {
9064 		scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz);
9065 		return ret;
9066 	}
9067 
9068 	return ret;
9069 }
9070 
9071 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf,
9072 		       char *fmt, unsigned long long *data, u32 data__sz)
9073 {
9074 	return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line),
9075 			     fmt, data, data__sz);
9076 }
9077 
9078 __bpf_kfunc_start_defs();
9079 
9080 /**
9081  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
9082  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
9083  * @fmt: error message format string
9084  * @data: format string parameters packaged using ___bpf_fill() macro
9085  * @data__sz: @data len, must end in '__sz' for the verifier
9086  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9087  *
9088  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
9089  * disabling.
9090  */
9091 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
9092 				   unsigned long long *data, u32 data__sz,
9093 				   const struct bpf_prog_aux *aux)
9094 {
9095 	struct scx_sched *sch;
9096 	unsigned long flags;
9097 
9098 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
9099 	sch = scx_prog_sched(aux);
9100 	if (likely(sch) &&
9101 	    bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
9102 		scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line);
9103 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
9104 }
9105 
9106 /**
9107  * scx_bpf_error_bstr - Indicate fatal error
9108  * @fmt: error message format string
9109  * @data: format string parameters packaged using ___bpf_fill() macro
9110  * @data__sz: @data len, must end in '__sz' for the verifier
9111  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9112  *
9113  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
9114  * disabling.
9115  */
9116 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
9117 				    u32 data__sz, const struct bpf_prog_aux *aux)
9118 {
9119 	struct scx_sched *sch;
9120 	unsigned long flags;
9121 
9122 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
9123 	sch = scx_prog_sched(aux);
9124 	if (likely(sch) &&
9125 	    bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
9126 		scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line);
9127 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
9128 }
9129 
9130 /**
9131  * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
9132  * @fmt: format string
9133  * @data: format string parameters packaged using ___bpf_fill() macro
9134  * @data__sz: @data len, must end in '__sz' for the verifier
9135  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9136  *
9137  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
9138  * dump_task() to generate extra debug dump specific to the BPF scheduler.
9139  *
9140  * The extra dump may be multiple lines. A single line may be split over
9141  * multiple calls. The last line is automatically terminated.
9142  */
9143 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
9144 				   u32 data__sz, const struct bpf_prog_aux *aux)
9145 {
9146 	struct scx_sched *sch;
9147 	struct scx_dump_data *dd = &scx_dump_data;
9148 	struct scx_bstr_buf *buf = &dd->buf;
9149 	s32 ret;
9150 
9151 	guard(rcu)();
9152 
9153 	sch = scx_prog_sched(aux);
9154 	if (unlikely(!sch))
9155 		return;
9156 
9157 	if (raw_smp_processor_id() != dd->cpu) {
9158 		scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends");
9159 		return;
9160 	}
9161 
9162 	/* append the formatted string to the line buf */
9163 	ret = __bstr_format(sch, buf->data, buf->line + dd->cursor,
9164 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
9165 	if (ret < 0) {
9166 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
9167 			  dd->prefix, fmt, data, data__sz, ret);
9168 		return;
9169 	}
9170 
9171 	dd->cursor += ret;
9172 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
9173 
9174 	if (!dd->cursor)
9175 		return;
9176 
9177 	/*
9178 	 * If the line buf overflowed or ends in a newline, flush it into the
9179 	 * dump. This is to allow the caller to generate a single line over
9180 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
9181 	 * the line buf, the only case which can lead to an unexpected
9182 	 * truncation is when the caller keeps generating newlines in the middle
9183 	 * instead of the end consecutively. Don't do that.
9184 	 */
9185 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
9186 		ops_dump_flush();
9187 }
9188 
9189 /**
9190  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
9191  * @cpu: CPU of interest
9192  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9193  *
9194  * Return the maximum relative capacity of @cpu in relation to the most
9195  * performant CPU in the system. The return value is in the range [1,
9196  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
9197  */
9198 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu, const struct bpf_prog_aux *aux)
9199 {
9200 	struct scx_sched *sch;
9201 
9202 	guard(rcu)();
9203 
9204 	sch = scx_prog_sched(aux);
9205 	if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
9206 		return arch_scale_cpu_capacity(cpu);
9207 	else
9208 		return SCX_CPUPERF_ONE;
9209 }
9210 
9211 /**
9212  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
9213  * @cpu: CPU of interest
9214  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9215  *
9216  * Return the current relative performance of @cpu in relation to its maximum.
9217  * The return value is in the range [1, %SCX_CPUPERF_ONE].
9218  *
9219  * The current performance level of a CPU in relation to the maximum performance
9220  * available in the system can be calculated as follows:
9221  *
9222  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
9223  *
9224  * The result is in the range [1, %SCX_CPUPERF_ONE].
9225  */
9226 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu, const struct bpf_prog_aux *aux)
9227 {
9228 	struct scx_sched *sch;
9229 
9230 	guard(rcu)();
9231 
9232 	sch = scx_prog_sched(aux);
9233 	if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
9234 		return arch_scale_freq_capacity(cpu);
9235 	else
9236 		return SCX_CPUPERF_ONE;
9237 }
9238 
9239 /**
9240  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
9241  * @cpu: CPU of interest
9242  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
9243  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9244  *
9245  * Set the target performance level of @cpu to @perf. @perf is in linear
9246  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
9247  * schedutil cpufreq governor chooses the target frequency.
9248  *
9249  * The actual performance level chosen, CPU grouping, and the overhead and
9250  * latency of the operations are dependent on the hardware and cpufreq driver in
9251  * use. Consult hardware and cpufreq documentation for more information. The
9252  * current performance level can be monitored using scx_bpf_cpuperf_cur().
9253  */
9254 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf, const struct bpf_prog_aux *aux)
9255 {
9256 	struct scx_sched *sch;
9257 
9258 	guard(rcu)();
9259 
9260 	sch = scx_prog_sched(aux);
9261 	if (unlikely(!sch))
9262 		return;
9263 
9264 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
9265 		scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu);
9266 		return;
9267 	}
9268 
9269 	if (ops_cpu_valid(sch, cpu, NULL)) {
9270 		struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
9271 		struct rq_flags rf;
9272 
9273 		/*
9274 		 * When called with an rq lock held, restrict the operation
9275 		 * to the corresponding CPU to prevent ABBA deadlocks.
9276 		 */
9277 		if (locked_rq && rq != locked_rq) {
9278 			scx_error(sch, "Invalid target CPU %d", cpu);
9279 			return;
9280 		}
9281 
9282 		/*
9283 		 * If no rq lock is held, allow to operate on any CPU by
9284 		 * acquiring the corresponding rq lock.
9285 		 */
9286 		if (!locked_rq) {
9287 			rq_lock_irqsave(rq, &rf);
9288 			update_rq_clock(rq);
9289 		}
9290 
9291 		rq->scx.cpuperf_target = perf;
9292 		cpufreq_update_util(rq, 0);
9293 
9294 		if (!locked_rq)
9295 			rq_unlock_irqrestore(rq, &rf);
9296 	}
9297 }
9298 
9299 /**
9300  * scx_bpf_nr_node_ids - Return the number of possible node IDs
9301  *
9302  * All valid node IDs in the system are smaller than the returned value.
9303  */
9304 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
9305 {
9306 	return nr_node_ids;
9307 }
9308 
9309 /**
9310  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
9311  *
9312  * All valid CPU IDs in the system are smaller than the returned value.
9313  */
9314 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
9315 {
9316 	return nr_cpu_ids;
9317 }
9318 
9319 /**
9320  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
9321  */
9322 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
9323 {
9324 	return cpu_possible_mask;
9325 }
9326 
9327 /**
9328  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
9329  */
9330 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
9331 {
9332 	return cpu_online_mask;
9333 }
9334 
9335 /**
9336  * scx_bpf_put_cpumask - Release a possible/online cpumask
9337  * @cpumask: cpumask to release
9338  */
9339 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
9340 {
9341 	/*
9342 	 * Empty function body because we aren't actually acquiring or releasing
9343 	 * a reference to a global cpumask, which is read-only in the caller and
9344 	 * is never released. The acquire / release semantics here are just used
9345 	 * to make the cpumask is a trusted pointer in the caller.
9346 	 */
9347 }
9348 
9349 /**
9350  * scx_bpf_task_running - Is task currently running?
9351  * @p: task of interest
9352  */
9353 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
9354 {
9355 	return task_rq(p)->curr == p;
9356 }
9357 
9358 /**
9359  * scx_bpf_task_cpu - CPU a task is currently associated with
9360  * @p: task of interest
9361  */
9362 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
9363 {
9364 	return task_cpu(p);
9365 }
9366 
9367 /**
9368  * scx_bpf_cpu_rq - Fetch the rq of a CPU
9369  * @cpu: CPU of the rq
9370  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9371  */
9372 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu, const struct bpf_prog_aux *aux)
9373 {
9374 	struct scx_sched *sch;
9375 
9376 	guard(rcu)();
9377 
9378 	sch = scx_prog_sched(aux);
9379 	if (unlikely(!sch))
9380 		return NULL;
9381 
9382 	if (!ops_cpu_valid(sch, cpu, NULL))
9383 		return NULL;
9384 
9385 	if (!sch->warned_deprecated_rq) {
9386 		printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; "
9387 				"use scx_bpf_locked_rq() when holding rq lock "
9388 				"or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__);
9389 		sch->warned_deprecated_rq = true;
9390 	}
9391 
9392 	return cpu_rq(cpu);
9393 }
9394 
9395 /**
9396  * scx_bpf_locked_rq - Return the rq currently locked by SCX
9397  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9398  *
9399  * Returns the rq if a rq lock is currently held by SCX.
9400  * Otherwise emits an error and returns NULL.
9401  */
9402 __bpf_kfunc struct rq *scx_bpf_locked_rq(const struct bpf_prog_aux *aux)
9403 {
9404 	struct scx_sched *sch;
9405 	struct rq *rq;
9406 
9407 	guard(preempt)();
9408 
9409 	sch = scx_prog_sched(aux);
9410 	if (unlikely(!sch))
9411 		return NULL;
9412 
9413 	rq = scx_locked_rq();
9414 	if (!rq) {
9415 		scx_error(sch, "accessing rq without holding rq lock");
9416 		return NULL;
9417 	}
9418 
9419 	return rq;
9420 }
9421 
9422 /**
9423  * scx_bpf_cpu_curr - Return remote CPU's curr task
9424  * @cpu: CPU of interest
9425  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9426  *
9427  * Callers must hold RCU read lock (KF_RCU).
9428  */
9429 __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu, const struct bpf_prog_aux *aux)
9430 {
9431 	struct scx_sched *sch;
9432 
9433 	guard(rcu)();
9434 
9435 	sch = scx_prog_sched(aux);
9436 	if (unlikely(!sch))
9437 		return NULL;
9438 
9439 	if (!ops_cpu_valid(sch, cpu, NULL))
9440 		return NULL;
9441 
9442 	return rcu_dereference(cpu_rq(cpu)->curr);
9443 }
9444 
9445 /**
9446  * scx_bpf_now - Returns a high-performance monotonically non-decreasing
9447  * clock for the current CPU. The clock returned is in nanoseconds.
9448  *
9449  * It provides the following properties:
9450  *
9451  * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
9452  *  to account for execution time and track tasks' runtime properties.
9453  *  Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
9454  *  eventually reads a hardware timestamp counter -- is neither performant nor
9455  *  scalable. scx_bpf_now() aims to provide a high-performance clock by
9456  *  using the rq clock in the scheduler core whenever possible.
9457  *
9458  * 2) High enough resolution for the BPF scheduler use cases: In most BPF
9459  *  scheduler use cases, the required clock resolution is lower than the most
9460  *  accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
9461  *  uses the rq clock in the scheduler core whenever it is valid. It considers
9462  *  that the rq clock is valid from the time the rq clock is updated
9463  *  (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
9464  *
9465  * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
9466  *  guarantees the clock never goes backward when comparing them in the same
9467  *  CPU. On the other hand, when comparing clocks in different CPUs, there
9468  *  is no such guarantee -- the clock can go backward. It provides a
9469  *  monotonically *non-decreasing* clock so that it would provide the same
9470  *  clock values in two different scx_bpf_now() calls in the same CPU
9471  *  during the same period of when the rq clock is valid.
9472  */
9473 __bpf_kfunc u64 scx_bpf_now(void)
9474 {
9475 	struct rq *rq;
9476 	u64 clock;
9477 
9478 	preempt_disable();
9479 
9480 	rq = this_rq();
9481 	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
9482 		/*
9483 		 * If the rq clock is valid, use the cached rq clock.
9484 		 *
9485 		 * Note that scx_bpf_now() is re-entrant between a process
9486 		 * context and an interrupt context (e.g., timer interrupt).
9487 		 * However, we don't need to consider the race between them
9488 		 * because such race is not observable from a caller.
9489 		 */
9490 		clock = READ_ONCE(rq->scx.clock);
9491 	} else {
9492 		/*
9493 		 * Otherwise, return a fresh rq clock.
9494 		 *
9495 		 * The rq clock is updated outside of the rq lock.
9496 		 * In this case, keep the updated rq clock invalid so the next
9497 		 * kfunc call outside the rq lock gets a fresh rq clock.
9498 		 */
9499 		clock = sched_clock_cpu(cpu_of(rq));
9500 	}
9501 
9502 	preempt_enable();
9503 
9504 	return clock;
9505 }
9506 
9507 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
9508 {
9509 	struct scx_event_stats *e_cpu;
9510 	int cpu;
9511 
9512 	/* Aggregate per-CPU event counters into @events. */
9513 	memset(events, 0, sizeof(*events));
9514 	for_each_possible_cpu(cpu) {
9515 		e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
9516 		scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
9517 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
9518 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
9519 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
9520 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
9521 		scx_agg_event(events, e_cpu, SCX_EV_REENQ_IMMED);
9522 		scx_agg_event(events, e_cpu, SCX_EV_REENQ_LOCAL_REPEAT);
9523 		scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
9524 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
9525 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
9526 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
9527 		scx_agg_event(events, e_cpu, SCX_EV_INSERT_NOT_OWNED);
9528 		scx_agg_event(events, e_cpu, SCX_EV_SUB_BYPASS_DISPATCH);
9529 	}
9530 }
9531 
9532 /*
9533  * scx_bpf_events - Get a system-wide event counter to
9534  * @events: output buffer from a BPF program
9535  * @events__sz: @events len, must end in '__sz'' for the verifier
9536  */
9537 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
9538 				size_t events__sz)
9539 {
9540 	struct scx_sched *sch;
9541 	struct scx_event_stats e_sys;
9542 
9543 	rcu_read_lock();
9544 	sch = rcu_dereference(scx_root);
9545 	if (sch)
9546 		scx_read_events(sch, &e_sys);
9547 	else
9548 		memset(&e_sys, 0, sizeof(e_sys));
9549 	rcu_read_unlock();
9550 
9551 	/*
9552 	 * We cannot entirely trust a BPF-provided size since a BPF program
9553 	 * might be compiled against a different vmlinux.h, of which
9554 	 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
9555 	 * (an older vmlinux.h). Hence, we use the smaller size to avoid
9556 	 * memory corruption.
9557 	 */
9558 	events__sz = min(events__sz, sizeof(*events));
9559 	memcpy(events, &e_sys, events__sz);
9560 }
9561 
9562 #ifdef CONFIG_CGROUP_SCHED
9563 /**
9564  * scx_bpf_task_cgroup - Return the sched cgroup of a task
9565  * @p: task of interest
9566  * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9567  *
9568  * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
9569  * from the scheduler's POV. SCX operations should use this function to
9570  * determine @p's current cgroup as, unlike following @p->cgroups,
9571  * @p->sched_task_group is stable for the duration of the SCX op. See
9572  * SCX_CALL_OP_TASK() for details.
9573  */
9574 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p,
9575 					       const struct bpf_prog_aux *aux)
9576 {
9577 	struct task_group *tg = p->sched_task_group;
9578 	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
9579 	struct scx_sched *sch;
9580 
9581 	guard(rcu)();
9582 
9583 	sch = scx_prog_sched(aux);
9584 	if (unlikely(!sch))
9585 		goto out;
9586 
9587 	if (!scx_kf_arg_task_ok(sch, p))
9588 		goto out;
9589 
9590 	cgrp = tg_cgrp(tg);
9591 
9592 out:
9593 	cgroup_get(cgrp);
9594 	return cgrp;
9595 }
9596 #endif	/* CONFIG_CGROUP_SCHED */
9597 
9598 __bpf_kfunc_end_defs();
9599 
9600 BTF_KFUNCS_START(scx_kfunc_ids_any)
9601 BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_IMPLICIT_ARGS | KF_RCU);
9602 BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_IMPLICIT_ARGS | KF_RCU);
9603 BTF_ID_FLAGS(func, scx_bpf_kick_cpu, KF_IMPLICIT_ARGS)
9604 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued, KF_IMPLICIT_ARGS)
9605 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq, KF_IMPLICIT_ARGS)
9606 BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_IMPLICIT_ARGS | KF_RCU_PROTECTED | KF_RET_NULL)
9607 BTF_ID_FLAGS(func, scx_bpf_dsq_reenq, KF_IMPLICIT_ARGS)
9608 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2, KF_IMPLICIT_ARGS)
9609 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_IMPLICIT_ARGS | KF_ITER_NEW | KF_RCU_PROTECTED)
9610 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
9611 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
9612 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_IMPLICIT_ARGS)
9613 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_IMPLICIT_ARGS)
9614 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_IMPLICIT_ARGS)
9615 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap, KF_IMPLICIT_ARGS)
9616 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur, KF_IMPLICIT_ARGS)
9617 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set, KF_IMPLICIT_ARGS)
9618 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
9619 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
9620 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
9621 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
9622 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
9623 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
9624 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
9625 BTF_ID_FLAGS(func, scx_bpf_cpu_rq, KF_IMPLICIT_ARGS)
9626 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_IMPLICIT_ARGS | KF_RET_NULL)
9627 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_IMPLICIT_ARGS | KF_RET_NULL | KF_RCU_PROTECTED)
9628 BTF_ID_FLAGS(func, scx_bpf_now)
9629 BTF_ID_FLAGS(func, scx_bpf_events)
9630 #ifdef CONFIG_CGROUP_SCHED
9631 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_IMPLICIT_ARGS | KF_RCU | KF_ACQUIRE)
9632 #endif
9633 BTF_KFUNCS_END(scx_kfunc_ids_any)
9634 
9635 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
9636 	.owner			= THIS_MODULE,
9637 	.set			= &scx_kfunc_ids_any,
9638 	.filter			= scx_kfunc_context_filter,
9639 };
9640 
9641 /*
9642  * Per-op kfunc allow flags. Each bit corresponds to a context-sensitive kfunc
9643  * group; an op may permit zero or more groups, with the union expressed in
9644  * scx_kf_allow_flags[]. The verifier-time filter (scx_kfunc_context_filter())
9645  * consults this table to decide whether a context-sensitive kfunc is callable
9646  * from a given SCX op.
9647  */
9648 enum scx_kf_allow_flags {
9649 	SCX_KF_ALLOW_UNLOCKED		= 1 << 0,
9650 	SCX_KF_ALLOW_CPU_RELEASE	= 1 << 1,
9651 	SCX_KF_ALLOW_DISPATCH		= 1 << 2,
9652 	SCX_KF_ALLOW_ENQUEUE		= 1 << 3,
9653 	SCX_KF_ALLOW_SELECT_CPU		= 1 << 4,
9654 };
9655 
9656 /*
9657  * Map each SCX op to the union of kfunc groups it permits, indexed by
9658  * SCX_OP_IDX(op). Ops not listed only permit kfuncs that are not
9659  * context-sensitive.
9660  */
9661 static const u32 scx_kf_allow_flags[] = {
9662 	[SCX_OP_IDX(select_cpu)]	= SCX_KF_ALLOW_SELECT_CPU | SCX_KF_ALLOW_ENQUEUE,
9663 	[SCX_OP_IDX(enqueue)]		= SCX_KF_ALLOW_SELECT_CPU | SCX_KF_ALLOW_ENQUEUE,
9664 	[SCX_OP_IDX(dispatch)]		= SCX_KF_ALLOW_ENQUEUE | SCX_KF_ALLOW_DISPATCH,
9665 	[SCX_OP_IDX(cpu_release)]	= SCX_KF_ALLOW_CPU_RELEASE,
9666 	[SCX_OP_IDX(init_task)]		= SCX_KF_ALLOW_UNLOCKED,
9667 	[SCX_OP_IDX(dump)]		= SCX_KF_ALLOW_UNLOCKED,
9668 #ifdef CONFIG_EXT_GROUP_SCHED
9669 	[SCX_OP_IDX(cgroup_init)]	= SCX_KF_ALLOW_UNLOCKED,
9670 	[SCX_OP_IDX(cgroup_exit)]	= SCX_KF_ALLOW_UNLOCKED,
9671 	[SCX_OP_IDX(cgroup_prep_move)]	= SCX_KF_ALLOW_UNLOCKED,
9672 	[SCX_OP_IDX(cgroup_cancel_move)] = SCX_KF_ALLOW_UNLOCKED,
9673 	[SCX_OP_IDX(cgroup_set_weight)]	= SCX_KF_ALLOW_UNLOCKED,
9674 	[SCX_OP_IDX(cgroup_set_bandwidth)] = SCX_KF_ALLOW_UNLOCKED,
9675 	[SCX_OP_IDX(cgroup_set_idle)]	= SCX_KF_ALLOW_UNLOCKED,
9676 #endif	/* CONFIG_EXT_GROUP_SCHED */
9677 	[SCX_OP_IDX(sub_attach)]	= SCX_KF_ALLOW_UNLOCKED,
9678 	[SCX_OP_IDX(sub_detach)]	= SCX_KF_ALLOW_UNLOCKED,
9679 	[SCX_OP_IDX(cpu_online)]	= SCX_KF_ALLOW_UNLOCKED,
9680 	[SCX_OP_IDX(cpu_offline)]	= SCX_KF_ALLOW_UNLOCKED,
9681 	[SCX_OP_IDX(init)]		= SCX_KF_ALLOW_UNLOCKED,
9682 	[SCX_OP_IDX(exit)]		= SCX_KF_ALLOW_UNLOCKED,
9683 };
9684 
9685 /*
9686  * Verifier-time filter for SCX kfuncs. Registered via the .filter field on
9687  * each per-group btf_kfunc_id_set. The BPF core invokes this for every kfunc
9688  * call in the registered hook (BPF_PROG_TYPE_STRUCT_OPS or
9689  * BPF_PROG_TYPE_SYSCALL), regardless of which set originally introduced the
9690  * kfunc - so the filter must short-circuit on kfuncs it doesn't govern by
9691  * falling through to "allow" when none of the SCX sets contain the kfunc.
9692  */
9693 int scx_kfunc_context_filter(const struct bpf_prog *prog, u32 kfunc_id)
9694 {
9695 	bool in_unlocked = btf_id_set8_contains(&scx_kfunc_ids_unlocked, kfunc_id);
9696 	bool in_select_cpu = btf_id_set8_contains(&scx_kfunc_ids_select_cpu, kfunc_id);
9697 	bool in_enqueue = btf_id_set8_contains(&scx_kfunc_ids_enqueue_dispatch, kfunc_id);
9698 	bool in_dispatch = btf_id_set8_contains(&scx_kfunc_ids_dispatch, kfunc_id);
9699 	bool in_cpu_release = btf_id_set8_contains(&scx_kfunc_ids_cpu_release, kfunc_id);
9700 	bool in_idle = btf_id_set8_contains(&scx_kfunc_ids_idle, kfunc_id);
9701 	bool in_any = btf_id_set8_contains(&scx_kfunc_ids_any, kfunc_id);
9702 	u32 moff, flags;
9703 
9704 	/* Not an SCX kfunc - allow. */
9705 	if (!(in_unlocked || in_select_cpu || in_enqueue || in_dispatch ||
9706 	      in_cpu_release || in_idle || in_any))
9707 		return 0;
9708 
9709 	/* SYSCALL progs (e.g. BPF test_run()) may call unlocked and select_cpu kfuncs. */
9710 	if (prog->type == BPF_PROG_TYPE_SYSCALL)
9711 		return (in_unlocked || in_select_cpu || in_idle || in_any) ? 0 : -EACCES;
9712 
9713 	if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
9714 		return (in_any || in_idle) ? 0 : -EACCES;
9715 
9716 	/*
9717 	 * add_subprog_and_kfunc() collects all kfunc calls, including dead code
9718 	 * guarded by bpf_ksym_exists(), before check_attach_btf_id() sets
9719 	 * prog->aux->st_ops. Allow all kfuncs when st_ops is not yet set;
9720 	 * do_check_main() re-runs the filter with st_ops set and enforces the
9721 	 * actual restrictions.
9722 	 */
9723 	if (!prog->aux->st_ops)
9724 		return 0;
9725 
9726 	/*
9727 	 * Non-SCX struct_ops: SCX kfuncs are not permitted.
9728 	 */
9729 	if (prog->aux->st_ops != &bpf_sched_ext_ops)
9730 		return -EACCES;
9731 
9732 	/* SCX struct_ops: check the per-op allow list. */
9733 	if (in_any || in_idle)
9734 		return 0;
9735 
9736 	moff = prog->aux->attach_st_ops_member_off;
9737 	flags = scx_kf_allow_flags[SCX_MOFF_IDX(moff)];
9738 
9739 	if ((flags & SCX_KF_ALLOW_UNLOCKED) && in_unlocked)
9740 		return 0;
9741 	if ((flags & SCX_KF_ALLOW_CPU_RELEASE) && in_cpu_release)
9742 		return 0;
9743 	if ((flags & SCX_KF_ALLOW_DISPATCH) && in_dispatch)
9744 		return 0;
9745 	if ((flags & SCX_KF_ALLOW_ENQUEUE) && in_enqueue)
9746 		return 0;
9747 	if ((flags & SCX_KF_ALLOW_SELECT_CPU) && in_select_cpu)
9748 		return 0;
9749 
9750 	return -EACCES;
9751 }
9752 
9753 static int __init scx_init(void)
9754 {
9755 	int ret;
9756 
9757 	/*
9758 	 * kfunc registration can't be done from init_sched_ext_class() as
9759 	 * register_btf_kfunc_id_set() needs most of the system to be up.
9760 	 *
9761 	 * Some kfuncs are context-sensitive and can only be called from
9762 	 * specific SCX ops. They are grouped into per-context BTF sets, each
9763 	 * registered with scx_kfunc_context_filter as its .filter callback. The
9764 	 * BPF core dedups identical filter pointers per hook
9765 	 * (btf_populate_kfunc_set()), so the filter is invoked exactly once per
9766 	 * kfunc lookup; it consults scx_kf_allow_flags[] to enforce per-op
9767 	 * restrictions at verify time.
9768 	 */
9769 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9770 					     &scx_kfunc_set_enqueue_dispatch)) ||
9771 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9772 					     &scx_kfunc_set_dispatch)) ||
9773 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9774 					     &scx_kfunc_set_cpu_release)) ||
9775 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9776 					     &scx_kfunc_set_unlocked)) ||
9777 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
9778 					     &scx_kfunc_set_unlocked)) ||
9779 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9780 					     &scx_kfunc_set_any)) ||
9781 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
9782 					     &scx_kfunc_set_any)) ||
9783 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
9784 					     &scx_kfunc_set_any))) {
9785 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
9786 		return ret;
9787 	}
9788 
9789 	ret = scx_idle_init();
9790 	if (ret) {
9791 		pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
9792 		return ret;
9793 	}
9794 
9795 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
9796 	if (ret) {
9797 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
9798 		return ret;
9799 	}
9800 
9801 	ret = register_pm_notifier(&scx_pm_notifier);
9802 	if (ret) {
9803 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
9804 		return ret;
9805 	}
9806 
9807 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
9808 	if (!scx_kset) {
9809 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
9810 		return -ENOMEM;
9811 	}
9812 
9813 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
9814 	if (ret < 0) {
9815 		pr_err("sched_ext: Failed to add global attributes\n");
9816 		return ret;
9817 	}
9818 
9819 	return 0;
9820 }
9821 __initcall(scx_init);
9822